Almost there

This commit is contained in:
paezner
2019-01-23 17:58:10 +01:00
parent 337c072b34
commit dde49417c4
10 changed files with 186 additions and 159 deletions

View File

@@ -2,7 +2,6 @@
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import scrapertools
from core import servertools
from core.item import Item

View File

@@ -7,20 +7,18 @@ from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'https://www.porn300.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/es/videos/"))
itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="peliculas", url=host + "/es/mas-vistos/"))
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="peliculas", url=host + "/es/mas-votados/"))
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/es/videos/"))
itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="lista", url=host + "/es/mas-vistos/"))
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/es/mas-votados/"))
itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/es/canales/"))
itemlist.append( Item(channel=item.channel, title="Pornstars" , action="catalogo", url=host + "/es/pornostars/?sort=views"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/es/categorias/?sort=videos"))
itemlist.append( Item(channel=item.channel, title="Pornstars" , action="categorias", url=host + "/es/pornostars/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/es/categorias/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -30,7 +28,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host + "/es/buscar/?q=%s" % texto
try:
return peliculas(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -38,63 +36,56 @@ def search(item, texto):
return []
def catalogo(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a itemprop="url" href="([^"]+)".*?title="([^"]+)">.*?<img itemprop="image" src=([^"]+) alt=.*?</svg>\s+([^"]+) </li>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad +")"
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<a class="btn btn-primary--light btn-pagination" itemprop="name" href="([^"]+)" title="Siguiente">')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="catalogo" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a itemprop="url" href="([^"]+)".*?title="([^"]+)">.*?<img itemprop="image" src="([^"]+)".*?</svg>([^"]+) </small>'
patron = '<a itemprop="url" href="([^"]+)".*?'
patron += 'title="([^"]+)">.*?'
if "/pornostars/" in item.url:
patron += '<img itemprop="image" src=([^"]+) alt=.*?'
patron += '</svg>([^<]+)<'
else:
patron += '<img itemprop="image" src="([^"]+)" alt=.*?'
patron += '</svg>([^<]+)<'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
scrapedplot = ""
cantidad = re.compile("\s+", re.DOTALL).sub(" ", cantidad)
scrapedtitle = scrapedtitle + " (" + cantidad +")"
scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/?sort=latest"
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<a class="btn btn-primary--light btn-pagination" itemprop="name" href="([^"]+)" title="Siguiente">')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="categorias" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)" />')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a itemprop="url" href="([^"]+)" data-video-id="\d+" title="([^"]+)">.*?<img itemprop="thumbnailUrl" src="([^"]+)".*?</svg>\s+(.*?) </li>'
patron = '<a itemprop="url" href="([^"]+)" data-video-id="\d+" title="([^"]+)">.*?'
patron += '<img itemprop="thumbnailUrl" src="([^"]+)".*?'
patron += '</svg>([^<]+)<'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail,duracion in matches:
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
url = urlparse.urljoin(item.url,scrapedurl)
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
cantidad = re.compile("\s+", re.DOTALL).sub(" ", cantidad)
title = "[COLOR yellow]" + cantidad + "[/COLOR] " + scrapedtitle
contentTitle = title
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<a class="btn btn-primary--light btn-pagination" itemprop="name" href="([^"]+)" title="Siguiente">')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail,
plot=plot, contentTitle = contentTitle) )
next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)" />')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist
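
# Editor's note: across these hunks the pagination block converges on one pattern:
# read the canonical <link rel="next"> tag and clone the current item instead of
# rebuilding it. A minimal sketch of that pattern as a standalone helper; the name
# add_next_page is illustrative only and not part of this commit, and it assumes the
# scrapertools/urlparse imports already used above.
def add_next_page(item, itemlist, data, action="lista"):
    # Prefer the canonical <link rel="next"> tag over scraping the
    # per-template "Siguiente" button markup.
    next_page = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
    if next_page != "":
        next_page = urlparse.urljoin(item.url, next_page)
        itemlist.append(item.clone(action=action, title="Página Siguiente >>",
                                   text_color="blue", url=next_page))
    return itemlist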
def play(item):

View File

@@ -2,25 +2,21 @@
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb
host = 'http://porneq.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Ultimos" , action="peliculas", url=host + "/videos/browse/"))
itemlist.append( Item(channel=item.channel, title="Mas Vistos" , action="peliculas", url=host + "/videos/most-viewed/"))
itemlist.append( Item(channel=item.channel, title="Mas Votado" , action="peliculas", url=host + "/videos/most-liked/"))
itemlist.append( Item(channel=item.channel, title="Big Tits" , action="peliculas", url=host + "/show/big+tits&sort=w"))
itemlist.append( Item(channel=item.channel, title="Ultimos" , action="lista", url=host + "/videos/browse/"))
itemlist.append( Item(channel=item.channel, title="Mas Vistos" , action="lista", url=host + "/videos/most-viewed/"))
itemlist.append( Item(channel=item.channel, title="Mas Votado" , action="lista", url=host + "/videos/most-liked/"))
itemlist.append( Item(channel=item.channel, title="Big Tits" , action="lista", url=host + "/show/big+tits&sort=w"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -30,7 +26,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host + "/show/%s" % texto
try:
return peliculas(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -38,20 +34,23 @@ def search(item, texto):
return []
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a class="clip-link" data-id="\d+" title="([^"]+)" href="([^"]+)">.*?<img src="([^"]+)".*?<span class="timer">(.*?)</span></div>'
patron = '<a class="clip-link" data-id="\d+" title="([^"]+)" href="([^"]+)">.*?'
patron += '<img src="([^"]+)".*?'
patron += '<span class="timer">(.*?)</span></div>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedtitle,scrapedurl,scrapedthumbnail,scrapedtime in matches:
scrapedplot = ""
scrapedtitle = "[COLOR yellow]" + (scrapedtime) + "[/COLOR] " + scrapedtitle
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<nav id="page_nav"><a href="(.*?)"')
if next_page_url!="":
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<nav id="page_nav"><a href="(.*?)"')
if next_page !="":
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist
@@ -59,10 +58,7 @@ def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '"video-setup".*?file: "(.*?)",'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl in matches:
scrapedurl = str(scrapedurl)
scrapedurl = scrapertools.find_single_match(data,'<source src="([^"]+)"')
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
return itemlist

View File

@@ -2,23 +2,19 @@
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb
host = 'http://www.pornhive.tv/en'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="MOVIES" , action="peliculas", url=host))
# Doesn't find the videos: the site has a captcha
# itemlist.append( Item(channel=item.channel, title="CHANNELS" , action="catalogo", url=host))
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host))
itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -29,7 +25,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host + "/search?keyword=%s" % texto
try:
return peliculas(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -41,18 +37,22 @@ def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'Categories(.*?)<li class="divider"')
if item.title == "Categorias" :
data = scrapertools.get_match(data,'Categories(.*?)Channels')
else:
data = scrapertools.get_match(data,'Channels(.*?)</ul>')
patron = '<li><a href="([^"]+)" title="[^"]+">(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
@@ -66,11 +66,11 @@ def peliculas(item):
title = scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle=title, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<li><a href="([^"]+)" data-ci-pagination-page="\d+" rel="next">Next &rsaquo;')
if next_page_url!="":
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="play" , title=title, url=scrapedurl, thumbnail=thumbnail,
plot=plot, contentTitle=title))
next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)" data-ci-pagination-page="\d+" rel="next">Next &rsaquo;')
if next_page != "" :
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist
@@ -79,9 +79,8 @@ def play(item):
itemlist = servertools.find_video_items(data=item.url)
data = scrapertools.cachePage(item.url)
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.fulltitle
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videochannel=item.channel

View File

@@ -14,7 +14,7 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, action="peliculas", title="Novedades", fanart=item.fanart,
url="http://es.pornhub.com/video?o=cm"))
itemlist.append(Item(channel=item.channel, action="categorias", title="Categorias", fanart=item.fanart,
url="http://es.pornhub.com/categories?o=al"))
url="http://es.pornhub.com/categories"))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar", fanart=item.fanart,
url="http://es.pornhub.com/video/search?search=%s&o=mr"))
return itemlist
@@ -38,19 +38,18 @@ def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, '<div id="categoriesStraightImages">(.*?)</ul>')
patron = '<li class="cat_pic" data-category=".*?'
patron += '<a href="([^"]+)".*?'
patron += 'src="([^"]+)".*?'
patron += 'alt="([^"]+)"'
patron += 'data-thumb_url="(.*?)".*?'
patron += 'alt="([^"]+)".*?'
patron += '<var>(.*?)</var>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
for scrapedurl, scrapedthumbnail, scrapedtitle, cantidad in matches:
if "?" in scrapedurl:
url = urlparse.urljoin(item.url, scrapedurl + "&o=cm")
else:
url = urlparse.urljoin(item.url, scrapedurl + "?o=cm")
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=url, fanart=item.fanart,
thumbnail=scrapedthumbnail))
itemlist.sort(key=lambda x: x.title)
@@ -64,10 +63,10 @@ def peliculas(item):
videodata = scrapertools.find_single_match(data, 'videos search-video-thumbs">(.*?)<div class="reset"></div>')
patron = '<div class="phimage">.*?'
patron += '<a href="([^"]+)" title="([^"]+).*?'
patron += '<var class="duration">([^<]+)</var>(.*?)</div>.*?'
patron += 'data-mediumthumb="([^"]+)"'
patron += 'data-mediumthumb="([^"]+)".*?'
patron += '<var class="duration">([^<]+)</var>(.*?)</div>'
matches = re.compile(patron, re.DOTALL).findall(videodata)
for url, scrapedtitle, duration, scrapedhd, thumbnail in matches:
for url, scrapedtitle, thumbnail, duration, scrapedhd in matches:
title = "(" + duration + ") " + scrapedtitle.replace("&amp;amp;", "&amp;")
scrapedhd = scrapertools.find_single_match(scrapedhd, '<span class="hd-thumbnail">(.*?)</span>')
if scrapedhd == 'HD':
@@ -90,7 +89,7 @@ def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
patron = '"defaultQuality":true,"format":"","quality":"\d+","videoUrl":"(.*?)"'
patron = '"defaultQuality":true,"format":"mp4","quality":"\d+","videoUrl":"(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl in matches:
url = scrapedurl.replace("\/", "/")

View File

@@ -7,20 +7,15 @@ from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'https://www.pornrewind.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevos" , action="peliculas", url=host + "/videos/?sort_by=post_date"))
itemlist.append( Item(channel=item.channel, title="Mejor valorados" , action="peliculas", url=host + "/videos/?sort_by=rating"))
itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="peliculas", url=host + "/videos/?sort_by=video_viewed"))
itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/videos/?sort_by=post_date"))
itemlist.append( Item(channel=item.channel, title="Mejor valorados" , action="lista", url=host + "/videos/?sort_by=rating"))
itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="lista", url=host + "/videos/?sort_by=video_viewed"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -31,7 +26,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host + "/search/%s/" % texto
try:
return peliculas(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -49,11 +44,12 @@ def categorias(item):
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
@@ -66,11 +62,11 @@ def peliculas(item):
title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
itemlist.append( Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail,
plot=plot, contentTitle = title))
next_page = scrapertools.find_single_match(data, '<li class="direction"><a href="([^"]+)" data-ajax="pagination">')
if next_page:
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>" , text_color="blue", url=next_page ) )
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page ) )
return itemlist

View File

@@ -2,23 +2,21 @@
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb
host = 'http://qwertty.net'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Recientes" , action="peliculas", url=host))
itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="peliculas", url=host + "/?filter=most-viewed"))
itemlist.append( Item(channel=item.channel, title="Mas popular" , action="peliculas", url=host + "/?filter=popular"))
itemlist.append( Item(channel=item.channel, title="Mejor valoradas" , action="peliculas", url=host + "/?filter=random"))
itemlist.append( Item(channel=item.channel, title="Recientes" , action="lista", url=host))
itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="lista", url=host + "/?filter=most-viewed"))
itemlist.append( Item(channel=item.channel, title="Mas popular" , action="lista", url=host + "/?filter=popular"))
itemlist.append( Item(channel=item.channel, title="Mejor valoradas" , action="lista", url=host + "/?filter=random"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -28,7 +26,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host + "/?s=%s" % texto
try:
return peliculas(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -46,28 +44,33 @@ def categorias(item):
scrapedplot = ""
scrapedthumbnail = ""
scrapedurl = host + scrapedurl
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<div class="videos-list">(.*?)<div class="videos-list">')
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<article id="post-\d+".*?<a href="([^"]+)" title="([^"]+)">.*?<img data-src="(.*?)".*?<span class="duration"><i class="fa fa-clock-o"></i>([^<]+)</span>'
patron = '<article id="post-\d+".*?'
patron += '<a href="([^"]+)" title="([^"]+)">.*?'
patron += '<img data-src="(.*?)".*?'
patron += '<span class="duration"><i class="fa fa-clock-o"></i>([^<]+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail,duracion in matches:
scrapedplot = ""
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
itemlist.append( Item(channel=item.channel, action="play", title=title , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<li><a href="([^"]+)">Next</a>')
if next_page_url=="":
next_page_url = scrapertools.find_single_match(data,'<li><a class="current">.*?<li><a href=\'([^\']+)\' class="inactive">')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)">Next</a>')
if next_page=="":
next_page = scrapertools.find_single_match(data,'<li><a class="current">.*?<li><a href=\'([^\']+)\' class="inactive">')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist
@@ -75,13 +78,54 @@ def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
pornhub = scrapertools.find_single_match(data,'<iframe src="([^"]+)"')
data = scrapertools.cachePage(pornhub)
patron = '"videoUrl":"(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl in matches:
scrapedurl = scrapedurl.replace("\/", "/")
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
url = scrapertools.find_single_match(data,'<meta itemprop="embedURL" content="([^"]+)"')
url = url.replace("pornhub.com/embed/", "pornhub.com/view_video.php?viewkey=")
data = scrapertools.cachePage(url)
if "xvideos" in url :
scrapedurl = scrapertools.find_single_match(data,'setVideoHLS\(\'([^\']+)\'')
if "pornhub" in url :
scrapedurl = scrapertools.find_single_match(data,'"defaultQuality":true,"format":"mp4","quality":"\d+","videoUrl":"(.*?)"')
if "txx" in url :
video_url = scrapertools.find_single_match(data, 'var video_url="([^"]*)"')
video_url += scrapertools.find_single_match(data, 'video_url\+="([^"]*)"')
partes = video_url.split('||')
video_url = decode_url(partes[0])
video_url = re.sub('/get_file/\d+/[0-9a-z]{32}/', partes[1], video_url)
video_url += '&' if '?' in video_url else '?'
video_url += 'lip=' + partes[2] + '&lt=' + partes[3]
scrapedurl = video_url
else:
scrapedurl = scrapertools.find_single_match(data,'"quality":"\d+","videoUrl":"(.*?)"')
scrapedurl = scrapedurl.replace("\/", "/")
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
return itemlist
def decode_url(txt):
_0x52f6x15 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,~'
reto = ''; n = 0
# In the next two lines, each letter of the Cyrillic АВСЕМ takes 2 bytes; the replace converts them to 1-byte ASCII. Note: АВСЕМ (10 bytes) vs ABCEM (5 bytes)
txt = re.sub('[^АВСЕМA-Za-z0-9\.\,\~]', '', txt)
txt = txt.replace('А', 'A').replace('В', 'B').replace('С', 'C').replace('Е', 'E').replace('М', 'M')
while n < len(txt):
a = _0x52f6x15.index(txt[n])
n += 1
b = _0x52f6x15.index(txt[n])
n += 1
c = _0x52f6x15.index(txt[n])
n += 1
d = _0x52f6x15.index(txt[n])
n += 1
a = a << 2 | b >> 4
b = (b & 15) << 4 | c >> 2
e = (c & 3) << 6 | d
reto += chr(a)
if c != 64: reto += chr(b)
if d != 64: reto += chr(e)
return urllib.unquote(reto)
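
# Editor's note: decode_url appears to be plain base64 with a custom alphabet
# ('.' and ',' in place of '+' and '/', '~' as the padding character) followed by
# URL-unquoting, after normalizing the Cyrillic look-alike letters. Under that
# assumption, an equivalent sketch using the stdlib; decode_url_b64 is a
# hypothetical name, not part of this commit.
import re, base64, urllib

def decode_url_b64(txt):
    # Assumption: same transform as decode_url above, expressed via base64.
    txt = re.sub('[^АВСЕМA-Za-z0-9\.\,\~]', '', txt)
    # Map the 2-byte Cyrillic look-alikes to their ASCII counterparts.
    txt = txt.replace('А', 'A').replace('В', 'B').replace('С', 'C').replace('Е', 'E').replace('М', 'M')
    # '.' -> '+', ',' -> '/', '~' -> '=' restores the standard base64 alphabet.
    txt = txt.replace('.', '+').replace(',', '/').replace('~', '=')
    return urllib.unquote(base64.b64decode(txt))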

View File

@@ -4,7 +4,7 @@
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://sexgalaxy.net/wp-content/uploads/2016/11/logogalaxy_red.png",
"thumbnail": "https://sexgalaxy.net/wp-content/themes/redwaves-lite/images/logo.png",
"banner": "",
"categories": [
"adult"

View File

@@ -14,8 +14,8 @@ host = 'http://sexgalaxy.net'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Ultimos" , action="peliculas", url=host + "/new-releases/"))
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host + "/full-movies/"))
itemlist.append( Item(channel=item.channel, title="Ultimos" , action="lista", url=host + "/new-releases/"))
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/full-movies/"))
itemlist.append( Item(channel=item.channel, title="Canales" , action="canales", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar" , action="search"))
@@ -27,7 +27,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host + "/?s=%s" % texto
try:
return peliculas(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -47,7 +47,8 @@ def canales (item):
scrapedthumbnail = ""
scrapedtitle = str(scrapedtitle)
thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
@@ -63,11 +64,12 @@ def categorias(item):
scrapedthumbnail = ""
scrapedtitle = str(scrapedtitle)
thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
@@ -75,10 +77,14 @@ def peliculas(item):
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , fulltitle=scrapedtitle , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<a class="next page-numbers" href="([^"]+)"')
if next_page_url!="":
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Next page >>" , text_color="blue", url=next_page_url , folder=True) )
calidad = scrapertools.find_single_match(scrapedtitle,'\(.*?/(\w+)\)')
if calidad:
scrapedtitle = "[COLOR red]" + calidad + "[/COLOR] " + scrapedtitle
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, fulltitle=scrapedtitle, plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<a class="next page-numbers" href="([^"]+)"')
if next_page!="":
itemlist.append(item.clone(action="lista", title="Next page >>", text_color="blue", url=next_page) )
return itemlist

View File

@@ -2,13 +2,11 @@
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb
host = 'http://woodrocket.com'
@@ -16,9 +14,8 @@ host = 'http://woodrocket.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Novedades" , action="peliculas", url=host + "/porn"))
itemlist.append( Item(channel=item.channel, title="Parodias" , action="peliculas", url=host + "/parodies"))
itemlist.append( Item(channel=item.channel, title="Novedades" , action="lista", url=host + "/porn"))
itemlist.append( Item(channel=item.channel, title="Parodias" , action="lista", url=host + "/parodies"))
itemlist.append( Item(channel=item.channel, title="Shows" , action="categorias", url=host + "/series"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories"))
return itemlist
@@ -33,11 +30,12 @@ def categorias(item):
for scrapedthumbnail,scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = host + scrapedthumbnail
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
@@ -50,11 +48,12 @@ def peliculas(item):
thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
title = scrapedtitle
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<li><a href="([^"]+)" rel="next">&raquo;</a></li>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail,
plot=plot, contentTitle = contentTitle))
next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)" rel="next">&raquo;</a></li>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist
@@ -62,12 +61,10 @@ def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
patron = '<iframe src="(.*?)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl in matches:
scrapedurl = scrapedurl
scrapedurl = scrapertools.find_single_match(data,'<iframe src="(.*?)"')
scrapedurl = scrapedurl.replace("pornhub.com/embed/", "pornhub.com/view_video.php?viewkey=")
data = httptools.downloadpage(scrapedurl).data
scrapedurl = scrapertools.find_single_match(data,'"quality":"\d*","videoUrl":"(.*?)"')
scrapedurl = scrapertools.find_single_match(data,'"defaultQuality":true,"format":"mp4","quality":"\d+","videoUrl":"(.*?)"')
scrapedurl = scrapedurl.replace("\/", "/")
itemlist.append(item.clone(action="play", title=scrapedurl, fulltitle = item.title, url=scrapedurl))
return itemlist