Debugging and formatting

paezner
2019-01-09 16:37:39 +01:00
parent 5a1b687b1b
commit f4088497de
13 changed files with 209 additions and 242 deletions
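
Most hunks in this commit repeat one pattern: the page fetch moves from scrapertools.cachePage(item.url) to httptools.downloadpage(item.url).data, and the long single-line Item(...) appends are wrapped onto two lines (often dropping folder=True). A minimal sketch of that pattern, assuming the addon's own core modules exactly as they are imported in the files below; patron and the scraped* names are placeholders, not code from any one channel:

import re
from core import httptools   # the addon's HTTP helper used throughout this commit
from core.item import Item   # the addon's Item class

patron = '<a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'  # placeholder regex

def peliculas(item):
    itemlist = []
    data = httptools.downloadpage(item.url).data  # replaces scrapertools.cachePage(item.url)
    for scrapedurl, scrapedtitle, scrapedthumbnail in re.compile(patron, re.DOTALL).findall(data):
        # long append calls are now wrapped onto two lines
        itemlist.append(Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
                             thumbnail=scrapedthumbnail, plot=""))
    return itemlist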

View File

@@ -19,7 +19,7 @@ def mainlist(item):
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevos" , action="peliculas", url=host + "/wall-date-1.html"))
itemlist.append( Item(channel=item.channel, title="Mejor valorados" , action="peliculas", url=host + "/wall-note-1.html"))
itemlist.append( Item(channel=item.channel, title="Mas valorados" , action="peliculas", url=host + "/wall-note-1.html"))
itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="peliculas", url=host + "/wall-main-1.html"))
itemlist.append( Item(channel=item.channel, title="Mas largos" , action="peliculas", url=host + "/wall-time-1.html"))
@@ -52,7 +52,8 @@ def categorias(item):
scrapedthumbnail = ""
scrapedurl = scrapedurl.replace(".html", "_date.html")
scrapedurl = host +"/" + scrapedurl
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
return itemlist
@@ -60,6 +61,7 @@ def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>|<br/>", "", data)
patron = '<div class="thumb-main-titre"><a href="([^"]+)".*?'
patron += 'title="([^"]+)".*?'
patron += 'src="([^"]+)".*?'
@@ -71,11 +73,13 @@ def peliculas(item):
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
contentTitle = title, infoLabels={'year':year} ))
next_page = scrapertools.find_single_match(data, '<span class="text16">\d+</span> <a href="..([^"]+)"')
if next_page:
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>" , text_color="blue", url=next_page ) )
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page ) )
return itemlist

View File

@@ -15,9 +15,8 @@ host = 'http://www.alsoporn.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host +"/g/All/new/1"))
itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/g/All/top/1"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/"))
itemlist.append( Item(channel=item.channel, title="Top" , action="peliculas", url=host + "/g/All/top/1"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -41,12 +40,14 @@ def catalogo(item):
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<h3>CLIPS</h3>(.*?)<h3>FILM</h3>')
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li><a href="([^"]+)" title="">.*?<span class="videos-count">([^"]+)</span><span class="title">([^"]+)</span>'
patron = '<li><a href="([^"]+)" title="">.*?'
patron += '<span class="videos-count">([^"]+)</span><span class="title">([^"]+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,cantidad,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
@@ -60,9 +61,9 @@ def categorias(item):
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
scrapedplot = ""
scrapedurl = scrapedurl.replace("top", "new")
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
@@ -82,23 +83,26 @@ def peliculas(item):
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, contentTitle = title, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<li><a href="([^"]+)" target="_self"><span class="alsoporn_page">NEXT</span></a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel , action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page_url , folder=True) )
return itemlist
def findvideos(item):
def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
scrapedurl = scrapertools.find_single_match(data,'<iframe frameborder=0 scrolling="no" src=\'([^\']+)\'')
data = scrapertools.cachePage(scrapedurl)
scrapedurl1 = scrapertools.find_single_match(data,'<iframe src="(.*?)"')
scrapedurl1 = scrapedurl1.replace("//www.playercdn.com/ec/i2.php?", "https://www.synergytube.xyz/ec/i2.php?")
scrapedurl1 = scrapedurl1.replace("//www.playercdn.com/ec/i2.php?", "https://www.trinitytube.xyz/ec/i2.php?")
data = scrapertools.cachePage(scrapedurl1)
scrapedurl2 = scrapertools.find_single_match(data,'<source src="(.*?)" type=\'video/mp4\'>')
scrapedurl2 = scrapertools.find_single_match(data,'<source src="(.*?)"')
itemlist.append(item.clone(action="play", title=item.title, fulltitle = item.title, url=scrapedurl2))
return itemlist
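
The rewritten play() above reaches the final stream by hopping through two embedded iframes before reading the <source src="..."> tag. A hedged generalisation of that idea, assuming the httptools and scrapertools helpers used elsewhere in this commit; it omits the playercdn-to-trinitytube host rewrite the channel itself performs, and max_hops is an arbitrary safety cap:

from core import httptools
from core import scrapertools

def resolve_nested_source(url, max_hops=3):
    # follow embedded iframes until a <source src="..."> tag shows up
    for _ in range(max_hops):
        data = httptools.downloadpage(url).data
        source = scrapertools.find_single_match(data, '<source src="(.*?)"')
        if source:
            return source
        url = scrapertools.find_single_match(data, '<iframe[^>]+src=["\']([^"\']+)["\']')
        if not url:
            break
    return ""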

View File

@@ -19,7 +19,7 @@ def mainlist(item):
itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="peliculas", url=host + "/más-visto/"))
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="peliculas", url=host + "/mejor-valorado/"))
itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url="https://www.analdin.com/categories/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categorías/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -48,11 +48,13 @@ def catalogo(item):
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
next_page_url = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">&raquo;</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="catalogo" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="catalogo", title="Página Siguiente >>", text_color="blue",
url=next_page_url) )
return itemlist
@@ -69,14 +71,15 @@ def categorias(item):
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a class="popup-video-link" href="([^"]+)".*?'
patron += 'thumb="([^"]+)".*?'
@@ -89,11 +92,13 @@ def peliculas(item):
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
contentTitle = title, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page_url) )
return itemlist
@@ -108,15 +113,3 @@ def play(item):
itemlist.append(item.clone(action="play", title=url, fulltitle = item.title, url=url))
return itemlist
# def play(item):
# logger.info()
# itemlist = []
# data = scrapertools.cachePage(item.url)
# patron = 'video_url: \'([^\']+)\''
# matches = scrapertools.find_multiple_matches(data, patron)
# for scrapedurl in matches:
# title = scrapedurl
# itemlist.append(item.clone(action="play", title=title, fulltitle = item.title, url=scrapedurl))
# return itemlist

View File

@@ -42,23 +42,28 @@ def categorias(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)" class="th">.*?<img src="([^"]+)".*?<span>([^"]+)</span>\s*(\d+) movies.*?</strong>'
patron = '<a href="([^"]+)" class="th">.*?'
patron += '<img src="([^"]+)".*?'
patron += '<span>([^"]+)</span>\s*(\d+) movies.*?</strong>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
scrapedthumbnail = "http:" + scrapedthumbnail
scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/latest/"
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class=".*?video_block"><a href="([^"]+)".*?<img src="([^"]+)".*?alt="([^"]+)".*?<span class="time">([^"]+)</span>'
patron = '<div class=".*?video_block"><a href="([^"]+)".*?'
patron += '<img src="([^"]+)".*?alt="([^"]+)".*?'
patron += '<span class="time">([^"]+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
url = urlparse.urljoin(item.url,scrapedurl)
@@ -66,21 +71,23 @@ def peliculas(item):
thumbnail = "https:" + scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, contentTitle = title, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<a href="([^"]+)" class="next" title="Next">Next</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page_url) )
return itemlist
def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
patron = '<source src="([^"]+)" type=\'video/mp4\' title="HQ" />'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl in matches:
itemlist.append(Item(channel=item.channel, action="play", title=item.title, url=scrapedurl, folder=True))
itemlist.append(Item(channel=item.channel, action="play", title=item.title, url=scrapedurl))
return itemlist

View File

@@ -13,6 +13,7 @@ from core import jsontools
host = 'https://www.camwhoresbay.com'
# EN CATALOGO Y BUSQUEDA LA PAGINACION FUNCIONA CON UN AJAX
def mainlist(item):
logger.info()
@@ -20,7 +21,6 @@ def mainlist(item):
itemlist.append( Item(channel=item.channel, title="Nuevos" , action="peliculas", url=host + "/latest-updates/"))
itemlist.append( Item(channel=item.channel, title="Mejor valorados" , action="peliculas", url=host + "/top-rated/"))
itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="peliculas", url=host + "/most-popular/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -51,7 +51,8 @@ def categorias(item):
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
@@ -70,11 +71,13 @@ def peliculas(item):
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
contentTitle = title, infoLabels={'year':year} ))
next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)"')
if next_page:
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>" , text_color="blue", url=next_page ) )
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page ) )
return itemlist

View File

@@ -47,11 +47,13 @@ def catalogo(item):
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
scrapedplot = ""
scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/movies"
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">&raquo;</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="catalogo" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="catalogo", title="Página Siguiente >>", text_color="blue",
url=next_page_url) )
return itemlist
@@ -66,14 +68,15 @@ def categorias(item):
scrapedplot = ""
scrapedtitle = scrapedtitle
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<img class=".*?" src="([^"]+)".*?<div class="tr">(.*?)</div>.*?<a href="([^"]+)\s*" class="vttl.*?">(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
@@ -83,25 +86,26 @@ def peliculas(item):
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
contentTitle = title, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">&raquo;</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel , action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page_url) )
return itemlist
def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
patron = '"url"\:"(.*?)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl in matches:
scrapedurl = scrapedurl.replace("\/", "/")
title = scrapedurl
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo"))
return itemlist

View File

@@ -7,6 +7,8 @@ from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
host ='http://www.coomelonitas.com'
@@ -34,21 +36,22 @@ def search(item, texto):
def categorias(item):
itemlist = []
data = scrapertools.cache_page(item.url)
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)">([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)">([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
patron = '<div class="all"(.*?)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
for match in matches:
@@ -56,9 +59,11 @@ def peliculas(item):
url = scrapertools.find_single_match(match,'<a href="([^"]+)"')
plot = scrapertools.find_single_match(match,'<p class="summary">(.*?)</p>')
thumbnail = scrapertools.find_single_match(match,'<img src="([^"]+)"')
itemlist.append( Item(channel=item.channel, action="findvideos", title=title , fulltitle=title, url=url , thumbnail=thumbnail , plot=plot , viewmode="movie", folder=True) )
itemlist.append( Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
thumbnail=thumbnail, plot=plot, viewmode="movie") )
next_page_url = scrapertools.find_single_match(data,'<a href="([^"]+)" class="siguiente">')
if next_page_url!="":
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page_url) )
return itemlist

View File

@@ -16,10 +16,11 @@ host = 'http://www.elreyx.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host + "/peliculasporno.html"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/peliculasporno.html"))
itemlist.append( Item(channel=item.channel, title="Escenas" , action="escenas", url=host + "/index.html"))
itemlist.append( Item(channel=item.channel, title="Productora" , action="productora", url=host + "/index.html"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/peliculasporno.html"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -27,7 +28,7 @@ def mainlist(item):
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = "http://elreyx.com/search-%s" % texto + ".html"
item.url = host + "/search-%s" % texto + ".html"
try:
return escenas(item)
except:
@@ -44,10 +45,12 @@ def productora(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^<]+)" title="View Category ([^<]+)">(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="escenas", title=scrapedtitle , url="https:" + scrapedurl , thumbnail="https:" + scrapedthumbnail , plot=scrapedplot , folder=True) )
thumbnail="https:" + scrapedthumbnail
url="https:" + scrapedurl
itemlist.append( Item(channel=item.channel, action="escenas", title=scrapedtitle, url=url, thumbnail=thumbnail,
plot=scrapedplot) )
return itemlist
@@ -58,73 +61,64 @@ def categorias(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<td><a href="([^<]+)" title="Movies ([^<]+)">.*?</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
url="https:" + scrapedurl
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url="https:" + scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=url,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def escenas(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
patron = '<div class="notice_image">.*?<a title="([^"]+)" href="([^"]+)">.*?<img src="(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedtitle,scrapedurl,scrapedthumbnail in matches:
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , url="https:" + scrapedurl , thumbnail="https:" + scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<a href=\'([^\']+)\' title=\'Pagina \d+\'><span class="visible-xs-inline">Siguiente</span> &raquo;</a>')
next_page_url = "http://www.elreyx.com/"+str(next_page_url)
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="escenas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
url="https:" + scrapedurl
thumbnail="https:" + scrapedthumbnail
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=url, thumbnail=thumbnail,
plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<li class="float-xs-right"><a href=\'([^\']+)\' title=\'Pagina \d+\'>')
if next_page == "":
next_page = scrapertools.find_single_match(data,'<li><a href=\'([^\']+)\' title=\'Pagina \d+\'>&raquo;</a>')
if next_page!= "":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel, action="escenas", title="Página Siguiente >>", text_color="blue",
url=next_page) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
patron = '<div class="captura"><a title="([^"]+)" href="([^"]+)".*?><img src="(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedtitle,scrapedurl,scrapedthumbnail in matches:
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , url="https:" + scrapedurl , thumbnail="https:" + scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<li><a href=\'([^\']+)\' title=\'Pagina \d+\'>&raquo;</a>')
next_page_url = "http://www.elreyx.com/"+str(next_page_url)
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>|<br/>", "", data)
patron = '<iframe src="(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl in matches:
scrapedplot = ""
itemlist.append(item.clone(channel=item.channel, action="play", title=scrapedurl , url=scrapedurl , plot="" , folder=True) )
patron = '<IFRAME SRC="(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl in matches:
scrapedplot = ""
itemlist.append(item.clone(channel=item.channel, action="play", title=scrapedurl , url=scrapedurl , plot="" , folder=True) )
url="https:" + scrapedurl
thumbnail="https:" + scrapedthumbnail
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=url, thumbnail=thumbnail,
plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<li><a href=\'([^\']+)\' title=\'Pagina \d+\'>&raquo;</a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel , action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page) )
return itemlist
def play(item):
logger.info()
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
url = scrapertools.find_single_match(data, '<IFRAME SRC="(.*?)"')
if url == "":
url = scrapertools.find_single_match(data,'<iframe src="(.*?)"')
data = httptools.downloadpage(url).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title
@@ -132,3 +126,4 @@ def play(item):
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist

View File

@@ -35,22 +35,6 @@ def search(item, texto):
return []
def catalogo(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<h3>CLIPS</h3>(.*?)<h3>FILM</h3>')
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a class=""\s+title="([^"]+)"\s+href="([^"]+)">'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedtitle,scrapedurl in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedurl = host + scrapedurl
if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def categorias(item):
logger.info()
itemlist = []
@@ -61,7 +45,8 @@ def categorias(item):
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
@@ -77,25 +62,16 @@ def peliculas(item):
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle=contentTitle, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>" , text_color="blue", url=next_page_url) )
itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail,
plot=plot, contentTitle=contentTitle, infoLabels={'year':year} ))
next_page = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page) )
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<div id="wrapper" class="ortala">(.*?)<div class="butonlar">')
patron = '<iframe\s+src="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl in matches:
itemlist.append( Item(action="play", title=scrapedurl, fulltitle = item.title, url=scrapedurl))
return itemlist
def play(item):
logger.info()
data = scrapertools.cachePage(item.url)

View File

@@ -40,11 +40,11 @@ def categorias(item):
data = httptools.downloadpage(item.url).data
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)".*?>([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
@@ -55,17 +55,16 @@ def peliculas(item):
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="movie-poster"><a href="([^"]+)".*?<img src="([^"]+)" alt="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
plot = ""
contentTitle = scrapedtitle
url = urlparse.urljoin(item.url,scrapedurl)
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=scrapedtitle , url=url, thumbnail=scrapedthumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=url, thumbnail=scrapedthumbnail,
plot=plot, contentTitle = scrapedtitle) )
next_page = scrapertools.find_single_match(data, '<div class="naviright"><a href="([^"]+)">Siguiente &raquo;</a>')
if next_page:
next_page = urlparse.urljoin(item.url, next_page)
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>" , text_color="blue", url=next_page ))
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page ))
return itemlist
@@ -76,10 +75,7 @@ def play(item):
url = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
url = urlparse.urljoin(item.url, url)
data = httptools.downloadpage(url).data
url = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
if url == "":
url = scrapertools.find_single_match(data, 'window.location="([^"]+)"')
itemlist = servertools.find_video_items(data=url)
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle

View File

@@ -43,41 +43,47 @@ def categorias(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="item"><a href="([^"]+)" title="([^"]+)">.*?<img src="([^"]+)">.*?<div class="right">([^"]+)</div>'
patron = '<div class="item"><a href="([^"]+)" title="([^"]+)">.*?'
patron += '<img src="([^"]+)">.*?'
patron += '<div class="right">([^"]+)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle.replace("movies", "") + " (" + cantidad + ")"
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li class="masonry-item item ".*?<a href="([^"]+)" class="kt_imgrc popfire" title="([^"]+)" >.*?<img src="([^"]+)"'
patron = '<li class="masonry-item item ".*?'
patron += '<a href="([^"]+)" class="kt_imgrc popfire" title="([^"]+)" >.*?'
patron += '<img src="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
title = scrapedtitle
contentTitle = title
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail,
plot=plot, contentTitle = contentTitle) )
next_page_url = scrapertools.find_single_match(data,'<li itemprop="url" class="current">.*?<a href="([^"]+)"')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel , action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page_url) )
return itemlist
def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
patron = '<source id="video_source_1" src="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl in matches:

View File

@@ -42,26 +42,31 @@ def catalogo(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<a href="http://freepornstreams.org/freepornst/stout.php">Top Sites</a>(.*?)</aside>')
data = scrapertools.get_match(data,'>Top Sites</a>(.*?)</aside>')
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li id="menu-item-\d+".*?u=([^"]+)">(.*?)</a>'
patron = '<li id="menu-item-\d+".*?<a href="([^"]+)">([^"]+)</a></li>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<li><a href="([^"]+)" rel="nofollow">(.*?)</a>'
data = scrapertools.get_match(data,'Top Tags(.*?)</ul>')
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)">(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
scrapedurl = scrapedurl.replace ("http://freepornstreams.org/freepornst/stout.php?s=100,75,65:*&#038;u=" , "")
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
@@ -70,24 +75,27 @@ def peliculas(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<article id="post-\d+".*?<a href="([^"]+)" rel="bookmark">(.*?)</a>.*?<img src="([^"]+)"'
patron = '<article id="post-\d+".*?'
patron += '<a href="([^"]+)" rel="bookmark">(.*?)</a>.*?'
patron += '<img src="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
title = scrapedtitle
thumbnail = scrapedthumbnail.replace("jpg#", "jpg")
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, fulltitle=title, infoLabels={'year':year} ))
itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail,
plot=plot, fulltitle=title) )
next_page_url = scrapertools.find_single_match(data, '<div class="nav-previous"><a href="([^"]+)"')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel , action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page_url) )
return itemlist
def play(item):
logger.info()
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.fulltitle
@@ -95,4 +103,4 @@ def play(item):
videoitem.thumbnail = item.thumbnail
videochannel=item.channel
return itemlist

View File

@@ -11,19 +11,18 @@ from core import tmdb
from core import jsontools
host = 'https://spankbang.xxx'
host = 'https://es.spankbang.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevos" , action="peliculas", url= host + "/new_videos/"))
itemlist.append( Item(channel=item.channel, title="Mejor valorados" , action="peliculas", url=host + "/wall-note-1.html"))
itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="peliculas", url= host + "/wall-main-1.html"))
itemlist.append( Item(channel=item.channel, title="Mas largos" , action="peliculas", url= host + "/wall-time-1.html"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Nuevos", action="peliculas", url= host + "/new_videos/"))
itemlist.append( Item(channel=item.channel, title="Mas valorados", action="peliculas", url=host + "/trending_videos/"))
itemlist.append( Item(channel=item.channel, title="Mas vistos", action="peliculas", url= host + "/most_popular/"))
itemlist.append( Item(channel=item.channel, title="Mas largos", action="peliculas", url= host + "/longest_videos/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -31,7 +30,7 @@ def mainlist(item):
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/search-%s-1.html" % texto
item.url = host + "/s/%s" % texto
try:
return peliculas(item)
except:
@@ -45,78 +44,45 @@ def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '&nbsp;<a href="([^"]+)" class="link1">([^"]+)</a>'
patron = '<a href="([^"]+)/?order=trending"><img src="([^"]+)"><span>([^"]+)</span></a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedurl = scrapedurl.replace(".html", "_date.html")
scrapedurl = host +"/" + scrapedurl
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
scrapedthumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl ,
thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
# data = httptools.downloadpage(item.url).data
data = scrapertools.cachePage(item.url)
# <div class="video-item" data-id="4652797">
# <a href="/2rq4d/video/yenlomfc" class="thumb ">
# <img src="//static.spankbang.com/static_desktop/Images/blank.png" data-src="//cdnthumb3.spankbang.com/250/4/6/4652797-t6.jpg" alt="yenlomfc" class="cover lazyload has_mp4" />
# <span class="play fa fa-play-circle-o fa-3x"></span>
# <span class="i-len"><i class="fa fa-clock-o"></i> 73</span>
# </a>
# <span class="i-wl" onclick="add_wl(4652797, this)" title="Add to watch later"><i class="fa fa-clock-o"></i><strong>Watch later</strong></span>
# <span class="i-fav" onclick="add_fav(4652797, this)" title="Add to favorites"><i class="fa fa-heart"></i><strong>Favorite</strong></span>
# <span class="i-flag" onclick="show_flag(4652797)" title="Report"><i class="fa fa-flag"></i><strong>Report</strong></span>
# <div class="inf">yenlomfc</div>
# <ul>
# <li>Hace 11 minutos</li>
# <li><i class="fa fa-eye"></i> 60</li>
# <li><i class="fa fa-thumbs-o-up"></i> 100%</li>
# </ul>
# </div>
# <div class="video-item" data-id="4652795">
# <a href="/2rq4b/video/penny+underbust+playstation+modeling" class="thumb ">
# <img src="//static.spankbang.com/static_desktop/Images/blank.png" data-src="//cdnthumb1.spankbang.com/250/4/6/4652795-t6.jpg" alt="Penny Underbust Playstation Modeling" class="cover lazyload " />
# <span class="play fa fa-play-circle-o fa-3x"></span>
# <span class="i-hd">1080p</span>
# <span class="i-len"><i class="fa fa-clock-o"></i> 3</span>
# </a>
# <span class="i-wl" onclick="add_wl(4652795, this)" title="Add to watch later"><i class="fa fa-clock-o"></i><strong>Watch later</strong></span>
# <span class="i-fav" onclick="add_fav(4652795, this)" title="Add to favorites"><i class="fa fa-heart"></i><strong>Favorite</strong></span>
# <span class="i-flag" onclick="show_flag(4652795)" title="Report"><i class="fa fa-flag"></i><strong>Report</strong></span>
# <div class="inf">Penny Underbust Playstation Modeling</div>
# <ul>
# <li>Hace 12 minutos</li>
# <li><i class="fa fa-eye"></i> 99</li>
# <li><i class="fa fa-thumbs-o-up"></i> 100%</li>
# </ul>
# </div>
data = httptools.downloadpage(item.url).data
patron = '<div class="video-item" data-id="\d+">.*?'
patron += '<a href="([^"]+)".*?'
patron += '<a href="([^"]+)" class="thumb ">.*?'
patron += 'data-src="([^"]+)" alt="([^"]+)".*?'
patron += '<i class="fa fa-clock-o"></i>(.*?)</span>'
patron += '</span>(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,scrapedtime in matches:
url = urlparse.urljoin(item.url,scrapedurl)
title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
# http://cdnthumb1.spankbang.com/250/4/6/4652755-t6.jpg
scrapedhd = scrapertools.find_single_match(scrapedtime, '<span class="i-hd">(.*?)</span>')
duration = scrapertools.find_single_match(scrapedtime, '<i class="fa fa-clock-o"></i>(.*?)</span>')
if scrapedhd != '':
title = "[COLOR yellow]" +duration+ " min[/COLOR] " + "[COLOR red]" +scrapedhd+ "[/COLOR] "+scrapedtitle
else:
title = "[COLOR yellow]" + duration + " min[/COLOR] " + scrapedtitle
thumbnail = "http:" + scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
# <li class="next"><a href="/new_videos/2/">&raquo;</a></li>
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail,
plot=plot, contentTitle=title ))
next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)">')
if next_page:
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>" , text_color="blue", url=next_page ) )
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>" , text_color="blue",
url=next_page ) )
return itemlist
@@ -124,13 +90,13 @@ def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'servervideo = \'([^\']+)\'.*?'
patron += 'path = \'([^\']+)\'.*?'
patron += 'filee = \'([^\']+)\'.*?'
matches = scrapertools.find_multiple_matches(data, patron)
for servervideo,path,filee in matches:
scrapedurl = servervideo + path + "56ea912c4df934c216c352fa8d623af3" + filee
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
scrapedurl = scrapertools.find_single_match(data, 'var stream_url_1080p = \'([^\']+)\';')
if scrapedurl == "":
scrapedurl = scrapertools.find_single_match(data, 'var stream_url_720p = \'([^\']+)\';')
if scrapedurl == "":
scrapedurl = scrapertools.find_single_match(data, 'var stream_url_480p = \'([^\']+)\';')
scrapedurl = scrapedurl.replace("amp;", "")
itemlist.append(Item(channel=item.channel, action="play", title=item.title, url=scrapedurl, thumbnail=item.thumbnail,
plot=item.plot, show=item.title, server="directo"))
return itemlist
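
The new play() above picks the best available quality by chaining find_single_match calls for stream_url_1080p, then 720p, then 480p. The same fallback written as a loop, assuming only the three quality keys the diff itself checks and the same amp; clean-up:

from core import scrapertools

def best_stream_url(data, qualities=("1080p", "720p", "480p")):
    # try the highest quality first, then fall back, as in play() above
    for quality in qualities:
        url = scrapertools.find_single_match(data, "var stream_url_%s = '([^']+)';" % quality)
        if url:
            return url.replace("amp;", "")
    return ""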