Code fixes.
Disabled: eroticasonlinetv, spankwire. New channels: Siska, xxxfreeinhd
@@ -46,7 +46,7 @@ def categorias(item):
         scrapedthumbnail = "https:" + scrapedthumbnail
         scrapedurl = urlparse.urljoin(item.url,scrapedurl)
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                              thumbnail=scrapedthumbnail, plot=scrapedplot) )
+                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
     next_page = scrapertools.find_single_match(data,'...<a href="([^"]+)" class="next">»</a>')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)
@@ -57,7 +57,7 @@ def categorias(item):
 def lista(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t| |<br>", "", data)
     patron = '<div class="item">.*?'
     patron += '<a href="([^"]+)" title="(.*?)">.*?'
@@ -72,7 +72,7 @@ def lista(item):
         thumbnail = "https:" + scrapedthumbnail
         plot = ""
         itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
-                              plot=plot, contentTitle = contentTitle))
+                              fanart=thumbnail, plot=plot, contentTitle = contentTitle))
     next_page = scrapertools.find_single_match(data,'...<a href="([^"]+)" class="next">»</a>')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)
@@ -82,7 +82,7 @@ def lista(item):

 def play(item):
     logger.info()
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     itemlist = servertools.find_video_items(data=data)
     for videoitem in itemlist:
         videoitem.title = item.fulltitle

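The same two-part change repeats through most hunks below: the deprecated scrapertools.cachePage(url) call becomes httptools.downloadpage(url).data, and each Item gains a fanart= argument that reuses the thumbnail. A minimal stand-alone sketch of the new call shape — httptools is an alfa-internal helper, so the stand-in below (same .data attribute, assumed names) is for illustration only:

import urllib2  # Python 2, as in the addon

class _Response(object):
    """Stand-in for the object alfa's httptools.downloadpage returns."""
    def __init__(self, data):
        self.data = data

def downloadpage(url):
    # old style: data = scrapertools.cachePage(url)
    # new style: data = downloadpage(url).data
    return _Response(urllib2.urlopen(url).read())
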
@@ -70,7 +70,7 @@ def lista(item):
         thumbnail = scrapedthumbnail
         plot = ""
         itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
-                              contentTitle = scrapedtitle))
+                              fanart=thumbnail, contentTitle = scrapedtitle))
     next_page = scrapertools.find_single_match(data, '<span class="text16">\d+</span> <a href="..([^"]+)"')
     if next_page:
         next_page = urlparse.urljoin(item.url,next_page)

@@ -62,14 +62,14 @@ def categorias(item):
         scrapedplot = ""
         scrapedurl = urlparse.urljoin(item.url,scrapedurl)
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                              thumbnail=scrapedthumbnail, plot=scrapedplot) )
-    return itemlist
+                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
+    return sorted(itemlist, key=lambda i: i.title)


 def lista(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t| |<br>", "", data)
     patron = '<div class="alsoporn_prev">.*?'
     patron += '<a href="([^"]+)">.*?'
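Several categorias() functions now end with return sorted(itemlist, key=lambda i: i.title) instead of return itemlist, so category entries come back alphabetized. A tiny self-contained illustration (the stub Item stands in for core.item.Item):

class Item(object):  # stub with just the attribute the sort key uses
    def __init__(self, title):
        self.title = title

itemlist = [Item("Teen"), Item("Amateur"), Item("Lesbianas")]
itemlist = sorted(itemlist, key=lambda i: i.title)
print([i.title for i in itemlist])  # ['Amateur', 'Lesbianas', 'Teen']
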
@@ -82,7 +82,8 @@ def lista(item):
         thumbnail = scrapedthumbnail
         plot = ""
         itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
-                              plot=plot, contentTitle = scrapedtitle))
+                              fanart=thumbnail, plot=plot, contentTitle = scrapedtitle))
+
     next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)" target="_self"><span class="alsoporn_page">NEXT</span></a>')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)
@@ -93,7 +94,7 @@ def lista(item):
 def play(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     scrapedurl = scrapertools.find_single_match(data,'<iframe frameborder=0 scrolling="no" src=\'([^\']+)\'')
     data = scrapertools.cachePage(scrapedurl)
     scrapedurl1 = scrapertools.find_single_match(data,'<iframe src="(.*?)"')

@@ -49,7 +49,7 @@ def catalogo(item):
         scrapedplot = ""
         scrapedthumbnail = ""
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                              thumbnail=scrapedthumbnail, plot=scrapedplot) )
+                              thumbnail=scrapedthumbnail, plot=scrapedplot) )
     next_page = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">»</a>')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)
@@ -71,8 +71,8 @@ def categorias(item):
         scrapedtitle = scrapedtitle + " (" + cantidad + ")"
         scrapedurl = urlparse.urljoin(item.url,scrapedurl)
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                              thumbnail=scrapedthumbnail, plot=scrapedplot) )
-    return itemlist
+                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
+    return sorted(itemlist, key=lambda i: i.title)


 def lista(item):
@@ -91,7 +91,7 @@ def lista(item):
         thumbnail = scrapedthumbnail
         plot = ""
         itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
-                              contentTitle = title))
+                              fanart=thumbnail, contentTitle = title))
     next_page = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)

@@ -50,7 +50,7 @@ def categorias(item):
         scrapedthumbnail = "http:" + scrapedthumbnail
         scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/latest/"
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                              thumbnail=scrapedthumbnail, plot=scrapedplot) )
+                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
     return itemlist


@@ -69,7 +69,7 @@ def lista(item):
         thumbnail = "https:" + scrapedthumbnail
         plot = ""
         itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
-                              plot=plot, contentTitle = scrapedtitle))
+                              fanart=thumbnail, plot=plot, contentTitle = scrapedtitle))
     next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" class="next" title="Next">Next</a>')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)

@@ -49,8 +49,8 @@ def categorias(item):
         scrapedtitle = scrapedtitle + " (" + cantidad + ")"
         scrapedplot = ""
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                              thumbnail=scrapedthumbnail, plot=scrapedplot) )
-    return itemlist
+                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
+    return sorted(itemlist, key=lambda i: i.title)


 def lista(item):
@@ -68,7 +68,7 @@ def lista(item):
         thumbnail = scrapedthumbnail
         plot = ""
         itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
-                              contentTitle = scrapedtitle, fanart=scrapedthumbnail))
+                              contentTitle = scrapedtitle, fanart=thumbnail))
     if item.extra:
         next_page = scrapertools.find_single_match(data, '<li class="next">.*?from_videos\+from_albums:(\d+)')
         if next_page:

@@ -47,7 +47,7 @@ def catalogo(item):
         scrapedplot = ""
         scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/movies"
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                              thumbnail=scrapedthumbnail , plot=scrapedplot) )
+                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail , plot=scrapedplot) )
     next_page = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">»</a>')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)
@@ -68,7 +68,7 @@ def categorias(item):
         scrapedtitle = scrapedtitle
         scrapedurl = urlparse.urljoin(item.url,scrapedurl)
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                              thumbnail=scrapedthumbnail, plot=scrapedplot) )
+                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
     return itemlist


@@ -86,7 +86,7 @@ def lista(item):
         plot = ""
         year = ""
         itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
-                              contentTitle = title, infoLabels={'year':year} ))
+                              fanart=thumbnail, contentTitle = title, infoLabels={'year':year} ))
     next_page = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">»</a>')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)

@@ -59,7 +59,7 @@ def lista(item):
         plot = scrapertools.find_single_match(match,'<p class="summary">(.*?)</p>')
         thumbnail = scrapertools.find_single_match(match,'<img src="([^"]+)"')
         itemlist.append( Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
-                              thumbnail=thumbnail, plot=plot, viewmode="movie") )
+                              fanart=thumbnail, thumbnail=thumbnail, plot=plot, viewmode="movie") )
     next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" class="siguiente">')
     if next_page!="":
         itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )

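The "Página Siguiente >>" entry is built with item.clone(...), which copies the current item and overrides only the fields passed in, so the next-page entry inherits channel, thumbnail and any extra state. A sketch of the pattern, with plain re standing in for scrapertools.find_single_match:

import re

def find_single_match(data, patron):
    # stand-in for scrapertools.find_single_match
    m = re.search(patron, data, re.DOTALL)
    return m.group(1) if m else ""

data = '... <a href="/videos/page/2" class="siguiente"> ...'
next_page = find_single_match(data, '<a href="([^"]+)" class="siguiente">')
if next_page != "":
    # in the channel: itemlist.append(item.clone(action="lista",
    #     title="Página Siguiente >>", text_color="blue", url=next_page))
    print(next_page)  # /videos/page/2
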
@@ -6,6 +6,7 @@ import urlparse
 from core import scrapertools
 from core.item import Item
 from platformcode import logger
+from core import httptools


 def mainlist(item):
@@ -18,7 +19,7 @@ def mainlist(item):
     # ------------------------------------------------------
     # Descarga la página
     # ------------------------------------------------------
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     # logger.info(data)

     # ------------------------------------------------------
@@ -68,7 +69,7 @@ def play(item):
     logger.info()
     itemlist = []
     # Descarga la página
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     logger.info(data)

     # Extrae las películas

@@ -69,7 +69,7 @@ def lista(item):
         url="https:" + scrapedurl
         thumbnail="https:" + scrapedthumbnail
         itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=url, thumbnail=thumbnail,
-                              plot=scrapedplot) )
+                              fanart=thumbnail, plot=scrapedplot) )
     next_page = scrapertools.find_single_match(data,'<li class="float-xs-right"><a href=\'([^\']+)\' title=\'Pagina \d+\'>')
     if next_page == "":
         next_page = scrapertools.find_single_match(data,'<li><a href=\'([^\']+)\' title=\'Pagina \d+\'>»</a>')

@@ -1,7 +1,7 @@
 {
     "id": "eroticasonlinetv",
     "name": "eroticasonlinetv",
-    "active": true,
+    "active": false,
     "adult": true,
     "language": ["*"],
     "thumbnail": "http://www.peliculaseroticasonline.tv/wp-content/themes/wpeliculaseroticasonlinetv/favicon.png",

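Setting "active": false is how the commit retires a channel without deleting it. A hedged sketch of how a loader might skip inactive manifests — the filtering logic below is an assumption for illustration, not alfa's actual loader:

import json

# hypothetical loader step: read a channel manifest and honor its active flag
manifest = json.loads('{"id": "eroticasonlinetv", "active": false, "adult": true}')
if not manifest.get("active", True):
    print("channel %s disabled, skipping" % manifest["id"])
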
@@ -18,6 +18,7 @@ def mainlist(item):
     itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="lista", url=host + "/popular/"))
     itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/top/"))
+    itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/channels/"))
     itemlist.append( Item(channel=item.channel, title="PornStar" , action="categorias", url=host + "/pornstars/"))
     itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
     itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
     return itemlist
@@ -50,7 +51,13 @@ def categorias(item):
         scrapedtitle = scrapedtitle.replace("movies", "") + " (" + cantidad + ")"
         scrapedurl = urlparse.urljoin(item.url,scrapedurl)
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                              thumbnail=scrapedthumbnail, plot=scrapedplot) )
+                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
+    next_page_url = scrapertools.find_single_match(data,'<li itemprop="url" class="current">.*?<a href="([^"]+)"')
+    if next_page_url!="":
+        next_page_url = urlparse.urljoin(item.url,next_page_url)
+        itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page_url) )
+    if "/categories/" in item.url:
+        itemlist = sorted(itemlist, key=lambda i: i.title)
     return itemlist


@@ -69,7 +76,7 @@ def lista(item):
         thumbnail = scrapedthumbnail
         plot = ""
         itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail,
-                              plot=plot, contentTitle = contentTitle) )
+                              fanart=thumbnail, plot=plot, contentTitle = contentTitle) )
     next_page_url = scrapertools.find_single_match(data,'<li itemprop="url" class="current">.*?<a href="([^"]+)"')
     if next_page_url!="":
         next_page_url = urlparse.urljoin(item.url,next_page_url)

@@ -48,7 +48,7 @@ def categorias(item):
         scrapedplot = ""
         scrapedtitle = scrapedtitle + " (" + cantidad + ")"
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                              thumbnail=scrapedthumbnail , plot=scrapedplot) )
+                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
     return itemlist


@@ -59,11 +59,14 @@ def lista(item):
     data = re.sub(r"\n|\r|\t| |<br>", "", data)
     patron = '<a href="([^"]+)" itemprop="url">.*?'
     patron += '<img src="([^"]+)" alt="([^"]+)">.*?'
-    patron += '<span itemprop="duration" class="length">(.*?)</span>'
+    patron += '<span itemprop="duration" class="length">(.*?)</span>(.*?)<span class="thumb-info">'
     matches = re.compile(patron,re.DOTALL).findall(data)
-    for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
+    for scrapedurl,scrapedthumbnail,scrapedtitle,duracion,calidad in matches:
         url = scrapedurl
-        title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
+        if ">HD<" in calidad:
+            title = "[COLOR yellow]" + duracion + "[/COLOR] " + "[COLOR red]" + "HD" + "[/COLOR] " +scrapedtitle
+        else:
+            title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
         contentTitle = scrapedtitle
         thumbnail = scrapedthumbnail
         plot = ""

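The widened pattern captures one extra group (calidad) between the duration span and the thumb-info span; when it contains the >HD< badge the title gets a red HD tag. A self-contained sketch of that branch:

import re

row = ('<span itemprop="duration" class="length">25:10</span>'
       '<span class="quality">>HD<</span><span class="thumb-info">')
duracion, calidad = re.search(
    r'class="length">(.*?)</span>(.*?)<span class="thumb-info">', row).groups()
if ">HD<" in calidad:
    title = "[COLOR yellow]" + duracion + "[/COLOR] [COLOR red]HD[/COLOR] Example"
else:
    title = "[COLOR yellow]" + duracion + "[/COLOR] Example"
print(title)
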
@@ -8,7 +8,7 @@ from core.item import Item
 from platformcode import config, logger
 from core import httptools

-
+# BLOQUEO ESET INTERNET SECURITY
 def mainlist(item):
     logger.info()
     itemlist = []

@@ -30,7 +30,7 @@ def categorias(item):
         scrapedplot = ""
         scrapedthumbnail = ""
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                              thumbnail=scrapedthumbnail, plot=scrapedplot) )
+                              thumbnail=scrapedthumbnail, plot=scrapedplot) )
     return itemlist


@@ -47,7 +47,7 @@ def lista(item):
     for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
         plot = ""
         itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
-                              thumbnail=scrapedthumbnail , plot=plot , viewmode="movie") )
+                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail , plot=plot , viewmode="movie") )
     next_page = scrapertools.find_single_match(data,'<a href="([^"]+)">Next')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)

@@ -52,7 +52,7 @@ def categorias(item):
 def lista(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     patron = '<a class="thumb tco1" href="([^"]+)">.*?'
     patron += 'src="([^"]+)".*?'
     patron += 'alt="([^"]+)".*?'
@@ -69,7 +69,7 @@ def lista(item):
         thumbnail = scrapedthumbnail + "|Referer=%s" %host
         plot = ""
         itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
-                              plot=plot, contentTitle = contentTitle))
+                              fanart=thumbnail, plot=plot, contentTitle = contentTitle))
     next_page = scrapertools.find_single_match(data,'<a class="bgco2 tco3" rel="next" href="([^"]+)">></a>')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)

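The |Referer=%s suffix relies on Kodi's convention of appending HTTP headers to an artwork URL after a pipe, so thumbnails load from hosts that check the referer. Sketch (the host value is a hypothetical placeholder):

host = 'https://example-site.com'  # hypothetical host value
scrapedthumbnail = 'https://cdn.example-site.com/thumbs/1.jpg'
thumbnail = scrapedthumbnail + "|Referer=%s" % host
print(thumbnail)
# https://cdn.example-site.com/thumbs/1.jpg|Referer=https://example-site.com
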
@@ -16,6 +16,7 @@ def mainlist(item):
     itemlist = []
     itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host))
     itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/top-raped/"))
+    itemlist.append( Item(channel=item.channel, title="Modelos" , action="categorias", url=host + "/models/most-popular/"))
     itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
     itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
     return itemlist
@@ -39,25 +40,32 @@ def categorias(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t| |<br>", "", data)
-    patron = '<li class="thumb thumb-category">.*?'
+    patron = '<li class="thumb thumb-\w+">.*?'
     patron += '<a href="([^"]+)">.*?'
-    patron += '<img class="lazy" data-original="([^"]+)">.*?'
-    patron += '<div class="name">([^"]+)</div>.*?'
-    patron += '<div class="count">(\d+)</div>'
+    patron += '<img class="lazy" data-original="([^"]+)".*?'
+    patron += '<div class="title">(.*?)</a>'
     matches = re.compile(patron,re.DOTALL).findall(data)
-    for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
+    for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
         scrapedplot = ""
-        scrapedtitle = scrapedtitle + " (" + cantidad + ")"
+        title = scrapertools.find_single_match(scrapedtitle,'<div class="text">([^<]+)<')
+        if "/categories/" in item.url:
+            cantidad = scrapertools.find_single_match(scrapedtitle,'<div class="count">(\d+)</div>')
+            scrapedtitle = scrapertools.find_single_match(scrapedtitle,'<div class="name">([^<]+)</div>')
+            title = scrapedtitle + " (" + cantidad + ")"
         scrapedurl = urlparse.urljoin(item.url,scrapedurl)
-        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                              thumbnail=scrapedthumbnail , plot=scrapedplot) )
+        itemlist.append( Item(channel=item.channel, action="lista", title=title, url=scrapedurl,
+                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
+    next_page = scrapertools.find_single_match(data,'<li class="pagination-next"><a href="([^"]+)">')
+    if next_page!="":
+        next_page = urlparse.urljoin(item.url,next_page)
+        itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) )
     return itemlist


 def lista(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t| |<br>", "", data)
     patron = '<div class="thumb">.*?'
     patron += '<a href="([^"]+)".*?'
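The rewritten categorias() captures the whole <div class="title">...</a> block and only digs out the name and count when browsing /categories/; model pages lack the count div, so the looser thumb-\w+ pattern serves both. A runnable sketch of the two-step extraction:

import re

def find_single_match(data, patron):
    # stand-in for scrapertools.find_single_match
    m = re.search(patron, data, re.DOTALL)
    return m.group(1) if m else ""

block = '<div class="name">Amateur</div><div class="count">123</div>'
cantidad = find_single_match(block, '<div class="count">(\d+)</div>')
nombre = find_single_match(block, '<div class="name">([^<]+)</div>')
print("%s (%s)" % (nombre, cantidad))  # Amateur (123)
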
@@ -72,7 +80,7 @@ def lista(item):
         plot = ""
         year = ""
         itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
-                              plot=plot, contentTitle = contentTitle))
+                              fanart=thumbnail, plot=plot, contentTitle = contentTitle))
     next_page = scrapertools.find_single_match(data,'<li class="pagination-next"><a href="([^"]+)">')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)
@@ -83,7 +91,7 @@ def lista(item):
 def play(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     patron = '<meta property="og:video" content="([^"]+)"'
     matches = scrapertools.find_multiple_matches(data, patron)
     for scrapedurl in matches:

@@ -77,11 +77,12 @@ def lista(item):
     patron += '<img src="([^"]+)"'
     matches = re.compile(patron,re.DOTALL).findall(data)
     for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
-        title = scrapedtitle
+        calidad = scrapertools.find_single_match(scrapedtitle, '(\(.*?\))')
+        title = "[COLOR yellow]" + calidad + "[/COLOR] " + scrapedtitle.replace( "%s" % calidad, "")
         thumbnail = scrapedthumbnail.replace("jpg#", "jpg")
         plot = ""
         itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail,
-                              plot=plot, fulltitle=title) )
+                              fanart=thumbnail, plot=plot, fulltitle=title) )
     next_page = scrapertools.find_single_match(data, '<div class="nav-previous"><a href="([^"]+)"')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)

@@ -147,7 +147,7 @@ def findvideos(item):

 def play(item):
     logger.info()
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     itemlist = servertools.find_video_items(data=data)
     for videoitem in itemlist:
         videoitem.title = item.fulltitle

@@ -45,7 +45,7 @@ def categorias(item):
         scrapedplot = ""
         scrapedtitle = scrapedtitle + " (" + cantidad + ")"
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                              thumbnail=scrapedthumbnail , plot=scrapedplot) )
+                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail , plot=scrapedplot) )
     next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" class="next">Next page »</a>')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)
@@ -56,7 +56,7 @@ def categorias(item):
 def lista(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t| |<br>", "", data)
     patron = '<div class="video-thumb"><a href="([^"]+)" class="title".*?>([^"]+)</a>.*?'
     patron += '<span class="time">([^<]+)</span>.*?'
@@ -69,7 +69,7 @@ def lista(item):
         thumbnail = scrapedthumbnail
         plot = ""
         itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
-                              plot=plot, contentTitle = contentTitle))
+                              fanart=thumbnail, plot=plot, contentTitle = contentTitle))
     next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" class="next">Next page »</a>')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)
@@ -80,7 +80,7 @@ def lista(item):
 def play(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     scrapedurl = scrapertools.find_single_match(data,'<source data-fluid-hd src="([^"]+)/?br=\d+"')
     if scrapedurl=="":
         scrapedurl = scrapertools.find_single_match(data,'<source src="([^"]+)/?br=\d+"')

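play() first tries the HD <source data-fluid-hd ...> tag and falls back to the plain <source> when the page has no HD variant. A runnable sketch of the fallback, again with re standing in for scrapertools:

import re

def find_single_match(data, patron):
    m = re.search(patron, data, re.DOTALL)
    return m.group(1) if m else ""

data = '<source src="https://cdn.example.com/video.mp4?br=720">'
url = find_single_match(data, '<source data-fluid-hd src="([^"]+)/?br=\d+"')
if url == "":
    url = find_single_match(data, '<source src="([^"]+)/?br=\d+"')
print(url)
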
@@ -10,6 +10,7 @@ from core import httptools

 host = 'http://javl.in'

+# BLOQUEO ANTIVIRUS

 def mainlist(item):
     logger.info()
@@ -27,7 +27,7 @@ def mainlist(item):
         fanart = ''

         itemlist.append(
-            Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, fanart=fanart))
+            Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, fanart=thumbnail))

     # Paginacion
     title = ''

@@ -50,7 +50,7 @@ def categorias(item):
         scrapedplot = ""
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                               thumbnail=scrapedthumbnail , plot=scrapedplot) )
-    return itemlist
+    return sorted(itemlist, key=lambda i: i.title)


 def lista(item):
@@ -51,7 +51,7 @@ def categorias(item):
     itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                           thumbnail=scrapedthumbnail , plot=scrapedplot) )

-    return itemlist
+    return sorted(itemlist, key=lambda i: i.title)


 def lista(item):
@@ -67,7 +67,7 @@ def lista(item):
         scrapedtitle = "[COLOR yellow]" + (scrapedtime) + "[/COLOR] " + scrapedtitle
         scrapedurl = "http://xxx.justporno.tv/embed/" + scrapedurl
         itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
-                              thumbnail=scrapedthumbnail , plot=scrapedplot) )
+                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )

     if item.extra:
         next_page = scrapertools.find_single_match(data, '<li class="next">.*?from_videos\+from_albums:(.*?)>')

@@ -64,7 +64,7 @@ def peliculas(item):
         thumbnail = scrapedthumbnail
         plot = ""
         itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail,
-                              plot=plot, contentTitle=contentTitle))
+                              fanart=thumbnail, plot=plot, contentTitle=contentTitle))
     next_page_url = scrapertools.find_single_match(data,'<a href=\'([^\']+)\' class="next">Next >></a>')
     if next_page_url!="":
         next_page_url = urlparse.urljoin(item.url,next_page_url)

@@ -72,7 +72,7 @@ def lista(item):
         thumbnail = scrapedthumbnail
         plot = ""
         itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail,
-                             plot=plot, contentTitle=title))
+                             fanart=thumbnail, plot=plot, contentTitle=title))
     # <li class='active'><a class=''>1</a></li><li><a rel='nofollow' class='page larger' href='https://pandamovies.pw/movies/page/2'>
     next_page = scrapertools.find_single_match(data, '<li class=\'active\'>.*?href=\'([^\']+)\'>')
     if next_page == "":

@@ -6,37 +6,26 @@ import urlparse
 from core import scrapertools
 from core.item import Item
 from platformcode import logger
+from core import httptools


 def mainlist(item):
     logger.info()

     itemlist = []
     if item.url == "":
         item.url = "http://www.peliculaseroticas.net/"

-    # Descarga la página
-    data = scrapertools.cachePage(item.url)
-
-    # Extrae las entradas de la pagina seleccionada
-    patron = '<div class="post"[^<]+'
-    patron += '<a href="([^"]+)">([^<]+)</a[^<]+'
-    patron += '<hr[^<]+'
-    patron += '<a[^<]+<img src="([^"]+)"'
-
+    data = httptools.downloadpage(item.url).data
+    patron = '<div class="post">.*?'
+    patron += '<a href="([^"]+)">([^<]+)</a>.*?'
+    patron += '<img src="([^"]+)"'
     matches = re.compile(patron, re.DOTALL).findall(data)
-    itemlist = []

     for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
         url = urlparse.urljoin(item.url, scrapedurl)
         title = scrapedtitle.strip()
         thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
         plot = ""

-        # Añade al listado
-        itemlist.append(
-            Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumbnail,
-                 plot=plot, viewmode="movie", folder=True))
-
+        itemlist.append( Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
+                              fanart=thumbnail, thumbnail=thumbnail, plot=plot, viewmode="movie"))
     # Extrae la marca de siguiente página
     if item.url == "http://www.peliculaseroticas.net/":
         next_page_url = "http://www.peliculaseroticas.net/cine-erotico/2.html"
@@ -44,8 +33,6 @@ def mainlist(item):
         current_page = scrapertools.find_single_match(item.url, "(\d+)")
         next_page = int(current_page) + 1
         next_page_url = "http://www.peliculaseroticas.net/cine-erotico/" + str(next_page) + ".html"

-    itemlist.append(
-        Item(channel=item.channel, action="mainlist", title=">> Página siguiente", url=next_page_url, folder=True))
-
+    itemlist.append( Item(channel=item.channel, action="mainlist", title=">> Página siguiente", url=next_page_url))
     return itemlist

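The paginator in this channel is purely numeric: it pulls the current page number out of the URL and rebuilds the next URL by incrementing it. Runnable sketch:

import re

url = "http://www.peliculaseroticas.net/cine-erotico/2.html"
current_page = re.search(r"(\d+)", url).group(1)
next_page = int(current_page) + 1
next_page_url = "http://www.peliculaseroticas.net/cine-erotico/" + str(next_page) + ".html"
print(next_page_url)  # .../cine-erotico/3.html
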
@@ -35,7 +35,8 @@ def lista(item):
         if duration:
             scrapedtitle += " (%s)" % duration

-        itemlist.append(item.clone(action="findvideos", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail))
+        itemlist.append(item.clone(action="findvideos", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
+                                   fanart=scrapedthumbnail))

     # Extrae la marca de siguiente página
     next_page = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)"')

@@ -70,7 +70,7 @@ def peliculas(item):
         scrapedthumbnail = "http:" + scrapedthumbnail
         url = urlparse.urljoin(item.url,scrapedurl)
         itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=scrapedthumbnail,
-                              plot=plot, contentTitle = title))
+                              fanart=scrapedthumbnail, plot=plot, contentTitle = title))
     next_page = scrapertools.find_single_match(data, '<a class="btn_wrapper__btn" href="([^"]+)">Next</a></li>')
     if next_page:
         next_page = urlparse.urljoin(item.url, next_page)

@@ -10,14 +10,16 @@ from core import httptools

 host = 'https://www.porn300.com'

+# BLOQUEO ANTIVIRUS STREAMCLOUD
+
 def mainlist(item):
     logger.info()
     itemlist = []
     itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/es/videos/"))
     itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="lista", url=host + "/es/mas-vistos/"))
     itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/es/mas-votados/"))
-    itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/es/canales/"))
-    itemlist.append( Item(channel=item.channel, title="Pornstars" , action="categorias", url=host + "/es/pornostars/"))
+    itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/es/canales/?page=1"))
+    itemlist.append( Item(channel=item.channel, title="Pornstars" , action="categorias", url=host + "/es/pornostars/?page=1"))
     itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/es/categorias/"))
     itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
     return itemlist
@@ -56,8 +58,11 @@ def categorias(item):
         scrapedtitle = scrapedtitle + " (" + cantidad +")"
         scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/?sort=latest"
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                              thumbnail=scrapedthumbnail, plot=scrapedplot) )
+                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
     next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)" />')
+    if next_page=="":
+        if "/?page=1" in item.url:
+            next_page=urlparse.urljoin(item.url,"/?page=2")
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)
         itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) )
@@ -67,7 +72,7 @@ def categorias(item):
 def lista(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t| |<br>", "", data)
     patron = '<a itemprop="url" href="([^"]+)" data-video-id="\d+" title="([^"]+)">.*?'
     patron += '<img itemprop="thumbnailUrl" src="([^"]+)".*?'
@@ -81,7 +86,7 @@ def lista(item):
         thumbnail = scrapedthumbnail
         plot = ""
         itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail,
-                              plot=plot, contentTitle = contentTitle) )
+                              fanart=thumbnail, plot=plot, contentTitle = contentTitle) )
     next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)" />')
     if next_page!="":
         next_page = urlparse.urljoin(item.url,next_page)
@@ -91,11 +96,10 @@ def lista(item):
 def play(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     patron = '<source src="([^"]+)"'
     matches = re.compile(patron,re.DOTALL).findall(data)
-    for scrapedurl in matches:
-        url = scrapedurl
-        itemlist.append(item.clone(action="play", title=url, fulltitle = item.title, url=url))
+    for url in matches:
+        itemlist.append(item.clone(action="play", title=url, fulltitle = item.title, url=url))
     return itemlist

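Listing pages normally advertise their next page via <link rel="next">; the new branch covers first pages that omit it by synthesizing ?page=2 whenever the entry URL was seeded with ?page=1. Sketch (the channel uses urlparse.urljoin; a plain replace is shown here for brevity):

import re

item_url = "https://www.porn300.com/es/canales/?page=1"
data = "<html>... no rel=next link on page 1 ...</html>"
m = re.search(r'<link rel="next" href="([^"]+)" />', data)
next_page = m.group(1) if m else ""
if next_page == "" and "/?page=1" in item_url:
    next_page = item_url.replace("/?page=1", "/?page=2")
print(next_page)
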
@@ -59,16 +59,16 @@ def categorias(item):
 def lista(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     patron = '<article id=post-\d+.*?'
     patron += '<img class="center cover" src=([^"]+) alt="([^"]+)".*?'
-    patron += '<blockquote>.*?<a href=(.*?) target=_blank>'
+    patron += '<blockquote><p> <a href=(.*?) target=_blank'
     matches = re.compile(patron,re.DOTALL).findall(data)
     scrapertools.printMatches(matches)
     for scrapedthumbnail,scrapedtitle,scrapedurl in matches:
         scrapedplot = ""
         itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
-                              thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
+                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
     next_page = scrapertools.find_single_match(data,'<a class=nextpostslink rel=next href=(.*?)>')
     if next_page!="":
         itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
@@ -77,6 +77,7 @@ def lista(item):

 def play(item):
     logger.info()
+    data = httptools.downloadpage(item.url).data
     itemlist = servertools.find_video_items(data=item.url)
     for videoitem in itemlist:
         videoitem.title = item.title

@@ -52,7 +52,7 @@ def lista(item):
         scrapedplot = ""
         scrapedtitle = "[COLOR yellow]" + (scrapedtime) + "[/COLOR] " + scrapedtitle
         itemlist.append(Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
-                             thumbnail=scrapedthumbnail, plot=scrapedplot))
+                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot))
     next_page = scrapertools.find_single_match(data, '<nav id="page_nav"><a href="(.*?)"')
     if next_page != "":
         itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))

@@ -2,6 +2,8 @@
 #------------------------------------------------------------
 import urlparse,urllib2,urllib,re
 import os, sys
+import base64

 from core import scrapertools
 from core import servertools
 from core.item import Item
@@ -10,6 +12,7 @@ from core import httptools

 host = 'http://www.pornhive.tv/en'

+
 def mainlist(item):
     logger.info()
     itemlist = []
@@ -66,22 +69,25 @@ def lista(item):
         title = scrapedtitle
         thumbnail = scrapedthumbnail
         plot = ""
-        itemlist.append( Item(channel=item.channel, action="play" , title=title, url=scrapedurl, thumbnail=thumbnail,
-                              plot=plot, contentTitle=title))
+        itemlist.append( Item(channel=item.channel, action="findvideos" , title=title, url=scrapedurl, thumbnail=thumbnail,
+                              fanart=thumbnail, plot=plot, contentTitle=title))
     next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)" data-ci-pagination-page="\d+" rel="next">Next ›')
     if next_page != "" :
         itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
     return itemlist


-def play(item):
+def findvideos(item):
     logger.info()
-    itemlist = servertools.find_video_items(data=item.url)
-    data = scrapertools.cachePage(item.url)
-    itemlist = servertools.find_video_items(data=data)
-    for videoitem in itemlist:
-        videoitem.title = item.title
-        videoitem.fulltitle = item.fulltitle
-        videoitem.thumbnail = item.thumbnail
-        videochannel=item.channel
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t| |<br>", "", data)
+    patron = ';extra_urls\[\d+\]=\'([^\']+)\''
+    matches = re.compile(patron,re.DOTALL).findall(data)
+    for scrapedurl in matches:
+        scrapedurl = base64.b64decode(scrapedurl)
+        itemlist.append(item.clone(action="play", title="%s", url=scrapedurl))
+    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    return itemlist

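The new findvideos() decodes the player's base64-encoded extra_urls array and then lets servertools identify each hoster. A self-contained sketch of the decode step (the sample data is synthetic; Python 2 str API, as in the addon):

import base64, re

encoded = base64.b64encode("https://example-hoster.com/embed/abc123")
data = ";extra_urls[0]='%s';" % encoded
for scrapedurl in re.findall(r";extra_urls\[\d+\]='([^']+)'", data):
    print(base64.b64decode(scrapedurl))  # https://example-hoster.com/embed/abc123
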
@@ -50,8 +50,8 @@ def categorias(item):
         else:
             url = urlparse.urljoin(item.url, scrapedurl + "?o=cm")
         scrapedtitle = scrapedtitle + " (" + cantidad + ")"
-        itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=url, fanart=item.fanart,
-                             thumbnail=scrapedthumbnail))
+        itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=url,
+                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail))
     itemlist.sort(key=lambda x: x.title)
     return itemlist
@@ -73,7 +73,7 @@ def peliculas(item):
             title += ' [HD]'
         url = urlparse.urljoin(item.url, url)
         itemlist.append(
-            Item(channel=item.channel, action="play", title=title, url=url, fanart=item.fanart, thumbnail=thumbnail))
+            Item(channel=item.channel, action="play", title=title, url=url, fanart=thumbnail, thumbnail=thumbnail))
     if itemlist:
         # Paginador
         patron = '<li class="page_next"><a href="([^"]+)"'
@@ -88,7 +88,7 @@ def peliculas(item):
 def play(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     patron = '"defaultQuality":true,"format":"mp4","quality":"\d+","videoUrl":"(.*?)"'
     matches = re.compile(patron,re.DOTALL).findall(data)
     for scrapedurl in matches:
@@ -96,3 +96,4 @@ def play(item):
         itemlist.append(item.clone(action="play", title=url, fulltitle = item.title, url=url))
     return itemlist

@@ -45,7 +45,7 @@ def categorias(item):
     for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
         scrapedplot = ""
         itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                              thumbnail=scrapedthumbnail, plot=scrapedplot) )
+                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
     return itemlist


@@ -63,7 +63,7 @@ def lista(item):
         thumbnail = scrapedthumbnail
         plot = ""
         itemlist.append( Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail,
-                              plot=plot, contentTitle = title))
+                              fanart=thumbnail, plot=plot, contentTitle = title))
     next_page = scrapertools.find_single_match(data, '<li class="direction"><a href="([^"]+)" data-ajax="pagination">')
     if next_page:
         next_page = urlparse.urljoin(item.url,next_page)

@@ -10,6 +10,7 @@ from core import httptools

 host = 'http://qwertty.net'

+
 def mainlist(item):
     logger.info()
     itemlist = []
@@ -64,7 +65,7 @@ def lista(item):
         scrapedplot = ""
         title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
         itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl,
-                              thumbnail=scrapedthumbnail, plot=scrapedplot) )
+                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
     next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)">Next</a>')
     if next_page=="":
         next_page = scrapertools.find_single_match(data,'<li><a class="current">.*?<li><a href=\'([^\']+)\' class="inactive">')
@@ -77,10 +78,11 @@ def lista(item):
 def play(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     url = scrapertools.find_single_match(data,'<meta itemprop="embedURL" content="([^"]+)"')
     url = url.replace("pornhub.com/embed/", "pornhub.com/view_video.php?viewkey=")
-    data = scrapertools.cachePage(url)
+    data = httptools.downloadpage(url).data
+    # data = scrapertools.cachePage(url) https://www.spankwire.com/EmbedPlayer.aspx?ArticleId=14049072
     if "xvideos" in url :
         scrapedurl = scrapertools.find_single_match(data,'setVideoHLS\(\'([^\']+)\'')
     if "pornhub" in url :

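play() normalizes hoster embeds before scraping: a pornhub embed URL is rewritten to the full view_video page, whose HTML exposes the stream definitions. The rewrite is a plain string replace:

url = "https://www.pornhub.com/embed/ph5abc123"
url = url.replace("pornhub.com/embed/", "pornhub.com/view_video.php?viewkey=")
print(url)  # https://www.pornhub.com/view_video.php?viewkey=ph5abc123
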
@@ -46,11 +46,12 @@ def catalogo(item):
         scrapedplot = ""
         scrapedtitle = scrapedtitle + " [COLOR yellow]" + cantidad + "[/COLOR] "
         scrapedurl = urlparse.urljoin(item.url,scrapedurl)
-        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
+        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
+                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
     next_page_url = scrapertools.find_single_match(data,'<a id="wp_navNext" class="js_pop_page" href="([^"]+)">')
     if next_page_url!="":
         next_page_url = urlparse.urljoin(item.url,next_page_url)
-        itemlist.append( Item(channel=item.channel , action="catalogo" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
+        itemlist.append(item.clone(action="catalogo", title="Página Siguiente >>", text_color="blue", url=next_page_url) )
     return itemlist

 def categorias(item):
@@ -58,22 +59,30 @@ def categorias(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t| |<br>", "", data)
-    patron = '<div class="category_item_wrapper">.*?<a href="([^"]+)".*?data-thumb_url="([^"]+)".*?alt="([^"]+)".*?<span class="category_count">\s+([^"]+) Videos'
+    patron = '<div class="category_item_wrapper">.*?'
+    patron += '<a href="([^"]+)".*?'
+    patron += 'data-src="([^"]+)".*?'
+    patron += 'alt="([^"]+)".*?'
+    patron += '<span class="category_count">([^"]+) Videos'
     matches = re.compile(patron,re.DOTALL).findall(data)
     for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
         scrapedplot = ""
+        cantidad = cantidad.strip()
         scrapedtitle = scrapedtitle + " (" + cantidad + ")"
         scrapedurl = urlparse.urljoin(item.url,scrapedurl)
-        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
+        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
+                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
     return itemlist


 def peliculas(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t| |<br>", "", data)
-    patron = '<img id="img_.*?data-path="([^"]+)".*?<span class="duration">(.*?)</a>.*?<a title="([^"]+)" href="([^"]+)">'
+    patron = '<img id="img_.*?data-path="([^"]+)".*?'
+    patron += '<span class="duration">(.*?)</a>.*?'
+    patron += '<a title="([^"]+)" href="([^"]+)">'
     matches = re.compile(patron,re.DOTALL).findall(data)
     for scrapedthumbnail,duration,scrapedtitle,scrapedurl in matches:
         url = urlparse.urljoin(item.url,scrapedurl)
@@ -82,23 +91,25 @@ def peliculas(item):
             duration = scrapertools.find_single_match(duration, 'HD</span>(.*?)</span>')
             title = "[COLOR yellow]" + duration + "[/COLOR] " + "[COLOR red]" + scrapedhd + "[/COLOR] " + scrapedtitle
         else:
             duration = duration.replace("<span class=\"vr-video\">VR</span>", "")
             title = "[COLOR yellow]" + duration + "[/COLOR] " + scrapedtitle
         title = title.replace(" </span>", "").replace(" ", "")
         scrapedthumbnail = scrapedthumbnail.replace("{index}.", "1.")
         plot = ""
         year = ""
-        itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=scrapedthumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
+        if not "/premium/" in url:
+            itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url,
+                                  fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=plot, contentTitle = title) )
     next_page_url = scrapertools.find_single_match(data,'<a id="wp_navNext" class="js_pop_page" href="([^"]+)">')
     if next_page_url!="":
         next_page_url = urlparse.urljoin(item.url,next_page_url)
-        itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
+        itemlist.append(item.clone(action="peliculas", title="Página Siguiente >>", text_color="blue", url=next_page_url) )
     return itemlist


 def play(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     patron = '"defaultQuality":true,"format":"",.*?"videoUrl"\:"([^"]+)"'
     matches = re.compile(patron,re.DOTALL).findall(data)
     for scrapedurl in matches:

@@ -13,25 +13,25 @@ host = "https://www.serviporno.com"
 def mainlist(item):
     logger.info()
     itemlist = []
-    itemlist.append(
-        Item(channel=item.channel, action="videos", title="Útimos videos", url= host))
-    itemlist.append(
-        Item(channel=item.channel, action="videos", title="Más vistos", url="http://www.serviporno.com/mas-vistos/"))
-    itemlist.append(
-        Item(channel=item.channel, action="videos", title="Más votados", url="http://www.serviporno.com/mas-votados/"))
-    itemlist.append(Item(channel=item.channel, action="categorias", title="Categorias",
-                         url="http://www.serviporno.com/categorias/"))
-    itemlist.append(
-        Item(channel=item.channel, action="chicas", title="Chicas", url="http://www.serviporno.com/pornstars/"))
-    itemlist.append(
-        Item(channel=item.channel, action="search", title="Buscar", url="http://www.serviporno.com/search/?q="))
+    itemlist.append(Item(channel=item.channel, action="videos", title="Útimos videos",
+                         url=host + "/ajax/homepage/?page=1", last= host))
+    itemlist.append(Item(channel=item.channel, action="videos", title="Más vistos",
+                         url=host + "/ajax/most_viewed/?page=1", last= host + "/mas-vistos/"))
+    itemlist.append(Item(channel=item.channel, action="videos", title="Más votados",
+                         url=host + "/ajax/best_rated/?page=1", last= host + "/mas-votados/"))
+    itemlist.append(Item(channel=item.channel, action="categorias", title="Canal",
+                         url=host + "/ajax/list_producers/?page=1", last= host + "/sitios/"))
+    itemlist.append(Item(channel=item.channel, action="categorias", title="Categorias", url= host + "/categorias/"))
+    itemlist.append(Item(channel=item.channel, action="chicas", title="Chicas",
+                         url=host + "/ajax/list_pornstars/?page=1", last= host + "/pornstars/"))
+    itemlist.append(Item(channel=item.channel, action="search", title="Buscar", last=""))
     return itemlist


 def search(item, texto):
     logger.info()
     texto = texto.replace(" ", "+")
-    item.url = item.url + texto
+    item.url = host + '/ajax/new_search/?q=%s&page=1' % texto
     try:
         return videos(item)
     # Se captura la excepción, para no interrumpir al buscador global si un canal falla
@@ -42,29 +42,39 @@ def search(item, texto):
         return []


+def get_last_page(url):
+    logger.info()
+    data = httptools.downloadpage(url).data
+    last_page= int(scrapertools.find_single_match(data,'data-ajax-last-page="(\d+)"'))
+    return last_page
+
+
 def videos(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url).data

     patron = '(?s)<div class="wrap-box-escena">.*?'
     patron += '<div class="box-escena">.*?'
     patron += '<a\s*href="([^"]+)".*?'
     patron += 'data-stats-video-name="([^"]+)".*?'
-    patron += '<img\s*src="([^"]+)"'
-    matches = scrapertools.find_multiple_matches(data, patron)
-    for url, title, thumbnail in matches:
+    patron += '<img\s*src="([^"]+)".*?'
+    patron += '<div class="duracion">([^"]+) min</div>'
+    matches = re.compile(patron,re.DOTALL).findall(data)
+    for url, title, thumbnail,duration in matches:
+        title = "[COLOR yellow]" + duration + "[/COLOR] " + title
         url = urlparse.urljoin(item.url, url)
-        itemlist.append(Item(channel=item.channel, action='play', title=title, url=url, thumbnail=thumbnail))
-
-    # Paginador
-    patron = '<a href="([^<]+)">Siguiente »</a>'
-    matches = re.compile(patron, re.DOTALL).findall(data)
-    if len(matches) > 0:
-        url = "http://www.serviporno.com" + matches[0]
-        itemlist.append(
-            Item(channel=item.channel, action="videos", title="Página Siguiente", url=url, thumbnail="", folder=True))
-
+        itemlist.append(Item(channel=item.channel, action='play', title=title, url=url, thumbnail=thumbnail, fanart=thumbnail))
+    # Paginador "Página Siguiente >>"
+    current_page = int(scrapertools.find_single_match(item.url, "/?page=(\d+)"))
+    if not item.last_page:
+        last_page = get_last_page(item.last)
+    else:
+        last_page = int(item.last_page)
+    if current_page < last_page:
+        next_page = "?page=" + str(current_page + 1)
+        next_page = urlparse.urljoin(item.url,next_page)
+        itemlist.append(Item(channel=item.channel, action="videos", title="Página Siguiente >>", text_color="blue",
+                             url=next_page, thumbnail="", last_page=last_page))
     return itemlist

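serviporno's listings moved to /ajax/ endpoints that take ?page=N; the total page count is read once from the data-ajax-last-page attribute, and the channel stops appending "Página Siguiente >>" at the boundary. Runnable sketch of that bound check:

import re

html = '<div class="paginator" data-ajax-last-page="27"></div>'
last_page = int(re.search(r'data-ajax-last-page="(\d+)"', html).group(1))
current_page = 1
if current_page < last_page:
    next_url = "https://www.serviporno.com/ajax/homepage/?page=%d" % (current_page + 1)
    print(next_url)
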
@@ -72,18 +82,28 @@ def chicas(item):
     logger.info()
     itemlist = []
     data = scrapertools.downloadpage(item.url)

     patron = '<div class="box-chica">.*?'
     patron += '<a href="([^"]+)" title="">.*?'
-    patron += '<img class="img" src=\'([^"]+)\' width="175" height="150" border=\'0\' alt="[^"]+"/>.*?'
-    patron += '</a>[^<]{1}<h4><a href="[^"]+" title="">([^"]+)</a></h4>.*?'
+    patron += '<img class="img" src=\'([^"]+)\' width="175" height="150" border=\'0\' alt="[^"]+" />.*?'
+    patron += '<h4><a href="[^"]+" title="">([^"]+)</a></h4>.*?'
     patron += '<a class="total-videos" href="[^"]+" title="">([^<]+)</a>'

     matches = re.compile(patron, re.DOTALL).findall(data)
     for url, thumbnail, title, videos in matches:
-        url = urlparse.urljoin("http://www.serviporno.com", url)
+        last = urlparse.urljoin(item.url, url)
+        url= last.replace("/pornstar", "/ajax/show_pornstar") + "?page=1"
         title = title + " (" + videos + ")"
-        itemlist.append(Item(channel=item.channel, action='videos', title=title, url=url, thumbnail=thumbnail, plot=""))
+        itemlist.append(Item(channel=item.channel, action='videos', title=title, url=url, last=last, thumbnail=thumbnail, fanart=thumbnail))
+    # Paginador "Página Siguiente >>"
+    current_page = int(scrapertools.find_single_match(item.url, "/?page=(\d+)"))
+    if not item.last_page:
+        last_page = get_last_page(item.last)
+    else:
+        last_page = int(item.last_page)
+    if current_page < last_page:
+        next_page = "?page=" + str(current_page + 1)
+        next_page = urlparse.urljoin(item.url,next_page)
+        itemlist.append(Item(channel=item.channel, action="chicas", title="Página Siguiente >>", text_color="blue",
+                             url=next_page, thumbnail="", last_page=last_page))
     return itemlist

@@ -91,16 +111,25 @@ def categorias(item):
     logger.info()
     itemlist = []
     data = scrapertools.downloadpage(item.url)

-    patron = '<div class="wrap-box-escena">.*?'
-    patron += '<div class="cat box-escena">.*?'
-    patron += '<a href="([^"]+)"><img src="([^"]+)" alt="Webcam" height="150" width="175" border=0 /></a>.*?'
-    patron += '<h4><a href="[^"]+">([^<]+)</a></h4>'
-
+    patron = '<div class="wrap-box-escena.*?'
+    patron += 'href="([^"]+)"><img src="([^"]+)".*?'
+    patron += '<h4.*?<a href="[^"]+">([^<]+)</a></h4>'
     matches = re.compile(patron, re.DOTALL).findall(data)
     for url, thumbnail, title in matches:
-        url = urlparse.urljoin(item.url, url)
-        itemlist.append(Item(channel=item.channel, action='videos', title=title, url=url, thumbnail=thumbnail, plot=""))
+        last = urlparse.urljoin(item.url, url)
+        url= last.replace("/videos-porno", "/ajax/show_category").replace("/sitio","/ajax/show_producer") + "?page=1"
+        itemlist.append(Item(channel=item.channel, action='videos', title=title, url=url, last=last, thumbnail=thumbnail, plot=""))
+    # Paginador "Página Siguiente >>"
+    current_page = int(scrapertools.find_single_match(item.url, "/?page=(\d+)"))
+    if not item.last_page:
+        last_page = get_last_page(item.last)
+    else:
+        last_page = int(item.last_page)
+    if current_page < last_page:
+        next_page = "?page=" + str(current_page + 1)
+        next_page = urlparse.urljoin(item.url,next_page)
+        itemlist.append(Item(channel=item.channel, action="categorias", title="Página Siguiente >>", text_color="blue",
+                             url=next_page, thumbnail="", last_page=last_page))
     return itemlist
@@ -113,3 +142,4 @@ def play(item):
         Item(channel=item.channel, action="play", server="directo", title=item.title, url=url, thumbnail=item.thumbnail,
              plot=item.plot, folder=False))
     return itemlist

@@ -14,8 +14,8 @@ host = 'http://sexgalaxy.net'
 def mainlist(item):
     logger.info()
     itemlist = []
-    itemlist.append(Item(channel=item.channel, title="Ultimos", action="lista", url=host + "/new-releases/"))
     itemlist.append(Item(channel=item.channel, title="Peliculas", action="lista", url=host + "/full-movies/"))
+    itemlist.append(Item(channel=item.channel, title="Videos", action="lista", url=host + "/new-releases/"))
     itemlist.append(Item(channel=item.channel, title="Canales", action="canales", url=host))
     itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host))
     itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
@@ -81,10 +81,10 @@ def lista(item):
         if calidad:
             scrapedtitle = "[COLOR red]" + calidad + "[/COLOR] " + scrapedtitle
         itemlist.append(Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
-                             thumbnail=scrapedthumbnail, fulltitle=scrapedtitle, plot=scrapedplot))
+                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, fulltitle=scrapedtitle, plot=scrapedplot))
     next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)"')
     if next_page != "":
-        itemlist.append(item.clone(action="lista", title="Next page >>", text_color="blue", url=next_page))
+        itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
     return itemlist

plugin.video.alfa/channels/siska.json
Normal file
@@ -0,0 +1,15 @@
{
    "id": "siska",
    "name": "siska",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "http://www.siska.tv/images/siska.png?50",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [
    ]
}

plugin.video.alfa/channels/siska.py
Normal file
@@ -0,0 +1,89 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools

host = 'http://www.siska.tv/'


def mainlist(item):
    logger.info()
    itemlist = []

    itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "newVideo.php?language=en"))
    itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="lista", url=host + "MostViewed.php?views=month&language=en"))
    itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "Channel.php?language=en"))

    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "index.php?category=1&language=en"))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "search.php?q=%s&language=en&search=Search" % texto
    try:
        return lista(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data)
    data = scrapertools.get_match(data,'<div id="content">(.*?)<div class="maincat">')
    patron = '<a href="(.*?)".*?'
    patron += '<img src="(.*?)".*?alt="(.*?)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
        scrapedplot = ""
        scrapedtitle = scrapedtitle.replace("Watch Channel ", "")
        url = urlparse.urljoin(item.url,scrapedurl)
        thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=url,
                              thumbnail=thumbnail , plot=scrapedplot) )
    return itemlist


def lista(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data)
    if "catID=" in item.url:
        patron = '<li><h3><a href="([^"]+)">.*?'
        patron += '<img src="([^"]+)" class="imgt" alt="([^"]+)".*?'
        patron += '<div class="time">(.*?)</div>'
    else:
        patron = '<li><h3><a href=\'([^\']+)\'>.*?'
        patron += '<img src=\'([^\']+)\' class=\'imgt\' alt=\'(.*?)\'.*?'
        patron += '<div class=\'time\'>(.*?)</div>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedthumbnail,scrapedtitle,scrapedtime in matches:
        scrapedtime = scrapedtime.replace("Duration: ", "").replace(" : ", ":")
        url = urlparse.urljoin(item.url,scrapedurl)
        title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
        thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
        plot = ""
        itemlist.append( Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
                              contentTitle = scrapedtitle))
    next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"><span>Next')
    if next_page == "":
        next_page = scrapertools.find_single_match(data, '<a href=\'([^\']+)\' title=\'Next Page\'>')
    if next_page:
        next_page = urlparse.urljoin(item.url,next_page)
        itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue",
                              url=next_page) )
    return itemlist
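
The two patron branches in lista() differ only in quoting style: pages reached through catID= URLs use double-quoted attributes, the other listings single-quoted ones. A standalone check with made-up sample markup (the HTML snippets below are illustrative, not taken from the site) shows each variant matching its own style:

    import re

    # Hypothetical snippets mimicking the two quoting styles of the site.
    html_dq = '<li><h3><a href="/v/1">x</a></h3><img src="/t/1.jpg" class="imgt" alt="Clip A"><div class="time">Duration: 10 : 00</div>'
    html_sq = "<li><h3><a href='/v/2'>y</a></h3><img src='/t/2.jpg' class='imgt' alt='Clip B'><div class='time'>Duration: 12 : 30</div>"

    patron_dq = '<li><h3><a href="([^"]+)">.*?<img src="([^"]+)" class="imgt" alt="([^"]+)".*?<div class="time">(.*?)</div>'
    patron_sq = "<li><h3><a href='([^']+)'>.*?<img src='([^']+)' class='imgt' alt='(.*?)'.*?<div class='time'>(.*?)</div>"

    print(re.findall(patron_dq, html_dq, re.DOTALL))  # [('/v/1', '/t/1.jpg', 'Clip A', 'Duration: 10 : 00')]
    print(re.findall(patron_sq, html_sq, re.DOTALL))  # [('/v/2', '/t/2.jpg', 'Clip B', 'Duration: 12 : 30')]
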
@@ -1,7 +1,7 @@
{
    "id": "spankwire",
    "name": "spankwire",
    "active": true,
    "active": false,
    "adult": true,
    "language": ["*"],
    "thumbnail": "https://cdn1-static-spankwire.spankcdn.net/apple-touch-icon-precomposed.png",

@@ -67,7 +67,7 @@ def catalogo(item):
        scrapedplot = ""
        scrapedtitle = scrapedtitle + " (" + cantidad + ")"
        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                              thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
    next_page = scrapertools.find_single_match(data,'<li><a class="pag-next" href="(.*?)">Next ></a>')
    if next_page!="":
        next_page = urlparse.urljoin(item.url,next_page)
@@ -78,10 +78,10 @@ def catalogo(item):
def lista(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    data = scrapertools.get_match(data,'class="thumbs-container">(.*?)<div class="clearfix">')
    patron = '<p class="btime">([^"]+)</p>.*?href="([^"]+)".*?src="([^"]+)".*?title="([^"]+)">'
    patron = '<p class="btime">([^"]+)</p>.*?href="([^"]+)".*?src="([^"]+)".*?title="([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for duracion,scrapedurl,scrapedthumbnail,scrapedtitle in matches:
        url = scrapedurl
@@ -101,7 +101,7 @@ def lista(item):
def play(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    patron = '<video src="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl in matches:

@@ -49,7 +49,7 @@ def categorias(item):
        scrapedplot = ""
        scrapedtitle = scrapedtitle + " (" + cantidad + ")"
        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                              thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
    next_page = scrapertools.find_single_match(data,'<li><a class="pag-next" href="([^"]+)"><ins>Next</ins></a>')
    if next_page!="":
        next_page = urlparse.urljoin(item.url,next_page)
@@ -60,7 +60,7 @@ def categorias(item):
def lista(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = 'href="([^"]+)"\s*class="th-video.*?'
    patron += '<img src="([^"]+)".*?'
@@ -85,7 +85,7 @@ def lista(item):
def play(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    patron = '<video src="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url in matches:

@@ -56,7 +56,7 @@ def catalogo(item):
        scrapedtitle = scrapedtitle + " (" + cantidad + ")"
        scrapedurl = host + scrapedurl
        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                              thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
    next_page = scrapertools.find_single_match(data,'<a class="next" href="([^"]+)">')
    if next_page!="":
        next_page = urlparse.urljoin(item.url,next_page)
@@ -84,7 +84,7 @@ def categorias(item):
def lista(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = '<div class="video">.*?'
    patron += '<a href="([^"]+)".*?'
@@ -92,11 +92,12 @@ def lista(item):
    patron += '<img src="([^"]+)" alt="([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,time,scrapedthumbnail,scrapedtitle in matches:
        scrapedtitle = scrapedtitle.replace(", ", " & ").replace("(", "(").replace(")", ")")
        title = "[COLOR yellow]" + time + " [/COLOR]" + scrapedtitle
        thumbnail = scrapedthumbnail
        plot = ""
        itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl,
                              thumbnail=thumbnail, fanart=thumbnail, plot=plot, contentTitle = title))
                              fanart=thumbnail, thumbnail=thumbnail, plot=plot, contentTitle = title))
    next_page = scrapertools.find_single_match(data,'<a class="next.*?title="Next Page" href="([^"]+)">')
    if next_page!="":
        next_page = urlparse.urljoin(item.url,next_page)
@@ -107,7 +108,7 @@ def lista(item):
def play(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    patron = '<source src="([^"]+)" type="video/mp4" label="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl,scrapedtitle in matches:

@@ -15,9 +15,9 @@ host = 'https://watchpornfree.ws'
def mainlist(item):
    logger.info("")
    itemlist = []
    itemlist.append( Item(channel=item.channel, title="Videos" , action="lista", url=host + "/category/clips-scenes"))
    itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/movies"))
    itemlist.append( Item(channel=item.channel, title="Parodia" , action="lista", url=host + "/category/parodies-hd"))
    itemlist.append( Item(channel=item.channel, title="Videos" , action="lista", url=host + "/category/clips-scenes"))
    itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host))
    itemlist.append( Item(channel=item.channel, title="Año" , action="categorias", url=host))
    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
@@ -37,8 +37,7 @@ def search(item, texto):
            logger.error("%s" % line)
        return []

# <li class="cat-item cat-item-6"><a href="https://watchpornfree.ws/category/all-girl" >All Girl</a> (2,777)
# </li>

def categorias(item):
    logger.info("")
    itemlist = []
@@ -62,13 +61,13 @@ def categorias(item):
def lista(item):
    logger.info("")
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    patron = '<article class="TPost B">.*?<a href="([^"]+)">.*?src="([^"]+)".*?<div class="Title">([^"]+)</div>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
        scrapedplot = ""
        itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
                              thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
    next_page = scrapertools.find_single_match(data,'<a class="next page-numbers" href="([^"]+)">Next »</a>')
    if next_page!="":
        next_page = urlparse.urljoin(item.url,next_page)

@@ -31,7 +31,7 @@ def categorias(item):
        scrapedplot = ""
        scrapedthumbnail = host + scrapedthumbnail
        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                              thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
    return itemlist


@@ -60,7 +60,7 @@ def lista(item):
def play(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    scrapedurl = scrapertools.find_single_match(data,'<iframe src="(.*?)"')
    scrapedurl = scrapedurl.replace("pornhub.com/embed/", "pornhub.com/view_video.php?viewkey=")
    data = httptools.downloadpage(scrapedurl).data
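
The replace() in play() turns the embedded player URL into the regular watch page before the second download, since the watch page is what the rest of the function scrapes. With a made-up viewkey, the rewrite looks like this:

    # Illustrative only; the viewkey below is invented.
    embed = "https://es.pornhub.com/embed/ph0123456789abc"
    watch = embed.replace("pornhub.com/embed/", "pornhub.com/view_video.php?viewkey=")
    # watch == "https://es.pornhub.com/view_video.php?viewkey=ph0123456789abc"
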
plugin.video.alfa/channels/xxxfreeinhd.json
Normal file
@@ -0,0 +1,15 @@
{
    "id": "xxxfreeinhd",
    "name": "xxxfreeinhd",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "https://watchxxxfreeinhd.com/wp-content/uploads/logo2015%20(1).jpg",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [
    ]
}

plugin.video.alfa/channels/xxxfreeinhd.py
Normal file
@@ -0,0 +1,93 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools

host = 'https://watchxxxfreeinhd.com'


def mainlist(item):
    logger.info()
    itemlist = []

    itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/?filtre=date&cat=0"))
    itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="lista", url=host + "/?display=tube&filtre=views"))
    itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/?display=tube&filtre=rate"))

    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "search.php?q=%s&language=en&search=Search" % texto
    try:
        return lista(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data)
    patron = '<noscript>.*?src="([^"]+)".*?'
    patron += '<a href="([^"]+)" title="([^"]+)".*?'
    patron += '<span class="nb_cat border-radius-5">(\d+) videos</span>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedthumbnail,scrapedurl,scrapedtitle,cantidad in matches:
        scrapedplot = ""
        title = scrapedtitle + " (" + cantidad + ")"
        itemlist.append( Item(channel=item.channel, action="lista", title=title, url=scrapedurl,
                              thumbnail=scrapedthumbnail , plot=scrapedplot) )
    return itemlist


def lista(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data)
    patron = '<li class="border-radius-5 box-shadow">.*?'
    patron += '<img width="\d+" height="\d+" src="([^"]+)" class=.*?'
    patron += '<a href="([^"]+)" title="([^"]+)">.*?'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedthumbnail,scrapedurl,scrapedtitle in matches:
        title = scrapedtitle
        thumbnail = scrapedthumbnail + "|https://watchxxxfreeinhd.com/"
        plot = ""
        itemlist.append( Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
                              thumbnail=thumbnail, plot=plot, fanart=scrapedthumbnail, contentTitle = scrapedtitle))
    next_page = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)"')
    if next_page:
        next_page = urlparse.urljoin(item.url,next_page)
        itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue",
                              url=next_page) )
    return itemlist


def findvideos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    data = scrapertools.get_match(data,'<div class="video-embed">(.*?)</div>')
    patron = '<noscript>.*?<iframe src="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url in matches:
        itemlist.append(item.clone(action="play", title = "%s", url=url ))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    return itemlist
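
In findvideos() each cloned item carries the literal title "%s"; servertools.get_servers_itemlist() identifies the server behind every URL and the lambda then substitutes the capitalized server name into that placeholder. A minimal stand-in illustrating just the formatting step (FakeItem and the server value are invented for the example; the detection itself happens inside alfa's core):

    # Stand-in object; core.item.Item behaves the same way for this purpose.
    class FakeItem:
        title = "%s"
        server = "openload"  # invented value; assigned by get_servers_itemlist in practice

    fmt = lambda i: i.title % i.server.capitalize()
    print(fmt(FakeItem()))  # -> Openload
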
@@ -16,7 +16,7 @@ def mainlist(item):
    itemlist.append( Item(channel=item.channel, title="Nuevas", action="lista", url=host + "/browse/time/"))
    itemlist.append( Item(channel=item.channel, title="Mas Vistas", action="lista", url=host + "/browse/views/"))
    itemlist.append( Item(channel=item.channel, title="Mejor valorada", action="lista", url=host + "/top_rated/"))
    itemlist.append( Item(channel=item.channel, title="Canal", action="categorias", url=host + "/channels/rating/"))
    itemlist.append( Item(channel=item.channel, title="Canal", action="categorias", url=host + "/channels/most_popular/"))
    itemlist.append( Item(channel=item.channel, title="Pornstars", action="catalogo", url=host + "/pornstars/most_popular/"))
    itemlist.append( Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/categories/alphabetical/"))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
@@ -52,7 +52,7 @@ def catalogo(item):
        scrapedtitle = scrapedtitle + " (" + cantidad + ")"
        scrapedurl = urlparse.urljoin(item.url,scrapedurl)
        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                              thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
    next_page = scrapertools.find_single_match(data,'<div class="currentPage".*?<a href="([^"]+)"')
    if next_page!="":
        next_page = urlparse.urljoin(item.url,next_page)
@@ -82,7 +82,7 @@ def categorias(item):
        title = scrapedtitle + " (" + cantidad +")"
        scrapedurl = urlparse.urljoin(item.url,scrapedurl)
        itemlist.append( Item(channel=item.channel, action="lista", title=title, url=scrapedurl,
                              thumbnail=thumbnail, fanart=thumbnail, plot=scrapedplot) )
                              fanart=thumbnail, thumbnail=thumbnail, plot=scrapedplot) )
    next_page = scrapertools.find_single_match(data,'<div class="currentPage".*?<a href="([^"]+)"')
    if next_page!="":
        next_page = urlparse.urljoin(item.url,next_page)
@@ -93,7 +93,7 @@ def lista(item):
def lista(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = '<a href="([^"]+)" class=\'video-box-image\'.*?'
    patron += 'data-original="([^"]+)".*?'
@@ -108,7 +108,7 @@ def lista(item):
        plot = ""
        itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail,
                              fanart=thumbnail, plot=plot, contentTitle = contentTitle))
    next_page = scrapertools.find_single_match(data,'<div class="currentPage".*?<a href="([^"]+)"')
    next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)"')
    if next_page!="":
        next_page = urlparse.urljoin(item.url,next_page)
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
@@ -118,7 +118,7 @@ def lista(item):
def play(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    patron = 'page_params.video.mediaDefinition =.*?"videoUrl":"([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl in matches: