Cloning of Mispeliculasyseries, descargas2020, torrentrapid and torrentlocura on top of the improved newpct1 code
Areas of improvement for all of these channels:

- Cleaner, more understandable labelling in every area, above all on the final server screen. The quality, language, episode and title parameters are now included, and the labelling of server names also improves.
- The labelling of videos in the video library improves, although it follows its own rules and I do not know how to improve it further.
- The context menu allows movies or series to be added to the video library, including from the screen returned by a search.
- More robust data scraping: the channel is more flexible when some field has not been filled in, and tries to recover from the error instead of letting the channel fail (see the first sketch below).
- Special episodes are now supported in series; until now they crashed the channel, because their season and episode are reported in a different format (see the second sketch below).
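As a rough illustration of the scraping-robustness point above, here is a minimal, self-contained sketch in the same spirit as the new listado()/episodios() patterns in the diff. It is not the channel code itself: the sample info string, the simplified pattern and the variable names are invented for the example. The idea is that every scraped field becomes an optional regex group, and anything the page does not provide falls back to a default instead of raising (runs on Python 2 or 3):

    # -*- coding: utf-8 -*-
    # Sketch only: optional groups plus fallback defaults, in the spirit of the
    # new episodios() pattern. The sample 'info' string is hypothetical.
    import re

    info = '<span>Temporada 2 Capitulo 5</span><span>Castellano</span> Calidad <span>[HDTV]</span>'

    # Every field is optional, so a page with a missing season, episode or
    # quality no longer aborts the whole listing.
    pattern = (r"Temporada\s*(?P<season>\d+)?.*?Capitulo(?:s)?\s*(?P<episode>\d+)?"
               r".*?<span[^>]*>(?P<lang>.*?)</span>\s*Calidad\s*<span[^>]*>"
               r"\[?\s*(?P<quality>.*?)\s*\]?</span>")

    m = re.search(pattern, info, re.DOTALL)
    match = m.groupdict() if m else {}

    # Recover instead of failing: default any field the page did not provide.
    season = match.get('season') or "1"
    episode = match.get('episode') or "0"
    lang = match.get('lang') or ""
    quality = match.get('quality') or ""

    print("%sx%s [%s][%s]" % (season, episode.zfill(2), lang, quality))

This mirrors the `if match['season'] is None: match['season'] = season` style of defaulting that the diff adds to episodios().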
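The special-episode handling follows the same defensive style. A hypothetical sketch (sample string and names invented; the real patterns live in episodios() below): when "Especial" appears in the scraped info, a bracket-aware pattern is selected, because special chapters publish season and chapter inside brackets rather than as plain "Temporada N ... Capitulo M":

    # -*- coding: utf-8 -*-
    # Sketch only: special episodes list season and chapter in brackets, so a
    # different pattern is chosen when "Especial" shows up in the info text.
    import re

    info = 'Temporada [ 3 ] Capitulo Especial [ 07 ] ...'

    if "Especial" in info:
        # Bracketed form used by special chapters
        pattern = r"Temporada.*?\[.*?(?P<season>\d+).*?\].*?Capitulo.*?\[\s*(?P<episode>\d+).*?\]"
    else:
        # Regular form, with both fields optional as in the first sketch
        pattern = r"Temporada\s*(?P<season>\d+)?.*?Capitulo(?:s)?\s*(?P<episode>\d+)?"

    m = re.search(pattern, info, re.DOTALL)
    match = m.groupdict() if m else {}
    season = match.get('season') or "1"
    episode = match.get('episode') or "0"

    print("%sx%s" % (season, episode.zfill(2)))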
@@ -10,7 +10,8 @@
        "movie",
        "tvshow",
        "anime",
        "torrent"
        "torrent",
        "documentary"
    ],
    "settings": [
        {
@@ -21,6 +22,22 @@
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Incluir en Novedades - Peliculas",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_series",
            "type": "bool",
            "label": "Incluir en Novedades - Episodios de series",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_torrent",
            "type": "bool",

@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-

import re

@@ -10,7 +10,7 @@ from core.item import Item
from platformcode import config, logger
from core import tmdb

host = 'http://descargas2020.com/' # Cambiar manualmente "xx" en línea 287 ".com/xx/library" por tl para torrentlocura, tr para torrentrapid, d20 para descargas2020
host = 'http://descargas2020.com/'

def mainlist(item):
    logger.info()
@@ -26,7 +26,7 @@ def mainlist(item):

    itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=host, extra="series",
                         thumbnail=thumb_series))

    itemlist.append(Item(channel=item.channel, action="submenu", title="Documentales", url=host, extra="varios",
                         thumbnail=thumb_series))
    itemlist.append(
@@ -40,12 +40,17 @@ def submenu(item):

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    data = data.replace("'", "\"").replace("/series\"", "/series/\"") #Compatibilidad con mispelisy.series.com

    #patron = '<li><a href="http://(?:www.)?descargas2020.com/' + item.extra + '/">.*?<ul>(.*?)</ul>'
    patron = '<li><a href="'+item.url+item.extra + '/">.*?<ul>(.*?)</ul>' #Filtrado por url
    data = scrapertools.get_match(data, patron)
    patron = '<li><.*?href="'+item.url+item.extra + '/">.*?<ul.*?>(.*?)</ul>' #Filtrado por url, compatibilidad con mispelisy.series.com
    #logger.debug("patron: " + patron + " / data: " + data)
    if "pelisyseries.com" in host and item.extra == "varios": #compatibilidad con mispelisy.series.com
        data = '<a href="http://descargas2020.com/varios/" title="Documentales"><i class="icon-rocket"></i> Documentales</a>'
    else:
        data = scrapertools.get_match(data, patron)

    patron = '<a href="([^"]+)".*?>([^>]+)</a>'
    patron = '<.*?href="([^"]+)".*?>([^>]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
@@ -55,12 +60,12 @@ def submenu(item):
        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra="pelilist"))
        itemlist.append(
            Item(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url, extra="pelilist"))

    if item.extra == "peliculas":
        itemlist.append(Item(channel=item.channel, action="listado", title="Películas 4K", url=host + "peliculas-hd/4kultrahd/", extra="pelilist"))
        itemlist.append(
            Item(channel=item.channel, action="alfabeto", title="Películas 4K" + " [A-Z]", url=host + "peliculas-hd/4kultrahd/", extra="pelilist"))

    return itemlist

@@ -91,16 +96,16 @@ def listado(item):
    itemlist = []
    url_next_page =''

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    #data = httptools.downloadpage(item.url).data
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    #logger.debug(data)

    logger.debug('item.modo: %s'%item.modo)
    logger.debug('item.extra: %s'%item.extra)

    if item.modo != 'next' or item.modo =='':
        logger.debug('item.title: %s'% item.title)
        patron = '<ul class="' + item.extra + '">(.*?)</ul>'
        logger.debug("patron=" + patron)
        fichas = scrapertools.get_match(data, patron)
        page_extra = item.extra
    else:
@@ -109,11 +114,13 @@ def listado(item):

    patron = '<a href="([^"]+).*?' # la url
    patron += 'title="([^"]+).*?' # el titulo
    patron += '<img src="([^"]+)"[^>]+>.*?' # el thumbnail
    #patron += '<span>([^<].*?)<' # la calidad: original de NewPCT1: si falta la calidad, el siguiente "matches" entra en un loop
    patron += '<img.*?src="([^"]+)"[^>]+>.*?' # el thumbnail
    patron += '<h2.*?>(.*?)?<\/h2>' # titulo alternativo
    patron += '<span>([^<].*?)?<' # la calidad
    #logger.debug("patron: " + patron + " / fichas: " + fichas)
    matches = re.compile(patron, re.DOTALL).findall(fichas)
    logger.debug('item.next_page: %s'%item.next_page)
    #logger.debug('item.next_page: %s'%item.next_page)
    #logger.debug(matches)

    # Paginacion
    if item.next_page != 'b':
@@ -132,20 +139,22 @@ def listado(item):
            url_next_page = matches_next_page[0]
            modo = 'next'

    for scrapedurl, scrapedtitle, scrapedthumbnail, calidad in matches:
    for scrapedurl, scrapedtitle, scrapedthumbnail, title_alt, calidad in matches:
        url = scrapedurl
        title = scrapedtitle
        thumbnail = scrapedthumbnail
        action = "findvideos"
        extra = ""
        context = "movie"
        year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
        if ".com/series" in url:
        if ".com/serie" in url and "/miniseries" not in url:
            action = "episodios"
            extra = "serie"
            context = "tvshow"

            title = scrapertools.find_single_match(title, '([^-]+)')
            title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace("Ver en linea", "",
            title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace("Ver en linea ", "",
                                                                                                   1).strip()

        else:
@@ -153,39 +162,43 @@ def listado(item):
            if title.endswith("gratis"): title = title[:-7]
            if title.endswith("torrent"): title = title[:-8]
            if title.endswith("en HD"): title = title[:-6]

        if title == "":
            title = title_alt
            context_title = title_alt
            show = title_alt
        #if item.extra != "buscar-list":
        #    title = title + '[' + calidad + "]"

        show = title
        if item.extra != "buscar-list":
            title = title + ' [' + calidad + "]"
        #Este bucle parece obsoleto:
        #context = ""
        #context_title = scrapertools.find_single_match(url, "http://(?:www.)?descargas2020.com/(.*?)/(.*?)/")
        #if context_title:
        #    try:
        #        context = context_title[0].replace("descargar-", "").replace("descargar", "").replace("pelicula", "movie").replace("series", "tvshow").replace("-hd", "").replace("-vo", "")
        #        context_title = context_title[1].replace("-", " ")
        #        if re.search('\d{4}', context_title[-4:]):
        #            context_title = context_title[:-4]
        #        elif re.search('\(\d{4}\)', context_title[-6:]):
        #            context_title = context_title[:-6]
        #
        #    except:
        #        context_title = show
        #

        context = ""
        context_title = scrapertools.find_single_match(url, "http://(?:www.)?descargas2020.com/(.*?)/(.*?)/")
        if context_title:
            try:
                context = context_title[0].replace("descargar-", "").replace("pelicula", "movie").replace("series",
                                                                                                          "tvshow")
                context_title = context_title[1].replace("-", " ")
                if re.search('\d{4}', context_title[-4:]):
                    context_title = context_title[:-4]
                elif re.search('\(\d{4}\)', context_title[-6:]):
                    context_title = context_title[:-6]

            except:
                context_title = show
        logger.debug('contxt title: %s'%context_title)
        logger.debug('year: %s' % year)

        logger.debug('context: %s' % context)
        #logger.debug('contxt title: %s'%context_title)
        #logger.debug('year: %s' % year)
        #logger.debug('context: %s' % context)

        if not 'array' in title:
            itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
                                 extra = extra,
                                 show = context_title, contentTitle=context_title, contentType=context,
                                 context=["buscar_trailer"], infoLabels= {'year':year}))
                                 extra = extra, show = context_title, contentTitle=context_title, contentType=context, quality=calidad,
                                 context=["buscar_trailer"], infoLabels= {'year':year}))

        logger.debug("url: " + url + " / title: " + title + " / contxt title: " + context_title + " / context: " + context + " / calidad: " + calidad+ " / year: " + year)

    tmdb.set_infoLabels(itemlist, True)

    if url_next_page:
        itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente",
                             url=url_next_page, next_page=next_page, folder=True,
@@ -198,7 +211,7 @@ def listado_busqueda(item):
    itemlist = []
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url, post=item.post).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    list_chars = [["ñ", "ñ"]]

    for el in list_chars:
@@ -218,32 +231,80 @@ def listado_busqueda(item):

    pattern = '<ul class="%s">(.*?)</ul>' % item.pattern
    data = scrapertools.get_match(data, pattern)
    pattern = '<li><a href="(?P<url>[^"]+)".*?<img src="(?P<img>[^"]+)"[^>]+>.*?<h2.*?>\s*(?P<title>.*?)\s*</h2>'

    pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img.*?src="(?P<thumb>[^"]+)?".*?<h2.*?>(?P<title>.*?)?<\/h2>'
    matches = re.compile(pattern, re.DOTALL).findall(data)
    #logger.debug("patron: " + pattern)
    #logger.debug(matches)

    for url, thumb, title in matches:
        # fix encoding for title
        real_title = scrapertools.find_single_match(title, r'font color.*?font.*?><b>(.*?)<\/b><\/font>')
        real_title = scrapertools.find_single_match(title, r'<strong.*?>(.*?)Temporada.*?<\/strong>') #series
        if real_title == "":
            real_title = scrapertools.find_single_match(title, r'(.*?)\[.*?]') #movies
        real_title = scrapertools.remove_htmltags(real_title).decode('iso-8859-1').encode('utf-8')
        real_title = scrapertools.htmlclean(real_title)
        calidad = scrapertools.find_single_match(title, r'.*?\s*Calidad.*?<span[^>]+>[\[]\s*(?P<quality>.*?)\s*[\]]<\/span>') #series
        if calidad == "":
            calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])') #movies
        year = scrapertools.find_single_match(thumb, r'-(\d{4})')

        # fix encoding for title
        title = scrapertools.htmlclean(title)
        title = title.replace("�", "ñ")
        title = title.replace("�", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng")
        title = re.sub(r'(Calidad.*?\])', '', title)

        if real_title == "":
            real_title = title
        if calidad == "":
            calidad = title
        context = "movie"

        # no mostramos lo que no sean videos
        if "/juego/" in url or "/varios/" in url:
        if "juego/" in url:
            continue

        if ".com/series" in url:
        # Codigo para rescatar lo que se puede en pelisy.series.com de Series para la Videoteca. la URL apunta al capítulo y no a la Serie. Nombre de Serie frecuentemente en blanco. Se obtiene de Thumb, así como el id de la serie
        if ("/serie" in url or "-serie" in url) and "pelisyseries.com" in host:
            calidad_mps = "series/"
            if "seriehd" in url:
                calidad_mps = "series-hd/"
            if "serievo" in url:
                calidad_mps = "series-vo/"
            if "serie-vo" in url:
                calidad_mps = "series-vo/"

            real_title_mps = re.sub(r'.*?\/\d+_', '', thumb)
            real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps)

            if "/0_" not in thumb:
                serieid = scrapertools.find_single_match(thumb, r'.*?\/\w\/(?P<serieid>\d+).*?.*')
                if len(serieid) > 5:
                    serieid = ""
            else:
                serieid = ""

            url = host + calidad_mps + real_title_mps + "/" + serieid

            real_title_mps = real_title_mps.replace("-", " ")
            #logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / real_title_mps: " + real_title_mps + " / calidad_mps : " + calidad_mps)
            real_title = real_title_mps

            show = real_title

        show = real_title
        if ".com/serie" in url and "/miniseries" not in url:
            context = "tvshow"

            itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb,
                                 context=["buscar_trailer"], contentSerieName=show))
            itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb, quality=calidad,
                                 show=show, extra="serie", context=["buscar_trailer"], contentType=context, contentTitle=real_title, contentSerieName=real_title, infoLabels= {'year':year}))
        else:
            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
                                 context=["buscar_trailer"]))

            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, quality=calidad,
                                 show=show, context=["buscar_trailer"], contentType=context, contentTitle=real_title, infoLabels= {'year':year}))

        logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / show: " + show + " / calidad: " + calidad)

    tmdb.set_infoLabels(itemlist, True)

    if post:
        itemlist.append(item.clone(channel=item.channel, action="listado_busqueda", title=">> Página siguiente",
                                   thumbnail=get_thumb("next.png")))
@@ -253,7 +314,6 @@ def listado_busqueda(item):

def findvideos(item):
    logger.info()
    itemlist = []

    ## Cualquiera de las tres opciones son válidas
    # item.url = item.url.replace(".com/",".com/ver-online/")
    # item.url = item.url.replace(".com/",".com/descarga-directa/")
@@ -263,32 +323,36 @@ def findvideos(item):
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    data = data.replace("$!", "#!").replace("'", "\"").replace("ñ", "ñ").replace("//pictures", "/pictures")

    title = scrapertools.find_single_match(data, "<h1><strong>([^<]+)<\/strong>[^<]+<\/h1>")
    title += scrapertools.find_single_match(data, "<h1><strong>[^<]+<\/strong>([^<]+)<\/h1>")
    caratula = scrapertools.find_single_match(data, '<div class="entry-left">.*?src="([^"]+)"')

    #<div style="float:left;width:100%;min-height:70px;margin:10px 0px;"> <a href="javascript:void(0);" onClick="javascript:openTorrent();" title="Descargar torrent de Star Wars Los Ultimos Jedi " class="btn-torrent">Descarga tu Archivo torrent!</a> <script type="text/javascript"> function openTorrent() {var link = "http://advserver.xyz/v2/gena?gid=ADQGZS0ABR&uid=164"; window.open(link); window.location.href = "http://descargas2020.com/descargar-torrent/104616_-1520707769-star-wars-los-ultimos-jedi--bluray-screeener/";} </script> </div>

    title = scrapertools.find_single_match(data, "<h1.*?<strong>([^<]+)<\/strong>.*?<\/h1>") #corregido para adaptarlo a mispelisy.series.com
    title += scrapertools.find_single_match(data, "<h1.*?<strong>[^<]+<\/strong>([^<]+)<\/h1>") #corregido para adaptarlo a mispelisy.series.com
    #caratula = scrapertools.find_single_match(data, '<div class="entry-left">.*?src="([^"]+)"')
    caratula = scrapertools.find_single_match(data, '<h1.*?<img.*?src="([^"]+)')

    patron = 'openTorrent.*?title=".*?class="btn-torrent">.*?function openTorrent.*?href = "(.*?)";'

    #logger.debug("patron: " + patron + " / data: " + data)
    # escraped torrent
    url = scrapertools.find_single_match(data, patron)
    logger.debug("urltorrent: " + url + " Title: " + title + " Caratula: " + caratula)
    if url != "":

    if "Temp" in title and item.quality != "": #scrapear información duplicada en Series
        title = re.sub(r'Temp.*?\[', '[', title)
        title = re.sub(r'\[Cap.*?\]', '', title)
        title = str(item.contentSeason) + "x" + str(item.contentEpisodeNumber) + " - " + item.contentTitle + ", " + title

    if url != "": #Torrent
        itemlist.append(
            Item(channel=item.channel, action="play", server="torrent", title="[torrent] - " + title, fulltitle=title,
            Item(channel=item.channel, action="play", server="torrent", title=title, quality=title, fulltitle=title,
                 url=url, thumbnail=caratula, plot=item.plot, folder=False))

    logger.debug("url: " + url + " / title: " + title + " / calidad: " + item.quality + " / context: " + str(item.context))

    # escraped ver vídeos, descargar vídeos un link, múltiples liks

    data = data.replace("http://tumejorserie.com/descargar/url_encript.php?link=", "(")
    data = data.replace(
        'javascript:;" onClick="popup("http://www.descargas2020.com/d20/library/include/ajax/get_modallinks.php?links=', "")
    data = re.sub(r'javascript:;" onClick="popup\("http:\/\/(?:www.)?descargas2020.com\/\w{1,9}\/library\/include\/ajax\/get_modallinks.php\?links=', "", data)
    #logger.debug("matar %s" % data)

    logger.debug("matar %s" % data)

    # Antiguo sistema de scrapeo de servidores usado por Newpct1. Como no funciona con descargas2020, se sustituye por este más común
    # Antiguo sistema de scrapeo de servidores usado por Newpct1. Como no funciona con torrent.locura, se sustituye por este más común
    #patron_descargar = '<div id="tab2"[^>]+>.*?</ul>'
    #patron_ver = '<div id="tab3"[^>]+>.*?</ul>'
@@ -309,37 +373,48 @@ def findvideos(item):
    patron = '<div class=\"box1\"[^<]+<img src=\"([^<]+)?" style[^<]+><\/div[^<]+<div class="box2">([^<]+)?<\/div[^<]+<div class="box3">([^<]+)?'
    patron += '<\/div[^<]+<div class="box4">([^<]+)?<\/div[^<]+<div class="box5"><a href=(.*?)? rel.*?'
    patron += '<\/div[^<]+<div class="box6">([^<]+)?<'
    logger.debug("Patron: " + patron)
    #logger.debug("Patron: " + patron)

    enlaces_ver = re.compile(patron, re.DOTALL).findall(data)
    enlaces_descargar = enlaces_ver
    logger.debug(enlaces_ver)
    #logger.debug(enlaces_ver)

    if len(enlaces_ver) > 0:
        itemlist.append(item.clone(title="", action="", folder=False))
        itemlist.append(item.clone(title=" Enlaces Ver: ", action="", text_color="green", folder=False))

    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver:
        if "Ver" in titulo:
            servidor = servidor.replace("streamin", "streaminto")
            titulo = titulo + " [" + servidor + "]"
            titulo = title
            mostrar_server = True
            if config.get_setting("hidepremium"):
                mostrar_server = servertools.is_server_enabled(servidor)
            logger.debug("url: " + enlace + " / title: " + title + " / servidor: " + servidor + " / idioma: " + idioma)
            if mostrar_server:
                try:
                    devuelve = servertools.findvideosbyserver(enlace, servidor)
                    if devuelve:
                        enlace = devuelve[0][1]
                        itemlist.append(
                            Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo,
                                 fulltitle=item.title, url=enlace, thumbnail=logo, plot=item.plot, folder=False))
                            Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo, quality=titulo,
                                 fulltitle=titulo, url=enlace, thumbnail=logo, plot=item.plot, folder=False))
                except:
                    pass

    if len(enlaces_descargar) > 0:
        itemlist.append(item.clone(title="", action="", folder=False))
        itemlist.append(item.clone(title=" Enlaces Descargar: ", action="", text_color="green", folder=False))

    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar:
        if "Ver" not in titulo:
            servidor = servidor.replace("uploaded", "uploadedto")
            partes = enlace.split(" ")
            titulo = "Partes "
            p = 1
            logger.debug("url: " + enlace + " / title: " + title + " / servidor: " + servidor + " / idioma: " + idioma)
            for enlace in partes:
                parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " [" + servidor + "]"
                parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " - " + title
                p += 1
                mostrar_server = True
                if config.get_setting("hidepremium"):
@@ -349,11 +424,12 @@ def findvideos(item):
                    devuelve = servertools.findvideosbyserver(enlace, servidor)
                    if devuelve:
                        enlace = devuelve[0][1]
                        itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor,
                                             title=parte_titulo, fulltitle=item.title, url=enlace, thumbnail=logo,
                        itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, quality=parte_titulo,
                                             title=parte_titulo, fulltitle=parte_titulo, url=enlace, thumbnail=logo,
                                             plot=item.plot, folder=False))
                except:
                    pass

    return itemlist

@@ -363,6 +439,8 @@ def episodios(item):
    infoLabels = item.infoLabels
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    calidad = item.quality

    pattern = '<ul class="%s">(.*?)</ul>' % "pagination" # item.pattern
    pagination = scrapertools.find_single_match(data, pattern)
    if pagination:
@@ -381,22 +459,43 @@ def episodios(item):
        logger.debug("Loading page %s/%s url=%s" % (index, len(list_pages), page))
        data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data)
        data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

        data = data.replace("chapters", "buscar-list") #Compatibilidad con mispelisy.series.com
        pattern = '<ul class="%s">(.*?)</ul>' % "buscar-list" # item.pattern
        data = scrapertools.get_match(data, pattern)
        #logger.debug("data: " + data)

        pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img src="(?P<thumb>[^"]+)".*?<h2[^>]+>(?P<info>.*?)</h2>'
        if "pelisyseries.com" in host:
            pattern = '<li[^>]*><div class.*?src="(?P<thumb>[^"]+)?".*?<a class.*?href="(?P<url>[^"]+).*?<h3[^>]+>(?P<info>.*?)?<\/h3>.*?<\/li>'
        else:
            pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img.*?src="(?P<thumb>[^"]+)?".*?<h2[^>]+>(?P<info>.*?)?<\/h2>'
        matches = re.compile(pattern, re.DOTALL).findall(data)
        #logger.debug("patron: " + pattern)
        #logger.debug(matches)

        season = "1"

        for url, thumb, info in matches:

            if "pelisyseries.com" in host:
                interm = url
                url = thumb
                thumb = interm

            if "<span" in info: # new style
                pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)\s*Capitulo(?:s)?\s*(?P<episode>\d+)" \
                          "(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)</span>\s*Calidad\s*<span[^>]+>" \
                          "[\[]\s*(?P<quality>.*?)\s*[\]]</span>"
                pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)?.*?Capitulo(?:s)?\s*(?P<episode>\d+)?" \
                          "(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>" \
                          "[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
                if "Especial" in info: # Capitulos Especiales
                    pattern = ".*?[^>]+>.*?Temporada.*?\[.*?(?P<season>\d+).*?\].*?Capitulo.*?\[\s*(?P<episode>\d+).*?\]?(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
                logger.debug("patron: " + pattern)
                logger.debug(info)
                r = re.compile(pattern)
                match = [m.groupdict() for m in r.finditer(info)][0]

                if match['season'] is None: match['season'] = season
                if match['episode'] is None: match['episode'] = "0"
                if match['quality']: item.quality = match['quality']

                if match["episode2"]:
                    multi = True
                    title = "%s (%sx%s-%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
@@ -408,12 +507,17 @@ def episodios(item):
                                                     match["lang"], match["quality"])

            else: # old style
                pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+)(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
                pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+).*?(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
                          "(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"

                logger.debug("patron: " + pattern)
                logger.debug(info)
                r = re.compile(pattern)
                match = [m.groupdict() for m in r.finditer(info)][0]
                # logger.debug("data %s" % match)
                #logger.debug("data %s" % match)

                #if match['season'] is "": match['season'] = season
                #if match['episode'] is "": match['episode'] = "0"
                #logger.debug(match)

                str_lang = ""
                if match["lang"] is not None:
@@ -436,18 +540,19 @@ def episodios(item):

            season = match['season']
            episode = match['episode']
            logger.debug("title: " + title + " / url: " + url + " / calidad: " + item.quality + " / multi: " + str(multi) + " / Season: " + str(season) + " / EpisodeNumber: " + str(episode))
            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
                                 quality=item.quality, multi=multi, contentSeason=season,
                                 contentEpisodeNumber=episode, infoLabels = infoLabels))

    # order list
    #tmdb.set_infoLabels(itemlist, True)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
    if len(itemlist) > 1:
        itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            item.clone(title="[COLOR orange][B]Añadir esta serie a la videoteca[/B][/COLOR]", action="add_serie_to_library", extra="episodios"))
            item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios", quality=calidad))

    return itemlist

@@ -9,7 +9,8 @@
    "categories": [
        "torrent",
        "movie",
        "tvshow"
        "tvshow",
        "documentary"
    ],
    "settings": [
        {

@@ -1,137 +1,75 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-

import re
import urllib
import urlparse

from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
from channelselector import get_thumb
from platformcode import config, logger
from core import tmdb

host = 'http://mispelisyseries.com/'

def mainlist(item):
    logger.info()

    itemlist = []
    itemlist.append(Item(channel=item.channel, action="menu", title="Películas", url=host,
                         extra="Peliculas", folder=True, thumbnail=get_thumb('movies', auto=True)))

    thumb_pelis=get_thumb("channels_movie.png")
    thumb_series=get_thumb("channels_tvshow.png")
    thumb_search = get_thumb("search.png")

    itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=host,
                         extra="peliculas", thumbnail=thumb_pelis ))

    itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=host, extra="series",
                         thumbnail=thumb_series))

    itemlist.append(Item(channel=item.channel, action="submenu", title="Documentales", url=host, extra="varios",
                         thumbnail=thumb_series))
    itemlist.append(
        Item(channel=item.channel, action="menu", title="Series", url=host, extra="Series",
             folder=True, thumbnail=get_thumb('tvshows', auto=True)))
    itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url=host + 'buscar',
                         thumbnail=get_thumb('search', auto=True)))
        Item(channel=item.channel, action="search", title="Buscar", url=host + "buscar", thumbnail=thumb_search))

    return itemlist

def menu(item):
def submenu(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    # logger.info("data="+data)
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    data = data.replace("'", "\"").replace("/series\"", "/series/\"") #Compatibilidad con mispelisy.series.com

    data = scrapertools.find_single_match(data, item.extra + "</a[^<]+<ul(.*?)</ul>")
    # logger.info("data="+data)
    #patron = '<li><a href="http://(?:www.)?mispelisyseries.com/' + item.extra + '/">.*?<ul>(.*?)</ul>'
    patron = '<li><.*?href="'+item.url+item.extra + '/">.*?<ul.*?>(.*?)</ul>' #Filtrado por url, compatibilidad con mispelisy.series.com
    #logger.debug("patron: " + patron + " / data: " + data)
    if "pelisyseries.com" in host and item.extra == "varios": #compatibilidad con mispelisy.series.com
        data = '<a href="http://mispelisyseries.com/varios/" title="Documentales"><i class="icon-rocket"></i> Documentales</a>'
    else:
        data = scrapertools.get_match(data, patron)

    patron = "<li><a.*?href='([^']+)'[^>]+>([^<]+)</a></li>"
    patron = '<.*?href="([^"]+)".*?>([^>]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        title = scrapedtitle
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = ""
        plot = ""
        itemlist.append(Item(channel=item.channel, action="lista", title=title, url=url, thumbnail=thumbnail, plot=plot,
                             folder=True))

        if title != "Todas las Peliculas":
            itemlist.append(
                Item(channel=item.channel, action="alfabetico", title=title + " [A-Z]", url=url, thumbnail=thumbnail,
                     plot=plot, folder=True))

        title = scrapedtitle.strip()
        url = scrapedurl

        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra="pelilist"))
        itemlist.append(
            Item(channel=item.channel, action="alfabetico", title=title + " [A-Z]", url=url, thumbnail=thumbnail,
                 plot=plot,
                 folder=True))

    if 'películas' in item.title.lower():
        new_item = item.clone(title='Peliculas 4K', url=host+'buscar', post='q=4k', action='listado2',
                              pattern='buscar-list')
        itemlist.append(new_item)

            Item(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url, extra="pelilist"))

    if item.extra == "peliculas":
        itemlist.append(Item(channel=item.channel, action="listado", title="Películas 4K", url=host + "peliculas-hd/4kultrahd/", extra="pelilist"))
        itemlist.append(
            Item(channel=item.channel, action="alfabeto", title="Películas 4K" + " [A-Z]", url=host + "peliculas-hd/4kultrahd/", extra="pelilist"))

    return itemlist

def search(item, texto):
    logger.info("search:" + texto)
    # texto = texto.replace(" ", "+")

    #try:
    item.post = "q=%s" % texto
    item.pattern = "buscar-list"
    itemlist = listado2(item)

    return itemlist

    # Se captura la excepción, para no interrumpir al buscador global si un canal falla
    # except:
    #     import sys
    #     for line in sys.exc_info():
    #         logger.error("%s" % line)
    #     return []

def newest(categoria):
    itemlist = []
    item = Item()
    try:
        if categoria in ['peliculas', 'torrent']:
            item.url = host+"peliculas"

        elif categoria == 'series':
            item.url = host+"series"

        if categoria == '4k':
            item.url = host + '/buscar'
            item.post = 'q=4k'
            item.pattern = 'buscar-list'
            action = listado2(item)

        else:
            return []

        itemlist = lista(item)
        if itemlist[-1].title == ">> Página siguiente":
            itemlist.pop()

        # Esta pagina coloca a veces contenido duplicado, intentamos descartarlo
        dict_aux = {}
        for i in itemlist:
            if not i.url in dict_aux:
                dict_aux[i.url] = i
            else:
                itemlist.remove(i)

    # Se captura la excepción, para no interrumpir al canal novedades si un canal falla
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    # return dict_aux.values()
    return itemlist

def alfabetico(item):
def alfabeto(item):
    logger.info()
    itemlist = []

@@ -148,93 +86,137 @@ def alfabetico(item):
        title = scrapedtitle.upper()
        url = scrapedurl

        itemlist.append(Item(channel=item.channel, action="lista", title=title, url=url))
        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra=item.extra))

    return itemlist

def lista(item):
def listado(item):
    logger.info()
    itemlist = []
    url_next_page =''

    # Descarga la pagina
    data = httptools.downloadpage(item.url, post=item.extra).data
    # logger.info("data="+data)
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    #data = httptools.downloadpage(item.url).data
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    logger.debug('item.modo: %s'%item.modo)
    logger.debug('item.extra: %s'%item.extra)

    if item.modo != 'next' or item.modo =='':
        logger.debug('item.title: %s'% item.title)
        patron = '<ul class="' + item.extra + '">(.*?)</ul>'
        fichas = scrapertools.get_match(data, patron)
        page_extra = item.extra
    else:
        fichas = data
        page_extra = item.extra

    bloque = scrapertools.find_single_match(data, '(?:<ul class="pelilist">|<ul class="buscar-list">)(.*?)</ul>')
    patron = '<a href="([^"]+).*?' # la url
    patron += '<img src="([^"]+)"[^>]+>.*?' # el thumbnail
    patron += '<h2[^>]*>(.*?)</h2.*?' # el titulo
    patron += '<span>([^<].*?)<' # la calidad
    patron += 'title="([^"]+).*?' # el titulo
    patron += '<img.*?src="([^"]+)"[^>]+>.*?' # el thumbnail
    patron += '<h2.*?>(.*?)?<\/h2>' # titulo alternativo
    patron += '<span>([^<].*?)?<' # la calidad
    #logger.debug("patron: " + patron + " / fichas: " + fichas)
    matches = re.compile(patron, re.DOTALL).findall(fichas)
    #logger.debug('item.next_page: %s'%item.next_page)
    #logger.debug(matches)

    matches = re.compile(patron, re.DOTALL).findall(bloque)
    scrapertools.printMatches(matches)
    # Paginacion
    if item.next_page != 'b':
        if len(matches) > 30:
            url_next_page = item.url
            matches = matches[:30]
            next_page = 'b'
            modo = 'continue'
    else:
        matches = matches[30:]
        next_page = 'a'
        patron_next_page = '<a href="([^"]+)">Next<\/a>'
        matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
        modo = 'continue'
        if len(matches_next_page) > 0:
            url_next_page = matches_next_page[0]
            modo = 'next'

    for scrapedurl, scrapedthumbnail, scrapedtitle, calidad in matches:
        scrapedtitle = scrapertools.htmlclean(scrapedtitle)
        title = scrapedtitle.strip()
        if scrapertools.htmlclean(calidad):
            title += " (" + scrapertools.htmlclean(calidad) + ")"
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
        plot = ""
        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
    for scrapedurl, scrapedtitle, scrapedthumbnail, title_alt, calidad in matches:
        url = scrapedurl
        title = scrapedtitle
        thumbnail = scrapedthumbnail
        action = "findvideos"
        extra = ""
        context = "movie"
        year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
        contentTitle = scrapertools.htmlclean(scrapedtitle).strip()
        patron = '([^<]+)<br>'
        matches = re.compile(patron, re.DOTALL).findall(calidad + '<br>')
        idioma = ''

        if host+"/serie" in url:
            contentTitle = re.sub('\s+-|\.{3}$', '', contentTitle)
            capitulo = ''
            temporada = 0
            episodio = 0
        if ".com/serie" in url and "/miniseries" not in url:
            action = "episodios"
            extra = "serie"
            context = "tvshow"

            if len(matches) == 3:
                calidad = matches[0].strip()
                idioma = matches[1].strip()
                capitulo = matches[2].replace('Cap', 'x').replace('Temp', '').replace(' ', '')
                temporada, episodio = capitulo.strip().split('x')

            itemlist.append(Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=url,
                                 thumbnail=thumbnail, plot=plot, folder=True, contentTitle=contentTitle,
                                 language=idioma, contentSeason=int(temporada),
                                 contentEpisodeNumber=int(episodio), quality=calidad))
            title = scrapertools.find_single_match(title, '([^-]+)')
            title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace("Ver en linea ", "",
                                                                                                   1).strip()

        else:
            if len(matches) == 2:
                calidad = matches[0].strip()
                idioma = matches[1].strip()
            title = title.replace("Descargar torrent ", "", 1).replace("Descarga Gratis ", "", 1).replace("Descargar Estreno ", "", 1).replace("Pelicula en latino ", "", 1).replace("Descargar Pelicula ", "", 1).replace("Descargar", "", 1).replace("Descarga", "", 1).replace("Bajar", "", 1).strip()
            if title.endswith("gratis"): title = title[:-7]
            if title.endswith("torrent"): title = title[:-8]
            if title.endswith("en HD"): title = title[:-6]

        if title == "":
            title = title_alt
            context_title = title_alt
            show = title_alt
        #if item.extra != "buscar-list":
        #    title = title + '[' + calidad + "]"

            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
                                 thumbnail=thumbnail, plot=plot, folder=True, contentTitle=contentTitle,
                                 language=idioma, contentThumbnail=thumbnail, quality=calidad))
        #Este bucle parece obsoleto:
        #context = ""
        #context_title = scrapertools.find_single_match(url, "http://(?:www.)?mispelisyseries.com/(.*?)/(.*?)/")
        #if context_title:
        #    try:
        #        context = context_title[0].replace("descargar-", "").replace("descargar", "").replace("pelicula", "movie").replace("series", "tvshow").replace("-hd", "").replace("-vo", "")
        #        context_title = context_title[1].replace("-", " ")
        #        if re.search('\d{4}', context_title[-4:]):
        #            context_title = context_title[:-4]
        #        elif re.search('\(\d{4}\)', context_title[-6:]):
        #            context_title = context_title[:-6]
        #
        #    except:
        #        context_title = show
        #

        next_page_url = scrapertools.find_single_match(data, '<li><a href="([^"]+)">Next</a></li>')
        if next_page_url != "":
            itemlist.append(Item(channel=item.channel, action="lista", title=">> Página siguiente",
                                 url=urlparse.urljoin(item.url, next_page_url), folder=True))
        else:
            next_page_url = scrapertools.find_single_match(data,
                                                           '<li><input type="button" class="btn-submit" value="Siguiente" onClick="paginar..(\d+)')
            if next_page_url != "":
                itemlist.append(Item(channel=item.channel, action="lista", title=">> Página siguiente", url=item.url,
                                     extra=item.extra + "&pg=" + next_page_url, folder=True))
        #logger.debug('contxt title: %s'%context_title)
        #logger.debug('year: %s' % year)
        #logger.debug('context: %s' % context)

        if not 'array' in title:
            itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
                                 extra = extra, show = context_title, contentTitle=context_title, contentType=context, quality=calidad,
                                 context=["buscar_trailer"], infoLabels= {'year':year}))

        logger.debug("url: " + url + " / title: " + title + " / contxt title: " + context_title + " / context: " + context + " / calidad: " + calidad+ " / year: " + year)

    tmdb.set_infoLabels(itemlist, True)

    if url_next_page:
        itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente",
                             url=url_next_page, next_page=next_page, folder=True,
                             text_color='yellow', text_bold=True, modo = modo, plot = extra,
                             extra = page_extra))
    return itemlist

def listado2(item):
def listado_busqueda(item):
    logger.info()
    itemlist = []

    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url, post=item.post).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    list_chars = [["ñ", "ñ"]]

    for el in list_chars:
        data = re.sub(r"%s" % el[0], el[1], data)

    try:
        get, post = scrapertools.find_single_match(data, '<ul class="pagination">.*?<a class="current" href.*?'
                                                         '<a\s*href="([^"]+)"(?:\s*onClick=".*?\'([^"]+)\'.*?")')
@@ -249,154 +231,371 @@ def listado2(item):

    pattern = '<ul class="%s">(.*?)</ul>' % item.pattern
    data = scrapertools.get_match(data, pattern)

    logger.debug(data)
    pattern = '<a href="(?P<url>[^"]+)".*?<img.*?src="(?P<img>[^"]+)"[^>]+>.*?<h2.*?>\s*(?P<title>.*?)\s*</h2>'
    pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img.*?src="(?P<thumb>[^"]+)?".*?<h2.*?>(?P<title>.*?)?<\/h2>'
    matches = re.compile(pattern, re.DOTALL).findall(data)
    #logger.debug("patron: " + pattern)
    #logger.debug(matches)

    for url, thumb, title in matches:
        real_title = scrapertools.find_single_match(title, r'<strong.*?>(.*?)Temporada.*?<\/strong>') #series
        if real_title == "":
            real_title = scrapertools.find_single_match(title, r'(.*?)\[.*?]') #movies
        real_title = scrapertools.remove_htmltags(real_title).decode('iso-8859-1').encode('utf-8')
        real_title = scrapertools.htmlclean(real_title)
        calidad = scrapertools.find_single_match(title, r'.*?\s*Calidad.*?<span[^>]+>[\[]\s*(?P<quality>.*?)\s*[\]]<\/span>') #series
        if calidad == "":
            calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])') #movies
        year = scrapertools.find_single_match(thumb, r'-(\d{4})')

        # fix encoding for title
        title = scrapertools.htmlclean(title)
        title = title.replace("�", "ñ")
        title = title.replace("�", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng")
        title = re.sub(r'(Calidad.*?\])', '', title)

        if real_title == "":
            real_title = title
        if calidad == "":
            calidad = title
        context = "movie"

        # no mostramos lo que no sean videos
        if "descargar-juego/" in url or "/varios/" in url:
        if "juego/" in url:
            continue

        if ".com/series" in url:
        # Codigo para rescatar lo que se puede en pelisy.series.com de Series para la Videoteca. la URL apunta al capítulo y no a la Serie. Nombre de Serie frecuentemente en blanco. Se obtiene de Thumb, así como el id de la serie
        if ("/serie" in url or "-serie" in url) and "pelisyseries.com" in host:
            calidad_mps = "series/"
            if "seriehd" in url:
                calidad_mps = "series-hd/"
            if "serievo" in url:
                calidad_mps = "series-vo/"
            if "serie-vo" in url:
                calidad_mps = "series-vo/"

            real_title_mps = re.sub(r'.*?\/\d+_', '', thumb)
            real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps)

            if "/0_" not in thumb:
                serieid = scrapertools.find_single_match(thumb, r'.*?\/\w\/(?P<serieid>\d+).*?.*')
                if len(serieid) > 5:
                    serieid = ""
            else:
                serieid = ""

            url = host + calidad_mps + real_title_mps + "/" + serieid

            real_title_mps = real_title_mps.replace("-", " ")
            #logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / real_title_mps: " + real_title_mps + " / calidad_mps : " + calidad_mps)
            real_title = real_title_mps

            show = real_title

        show = title

        itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb,
                             context=["buscar_trailer"], show=show))
        if ".com/serie" in url and "/miniseries" not in url:
            context = "tvshow"

            itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb, quality=calidad,
                                 show=show, extra="serie", context=["buscar_trailer"], contentType=context, contentTitle=real_title, contentSerieName=real_title, infoLabels= {'year':year}))
        else:
            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
                                 context=["buscar_trailer"]))

            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, quality=calidad,
                                 show=show, context=["buscar_trailer"], contentType=context, contentTitle=real_title, infoLabels= {'year':year}))

        logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / show: " + show + " / calidad: " + calidad)

    tmdb.set_infoLabels(itemlist, True)

    if post:
        itemlist.append(item.clone(channel=item.channel, action="listado2", title="[COLOR cyan]Página Siguiente >>[/COLOR]",
                                   thumbnail=''))
        itemlist.append(item.clone(channel=item.channel, action="listado_busqueda", title=">> Página siguiente",
                                   thumbnail=get_thumb("next.png")))

    return itemlist

def findvideos(item):
    logger.info()
    itemlist = []
    ## Cualquiera de las tres opciones son válidas
    # item.url = item.url.replace(".com/",".com/ver-online/")
    # item.url = item.url.replace(".com/",".com/descarga-directa/")
    item.url = item.url.replace(".com/", ".com/descarga-torrent/")

    # Descarga la página
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    data = data.replace("$!", "#!").replace("'", "\"").replace("ñ", "ñ").replace("//pictures", "/pictures")

    title = scrapertools.find_single_match(data, "<h1.*?<strong>([^<]+)<\/strong>.*?<\/h1>") #corregido para adaptarlo a mispelisy.series.com
    title += scrapertools.find_single_match(data, "<h1.*?<strong>[^<]+<\/strong>([^<]+)<\/h1>") #corregido para adaptarlo a mispelisy.series.com
    #caratula = scrapertools.find_single_match(data, '<div class="entry-left">.*?src="([^"]+)"')
    caratula = scrapertools.find_single_match(data, '<h1.*?<img.*?src="([^"]+)')

    patron = 'openTorrent.*?title=".*?class="btn-torrent">.*?function openTorrent.*?href = "(.*?)";'
    #logger.debug("patron: " + patron + " / data: " + data)
    # escraped torrent
    url = scrapertools.find_single_match(data, patron)

    if "Temp" in title and item.quality != "": #scrapear información duplicada en Series
        title = re.sub(r'Temp.*?\[', '[', title)
        title = re.sub(r'\[Cap.*?\]', '', title)
        title = str(item.contentSeason) + "x" + str(item.contentEpisodeNumber) + " - " + item.contentTitle + ", " + title

    if url != "": #Torrent
        itemlist.append(
            Item(channel=item.channel, action="play", server="torrent", title=title, quality=title, fulltitle=title,
                 url=url, thumbnail=caratula, plot=item.plot, folder=False))

    logger.debug("url: " + url + " / title: " + title + " / calidad: " + item.quality + " / context: " + str(item.context))

    # escraped ver vídeos, descargar vídeos un link, múltiples liks

    data = data.replace("http://tumejorserie.com/descargar/url_encript.php?link=", "(")
    data = re.sub(r'javascript:;" onClick="popup\("http:\/\/(?:www.)?mispelisyseries.com\/\w{1,9}\/library\/include\/ajax\/get_modallinks.php\?links=', "", data)
    #logger.debug("matar %s" % data)

    # Antiguo sistema de scrapeo de servidores usado por Newpct1. Como no funciona con torrent.locura, se sustituye por este más común
    #patron_descargar = '<div id="tab2"[^>]+>.*?</ul>'
    #patron_ver = '<div id="tab3"[^>]+>.*?</ul>'

    #match_ver = scrapertools.find_single_match(data, patron_ver)
    #match_descargar = scrapertools.find_single_match(data, patron_descargar)

    #patron = '<div class="box1"><img src="([^"]+)".*?' # logo
    #patron += '<div class="box2">([^<]+)</div>' # servidor
    #patron += '<div class="box3">([^<]+)</div>' # idioma
    #patron += '<div class="box4">([^<]+)</div>' # calidad
    #patron += '<div class="box5"><a href="([^"]+)".*?' # enlace
    #patron += '<div class="box6">([^<]+)</div>' # titulo

    #enlaces_ver = re.compile(patron, re.DOTALL).findall(match_ver)
    #enlaces_descargar = re.compile(patron, re.DOTALL).findall(match_descargar)

    # Nuevo sistema de scrapeo de servidores creado por Torrentlocula, compatible con otros clones de Newpct1
    patron = '<div class=\"box1\"[^<]+<img src=\"([^<]+)?" style[^<]+><\/div[^<]+<div class="box2">([^<]+)?<\/div[^<]+<div class="box3">([^<]+)?'
    patron += '<\/div[^<]+<div class="box4">([^<]+)?<\/div[^<]+<div class="box5"><a href=(.*?)? rel.*?'
    patron += '<\/div[^<]+<div class="box6">([^<]+)?<'
    #logger.debug("Patron: " + patron)

    enlaces_ver = re.compile(patron, re.DOTALL).findall(data)
    enlaces_descargar = enlaces_ver
    #logger.debug(enlaces_ver)

    if len(enlaces_ver) > 0:
        itemlist.append(item.clone(title="", action="", folder=False))
        itemlist.append(item.clone(title=" Enlaces Ver: ", action="", text_color="green", folder=False))

    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver:
        if "Ver" in titulo:
            servidor = servidor.replace("streamin", "streaminto")
            titulo = title
            mostrar_server = True
            if config.get_setting("hidepremium"):
                mostrar_server = servertools.is_server_enabled(servidor)
            logger.debug("url: " + enlace + " / title: " + title + " / servidor: " + servidor + " / idioma: " + idioma)
            if mostrar_server:
                try:
                    devuelve = servertools.findvideosbyserver(enlace, servidor)
                    if devuelve:
                        enlace = devuelve[0][1]
                        itemlist.append(
                            Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo, quality=titulo,
                                 fulltitle=titulo, url=enlace, thumbnail=logo, plot=item.plot, folder=False))
                except:
                    pass

    if len(enlaces_descargar) > 0:
        itemlist.append(item.clone(title="", action="", folder=False))
        itemlist.append(item.clone(title=" Enlaces Descargar: ", action="", text_color="green", folder=False))

    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar:
        if "Ver" not in titulo:
            servidor = servidor.replace("uploaded", "uploadedto")
            partes = enlace.split(" ")
            titulo = "Partes "
            p = 1
            logger.debug("url: " + enlace + " / title: " + title + " / servidor: " + servidor + " / idioma: " + idioma)
            for enlace in partes:
                parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " - " + title
                p += 1
                mostrar_server = True
                if config.get_setting("hidepremium"):
                    mostrar_server = servertools.is_server_enabled(servidor)
                if mostrar_server:
                    try:
                        devuelve = servertools.findvideosbyserver(enlace, servidor)
                        if devuelve:
                            enlace = devuelve[0][1]
                            itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, quality=parte_titulo,
                                                 title=parte_titulo, fulltitle=parte_titulo, url=enlace, thumbnail=logo,
                                                 plot=item.plot, folder=False))
                    except:
                        pass

    return itemlist

def episodios(item):
    logger.info()
    itemlist = []
    infoLabels = item.infoLabels
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    calidad = item.quality

    pattern = '<ul class="%s">(.*?)</ul>' % "pagination" # item.pattern
    pagination = scrapertools.find_single_match(data, pattern)
    if pagination:
        pattern = '<li><a href="([^"]+)">Last<\/a>'
        full_url = scrapertools.find_single_match(pagination, pattern)
        url, last_page = scrapertools.find_single_match(full_url, r'(.*?\/pg\/)(\d+)')
        list_pages = [item.url]
        for x in range(2, int(last_page) + 1):
            response = httptools.downloadpage('%s%s'% (url,x))
            if response.sucess:
                list_pages.append("%s%s" % (url, x))
    else:
        list_pages = [item.url]

    # Descarga la pagina
    data = httptools.downloadpage(item.url, post=item.extra).data
    # logger.info("data="+data)
    for index, page in enumerate(list_pages):
        logger.debug("Loading page %s/%s url=%s" % (index, len(list_pages), page))
        data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data)
        data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
        data = data.replace("chapters", "buscar-list") #Compatibilidad con mispelisy.series.com
        pattern = '<ul class="%s">(.*?)</ul>' % "buscar-list" # item.pattern
        data = scrapertools.get_match(data, pattern)
        #logger.debug("data: " + data)

        patron = '<div class="chap-desc"[^<]+'
        patron += '<a class="chap-title".*?href="([^"]+)" title="([^"]+)"[^<]+'
        matches = re.compile(patron, re.DOTALL).findall(data)
        scrapertools.printMatches(matches)
        if "pelisyseries.com" in host:
            pattern = '<li[^>]*><div class.*?src="(?P<thumb>[^"]+)?".*?<a class.*?href="(?P<url>[^"]+).*?<h3[^>]+>(?P<info>.*?)?<\/h3>.*?<\/li>'
        else:
            pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img.*?src="(?P<thumb>[^"]+)?".*?<h2[^>]+>(?P<info>.*?)?<\/h2>'
        matches = re.compile(pattern, re.DOTALL).findall(data)
        #logger.debug("patron: " + pattern)
        #logger.debug(matches)

        season = "1"

        for scrapedurl, scrapedtitle in matches:
            title = scrapedtitle.strip()
            url = urlparse.urljoin(item.url, scrapedurl)
            thumbnail = ""
            plot = ""
            logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
        for url, thumb, info in matches:

            if "pelisyseries.com" in host:
                interm = url
                url = thumb
                thumb = interm

            if "<span" in info: # new style
                pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)?.*?Capitulo(?:s)?\s*(?P<episode>\d+)?" \
                          "(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>" \
                          "[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
                if "Especial" in info: # Capitulos Especiales
                    pattern = ".*?[^>]+>.*?Temporada.*?\[.*?(?P<season>\d+).*?\].*?Capitulo.*?\[\s*(?P<episode>\d+).*?\]?(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
                logger.debug("patron: " + pattern)
                logger.debug(info)
                r = re.compile(pattern)
                match = [m.groupdict() for m in r.finditer(info)][0]

                if match['season'] is None: match['season'] = season
                if match['episode'] is None: match['episode'] = "0"
                if match['quality']: item.quality = match['quality']

                if match["episode2"]:
                    multi = True
                    title = "%s (%sx%s-%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
                                                        str(match["episode2"]).zfill(2), match["lang"],
                                                        match["quality"])
                else:
                    multi = False
                    title = "%s (%sx%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
                                                     match["lang"], match["quality"])

            else: # old style
                pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+).*?(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
                          "(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"
                logger.debug("patron: " + pattern)
                logger.debug(info)
                r = re.compile(pattern)
                match = [m.groupdict() for m in r.finditer(info)][0]
                #logger.debug("data %s" % match)

                #if match['season'] is "": match['season'] = season
                #if match['episode'] is "": match['episode'] = "0"
                #logger.debug(match)

                str_lang = ""
                if match["lang"] is not None:
                    str_lang = "[%s]" % match["lang"]

                if match["season2"] and match["episode2"]:
                    multi = True
                    if match["season"] == match["season2"]:

                        title = "%s (%sx%s-%s) %s[%s]" % (item.show, match["season"], match["episode"],
                                                          match["episode2"], str_lang, match["quality"])
                    else:
                        title = "%s (%sx%s-%sx%s) %s[%s]" % (item.show, match["season"], match["episode"],
                                                             match["season2"], match["episode2"], str_lang,
                                                             match["quality"])
                else:
                    title = "%s (%sx%s) %s[%s]" % (item.show, match["season"], match["episode"], str_lang,
                                                   match["quality"])
                    multi = False

            season = match['season']
            episode = match['episode']
            logger.debug("title: " + title + " / url: " + url + " / calidad: " + item.quality + " / multi: " + str(multi) + " / Season: " + str(season) + " / EpisodeNumber: " + str(episode))
||||
logger.debug("title: " + title + " / url: " + url + " / calidad: " + item.quality + " / multi: " + str(multi) + " / Season: " + str(season) + " / EpisodeNumber: " + str(episode))
|
||||
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
|
||||
quality=item.quality, multi=multi, contentSeason=season,
|
||||
contentEpisodeNumber=episode, infoLabels = infoLabels))
|
||||
# order list
|
||||
#tmdb.set_infoLabels(itemlist, True)
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
|
||||
if len(itemlist) > 1:
|
||||
itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))
|
||||
|
||||
if config.get_videolibrary_support() and len(itemlist) > 0:
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumbnail,
|
||||
plot=plot, folder=True))
|
||||
|
||||
next_page_url = scrapertools.find_single_match(data, "<a class='active' href=[^<]+</a><a\s*href='([^']+)'")
|
||||
if next_page_url != "":
|
||||
itemlist.append(Item(channel=item.channel, action="episodios", title=">> Página siguiente",
|
||||
url=urlparse.urljoin(item.url, next_page_url), folder=True))
|
||||
item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios", quality=calidad))
|
||||
|
||||
return itemlist
|
||||
|
||||
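# A minimal standalone sketch of the "Especial" pattern above: the sample
# 'info' string is hypothetical, shaped like the site's markup, and the
# None-guards mirror how missing fields are recovered instead of cancelling
# the channel.
import re

info_sketch = ('<a href="#">Serie Temporada [1] Capitulo Especial [00]</a>'
               '<span class="d">Español</span> Calidad <span class="d">[HDTV]</span>')
pattern_sketch = (".*?[^>]+>.*?Temporada.*?\[.*?(?P<season>\d+).*?\].*?Capitulo.*?\[\s*(?P<episode>\d+).*?\]?"
                  "(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*"
                  "<span[^>]+>[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>")
match_sketch = [m.groupdict() for m in re.finditer(pattern_sketch, info_sketch)][0]
if match_sketch['season'] is None: match_sketch['season'] = "1"
if match_sketch['episode'] is None: match_sketch['episode'] = "0"
print(match_sketch)  # expected: season '1', episode '00', lang 'Español', quality 'HDTV'
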
def search(item, texto):
    logger.info("search:" + texto)
    # texto = texto.replace(" ", "+")

    try:
        item.post = "q=%s" % texto
        item.pattern = "buscar-list"
        itemlist = listado_busqueda(item)

        return itemlist

    # Catch the exception so one failing channel does not break the global search
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []

def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        item.extra = 'pelilist'
        if categoria == 'torrent':
            item.url = host + 'peliculas/'

            # Download the page
            data = httptools.downloadpage(item.url).data
            item.plot = scrapertools.find_single_match(data, '<div class="post-entry" style="height:300px;">(.*?)</div>')
            item.plot = scrapertools.htmlclean(item.plot).strip()
            item.contentPlot = item.plot
            al_url_fa = scrapertools.find_single_match(data, 'location\.href.*?=.*?"http:\/\/(?:tumejorserie|tumejorjuego).*?link=(.*?)"')
            if al_url_fa == "":
                al_url_fa = scrapertools.find_single_match(data, 'location\.href.*?=.*?"%s(.*?)" ' % host)
                if al_url_fa != "":
                    al_url_fa = host + al_url_fa
            logger.info("torrent=" + al_url_fa)
            itemlist.append(
                Item(channel=item.channel, action="play", server="torrent", title="Vídeo en torrent", fulltitle=item.title,
                     url=al_url_fa, thumbnail=servertools.guess_server_thumbnail("torrent"), plot=item.plot, folder=False,
                     parentContent=item))
            itemlist = listado(item)
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()
            item.url = host + 'series/'
            itemlist.extend(listado(item))
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()

        patron = '<div class=\"box1\"[^<]+<img[^<]+<\/div[^<]+<div class="box2">([^<]+)<\/div[^<]+<div class="box3">([^<]+)'
        patron += '<\/div[^<]+<div class="box4">([^<]+)<\/div[^<]+<div class="box5"><a href=(.*?) rel.*?'
        patron += '<\/div[^<]+<div class="box6">([^<]+)<'

        matches = re.compile(patron, re.DOTALL).findall(data)

        itemlist_ver = []
        itemlist_descargar = []

        for servername, idioma, calidad, scrapedurl, comentarios in matches:
            title = "Mirror en " + servername + " (" + calidad + ")" + " (" + idioma + ")"
            servername = servername.replace("uploaded", "uploadedto").replace("1fichier", "onefichier")
            if comentarios.strip() != "":
                title = title + " (" + comentarios.strip() + ")"
            url = urlparse.urljoin(item.url, scrapedurl)
            mostrar_server = servertools.is_server_enabled(servername)
            if mostrar_server:
                thumbnail = servertools.guess_server_thumbnail(title)
                plot = ""
                logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
                action = "play"
                if "partes" in title:
                    action = "extract_url"
                new_item = Item(channel=item.channel, action=action, title=title, fulltitle=title, url=url,
                                thumbnail=thumbnail, plot=plot, parentContent=item, server=servername, quality=calidad)
                if comentarios.startswith("Ver en"):
                    itemlist_ver.append(new_item)
                else:
                    itemlist_descargar.append(new_item)

        itemlist.extend(itemlist_ver)
        itemlist.extend(itemlist_descargar)
    # Catch the exception so one failing channel does not break the "Novedades" channel
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist

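# Assumption, not in the commit: itemlist[-1] raises IndexError on an empty
# list (the bare except above hides it). A guarded variant of the
# ">> Página siguiente" cleanup in newest() could look like this:
def drop_next_page_marker(itemlist):
    if itemlist and itemlist[-1].title == ">> Página siguiente":
        itemlist.pop()
    return itemlist
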
def extract_url(item):
    logger.info()

    itemlist = servertools.find_video_items(data=item.url)

    for videoitem in itemlist:
        videoitem.title = "Enlace encontrado en " + videoitem.server + " (" + scrapertools.get_filename_from_url(
            videoitem.url) + ")"
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel

    return itemlist


def play(item):
    logger.info()

    if item.server != "torrent":
        itemlist = servertools.find_video_items(data=item.url)

        for videoitem in itemlist:
            videoitem.title = "Enlace encontrado en " + videoitem.server + " (" + scrapertools.get_filename_from_url(
                videoitem.url) + ")"
            videoitem.fulltitle = item.fulltitle
            videoitem.thumbnail = item.thumbnail
            videoitem.channel = item.channel
    else:
        itemlist = [item]

    return itemlist

@@ -8,18 +8,36 @@
    "thumbnail": "http://imgur.com/EWmLS3d.png",
    "fanart": "http://imgur.com/V7QZLAL.jpg",
    "categories": [
        "torrent",
        "movie",
        "tvshow"
    ],
    "settings": [
        "movie",
        "tvshow",
        "anime",
        "torrent",
        "documentary"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": true,
            "enabled": true,
            "visible": true
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Incluir en Novedades - Peliculas",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_series",
            "type": "bool",
            "label": "Incluir en Novedades - Episodios de series",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_torrent",
@@ -37,5 +55,5 @@
            "enabled": true,
            "visible": true
        }
    ]
]
}
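A hedged note on how these new channel settings are typically consumed: the reader is not shown in this commit. The two-argument config.get_setting(id, channel) form is an assumption based on how per-channel settings are queried elsewhere; only the one-argument form ("hidepremium") appears in the code above.

from platformcode import config

# Hypothetical consumer: skip this channel when building "Novedades - Peliculas"
if not config.get_setting("include_in_newest_peliculas", "descargas2020"):
    pass  # the news builder would leave this channel out
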
@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-

import re

@@ -10,7 +10,7 @@ from core.item import Item
from platformcode import config, logger
from core import tmdb

host = 'http://torrentlocura.com/'  # Manually change "xx" in line 287 (".com/xx/library") to tl for torrentlocura, tr for torrentrapid, d20 for descargas2020
host = 'http://torrentlocura.com/'

def mainlist(item):
    logger.info()
@@ -40,12 +40,17 @@ def submenu(item):

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    data = data.replace("'", "\"").replace("/series\"", "/series/\"")  # Compatibility with mispelisy.series.com

    #patron = '<li><a href="http://(?:www.)?torrentlocura.com/' + item.extra + '/">.*?<ul>(.*?)</ul>'
    patron = '<li><a href="'+item.url+item.extra + '/">.*?<ul>(.*?)</ul>'  # Filtered by url
    data = scrapertools.get_match(data, patron)
    patron = '<li><.*?href="'+item.url+item.extra + '/">.*?<ul.*?>(.*?)</ul>'  # Filtered by url, compatibility with mispelisy.series.com
    #logger.debug("patron: " + patron + " / data: " + data)
    if "pelisyseries.com" in host and item.extra == "varios":  # compatibility with mispelisy.series.com
        data = '<a href="http://torrentlocura.com/varios/" title="Documentales"><i class="icon-rocket"></i> Documentales</a>'
    else:
        data = scrapertools.get_match(data, patron)

    patron = '<a href="([^"]+)".*?>([^>]+)</a>'
    patron = '<.*?href="([^"]+)".*?>([^>]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
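# Why the pelisyseries guard above matters: scrapertools.get_match raises
# when nothing matches, while find_single_match degrades to "". Stand-in
# definitions with plain re, for illustration only (the real helpers live in
# core.scrapertools):
import re

def get_match_sketch(data, patron):
    return re.findall(patron, data, re.DOTALL)[0]   # IndexError if no match

def find_single_match_sketch(data, patron):
    m = re.search(patron, data, re.DOTALL)
    return m.group(1) if m else ""                  # safe default
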
@@ -92,15 +97,15 @@ def listado(item):
    url_next_page = ''

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    #data = httptools.downloadpage(item.url).data
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    #logger.debug(data)

    logger.debug('item.modo: %s' % item.modo)
    logger.debug('item.extra: %s' % item.extra)

    if item.modo != 'next' or item.modo == '':
        logger.debug('item.title: %s' % item.title)
        patron = '<ul class="' + item.extra + '">(.*?)</ul>'
        logger.debug("patron=" + patron)
        fichas = scrapertools.get_match(data, patron)
        page_extra = item.extra
    else:
@@ -109,11 +114,13 @@ def listado(item):

    patron = '<a href="([^"]+).*?'  # the url
    patron += 'title="([^"]+).*?'  # the title
    patron += '<img src="([^"]+)"[^>]+>.*?'  # the thumbnail
    #patron += '<span>([^<].*?)<'  # the quality: original NewPCT1 code: if the quality is missing, the following "matches" enters a loop
    patron += '<img.*?src="([^"]+)"[^>]+>.*?'  # the thumbnail
    patron += '<h2.*?>(.*?)?<\/h2>'  # alternative title
    patron += '<span>([^<].*?)?<'  # the quality
    #logger.debug("patron: " + patron + " / fichas: " + fichas)
    matches = re.compile(patron, re.DOTALL).findall(fichas)
    logger.debug('item.next_page: %s' % item.next_page)
    #logger.debug('item.next_page: %s' % item.next_page)
    #logger.debug(matches)

    # Pagination
    if item.next_page != 'b':
@@ -132,20 +139,22 @@ def listado(item):
        url_next_page = matches_next_page[0]
        modo = 'next'

    for scrapedurl, scrapedtitle, scrapedthumbnail, calidad in matches:
    for scrapedurl, scrapedtitle, scrapedthumbnail, title_alt, calidad in matches:
        url = scrapedurl
        title = scrapedtitle
        thumbnail = scrapedthumbnail
        action = "findvideos"
        extra = ""
        context = "movie"
        year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
        if ".com/series" in url:

        if ".com/serie" in url and "/miniseries" not in url:
            action = "episodios"
            extra = "serie"

            context = "tvshow"

            title = scrapertools.find_single_match(title, '([^-]+)')
            title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace("Ver en linea", "",
            title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace("Ver en linea ", "",
                                                                                                   1).strip()

        else:
@@ -153,39 +162,43 @@ def listado(item):
            if title.endswith("gratis"): title = title[:-7]
            if title.endswith("torrent"): title = title[:-8]
            if title.endswith("en HD"): title = title[:-6]

        if title == "":
            title = title_alt
            context_title = title_alt
            show = title_alt
        #if item.extra != "buscar-list":
        #    title = title + '[' + calidad + "]"

        show = title
        if item.extra != "buscar-list":
            title = title + ' ' + calidad
        # This block looks obsolete:
        #context = ""
        #context_title = scrapertools.find_single_match(url, "http://(?:www.)?torrentlocura.com/(.*?)/(.*?)/")
        #if context_title:
        #    try:
        #        context = context_title[0].replace("descargar-", "").replace("descargar", "").replace("pelicula", "movie").replace("series", "tvshow").replace("-hd", "").replace("-vo", "")
        #        context_title = context_title[1].replace("-", " ")
        #        if re.search('\d{4}', context_title[-4:]):
        #            context_title = context_title[:-4]
        #        elif re.search('\(\d{4}\)', context_title[-6:]):
        #            context_title = context_title[:-6]
        #
        #    except:
        #        context_title = show
        #

        context = ""
        context_title = scrapertools.find_single_match(url, "http://(?:www.)?torrentlocura.com/(.*?)/(.*?)/")
        if context_title:
            try:
                context = context_title[0].replace("descargar-", "").replace("pelicula", "movie").replace("series",
                                                                                                          "tvshow")
                context_title = context_title[1].replace("-", " ")
                if re.search('\d{4}', context_title[-4:]):
                    context_title = context_title[:-4]
                elif re.search('\(\d{4}\)', context_title[-6:]):
                    context_title = context_title[:-6]

            except:
                context_title = show
        logger.debug('contxt title: %s' % context_title)
        logger.debug('year: %s' % year)

        logger.debug('context: %s' % context)
        #logger.debug('contxt title: %s' % context_title)
        #logger.debug('year: %s' % year)
        #logger.debug('context: %s' % context)

        if not 'array' in title:
            itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
                                 extra = extra,
                                 show = context_title, contentTitle=context_title, contentType=context,
                                 context=["buscar_trailer"], infoLabels= {'year':year}))
                                 extra = extra, show = context_title, contentTitle=context_title, contentType=context, quality=calidad,
                                 context=["buscar_trailer"], infoLabels= {'year':year}))

        logger.debug("url: " + url + " / title: " + title + " / contxt title: " + context_title + " / context: " + context + " / calidad: " + calidad + " / year: " + year)

    tmdb.set_infoLabels(itemlist, True)

    if url_next_page:
        itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente",
                             url=url_next_page, next_page=next_page, folder=True,
@@ -193,12 +206,12 @@ def listado(item):
                             extra = page_extra))
    return itemlist

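# Standalone sketch of the suffix trimming in listado() above; the slice
# widths include the leading space (sample title hypothetical):
title_sketch = "Descargar Pelicula X en HD"
if title_sketch.endswith("gratis"): title_sketch = title_sketch[:-7]
if title_sketch.endswith("torrent"): title_sketch = title_sketch[:-8]
if title_sketch.endswith("en HD"): title_sketch = title_sketch[:-6]
print(title_sketch)  # Descargar Pelicula X
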
def listado2(item):
def listado_busqueda(item):
    logger.info()
    itemlist = []
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url, post=item.post).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    list_chars = [["Ã±", "ñ"]]

    for el in list_chars:
@@ -218,34 +231,82 @@ def listado2(item):

    pattern = '<ul class="%s">(.*?)</ul>' % item.pattern
    data = scrapertools.get_match(data, pattern)
    pattern = '<li><a href="(?P<url>[^"]+)".*?<img src="(?P<img>[^"]+)"[^>]+>.*?<h2.*?>\s*(?P<title>.*?)\s*</h2>'

    pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img.*?src="(?P<thumb>[^"]+)?".*?<h2.*?>(?P<title>.*?)?<\/h2>'
    matches = re.compile(pattern, re.DOTALL).findall(data)
    #logger.debug("patron: " + pattern)
    #logger.debug(matches)

    for url, thumb, title in matches:
        # fix encoding for title
        real_title = scrapertools.find_single_match(title, r'font color.*?font.*?><b>(.*?)<\/b><\/font>')
        real_title = scrapertools.find_single_match(title, r'<strong.*?>(.*?)Temporada.*?<\/strong>')  # series
        if real_title == "":
            real_title = scrapertools.find_single_match(title, r'(.*?)\[.*?]')  # movies
        real_title = scrapertools.remove_htmltags(real_title).decode('iso-8859-1').encode('utf-8')
        real_title = scrapertools.htmlclean(real_title)
        calidad = scrapertools.find_single_match(title, r'.*?\s*Calidad.*?<span[^>]+>[\[]\s*(?P<quality>.*?)\s*[\]]<\/span>')  # series
        if calidad == "":
            calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])')  # movies
        year = scrapertools.find_single_match(thumb, r'-(\d{4})')

        # fix encoding for title
        title = scrapertools.htmlclean(title)
        title = title.replace("Ã±", "ñ")
        title = title.replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng")
        title = re.sub(r'(Calidad.*?\])', '', title)

        if real_title == "":
            real_title = title
        if calidad == "":
            calidad = title
        context = "movie"

        # skip anything that is not a video
        if "/juego/" in url or "/varios/" in url:
        if "juego/" in url:
            continue

        if ".com/series" in url:
        # Code to rescue what we can from pelisy.series.com series for the video library. The URL points to the chapter, not the series; the series name is frequently blank and is recovered from the thumb, as is the series id
        if ("/serie" in url or "-serie" in url) and "pelisyseries.com" in host:
            calidad_mps = "series/"
            if "seriehd" in url:
                calidad_mps = "series-hd/"
            if "serievo" in url:
                calidad_mps = "series-vo/"
            if "serie-vo" in url:
                calidad_mps = "series-vo/"

            real_title_mps = re.sub(r'.*?\/\d+_', '', thumb)
            real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps)

            if "/0_" not in thumb:
                serieid = scrapertools.find_single_match(thumb, r'.*?\/\w\/(?P<serieid>\d+).*?.*')
                if len(serieid) > 5:
                    serieid = ""
            else:
                serieid = ""

            url = host + calidad_mps + real_title_mps + "/" + serieid

            real_title_mps = real_title_mps.replace("-", " ")
            #logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / real_title_mps: " + real_title_mps + " / calidad_mps : " + calidad_mps)
            real_title = real_title_mps

            show = real_title

        show = real_title
        if ".com/serie" in url and "/miniseries" not in url:

            context = "tvshow"

            itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb,
                                 context=["buscar_trailer"], contentSerieName=show))
            itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb, quality=calidad,
                                 show=show, extra="serie", context=["buscar_trailer"], contentType=context, contentTitle=real_title, contentSerieName=real_title, infoLabels= {'year':year}))
        else:

            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
                                 context=["buscar_trailer"]))

            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, quality=calidad,
                                 show=show, context=["buscar_trailer"], contentType=context, contentTitle=real_title, infoLabels= {'year':year}))

        logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / show: " + show + " / calidad: " + calidad)

    tmdb.set_infoLabels(itemlist, True)

    if post:
        itemlist.append(item.clone(channel=item.channel, action="listado2", title=">> Página siguiente",
        itemlist.append(item.clone(channel=item.channel, action="listado_busqueda", title=">> Página siguiente",
                                   thumbnail=get_thumb("next.png")))

    return itemlist
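# Standalone sketch of the title/quality extraction in listado_busqueda(),
# with plain re standing in for scrapertools and hypothetical <h2> payloads:
import re

def find_single_match_sketch(data, patron):
    m = re.search(patron, data, re.DOTALL)
    return m.group(1) if m else ""

serie_sketch = '<strong style="color:red">Los Serrano Temporada 1</strong> Calidad <span class="q">[HDTV]</span>'
movie_sketch = 'Titanic [BluRay 1080p][Castellano]'

print(find_single_match_sketch(serie_sketch, r'<strong.*?>(.*?)Temporada.*?<\/strong>').strip())  # Los Serrano
print(find_single_match_sketch(movie_sketch, r'(.*?)\[.*?]').strip())                             # Titanic
print(find_single_match_sketch(serie_sketch, r'.*?\s*Calidad.*?<span[^>]+>[\[]\s*(.*?)\s*[\]]<\/span>'))  # HDTV
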
@@ -253,7 +314,6 @@ def listado2(item):
def findvideos(item):
    logger.info()
    itemlist = []

    ## Any of the three options is valid
    # item.url = item.url.replace(".com/",".com/ver-online/")
    # item.url = item.url.replace(".com/",".com/descarga-directa/")
@@ -263,32 +323,36 @@ def findvideos(item):
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    data = data.replace("$!", "#!").replace("'", "\"").replace("Ã±", "ñ").replace("//pictures", "/pictures")

    title = scrapertools.find_single_match(data, "<h1><strong>([^<]+)<\/strong>[^<]+<\/h1>")
    title += scrapertools.find_single_match(data, "<h1><strong>[^<]+<\/strong>([^<]+)<\/h1>")
    caratula = scrapertools.find_single_match(data, '<div class="entry-left">.*?src="([^"]+)"')

    #<div style="float:left;width:100%;min-height:70px;margin:10px 0px;"> <a href="javascript:void(0);" onClick="javascript:openTorrent();" title="Descargar torrent de Star Wars Los Ultimos Jedi " class="btn-torrent">Descarga tu Archivo torrent!</a> <script type="text/javascript"> function openTorrent() {var link = "http://advserver.xyz/v2/gena?gid=ADQGZS0ABR&uid=164"; window.open(link); window.location.href = "http://torrentlocura.com/descargar-torrent/104616_-1520707769-star-wars-los-ultimos-jedi--bluray-screeener/";} </script> </div>

    title = scrapertools.find_single_match(data, "<h1.*?<strong>([^<]+)<\/strong>.*?<\/h1>")  # fixed to adapt it to mispelisy.series.com
    title += scrapertools.find_single_match(data, "<h1.*?<strong>[^<]+<\/strong>([^<]+)<\/h1>")  # fixed to adapt it to mispelisy.series.com
    #caratula = scrapertools.find_single_match(data, '<div class="entry-left">.*?src="([^"]+)"')
    caratula = scrapertools.find_single_match(data, '<h1.*?<img.*?src="([^"]+)')

    patron = 'openTorrent.*?title=".*?class="btn-torrent">.*?function openTorrent.*?href = "(.*?)";'

    #logger.debug("patron: " + patron + " / data: " + data)
    # scraped torrent
    url = scrapertools.find_single_match(data, patron)
    logger.debug("urltorrent: " + url + " Title: " + title + " Caratula: " + caratula)
    if url != "":

    if "Temp" in title and item.quality != "":  # strip duplicated info from series titles
        title = re.sub(r'Temp.*?\[', '[', title)
        title = re.sub(r'\[Cap.*?\]', '', title)
        title = str(item.contentSeason) + "x" + str(item.contentEpisodeNumber) + " - " + item.contentTitle + ", " + title

    if url != "":  # Torrent
        itemlist.append(
            Item(channel=item.channel, action="play", server="torrent", title="[torrent] - " + title, fulltitle=title,
            Item(channel=item.channel, action="play", server="torrent", title=title, quality=title, fulltitle=title,
                 url=url, thumbnail=caratula, plot=item.plot, folder=False))

    logger.debug("url: " + url + " / title: " + title + " / calidad: " + item.quality + " / context: " + str(item.context))

    # scraped watch videos, download videos: single link, multiple links

    data = data.replace("http://tumejorserie.com/descargar/url_encript.php?link=", "(")
    data = data.replace(
        'javascript:;" onClick="popup("http://www.torrentlocura.com/tl/library/include/ajax/get_modallinks.php?links=', "")
    data = re.sub(r'javascript:;" onClick="popup\("http:\/\/(?:www.)?torrentlocura.com\/\w{1,9}\/library\/include\/ajax\/get_modallinks.php\?links=', "", data)
    #logger.debug("matar %s" % data)

    logger.debug("matar %s" % data)

    # Old server-scraping system used by Newpct1. As it does not work with Torrentlocura, it is replaced by this more common one
    # Old server-scraping system used by Newpct1. As it does not work with torrent.locura, it is replaced by this more common one
    #patron_descargar = '<div id="tab2"[^>]+>.*?</ul>'
    #patron_ver = '<div id="tab3"[^>]+>.*?</ul>'

@@ -309,37 +373,48 @@ def findvideos(item):
    patron = '<div class=\"box1\"[^<]+<img src=\"([^<]+)?" style[^<]+><\/div[^<]+<div class="box2">([^<]+)?<\/div[^<]+<div class="box3">([^<]+)?'
    patron += '<\/div[^<]+<div class="box4">([^<]+)?<\/div[^<]+<div class="box5"><a href=(.*?)? rel.*?'
    patron += '<\/div[^<]+<div class="box6">([^<]+)?<'
    logger.debug("Patron: " + patron)
    #logger.debug("Patron: " + patron)

    enlaces_ver = re.compile(patron, re.DOTALL).findall(data)
    enlaces_descargar = enlaces_ver
    logger.debug(enlaces_ver)
    #logger.debug(enlaces_ver)

    if len(enlaces_ver) > 0:
        itemlist.append(item.clone(title="", action="", folder=False))
        itemlist.append(item.clone(title=" Enlaces Ver: ", action="", text_color="green", folder=False))

        for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver:
            if "Ver" in titulo:
                servidor = servidor.replace("streamin", "streaminto")
                titulo = titulo + " [" + servidor + "]"
                titulo = title
                mostrar_server = True
                if config.get_setting("hidepremium"):
                    mostrar_server = servertools.is_server_enabled(servidor)
                logger.debug("url: " + enlace + " / title: " + title + " / servidor: " + servidor + " / idioma: " + idioma)
                if mostrar_server:
                    try:
                        devuelve = servertools.findvideosbyserver(enlace, servidor)
                        if devuelve:
                            enlace = devuelve[0][1]
                            itemlist.append(
                                Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo,
                                     fulltitle=item.title, url=enlace, thumbnail=logo, plot=item.plot, folder=False))
                                Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo, quality=titulo,
                                     fulltitle=titulo, url=enlace, thumbnail=logo, plot=item.plot, folder=False))
                    except:
                        pass

    if len(enlaces_descargar) > 0:
        itemlist.append(item.clone(title="", action="", folder=False))
        itemlist.append(item.clone(title=" Enlaces Descargar: ", action="", text_color="green", folder=False))

        for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar:
            if "Ver" not in titulo:
                servidor = servidor.replace("uploaded", "uploadedto")
                partes = enlace.split(" ")
                titulo = "Partes "
                p = 1
                logger.debug("url: " + enlace + " / title: " + title + " / servidor: " + servidor + " / idioma: " + idioma)
                for enlace in partes:
                    parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " [" + servidor + "]"
                    parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " - " + title
                    p += 1
                    mostrar_server = True
                    if config.get_setting("hidepremium"):
@@ -349,11 +424,12 @@ def findvideos(item):
                        devuelve = servertools.findvideosbyserver(enlace, servidor)
                        if devuelve:
                            enlace = devuelve[0][1]
                            itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor,
                                                 title=parte_titulo, fulltitle=item.title, url=enlace, thumbnail=logo,
                            itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, quality=parte_titulo,
                                                 title=parte_titulo, fulltitle=parte_titulo, url=enlace, thumbnail=logo,
                                                 plot=item.plot, folder=False))
                    except:
                        pass

    return itemlist

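# Standalone sketch of the duplicate-info cleanup applied to series titles in
# findvideos() above (the channel then prefixes "7x07 - <contentTitle>, ");
# the sample title is hypothetical:
import re

title_sketch = "Juego de Tronos - Temporada 7 [HDTV 720p][Cap.707][AC3 5.1 Español Castellano]"
title_sketch = re.sub(r'Temp.*?\[', '[', title_sketch)   # drop "Temporada 7 " up to the first bracket
title_sketch = re.sub(r'\[Cap.*?\]', '', title_sketch)   # drop the "[Cap.707]" block
print(title_sketch)  # Juego de Tronos - [HDTV 720p][AC3 5.1 Español Castellano]
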
@@ -363,6 +439,8 @@ def episodios(item):
    infoLabels = item.infoLabels
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    calidad = item.quality

    pattern = '<ul class="%s">(.*?)</ul>' % "pagination"  # item.pattern
    pagination = scrapertools.find_single_match(data, pattern)
    if pagination:
@@ -381,22 +459,43 @@ def episodios(item):
        logger.debug("Loading page %s/%s url=%s" % (index, len(list_pages), page))
        data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data)
        data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

        data = data.replace("chapters", "buscar-list")  # Compatibility with mispelisy.series.com
        pattern = '<ul class="%s">(.*?)</ul>' % "buscar-list"  # item.pattern
        data = scrapertools.get_match(data, pattern)
        #logger.debug("data: " + data)

        pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img src="(?P<thumb>[^"]+)".*?<h2[^>]+>(?P<info>.*?)</h2>'
        if "pelisyseries.com" in host:
            pattern = '<li[^>]*><div class.*?src="(?P<thumb>[^"]+)?".*?<a class.*?href="(?P<url>[^"]+).*?<h3[^>]+>(?P<info>.*?)?<\/h3>.*?<\/li>'
        else:
            pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img.*?src="(?P<thumb>[^"]+)?".*?<h2[^>]+>(?P<info>.*?)?<\/h2>'
        matches = re.compile(pattern, re.DOTALL).findall(data)
        #logger.debug("patron: " + pattern)
        #logger.debug(matches)

        season = "1"

        for url, thumb, info in matches:

            if "pelisyseries.com" in host:
                interm = url
                url = thumb
                thumb = interm

            if "<span" in info:  # new style
                pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)\s*Capitulo(?:s)?\s*(?P<episode>\d+)" \
                          "(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)</span>\s*Calidad\s*<span[^>]+>" \
                          "[\[]\s*(?P<quality>.*?)\s*[\]]</span>"
                pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)?.*?Capitulo(?:s)?\s*(?P<episode>\d+)?" \
                          "(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>" \
                          "[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
                if "Especial" in info:  # Special chapters
                    pattern = ".*?[^>]+>.*?Temporada.*?\[.*?(?P<season>\d+).*?\].*?Capitulo.*?\[\s*(?P<episode>\d+).*?\]?(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
                logger.debug("patron: " + pattern)
                logger.debug(info)
                r = re.compile(pattern)
                match = [m.groupdict() for m in r.finditer(info)][0]

                if match['season'] is None: match['season'] = season
                if match['episode'] is None: match['episode'] = "0"
                if match['quality']: item.quality = match['quality']

                if match["episode2"]:
                    multi = True
                    title = "%s (%sx%s-%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
@@ -408,12 +507,17 @@ def episodios(item):
                                                     match["lang"], match["quality"])

            else:  # old style
                pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+)(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
                pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+).*?(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
                          "(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"

                logger.debug("patron: " + pattern)
                logger.debug(info)
                r = re.compile(pattern)
                match = [m.groupdict() for m in r.finditer(info)][0]
                # logger.debug("data %s" % match)
                #logger.debug("data %s" % match)

                #if match['season'] is "": match['season'] = season
                #if match['episode'] is "": match['episode'] = "0"
                #logger.debug(match)

                str_lang = ""
                if match["lang"] is not None:
@@ -436,18 +540,19 @@ def episodios(item):

            season = match['season']
            episode = match['episode']
            logger.debug("title: " + title + " / url: " + url + " / calidad: " + item.quality + " / multi: " + str(multi) + " / Season: " + str(season) + " / EpisodeNumber: " + str(episode))
            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
                                 quality=item.quality, multi=multi, contentSeason=season,
                                 contentEpisodeNumber=episode, infoLabels = infoLabels))

    # order list
    #tmdb.set_infoLabels(itemlist, True)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
    if len(itemlist) > 1:
        itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            item.clone(title="[COLOR orange][B]Añadir esta serie a la videoteca[/B][/COLOR]", action="add_serie_to_library", extra="episodios"))
            item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios", quality=calidad))

    return itemlist

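# Standalone sketch of the "Last"-link pagination handling in episodios();
# HTML and URLs are made up for illustration (the real code also probes each
# page with httptools and keeps it only if response.sucess is truthy):
import re

pagination_sketch = '<li><a href="http://example.com/serie-x/pg/4">Last</a></li>'
full_url_sketch = re.search(r'<li><a href="([^"]+)">Last<\/a>', pagination_sketch).group(1)
url_sketch, last_page_sketch = re.search(r'(.*?\/pg\/)(\d+)', full_url_sketch).groups()

list_pages_sketch = ["http://example.com/serie-x/"]
for x in range(2, int(last_page_sketch) + 1):
    list_pages_sketch.append("%s%s" % (url_sketch, x))
print(list_pages_sketch)  # base page plus .../pg/2 .. .../pg/4
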
@@ -458,7 +563,7 @@ def search(item, texto):
    try:
        item.post = "q=%s" % texto
        item.pattern = "buscar-list"
        itemlist = listado2(item)
        itemlist = listado_busqueda(item)

        return itemlist

@@ -10,7 +10,8 @@
        "movie",
        "tvshow",
        "anime",
        "torrent"
        "torrent",
        "documentary"
    ],
    "settings": [
        {
@@ -21,6 +22,22 @@
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Incluir en Novedades - Peliculas",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_series",
            "type": "bool",
            "label": "Incluir en Novedades - Episodios de series",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_torrent",
            "type": "bool",
@@ -28,6 +45,14 @@
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_4k",
            "type": "bool",
            "label": "Incluir en Novedades - 4K",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
@@ -10,7 +10,7 @@ from core.item import Item
from platformcode import config, logger
from core import tmdb

host = 'http://torrentrapid.com/'  # Manually change "xx" in line 287 (".com/xx/library") to tl for torrentrapid, tr for torrentrapid, d20 for descargas2020
host = 'http://torrentrapid.com/'

def mainlist(item):
    logger.info()
@@ -40,12 +40,17 @@ def submenu(item):

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    data = data.replace("'", "\"").replace("/series\"", "/series/\"")  # Compatibility with mispelisy.series.com

    #patron = '<li><a href="http://(?:www.)?torrentrapid.com/' + item.extra + '/">.*?<ul>(.*?)</ul>'
    patron = '<li><a href="'+item.url+item.extra + '/">.*?<ul>(.*?)</ul>'  # Filtered by url
    data = scrapertools.get_match(data, patron)
    patron = '<li><.*?href="'+item.url+item.extra + '/">.*?<ul.*?>(.*?)</ul>'  # Filtered by url, compatibility with mispelisy.series.com
    #logger.debug("patron: " + patron + " / data: " + data)
    if "pelisyseries.com" in host and item.extra == "varios":  # compatibility with mispelisy.series.com
        data = '<a href="http://torrentrapid.com/varios/" title="Documentales"><i class="icon-rocket"></i> Documentales</a>'
    else:
        data = scrapertools.get_match(data, patron)

    patron = '<a href="([^"]+)".*?>([^>]+)</a>'
    patron = '<.*?href="([^"]+)".*?>([^>]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
@@ -92,15 +97,15 @@ def listado(item):
    url_next_page = ''

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    #data = httptools.downloadpage(item.url).data
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    #logger.debug(data)

    logger.debug('item.modo: %s' % item.modo)
    logger.debug('item.extra: %s' % item.extra)

    if item.modo != 'next' or item.modo == '':
        logger.debug('item.title: %s' % item.title)
        patron = '<ul class="' + item.extra + '">(.*?)</ul>'
        logger.debug("patron=" + patron)
        fichas = scrapertools.get_match(data, patron)
        page_extra = item.extra
    else:
@@ -109,11 +114,13 @@ def listado(item):

    patron = '<a href="([^"]+).*?'  # the url
    patron += 'title="([^"]+).*?'  # the title
    patron += '<img src="([^"]+)"[^>]+>.*?'  # the thumbnail
    #patron += '<span>([^<].*?)<'  # the quality: original NewPCT1 code: if the quality is missing, the following "matches" enters a loop
    patron += '<img.*?src="([^"]+)"[^>]+>.*?'  # the thumbnail
    patron += '<h2.*?>(.*?)?<\/h2>'  # alternative title
    patron += '<span>([^<].*?)?<'  # the quality
    #logger.debug("patron: " + patron + " / fichas: " + fichas)
    matches = re.compile(patron, re.DOTALL).findall(fichas)
    logger.debug('item.next_page: %s' % item.next_page)
    #logger.debug('item.next_page: %s' % item.next_page)
    #logger.debug(matches)

    # Pagination
    if item.next_page != 'b':
@@ -132,20 +139,22 @@ def listado(item):
        url_next_page = matches_next_page[0]
        modo = 'next'

    for scrapedurl, scrapedtitle, scrapedthumbnail, calidad in matches:
    for scrapedurl, scrapedtitle, scrapedthumbnail, title_alt, calidad in matches:
        url = scrapedurl
        title = scrapedtitle
        thumbnail = scrapedthumbnail
        action = "findvideos"
        extra = ""
        context = "movie"
        year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
        if ".com/series" in url:

        if ".com/serie" in url and "/miniseries" not in url:
            action = "episodios"
            extra = "serie"

            context = "tvshow"

            title = scrapertools.find_single_match(title, '([^-]+)')
            title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace("Ver en linea", "",
            title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace("Ver en linea ", "",
                                                                                                   1).strip()

        else:
@@ -153,39 +162,43 @@ def listado(item):
            if title.endswith("gratis"): title = title[:-7]
            if title.endswith("torrent"): title = title[:-8]
            if title.endswith("en HD"): title = title[:-6]

        if title == "":
            title = title_alt
            context_title = title_alt
            show = title_alt
        #if item.extra != "buscar-list":
        #    title = title + '[' + calidad + "]"

        show = title
        if item.extra != "buscar-list":
            title = title + ' ' + calidad
        # This block looks obsolete:
        #context = ""
        #context_title = scrapertools.find_single_match(url, "http://(?:www.)?torrentrapid.com/(.*?)/(.*?)/")
        #if context_title:
        #    try:
        #        context = context_title[0].replace("descargar-", "").replace("descargar", "").replace("pelicula", "movie").replace("series", "tvshow").replace("-hd", "").replace("-vo", "")
        #        context_title = context_title[1].replace("-", " ")
        #        if re.search('\d{4}', context_title[-4:]):
        #            context_title = context_title[:-4]
        #        elif re.search('\(\d{4}\)', context_title[-6:]):
        #            context_title = context_title[:-6]
        #
        #    except:
        #        context_title = show
        #

        context = ""
        context_title = scrapertools.find_single_match(url, "http://(?:www.)?torrentrapid.com/(.*?)/(.*?)/")
        if context_title:
            try:
                context = context_title[0].replace("descargar-", "").replace("pelicula", "movie").replace("series",
                                                                                                          "tvshow")
                context_title = context_title[1].replace("-", " ")
                if re.search('\d{4}', context_title[-4:]):
                    context_title = context_title[:-4]
                elif re.search('\(\d{4}\)', context_title[-6:]):
                    context_title = context_title[:-6]

            except:
                context_title = show
        logger.debug('contxt title: %s' % context_title)
        logger.debug('year: %s' % year)

        logger.debug('context: %s' % context)
        #logger.debug('contxt title: %s' % context_title)
        #logger.debug('year: %s' % year)
        #logger.debug('context: %s' % context)

        if not 'array' in title:
            itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
                                 extra = extra,
                                 show = context_title, contentTitle=context_title, contentType=context,
                                 context=["buscar_trailer"], infoLabels= {'year':year}))
                                 extra = extra, show = context_title, contentTitle=context_title, contentType=context, quality=calidad,
                                 context=["buscar_trailer"], infoLabels= {'year':year}))

        logger.debug("url: " + url + " / title: " + title + " / contxt title: " + context_title + " / context: " + context + " / calidad: " + calidad + " / year: " + year)

    tmdb.set_infoLabels(itemlist, True)

    if url_next_page:
        itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente",
                             url=url_next_page, next_page=next_page, folder=True,
@@ -193,12 +206,12 @@ def listado(item):
                             extra = page_extra))
    return itemlist

def listado2(item):
def listado_busqueda(item):
    logger.info()
    itemlist = []
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url, post=item.post).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    list_chars = [["Ã±", "ñ"]]

    for el in list_chars:
@@ -218,34 +231,82 @@ def listado2(item):

    pattern = '<ul class="%s">(.*?)</ul>' % item.pattern
    data = scrapertools.get_match(data, pattern)
    pattern = '<li><a href="(?P<url>[^"]+)".*?<img src="(?P<img>[^"]+)"[^>]+>.*?<h2.*?>\s*(?P<title>.*?)\s*</h2>'

    pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img.*?src="(?P<thumb>[^"]+)?".*?<h2.*?>(?P<title>.*?)?<\/h2>'
    matches = re.compile(pattern, re.DOTALL).findall(data)
    #logger.debug("patron: " + pattern)
    #logger.debug(matches)

    for url, thumb, title in matches:
        # fix encoding for title
        real_title = scrapertools.find_single_match(title, r'font color.*?font.*?><b>(.*?)<\/b><\/font>')
        real_title = scrapertools.find_single_match(title, r'<strong.*?>(.*?)Temporada.*?<\/strong>')  # series
        if real_title == "":
            real_title = scrapertools.find_single_match(title, r'(.*?)\[.*?]')  # movies
        real_title = scrapertools.remove_htmltags(real_title).decode('iso-8859-1').encode('utf-8')
        real_title = scrapertools.htmlclean(real_title)
        calidad = scrapertools.find_single_match(title, r'.*?\s*Calidad.*?<span[^>]+>[\[]\s*(?P<quality>.*?)\s*[\]]<\/span>')  # series
        if calidad == "":
            calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])')  # movies
        year = scrapertools.find_single_match(thumb, r'-(\d{4})')

        # fix encoding for title
        title = scrapertools.htmlclean(title)
        title = title.replace("Ã±", "ñ")
        title = title.replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng")
        title = re.sub(r'(Calidad.*?\])', '', title)

        if real_title == "":
            real_title = title
        if calidad == "":
            calidad = title
        context = "movie"

        # skip anything that is not a video
        if "/juego/" in url or "/varios/" in url:
        if "juego/" in url:
            continue

        if ".com/series" in url:
        # Code to rescue what we can from pelisy.series.com series for the video library. The URL points to the chapter, not the series; the series name is frequently blank and is recovered from the thumb, as is the series id
        if ("/serie" in url or "-serie" in url) and "pelisyseries.com" in host:
            calidad_mps = "series/"
            if "seriehd" in url:
                calidad_mps = "series-hd/"
            if "serievo" in url:
                calidad_mps = "series-vo/"
            if "serie-vo" in url:
                calidad_mps = "series-vo/"

            real_title_mps = re.sub(r'.*?\/\d+_', '', thumb)
            real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps)

            if "/0_" not in thumb:
                serieid = scrapertools.find_single_match(thumb, r'.*?\/\w\/(?P<serieid>\d+).*?.*')
                if len(serieid) > 5:
                    serieid = ""
            else:
                serieid = ""

            url = host + calidad_mps + real_title_mps + "/" + serieid

            real_title_mps = real_title_mps.replace("-", " ")
            #logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / real_title_mps: " + real_title_mps + " / calidad_mps : " + calidad_mps)
            real_title = real_title_mps

            show = real_title

        show = real_title
        if ".com/serie" in url and "/miniseries" not in url:

            context = "tvshow"

            itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb,
                                 context=["buscar_trailer"], contentSerieName=show))
            itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb, quality=calidad,
                                 show=show, extra="serie", context=["buscar_trailer"], contentType=context, contentTitle=real_title, contentSerieName=real_title, infoLabels= {'year':year}))
        else:

            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
                                 context=["buscar_trailer"]))

            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, quality=calidad,
                                 show=show, context=["buscar_trailer"], contentType=context, contentTitle=real_title, infoLabels= {'year':year}))

        logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / show: " + show + " / calidad: " + calidad)

    tmdb.set_infoLabels(itemlist, True)

    if post:
        itemlist.append(item.clone(channel=item.channel, action="listado2", title=">> Página siguiente",
        itemlist.append(item.clone(channel=item.channel, action="listado_busqueda", title=">> Página siguiente",
                                   thumbnail=get_thumb("next.png")))

    return itemlist
@@ -253,7 +314,6 @@ def listado2(item):
|
||||
def findvideos(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
## Cualquiera de las tres opciones son válidas
|
||||
# item.url = item.url.replace(".com/",".com/ver-online/")
|
||||
# item.url = item.url.replace(".com/",".com/descarga-directa/")
|
||||
@@ -263,32 +323,36 @@ def findvideos(item):
|
||||
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
|
||||
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
|
||||
data = data.replace("$!", "#!").replace("'", "\"").replace("ñ", "ñ").replace("//pictures", "/pictures")
|
||||
|
||||
title = scrapertools.find_single_match(data, "<h1><strong>([^<]+)<\/strong>[^<]+<\/h1>")
|
||||
title += scrapertools.find_single_match(data, "<h1><strong>[^<]+<\/strong>([^<]+)<\/h1>")
|
||||
caratula = scrapertools.find_single_match(data, '<div class="entry-left">.*?src="([^"]+)"')
|
||||
|
||||
#<div style="float:left;width:100%;min-height:70px;margin:10px 0px;"> <a href="javascript:void(0);" onClick="javascript:openTorrent();" title="Descargar torrent de Star Wars Los Ultimos Jedi " class="btn-torrent">Descarga tu Archivo torrent!</a> <script type="text/javascript"> function openTorrent() {var link = "http://advserver.xyz/v2/gena?gid=ADQGZS0ABR&uid=164"; window.open(link); window.location.href = "http://torrentrapid.com/descargar-torrent/104616_-1520707769-star-wars-los-ultimos-jedi--bluray-screeener/";} </script> </div>
|
||||
|
||||
title = scrapertools.find_single_match(data, "<h1.*?<strong>([^<]+)<\/strong>.*?<\/h1>") #corregido para adaptarlo a mispelisy.series.com
|
||||
title += scrapertools.find_single_match(data, "<h1.*?<strong>[^<]+<\/strong>([^<]+)<\/h1>") #corregido para adaptarlo a mispelisy.series.com
|
||||
#caratula = scrapertools.find_single_match(data, '<div class="entry-left">.*?src="([^"]+)"')
|
||||
caratula = scrapertools.find_single_match(data, '<h1.*?<img.*?src="([^"]+)')
|
||||
|
||||
patron = 'openTorrent.*?title=".*?class="btn-torrent">.*?function openTorrent.*?href = "(.*?)";'
|
||||
|
||||
#logger.debug("patron: " + patron + " / data: " + data)
|
||||
# escraped torrent
|
||||
url = scrapertools.find_single_match(data, patron)
|
||||
logger.debug("urltorrent: " + url + " Title: " + title + " Caratula: " + caratula)
|
||||
if url != "":
|
||||
|
||||
if "Temp" in title and item.quality != "": #scrapear información duplicada en Series
|
||||
title = re.sub(r'Temp.*?\[', '[', title)
|
||||
title = re.sub(r'\[Cap.*?\]', '', title)
|
||||
title = str(item.contentSeason) + "x" + str(item.contentEpisodeNumber) + " - " + item.contentTitle + ", " + title
|
||||
|
||||
if url != "": #Torrent
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, action="play", server="torrent", title="[torrent] - " + title, fulltitle=title,
|
||||
Item(channel=item.channel, action="play", server="torrent", title=title, quality=title, fulltitle=title,
|
||||
url=url, thumbnail=caratula, plot=item.plot, folder=False))
|
||||
|
||||
logger.debug("url: " + url + " / title: " + title + " / calidad: " + item.quality + " / context: " + str(item.context))
|
||||
|
||||
# escraped ver vídeos, descargar vídeos un link, múltiples liks
|
||||
|
||||
data = data.replace("http://tumejorserie.com/descargar/url_encript.php?link=", "(")
|
||||
data = data.replace(
|
||||
'javascript:;" onClick="popup("http://www.torrentrapid.com/tr/library/include/ajax/get_modallinks.php?links=', "")
|
||||
data = re.sub(r'javascript:;" onClick="popup\("http:\/\/(?:www.)?torrentrapid.com\/\w{1,9}\/library\/include\/ajax\/get_modallinks.php\?links=', "", data)
|
||||
#logger.debug("matar %s" % data)
|
||||
|
||||
logger.debug("matar %s" % data)
|
||||
|
||||
# Antiguo sistema de scrapeo de servidores usado por Newpct1. Como no funciona con torrentrapid, se sustituye por este más común
|
||||
# Antiguo sistema de scrapeo de servidores usado por Newpct1. Como no funciona con torrent.locura, se sustituye por este más común
|
||||
#patron_descargar = '<div id="tab2"[^>]+>.*?</ul>'
|
||||
#patron_ver = '<div id="tab3"[^>]+>.*?</ul>'
|
||||
|
||||
@@ -309,37 +373,48 @@ def findvideos(item):
|
||||
patron = '<div class=\"box1\"[^<]+<img src=\"([^<]+)?" style[^<]+><\/div[^<]+<div class="box2">([^<]+)?<\/div[^<]+<div class="box3">([^<]+)?'
|
||||
patron += '<\/div[^<]+<div class="box4">([^<]+)?<\/div[^<]+<div class="box5"><a href=(.*?)? rel.*?'
|
||||
patron += '<\/div[^<]+<div class="box6">([^<]+)?<'
|
||||
logger.debug("Patron: " + patron)
|
||||
#logger.debug("Patron: " + patron)
|
||||
|
||||
enlaces_ver = re.compile(patron, re.DOTALL).findall(data)
|
||||
enlaces_descargar = enlaces_ver
|
||||
logger.debug(enlaces_ver)
|
||||
#logger.debug(enlaces_ver)
|
||||
|
||||
if len(enlaces_ver) > 0:
|
||||
itemlist.append(item.clone(title="", action="", folder=False))
|
||||
itemlist.append(item.clone(title=" Enlaces Ver: ", action="", text_color="green", folder=False))
|
||||
|
||||
for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver:
|
||||
if "Ver" in titulo:
|
||||
servidor = servidor.replace("streamin", "streaminto")
|
||||
titulo = titulo + " [" + servidor + "]"
|
||||
titulo = title
|
||||
mostrar_server = True
|
||||
if config.get_setting("hidepremium"):
|
||||
mostrar_server = servertools.is_server_enabled(servidor)
|
||||
logger.debug("url: " + enlace + " / title: " + title + " / servidor: " + servidor + " / idioma: " + idioma)
|
||||
if mostrar_server:
|
||||
try:
|
||||
devuelve = servertools.findvideosbyserver(enlace, servidor)
|
||||
if devuelve:
|
||||
enlace = devuelve[0][1]
|
||||
itemlist.append(
|
||||
Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo,
|
||||
fulltitle=item.title, url=enlace, thumbnail=logo, plot=item.plot, folder=False))
|
||||
Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo, quality=titulo,
|
||||
fulltitle=titulo, url=enlace, thumbnail=logo, plot=item.plot, folder=False))
|
||||
except:
|
||||
pass
|
||||
|
||||
if len(enlaces_descargar) > 0:
itemlist.append(item.clone(title="", action="", folder=False))
itemlist.append(item.clone(title=" Enlaces Descargar: ", action="", text_color="green", folder=False))

for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar:
if "Ver" not in titulo:
servidor = servidor.replace("uploaded", "uploadedto")
partes = enlace.split(" ")
titulo = "Partes "
p = 1
logger.debug("url: " + enlace + " / title: " + title + " / servidor: " + servidor + " / idioma: " + idioma)
for enlace in partes:
parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " [" + servidor + "]"
parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " - " + title
p += 1
mostrar_server = True
if config.get_setting("hidepremium"):
@@ -349,11 +424,12 @@ def findvideos(item):
devuelve = servertools.findvideosbyserver(enlace, servidor)
if devuelve:
enlace = devuelve[0][1]
itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor,
title=parte_titulo, fulltitle=item.title, url=enlace, thumbnail=logo,
itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, quality=parte_titulo,
title=parte_titulo, fulltitle=parte_titulo, url=enlace, thumbnail=logo,
plot=item.plot, folder=False))
except:
pass

return itemlist
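# Illustrative sketch (not part of the commit): a download entry can pack
# several file parts into one space-separated link string; the inner loop above
# numbers each part as "Partes  (n/total) - <title>". Sample links invented.
enlace = "http://uploaded.net/file/p1 http://uploaded.net/file/p2"
partes = enlace.split(" ")
for p, parte in enumerate(partes, 1):
    print("Partes  (%s/%s)" % (p, len(partes)) + " - " + parte)
# Partes  (1/2) - http://uploaded.net/file/p1
# Partes  (2/2) - http://uploaded.net/file/p2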
@@ -363,6 +439,8 @@ def episodios(item):
infoLabels = item.infoLabels
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
calidad = item.quality

pattern = '<ul class="%s">(.*?)</ul>' % "pagination" # item.pattern
pagination = scrapertools.find_single_match(data, pattern)
if pagination:
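# Illustrative sketch (not part of the commit): when a pagination <ul> exists,
# the page links are collected first and each page is scraped in turn. The
# sample HTML and the href regex below are invented for the example.
import re
pagination = '<li><a href="/serie/x/pg/2">2</a></li><li><a href="/serie/x/pg/3">3</a></li>'
list_pages = re.findall(r'<a href="([^"]+)"', pagination)
# list_pages == ["/serie/x/pg/2", "/serie/x/pg/3"]; the loop then downloads each page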
@@ -381,22 +459,43 @@ def episodios(item):
logger.debug("Loading page %s/%s url=%s" % (index, len(list_pages), page))
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

data = data.replace("chapters", "buscar-list") # Compatibility with mispelisy.series.com
pattern = '<ul class="%s">(.*?)</ul>' % "buscar-list" # item.pattern
data = scrapertools.get_match(data, pattern)
#logger.debug("data: " + data)

pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img src="(?P<thumb>[^"]+)".*?<h2[^>]+>(?P<info>.*?)</h2>'
if "pelisyseries.com" in host:
pattern = '<li[^>]*><div class.*?src="(?P<thumb>[^"]+)?".*?<a class.*?href="(?P<url>[^"]+).*?<h3[^>]+>(?P<info>.*?)?<\/h3>.*?<\/li>'
else:
pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img.*?src="(?P<thumb>[^"]+)?".*?<h2[^>]+>(?P<info>.*?)?<\/h2>'
matches = re.compile(pattern, re.DOTALL).findall(data)
#logger.debug("patron: " + pattern)
#logger.debug(matches)
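# Illustrative sketch (not part of the commit): the named-group pattern above
# pulls (url, thumb, info) out of each episode <li>. The sample row is invented.
import re
data = ('<li class="row"><a href="http://descargas2020.com/serie/cap-201">'
        '<img class="c" src="http://example.com/thumb.jpg">'
        '<h2 class="t">Mi Serie - Temporada 2 Capitulo 1</h2></li>')
pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img.*?src="(?P<thumb>[^"]+)?".*?<h2[^>]+>(?P<info>.*?)?<\/h2>'
matches = re.compile(pattern, re.DOTALL).findall(data)
# matches == [("http://descargas2020.com/serie/cap-201", "http://example.com/thumb.jpg",
#              "Mi Serie - Temporada 2 Capitulo 1")]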
season = "1"
|
||||
|
||||
for url, thumb, info in matches:
|
||||
|
||||
if "pelisyseries.com" in host:
|
||||
interm = url
|
||||
url = thumb
|
||||
thumb = interm
|
||||
|
||||
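# Note on the swap above (explanatory, not part of the commit): findall returns
# capture groups positionally, and in the pelisyseries.com pattern the
# (?P<thumb>...) group appears before (?P<url>...), so the unpacked url and
# thumb arrive crossed and have to be exchanged.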
if "<span" in info: # new style
|
||||
pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)\s*Capitulo(?:s)?\s*(?P<episode>\d+)" \
|
||||
"(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)</span>\s*Calidad\s*<span[^>]+>" \
|
||||
"[\[]\s*(?P<quality>.*?)\s*[\]]</span>"
|
||||
pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)?.*?Capitulo(?:s)?\s*(?P<episode>\d+)?" \
|
||||
"(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>" \
|
||||
"[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
|
||||
if "Especial" in info: # Capitulos Especiales
|
||||
pattern = ".*?[^>]+>.*?Temporada.*?\[.*?(?P<season>\d+).*?\].*?Capitulo.*?\[\s*(?P<episode>\d+).*?\]?(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
|
||||
logger.debug("patron: " + pattern)
|
||||
logger.debug(info)
|
||||
r = re.compile(pattern)
|
||||
match = [m.groupdict() for m in r.finditer(info)][0]
|
||||
|
||||
if match['season'] is None: match['season'] = season
|
||||
if match['episode'] is None: match['episode'] = "0"
|
||||
if match['quality']: item.quality = match['quality']
|
||||
|
||||
if match["episode2"]:
|
||||
multi = True
|
||||
title = "%s (%sx%s-%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
|
||||
@@ -408,12 +507,17 @@ def episodios(item):
match["lang"], match["quality"])

else: # old style
pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+)(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+).*?(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
"(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"

logger.debug("patron: " + pattern)
logger.debug(info)
r = re.compile(pattern)
match = [m.groupdict() for m in r.finditer(info)][0]
# logger.debug("data %s" % match)
#logger.debug("data %s" % match)

#if match['season'] is "": match['season'] = season
#if match['episode'] is "": match['episode'] = "0"
#logger.debug(match)

str_lang = ""
if match["lang"] is not None:
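# Illustrative sketch (not part of the commit): what the two patterns above
# extract from a "new style" and an "old style" episode label. Both sample
# info strings are invented; the group names follow the diff.
import re
new_style = '<strong>Mi Serie - Temporada 2 Capitulos 5<br><span class="l">Castellano</span> Calidad <span class="q">[HDTV]</span>'
pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)?.*?Capitulo(?:s)?\s*(?P<episode>\d+)?" \
          "(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>" \
          "[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
match = [m.groupdict() for m in re.compile(pattern).finditer(new_style)][0]
# match == {"season": "2", "episode": "5", "episode2": None, "lang": "Castellano", "quality": "HDTV"}
old_style = 'Mi Serie [HDTV][Cap.205][Español]'
pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+).*?(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
          "(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"
match = [m.groupdict() for m in re.compile(pattern).finditer(old_style)][0]
# match == {"quality": "HDTV", "season": "2", "episode": "05", "season2": None, "episode2": None, "lang": "Español"}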
@@ -436,18 +540,19 @@ def episodios(item):

season = match['season']
episode = match['episode']
logger.debug("title: " + title + " / url: " + url + " / calidad: " + item.quality + " / multi: " + str(multi) + " / Season: " + str(season) + " / EpisodeNumber: " + str(episode))
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
quality=item.quality, multi=multi, contentSeason=season,
contentEpisodeNumber=episode, infoLabels = infoLabels))

# order list
#tmdb.set_infoLabels(itemlist, True)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
if len(itemlist) > 1:
itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))

if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
item.clone(title="[COLOR orange][B]Añadir esta serie a la videoteca[/B][/COLOR]", action="add_serie_to_library", extra="episodios"))
item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios", quality=calidad))

return itemlist
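# Illustrative sketch (not part of the commit): the sorted() call above orders
# episode items by (season, episode). The int() conversion matters: as plain
# strings "10" would sort before "2". Tuples below stand in for Item objects.
itemlist = [("2", "01"), ("1", "10"), ("1", "02")]
itemlist = sorted(itemlist, key=lambda it: (int(it[0]), int(it[1])))
# itemlist == [("1", "02"), ("1", "10"), ("2", "01")]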
@@ -458,7 +563,7 @@ def search(item, texto):
try:
item.post = "q=%s" % texto
item.pattern = "buscar-list"
itemlist = listado2(item)
itemlist = listado_busqueda(item)

return itemlist
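# Illustrative sketch (not part of the commit): the search text is sent as POST
# form data; the "q" field name comes from the diff, the downloadpage usage
# below is assumed for the example.
texto = "breaking bad"
post = "q=%s" % texto
# post == "q=breaking bad"; a channel would then fetch along the lines of
# httptools.downloadpage(host + "buscar", post=post).data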