Commit 4ad5760e93 (parent 7d6ade2a90)
alfa-addon, 2017-09-01 20:17:58 -04:00
3 changed files with 294 additions and 158 deletions

plugin.video.alfa/channels/divxtotal.py Executable file → Normal file

@@ -232,7 +232,6 @@ def findtemporadas(item):
th.start()
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
if len(item.extra.split("|")):
if len(item.extra.split("|")) >= 4:
fanart = item.extra.split("|")[2]
@@ -266,7 +265,7 @@ def findtemporadas(item):
fanart_extra = item.fanart
fanart_info = item.fanart
bloque_episodios = scrapertools.find_multiple_matches(data, 'Temporada (\d+) </a>(.*?)</table>')
bloque_episodios = scrapertools.find_multiple_matches(data, 'Temporada.*?(\d+).*?<\/a>(.*?)<\/table>')
for temporada, bloque_epis in bloque_episodios:
item.infoLabels = item.InfoLabels
item.infoLabels['season'] = temporada
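
The loosened pattern above tolerates extra markup between "Temporada", the season number and the closing tag. A minimal sketch of the difference, using a hypothetical sample of the site's HTML:

import re

# hypothetical sample: the season number is now wrapped in extra markup
html = 'Temporada <b>2</b> </a><table><tr>ep1</tr></table>'

strict = re.findall(r'Temporada (\d+) </a>(.*?)</table>', html)
loose = re.findall(r'Temporada.*?(\d+).*?</a>(.*?)</table>', html)
# strict -> []  (the <b> wrapper breaks the rigid spacing)
# loose  -> [('2', '<table><tr>ep1</tr>')]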
@@ -299,9 +298,8 @@ def epis(item):
itemlist = []
if item.extra == "serie_add":
item.url = item.datalibrary
patron = scrapertools.find_multiple_matches(item.url,
'<td><img src=".*?images/(.*?)\.png.*?<a href="([^"]+)" title="">.*?(\d+x\d+).*?td>')
'<td><img src=".*?images\/(.*?)\.png".*?href="([^"]+)" title="">.*?(\d+x\d+).*?td>')
for idioma, url, epi in patron:
episodio = scrapertools.find_single_match(epi, '\d+x(\d+)')
item.infoLabels['episode'] = episodio
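
scrapertools.find_single_match above pulls the episode number out of a "3x07"-style token; a plain-re stand-in (simplified, and the input string is hypothetical):

import re

def find_single_match(text, pattern):
    # simplified stand-in for scrapertools.find_single_match
    m = re.search(pattern, text, re.DOTALL)
    return m.group(1) if m else ""

print(find_single_match("Capitulo 3x07", r"\d+x(\d+)"))  # -> 07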

plugin.video.alfa/channels/newpct1.py

@@ -8,7 +8,9 @@ from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import tmdb
host = 'http://newpct1.com/'
def mainlist(item):
logger.info()
@@ -17,13 +19,15 @@ def mainlist(item):
thumb_pelis=get_thumb("channels_movie.png")
thumb_series=get_thumb("channels_tvshow.png")
thumb_search = get_thumb("search.png")
itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url="http://www.newpct1.com/",
itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=host,
extra="peliculas", thumbnail=thumb_pelis ))
itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url="http://www.newpct1.com/", extra="series",
itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=host, extra="series",
thumbnail=thumb_series))
# itemlist.append(Item(channel=item.channel, action="search", title="Buscar"))
itemlist.append(
Item(channel=item.channel, action="search", title="Buscar", url=host + "buscar", thumbnail=thumb_search))
return itemlist
@@ -96,7 +100,7 @@ def alfabeto(item):
title = scrapedtitle.upper()
url = scrapedurl
itemlist.append(Item(channel=item.channel, action="completo", title=title, url=url, extra=item.extra))
itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra=item.extra))
return itemlist
@@ -105,13 +109,23 @@ def listado(item):
logger.info()
# logger.info("[newpct1.py] listado url=" + item.url)
itemlist = []
url_next_page =''
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
#logger.debug(data)
logger.debug('item.modo: %s'%item.modo)
logger.debug('item.extra: %s'%item.extra)
patron = '<ul class="' + item.extra + '">(.*?)</ul>'
logger.debug("patron=" + patron)
fichas = scrapertools.get_match(data, patron)
if item.modo != 'next' or item.modo =='':
logger.debug('item.title: %s'% item.title)
patron = '<ul class="' + item.extra + '">(.*?)</ul>'
logger.debug("patron=" + patron)
fichas = scrapertools.get_match(data, patron)
page_extra = item.extra
else:
fichas = data
page_extra = item.extra
# <li><a href="http://www.newpct1.com/pelicula/x-men-dias-del-futuro-pasado/ts-screener/" title="Descargar XMen Dias Del Futuro gratis"><img src="http://www.newpct1.com/pictures/f/58066_x-men-dias-del-futuro--blurayrip-ac3-5.1.jpg" width="130" height="180" alt="Descargar XMen Dias Del Futuro gratis"><h2>XMen Dias Del Futuro </h2><span>BluRayRip AC3 5.1</span></a></li>
patron = '<li><a href="([^"]+).*?' # url
@@ -120,6 +134,25 @@ def listado(item):
patron += '<span>([^<]*)</span>' # quality
matches = re.compile(patron, re.DOTALL).findall(fichas)
logger.debug('item.next_page: %s'%item.next_page)
# Pagination (sketched after this function)
if item.next_page != 'b':
if len(matches) > 30:
url_next_page = item.url
matches = matches[:30]
next_page = 'b'
modo = 'continue'
else:
matches = matches[30:]
next_page = 'a'
patron_next_page = '<a href="([^"]+)">Next<\/a>'
matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
modo = 'continue'
if len(matches_next_page) > 0:
url_next_page = matches_next_page[0]
modo = 'next'
for scrapedurl, scrapedtitle, scrapedthumbnail, calidad in matches:
url = scrapedurl
@@ -127,33 +160,17 @@ def listado(item):
thumbnail = scrapedthumbnail
action = "findvideos"
extra = ""
year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
if "1.com/series" in url:
action = "completo"
action = "episodios"
extra = "serie"
title = scrapertools.find_single_match(title, '([^-]+)')
title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace("Ver en linea", "",
1).strip()
# logger.info("[newpct1.py] titulo="+title)
'''
if len(title)>3:
url_i = 'http://www.newpct1.com/index.php?page=buscar&url=&letter=&q=%22' + title.replace(" ","%20") + '%22'
else:
url_i = 'http://www.newpct1.com/index.php?page=buscar&url=&letter=&q=' + title
if "1.com/series-hd" in url:
extra="serie-hd"
url = url_i + '&categoryID=&categoryIDR=1469&calidad=' + calidad.replace(" ","+") #DTV+720p+AC3+5.1
elif "1.com/series-vo" in url:
extra="serie-vo"
url = url_i + '&categoryID=&categoryIDR=775&calidad=' + calidad.replace(" ","+") #HDTV+720p+AC3+5.1
elif "1.com/series/" in url:
extra="serie-tv"
url = url_i + '&categoryID=&categoryIDR=767&calidad=' + calidad.replace(" ","+")
url += '&idioma=&ordenar=Nombre&inon=Descendente'
'''
else:
title = title.replace("Descargar", "", 1).strip()
if title.endswith("gratis"): title = title[:-7]
@@ -164,9 +181,10 @@ def listado(item):
context = ""
context_title = scrapertools.find_single_match(url, "http://(?:www.)?newpct1.com/(.*?)/(.*?)/")
#logger.debug('context_title[0]: %s' % context_title[0])
if context_title:
try:
context = context_title[0].replace("pelicula", "movie").replace("descargar", "movie").replace("series",
context = context_title[0].replace("descargar-", "").replace("pelicula", "movie").replace("series",
"tvshow")
context_title = context_title[1].replace("-", " ")
if re.search('\d{4}', context_title[-4:]):
@@ -176,22 +194,126 @@ def listado(item):
except:
context_title = show
logger.debug('context title: %s'%context_title)
logger.debug('year: %s' % year)
itemlist.append(
Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail, extra=extra, show=show,
contentTitle=context_title, contentType=context, context=["buscar_trailer"]))
logger.debug('context: %s' % context)
if not 'array' in title:
new_item = Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
extra = extra,
show = context_title, contentTitle=context_title, contentType=context,
context=["buscar_trailer"], infoLabels= {'year':year})
if year:
tmdb.set_infoLabels_item(new_item, seekTmdb = True)
itemlist.append(new_item)
if "pagination" in data:
patron = '<ul class="pagination">(.*?)</ul>'
paginacion = scrapertools.get_match(data, patron)
if "Next" in paginacion:
url_next_page = scrapertools.get_match(paginacion, '<a href="(http[^>]+)>Next</a>')[:-1].replace(" ", "%20")
itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente", url=url_next_page,
extra=item.extra))
if url_next_page:
itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente",
url=url_next_page, next_page=next_page, folder=True,
text_color='yellow', text_bold=True, modo = modo, plot = extra,
extra = page_extra))
# if "pagination" in data:
# patron = '<ul class="pagination">(.*?)</ul>'
# paginacion = scrapertools.get_match(data, patron)
#
# if "Next" in paginacion:
# url_next_page = scrapertools.get_match(paginacion, '<a href="(http[^>]+)>Next</a>')[:-1].replace(" ", "%20")
# itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente", url=url_next_page,
# extra=item.extra))
# logger.info("[newpct1.py] listado items:" + str(len(itemlist)))
return itemlist
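
The pagination added to listado() serves one scraped page as two screens of 30 items: the first pass shows matches[:30] and re-queues the same URL with next_page='b'; the 'b' pass shows matches[30:] and follows the site's real Next link. A condensed sketch of that state machine (function and parameter names are hypothetical):

def paginate(matches, url, next_page, find_next_url):
    # returns (items to show, url for the following screen, next_page flag)
    if next_page != 'b':
        if len(matches) > 30:
            # first half: stay on the same URL and flag the second half
            return matches[:30], url, 'b'
        return matches, None, 'a'  # short page, nothing left to split
    # second half of the same page: follow the real "Next" link
    return matches[30:], find_next_url(), 'a'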
def listado2(item):
logger.info()
itemlist = []
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url, post=item.post).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
logger.debug(data)
list_chars = [["Ã±", "ñ"]]
for el in list_chars:
data = re.sub(r"%s" % el[0], el[1], data)
try:
# logger.debug("data %s " % data)
get, post = scrapertools.find_single_match(data, '<ul class="pagination">.*?<a class="current" href.*?'
'<a\s*href="([^"]+)"(?:\s*onClick=".*?\'([^"]+)\'.*?")')
except:
post = False
if post:
# logger.debug("post %s" % post)
# logger.debug("item.post %s" % item.post)
if "pg" in item.post:
item.post = re.sub(r"pg=(\d+)", "pg=%s" % post, item.post)
# logger.debug("item.post %s" % item.post)
else:
item.post += "&pg=%s" % post
# logger.debug("item.post %s" % item.post)
# logger.debug("data %s " % next_page)
pattern = '<ul class="%s">(.*?)</ul>' % item.pattern
data = scrapertools.get_match(data, pattern)
# logger.debug("data %s " % data)
pattern = '<li><a href="(?P<url>[^"]+)".*?<img src="(?P<img>[^"]+)"[^>]+>.*?<h2.*?>\s*(?P<title>.*?)\s*</h2>'
matches = re.compile(pattern, re.DOTALL).findall(data)
for url, thumb, title in matches:
# fix encoding for title
title = scrapertools.htmlclean(title)
title = title.replace("�", "ñ")
# logger.debug("\n\nu %s " % url)
# logger.debug("\nb %s " % thumb)
# logger.debug("\nt %s " % title)
# title is the clean way but it doesn't work if the title is too long, so we have to use title_to_fix
# title_fix = False
# if title.endswith(".."):
# title = title_to_fix
# title_fix = True
# skip anything that is not a video
if "/juego/" in url or "/varios/" in url:
continue
if ".com/series" in url:
# title = scrapertools.find_single_match(title, '([^-]+)')
# title = title.replace("Ver online", "", 1).replace("Ver en linea", "", 1). \
# replace("Descarga Serie HD", "", 1).strip()
show = title
# if quality:
# title = "%s [%s]" % (title, quality)
itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb,
context=["buscar_trailer"], show=show))
else:
# title = title.replace("Descargar", "", 1).strip()
# if title.endswith("gratis"):
# title = title[:-6].strip()
# if quality:
# title = "%s [%s]" % (title, quality)
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
context=["buscar_trailer"]))
if post:
itemlist.append(item.clone(channel=item.channel, action="listado2", title=">> Página siguiente",
thumbnail=get_thumb("next.png")))
return itemlist
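
listado2() pages by rewriting the pg= field of the POST body it sends back to the site; the branch near the top of the function condenses to a small helper (the name bump_page is hypothetical):

import re

def bump_page(post, pg):
    # rewrite, or append, the pg= field of a form-encoded POST body
    if "pg" in post:
        return re.sub(r"pg=(\d+)", "pg=%s" % pg, post)
    return post + "&pg=%s" % pg

# bump_page("q=forever", 2)       -> 'q=forever&pg=2'
# bump_page("q=forever&pg=2", 3)  -> 'q=forever&pg=3'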
def completo(item):
logger.info()
@@ -202,6 +324,7 @@ def completo(item):
item_extra = item.extra
item_show = item.show
item_title = item.title
infoLabels = item.infoLabels
# Read the entries
if item_extra.startswith("serie"):
@@ -225,14 +348,14 @@ def completo(item):
fanart = oTvdb.get_graphics_by_serieId(serieID)
if len(fanart)>0:
item.fanart = fanart[0]'''
try:
from core.tmdb import Tmdb
oTmdb = Tmdb(texto_buscado=item.show, tipo="tv", idioma_busqueda="es")
item.fanart = oTmdb.get_backdrop()
item.plot = oTmdb.get_sinopsis()
print item.plot
except:
pass
# try:
# from core.tmdb import Tmdb
# oTmdb = Tmdb(texto_buscado=item.show, tipo="tv", idioma_busqueda="es")
# item.fanart = oTmdb.get_backdrop()
# item.plot = oTmdb.get_sinopsis()
# print item.plot
# except:
# pass
else:
item_title = item.show
@@ -281,109 +404,6 @@ def completo(item):
return itemlist
def get_episodios(item):
logger.info("url=" + item.url)
itemlist = []
data = re.sub(r'\n|\r|\t|\s{2}|<!--.*?-->|<i class="icon[^>]+"></i>', "", httptools.downloadpage(item.url).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
logger.debug("data=" + data)
patron = '<ul class="buscar-list">(.*?)</ul>'
# logger.info("[newpct1.py] patron=" + patron)
fichas = scrapertools.get_match(data, patron)
# logger.info("[newpct1.py] matches=" + str(len(fichas)))
# <li><a href="http://www.newpct1.com/serie/forever/capitulo-101/" title="Serie Forever 1x01"><img src="http://www.newpct1.com/pictures/c/minis/1880_forever.jpg" alt="Serie Forever 1x01"></a> <div class="info"> <a href="http://www.newpct1.com/serie/forever/capitulo-101/" title="Serie Forever 1x01"><h2 style="padding:0;">Serie <strong style="color:red;background:none;">Forever - Temporada 1 </strong> - Temporada<span style="color:red;background:none;">[ 1 ]</span>Capitulo<span style="color:red;background:none;">[ 01 ]</span><span style="color:red;background:none;padding:0px;">Espa<70>ol Castellano</span> Calidad <span style="color:red;background:none;">[ HDTV ]</span></h2></a> <span>27-10-2014</span> <span>450 MB</span> <span class="color"><ahref="http://www.newpct1.com/serie/forever/capitulo-101/" title="Serie Forever 1x01"> Descargar</a> </div></li>
# logger.info("[newpct1.py] get_episodios: " + fichas)
patron = '<li[^>]*><a href="([^"]+).*?' # url
patron += '<img src="([^"]+)".*?' # thumbnail
patron += '<h2 style="padding(.*?)/h2>' # title, language and quality
matches = re.compile(patron, re.DOTALL).findall(fichas)
# logger.info("[newpct1.py] get_episodios matches: " + str(len(matches)))
for scrapedurl, scrapedthumbnail, scrapedinfo in matches:
try:
url = scrapedurl
if '</span>' in scrapedinfo:
# logger.info("[newpct1.py] get_episodios: scrapedinfo="+scrapedinfo)
try:
# <h2 style="padding:0;">Serie <strong style="color:red;background:none;">The Big Bang Theory - Temporada 6 </strong> - Temporada<span style="color:red;background:none;">[ 6 ]</span>Capitulo<span style="color:red;background:none;">[ 03 ]</span><span style="color:red;background:none;padding:0px;">Español Castellano</span> Calidad <span style="color:red;background:none;">[ HDTV ]</span></h2>
patron = '<span style=".*?">\[\s*(.*?)\]</span>.*?' # season
patron += '<span style=".*?">\[\s*(.*?)\].*?' # episode
patron += ';([^/]+)' # language
info_extra = re.compile(patron, re.DOTALL).findall(scrapedinfo)
(temporada, capitulo, idioma) = info_extra[0]
except:
# <h2 style="padding:0;">Serie <strong style="color:red;background:none;">The Affair Temporada 3 Capitulo 5</strong> - <span style="color:red;background:none;padding:0px;">Español Castellano</span> Calidad <span style="color:red;background:none;">[ HDTV ]</span></h2>
patron = '<strong style=".*?">([^<]+).*?' # season and episode
patron += '<span style=".*?">([^<]+)'
info_extra = re.compile(patron, re.DOTALL).findall(scrapedinfo)
(temporada_capitulo, idioma) = info_extra[0]
if re.search(r'(?i)Capitulos', temporada_capitulo):
temporada = scrapertools.find_single_match(temporada_capitulo, 'Temp.*?\s*([\d]+)')
cap1, cap2 = scrapertools.find_single_match(temporada_capitulo, 'Cap.*?\s*(\d+).*?(\d+)')
capitulo = ""
else:
temporada, capitulo = scrapertools.get_season_and_episode(temporada_capitulo).split('x')
# logger.info("[newpct1.py] get_episodios: temporada=" + temporada)
# logger.info("[newpct1.py] get_episodios: capitulo=" + capitulo)
logger.debug("idioma=" + idioma)
if '">' in idioma:
idioma = " [" + scrapertools.find_single_match(idioma, '">([^<]+)').strip() + "]"
elif '&nbsp' in idioma:
idioma = " [" + scrapertools.find_single_match(idioma, '&nbsp;([^<]+)').strip() + "]"
'''else:
idioma=""'''
if capitulo:
title = item.title + " (" + temporada.strip() + "x" + capitulo.strip() + ") " + idioma
else:
title = item.title + " (Del %sx%s al %sx%s) %s" % (temporada, cap1, temporada, cap2, idioma)
else:
# <h2 style="padding:0;">The Big Bang Theory - Temporada 6 [HDTV][Cap.602][Español Castellano]</h2>
# <h2 style="padding:0;">The Beast - Temporada 1 [HDTV] [Capítulo 13] [Español]</h2
# <h2 style="padding:0;">The Beast - Temp.1 [DVD-DVB][Cap.103][Spanish]</h2>
try:
temp, cap = scrapertools.get_season_and_episode(scrapedinfo).split('x')
except:
# Format as seasonXepisode (rule sketched after this function)
patron = re.compile('Cap.*?\s*([\d]+)', re.IGNORECASE)
info_extra = patron.search(scrapedinfo)
if len(str(info_extra.group(1))) >= 3:
cap = info_extra.group(1)[-2:]
temp = info_extra.group(1)[:-2]
else:
cap = info_extra.group(1)
patron = 'Temp.*?\s*([\d]+)'
temp = re.compile(patron, re.IGNORECASE).search(scrapedinfo).group(1)
title = item.title + " (" + temp + 'x' + cap + ")"
# logger.info("[newpct1.py] get_episodios: fanart= " +item.fanart)
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=item.thumbnail,
show=item.show, fanart=item.fanart))
except:
logger.error("ERROR al añadir un episodio")
if "pagination" in data:
patron = '<ul class="pagination">(.*?)</ul>'
paginacion = scrapertools.get_match(data, patron)
# logger.info("[newpct1.py] get_episodios: paginacion= " + paginacion)
if "Next" in paginacion:
url_next_page = "http" + scrapertools.get_match(paginacion, '<a href="(http[^>]+)>Next</a>')[:-1]
url_next_page = url_next_page.replace(" ", "%20")
# logger.info("[newpct1.py] get_episodios: url_next_page= " + url_next_page)
itemlist.append(
Item(channel=item.channel, action="get_episodios", title=">> Página siguiente", url=url_next_page))
return itemlist
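
The fallback above turns "Cap.103"-style tokens into season and episode by peeling the last two digits off the number. A self-contained sketch of that rule (sample string taken from the comments above):

import re

def season_episode(info):
    # last two digits are the episode; anything before them is the season
    num = re.search(r'Cap.*?\s*(\d+)', info, re.IGNORECASE).group(1)
    if len(num) >= 3:
        return num[:-2], num[-2:]
    temp = re.search(r'Temp.*?\s*(\d+)', info, re.IGNORECASE).group(1)
    return temp, num

# season_episode("The Beast - Temp.1 [DVD-DVB][Cap.103][Spanish]") -> ('1', '03')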
def buscar_en_subcategoria(titulo, categoria):
data = httptools.downloadpage("http://www.newpct1.com/pct1/library/include/ajax/get_subcategory.php",
post="categoryIDR=" + categoria).data
@@ -491,6 +511,124 @@ def findvideos(item):
return itemlist
# def episodios(item):
# # Needed for automatic library updates
# infoLabels= item.infoLabels
# infoLabels['show']=item.show
# return completo(Item(item.clone(url=item.url, extra="serie_add", infoLabels=infoLabels)))
def episodios(item):
# Needed for automatic library updates
return completo(Item(channel=item.channel, url=item.url, show=item.show, extra="serie_add"))
logger.info()
itemlist = []
infoLabels = item.infoLabels
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
# logger.debug("data %s " % data)
pattern = '<ul class="%s">(.*?)</ul>' % "pagination" # item.pattern
pagination = scrapertools.find_single_match(data, pattern)
# logger.debug("pagination %s" % pagination)
if pagination:
pattern = '<li><a href="([^"]+)">Last<\/a>'
full_url = scrapertools.find_single_match(pagination, pattern)
url, last_page = scrapertools.find_single_match(full_url, r'(.*?\/pg\/)(\d+)')
list_pages = []
for x in range(1, int(last_page) + 1):
list_pages.append("%s%s" % (url, x))
# logger.debug("data %s%s" % (url, x))
# logger.debug("list_pages %s" % list_pages)
else:
list_pages = [item.url]
for index, page in enumerate(list_pages):
logger.debug("Loading page %s/%s url=%s" % (index, len(list_pages), page))
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
pattern = '<ul class="%s">(.*?)</ul>' % "buscar-list" # item.pattern
data = scrapertools.get_match(data, pattern)
# logger.debug("data %s " % data)
pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img src="(?P<thumb>[^"]+)".*?<h2[^>]+>(?P<info>.*?)</h2>'
matches = re.compile(pattern, re.DOTALL).findall(data)
# logger.debug("data %s " % matches)
for url, thumb, info in matches:
# logger.debug("info %s" % info)
if "<span" in info: # new style
pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)\s*Capitulo(?:s)?\s*(?P<episode>\d+)" \
"(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)</span>\s*Calidad\s*<span[^>]+>" \
"[\[]\s*(?P<quality>.*?)\s*[\]]</span>"
r = re.compile(pattern)
match = [m.groupdict() for m in r.finditer(info)][0]
if match["episode2"]:
multi = True
title = "%s (%sx%s-%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
str(match["episode2"]).zfill(2), match["lang"],
match["quality"])
else:
multi = False
title = "%s (%sx%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
match["lang"], match["quality"])
else: # old style
pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+)(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
"(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"
r = re.compile(pattern)
match = [m.groupdict() for m in r.finditer(info)][0]
# logger.debug("data %s" % match)
str_lang = ""
if match["lang"] is not None:
str_lang = "[%s]" % match["lang"]
if match["season2"] and match["episode2"]:
multi = True
if match["season"] == match["season2"]:
title = "%s (%sx%s-%s) %s[%s]" % (item.show, match["season"], match["episode"],
match["episode2"], str_lang, match["quality"])
else:
title = "%s (%sx%s-%sx%s) %s[%s]" % (item.show, match["season"], match["episode"],
match["season2"], match["episode2"], str_lang,
match["quality"])
else:
title = "%s (%sx%s) %s[%s]" % (item.show, match["season"], match["episode"], str_lang,
match["quality"])
multi = False
season = match['season']
episode = match['episode']
infoLabels['season']= season
infoLabels['episode'] = episode
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
quality=item.quality, multi=multi, contentSeason=season,
contentEpisodeNumber=episode, infoLabels = infoLabels))
# order list
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
if len(itemlist) > 1:
return sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))
return itemlist
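
The old-style branch of episodios() above leans on named groups and groupdict(); a self-contained look at how one of the commented sample headers decomposes:

import re

pattern = (r"\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+)(?P<episode>\d{2})"
           r"(?:_(?P<season2>\d+)(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?")

info = "The Big Bang Theory - Temporada 6 [HDTV][Cap.602][Español Castellano]"
match = [m.groupdict() for m in re.finditer(pattern, info)][0]
# -> {'quality': 'HDTV', 'season': '6', 'episode': '02',
#     'season2': None, 'episode2': None, 'lang': 'Español Castellano'}
# lang only fills when a bracketed tag directly follows the Cap block;
# otherwise the optional group is skipped and lang is None, hence the
# `if match["lang"] is not None` guard in the code above.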
def search(item, texto):
logger.info("search:" + texto)
# texto = texto.replace(" ", "+")
try:
item.post = "q=%s" % texto
item.pattern = "buscar-list"
itemlist = listado2(item)
return itemlist
# Catch the exception so one failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
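
search() follows the convention spelled out in the comment above: a channel that blows up must log the traceback and return an empty list so the global searcher keeps going. The guard in isolation (names are hypothetical):

import sys

def safe_search(channel_search, item, text):
    # a failing channel returns [] instead of propagating the exception
    try:
        return channel_search(item, text)
    except:  # bare except mirrors the channel code above
        for line in sys.exc_info():
            print("error: %s" % line)  # stand-in for logger.error
        return []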

plugin.video.alfa/channels/… (third changed file)

@@ -4,7 +4,7 @@
# -*- By the Alfa Develop Group -*-
import re
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
@@ -66,7 +66,7 @@ def list_all(item):
plot=plot,
contentSerieName=contentSerieName
))
itemlist = get_thumb(templist)
itemlist = serie_thumb(templist)
# Pagination
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, next_page=next_page, i=item.i))
@@ -110,7 +110,7 @@ def episodios(item):
return itemlist
def get_thumb(itemlist):
def serie_thumb(itemlist):
logger.info()
for item in itemlist:
data = get_source(item.url)
@@ -135,7 +135,7 @@ def search_list(item):
next_page = scrapertools.find_single_match(data, '<link rel=next href=(.*?) />')
if next_page:
itemlist.append(Item(channel=item.channel, action="search_list", title='>> Pagina Siguiente', url=next_page,
thumbnail=config.get_thumb("thumb_next.png")))
thumbnail = get_thumb('thumb_next.png')))
return itemlist
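
The get_thumb to serie_thumb rename in this last file avoids shadowing the get_thumb imported from channelselector: a local def get_thumb(itemlist) rebinds the name for the whole module, so get_thumb('thumb_next.png') would hit the wrong function. A minimal, self-contained reproduction of the collision (paths hypothetical):

def get_thumb(name):
    # stand-in for channelselector.get_thumb: maps a name to a thumbnail path
    return "resources/media/" + name

def get_thumb(itemlist):
    # redefining the same name shadows the helper above for the whole module
    return [item for item in itemlist]

# get_thumb("thumb_next.png") now iterates the string character by character
# instead of returning a path; renaming the helper (serie_thumb) fixes this.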