From 4ad5760e937610a55fe6deed870d099ba7f2d9d5 Mon Sep 17 00:00:00 2001
From: alfa-addon
Date: Fri, 1 Sep 2017 20:17:58 -0400
Subject: [PATCH] fixes

---
 plugin.video.alfa/channels/divxtotal.py  |   6 +-
 plugin.video.alfa/channels/newpct1.py    | 438 +++++++++++++++--------
 plugin.video.alfa/channels/tvseriesdk.py |   8 +-
 3 files changed, 294 insertions(+), 158 deletions(-)
 mode change 100755 => 100644 plugin.video.alfa/channels/divxtotal.py

diff --git a/plugin.video.alfa/channels/divxtotal.py b/plugin.video.alfa/channels/divxtotal.py
old mode 100755
new mode 100644
index 9a7746e3..889bd667
--- a/plugin.video.alfa/channels/divxtotal.py
+++ b/plugin.video.alfa/channels/divxtotal.py
@@ -232,7 +232,6 @@ def findtemporadas(item):
         th.start()
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
-
     if len(item.extra.split("|")):
         if len(item.extra.split("|")) >= 4:
             fanart = item.extra.split("|")[2]
@@ -266,7 +265,7 @@ def findtemporadas(item):
             fanart_extra = item.fanart
             fanart_info = item.fanart

-    bloque_episodios = scrapertools.find_multiple_matches(data, 'Temporada (\d+) (.*?)')
+    bloque_episodios = scrapertools.find_multiple_matches(data, 'Temporada.*?(\d+).*?<\/a>(.*?)<\/table>')
     for temporada, bloque_epis in bloque_episodios:
         item.infoLabels = item.InfoLabels
         item.infoLabels['season'] = temporada
@@ -299,9 +298,8 @@ def epis(item):
     itemlist = []
     if item.extra == "serie_add":
         item.url = item.datalibrary
-    patron = scrapertools.find_multiple_matches(item.url,
-                                                '.*?(\d+x\d+).*?td>')
+    patron = scrapertools.find_multiple_matches(item.url, '.*?(\d+x\d+).*?td>')
     for idioma, url, epi in patron:
         episodio = scrapertools.find_single_match(epi, '\d+x(\d+)')
         item.infoLabels['episode'] = episodio
diff --git a/plugin.video.alfa/channels/newpct1.py b/plugin.video.alfa/channels/newpct1.py
index 0d613847..d5d573e4 100644
--- a/plugin.video.alfa/channels/newpct1.py
+++ b/plugin.video.alfa/channels/newpct1.py
@@ -8,7 +8,9 @@ from core import scrapertools
 from core import servertools
 from core.item import Item
 from platformcode import config, logger
+from core import tmdb

+host = 'http://newpct1.com/'

 def mainlist(item):
     logger.info()
@@ -17,13 +19,15 @@ def mainlist(item):

     thumb_pelis=get_thumb("channels_movie.png")
     thumb_series=get_thumb("channels_tvshow.png")
+    thumb_search = get_thumb("search.png")

-    itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url="http://www.newpct1.com/",
+    itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=host,
                          extra="peliculas", thumbnail=thumb_pelis ))
-    itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url="http://www.newpct1.com/", extra="series",
+    itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=host, extra="series",
                          thumbnail=thumb_series))
-    # itemlist.append(Item(channel=item.channel, action="search", title="Buscar"))
+    itemlist.append(
+        Item(channel=item.channel, action="search", title="Buscar", url=host + "buscar", thumbnail=thumb_search))

     return itemlist

@@ -96,7 +100,7 @@ def alfabeto(item):
         title = scrapedtitle.upper()
         url = scrapedurl

-        itemlist.append(Item(channel=item.channel, action="completo", title=title, url=url, extra=item.extra))
+        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra=item.extra))

     return itemlist

@@ -105,13 +109,23 @@ def listado(item):
     logger.info()
     # logger.info("[newpct1.py] listado url=" + item.url)
     itemlist = []
+    url_next_page =''

     data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
     data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
+    #logger.debug(data)
+    logger.debug('item.modo: %s'%item.modo)
+    logger.debug('item.extra: %s'%item.extra)

-    patron = '<ul class="' + item.extra + '">(.*?)</ul>'
-    logger.debug("patron=" + patron)
-    fichas = scrapertools.get_match(data, patron)
+    if item.modo != 'next' or item.modo =='':
+        logger.debug('item.title: %s'% item.title)
+        patron = '<ul class="' + item.extra + '">(.*?)</ul>'
+        logger.debug("patron=" + patron)
+        fichas = scrapertools.get_match(data, patron)
+        page_extra = item.extra
+    else:
+        fichas = data
+        page_extra = item.extra
     # <li><a href="..." title="Descargar XMen Dias Del Futuro gratis"><img src="...jpg"/>
     #     <h2>XMen Dias Del Futuro</h2><span>BluRayRip AC3 5.1</span></a></li>
     patron = '<li><a href="([^"]+).*?'  # url
     patron += 'title="([^"]+).*?'  # titulo
     patron += '<img src="([^"]+)".*?'  # thumbnail
     patron += '<span>([^<]+)'  # calidad

     matches = re.compile(patron, re.DOTALL).findall(fichas)
+    if len(matches) > 30:
+        url_next_page = item.url
+        matches = matches[:30]
+        next_page = 'b'
+        modo = 'continue'
+    else:
+        matches = matches[30:]
+        next_page = 'a'
+        patron_next_page = '<a href="([^"]+)">Next<\/a>'
+        matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
+        modo = 'continue'
+        if len(matches_next_page) > 0:
+            url_next_page = matches_next_page[0]
+            modo = 'next'

     for scrapedurl, scrapedtitle, scrapedthumbnail, calidad in matches:
         url = scrapedurl
@@ -127,33 +160,17 @@ def listado(item):
         thumbnail = scrapedthumbnail
         action = "findvideos"
         extra = ""
-
+        year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
         if "1.com/series" in url:
-            action = "completo"
+            action = "episodios"
             extra = "serie"
+            title = scrapertools.find_single_match(title, '([^-]+)')
             title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace("Ver en linea", "", 1).strip()
             # logger.info("[newpct1.py] titulo="+title)
-            '''
-            if len(title)>3:
-                url_i = 'http://www.newpct1.com/index.php?page=buscar&url=&letter=&q=%22' + title.replace(" ","%20") + '%22'
-            else:
-                url_i = 'http://www.newpct1.com/index.php?page=buscar&url=&letter=&q=' + title
-            if "1.com/series-hd" in url:
-                extra="serie-hd"
-                url = url_i + '&categoryID=&categoryIDR=1469&calidad=' + calidad.replace(" ","+") #DTV+720p+AC3+5.1
-            elif "1.com/series-vo" in url:
-                extra="serie-vo"
-                url = url_i + '&categoryID=&categoryIDR=775&calidad=' + calidad.replace(" ","+") #HDTV+720p+AC3+5.1
-            elif "1.com/series/" in url:
-                extra="serie-tv"
-                url = url_i + '&categoryID=&categoryIDR=767&calidad=' + calidad.replace(" ","+")
-
-            url += '&idioma=&ordenar=Nombre&inon=Descendente'
-            '''
         else:
             title = title.replace("Descargar", "", 1).strip()
             if title.endswith("gratis"): title = title[:-7]
@@ -164,9 +181,10 @@ def listado(item):
             context = ""

         context_title = scrapertools.find_single_match(url, "http://(?:www.)?newpct1.com/(.*?)/(.*?)/")
+        #logger.debug('context_title[0]: %s' % context_title[0])
         if context_title:
             try:
-                context = context_title[0].replace("pelicula", "movie").replace("descargar", "movie").replace("series",
-                                                                                                              "tvshow")
+                context = context_title[0].replace("descargar-", "").replace("pelicula", "movie").replace("series",
+                                                                                                          "tvshow")
                 context_title = context_title[1].replace("-", " ")
                 if re.search('\d{4}', context_title[-4:]):
                     context_title = context_title[:-4]
             except:
                 context_title = show

@@ -176,22 +194,126 @@ def listado(item):
+        logger.debug('contxt title: %s'%context_title)
+        logger.debug('year: %s' % year)
-        itemlist.append(
-            Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail, extra=extra, show=show,
-                 contentTitle=context_title, contentType=context, context=["buscar_trailer"]))
+        logger.debug('context: %s' % context)
+        if not 'array' in title:
+            new_item = Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
+                            extra = extra,
+                            show = context_title, contentTitle=context_title, contentType=context,
+                            context=["buscar_trailer"], infoLabels= {'year':year})
+            if year:
+                tmdb.set_infoLabels_item(new_item, seekTmdb = True)
+            itemlist.append(new_item)

-    if "pagination" in data:
-        patron = '<ul class="pagination">(.*?)</ul>'
-        paginacion = scrapertools.get_match(data, patron)
-        if "Next" in paginacion:
-            url_next_page = scrapertools.get_match(paginacion, '<a href="([^"]+)">Next</a>')
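The rewritten listado() above pages results in halves: a page with more than 30 cards is served 30 at a time (modo 'continue' re-serves the same URL for the second half) before the site's own "Next" link is followed (modo 'next'). A minimal standalone sketch of that scheme, assuming a list of already-parsed matches; the function name, the half flag and the link pattern are illustrative, not the channel's actual code:

    import re

    # Sketch of the half-page pagination idea from listado() above.
    # half='a' means first visit to this URL, 'b' means second half.
    def paginate(matches, url, data, half='a', page_size=30):
        if half == 'a' and len(matches) > page_size:
            # Long page: serve the first 30 and keep the same URL so the
            # second half can be served on the next call ('continue').
            return matches[:page_size], url, 'b', 'continue'
        # Second half, or a short page: serve the rest and follow the
        # site's real pagination link when one exists ('next').
        rest = matches[page_size:] if half == 'b' else matches
        next_links = re.findall(r'<a href="([^"]+)"[^>]*>Next</a>', data)
        if next_links:
            return rest, next_links[0], 'a', 'next'
        return rest, '', 'a', 'continue'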
         '''if len(fanart) > 0:
             item.fanart = fanart[0]'''
-        try:
-            from core.tmdb import Tmdb
-            oTmdb = Tmdb(texto_buscado=item.show, tipo="tv", idioma_busqueda="es")
-            item.fanart = oTmdb.get_backdrop()
-            item.plot = oTmdb.get_sinopsis()
-            print item.plot
-        except:
-            pass
+        # try:
+        #     from core.tmdb import Tmdb
+        #     oTmdb = Tmdb(texto_buscado=item.show, tipo="tv", idioma_busqueda="es")
+        #     item.fanart = oTmdb.get_backdrop()
+        #     item.plot = oTmdb.get_sinopsis()
+        #     print item.plot
+        # except:
+        #     pass
     else:
         item_title = item.show

@@ -281,109 +404,6 @@ def completo(item):

     return itemlist


-def get_episodios(item):
-    logger.info("url=" + item.url)
-    itemlist = []
-    data = re.sub(r'\n|\r|\t|\s{2}|<!--.*?-->', "", httptools.downloadpage(item.url).data)
-    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
-
-    logger.debug("data=" + data)
-
-    patron = '<ul class="buscar-list">(.*?)</ul>'
-    # logger.info("[newpct1.py] patron=" + patron)
-
-    fichas = scrapertools.get_match(data, patron)
-    # logger.info("[newpct1.py] matches=" + str(len(fichas)))
-    # <li><a href="..." title="Serie Forever 1x01"><img src="..."/><h2>Serie Forever 1x01</h2></a></li>
-    # logger.info("[newpct1.py] get_episodios: " + fichas)
-    patron = '<li[^>]*><a href="([^"]+)".*?'  # url
-    patron += '<img src="([^"]+)".*?'  # thumbnail
-    patron += '<h2[^>]*>(.*?)</h2>'  # titulo, idioma y calidad
-    matches = re.compile(patron, re.DOTALL).findall(fichas)
-
-    for scrapedurl, scrapedthumbnail, scrapedinfo in matches:
-        url = scrapedurl
-        if '<span[^>]*>' in scrapedinfo:
-            # logger.info("[newpct1.py] get_episodios: scrapedinfo="+scrapedinfo)
-            try:
-                # Serie The Big Bang Theory - Temporada 6 - Temporada[ 6 ]Capitulo[ 03 ]Español Castellano Calidad [ HDTV ]
-                patron = '\[\s*(.*?)\].*?'  # temporada
-                patron += '\[\s*(.*?)\].*?'  # capitulo
-                patron += ';([^/]+)'  # idioma
-                info_extra = re.compile(patron, re.DOTALL).findall(scrapedinfo)
-                (temporada, capitulo, idioma) = info_extra[0]
-
-            except:
-                # Serie The Affair Temporada 3 Capitulo 5 - Español Castellano Calidad [ HDTV ]
-                patron = '([^<]+).*?'  # temporada y capitulo
-                patron += '([^<]+)'
-
-                info_extra = re.compile(patron, re.DOTALL).findall(scrapedinfo)
-                (temporada_capitulo, idioma) = info_extra[0]
-                if re.search(r'(?i)Capitulos', temporada_capitulo):
-                    temporada = scrapertools.find_single_match(temporada_capitulo, 'Temp.*?\s*([\d]+)')
-                    cap1, cap2 = scrapertools.find_single_match(temporada_capitulo, 'Cap.*?\s*(\d+).*?(\d+)')
-                    capitulo = ""
-                else:
-                    temporada, capitulo = scrapertools.get_season_and_episode(temporada_capitulo).split('x')

-            # logger.info("[newpct1.py] get_episodios: temporada=" + temporada)
-            # logger.info("[newpct1.py] get_episodios: capitulo=" + capitulo)
-            logger.debug("idioma=" + idioma)
-            if '">' in idioma:
-                idioma = " [" + scrapertools.find_single_match(idioma, '">([^<]+)').strip() + "]"
-            elif ' ' in idioma:
-                idioma = " [" + scrapertools.find_single_match(idioma, ' ([^<]+)').strip() + "]"
-            '''else:
-                idioma=""'''
-            if capitulo:
-                title = item.title + " (" + temporada.strip() + "x" + capitulo.strip() + ") " + idioma
-            else:
-                title = item.title + " (Del %sx%s al %sx%s) %s" % (temporada, cap1, temporada, cap2, idioma)
-        else:
-            # The Big Bang Theory - Temporada 6 [HDTV][Cap.602][Español Castellano]
-            # The Beast - Temporada 1 [HDTV] [Capítulo 13] [Español]
-            # The Beast - Temp.1 [DVD-DVB][Cap.103][Spanish]
-            try:
-                temp, cap = scrapertools.get_season_and_episode(scrapedinfo).split('x')
-            except:
-                # Formatear temporadaXepisodio
-                patron = re.compile('Cap.*?\s*([\d]+)', re.IGNORECASE)
-                info_extra = patron.search(scrapedinfo)
-
-                if len(str(info_extra.group(1))) >= 3:
-                    cap = info_extra.group(1)[-2:]
-                    temp = info_extra.group(1)[:-2]
-                else:
-                    cap = info_extra.group(1)
-                    patron = 'Temp.*?\s*([\d]+)'
-                    temp = re.compile(patron, re.IGNORECASE).search(scrapedinfo).group(1)
-
-            title = item.title + " (" + temp + 'x' + cap + ")"
-
-        # logger.info("[newpct1.py] get_episodios: fanart= " +item.fanart)
-        itemlist.append(
-            Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=item.thumbnail,
-                 show=item.show, fanart=item.fanart))
-    except:
-        logger.error("ERROR al añadir un episodio")
-
-    if "pagination" in data:
-        patron = '<ul class="pagination">(.*?)</ul>'
-        paginacion = scrapertools.get_match(data, patron)
-        # logger.info("[newpct1.py] get_episodios: paginacion= " + paginacion)
-        if "Next" in paginacion:
-            url_next_page = "http" + scrapertools.get_match(paginacion, '<a href="([^"]+)">Next</a>')
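The deleted get_episodios() above derived season and episode from the "Cap." number in old-style titles: three or more digits mean the last two are the episode and the leading digit(s) the season; otherwise the season comes from a separate "Temp.N" marker. A small sketch of that rule, using a sample title from the comments above (hypothetical helper, not part of the channel):

    import re

    def season_episode_from_cap(info):
        # "Cap.103" -> season "1", episode "03"; "Cap.1203" -> "12", "03"
        digits = re.search(r'Cap.*?\s*(\d+)', info, re.IGNORECASE).group(1)
        if len(digits) >= 3:
            return digits[:-2], digits[-2:]
        # Short form: the whole number is the episode; season comes from "Temp.N"
        temp = re.search(r'Temp.*?\s*(\d+)', info, re.IGNORECASE).group(1)
        return temp, digits

    print(season_episode_from_cap("The Beast - Temp.1 [DVD-DVB][Cap.103][Spanish]"))  # ('1', '03')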
+        if "<span" in info:  # new style
+            pattern = ".*?Temporada\s*(?P<season>\d+).*?Capitulos?\s*(?P<episode>\d+)(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)\s*Calidad\s*<span[^>]+>" \
+                      "[\[]\s*(?P<quality>.*?)\s*[\]]"
+            r = re.compile(pattern)
+            match = [m.groupdict() for m in r.finditer(info)][0]
+
+            if match["episode2"]:
+                multi = True
+                title = "%s (%sx%s-%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
+                                                    str(match["episode2"]).zfill(2), match["lang"],
+                                                    match["quality"])
+            else:
+                multi = False
+                title = "%s (%sx%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
+                                                 match["lang"], match["quality"])
+
+        else:  # old style
+            pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+)(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
+                      "(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"
+
+            r = re.compile(pattern)
+            match = [m.groupdict() for m in r.finditer(info)][0]
+            # logger.debug("data %s" % match)
+
+            str_lang = ""
+            if match["lang"] is not None:
+                str_lang = "[%s]" % match["lang"]
+
+            if match["season2"] and match["episode2"]:
+                multi = True
+                if match["season"] == match["season2"]:
+                    title = "%s (%sx%s-%s) %s[%s]" % (item.show, match["season"], match["episode"],
+                                                      match["episode2"], str_lang, match["quality"])
+                else:
+                    title = "%s (%sx%s-%sx%s) %s[%s]" % (item.show, match["season"], match["episode"],
+                                                         match["season2"], match["episode2"], str_lang,
+                                                         match["quality"])
+            else:
+                title = "%s (%sx%s) %s[%s]" % (item.show, match["season"], match["episode"], str_lang,
+                                               match["quality"])
+                multi = False
+
+        season = match['season']
+        episode = match['episode']
+        infoLabels['season'] = season
+        infoLabels['episode'] = episode
+        itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
+                             quality=item.quality, multi=multi, contentSeason=season,
+                             contentEpisodeNumber=episode, infoLabels=infoLabels))
+
+    # order list
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+    if len(itemlist) > 1:
+        return sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))
+
+    return itemlist
+
+
+def search(item, texto):
+    logger.info("search:" + texto)
+    # texto = texto.replace(" ", "+")
+
+    try:
+        item.post = "q=%s" % texto
+        item.pattern = "buscar-list"
+        itemlist = listado2(item)
+
+        return itemlist
+
+    # Se captura la excepción, para no interrumpir al buscador global si un canal falla
+    except:
+        import sys
+        for line in sys.exc_info():
+            logger.error("%s" % line)
+        return []
diff --git a/plugin.video.alfa/channels/tvseriesdk.py b/plugin.video.alfa/channels/tvseriesdk.py
index 44b9e6b9..26a9771c 100644
--- a/plugin.video.alfa/channels/tvseriesdk.py
+++ b/plugin.video.alfa/channels/tvseriesdk.py
@@ -4,7 +4,7 @@
 # -*- By the Alfa Develop Group -*-

 import re
-
+from channelselector import get_thumb
 from core import httptools
 from core import scrapertools
 from core import servertools
@@ -66,7 +66,7 @@ def list_all(item):
                              plot=plot, contentSerieName=contentSerieName))

-    itemlist = get_thumb(templist)
+    itemlist = serie_thumb(templist)
     # Paginación
     if url_next_page:
         itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, next_page=next_page, i=item.i))
@@ -110,7 +110,7 @@ def episodios(item):
     return itemlist


-def get_thumb(itemlist):
+def serie_thumb(itemlist):
     logger.info()
     for item in itemlist:
         data = get_source(item.url)
@@ -135,7 +135,7 @@ def search_list(item):
     next_page = scrapertools.find_single_match(data, '')
     if next_page:
         itemlist.append(Item(channel=item.channel, action="search_list", title='>> Pagina Siguiente', url=next_page,
-                             thumbnail=config.get_thumb("thumb_next.png")))
+                             thumbnail = get_thumb('thumb_next.png')))

     return itemlist
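The old-style branch added in the new episodios() relies on a single named-group pattern to pull quality, season(s), episode(s) and language out of a bracketed title. A quick worked example of how it digests a multi-episode entry; the pattern is the one from the hunk above, while the sample string and the driver code are illustrative only:

    import re

    pattern = (r"\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+)(?P<episode>\d{2})"
               r"(?:_(?P<season2>\d+)(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?")
    info = "The Big Bang Theory - Temporada 6 [HDTV][Cap.602_603][Espanol Castellano]"
    # First match carries all the groups; groupdict() gives them by name.
    match = [m.groupdict() for m in re.finditer(pattern, info)][0]
    print(match)
    # {'quality': 'HDTV', 'season': '6', 'episode': '02',
    #  'season2': '6', 'episode2': '03', 'lang': 'Espanol Castellano'}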