diff --git a/plugin.video.alfa/channels/descargas2020.json b/plugin.video.alfa/channels/descargas2020.json
index d413b2e0..be4f4191 100755
--- a/plugin.video.alfa/channels/descargas2020.json
+++ b/plugin.video.alfa/channels/descargas2020.json
@@ -10,7 +10,8 @@
         "movie",
         "tvshow",
         "anime",
-        "torrent"
+        "torrent",
+        "documentary"
     ],
     "settings": [
         {
@@ -21,6 +22,22 @@
             "enabled": true,
             "visible": true
         },
+        {
+            "id": "include_in_newest_peliculas",
+            "type": "bool",
+            "label": "Incluir en Novedades - Peliculas",
+            "default": true,
+            "enabled": true,
+            "visible": true
+        },
+        {
+            "id": "include_in_newest_series",
+            "type": "bool",
+            "label": "Incluir en Novedades - Episodios de series",
+            "default": true,
+            "enabled": true,
+            "visible": true
+        },
         {
             "id": "include_in_newest_torrent",
             "type": "bool",
diff --git a/plugin.video.alfa/channels/descargas2020.py b/plugin.video.alfa/channels/descargas2020.py
index 3b8613d1..e6902ffd 100644
--- a/plugin.video.alfa/channels/descargas2020.py
+++ b/plugin.video.alfa/channels/descargas2020.py
@@ -1,4 +1,4 @@
-# -*- coding: utf-8 -*-
+# -*- coding: utf-8 -*-
 
 import re
 
@@ -10,7 +10,7 @@ from core.item import Item
 from platformcode import config, logger
 from core import tmdb
 
-host = 'http://descargas2020.com/'  # Cambiar manualmente "xx" en línea 287 ".com/xx/library" por tl para descargas2020, tr para descargas2020, d20 para descargas2020
+host = 'http://descargas2020.com/'
 
 def mainlist(item):
     logger.info()
@@ -26,7 +26,7 @@ def mainlist(item):
 
     itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=host, extra="series",
                          thumbnail=thumb_series))
-
+    itemlist.append(Item(channel=item.channel, action="submenu", title="Documentales", url=host, extra="varios",
+                         thumbnail=thumb_series))
 
     itemlist.append(
@@ -40,12 +40,17 @@ def submenu(item):
 
     data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
     data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
+    data = data.replace("'", "\"").replace("/series\"", "/series/\"")    #Compatibilidad con mispelisy.series.com
 
     #patron = '<li>.*?<ul>(.*?)</ul>'
-    patron = '<li>.*?<ul>(.*?)</ul>'    #Filtrado por url
-    data = scrapertools.get_match(data, patron)
+    patron = '<li><.*?href="'+item.url+item.extra + '/">.*?<ul>(.*?)</ul>'    #Filtrado por url, compatibilidad con mispelisy.series.com
+    #logger.debug("patron: " + patron + " / data: " + data)
+    if "pelisyseries.com" in host and item.extra == "varios":    #compatibilidad con mispelisy.series.com
+        data = '<a href="' + host + 'varios/"> Documentales</a>'
+    else:
+        data = scrapertools.get_match(data, patron)
 
-    patron = '<a href="([^"]+)">([^>]+)'
+    patron = '<.*?href="([^"]+)".*?>([^>]+)'
     matches = re.compile(patron, re.DOTALL).findall(data)
 
     for scrapedurl, scrapedtitle in matches:
@@ -55,12 +60,12 @@ def submenu(item):
         itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra="pelilist"))
         itemlist.append(
             Item(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url, extra="pelilist"))
-
+
     if item.extra == "peliculas":
         itemlist.append(Item(channel=item.channel, action="listado", title="Películas 4K", url=host + "peliculas-hd/4kultrahd/", extra="pelilist"))
         itemlist.append(
             Item(channel=item.channel, action="alfabeto", title="Películas 4K" + " [A-Z]", url=host + "peliculas-hd/4kultrahd/", extra="pelilist"))
-
+
     return itemlist
@@ -91,16 +96,16 @@ def listado(item):
     itemlist = []
     url_next_page =''
 
-    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
+    #data = httptools.downloadpage(item.url).data
     data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
-    #logger.debug(data)
+
     logger.debug('item.modo: %s'%item.modo)
     logger.debug('item.extra: %s'%item.extra)
 
     if item.modo != 'next' or item.modo =='':
         logger.debug('item.title: %s'% item.title)
         patron = '<ul class="' + item.extra + '">(.*?)</ul>'
-        logger.debug("patron=" + patron)
         fichas = scrapertools.get_match(data, patron)
         page_extra = item.extra
     else:
@@ -109,11 +114,13 @@ def listado(item):
     patron = ''
-        real_title = scrapertools.find_single_match(title, r'(.*?)<\/b><\/font>')
+        real_title = scrapertools.find_single_match(title, r'(.*?)Temporada.*?<\/strong>') #series
+        if real_title == "":
+            real_title = scrapertools.find_single_match(title, r'(.*?)\[.*?]') #movies
         real_title = scrapertools.remove_htmltags(real_title).decode('iso-8859-1').encode('utf-8')
+        real_title = scrapertools.htmlclean(real_title)
+        calidad = scrapertools.find_single_match(title, r'.*?\s*Calidad.*?<span[^>]+>[\[]\s*(?P<quality>.*?)\s*[\]]<\/span>') #series
+        if calidad == "":
+            calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])') #movies
+        year = scrapertools.find_single_match(thumb, r'-(\d{4})')
+
+        # fix encoding for title
         title = scrapertools.htmlclean(title)
-        title = title.replace("�", "ñ")
+        title = title.replace("�", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng")
+        title = re.sub(r'(Calidad.*?\])', '', title)
+
+        if real_title == "":
+            real_title = title
+        if calidad == "":
+            calidad = title
+
+        context = "movie"
         # no mostramos lo que no sean videos
-        if "/juego/" in url or "/varios/" in url:
+        if "juego/" in url:
             continue
 
-        if ".com/series" in url:
+        # Codigo para rescatar lo que se puede en pelisy.series.com de Series para la Videoteca. la URL apunta al capítulo y no a la Serie. Nombre de Serie frecuentemente en blanco. Se obtiene de Thumb, así como el id de la serie
+        if ("/serie" in url or "-serie" in url) and "pelisyseries.com" in host:
+            calidad_mps = "series/"
+            if "seriehd" in url:
+                calidad_mps = "series-hd/"
+            if "serievo" in url:
+                calidad_mps = "series-vo/"
+            if "serie-vo" in url:
+                calidad_mps = "series-vo/"
+
+            real_title_mps = re.sub(r'.*?\/\d+_', '', thumb)
+            real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps)
+
+            if "/0_" not in thumb:
+                serieid = scrapertools.find_single_match(thumb, r'.*?\/\w\/(?P<serieid>\d+).*?.*')
+                if len(serieid) > 5:
+                    serieid = ""
+            else:
+                serieid = ""
+
+            url = host + calidad_mps + real_title_mps + "/" + serieid
+
+            real_title_mps = real_title_mps.replace("-", " ")
+            #logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / real_title_mps: " + real_title_mps + " / calidad_mps : " + calidad_mps)
+            real_title = real_title_mps
+
+        show = real_title
-        show = real_title
+        if ".com/serie" in url and "/miniseries" not in url:
+
+            context = "tvshow"
-            itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb,
-                                 context=["buscar_trailer"], contentSerieName=show))
+            itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb, quality=calidad,
+                                 show=show, extra="serie", context=["buscar_trailer"], contentType=context, contentTitle=real_title, contentSerieName=real_title, infoLabels= {'year':year}))
         else:
-            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
-                                 context=["buscar_trailer"]))
-
+            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, quality=calidad,
+                                 show=show, context=["buscar_trailer"], contentType=context, contentTitle=real_title, infoLabels= {'year':year}))
+
+        logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / show: " + show + " / calidad: " + calidad)
+
+    tmdb.set_infoLabels(itemlist, True)
+
     if post:
         itemlist.append(item.clone(channel=item.channel, action="listado_busqueda", title=">> Página siguiente",
                                    thumbnail=get_thumb("next.png")))
@@ -253,7 +314,6 @@ def listado_busqueda(item):
 def findvideos(item):
     logger.info()
     itemlist = []
-
     ## Cualquiera de las tres opciones son válidas
     # item.url = item.url.replace(".com/",".com/ver-online/")
     # item.url = item.url.replace(".com/",".com/descarga-directa/")
 
@@ -263,32 +323,36 @@ def findvideos(item):
     data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
     data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
     data = data.replace("$!", "#!").replace("'", "\"").replace("ñ", "ñ").replace("//pictures", "/pictures")
-
-    title = scrapertools.find_single_match(data, "<h1><strong>([^<]+)<\/strong>[^<]+<\/h1>")
-    title += scrapertools.find_single_match(data, "<h1><strong>[^<]+<\/strong>([^<]+)<\/h1>")
-    caratula = scrapertools.find_single_match(data, '<div class="entry-left">.*?src="([^"]+)"')
-
-    #Descarga tu Archivo torrent!
-
+
+    title = scrapertools.find_single_match(data, "<h1><strong>([^<]+)<\/strong>.*?<\/h1>") #corregido para adaptarlo a mispelisy.series.com
+    title += scrapertools.find_single_match(data, "<h1><strong>[^<]+<\/strong>([^<]+)<\/h1>") #corregido para adaptarlo a mispelisy.series.com
+    #caratula = scrapertools.find_single_match(data, '<div class="entry-left">.*?src="([^"]+)"')
+    caratula = scrapertools.find_single_match(data, '<img src="([^"]+)"[^>]+>.*?')
 
     #patron_ver = ']+>.*?'
@@ -309,37 +373,48 @@ def findvideos(item):
     patron = '<div class="box1"><img src="([^"]+)".*?<\/div[^<]+'
     patron += '<div class="box2">([^<]+)?<\/div[^<]+<div class="box3">([^<]+)?'
     patron += '<\/div[^<]+<div class="box4">([^<]+)?<\/div[^<]+'
 
+    if len(enlaces_ver) > 0:
+        itemlist.append(item.clone(title="", action="", folder=False))
+        itemlist.append(item.clone(title=" Enlaces Ver: ", action="", text_color="green", folder=False))
 
     for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver:
         if "Ver" in titulo:
             servidor = servidor.replace("streamin", "streaminto")
-            titulo = titulo + " [" + servidor + "]"
+            titulo = title
             mostrar_server = True
             if config.get_setting("hidepremium"):
                 mostrar_server = servertools.is_server_enabled(servidor)
+            logger.debug("url: " + enlace + " / title: " + title + " / servidor: " + servidor + " / idioma: " + idioma)
             if mostrar_server:
                 try:
                     devuelve = servertools.findvideosbyserver(enlace, servidor)
                     if devuelve:
                         enlace = devuelve[0][1]
                         itemlist.append(
-                            Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo,
-                                 fulltitle=item.title, url=enlace, thumbnail=logo, plot=item.plot, folder=False))
+                            Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo, quality=titulo,
+                                 fulltitle=titulo, url=enlace, thumbnail=logo, plot=item.plot, folder=False))
                 except:
                     pass
 
+    if len(enlaces_descargar) > 0:
+        itemlist.append(item.clone(title="", action="", folder=False))
+        itemlist.append(item.clone(title=" Enlaces Descargar: ", action="", text_color="green", folder=False))
+
     for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar:
         if "Ver" not in titulo:
             servidor = servidor.replace("uploaded", "uploadedto")
             partes = enlace.split(" ")
+            titulo = "Partes "
             p = 1
+            logger.debug("url: " + enlace + " / title: " + title + " / servidor: " + servidor + " / idioma: " + idioma)
             for enlace in partes:
-                parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " [" + servidor + "]"
+                parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " - " + title
                 p += 1
                 mostrar_server = True
                 if config.get_setting("hidepremium"):
@@ -349,11 +424,12 @@ def findvideos(item):
                     devuelve = servertools.findvideosbyserver(enlace, servidor)
                     if devuelve:
                         enlace = devuelve[0][1]
-                        itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor,
-                                             title=parte_titulo, fulltitle=item.title, url=enlace, thumbnail=logo,
+                        itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, quality=parte_titulo,
+                                             title=parte_titulo, fulltitle=parte_titulo, url=enlace, thumbnail=logo,
                                              plot=item.plot, folder=False))
                 except:
                     pass
+
     return itemlist
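The download branch above turns one space-separated enlace into several playable parts, each titled "Partes (n/m) - <title>". The numbering logic in isolation, as a runnable sketch with made-up sample values:

enlace = "http://example.host/part1.rar http://example.host/part2.rar http://example.host/part3.rar"
title = "Pelicula [HDRip]"

partes = enlace.split(" ")
titulo = "Partes "
p = 1
for parte in partes:
    parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " - " + title
    p += 1
    print(parte_titulo, parte)
# Partes  (1/3) - Pelicula [HDRip] http://example.host/part1.rar
# ... up to (3/3)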
@@ -363,6 +439,8 @@ def episodios(item):
     infoLabels = item.infoLabels
     data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
     data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
+    calidad = item.quality
+
     pattern = '<ul class="%s">(.*?)</ul>' % "pagination"  # item.pattern
     pagination = scrapertools.find_single_match(data, pattern)
     if pagination:
@@ -381,22 +459,43 @@ def episodios(item):
         logger.debug("Loading page %s/%s url=%s" % (index, len(list_pages), page))
         data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data)
         data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
-
+        data = data.replace("chapters", "buscar-list")    #Compatibilidad con mispelisy.series.com
         pattern = '<ul class="%s">(.*?)</ul>' % "buscar-list"  # item.pattern
         data = scrapertools.get_match(data, pattern)
+        #logger.debug("data: " + data)
 
-        pattern = '<li[^>]*><a href="(?P<url>[^"]+)".*?<img src="(?P<thumb>[^"]+)".*?<h2[^>]+>(?P<info>.*?)</h2>'
+        if "pelisyseries.com" in host:
+            pattern = '<li[^>]*><a href="(?P<url>[^"]+)".*?<img src="(?P<thumb>[^"]+)".*?<h3[^>]+>(?P<info>.*?)?<\/h3>.*?<\/li>'
+        else:
+            pattern = '<li[^>]*><a href="(?P<url>[^"]+)".*?<img src="(?P<thumb>[^"]+)".*?<h2[^>]+>(?P<info>.*?)?<\/h2>'
         matches = re.compile(pattern, re.DOTALL).findall(data)
+        #logger.debug("patron: " + pattern)
+        #logger.debug(matches)
+
+        season = "1"
         for url, thumb, info in matches:
+            if "pelisyseries.com" in host:
+                interm = url
+                url = thumb
+                thumb = interm
+
+            if "<span" in info:  # new style
-                pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+).*?Capitulo(?:s)?\s*(?P<episode>\d+)" \
-                          "(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)<\/span>\s*Calidad\s*<span[^>]+>" \
-                          "[\[]\s*(?P<quality>.*?)\s*[\]]"
+                pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)?.*?Capitulo(?:s)?\s*(?P<episode>\d+)?" \
+                          "(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>" \
+                          "[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
+                if "Especial" in info:  # Capitulos Especiales
+                    pattern = ".*?[^>]+>.*?Temporada.*?\[.*?(?P<season>\d+).*?\].*?Capitulo.*?\[\s*(?P<episode>\d+).*?\]?(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
+                logger.debug("patron: " + pattern)
+                logger.debug(info)
                 r = re.compile(pattern)
                 match = [m.groupdict() for m in r.finditer(info)][0]
+
+                if match['season'] is None: match['season'] = season
+                if match['episode'] is None: match['episode'] = "0"
+                if match['quality']: item.quality = match['quality']
+
                 if match["episode2"]:
                     multi = True
                     title = "%s (%sx%s-%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
@@ -408,12 +507,17 @@ def episodios(item):
                                                      match["lang"], match["quality"])
             else:  # old style
-                pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+)(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
+                pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+).*?(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
                           "(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"
-
+                logger.debug("patron: " + pattern)
+                logger.debug(info)
                 r = re.compile(pattern)
                 match = [m.groupdict() for m in r.finditer(info)][0]
-                # logger.debug("data %s" % match)
+                #logger.debug("data %s" % match)
+
+                #if match['season'] is "": match['season'] = season
+                #if match['episode'] is "": match['episode'] = "0"
+                #logger.debug(match)
 
                 str_lang = ""
                 if match["lang"] is not None:
@@ -436,18 +540,19 @@ def episodios(item):
 
             season = match['season']
             episode = match['episode']
+            logger.debug("title: " + title + " / url: " + url + " / calidad: " + item.quality + " / multi: " + str(multi) + " / Season: " + str(season) + " / EpisodeNumber: " + str(episode))
             itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
                                  quality=item.quality, multi=multi, contentSeason=season,
                                  contentEpisodeNumber=episode, infoLabels = infoLabels))
 
-    # order list
+    #tmdb.set_infoLabels(itemlist, True)
     tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
     if len(itemlist) > 1:
         itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))
 
     if config.get_videolibrary_support() and len(itemlist) > 0:
         itemlist.append(
-            item.clone(title="[COLOR orange][B]Añadir esta serie a la videoteca[/B][/COLOR]", action="add_serie_to_library", extra="episodios"))
+            item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios", quality=calidad))
 
     return itemlist
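The episodios() rewrite above keys on named groups whose values may be absent, then backfills them: a missing season reuses the last one seen, a missing episode becomes "0". A reduced, self-contained version of that parsing step; the sample info strings are invented, and the real pattern additionally matches the span markup around these values:

import re

pattern = r"Temporada\s*(?P<season>\d+)?.*?Capitulo(?:s)?\s*(?P<episode>\d+)?(?:.*?(?P<episode2>\d+))?"

season = "1"
for info in ["Temporada 2 Capitulo 5", "Temporada  Capitulos 7 al 8"]:
    match = [m.groupdict() for m in re.finditer(pattern, info)][0]
    if match['season'] is None: match['season'] = season
    if match['episode'] is None: match['episode'] = "0"
    season = match['season']  # carry the season over to the next title
    print(match['season'], match['episode'], match['episode2'])
# 2 5 None
# 2 7 8   <- season inherited from the previous entry, range detected via episode2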
diff --git a/plugin.video.alfa/channels/mispelisyseries.json b/plugin.video.alfa/channels/mispelisyseries.json
index 9d5ae057..73b0db8b 100755
--- a/plugin.video.alfa/channels/mispelisyseries.json
+++ b/plugin.video.alfa/channels/mispelisyseries.json
@@ -9,7 +9,8 @@
     "categories": [
         "torrent",
         "movie",
-        "tvshow"
+        "tvshow",
+        "documentary"
     ],
     "settings": [
         {
diff --git a/plugin.video.alfa/channels/mispelisyseries.py b/plugin.video.alfa/channels/mispelisyseries.py
index ba65968e..0e4d44cd 100644
--- a/plugin.video.alfa/channels/mispelisyseries.py
+++ b/plugin.video.alfa/channels/mispelisyseries.py
@@ -1,137 +1,75 @@
-# -*- coding: utf-8 -*-
+# -*- coding: utf-8 -*-
 
 import re
-import urllib
-import urlparse
 
+from channelselector import get_thumb
 from core import httptools
 from core import scrapertools
 from core import servertools
 from core.item import Item
-from platformcode import logger
-from channelselector import get_thumb
+from platformcode import config, logger
+from core import tmdb
 
 host = 'http://mispelisyseries.com/'
 
+
 def mainlist(item):
     logger.info()
     itemlist = []
-    itemlist.append(Item(channel=item.channel, action="menu", title="Películas", url=host,
-                         extra="Peliculas", folder=True, thumbnail=get_thumb('movies', auto=True)))
+
+    thumb_pelis=get_thumb("channels_movie.png")
+    thumb_series=get_thumb("channels_tvshow.png")
+    thumb_search = get_thumb("search.png")
+
+    itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=host,
+                         extra="peliculas", thumbnail=thumb_pelis ))
+
+    itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=host, extra="series",
+                         thumbnail=thumb_series))
+
+    itemlist.append(Item(channel=item.channel, action="submenu", title="Documentales", url=host, extra="varios",
+                         thumbnail=thumb_series))
 
     itemlist.append(
-        Item(channel=item.channel, action="menu", title="Series", url=host, extra="Series",
-             folder=True, thumbnail=get_thumb('tvshows', auto=True)))
-    itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url=host + 'buscar',
-                         thumbnail=get_thumb('search', auto=True)))
+        Item(channel=item.channel, action="search", title="Buscar", url=host + "buscar", thumbnail=thumb_search))
+
     return itemlist
 
-
-def menu(item):
+def submenu(item):
     logger.info()
     itemlist = []
 
-    data = httptools.downloadpage(item.url).data
-    # logger.info("data="+data)
+    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
+    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
+    data = data.replace("'", "\"").replace("/series\"", "/series/\"")    #Compatibilidad con mispelisy.series.com
 
-    data = scrapertools.find_single_match(data, item.extra + "")
-    # logger.info("data="+data)
+    #patron = '<li>.*?<ul>(.*?)</ul>'
+    patron = '<li><.*?href="'+item.url+item.extra + '/">.*?<ul>(.*?)</ul>'    #Filtrado por url, compatibilidad con mispelisy.series.com
+    #logger.debug("patron: " + patron + " / data: " + data)
+    if "pelisyseries.com" in host and item.extra == "varios":    #compatibilidad con mispelisy.series.com
+        data = '<a href="' + host + 'varios/"> Documentales</a>'
+    else:
+        data = scrapertools.get_match(data, patron)
 
-    patron = "<li><a[^>]+>([^<]+)</a></li>"
+    patron = '<.*?href="([^"]+)".*?>([^>]+)'
     matches = re.compile(patron, re.DOTALL).findall(data)
 
     for scrapedurl, scrapedtitle in matches:
-        title = scrapedtitle
-        url = urlparse.urljoin(item.url, scrapedurl)
-        thumbnail = ""
-        plot = ""
-        itemlist.append(Item(channel=item.channel, action="lista", title=title, url=url, thumbnail=thumbnail, plot=plot,
-                             folder=True))
-
-        if title != "Todas las Peliculas":
-            itemlist.append(
-                Item(channel=item.channel, action="alfabetico", title=title + " [A-Z]", url=url, thumbnail=thumbnail,
-                     plot=plot, folder=True))
-
-        itemlist.append(
-            Item(channel=item.channel, action="alfabetico", title=title + " [A-Z]", url=url, thumbnail=thumbnail,
-                 plot=plot,
-                 folder=True))
-
-    if 'películas' in item.title.lower():
-        new_item = item.clone(title='Peliculas 4K', url=host+'buscar', post='q=4k', action='listado2',
-                              pattern='buscar-list')
-        itemlist.append(new_item)
-
+        title = scrapedtitle.strip()
+        url = scrapedurl
+        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra="pelilist"))
+        itemlist.append(
+            Item(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url, extra="pelilist"))
+
+    if item.extra == "peliculas":
+        itemlist.append(Item(channel=item.channel, action="listado", title="Películas 4K", url=host + "peliculas-hd/4kultrahd/", extra="pelilist"))
+        itemlist.append(
+            Item(channel=item.channel, action="alfabeto", title="Películas 4K" + " [A-Z]", url=host + "peliculas-hd/4kultrahd/", extra="pelilist"))
+
     return itemlist
 
-def search(item, texto):
-    logger.info("search:" + texto)
-    # texto = texto.replace(" ", "+")
-
-    #try:
-    item.post = "q=%s" % texto
-    item.pattern = "buscar-list"
-    itemlist = listado2(item)
-
-    return itemlist
-
-    # Se captura la excepción, para no interrumpir al buscador global si un canal falla
-    # except:
-    #     import sys
-    #     for line in sys.exc_info():
-    #         logger.error("%s" % line)
-    #     return []
-
-def newest(categoria):
-    itemlist = []
-    item = Item()
-    try:
-        if categoria in ['peliculas', 'torrent']:
-            item.url = host+"peliculas"
-
-        elif categoria == 'series':
-            item.url = host+"series"
-
-        if categoria == '4k':
-            item.url = Host + '/buscar'
-            item.post = 'q=4k'
-            item.pattern = 'buscar-list'
-            action = listado2(item)
-        else:
-            return []
-
-        itemlist = lista(item)
-        if itemlist[-1].title == ">> Página siguiente":
-            itemlist.pop()
-
-        # Esta pagina coloca a veces contenido duplicado, intentamos descartarlo
-        dict_aux = {}
-        for i in itemlist:
-            if not i.url in dict_aux:
-                dict_aux[i.url] = i
-            else:
-                itemlist.remove(i)
-
-    # Se captura la excepción, para no interrumpir al canal novedades si un canal falla
-    except:
-        import sys
-        for line in sys.exc_info():
-            logger.error("{0}".format(line))
-        return []
-
-    # return dict_aux.values()
-    return itemlist
-
-
-def alfabetico(item):
+def alfabeto(item):
     logger.info()
     itemlist = []
errors="replace").encode("utf-8") + + logger.debug('item.modo: %s'%item.modo) + logger.debug('item.extra: %s'%item.extra) + + if item.modo != 'next' or item.modo =='': + logger.debug('item.title: %s'% item.title) + patron = '
      (.*?)
    ' + fichas = scrapertools.get_match(data, patron) + page_extra = item.extra + else: + fichas = data + page_extra = item.extra - bloque = scrapertools.find_single_match(data, '(?:' + #patron_ver = '
    ]+>.*?' + + #match_ver = scrapertools.find_single_match(data, patron_ver) + #match_descargar = scrapertools.find_single_match(data, patron_descargar) + + #patron = '
    0: + itemlist.append(item.clone(title="", action="", folder=False)) + itemlist.append(item.clone(title=" Enlaces Ver: ", action="", text_color="green", folder=False)) + + for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver: + if "Ver" in titulo: + servidor = servidor.replace("streamin", "streaminto") + titulo = title + mostrar_server = True + if config.get_setting("hidepremium"): + mostrar_server = servertools.is_server_enabled(servidor) + logger.debug("url: " + enlace + " / title: " + title + " / servidor: " + servidor + " / idioma: " + idioma) + if mostrar_server: + try: + devuelve = servertools.findvideosbyserver(enlace, servidor) + if devuelve: + enlace = devuelve[0][1] + itemlist.append( + Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo, quality=titulo, + fulltitle=titulo, url=enlace, thumbnail=logo, plot=item.plot, folder=False)) + except: + pass + + if len(enlaces_descargar) > 0: + itemlist.append(item.clone(title="", action="", folder=False)) + itemlist.append(item.clone(title=" Enlaces Descargar: ", action="", text_color="green", folder=False)) + + for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar: + if "Ver" not in titulo: + servidor = servidor.replace("uploaded", "uploadedto") + partes = enlace.split(" ") + titulo = "Partes " + p = 1 + logger.debug("url: " + enlace + " / title: " + title + " / servidor: " + servidor + " / idioma: " + idioma) + for enlace in partes: + parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " - " + title + p += 1 + mostrar_server = True + if config.get_setting("hidepremium"): + mostrar_server = servertools.is_server_enabled(servidor) + if mostrar_server: + try: + devuelve = servertools.findvideosbyserver(enlace, servidor) + if devuelve: + enlace = devuelve[0][1] + itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, quality=parte_titulo, + title=parte_titulo, fulltitle=parte_titulo, url=enlace, thumbnail=logo, + plot=item.plot, folder=False)) + except: + pass + + return itemlist def episodios(item): logger.info() itemlist = [] + infoLabels = item.infoLabels + data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data) + data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") + calidad = item.quality + + pattern = '
      (.*?)
    ' % "pagination" # item.pattern + pagination = scrapertools.find_single_match(data, pattern) + if pagination: + pattern = '
  • Last<\/a>' + full_url = scrapertools.find_single_match(pagination, pattern) + url, last_page = scrapertools.find_single_match(full_url, r'(.*?\/pg\/)(\d+)') + list_pages = [item.url] + for x in range(2, int(last_page) + 1): + response = httptools.downloadpage('%s%s'% (url,x)) + if response.sucess: + list_pages.append("%s%s" % (url, x)) + else: + list_pages = [item.url] - # Descarga la pagina - data = httptools.downloadpage(item.url, post=item.extra).data - # logger.info("data="+data) + for index, page in enumerate(list_pages): + logger.debug("Loading page %s/%s url=%s" % (index, len(list_pages), page)) + data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data) + data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") + data = data.replace("chapters", "buscar-list") #Compatibilidad con mispelisy.series.com + pattern = '
      (.*?)
    ' % "buscar-list" # item.pattern + data = scrapertools.get_match(data, pattern) + #logger.debug("data: " + data) - patron = '
    \d+)?)<.+?]+>(?P.*?)?<\/span>\s*Calidad\s*]+>" \ + "[\[]\s*(?P.*?)?\s*[\]]<\/span>" + if "Especial" in info: # Capitulos Especiales + pattern = ".*?[^>]+>.*?Temporada.*?\[.*?(?P\d+).*?\].*?Capitulo.*?\[\s*(?P\d+).*?\]?(?:.*?(?P\d+)?)<.+?]+>(?P.*?)?<\/span>\s*Calidad\s*]+>[\[]\s*(?P.*?)?\s*[\]]<\/span>" + logger.debug("patron: " + pattern) + logger.debug(info) + r = re.compile(pattern) + match = [m.groupdict() for m in r.finditer(info)][0] + + if match['season'] is None: match['season'] = season + if match['episode'] is None: match['episode'] = "0" + if match['quality']: item.quality = match['quality'] + + if match["episode2"]: + multi = True + title = "%s (%sx%s-%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2), + str(match["episode2"]).zfill(2), match["lang"], + match["quality"]) + else: + multi = False + title = "%s (%sx%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2), + match["lang"], match["quality"]) + + else: # old style + pattern = "\[(?P.*?)\].*?\[Cap.(?P\d+).*?(?P\d{2})(?:_(?P\d+)" \ + "(?P\d{2}))?.*?\].*?(?:\[(?P.*?)\])?" + logger.debug("patron: " + pattern) + logger.debug(info) + r = re.compile(pattern) + match = [m.groupdict() for m in r.finditer(info)][0] + #logger.debug("data %s" % match) + + #if match['season'] is "": match['season'] = season + #if match['episode'] is "": match['episode'] = "0" + #logger.debug(match) + + str_lang = "" + if match["lang"] is not None: + str_lang = "[%s]" % match["lang"] + + if match["season2"] and match["episode2"]: + multi = True + if match["season"] == match["season2"]: + + title = "%s (%sx%s-%s) %s[%s]" % (item.show, match["season"], match["episode"], + match["episode2"], str_lang, match["quality"]) + else: + title = "%s (%sx%s-%sx%s) %s[%s]" % (item.show, match["season"], match["episode"], + match["season2"], match["episode2"], str_lang, + match["quality"]) + else: + title = "%s (%sx%s) %s[%s]" % (item.show, match["season"], match["episode"], str_lang, + match["quality"]) + multi = False + + season = match['season'] + episode = match['episode'] + logger.debug("title: " + title + " / url: " + url + " / calidad: " + item.quality + " / multi: " + str(multi) + " / Season: " + str(season) + " / EpisodeNumber: " + str(episode)) + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, + quality=item.quality, multi=multi, contentSeason=season, + contentEpisodeNumber=episode, infoLabels = infoLabels)) + # order list + #tmdb.set_infoLabels(itemlist, True) + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True) + if len(itemlist) > 1: + itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber))) + + if config.get_videolibrary_support() and len(itemlist) > 0: itemlist.append( - Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumbnail, - plot=plot, folder=True)) - - next_page_url = scrapertools.find_single_match(data, "(.*?)
    ') - item.plot = scrapertools.htmlclean(item.plot).strip() - item.contentPlot = item.plot - al_url_fa = scrapertools.find_single_match(data, 'location\.href.*?=.*?"http:\/\/(?:tumejorserie|tumejorjuego).*?link=(.*?)"') - if al_url_fa == "": - al_url_fa = scrapertools.find_single_match(data, 'location\.href.*?=.*?"%s(.*?)" ' % host) - if al_url_fa != "": - al_url_fa = host + al_url_fa - logger.info("torrent=" + al_url_fa) - itemlist.append( - Item(channel=item.channel, action="play", server="torrent", title="Vídeo en torrent", fulltitle=item.title, - url=al_url_fa, thumbnail=servertools.guess_server_thumbnail("torrent"), plot=item.plot, folder=False, - parentContent=item)) + itemlist = listado(item) + if itemlist[-1].title == ">> Página siguiente": + itemlist.pop() + item.url = host+'series/' + itemlist.extend(listado(item)) + if itemlist[-1].title == ">> Página siguiente": + itemlist.pop() - patron = '
-    patron = '<li>.*?<ul>(.*?)</ul>'
-    patron = '<li>.*?<ul>(.*?)</ul>'    #Filtrado por url
-    data = scrapertools.get_match(data, patron)
+    patron = '<li><.*?href="'+item.url+item.extra + '/">.*?<ul>(.*?)</ul>'    #Filtrado por url, compatibilidad con mispelisy.series.com
+    #logger.debug("patron: " + patron + " / data: " + data)
+    if "pelisyseries.com" in host and item.extra == "varios":    #compatibilidad con mispelisy.series.com
+        data = '<a href="' + host + 'varios/"> Documentales</a>'
+    else:
+        data = scrapertools.get_match(data, patron)
 
-    patron = '<a href="([^"]+)">([^>]+)'
+    patron = '<.*?href="([^"]+)".*?>([^>]+)'
     matches = re.compile(patron, re.DOTALL).findall(data)
 
     for scrapedurl, scrapedtitle in matches:
@@ -92,15 +97,15 @@ def listado(item):
     url_next_page =''
 
     data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
+    #data = httptools.downloadpage(item.url).data
     data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
-    #logger.debug(data)
+
     logger.debug('item.modo: %s'%item.modo)
     logger.debug('item.extra: %s'%item.extra)
 
     if item.modo != 'next' or item.modo =='':
         logger.debug('item.title: %s'% item.title)
         patron = '<ul class="' + item.extra + '">(.*?)</ul>'
-        logger.debug("patron=" + patron)
         fichas = scrapertools.get_match(data, patron)
         page_extra = item.extra
     else:
@@ -109,11 +114,13 @@ def listado(item):
     patron = ''
-        real_title = scrapertools.find_single_match(title, r'(.*?)<\/b><\/font>')
+        real_title = scrapertools.find_single_match(title, r'(.*?)Temporada.*?<\/strong>') #series
+        if real_title == "":
+            real_title = scrapertools.find_single_match(title, r'(.*?)\[.*?]') #movies
         real_title = scrapertools.remove_htmltags(real_title).decode('iso-8859-1').encode('utf-8')
+        real_title = scrapertools.htmlclean(real_title)
+        calidad = scrapertools.find_single_match(title, r'.*?\s*Calidad.*?<span[^>]+>[\[]\s*(?P<quality>.*?)\s*[\]]<\/span>') #series
+        if calidad == "":
+            calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])') #movies
+        year = scrapertools.find_single_match(thumb, r'-(\d{4})')
+
+        # fix encoding for title
         title = scrapertools.htmlclean(title)
-        title = title.replace("�", "ñ")
+        title = title.replace("�", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng")
+        title = re.sub(r'(Calidad.*?\])', '', title)
+
+        if real_title == "":
+            real_title = title
+        if calidad == "":
+            calidad = title
+
+        context = "movie"
         # no mostramos lo que no sean videos
-        if "/juego/" in url or "/varios/" in url:
+        if "juego/" in url:
             continue
 
-        if ".com/series" in url:
+        # Codigo para rescatar lo que se puede en pelisy.series.com de Series para la Videoteca. la URL apunta al capítulo y no a la Serie. Nombre de Serie frecuentemente en blanco. Se obtiene de Thumb, así como el id de la serie
+        if ("/serie" in url or "-serie" in url) and "pelisyseries.com" in host:
+            calidad_mps = "series/"
+            if "seriehd" in url:
+                calidad_mps = "series-hd/"
+            if "serievo" in url:
+                calidad_mps = "series-vo/"
+            if "serie-vo" in url:
+                calidad_mps = "series-vo/"
+
+            real_title_mps = re.sub(r'.*?\/\d+_', '', thumb)
+            real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps)
+
+            if "/0_" not in thumb:
+                serieid = scrapertools.find_single_match(thumb, r'.*?\/\w\/(?P<serieid>\d+).*?.*')
+                if len(serieid) > 5:
+                    serieid = ""
+            else:
+                serieid = ""
+
+            url = host + calidad_mps + real_title_mps + "/" + serieid
+
+            real_title_mps = real_title_mps.replace("-", " ")
+            #logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / real_title_mps: " + real_title_mps + " / calidad_mps : " + calidad_mps)
+            real_title = real_title_mps
+
+        show = real_title
-        show = real_title
+        if ".com/serie" in url and "/miniseries" not in url:
+
+            context = "tvshow"
-            itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb,
-                                 context=["buscar_trailer"], contentSerieName=show))
+            itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb, quality=calidad,
+                                 show=show, extra="serie", context=["buscar_trailer"], contentType=context, contentTitle=real_title, contentSerieName=real_title, infoLabels= {'year':year}))
         else:
-            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
-                                 context=["buscar_trailer"]))
-
+            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, quality=calidad,
+                                 show=show, context=["buscar_trailer"], contentType=context, contentTitle=real_title, infoLabels= {'year':year}))
+
+        logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / show: " + show + " / calidad: " + calidad)
+
+    tmdb.set_infoLabels(itemlist, True)
+
     if post:
-        itemlist.append(item.clone(channel=item.channel, action="listado2", title=">> Página siguiente",
+        itemlist.append(item.clone(channel=item.channel, action="listado_busqueda", title=">> Página siguiente",
                                    thumbnail=get_thumb("next.png")))
 
     return itemlist
 
@@ -253,7 +314,6 @@ def listado2(item):
 def findvideos(item):
     logger.info()
     itemlist = []
-
     ## Cualquiera de las tres opciones son válidas
     # item.url = item.url.replace(".com/",".com/ver-online/")
     # item.url = item.url.replace(".com/",".com/descarga-directa/")
 
@@ -263,32 +323,36 @@ def findvideos(item):
     data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
     data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
     data = data.replace("$!", "#!").replace("'", "\"").replace("ñ", "ñ").replace("//pictures", "/pictures")
-
-    title = scrapertools.find_single_match(data, "<h1><strong>([^<]+)<\/strong>[^<]+<\/h1>")
-    title += scrapertools.find_single_match(data, "<h1><strong>[^<]+<\/strong>([^<]+)<\/h1>")
-    caratula = scrapertools.find_single_match(data, '<div class="entry-left">.*?src="([^"]+)"')
-
-    #Descarga tu Archivo torrent!
-
+
+    title = scrapertools.find_single_match(data, "<h1><strong>([^<]+)<\/strong>.*?<\/h1>") #corregido para adaptarlo a mispelisy.series.com
+    title += scrapertools.find_single_match(data, "<h1><strong>[^<]+<\/strong>([^<]+)<\/h1>") #corregido para adaptarlo a mispelisy.series.com
+    #caratula = scrapertools.find_single_match(data, '<div class="entry-left">.*?src="([^"]+)"')
+    caratula = scrapertools.find_single_match(data, '<img src="([^"]+)"[^>]+>.*?')
 
     #patron_ver = ']+>.*?'
@@ -309,37 +373,48 @@ def findvideos(item):
     patron = '<div class="box1"><img src="([^"]+)".*?<\/div[^<]+'
     patron += '<div class="box2">([^<]+)?<\/div[^<]+<div class="box3">([^<]+)?'
     patron += '<\/div[^<]+<div class="box4">([^<]+)?<\/div[^<]+'
 
+    if len(enlaces_ver) > 0:
+        itemlist.append(item.clone(title="", action="", folder=False))
+        itemlist.append(item.clone(title=" Enlaces Ver: ", action="", text_color="green", folder=False))
 
     for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver:
         if "Ver" in titulo:
             servidor = servidor.replace("streamin", "streaminto")
-            titulo = titulo + " [" + servidor + "]"
+            titulo = title
             mostrar_server = True
             if config.get_setting("hidepremium"):
                 mostrar_server = servertools.is_server_enabled(servidor)
+            logger.debug("url: " + enlace + " / title: " + title + " / servidor: " + servidor + " / idioma: " + idioma)
             if mostrar_server:
                 try:
                     devuelve = servertools.findvideosbyserver(enlace, servidor)
                     if devuelve:
                         enlace = devuelve[0][1]
                         itemlist.append(
-                            Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo,
-                                 fulltitle=item.title, url=enlace, thumbnail=logo, plot=item.plot, folder=False))
+                            Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo, quality=titulo,
+                                 fulltitle=titulo, url=enlace, thumbnail=logo, plot=item.plot, folder=False))
                 except:
                     pass
 
+    if len(enlaces_descargar) > 0:
+        itemlist.append(item.clone(title="", action="", folder=False))
+        itemlist.append(item.clone(title=" Enlaces Descargar: ", action="", text_color="green", folder=False))
+
     for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar:
         if "Ver" not in titulo:
             servidor = servidor.replace("uploaded", "uploadedto")
             partes = enlace.split(" ")
+            titulo = "Partes "
             p = 1
+            logger.debug("url: " + enlace + " / title: " + title + " / servidor: " + servidor + " / idioma: " + idioma)
             for enlace in partes:
-                parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " [" + servidor + "]"
+                parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " - " + title
                 p += 1
                 mostrar_server = True
                 if config.get_setting("hidepremium"):
@@ -349,11 +424,12 @@ def findvideos(item):
                     devuelve = servertools.findvideosbyserver(enlace, servidor)
                     if devuelve:
                         enlace = devuelve[0][1]
-                        itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor,
-                                             title=parte_titulo, fulltitle=item.title, url=enlace, thumbnail=logo,
+                        itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, quality=parte_titulo,
+                                             title=parte_titulo, fulltitle=parte_titulo, url=enlace, thumbnail=logo,
                                              plot=item.plot, folder=False))
                 except:
                     pass
+
     return itemlist
 
@@ -363,6 +439,8 @@ def episodios(item):
     infoLabels = item.infoLabels
     data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
     data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
+    calidad = item.quality
+
     pattern = '<ul class="%s">(.*?)</ul>' % "pagination"  # item.pattern
     pagination = scrapertools.find_single_match(data, pattern)
     if pagination:
@@ -381,22 +459,43 @@ def episodios(item):
         logger.debug("Loading page %s/%s url=%s" % (index, len(list_pages), page))
         data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data)
         data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
-
+        data = data.replace("chapters", "buscar-list")    #Compatibilidad con mispelisy.series.com
         pattern = '<ul class="%s">(.*?)</ul>' % "buscar-list"  # item.pattern
         data = scrapertools.get_match(data, pattern)
+        #logger.debug("data: " + data)
 
-        pattern = '<li[^>]*><a href="(?P<url>[^"]+)".*?<img src="(?P<thumb>[^"]+)".*?<h2[^>]+>(?P<info>.*?)</h2>'
+        if "pelisyseries.com" in host:
+            pattern = '<li[^>]*><a href="(?P<url>[^"]+)".*?<img src="(?P<thumb>[^"]+)".*?<h3[^>]+>(?P<info>.*?)?<\/h3>.*?<\/li>'
+        else:
+            pattern = '<li[^>]*><a href="(?P<url>[^"]+)".*?<img src="(?P<thumb>[^"]+)".*?<h2[^>]+>(?P<info>.*?)?<\/h2>'
         matches = re.compile(pattern, re.DOTALL).findall(data)
+        #logger.debug("patron: " + pattern)
+        #logger.debug(matches)
+
+        season = "1"
         for url, thumb, info in matches:
+            if "pelisyseries.com" in host:
+                interm = url
+                url = thumb
+                thumb = interm
+
+            if "<span" in info:  # new style
-                pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+).*?Capitulo(?:s)?\s*(?P<episode>\d+)" \
-                          "(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)<\/span>\s*Calidad\s*<span[^>]+>" \
-                          "[\[]\s*(?P<quality>.*?)\s*[\]]"
+                pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)?.*?Capitulo(?:s)?\s*(?P<episode>\d+)?" \
+                          "(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>" \
+                          "[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
+                if "Especial" in info:  # Capitulos Especiales
+                    pattern = ".*?[^>]+>.*?Temporada.*?\[.*?(?P<season>\d+).*?\].*?Capitulo.*?\[\s*(?P<episode>\d+).*?\]?(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
+                logger.debug("patron: " + pattern)
+                logger.debug(info)
                 r = re.compile(pattern)
                 match = [m.groupdict() for m in r.finditer(info)][0]
+
+                if match['season'] is None: match['season'] = season
+                if match['episode'] is None: match['episode'] = "0"
+                if match['quality']: item.quality = match['quality']
+
                 if match["episode2"]:
                     multi = True
                     title = "%s (%sx%s-%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
@@ -408,12 +507,17 @@ def episodios(item):
                                                      match["lang"], match["quality"])
             else:  # old style
-                pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+)(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
+                pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+).*?(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
                           "(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"
-
+                logger.debug("patron: " + pattern)
+                logger.debug(info)
                 r = re.compile(pattern)
                 match = [m.groupdict() for m in r.finditer(info)][0]
-                # logger.debug("data %s" % match)
+                #logger.debug("data %s" % match)
+
+                #if match['season'] is "": match['season'] = season
+                #if match['episode'] is "": match['episode'] = "0"
+                #logger.debug(match)
 
                 str_lang = ""
                 if match["lang"] is not None:
@@ -436,18 +540,19 @@ def episodios(item):
 
             season = match['season']
             episode = match['episode']
+            logger.debug("title: " + title + " / url: " + url + " / calidad: " + item.quality + " / multi: " + str(multi) + " / Season: " + str(season) + " / EpisodeNumber: " + str(episode))
             itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
                                  quality=item.quality, multi=multi, contentSeason=season,
                                  contentEpisodeNumber=episode, infoLabels = infoLabels))
 
-    # order list
+    #tmdb.set_infoLabels(itemlist, True)
     tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
     if len(itemlist) > 1:
         itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))
 
     if config.get_videolibrary_support() and len(itemlist) > 0:
         itemlist.append(
-            item.clone(title="[COLOR orange][B]Añadir esta serie a la videoteca[/B][/COLOR]", action="add_serie_to_library", extra="episodios"))
+            item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios", quality=calidad))
 
     return itemlist
 
@@ -458,7 +563,7 @@ def search(item, texto):
     try:
         item.post = "q=%s" % texto
         item.pattern = "buscar-list"
-        itemlist = listado2(item)
+        itemlist = listado_busqueda(item)
 
         return itemlist
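Both channels above sort the assembled episode items numerically before offering the "add to videolibrary" entry, because contentSeason and contentEpisodeNumber are still strings at that point and a plain string sort would order "10" before "9". The ordering trick in isolation; Episode here is a hypothetical stand-in for core.item.Item:

from collections import namedtuple

Episode = namedtuple("Episode", "contentSeason contentEpisodeNumber")
items = [Episode("2", "10"), Episode("1", "2"), Episode("2", "9")]

items = sorted(items, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))
print(items)  # 1x2, 2x9, 2x10 -- the int() casts give the intended numeric order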
"include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Peliculas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_series", + "type": "bool", + "label": "Incluir en Novedades - Episodios de series", + "default": true, + "enabled": true, + "visible": true + }, { "id": "include_in_newest_torrent", "type": "bool", @@ -28,6 +45,14 @@ "default": true, "enabled": true, "visible": true + }, + { + "id": "include_in_newest_4k", + "type": "bool", + "label": "Incluir en Novedades - 4K", + "default": true, + "enabled": true, + "visible": true } ] } \ No newline at end of file diff --git a/plugin.video.alfa/channels/torrentrapid.py b/plugin.video.alfa/channels/torrentrapid.py index ae0e174f..fa93fce0 100644 --- a/plugin.video.alfa/channels/torrentrapid.py +++ b/plugin.video.alfa/channels/torrentrapid.py @@ -10,7 +10,7 @@ from core.item import Item from platformcode import config, logger from core import tmdb -host = 'http://torrentrapid.com/' # Cambiar manualmente "xx" en línea 287 ".com/xx/library" por tl para torrentrapid, tr para torrentrapid, d20 para descargas2020 +host = 'http://torrentrapid.com/' def mainlist(item): logger.info() @@ -40,12 +40,17 @@ def submenu(item): data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") + data = data.replace("'", "\"").replace("/series\"", "/series/\"") #Compatibilidad con mispelisy.series.com #patron = '
  • .*?
      (.*?)
    ' - patron = '
  • .*?
      (.*?)
    ' #Filtrado por url - data = scrapertools.get_match(data, patron) + patron = '
  • <.*?href="'+item.url+item.extra + '/">.*?(.*?)' #Filtrado por url, compatibilidad con mispelisy.series.com + #logger.debug("patron: " + patron + " / data: " + data) + if "pelisyseries.com" in host and item.extra == "varios": #compatibilidad con mispelisy.series.com + data = ' Documentales' + else: + data = scrapertools.get_match(data, patron) - patron = '([^>]+)' + patron = '<.*?href="([^"]+)".*?>([^>]+)' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, scrapedtitle in matches: @@ -92,15 +97,15 @@ def listado(item): url_next_page ='' data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) + #data = httptools.downloadpage(item.url).data data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") - #logger.debug(data) + logger.debug('item.modo: %s'%item.modo) logger.debug('item.extra: %s'%item.extra) if item.modo != 'next' or item.modo =='': logger.debug('item.title: %s'% item.title) patron = '
      (.*?)
    ' - logger.debug("patron=" + patron) fichas = scrapertools.get_match(data, patron) page_extra = item.extra else: @@ -109,11 +114,13 @@ def listado(item): patron = '(.*?)<\/b><\/font>') + real_title = scrapertools.find_single_match(title, r'(.*?)Temporada.*?<\/strong>') #series + if real_title == "": + real_title = scrapertools.find_single_match(title, r'(.*?)\[.*?]') #movies real_title = scrapertools.remove_htmltags(real_title).decode('iso-8859-1').encode('utf-8') + real_title = scrapertools.htmlclean(real_title) + calidad = scrapertools.find_single_match(title, r'.*?\s*Calidad.*?]+>[\[]\s*(?P.*?)\s*[\]]<\/span>') #series + if calidad == "": + calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])') #movies + year = scrapertools.find_single_match(thumb, r'-(\d{4})') + + # fix encoding for title title = scrapertools.htmlclean(title) - title = title.replace("�", "ñ") + title = title.replace("�", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng") + title = re.sub(r'(Calidad.*?\])', '', title) + + if real_title == "": + real_title = title + if calidad == "": + calidad = title + context = "movie" # no mostramos lo que no sean videos - if "/juego/" in url or "/varios/" in url: + if "juego/" in url: continue - if ".com/series" in url: + # Codigo para rescatar lo que se puede en pelisy.series.com de Series para la Videoteca. la URL apunta al capítulo y no a la Serie. Nombre de Serie frecuentemente en blanco. Se obtiene de Thumb, así como el id de la serie + if ("/serie" in url or "-serie" in url) and "pelisyseries.com" in host: + calidad_mps = "series/" + if "seriehd" in url: + calidad_mps = "series-hd/" + if "serievo" in url: + calidad_mps = "series-vo/" + if "serie-vo" in url: + calidad_mps = "series-vo/" + + real_title_mps = re.sub(r'.*?\/\d+_', '', thumb) + real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps) + + if "/0_" not in thumb: + serieid = scrapertools.find_single_match(thumb, r'.*?\/\w\/(?P\d+).*?.*') + if len(serieid) > 5: + serieid = "" + else: + serieid = "" + + url = host + calidad_mps + real_title_mps + "/" + serieid + + real_title_mps = real_title_mps.replace("-", " ") + #logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / real_title_mps: " + real_title_mps + " / calidad_mps : " + calidad_mps) + real_title = real_title_mps + + show = real_title - show = real_title + if ".com/serie" in url and "/miniseries" not in url: + + context = "tvshow" - itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb, - context=["buscar_trailer"], contentSerieName=show)) + itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb, quality=calidad, + show=show, extra="serie", context=["buscar_trailer"], contentType=context, contentTitle=real_title, contentSerieName=real_title, infoLabels= {'year':year})) else: - itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, - context=["buscar_trailer"])) - + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, quality=calidad, + show=show, context=["buscar_trailer"], contentType=context, contentTitle=real_title, infoLabels= {'year':year})) + + logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / show: " + show + " / calidad: " + calidad) + + tmdb.set_infoLabels(itemlist, True) + if post: - itemlist.append(item.clone(channel=item.channel, 
action="listado2", title=">> Página siguiente", + itemlist.append(item.clone(channel=item.channel, action="listado_busqueda", title=">> Página siguiente", thumbnail=get_thumb("next.png"))) return itemlist @@ -253,7 +314,6 @@ def listado2(item): def findvideos(item): logger.info() itemlist = [] - ## Cualquiera de las tres opciones son válidas # item.url = item.url.replace(".com/",".com/ver-online/") # item.url = item.url.replace(".com/",".com/descarga-directa/") @@ -263,32 +323,36 @@ def findvideos(item): data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") data = data.replace("$!", "#!").replace("'", "\"").replace("ñ", "ñ").replace("//pictures", "/pictures") - - title = scrapertools.find_single_match(data, "

    ([^<]+)<\/strong>[^<]+<\/h1>") - title += scrapertools.find_single_match(data, "

    [^<]+<\/strong>([^<]+)<\/h1>") - caratula = scrapertools.find_single_match(data, '
    .*?src="([^"]+)"') - - #
    Descarga tu Archivo torrent!
    + + title = scrapertools.find_single_match(data, "([^<]+)<\/strong>.*?<\/h1>") #corregido para adaptarlo a mispelisy.series.com + title += scrapertools.find_single_match(data, "[^<]+<\/strong>([^<]+)<\/h1>") #corregido para adaptarlo a mispelisy.series.com + #caratula = scrapertools.find_single_match(data, '
    .*?src="([^"]+)"') + caratula = scrapertools.find_single_match(data, ']+>.*?' #patron_ver = '
    ]+>.*?' @@ -309,37 +373,48 @@ def findvideos(item): patron = '
@@ -309,37 +373,48 @@ def findvideos(item):
     patron = '<div class="box1"><img src="([^"]+)".*?<\/div[^<]+'
     patron += '<div class="box2">([^<]+)?<\/div[^<]+<div class="box3">([^<]+)?'
     patron += '<\/div[^<]+<div class="box4">([^<]+)?<\/div[^<]+'
 
+    if len(enlaces_ver) > 0:
+        itemlist.append(item.clone(title="", action="", folder=False))
+        itemlist.append(item.clone(title=" Enlaces Ver: ", action="", text_color="green", folder=False))
 
     for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver:
         if "Ver" in titulo:
             servidor = servidor.replace("streamin", "streaminto")
-            titulo = titulo + " [" + servidor + "]"
+            titulo = title
             mostrar_server = True
             if config.get_setting("hidepremium"):
                 mostrar_server = servertools.is_server_enabled(servidor)
+            logger.debug("url: " + enlace + " / title: " + title + " / servidor: " + servidor + " / idioma: " + idioma)
             if mostrar_server:
                 try:
                     devuelve = servertools.findvideosbyserver(enlace, servidor)
                     if devuelve:
                         enlace = devuelve[0][1]
                         itemlist.append(
-                            Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo,
-                                 fulltitle=item.title, url=enlace, thumbnail=logo, plot=item.plot, folder=False))
+                            Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo, quality=titulo,
+                                 fulltitle=titulo, url=enlace, thumbnail=logo, plot=item.plot, folder=False))
                 except:
                     pass
 
+    if len(enlaces_descargar) > 0:
+        itemlist.append(item.clone(title="", action="", folder=False))
+        itemlist.append(item.clone(title=" Enlaces Descargar: ", action="", text_color="green", folder=False))
+
     for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar:
         if "Ver" not in titulo:
             servidor = servidor.replace("uploaded", "uploadedto")
             partes = enlace.split(" ")
+            titulo = "Partes "
             p = 1
+            logger.debug("url: " + enlace + " / title: " + title + " / servidor: " + servidor + " / idioma: " + idioma)
             for enlace in partes:
-                parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " [" + servidor + "]"
+                parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " - " + title
                 p += 1
                 mostrar_server = True
                 if config.get_setting("hidepremium"):
@@ -349,11 +424,12 @@ def findvideos(item):
                     devuelve = servertools.findvideosbyserver(enlace, servidor)
                     if devuelve:
                         enlace = devuelve[0][1]
-                        itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor,
-                                             title=parte_titulo, fulltitle=item.title, url=enlace, thumbnail=logo,
+                        itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, quality=parte_titulo,
+                                             title=parte_titulo, fulltitle=parte_titulo, url=enlace, thumbnail=logo,
                                              plot=item.plot, folder=False))
                 except:
                     pass
+
     return itemlist
 
@@ -363,6 +439,8 @@ def episodios(item):
     infoLabels = item.infoLabels
     data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
     data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
+    calidad = item.quality
+
     pattern = '<ul class="%s">(.*?)</ul>' % "pagination"  # item.pattern
     pagination = scrapertools.find_single_match(data, pattern)
     if pagination:
@@ -381,22 +459,43 @@ def episodios(item):
         logger.debug("Loading page %s/%s url=%s" % (index, len(list_pages), page))
         data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data)
         data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
-
+        data = data.replace("chapters", "buscar-list")    #Compatibilidad con mispelisy.series.com
         pattern = '<ul class="%s">(.*?)</ul>' % "buscar-list"  # item.pattern
         data = scrapertools.get_match(data, pattern)
+        #logger.debug("data: " + data)
 
-        pattern = '<li[^>]*><a href="(?P<url>[^"]+)".*?<img src="(?P<thumb>[^"]+)".*?<h2[^>]+>(?P<info>.*?)</h2>'
+        if "pelisyseries.com" in host:
+            pattern = '<li[^>]*><a href="(?P<url>[^"]+)".*?<img src="(?P<thumb>[^"]+)".*?<h3[^>]+>(?P<info>.*?)?<\/h3>.*?<\/li>'
+        else:
+            pattern = '<li[^>]*><a href="(?P<url>[^"]+)".*?<img src="(?P<thumb>[^"]+)".*?<h2[^>]+>(?P<info>.*?)?<\/h2>'
         matches = re.compile(pattern, re.DOTALL).findall(data)
+        #logger.debug("patron: " + pattern)
+        #logger.debug(matches)
+
+        season = "1"
         for url, thumb, info in matches:
+            if "pelisyseries.com" in host:
+                interm = url
+                url = thumb
+                thumb = interm
+
+            if "<span" in info:  # new style
-                pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+).*?Capitulo(?:s)?\s*(?P<episode>\d+)" \
-                          "(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)<\/span>\s*Calidad\s*<span[^>]+>" \
-                          "[\[]\s*(?P<quality>.*?)\s*[\]]"
+                pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)?.*?Capitulo(?:s)?\s*(?P<episode>\d+)?" \
+                          "(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>" \
+                          "[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
+                if "Especial" in info:  # Capitulos Especiales
+                    pattern = ".*?[^>]+>.*?Temporada.*?\[.*?(?P<season>\d+).*?\].*?Capitulo.*?\[\s*(?P<episode>\d+).*?\]?(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
+                logger.debug("patron: " + pattern)
+                logger.debug(info)
                 r = re.compile(pattern)
                 match = [m.groupdict() for m in r.finditer(info)][0]
+
+                if match['season'] is None: match['season'] = season
+                if match['episode'] is None: match['episode'] = "0"
+                if match['quality']: item.quality = match['quality']
+
                 if match["episode2"]:
                     multi = True
                     title = "%s (%sx%s-%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
@@ -408,12 +507,17 @@ def episodios(item):
                                                      match["lang"], match["quality"])
             else:  # old style
-                pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+)(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
+                pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+).*?(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
                           "(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"
-
+                logger.debug("patron: " + pattern)
+                logger.debug(info)
                 r = re.compile(pattern)
                 match = [m.groupdict() for m in r.finditer(info)][0]
-                # logger.debug("data %s" % match)
+                #logger.debug("data %s" % match)
+
+                #if match['season'] is "": match['season'] = season
+                #if match['episode'] is "": match['episode'] = "0"
+                #logger.debug(match)
 
                 str_lang = ""
                 if match["lang"] is not None:
@@ -436,18 +540,19 @@ def episodios(item):
 
             season = match['season']
             episode = match['episode']
+            logger.debug("title: " + title + " / url: " + url + " / calidad: " + item.quality + " / multi: " + str(multi) + " / Season: " + str(season) + " / EpisodeNumber: " + str(episode))
             itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
                                  quality=item.quality, multi=multi, contentSeason=season,
                                  contentEpisodeNumber=episode, infoLabels = infoLabels))
 
-    # order list
+    #tmdb.set_infoLabels(itemlist, True)
     tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
     if len(itemlist) > 1:
         itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))
 
     if config.get_videolibrary_support() and len(itemlist) > 0:
         itemlist.append(
-            item.clone(title="[COLOR orange][B]Añadir esta serie a la videoteca[/B][/COLOR]", action="add_serie_to_library", extra="episodios"))
+            item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios", quality=calidad))
 
     return itemlist
 
@@ -458,7 +563,7 @@ def search(item, texto):
     try:
         item.post = "q=%s" % texto
         item.pattern = "buscar-list"
-        itemlist = listado2(item)
+        itemlist = listado_busqueda(item)
 
         return itemlist
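Finally, the pelisyseries rescue path added to listado() in all three channels reconstructs a series landing URL from the episode thumbnail, because the scraped href points at a single chapter and the series name is often blank. The two re.sub calls plus the serieid probe can be tried standalone; the thumb value below is a made-up example shaped like the URLs the code expects:

import re

host = 'http://mispelisyseries.com/'
thumb = 'http://mispelisyseries.com/pictures/s/12345_mi-serie-favorita.jpg'

real_title_mps = re.sub(r'.*?\/\d+_', '', thumb)          # 'mi-serie-favorita.jpg'
real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps)  # 'mi-serie-favorita'

serieid = ""
if "/0_" not in thumb:  # "/0_" marks thumbs that carry no series id
    m = re.search(r'.*?\/\w\/(?P<serieid>\d+)', thumb)
    if m: serieid = m.group('serieid')
    if len(serieid) > 5: serieid = ""  # ids longer than 5 digits are discarded

url = host + "series/" + real_title_mps + "/" + serieid
print(url)  # http://mispelisyseries.com/series/mi-serie-favorita/12345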