diff --git a/plugin.video.alfa/addon.xml b/plugin.video.alfa/addon.xml index 8510f408..12ee44df 100755 --- a/plugin.video.alfa/addon.xml +++ b/plugin.video.alfa/addon.xml @@ -1,5 +1,5 @@ - + @@ -19,12 +19,11 @@ [B]Estos son los cambios para esta versión:[/B] [COLOR green][B]Canales agregados y arreglos[/B][/COLOR] - » cinemahd » seriesdanko - » doramasmp4 » pelisplus - » descargas2020 + » torrentrapid » torrentlocura + » mispelisyseries » descargas2020 ¤ arreglos internos - ¤ Gracias a la colaboración de @t1254362 en esta versión + ¤ Gracias a la colaboración de @pipcat y @lopezvg en esta versión Navega con Kodi por páginas web para ver sus videos de manera fácil. Browse web pages using Kodi diff --git a/plugin.video.alfa/channels/animemovil.py b/plugin.video.alfa/channels/animemovil.py index 8d84eb5e..f5347e4e 100644 --- a/plugin.video.alfa/channels/animemovil.py +++ b/plugin.video.alfa/channels/animemovil.py @@ -204,14 +204,16 @@ def episodios(item): matches = scrapertools.find_multiple_matches(bloque, '
  • (.+?)<\/ul><\/div><\/div><\/div>') show = item.title - patron_caps = '<\/a><\/div>
    ([^"]+)<\/div>.+?([^"]+)<\/a>' + patron_caps = '.+?' + patron_caps += '<\/a><\/div>
    ([^"]+)<\/div>.+?([^"]+)<\/a>' #scrapedthumbnail,#scrapedtempepi, #scrapedurl, #scrapedtitle matches = scrapertools.find_multiple_matches(data_lista, patron_caps) for scrapedthumbnail, scrapedtempepi, scrapedurl, scrapedtitle in matches: @@ -148,14 +148,24 @@ def findvideos(item): data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - data = scrapertools.find_single_match(data, + data1 = scrapertools.find_single_match(data, '
    (.+?)<\/nav><\/div><\/div>') patron='src="(.+?)"' - logger.info("assfxxv "+data) - itemla = scrapertools.find_multiple_matches(data,patron) + itemla = scrapertools.find_multiple_matches(data1,patron) + if "favicons?domain" in itemla[1]: + method = 1 + data2=scrapertools.find_single_match(data, "var \$user_hashs = {(.+?)}") + patron='".+?":"(.+?)"' + itemla = scrapertools.find_multiple_matches(data2,patron) + else: + method = 0 for i in range(len(itemla)): - #for url in itemla: - url=itemla[i] + if method==0: + url=itemla[i] + else: + import base64 + b=base64.b64decode(itemla[i]) + url=b.decode('utf8') #verificar existencia del video (testing) codigo=verificar_video(itemla[i]) if codigo==200: @@ -199,5 +209,5 @@ def verificar_video(url): else: codigo1=200 else: - codigo1=200 + codigo1=200 return codigo1 diff --git a/plugin.video.alfa/channels/descargas2020.json b/plugin.video.alfa/channels/descargas2020.json index d413b2e0..be4f4191 100755 --- a/plugin.video.alfa/channels/descargas2020.json +++ b/plugin.video.alfa/channels/descargas2020.json @@ -10,7 +10,8 @@ "movie", "tvshow", "anime", - "torrent" + "torrent", + "documentary" ], "settings": [ { @@ -21,6 +22,22 @@ "enabled": true, "visible": true }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Peliculas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_series", + "type": "bool", + "label": "Incluir en Novedades - Episodios de series", + "default": true, + "enabled": true, + "visible": true + }, { "id": "include_in_newest_torrent", "type": "bool", diff --git a/plugin.video.alfa/channels/descargas2020.py b/plugin.video.alfa/channels/descargas2020.py index 3b8613d1..4008919a 100644 --- a/plugin.video.alfa/channels/descargas2020.py +++ b/plugin.video.alfa/channels/descargas2020.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- +# -*- coding: utf-8 -*- import re @@ -10,7 +10,7 @@ from core.item import Item from platformcode import config, logger from core import tmdb -host = 'http://descargas2020.com/' # Cambiar manualmente "xx" en línea 287 ".com/xx/library" por tl para descargas2020, tr para descargas2020, d20 para descargas2020 +host = 'http://descargas2020.com/' def mainlist(item): logger.info() @@ -26,7 +26,7 @@ def mainlist(item): itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=host, extra="series", thumbnail=thumb_series)) - + itemlist.append(Item(channel=item.channel, action="submenu", title="Documentales", url=host, extra="varios", thumbnail=thumb_series)) itemlist.append( @@ -40,12 +40,15 @@ def submenu(item): data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") + data = data.replace("'", "\"").replace("/series\"", "/series/\"") #Compatibilidad con mispelisy.series.com - #patron = '
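For reference, the new animemovil.py findvideos() logic above reduces to the following standalone sketch (Python 2, matching the plugin): when the second scraped src is a Google favicon URL, the real stream URLs are base64 strings inside var $user_hashs and must be decoded. find_player_links and page_data are illustrative names, not part of the channel code.

    # Hedged sketch of the animemovil.py change above.
    import base64
    import re

    def find_player_links(page_data):
        iframes = re.findall(r'src="(.+?)"', page_data)  # candidate player tabs
        if len(iframes) > 1 and "favicons?domain" in iframes[1]:
            # new layout: real URLs are base64 values in "var $user_hashs = {...}"
            block = re.search(r"var \$user_hashs = {(.+?)}", page_data)
            hashs = re.findall(r'".+?":"(.+?)"', block.group(1)) if block else []
            return [base64.b64decode(h).decode('utf8') for h in hashs]
        return iframes  # old layout: the src attributes are the URLs themselves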
  • .*?
      (.*?)
    ' - patron = '
  • .*?
      (.*?)
    ' #Filtrado por url - data = scrapertools.get_match(data, patron) + patron = '
  • .*?(.*?)' + if "pelisyseries.com" in host and item.extra == "varios": #compatibilidad con mispelisy.series.com + data = ' Documentales' + else: + data = scrapertools.get_match(data, patron) - patron = '([^>]+)' + patron = '<.*?href="([^"]+)".*?>([^>]+)' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, scrapedtitle in matches: @@ -55,12 +58,12 @@ def submenu(item): itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra="pelilist")) itemlist.append( Item(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url, extra="pelilist")) - + if item.extra == "peliculas": itemlist.append(Item(channel=item.channel, action="listado", title="Películas 4K", url=host + "peliculas-hd/4kultrahd/", extra="pelilist")) itemlist.append( Item(channel=item.channel, action="alfabeto", title="Películas 4K" + " [A-Z]", url=host + "peliculas-hd/4kultrahd/", extra="pelilist")) - + return itemlist @@ -91,16 +94,12 @@ def listado(item): itemlist = [] url_next_page ='' - data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) + #data = httptools.downloadpage(item.url).data data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") - #logger.debug(data) - logger.debug('item.modo: %s'%item.modo) - logger.debug('item.extra: %s'%item.extra) if item.modo != 'next' or item.modo =='': - logger.debug('item.title: %s'% item.title) patron = '
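Both versions of listado() open with the same download-and-normalize idiom; isolated, it looks like this (a sketch under Python 2, where unicode() is a builtin; get_clean_page is an illustrative name):

    # Sketch of the recurring page-normalization idiom (Python 2).
    import re
    from core import httptools

    def get_clean_page(url):
        data = httptools.downloadpage(url).data
        # collapse newlines, tabs and runs of spaces so patterns ignore layout
        data = re.sub(r"\n|\r|\t|\s{2}", "", data)
        # the sites serve iso-8859-1; re-encode to utf-8 before matching
        data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
        return data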
      (.*?)
    ' - logger.debug("patron=" + patron) fichas = scrapertools.get_match(data, patron) page_extra = item.extra else: @@ -109,11 +108,11 @@ def listado(item): patron = '(.*?)<\/b><\/font>') + real_title = scrapertools.find_single_match(title, r'(.*?)Temporada.*?<\/strong>') #series + if not real_title: + real_title = scrapertools.find_single_match(title, r'(.*?)\[.*?]') #movies real_title = scrapertools.remove_htmltags(real_title).decode('iso-8859-1').encode('utf-8') + real_title = scrapertools.htmlclean(real_title) + real_title = real_title.replace("�", "ñ").replace("ñ", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "").replace("de la Serie", "") + calidad = scrapertools.find_single_match(title, r'.*?\s*Calidad.*?]+>[\[]\s*(?P.*?)\s*[\]]<\/span>') #series + if calidad == "": + calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])') #movies + year = scrapertools.find_single_match(thumb, r'-(\d{4})') + + # fix encoding for title title = scrapertools.htmlclean(title) - title = title.replace("�", "ñ") + title = re.sub(r'(Calidad.*?\])', '', title) + title = title.replace("�", "ñ").replace("ñ", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "").replace("de la Serie", "") + if real_title == "": + real_title = title + if calidad == "": + calidad = title + context = "movie" + url_real = True # no mostramos lo que no sean videos - if "/juego/" in url or "/varios/" in url: + if "juego/" in url: continue - if ".com/series" in url: + # Codigo para rescatar lo que se puede en pelisy.series.com de Series para la Videoteca. la URL apunta al capítulo y no a la Serie. Nombre de Serie frecuentemente en blanco. Se obtiene de Thumb, así como el id de la serie + if ("/serie" in url or "-serie" in url) and "pelisyseries.com" in host: + if "seriehd" in url: + calidad_mps = "series-hd/" + elif "serievo" in url: + calidad_mps = "series-vo/" + elif "serie-vo" in url: + calidad_mps = "series-vo/" + else: + calidad_mps = "series/" + + if "no_image" in thumb: + real_title_mps = title + else: + real_title_mps = re.sub(r'.*?\/\d+_', '', thumb) + real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps) + + if "/0_" not in thumb and not "no_image" in thumb: + serieid = scrapertools.find_single_match(thumb, r'.*?\/\w\/(?P\d+).*?.*') + if len(serieid) > 5: + serieid = "" + else: + serieid = "" - show = real_title + #detectar si la url creada de tvshow es válida o hay que volver atras + url_tvshow = host + calidad_mps + real_title_mps + "/" + url_id = host + calidad_mps + real_title_mps + "/" + serieid + data_serie = data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(url_id).data) + data_serie = unicode(data_serie, "iso-8859-1", errors="replace").encode("utf-8") + data_serie = data_serie.replace("chapters", "buscar-list") + pattern = '
      (.*?)
    ' % "buscar-list" # item.pattern + if not scrapertools.find_single_match(data_serie, pattern): + data_serie = data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(url_tvshow).data) + data_serie = unicode(data_serie, "iso-8859-1", errors="replace").encode("utf-8") + data_serie = data_serie.replace("chapters", "buscar-list") + if not scrapertools.find_single_match(data_serie, pattern): + context = "movie" + url_real = False + if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados: + if calidad: + title = title + '[' + calidad + "]" + else: + url = url_tvshow + else: + url = url_id + + real_title_mps = real_title_mps.replace("-", " ") + logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / real_title_mps: " + real_title_mps + " / calidad_mps : " + calidad_mps + " / context : " + context) + real_title = real_title_mps + + show = real_title - itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb, - context=["buscar_trailer"], contentSerieName=show)) + if ".com/serie" in url and "/miniseries" not in url and url_real: + if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados: + if calidad: + title = title + '[' + calidad + "]" + context = "tvshow" + + itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb, quality=calidad, + show=show, extra="serie", context=["buscar_trailer"], contentType=context, contentTitle=real_title, contentSerieName=real_title, infoLabels= {'year':year})) else: + if config.get_setting("unify"): #Si Titulos Inteligentes SI seleccionados: + title = real_title - itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, - context=["buscar_trailer"])) - + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, quality=calidad, + show=show, context=["buscar_trailer"], contentType=context, contentTitle=real_title, infoLabels= {'year':year})) + + logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / show: " + show + " / calidad: " + calidad) + + tmdb.set_infoLabels(itemlist, True) + if post: itemlist.append(item.clone(channel=item.channel, action="listado_busqueda", title=">> Página siguiente", - thumbnail=get_thumb("next.png"))) + text_color='yellow', text_bold=True, thumbnail=get_thumb("next.png"))) return itemlist def findvideos(item): logger.info() itemlist = [] - ## Cualquiera de las tres opciones son válidas # item.url = item.url.replace(".com/",".com/ver-online/") # item.url = item.url.replace(".com/",".com/descarga-directa/") @@ -263,65 +324,87 @@ def findvideos(item): data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") data = data.replace("$!", "#!").replace("'", "\"").replace("ñ", "ñ").replace("//pictures", "/pictures") - - title = scrapertools.find_single_match(data, "
    ' + if "pelisyseries.com" in host: + pattern = ']*>
    ]+>(?P.*?)?<\/h3>.*?<\/li>' + else: + pattern = ']*>]+>(?P.*?)?<\/h2>' matches = re.compile(pattern, re.DOTALL).findall(data) + #logger.debug("patron: " + pattern) + #logger.debug(matches) + + season = "1" for url, thumb, info in matches: + if "pelisyseries.com" in host: + interm = url + url = thumb + thumb = interm + if "\d+)?)<.+?]+>(?P.*?)\s*Calidad\s*]+>" \ - "[\[]\s*(?P.*?)\s*[\]]" + pattern = ".*?[^>]+>.*?Temporada\s*(?P\d+)?.*?Capitulo(?:s)?\s*(?P\d+)?" \ + "(?:.*?(?P\d+)?)<.+?]+>(?P.*?)?<\/span>\s*Calidad\s*]+>" \ + "[\[]\s*(?P.*?)?\s*[\]]<\/span>" + if "Especial" in info: # Capitulos Especiales + pattern = ".*?[^>]+>.*?Temporada.*?\[.*?(?P\d+).*?\].*?Capitulo.*?\[\s*(?P\d+).*?\]?(?:.*?(?P\d+)?)<.+?]+>(?P.*?)?<\/span>\s*Calidad\s*]+>[\[]\s*(?P.*?)?\s*[\]]<\/span>" + + if not scrapertools.find_single_match(info, pattern): #en caso de error de formato, creo uno básico + logger.debug("patron episodioNEW: " + pattern) + logger.debug(info) + info = '>%sTemporada %s Capitulo 0 - Español Castellano Calidad [%s]' % (item.contentTitle, season, item.infoLabels['quality']) r = re.compile(pattern) match = [m.groupdict() for m in r.finditer(info)][0] + if match['season'] is None: match['season'] = season + if match['episode'] is None: match['episode'] = "0" + if match['quality']: + item.quality = match['quality'] + if match["episode2"]: multi = True - title = "%s (%sx%s-%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2), - str(match["episode2"]).zfill(2), match["lang"], - match["quality"]) + title = "%s (%sx%s-%s) [%s]" % (item.show, match["season"], str(match["episode"]).zfill(2), + str(match["episode2"]).zfill(2), match["lang"]) + if not config.get_setting("unify") and match["quality"]: #Si Titulos Inteligentes NO seleccionados: + title = "%s[%s]" % (title, match["quality"]) else: multi = False - title = "%s (%sx%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2), - match["lang"], match["quality"]) + title = "%s (%sx%s) [%s]" % (item.show, match["season"], str(match["episode"]).zfill(2), + match["lang"]) + if not config.get_setting("unify") and match["quality"]: #Si Titulos Inteligentes NO seleccionados: + title = "%s[%s]" % (title, match["quality"]) else: # old style - pattern = "\[(?P.*?)\].*?\[Cap.(?P\d+)(?P\d{2})(?:_(?P\d+)" \ + if scrapertools.find_single_match(info, '\[\d{3}\]'): + info = re.sub(r'\[(\d{3}\])', r'[Cap.\1', info) + elif scrapertools.find_single_match(info, '\[Cap.\d{2}_\d{2}\]'): + info = re.sub(r'\[Cap.(\d{2})_(\d{2})\]', r'[Cap.1\1_1\2]', info) + elif scrapertools.find_single_match(info, '\[Cap.([A-Za-z]+)\]'): + info = re.sub(r'\[Cap.([A-Za-z]+)\]', '[Cap.100]', info) + if scrapertools.find_single_match(info, '\[Cap.\d{2,3}'): + pattern = "\[(?P.*?)\].*?\[Cap.(?P\d).*?(?P\d{2})(?:_(?P\d+)" \ "(?P\d{2}))?.*?\].*?(?:\[(?P.*?)\])?" + elif scrapertools.find_single_match(info, 'Cap.\d{2,3}'): + pattern = ".*?Temp.*?\s(?P.*?)\s.*?Cap.(?P\d).*?(?P\d{2})(?:_(?P\d+)(?P\d{2}))?.*?\s(?P.*)?" 
+ if not scrapertools.find_single_match(info, pattern): #en caso de error de formato, creo uno básico + logger.debug("patron episodioOLD: " + pattern) + logger.debug(info) + info = '%s [%s][Cap.%s00][Español]' % (item.contentTitle, item.infoLabels['quality'], season) r = re.compile(pattern) match = [m.groupdict() for m in r.finditer(info)][0] - # logger.debug("data %s" % match) str_lang = "" + if match['quality']: + item.quality = match['quality'] + if match["lang"] is not None: str_lang = "[%s]" % match["lang"] - + item.quality = "%s %s" % (item.quality, match['lang']) + if match["season2"] and match["episode2"]: multi = True if match["season"] == match["season2"]: - title = "%s (%sx%s-%s) %s[%s]" % (item.show, match["season"], match["episode"], - match["episode2"], str_lang, match["quality"]) + title = "%s (%sx%s-%s) %s" % (item.show, match["season"], match["episode"], + match["episode2"], str_lang) + if not config.get_setting("unify") and match["quality"]: #Si Titulos Inteligentes NO seleccionados: + title = "%s[%s]" % (title, match["quality"]) else: - title = "%s (%sx%s-%sx%s) %s[%s]" % (item.show, match["season"], match["episode"], - match["season2"], match["episode2"], str_lang, - match["quality"]) + title = "%s (%sx%s-%sx%s) %s" % (item.show, match["season"], match["episode"], + match["season2"], match["episode2"], str_lang) + if not config.get_setting("unify") and match["quality"]: #Si Titulos Inteligentes NO seleccionados: + title = "%s[%s]" % (title, match["quality"]) else: - title = "%s (%sx%s) %s[%s]" % (item.show, match["season"], match["episode"], str_lang, - match["quality"]) + title = "%s (%sx%s) %s" % (item.show, match["season"], match["episode"], str_lang) + if not config.get_setting("unify") and match["quality"]: #Si Titulos Inteligentes NO seleccionados: + title = "%s[%s]" % (title, match["quality"]) multi = False season = match['season'] episode = match['episode'] + logger.debug("title: " + title + " / url: " + url + " / calidad: " + item.quality + " / multi: " + str(multi) + " / Season: " + str(season) + " / EpisodeNumber: " + str(episode)) itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, quality=item.quality, multi=multi, contentSeason=season, contentEpisodeNumber=episode, infoLabels = infoLabels)) - # order list + #tmdb.set_infoLabels(itemlist, True) tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True) if len(itemlist) > 1: itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber))) if config.get_videolibrary_support() and len(itemlist) > 0: itemlist.append( - item.clone(title="[COLOR orange][B]Añadir esta serie a la videoteca[/B][/COLOR]", action="add_serie_to_library", extra="episodios")) + item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios", quality=calidad)) return itemlist @@ -458,8 +610,8 @@ def search(item, texto): try: item.post = "q=%s" % texto item.pattern = "buscar-list" - itemlist = listado2(item) - + itemlist = listado_busqueda(item) + return itemlist # Se captura la excepción, para no interrumpir al buscador global si un canal falla @@ -485,6 +637,24 @@ def newest(categoria): itemlist.extend(listado(item)) if itemlist[-1].title == ">> Página siguiente": itemlist.pop() + + if categoria == 'peliculas 4k': + item.url = host+'peliculas-hd/4kultrahd/' + itemlist.extend(listado(item)) + if itemlist[-1].title == ">> Página siguiente": + itemlist.pop() + + if categoria == 'anime': + item.url = host+'anime/' + 
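The chain of re.sub() calls above normalizes the various legacy episode tags to a single [Cap.SEE] form before the named-group pattern runs. Isolated (normalize_cap_tag is an illustrative name):

    import re

    def normalize_cap_tag(info):
        if re.search(r'\[\d{3}\]', info):              # "[102]"       -> "[Cap.102]"
            info = re.sub(r'\[(\d{3}\])', r'[Cap.\1', info)
        elif re.search(r'\[Cap.\d{2}_\d{2}\]', info):  # "[Cap.02_04]" -> "[Cap.102_104]"
            info = re.sub(r'\[Cap.(\d{2})_(\d{2})\]', r'[Cap.1\1_1\2]', info)
        elif re.search(r'\[Cap.([A-Za-z]+)\]', info):  # "[Cap.Final]" -> "[Cap.100]"
            info = re.sub(r'\[Cap.([A-Za-z]+)\]', '[Cap.100]', info)
        return info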
itemlist.extend(listado(item)) + if itemlist[-1].title == ">> Página siguiente": + itemlist.pop() + + if categoria == 'documentales': + item.url = host+'documentales/' + itemlist.extend(listado(item)) + if itemlist[-1].title == ">> Página siguiente": + itemlist.pop() # Se captura la excepción, para no interrumpir al canal novedades si un canal falla except: diff --git a/plugin.video.alfa/channels/torrentrapid.json b/plugin.video.alfa/channels/torrentrapid.json index 0362f46d..303149a0 100644 --- a/plugin.video.alfa/channels/torrentrapid.json +++ b/plugin.video.alfa/channels/torrentrapid.json @@ -10,7 +10,8 @@ "movie", "tvshow", "anime", - "torrent" + "torrent", + "documentary" ], "settings": [ { @@ -21,6 +22,22 @@ "enabled": true, "visible": true }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Peliculas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_series", + "type": "bool", + "label": "Incluir en Novedades - Episodios de series", + "default": true, + "enabled": true, + "visible": true + }, { "id": "include_in_newest_torrent", "type": "bool", @@ -28,6 +45,14 @@ "default": true, "enabled": true, "visible": true + }, + { + "id": "include_in_newest_4k", + "type": "bool", + "label": "Incluir en Novedades - 4K", + "default": true, + "enabled": true, + "visible": true } ] } \ No newline at end of file diff --git a/plugin.video.alfa/channels/torrentrapid.py b/plugin.video.alfa/channels/torrentrapid.py index ae0e174f..17273437 100644 --- a/plugin.video.alfa/channels/torrentrapid.py +++ b/plugin.video.alfa/channels/torrentrapid.py @@ -10,7 +10,7 @@ from core.item import Item from platformcode import config, logger from core import tmdb -host = 'http://torrentrapid.com/' # Cambiar manualmente "xx" en línea 287 ".com/xx/library" por tl para torrentrapid, tr para torrentrapid, d20 para descargas2020 +host = 'http://torrentrapid.com/' def mainlist(item): logger.info() @@ -40,12 +40,15 @@ def submenu(item): data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") + data = data.replace("'", "\"").replace("/series\"", "/series/\"") #Compatibilidad con mispelisy.series.com - #patron = '
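The new include_in_newest_* switches in the channel JSON presumably work like the existing ones: the novedades module reads each channel's setting before asking it for a category. A sketch of that check (an assumed consumer; channel_wants_category is an illustrative name):

    from platformcode import config

    def channel_wants_category(channel_name, categoria):
        # e.g. categoria "peliculas" maps to the "include_in_newest_peliculas" id
        return config.get_setting("include_in_newest_" + categoria, channel_name)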

    ([^<]+)<\/strong>[^<]+<\/h1>") - title += scrapertools.find_single_match(data, "

    [^<]+<\/strong>([^<]+)<\/h1>") - caratula = scrapertools.find_single_match(data, '
    .*?src="([^"]+)"') - - #
    Descarga tu Archivo torrent!
    + + title = scrapertools.find_single_match(data, "([^<]+)<\/strong>.*?<\/h1>") #corregido para adaptarlo a mispelisy.series.com + title += scrapertools.find_single_match(data, "[^<]+<\/strong>([^<]+)<\/h1>") #corregido para adaptarlo a mispelisy.series.com + #caratula = scrapertools.find_single_match(data, '
    .*?src="([^"]+)"') + caratula = scrapertools.find_single_match(data, ']+>.*?' - #patron_ver = '
    ]+>.*?' - - #match_ver = scrapertools.find_single_match(data, patron_ver) - #match_descargar = scrapertools.find_single_match(data, patron_descargar) - - #patron = '
    <\/div[^<]+
    ([^<]+)?<\/div[^<]+
    ([^<]+)?' patron += '<\/div[^<]+
    ([^<]+)?<\/div[^<]+
    0: + if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados: + itemlist.append(item.clone(title="[COLOR gold]**- Enlaces Ver: -**[/COLOR]", action="", folder=False)) + else: + itemlist.append(item.clone(title="[COLOR gold] Enlaces Ver: [/COLOR]", action="", folder=False)) for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver: if "Ver" in titulo: servidor = servidor.replace("streamin", "streaminto") - titulo = titulo + " [" + servidor + "]" + titulo = title mostrar_server = True if config.get_setting("hidepremium"): mostrar_server = servertools.is_server_enabled(servidor) + titulo = '[COLOR yellow][%s]-[/COLOR] %s [online]' % (servidor.capitalize(), titulo) + logger.debug("VER: url: " + enlace + " / title: " + titulo + " / servidor: " + servidor + " / idioma: " + idioma) + if mostrar_server: try: devuelve = servertools.findvideosbyserver(enlace, servidor) @@ -329,31 +412,46 @@ def findvideos(item): enlace = devuelve[0][1] itemlist.append( Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo, - fulltitle=item.title, url=enlace, thumbnail=logo, plot=item.plot, folder=False)) + fulltitle=title, url=enlace, thumbnail=logo, plot=item.plot, infoLabels=item.infoLabels, folder=False)) except: pass + if len(enlaces_descargar) > 0: + if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados: + itemlist.append(item.clone(title="[COLOR gold]**- Enlaces Descargar: -**[/COLOR]", action="", folder=False)) + else: + itemlist.append(item.clone(title="[COLOR gold] Enlaces Descargar: [/COLOR]", action="", folder=False)) + for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar: if "Ver" not in titulo: servidor = servidor.replace("uploaded", "uploadedto") partes = enlace.split(" ") + titulo = "Descarga " p = 1 + logger.debug("DESCARGAR: url: " + enlace + " / title: " + titulo + title + " / servidor: " + servidor + " / idioma: " + idioma) for enlace in partes: - parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " [" + servidor + "]" + parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) p += 1 mostrar_server = True if config.get_setting("hidepremium"): mostrar_server = servertools.is_server_enabled(servidor) + parte_titulo = '[COLOR yellow][%s]-[/COLOR] %s' % (servidor.capitalize(), parte_titulo) + if item.infoLabels['quality']: + if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados: + parte_titulo = '%s [%s]' %(parte_titulo, item.infoLabels['quality']) + else: + parte_titulo = '%s (%s)' %(parte_titulo, item.infoLabels['quality']) if mostrar_server: try: devuelve = servertools.findvideosbyserver(enlace, servidor) if devuelve: enlace = devuelve[0][1] itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, - title=parte_titulo, fulltitle=item.title, url=enlace, thumbnail=logo, - plot=item.plot, folder=False)) + title=parte_titulo, fulltitle=title, url=enlace, thumbnail=logo, + plot=item.plot, infoLabels=item.infoLabels, folder=False)) except: pass + return itemlist @@ -363,6 +461,8 @@ def episodios(item): infoLabels = item.infoLabels data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data) data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") + calidad = item.quality + pattern = '
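In the reworked findvideos(), a download entry can bundle several space-separated part URLs; each part becomes its own item titled "Descarga (n/total)" with server and quality decoration. A sketch of that titling (title_download_parts is an illustrative name):

    def title_download_parts(enlace, servidor, quality=""):
        partes = enlace.split(" ")  # multi-part links are space-separated
        titles = []
        for p, url in enumerate(partes, 1):
            t = '[COLOR yellow][%s]-[/COLOR] Descarga (%s/%s)' % (
                servidor.capitalize(), p, len(partes))
            if quality:
                t = '%s [%s]' % (t, quality)  # quality shown when unify is off
            titles.append((t, url))
        return titles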
      (.*?)
    ' % "pagination" # item.pattern pagination = scrapertools.find_single_match(data, pattern) if pagination: @@ -378,76 +478,128 @@ def episodios(item): list_pages = [item.url] for index, page in enumerate(list_pages): - logger.debug("Loading page %s/%s url=%s" % (index, len(list_pages), page)) data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data) data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") - + data = data.replace("chapters", "buscar-list") #Compatibilidad con mispelisy.series.com pattern = '
      (.*?)
    ' % "buscar-list" # item.pattern - data = scrapertools.get_match(data, pattern) + if scrapertools.find_single_match(data, pattern): + data = scrapertools.get_match(data, pattern) + else: + logger.debug(item) + logger.debug("data: " + data) + return itemlist - pattern = ']*>
    ]+>(?P.*?)

    ' + if "pelisyseries.com" in host: + pattern = ']*>
    ]+>(?P.*?)?<\/h3>.*?<\/li>' + else: + pattern = ']*>]+>(?P.*?)?<\/h2>' matches = re.compile(pattern, re.DOTALL).findall(data) + #logger.debug("patron: " + pattern) + #logger.debug(matches) + + season = "1" for url, thumb, info in matches: + if "pelisyseries.com" in host: + interm = url + url = thumb + thumb = interm + if "\d+)?)<.+?]+>(?P.*?)\s*Calidad\s*]+>" \ - "[\[]\s*(?P.*?)\s*[\]]" + pattern = ".*?[^>]+>.*?Temporada\s*(?P\d+)?.*?Capitulo(?:s)?\s*(?P\d+)?" \ + "(?:.*?(?P\d+)?)<.+?]+>(?P.*?)?<\/span>\s*Calidad\s*]+>" \ + "[\[]\s*(?P.*?)?\s*[\]]<\/span>" + if "Especial" in info: # Capitulos Especiales + pattern = ".*?[^>]+>.*?Temporada.*?\[.*?(?P\d+).*?\].*?Capitulo.*?\[\s*(?P\d+).*?\]?(?:.*?(?P\d+)?)<.+?]+>(?P.*?)?<\/span>\s*Calidad\s*]+>[\[]\s*(?P.*?)?\s*[\]]<\/span>" + + if not scrapertools.find_single_match(info, pattern): #en caso de error de formato, creo uno básico + logger.debug("patron episodioNEW: " + pattern) + logger.debug(info) + info = '>%sTemporada %s Capitulo 0 - Español Castellano Calidad [%s]' % (item.contentTitle, season, item.infoLabels['quality']) r = re.compile(pattern) match = [m.groupdict() for m in r.finditer(info)][0] + if match['season'] is None: match['season'] = season + if match['episode'] is None: match['episode'] = "0" + if match['quality']: + item.quality = match['quality'] + if match["episode2"]: multi = True - title = "%s (%sx%s-%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2), - str(match["episode2"]).zfill(2), match["lang"], - match["quality"]) + title = "%s (%sx%s-%s) [%s]" % (item.show, match["season"], str(match["episode"]).zfill(2), + str(match["episode2"]).zfill(2), match["lang"]) + if not config.get_setting("unify") and match["quality"]: #Si Titulos Inteligentes NO seleccionados: + title = "%s[%s]" % (title, match["quality"]) else: multi = False - title = "%s (%sx%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2), - match["lang"], match["quality"]) + title = "%s (%sx%s) [%s]" % (item.show, match["season"], str(match["episode"]).zfill(2), + match["lang"]) + if not config.get_setting("unify") and match["quality"]: #Si Titulos Inteligentes NO seleccionados: + title = "%s[%s]" % (title, match["quality"]) else: # old style - pattern = "\[(?P.*?)\].*?\[Cap.(?P\d+)(?P\d{2})(?:_(?P\d+)" \ + if scrapertools.find_single_match(info, '\[\d{3}\]'): + info = re.sub(r'\[(\d{3}\])', r'[Cap.\1', info) + elif scrapertools.find_single_match(info, '\[Cap.\d{2}_\d{2}\]'): + info = re.sub(r'\[Cap.(\d{2})_(\d{2})\]', r'[Cap.1\1_1\2]', info) + elif scrapertools.find_single_match(info, '\[Cap.([A-Za-z]+)\]'): + info = re.sub(r'\[Cap.([A-Za-z]+)\]', '[Cap.100]', info) + if scrapertools.find_single_match(info, '\[Cap.\d{2,3}'): + pattern = "\[(?P.*?)\].*?\[Cap.(?P\d).*?(?P\d{2})(?:_(?P\d+)" \ "(?P\d{2}))?.*?\].*?(?:\[(?P.*?)\])?" + elif scrapertools.find_single_match(info, 'Cap.\d{2,3}'): + pattern = ".*?Temp.*?\s(?P.*?)\s.*?Cap.(?P\d).*?(?P\d{2})(?:_(?P\d+)(?P\d{2}))?.*?\s(?P.*)?" 
+ if not scrapertools.find_single_match(info, pattern): #en caso de error de formato, creo uno básico + logger.debug("patron episodioOLD: " + pattern) + logger.debug(info) + info = '%s [%s][Cap.%s00][Español]' % (item.contentTitle, item.infoLabels['quality'], season) r = re.compile(pattern) match = [m.groupdict() for m in r.finditer(info)][0] - # logger.debug("data %s" % match) str_lang = "" + if match['quality']: + item.quality = match['quality'] + if match["lang"] is not None: str_lang = "[%s]" % match["lang"] - + item.quality = "%s %s" % (item.quality, match['lang']) + if match["season2"] and match["episode2"]: multi = True if match["season"] == match["season2"]: - title = "%s (%sx%s-%s) %s[%s]" % (item.show, match["season"], match["episode"], - match["episode2"], str_lang, match["quality"]) + title = "%s (%sx%s-%s) %s" % (item.show, match["season"], match["episode"], + match["episode2"], str_lang) + if not config.get_setting("unify") and match["quality"]: #Si Titulos Inteligentes NO seleccionados: + title = "%s[%s]" % (title, match["quality"]) else: - title = "%s (%sx%s-%sx%s) %s[%s]" % (item.show, match["season"], match["episode"], - match["season2"], match["episode2"], str_lang, - match["quality"]) + title = "%s (%sx%s-%sx%s) %s" % (item.show, match["season"], match["episode"], + match["season2"], match["episode2"], str_lang) + if not config.get_setting("unify") and match["quality"]: #Si Titulos Inteligentes NO seleccionados: + title = "%s[%s]" % (title, match["quality"]) else: - title = "%s (%sx%s) %s[%s]" % (item.show, match["season"], match["episode"], str_lang, - match["quality"]) + title = "%s (%sx%s) %s" % (item.show, match["season"], match["episode"], str_lang) + if not config.get_setting("unify") and match["quality"]: #Si Titulos Inteligentes NO seleccionados: + title = "%s[%s]" % (title, match["quality"]) multi = False season = match['season'] episode = match['episode'] + logger.debug("title: " + title + " / url: " + url + " / calidad: " + item.quality + " / multi: " + str(multi) + " / Season: " + str(season) + " / EpisodeNumber: " + str(episode)) itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, quality=item.quality, multi=multi, contentSeason=season, contentEpisodeNumber=episode, infoLabels = infoLabels)) - # order list + #tmdb.set_infoLabels(itemlist, True) tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True) if len(itemlist) > 1: itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber))) if config.get_videolibrary_support() and len(itemlist) > 0: itemlist.append( - item.clone(title="[COLOR orange][B]Añadir esta serie a la videoteca[/B][/COLOR]", action="add_serie_to_library", extra="episodios")) + item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios", quality=calidad)) return itemlist @@ -459,7 +611,7 @@ def search(item, texto): item.post = "q=%s" % texto item.pattern = "buscar-list" itemlist = listado_busqueda(item) - + return itemlist # Se captura la excepción, para no interrumpir al buscador global si un canal falla @@ -485,6 +637,24 @@ def newest(categoria): itemlist.extend(listado(item)) if itemlist[-1].title == ">> Página siguiente": itemlist.pop() + + if categoria == 'peliculas 4k': + item.url = host+'peliculas-hd/4kultrahd/' + itemlist.extend(listado(item)) + if itemlist[-1].title == ">> Página siguiente": + itemlist.pop() + + if categoria == 'anime': + item.url = host+'anime/' + itemlist.extend(listado(item)) + if 
itemlist[-1].title == ">> Página siguiente": + itemlist.pop() + + if categoria == 'documentales': + item.url = host+'documentales/' + itemlist.extend(listado(item)) + if itemlist[-1].title == ">> Página siguiente": + itemlist.pop() # Se captura la excepción, para no interrumpir al canal novedades si un canal falla except: diff --git a/plugin.video.alfa/channels/mejortorrent.py b/plugin.video.alfa/channels/mejortorrent.py index f085f160..c31662f6 100755 --- a/plugin.video.alfa/channels/mejortorrent.py +++ b/plugin.video.alfa/channels/mejortorrent.py @@ -254,7 +254,7 @@ def episodios(item): url = host + scrapertools.find_single_match(data,patron) # "episodios%5B1%5D=11744&total_capis=5&tabla=series&titulo=Sea+Patrol+-+2%AA+Temporada" post = urllib.urlencode({name: value, "total_capis": total_capis, "tabla": tabla, "titulo": titulo}) - logger.debug("post=" + post) + #logger.debug("post=" + post) if item.extra == "series": epi = scrapedtitle.split("x") @@ -311,7 +311,6 @@ def show_movie_info(item): pass data = httptools.downloadpage(item.url).data - logger.debug("data=" + data) patron = "") + if scrapertools.find_single_match(torrent_data, ""): + link = scrapertools.get_match(torrent_data, "") + else: + link = scrapertools.get_match(torrent_data, "") link = urlparse.urljoin(url, link) logger.debug("link=" + link) itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link, @@ -363,7 +364,7 @@ def play(item): else: #data = httptools.downloadpage(item.url, post=item.extra).data data = httptools.downloadpage(item.url).data - logger.debug("data=" + data) + #logger.debug("data=" + data) params = dict(urlparse.parse_qsl(item.extra)) patron = host + "/secciones.php?sec=descargas&ap=contar&tabla=" + params["tabla"] + "&id=" + item.id @@ -373,7 +374,9 @@ def play(item): data = httptools.downloadpage(patron).data patron = "Pincha " - link = host + scrapertools.find_single_match(data, patron) + link = scrapertools.find_single_match(data, patron) + if not host in link: + link = host + link logger.info("link=" + link) itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link, thumbnail=item.thumbnail, plot=item.plot, folder=False)) diff --git a/plugin.video.alfa/channels/mispelisyseries.json b/plugin.video.alfa/channels/mispelisyseries.json index 9d5ae057..73b0db8b 100755 --- a/plugin.video.alfa/channels/mispelisyseries.json +++ b/plugin.video.alfa/channels/mispelisyseries.json @@ -9,7 +9,8 @@ "categories": [ "torrent", "movie", - "tvshow" + "tvshow", + "documentary" ], "settings": [ { diff --git a/plugin.video.alfa/channels/mispelisyseries.py b/plugin.video.alfa/channels/mispelisyseries.py index ba65968e..7c6ae65b 100644 --- a/plugin.video.alfa/channels/mispelisyseries.py +++ b/plugin.video.alfa/channels/mispelisyseries.py @@ -1,137 +1,73 @@ -# -*- coding: utf-8 -*- +# -*- coding: utf-8 -*- import re -import urllib -import urlparse +from channelselector import get_thumb from core import httptools from core import scrapertools from core import servertools from core.item import Item -from platformcode import logger -from channelselector import get_thumb +from platformcode import config, logger +from core import tmdb host = 'http://mispelisyseries.com/' + def mainlist(item): logger.info() itemlist = [] - itemlist.append(Item(channel=item.channel, action="menu", title="Películas", url=host, - extra="Peliculas", folder=True, thumbnail=get_thumb('movies', auto=True))) + + thumb_pelis=get_thumb("channels_movie.png") + 
thumb_series=get_thumb("channels_tvshow.png") + thumb_search = get_thumb("search.png") + + itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=host, + extra="peliculas", thumbnail=thumb_pelis )) + + itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=host, extra="series", + thumbnail=thumb_series)) + + itemlist.append(Item(channel=item.channel, action="submenu", title="Documentales", url=host, extra="varios", + thumbnail=thumb_series)) itemlist.append( - Item(channel=item.channel, action="menu", title="Series", url=host, extra="Series", - folder=True, thumbnail=get_thumb('tvshows', auto=True))) - itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url=host + 'buscar', - thumbnail=get_thumb('search', auto=True))) + Item(channel=item.channel, action="search", title="Buscar", url=host + "buscar", thumbnail=thumb_search)) + return itemlist - -def menu(item): +def submenu(item): logger.info() itemlist = [] - data = httptools.downloadpage(item.url).data - # logger.info("data="+data) + data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) + data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") + data = data.replace("'", "\"").replace("/series\"", "/series/\"") #Compatibilidad con mispelisy.series.com - data = scrapertools.find_single_match(data, item.extra + "") - # logger.info("data="+data) + patron = '
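The mejortorrent.py play() fix above tries the new link pattern first, falls back to the old one, and then guards against links that are already absolute. The last step reduced to a helper (absolutize is an illustrative name):

    def absolutize(host, link):
        # only prepend the host when the scraped link is relative
        if host not in link:
            link = host + link
        return link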
  • .*?(.*?)' + if "pelisyseries.com" in host and item.extra == "varios": #compatibilidad con mispelisy.series.com + data = ' Documentales' + else: + data = scrapertools.get_match(data, patron) - patron = "
  • ]+>([^<]+)
  • " + patron = '<.*?href="([^"]+)".*?>([^>]+)' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, scrapedtitle in matches: - title = scrapedtitle - url = urlparse.urljoin(item.url, scrapedurl) - thumbnail = "" - plot = "" - itemlist.append(Item(channel=item.channel, action="lista", title=title, url=url, thumbnail=thumbnail, plot=plot, - folder=True)) - - - if title != "Todas las Peliculas": - itemlist.append( - Item(channel=item.channel, action="alfabetico", title=title + " [A-Z]", url=url, thumbnail=thumbnail, - plot=plot, folder=True)) - + title = scrapedtitle.strip() + url = scrapedurl + itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra="pelilist")) itemlist.append( - Item(channel=item.channel, action="alfabetico", title=title + " [A-Z]", url=url, thumbnail=thumbnail, - plot=plot, - folder=True)) - - if 'películas' in item.title.lower(): - new_item = item.clone(title='Peliculas 4K', url=host+'buscar', post='q=4k', action='listado2', - pattern='buscar-list') - itemlist.append(new_item) - + Item(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url, extra="pelilist")) + + if item.extra == "peliculas": + itemlist.append(Item(channel=item.channel, action="listado", title="Películas 4K", url=host + "peliculas-hd/4kultrahd/", extra="pelilist")) + itemlist.append( + Item(channel=item.channel, action="alfabeto", title="Películas 4K" + " [A-Z]", url=host + "peliculas-hd/4kultrahd/", extra="pelilist")) + return itemlist -def search(item, texto): - logger.info("search:" + texto) - # texto = texto.replace(" ", "+") - - #try: - item.post = "q=%s" % texto - item.pattern = "buscar-list" - itemlist = listado2(item) - - return itemlist - - # Se captura la excepción, para no interrumpir al buscador global si un canal falla - # except: - # import sys - # for line in sys.exc_info(): - # logger.error("%s" % line) - # return [] - -def newest(categoria): - itemlist = [] - item = Item() - try: - if categoria in ['peliculas', 'torrent']: - item.url = host+"peliculas" - - elif categoria == 'series': - item.url = host+"series" - - if categoria == '4k': - - item.url = Host + '/buscar' - - item.post = 'q=4k' - - item.pattern = 'buscar-list' - - action = listado2(item) - - else: - return [] - - itemlist = lista(item) - if itemlist[-1].title == ">> Página siguiente": - itemlist.pop() - - # Esta pagina coloca a veces contenido duplicado, intentamos descartarlo - dict_aux = {} - for i in itemlist: - if not i.url in dict_aux: - dict_aux[i.url] = i - else: - itemlist.remove(i) - - # Se captura la excepción, para no interrumpir al canal novedades si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("{0}".format(line)) - return [] - - # return dict_aux.values() - return itemlist - - -def alfabetico(item): +def alfabeto(item): logger.info() itemlist = [] @@ -148,93 +84,113 @@ def alfabetico(item): title = scrapedtitle.upper() url = scrapedurl - itemlist.append(Item(channel=item.channel, action="lista", title=title, url=url)) + itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra=item.extra)) return itemlist -def lista(item): +def listado(item): logger.info() itemlist = [] + url_next_page ='' - # Descarga la pagina - data = httptools.downloadpage(item.url, post=item.extra).data - # logger.info("data="+data) - - bloque = scrapertools.find_single_match(data, '(?:' - #patron_ver = '
    ]+>.*?' - - #match_ver = scrapertools.find_single_match(data, patron_ver) - #match_descargar = scrapertools.find_single_match(data, patron_descargar) - - #patron = '
    <\/div[^<]+
    ([^<]+)?<\/div[^<]+
    ([^<]+)?' patron += '<\/div[^<]+
    ([^<]+)?<\/div[^<]+
    0: + if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados: + itemlist.append(item.clone(title="[COLOR gold]**- Enlaces Ver: -**[/COLOR]", action="", folder=False)) + else: + itemlist.append(item.clone(title="[COLOR gold] Enlaces Ver: [/COLOR]", action="", folder=False)) for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver: if "Ver" in titulo: servidor = servidor.replace("streamin", "streaminto") - titulo = titulo + " [" + servidor + "]" + titulo = title mostrar_server = True if config.get_setting("hidepremium"): mostrar_server = servertools.is_server_enabled(servidor) + titulo = '[COLOR yellow][%s]-[/COLOR] %s [online]' % (servidor.capitalize(), titulo) + logger.debug("VER: url: " + enlace + " / title: " + titulo + " / servidor: " + servidor + " / idioma: " + idioma) + if mostrar_server: try: devuelve = servertools.findvideosbyserver(enlace, servidor) @@ -329,31 +412,46 @@ def findvideos(item): enlace = devuelve[0][1] itemlist.append( Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo, - fulltitle=item.title, url=enlace, thumbnail=logo, plot=item.plot, folder=False)) + fulltitle=title, url=enlace, thumbnail=logo, plot=item.plot, infoLabels=item.infoLabels, folder=False)) except: pass + if len(enlaces_descargar) > 0: + if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados: + itemlist.append(item.clone(title="[COLOR gold]**- Enlaces Descargar: -**[/COLOR]", action="", folder=False)) + else: + itemlist.append(item.clone(title="[COLOR gold] Enlaces Descargar: [/COLOR]", action="", folder=False)) + for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar: if "Ver" not in titulo: servidor = servidor.replace("uploaded", "uploadedto") partes = enlace.split(" ") + titulo = "Descarga " p = 1 + logger.debug("DESCARGAR: url: " + enlace + " / title: " + titulo + title + " / servidor: " + servidor + " / idioma: " + idioma) for enlace in partes: - parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " [" + servidor + "]" + parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) p += 1 mostrar_server = True if config.get_setting("hidepremium"): mostrar_server = servertools.is_server_enabled(servidor) + parte_titulo = '[COLOR yellow][%s]-[/COLOR] %s' % (servidor.capitalize(), parte_titulo) + if item.infoLabels['quality']: + if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados: + parte_titulo = '%s [%s]' %(parte_titulo, item.infoLabels['quality']) + else: + parte_titulo = '%s (%s)' %(parte_titulo, item.infoLabels['quality']) if mostrar_server: try: devuelve = servertools.findvideosbyserver(enlace, servidor) if devuelve: enlace = devuelve[0][1] itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, - title=parte_titulo, fulltitle=item.title, url=enlace, thumbnail=logo, - plot=item.plot, folder=False)) + title=parte_titulo, fulltitle=title, url=enlace, thumbnail=logo, + plot=item.plot, infoLabels=item.infoLabels, folder=False)) except: pass + return itemlist @@ -363,6 +461,8 @@ def episodios(item): infoLabels = item.infoLabels data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data) data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") + calidad = item.quality + pattern = '
      (.*?)
    ' % "pagination" # item.pattern pagination = scrapertools.find_single_match(data, pattern) if pagination: @@ -378,76 +478,128 @@ def episodios(item): list_pages = [item.url] for index, page in enumerate(list_pages): - logger.debug("Loading page %s/%s url=%s" % (index, len(list_pages), page)) data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data) data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") - + data = data.replace("chapters", "buscar-list") #Compatibilidad con mispelisy.series.com pattern = '
      (.*?)
    ' % "buscar-list" # item.pattern - data = scrapertools.get_match(data, pattern) + if scrapertools.find_single_match(data, pattern): + data = scrapertools.get_match(data, pattern) + else: + logger.debug(item) + logger.debug("data: " + data) + return itemlist - pattern = ']*>
    ]+>(?P.*?)

  • .*?
      (.*?)
    ' - patron = '
  • .*?
      (.*?)
    ' #Filtrado por url - data = scrapertools.get_match(data, patron) + patron = '
  • .*?(.*?)' + if "pelisyseries.com" in host and item.extra == "varios": #compatibilidad con mispelisy.series.com + data = ' Documentales' + else: + data = scrapertools.get_match(data, patron) - patron = '([^>]+)' + patron = '<.*?href="([^"]+)".*?>([^>]+)' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, scrapedtitle in matches: @@ -92,15 +95,11 @@ def listado(item): url_next_page ='' data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) + #data = httptools.downloadpage(item.url).data data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") - #logger.debug(data) - logger.debug('item.modo: %s'%item.modo) - logger.debug('item.extra: %s'%item.extra) if item.modo != 'next' or item.modo =='': - logger.debug('item.title: %s'% item.title) patron = '
      (.*?)
    ' - logger.debug("patron=" + patron) fichas = scrapertools.get_match(data, patron) page_extra = item.extra else: @@ -109,11 +108,11 @@ def listado(item): patron = '(.*?)<\/b><\/font>') + real_title = scrapertools.find_single_match(title, r'(.*?)Temporada.*?<\/strong>') #series + if not real_title: + real_title = scrapertools.find_single_match(title, r'(.*?)\[.*?]') #movies real_title = scrapertools.remove_htmltags(real_title).decode('iso-8859-1').encode('utf-8') + real_title = scrapertools.htmlclean(real_title) + real_title = real_title.replace("�", "ñ").replace("ñ", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "").replace("de la Serie", "") + calidad = scrapertools.find_single_match(title, r'.*?\s*Calidad.*?]+>[\[]\s*(?P.*?)\s*[\]]<\/span>') #series + if calidad == "": + calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])') #movies + year = scrapertools.find_single_match(thumb, r'-(\d{4})') + + # fix encoding for title title = scrapertools.htmlclean(title) - title = title.replace("�", "ñ") + title = re.sub(r'(Calidad.*?\])', '', title) + title = title.replace("�", "ñ").replace("ñ", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "").replace("de la Serie", "") + if real_title == "": + real_title = title + if calidad == "": + calidad = title + context = "movie" + url_real = True # no mostramos lo que no sean videos - if "/juego/" in url or "/varios/" in url: + if "juego/" in url: continue - if ".com/series" in url: + # Codigo para rescatar lo que se puede en pelisy.series.com de Series para la Videoteca. la URL apunta al capítulo y no a la Serie. Nombre de Serie frecuentemente en blanco. Se obtiene de Thumb, así como el id de la serie + if ("/serie" in url or "-serie" in url) and "pelisyseries.com" in host: + if "seriehd" in url: + calidad_mps = "series-hd/" + elif "serievo" in url: + calidad_mps = "series-vo/" + elif "serie-vo" in url: + calidad_mps = "series-vo/" + else: + calidad_mps = "series/" + + if "no_image" in thumb: + real_title_mps = title + else: + real_title_mps = re.sub(r'.*?\/\d+_', '', thumb) + real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps) + + if "/0_" not in thumb and not "no_image" in thumb: + serieid = scrapertools.find_single_match(thumb, r'.*?\/\w\/(?P\d+).*?.*') + if len(serieid) > 5: + serieid = "" + else: + serieid = "" - show = real_title + #detectar si la url creada de tvshow es válida o hay que volver atras + url_tvshow = host + calidad_mps + real_title_mps + "/" + url_id = host + calidad_mps + real_title_mps + "/" + serieid + data_serie = data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(url_id).data) + data_serie = unicode(data_serie, "iso-8859-1", errors="replace").encode("utf-8") + data_serie = data_serie.replace("chapters", "buscar-list") + pattern = '
      (.*?)
    ' % "buscar-list" # item.pattern + if not scrapertools.find_single_match(data_serie, pattern): + data_serie = data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(url_tvshow).data) + data_serie = unicode(data_serie, "iso-8859-1", errors="replace").encode("utf-8") + data_serie = data_serie.replace("chapters", "buscar-list") + if not scrapertools.find_single_match(data_serie, pattern): + context = "movie" + url_real = False + if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados: + if calidad: + title = title + '[' + calidad + "]" + else: + url = url_tvshow + else: + url = url_id + + real_title_mps = real_title_mps.replace("-", " ") + logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / real_title_mps: " + real_title_mps + " / calidad_mps : " + calidad_mps + " / context : " + context) + real_title = real_title_mps + + show = real_title - itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb, - context=["buscar_trailer"], contentSerieName=show)) + if ".com/serie" in url and "/miniseries" not in url and url_real: + if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados: + if calidad: + title = title + '[' + calidad + "]" + context = "tvshow" + + itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb, quality=calidad, + show=show, extra="serie", context=["buscar_trailer"], contentType=context, contentTitle=real_title, contentSerieName=real_title, infoLabels= {'year':year})) else: + if config.get_setting("unify"): #Si Titulos Inteligentes SI seleccionados: + title = real_title - itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, - context=["buscar_trailer"])) - + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, quality=calidad, + show=show, context=["buscar_trailer"], contentType=context, contentTitle=real_title, infoLabels= {'year':year})) + + logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / show: " + show + " / calidad: " + calidad) + + tmdb.set_infoLabels(itemlist, True) + if post: - itemlist.append(item.clone(channel=item.channel, action="listado2", title=">> Página siguiente", - thumbnail=get_thumb("next.png"))) + itemlist.append(item.clone(channel=item.channel, action="listado_busqueda", title=">> Página siguiente", + text_color='yellow', text_bold=True, thumbnail=get_thumb("next.png"))) return itemlist def findvideos(item): logger.info() itemlist = [] - ## Cualquiera de las tres opciones son válidas # item.url = item.url.replace(".com/",".com/ver-online/") # item.url = item.url.replace(".com/",".com/descarga-directa/") @@ -263,65 +324,87 @@ def findvideos(item): data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") data = data.replace("$!", "#!").replace("'", "\"").replace("ñ", "ñ").replace("//pictures", "/pictures") - - title = scrapertools.find_single_match(data, "

    ([^<]+)<\/strong>[^<]+<\/h1>") - title += scrapertools.find_single_match(data, "

    [^<]+<\/strong>([^<]+)<\/h1>") - caratula = scrapertools.find_single_match(data, '
    .*?src="([^"]+)"') - - #
    Descarga tu Archivo torrent!
    + + title = scrapertools.find_single_match(data, "([^<]+)<\/strong>.*?<\/h1>") #corregido para adaptarlo a mispelisy.series.com + title += scrapertools.find_single_match(data, "[^<]+<\/strong>([^<]+)<\/h1>") #corregido para adaptarlo a mispelisy.series.com + #caratula = scrapertools.find_single_match(data, '
    .*?src="([^"]+)"') + caratula = scrapertools.find_single_match(data, ']+>.*?' - #patron_ver = '
    ]+>.*?' - - #match_ver = scrapertools.find_single_match(data, patron_ver) - #match_descargar = scrapertools.find_single_match(data, patron_descargar) - - #patron = '
    <\/div[^<]+
    ([^<]+)?<\/div[^<]+
    ([^<]+)?' patron += '<\/div[^<]+
    ([^<]+)?<\/div[^<]+
    0: + if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados: + itemlist.append(item.clone(title="[COLOR gold]**- Enlaces Ver: -**[/COLOR]", action="", folder=False)) + else: + itemlist.append(item.clone(title="[COLOR gold] Enlaces Ver: [/COLOR]", action="", folder=False)) for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver: if "Ver" in titulo: servidor = servidor.replace("streamin", "streaminto") - titulo = titulo + " [" + servidor + "]" + titulo = title mostrar_server = True if config.get_setting("hidepremium"): mostrar_server = servertools.is_server_enabled(servidor) + titulo = '[COLOR yellow][%s]-[/COLOR] %s [online]' % (servidor.capitalize(), titulo) + logger.debug("VER: url: " + enlace + " / title: " + titulo + " / servidor: " + servidor + " / idioma: " + idioma) + if mostrar_server: try: devuelve = servertools.findvideosbyserver(enlace, servidor) @@ -329,31 +412,46 @@ def findvideos(item): enlace = devuelve[0][1] itemlist.append( Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo, - fulltitle=item.title, url=enlace, thumbnail=logo, plot=item.plot, folder=False)) + fulltitle=title, url=enlace, thumbnail=logo, plot=item.plot, infoLabels=item.infoLabels, folder=False)) except: pass + if len(enlaces_descargar) > 0: + if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados: + itemlist.append(item.clone(title="[COLOR gold]**- Enlaces Descargar: -**[/COLOR]", action="", folder=False)) + else: + itemlist.append(item.clone(title="[COLOR gold] Enlaces Descargar: [/COLOR]", action="", folder=False)) + for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar: if "Ver" not in titulo: servidor = servidor.replace("uploaded", "uploadedto") partes = enlace.split(" ") + titulo = "Descarga " p = 1 + logger.debug("DESCARGAR: url: " + enlace + " / title: " + titulo + title + " / servidor: " + servidor + " / idioma: " + idioma) for enlace in partes: - parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " [" + servidor + "]" + parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) p += 1 mostrar_server = True if config.get_setting("hidepremium"): mostrar_server = servertools.is_server_enabled(servidor) + parte_titulo = '[COLOR yellow][%s]-[/COLOR] %s' % (servidor.capitalize(), parte_titulo) + if item.infoLabels['quality']: + if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados: + parte_titulo = '%s [%s]' %(parte_titulo, item.infoLabels['quality']) + else: + parte_titulo = '%s (%s)' %(parte_titulo, item.infoLabels['quality']) if mostrar_server: try: devuelve = servertools.findvideosbyserver(enlace, servidor) if devuelve: enlace = devuelve[0][1] itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, - title=parte_titulo, fulltitle=item.title, url=enlace, thumbnail=logo, - plot=item.plot, folder=False)) + title=parte_titulo, fulltitle=title, url=enlace, thumbnail=logo, + plot=item.plot, infoLabels=item.infoLabels, folder=False)) except: pass + return itemlist @@ -363,6 +461,8 @@ def episodios(item): infoLabels = item.infoLabels data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data) data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") + calidad = item.quality + pattern = '
      <ul class="%s">(.*?)</ul>
    ' % "pagination" # item.pattern pagination = scrapertools.find_single_match(data, pattern) if pagination: @@ -378,76 +478,128 @@ def episodios(item): list_pages = [item.url] for index, page in enumerate(list_pages): - logger.debug("Loading page %s/%s url=%s" % (index, len(list_pages), page)) data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data) data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") - + data = data.replace("chapters", "buscar-list") #Compatibilidad con mispelisy.series.com pattern = '
      <ul class="%s">(.*?)</ul>
    ' % "buscar-list" # item.pattern - data = scrapertools.get_match(data, pattern) + if scrapertools.find_single_match(data, pattern): + data = scrapertools.get_match(data, pattern) + else: + logger.debug(item) + logger.debug("data: " + data) + return itemlist - pattern = ']*>
]+>(?P<info>.*?)

    ' + if "pelisyseries.com" in host: + pattern = ']*>
]+>(?P<info>.*?)?<\/h3>.*?<\/li>' + else: + pattern = ']*>]+>(?P<info>.*?)?<\/h2>' matches = re.compile(pattern, re.DOTALL).findall(data) + #logger.debug("patron: " + pattern) + #logger.debug(matches) + + season = "1" for url, thumb, info in matches: + if "pelisyseries.com" in host: + interm = url + url = thumb + thumb = interm + if "\d+)?)<.+?]+>(?P<lang>.*?)\s*Calidad\s*]+>" \ - "[\[]\s*(?P<quality>.*?)\s*[\]]" + pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)?.*?Capitulo(?:s)?\s*(?P<episode>\d+)?" \ + "(?:.*?(?P<episode2>\d+)?)<.+?]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*]+>" \ + "[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>" + if "Especial" in info: # Capitulos Especiales + pattern = ".*?[^>]+>.*?Temporada.*?\[.*?(?P<season>\d+).*?\].*?Capitulo.*?\[\s*(?P<episode>\d+).*?\]?(?:.*?(?P<episode2>\d+)?)<.+?]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*]+>[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>" + + if not scrapertools.find_single_match(info, pattern): #en caso de error de formato, creo uno básico + logger.debug("patron episodioNEW: " + pattern) + logger.debug(info) + info = '>%sTemporada %s Capitulo 0 - Español Castellano Calidad [%s]' % (item.contentTitle, season, item.infoLabels['quality']) r = re.compile(pattern) match = [m.groupdict() for m in r.finditer(info)][0] + if match['season'] is None: match['season'] = season + if match['episode'] is None: match['episode'] = "0" + if match['quality']: + item.quality = match['quality'] + if match["episode2"]: multi = True - title = "%s (%sx%s-%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2), - str(match["episode2"]).zfill(2), match["lang"], - match["quality"]) + title = "%s (%sx%s-%s) [%s]" % (item.show, match["season"], str(match["episode"]).zfill(2), + str(match["episode2"]).zfill(2), match["lang"]) + if not config.get_setting("unify") and match["quality"]: #Si Titulos Inteligentes NO seleccionados: + title = "%s[%s]" % (title, match["quality"]) else: multi = False - title = "%s (%sx%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2), - match["lang"], match["quality"]) + title = "%s (%sx%s) [%s]" % (item.show, match["season"], str(match["episode"]).zfill(2), + match["lang"]) + if not config.get_setting("unify") and match["quality"]: #Si Titulos Inteligentes NO seleccionados: + title = "%s[%s]" % (title, match["quality"]) else: # old style - pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+)(?P<episode>\d{2})(?:_(?P<season2>\d+)" \ + if scrapertools.find_single_match(info, '\[\d{3}\]'): + info = re.sub(r'\[(\d{3}\])', r'[Cap.\1', info) + elif scrapertools.find_single_match(info, '\[Cap.\d{2}_\d{2}\]'): + info = re.sub(r'\[Cap.(\d{2})_(\d{2})\]', r'[Cap.1\1_1\2]', info) + elif scrapertools.find_single_match(info, '\[Cap.([A-Za-z]+)\]'): + info = re.sub(r'\[Cap.([A-Za-z]+)\]', '[Cap.100]', info) + if scrapertools.find_single_match(info, '\[Cap.\d{2,3}'): + pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d).*?(?P<episode>\d{2})(?:_(?P<season2>\d+)" \ + "(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?" + elif scrapertools.find_single_match(info, 'Cap.\d{2,3}'): + pattern = ".*?Temp.*?\s(?P<quality>.*?)\s.*?Cap.(?P<season>\d).*?(?P<episode>\d{2})(?:_(?P<season2>\d+)(?P<episode2>\d{2}))?.*?\s(?P<lang>.*)?"
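
The reworked episodios() parser above reads its fields back through groupdict() and then backfills anything the regex missed (season from a running counter, episode to "0"). A minimal, runnable sketch of that idiom, using a simplified pattern and sample string of my own rather than the channel's exact regex:

    # -*- coding: utf-8 -*-
    # Sketch of the named-group parsing used by episodios(); pattern and sample
    # string are simplified stand-ins, only the group names match the channel's.
    import re

    info = 'Serie X - Temporada 2 Capitulo 5 - Español Castellano Calidad [HDTV 720p]'
    pattern = (r"Temporada\s*(?P<season>\d+).*?Capitulo(?:s)?\s*(?P<episode>\d+)"
               r"(?:.*?(?P<episode2>\d+))?.*?Calidad\s*\[\s*(?P<quality>.*?)\s*\]")

    r = re.compile(pattern)
    match = [m.groupdict() for m in r.finditer(info)][0]  # same idiom as the channel

    # Groups that did not participate come back as None, which is why the channel
    # backfills season/episode before building the display title.
    season = match['season'] or "1"
    episode = (match['episode'] or "0").zfill(2)
    print(season, episode, match['quality'])  # -> 2 05 HDTV 720p

Making the groups optional in the channel's real patterns lets one compiled regex absorb the site's inconsistent episode labels; the None checks in the diff then normalize whatever the match missed.
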
+ if not scrapertools.find_single_match(info, pattern): #en caso de error de formato, creo uno básico + logger.debug("patron episodioOLD: " + pattern) + logger.debug(info) + info = '%s [%s][Cap.%s00][Español]' % (item.contentTitle, item.infoLabels['quality'], season) r = re.compile(pattern) match = [m.groupdict() for m in r.finditer(info)][0] - # logger.debug("data %s" % match) str_lang = "" + if match['quality']: + item.quality = match['quality'] + if match["lang"] is not None: str_lang = "[%s]" % match["lang"] - + item.quality = "%s %s" % (item.quality, match['lang']) + if match["season2"] and match["episode2"]: multi = True if match["season"] == match["season2"]: - title = "%s (%sx%s-%s) %s[%s]" % (item.show, match["season"], match["episode"], - match["episode2"], str_lang, match["quality"]) + title = "%s (%sx%s-%s) %s" % (item.show, match["season"], match["episode"], + match["episode2"], str_lang) + if not config.get_setting("unify") and match["quality"]: #Si Titulos Inteligentes NO seleccionados: + title = "%s[%s]" % (title, match["quality"]) else: - title = "%s (%sx%s-%sx%s) %s[%s]" % (item.show, match["season"], match["episode"], - match["season2"], match["episode2"], str_lang, - match["quality"]) + title = "%s (%sx%s-%sx%s) %s" % (item.show, match["season"], match["episode"], + match["season2"], match["episode2"], str_lang) + if not config.get_setting("unify") and match["quality"]: #Si Titulos Inteligentes NO seleccionados: + title = "%s[%s]" % (title, match["quality"]) else: - title = "%s (%sx%s) %s[%s]" % (item.show, match["season"], match["episode"], str_lang, - match["quality"]) + title = "%s (%sx%s) %s" % (item.show, match["season"], match["episode"], str_lang) + if not config.get_setting("unify") and match["quality"]: #Si Titulos Inteligentes NO seleccionados: + title = "%s[%s]" % (title, match["quality"]) multi = False season = match['season'] episode = match['episode'] + logger.debug("title: " + title + " / url: " + url + " / calidad: " + item.quality + " / multi: " + str(multi) + " / Season: " + str(season) + " / EpisodeNumber: " + str(episode)) itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, quality=item.quality, multi=multi, contentSeason=season, contentEpisodeNumber=episode, infoLabels = infoLabels)) - # order list + #tmdb.set_infoLabels(itemlist, True) tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True) if len(itemlist) > 1: itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber))) if config.get_videolibrary_support() and len(itemlist) > 0: itemlist.append( - item.clone(title="[COLOR orange][B]Añadir esta serie a la videoteca[/B][/COLOR]", action="add_serie_to_library", extra="episodios")) + item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios", quality=calidad)) return itemlist @@ -458,8 +610,8 @@ def search(item, texto): try: item.post = "q=%s" % texto item.pattern = "buscar-list" - itemlist = listado2(item) - + itemlist = listado_busqueda(item) + return itemlist # Se captura la excepción, para no interrumpir al buscador global si un canal falla @@ -485,6 +637,24 @@ def newest(categoria): itemlist.extend(listado(item)) if itemlist[-1].title == ">> Página siguiente": itemlist.pop() + + if categoria == 'peliculas 4k': + item.url = host+'peliculas-hd/4kultrahd/' + itemlist.extend(listado(item)) + if itemlist[-1].title == ">> Página siguiente": + itemlist.pop() + + if categoria == 'anime': + item.url = host+'anime/' + 
itemlist.extend(listado(item)) + if itemlist[-1].title == ">> Página siguiente": + itemlist.pop() + + if categoria == 'documentales': + item.url = host+'documentales/' + itemlist.extend(listado(item)) + if itemlist[-1].title == ">> Página siguiente": + itemlist.pop() # Se captura la excepción, para no interrumpir al canal novedades si un canal falla except: diff --git a/plugin.video.alfa/channels/tumejortorrent.json b/plugin.video.alfa/channels/tumejortorrent.json new file mode 100644 index 00000000..9f11c74c --- /dev/null +++ b/plugin.video.alfa/channels/tumejortorrent.json @@ -0,0 +1,58 @@ +{ + "id": "tumejortorrent", + "name": "Tumejortorrent", + "active": true, + "adult": false, + "language": ["cast", "lat"], + "thumbnail": "tumejortorrent.png", + "banner": "tumejortorrent.png", + "categories": [ + "movie", + "tvshow", + "anime", + "torrent", + "documentary" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Peliculas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_series", + "type": "bool", + "label": "Incluir en Novedades - Episodios de series", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_torrent", + "type": "bool", + "label": "Incluir en Novedades - Torrent", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_4k", + "type": "bool", + "label": "Incluir en Novedades - 4K", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/tumejortorrent.py b/plugin.video.alfa/channels/tumejortorrent.py new file mode 100644 index 00000000..10cf34e5 --- /dev/null +++ b/plugin.video.alfa/channels/tumejortorrent.py @@ -0,0 +1,666 @@ +# -*- coding: utf-8 -*- + +import re + +from channelselector import get_thumb +from core import httptools +from core import scrapertools +from core import servertools +from core.item import Item +from platformcode import config, logger +from core import tmdb + +host = 'http://tumejortorrent.com/' + +def mainlist(item): + logger.info() + + itemlist = [] + + thumb_pelis=get_thumb("channels_movie.png") + thumb_series=get_thumb("channels_tvshow.png") + thumb_search = get_thumb("search.png") + + itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=host, + extra="peliculas", thumbnail=thumb_pelis )) + + itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=host, extra="series", + thumbnail=thumb_series)) + + itemlist.append(Item(channel=item.channel, action="submenu", title="Documentales", url=host, extra="varios", + thumbnail=thumb_series)) + itemlist.append( + Item(channel=item.channel, action="search", title="Buscar", url=host + "buscar", thumbnail=thumb_search)) + + return itemlist + +def submenu(item): + logger.info() + itemlist = [] + + data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) + data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") + data = data.replace("'", "\"").replace("/series\"", "/series/\"") #Compatibilidad con mispelisy.series.com + + patron = '
  • .*?(.*?)' + if "pelisyseries.com" in host and item.extra == "varios": #compatibilidad con mispelisy.series.com + data = ' Documentales' + else: + data = scrapertools.get_match(data, patron) + + patron = '<.*?href="([^"]+)".*?>([^>]+)' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + title = scrapedtitle.strip() + url = scrapedurl + + itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra="pelilist")) + itemlist.append( + Item(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url, extra="pelilist")) + + if item.extra == "peliculas": + itemlist.append(Item(channel=item.channel, action="listado", title="Películas 4K", url=host + "peliculas-hd/4kultrahd/", extra="pelilist")) + itemlist.append( + Item(channel=item.channel, action="alfabeto", title="Películas 4K" + " [A-Z]", url=host + "peliculas-hd/4kultrahd/", extra="pelilist")) + + return itemlist + + +def alfabeto(item): + logger.info() + itemlist = [] + + data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) + data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") + + patron = '
      (.*?)
    ' + data = scrapertools.get_match(data, patron) + + patron = ']+>([^>]+)' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + title = scrapedtitle.upper() + url = scrapedurl + + itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra=item.extra)) + + return itemlist + + +def listado(item): + logger.info() + itemlist = [] + url_next_page ='' + + data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) + #data = httptools.downloadpage(item.url).data + data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") + + if item.modo != 'next' or item.modo =='': + patron = '
      (.*?)
    ' + fichas = scrapertools.get_match(data, patron) + page_extra = item.extra + else: + fichas = data + page_extra = item.extra + + patron = ' 30: + url_next_page = item.url + matches = matches[:30] + next_page = 'b' + modo = 'continue' + else: + matches = matches[30:] + next_page = 'a' + patron_next_page = 'Next<\/a>' + matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data) + modo = 'continue' + if len(matches_next_page) > 0: + url_next_page = matches_next_page[0] + modo = 'next' + + for scrapedurl, scrapedtitle, scrapedthumbnail, title_alt, calidad in matches: + url = scrapedurl + title = scrapedtitle.replace("�", "ñ").replace("ñ", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "") + title_alt = title_alt.replace("�", "ñ").replace("ñ", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "") + thumbnail = scrapedthumbnail + action = "findvideos" + extra = "" + context = "movie" + year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})') + + if ".com/serie" in url and "/miniseries" not in url: + action = "episodios" + extra = "serie" + context = "tvshow" + + title = scrapertools.find_single_match(title, '([^-]+)') + title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace("Ver en linea ", "", + 1).strip() + + else: + title = title.replace("Descargar torrent ", "", 1).replace("Descarga Gratis ", "", 1).replace("Descargar Estreno ", "", 1).replace("Pelicula en latino ", "", 1).replace("Descargar Pelicula ", "", 1).replace("Descargar", "", 1).replace("Descarga", "", 1).replace("Bajar", "", 1).strip() + if title.endswith("gratis"): title = title[:-7] + if title.endswith("torrent"): title = title[:-8] + if title.endswith("en HD"): title = title[:-6] + + if title == "": + title = title_alt + context_title = title_alt + show = title_alt + if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados: + if calidad: + title = title + ' [' + calidad + "]" + + if not 'array' in title: + itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail, + extra = extra, show = context_title, contentTitle=context_title, contentType=context, quality=calidad, + context=["buscar_trailer"], infoLabels= {'year':year})) + + logger.debug("url: " + url + " / title: " + title + " / contxt title: " + context_title + " / context: " + context + " / calidad: " + calidad+ " / year: " + year) + + tmdb.set_infoLabels(itemlist, True) + + if url_next_page: + itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente", + url=url_next_page, next_page=next_page, folder=True, + text_color='yellow', text_bold=True, modo = modo, plot = extra, + extra = page_extra)) + return itemlist + +def listado_busqueda(item): + logger.info() + itemlist = [] + data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url, post=item.post).data) + data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") + + list_chars = [["ñ", "ñ"]] + + for el in list_chars: + data = re.sub(r"%s" % el[0], el[1], data) + + try: + get, post = scrapertools.find_single_match(data, '
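
For reference, the block-of-30 paging that listado() wires through next_page ('a'/'b') and modo ('continue'/'next') is easier to read in isolation. An illustrative sketch under stated assumptions: the helper name is invented, and the exact guard on the first branch is inferred rather than quoted:

    # Sketch of listado()'s two-block paging; split_page is an invented name and
    # the block-'a' guard is an assumption (the channel keys it off item.next_page
    # and the number of scraped matches).
    import re

    def split_page(matches, page_url, data, block='a'):
        """Serve up to 60 scraped results to Kodi as two blocks of 30."""
        if block == 'a':
            if len(matches) > 30:
                # First pass: show 30 and point ">> Pagina siguiente" back at
                # the same URL, flagged as block 'b' (modo 'continue').
                return matches[:30], page_url, 'b', 'continue'
            visible = matches           # short page: nothing to split
        else:
            visible = matches[30:]      # second pass: show the remainder
        # Only then follow the site's own "Next" link (modo 'next'), if present.
        next_links = re.findall(r'href="([^"]+)"[^>]*>Next', data)
        if next_links:
            return visible, next_links[0], 'a', 'next'
        return visible, '', 'a', 'continue'

The split keeps each Kodi directory at 30 entries while the channel still walks the site's listing pages in order.
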