From 86ec11849b093d5f304bf4aa68181640417df75f Mon Sep 17 00:00:00 2001 From: danielr460 Date: Sat, 31 Mar 2018 13:47:16 -0500 Subject: [PATCH 01/13] Danimados: Correccion en la busqueda de capitulos y de enlaces a los servidores --- plugin.video.alfa/channels/danimados.py | 28 +++++++++++++++++-------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/plugin.video.alfa/channels/danimados.py b/plugin.video.alfa/channels/danimados.py index e8b1c74b..9f2f7ad0 100644 --- a/plugin.video.alfa/channels/danimados.py +++ b/plugin.video.alfa/channels/danimados.py @@ -117,12 +117,12 @@ def episodios(item): itemlist = [] data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) data_lista = scrapertools.find_single_match(data, '' #Filtrado por url, compatibilidad con mispelisy.series.com + #logger.debug("patron: " + patron + " / data: " + data) + if "pelisyseries.com" in host and item.extra == "varios": #compatibilidad con mispelisy.series.com + data = ' Documentales' + else: + data = scrapertools.get_match(data, patron) - patron = '([^>]+)' + patron = '<.*?href="([^"]+)".*?>([^>]+)' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, scrapedtitle in matches: @@ -55,12 +60,12 @@ def submenu(item): itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra="pelilist")) itemlist.append( Item(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url, extra="pelilist")) - + if item.extra == "peliculas": itemlist.append(Item(channel=item.channel, action="listado", title="Películas 4K", url=host + "peliculas-hd/4kultrahd/", extra="pelilist")) itemlist.append( Item(channel=item.channel, action="alfabeto", title="Películas 4K" + " [A-Z]", url=host + "peliculas-hd/4kultrahd/", extra="pelilist")) - + return itemlist @@ -91,16 +96,16 @@ def listado(item): itemlist = [] url_next_page ='' - data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) + #data = httptools.downloadpage(item.url).data data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") - #logger.debug(data) + logger.debug('item.modo: %s'%item.modo) logger.debug('item.extra: %s'%item.extra) if item.modo != 'next' or item.modo =='': logger.debug('item.title: %s'% item.title) patron = '' - logger.debug("patron=" + patron) fichas = scrapertools.get_match(data, patron) page_extra = item.extra else: @@ -109,11 +114,13 @@ def listado(item): patron = '(.*?)<\/b><\/font>') + real_title = scrapertools.find_single_match(title, r'(.*?)Temporada.*?<\/strong>') #series + if real_title == "": + real_title = scrapertools.find_single_match(title, r'(.*?)\[.*?]') #movies real_title = scrapertools.remove_htmltags(real_title).decode('iso-8859-1').encode('utf-8') + real_title = scrapertools.htmlclean(real_title) + calidad = scrapertools.find_single_match(title, r'.*?\s*Calidad.*?]+>[\[]\s*(?P.*?)\s*[\]]<\/span>') #series + if calidad == "": + calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])') #movies + year = scrapertools.find_single_match(thumb, r'-(\d{4})') + + # fix encoding for title title = scrapertools.htmlclean(title) - title = title.replace("�", "ñ") + title = title.replace("�", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng") + title = re.sub(r'(Calidad.*?\])', '', title) + + if real_title == "": + real_title = title + if calidad == "": + calidad = title + 
context = "movie" # no mostramos lo que no sean videos - if "/juego/" in url or "/varios/" in url: + if "juego/" in url: continue - if ".com/series" in url: + # Codigo para rescatar lo que se puede en pelisy.series.com de Series para la Videoteca. la URL apunta al capítulo y no a la Serie. Nombre de Serie frecuentemente en blanco. Se obtiene de Thumb, así como el id de la serie + if ("/serie" in url or "-serie" in url) and "pelisyseries.com" in host: + calidad_mps = "series/" + if "seriehd" in url: + calidad_mps = "series-hd/" + if "serievo" in url: + calidad_mps = "series-vo/" + if "serie-vo" in url: + calidad_mps = "series-vo/" + + real_title_mps = re.sub(r'.*?\/\d+_', '', thumb) + real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps) + + if "/0_" not in thumb: + serieid = scrapertools.find_single_match(thumb, r'.*?\/\w\/(?P\d+).*?.*') + if len(serieid) > 5: + serieid = "" + else: + serieid = "" + + url = host + calidad_mps + real_title_mps + "/" + serieid + + real_title_mps = real_title_mps.replace("-", " ") + #logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / real_title_mps: " + real_title_mps + " / calidad_mps : " + calidad_mps) + real_title = real_title_mps + + show = real_title - show = real_title + if ".com/serie" in url and "/miniseries" not in url: + + context = "tvshow" - itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb, - context=["buscar_trailer"], contentSerieName=show)) + itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb, quality=calidad, + show=show, extra="serie", context=["buscar_trailer"], contentType=context, contentTitle=real_title, contentSerieName=real_title, infoLabels= {'year':year})) else: - itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, - context=["buscar_trailer"])) - + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, quality=calidad, + show=show, context=["buscar_trailer"], contentType=context, contentTitle=real_title, infoLabels= {'year':year})) + + logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / show: " + show + " / calidad: " + calidad) + + tmdb.set_infoLabels(itemlist, True) + if post: itemlist.append(item.clone(channel=item.channel, action="listado_busqueda", title=">> Página siguiente", thumbnail=get_thumb("next.png"))) @@ -253,7 +314,6 @@ def listado_busqueda(item): def findvideos(item): logger.info() itemlist = [] - ## Cualquiera de las tres opciones son válidas # item.url = item.url.replace(".com/",".com/ver-online/") # item.url = item.url.replace(".com/",".com/descarga-directa/") @@ -263,32 +323,36 @@ def findvideos(item): data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") data = data.replace("$!", "#!").replace("'", "\"").replace("ñ", "ñ").replace("//pictures", "/pictures") - - title = scrapertools.find_single_match(data, "

([^<]+)<\/strong>[^<]+<\/h1>") - title += scrapertools.find_single_match(data, "

[^<]+<\/strong>([^<]+)<\/h1>") - caratula = scrapertools.find_single_match(data, '
.*?src="([^"]+)"') - - #
Descarga tu Archivo torrent!
+ + title = scrapertools.find_single_match(data, "([^<]+)<\/strong>.*?<\/h1>") #corregido para adaptarlo a mispelisy.series.com + title += scrapertools.find_single_match(data, "[^<]+<\/strong>([^<]+)<\/h1>") #corregido para adaptarlo a mispelisy.series.com + #caratula = scrapertools.find_single_match(data, '
.*?src="([^"]+)"') + caratula = scrapertools.find_single_match(data, ']+>.*?' #patron_ver = '
]+>.*?' @@ -309,37 +373,48 @@ def findvideos(item): patron = '
<\/div[^<]+
([^<]+)?<\/div[^<]+
([^<]+)?' patron += '<\/div[^<]+
([^<]+)?<\/div[^<]+
0: + itemlist.append(item.clone(title="", action="", folder=False)) + itemlist.append(item.clone(title=" Enlaces Ver: ", action="", text_color="green", folder=False)) for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver: if "Ver" in titulo: servidor = servidor.replace("streamin", "streaminto") - titulo = titulo + " [" + servidor + "]" + titulo = title mostrar_server = True if config.get_setting("hidepremium"): mostrar_server = servertools.is_server_enabled(servidor) + logger.debug("url: " + enlace + " / title: " + title + " / servidor: " + servidor + " / idioma: " + idioma) if mostrar_server: try: devuelve = servertools.findvideosbyserver(enlace, servidor) if devuelve: enlace = devuelve[0][1] itemlist.append( - Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo, - fulltitle=item.title, url=enlace, thumbnail=logo, plot=item.plot, folder=False)) + Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo, quality=titulo, + fulltitle=titulo, url=enlace, thumbnail=logo, plot=item.plot, folder=False)) except: pass + if len(enlaces_descargar) > 0: + itemlist.append(item.clone(title="", action="", folder=False)) + itemlist.append(item.clone(title=" Enlaces Descargar: ", action="", text_color="green", folder=False)) + for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar: if "Ver" not in titulo: servidor = servidor.replace("uploaded", "uploadedto") partes = enlace.split(" ") + titulo = "Partes " p = 1 + logger.debug("url: " + enlace + " / title: " + title + " / servidor: " + servidor + " / idioma: " + idioma) for enlace in partes: - parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " [" + servidor + "]" + parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " - " + title p += 1 mostrar_server = True if config.get_setting("hidepremium"): @@ -349,11 +424,12 @@ def findvideos(item): devuelve = servertools.findvideosbyserver(enlace, servidor) if devuelve: enlace = devuelve[0][1] - itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, - title=parte_titulo, fulltitle=item.title, url=enlace, thumbnail=logo, + itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, quality=parte_titulo, + title=parte_titulo, fulltitle=parte_titulo, url=enlace, thumbnail=logo, plot=item.plot, folder=False)) except: pass + return itemlist @@ -363,6 +439,8 @@ def episodios(item): infoLabels = item.infoLabels data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data) data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") + calidad = item.quality + pattern = '
<ul class="%s">(.*?)</ul>
' % "pagination" # item.pattern pagination = scrapertools.find_single_match(data, pattern) if pagination: @@ -381,22 +459,43 @@ def episodios(item): logger.debug("Loading page %s/%s url=%s" % (index, len(list_pages), page)) data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data) data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") - + data = data.replace("chapters", "buscar-list") #Compatibilidad con mispelisy.series.com pattern = '
<ul class="%s">(.*?)</ul>
' % "buscar-list" # item.pattern data = scrapertools.get_match(data, pattern) + #logger.debug("data: " + data) - pattern = ']*>
]+>(?P.*?)

' + if "pelisyseries.com" in host: + pattern = ']*>
]+>(?P.*?)?<\/h3>.*?<\/li>' + else: + pattern = ']*>]+>(?P.*?)?<\/h2>' matches = re.compile(pattern, re.DOTALL).findall(data) + #logger.debug("patron: " + pattern) + #logger.debug(matches) + + season = "1" for url, thumb, info in matches: + if "pelisyseries.com" in host: + interm = url + url = thumb + thumb = interm + if "\d+)?)<.+?]+>(?P.*?)\s*Calidad\s*]+>" \ - "[\[]\s*(?P.*?)\s*[\]]" + pattern = ".*?[^>]+>.*?Temporada\s*(?P\d+)?.*?Capitulo(?:s)?\s*(?P\d+)?" \ + "(?:.*?(?P\d+)?)<.+?]+>(?P.*?)?<\/span>\s*Calidad\s*]+>" \ + "[\[]\s*(?P.*?)?\s*[\]]<\/span>" + if "Especial" in info: # Capitulos Especiales + pattern = ".*?[^>]+>.*?Temporada.*?\[.*?(?P\d+).*?\].*?Capitulo.*?\[\s*(?P\d+).*?\]?(?:.*?(?P\d+)?)<.+?]+>(?P.*?)?<\/span>\s*Calidad\s*]+>[\[]\s*(?P.*?)?\s*[\]]<\/span>" + logger.debug("patron: " + pattern) + logger.debug(info) r = re.compile(pattern) match = [m.groupdict() for m in r.finditer(info)][0] + if match['season'] is None: match['season'] = season + if match['episode'] is None: match['episode'] = "0" + if match['quality']: item.quality = match['quality'] + if match["episode2"]: multi = True title = "%s (%sx%s-%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2), @@ -408,12 +507,17 @@ def episodios(item): match["lang"], match["quality"]) else: # old style - pattern = "\[(?P.*?)\].*?\[Cap.(?P\d+)(?P\d{2})(?:_(?P\d+)" \ + pattern = "\[(?P.*?)\].*?\[Cap.(?P\d+).*?(?P\d{2})(?:_(?P\d+)" \ "(?P\d{2}))?.*?\].*?(?:\[(?P.*?)\])?" - + logger.debug("patron: " + pattern) + logger.debug(info) r = re.compile(pattern) match = [m.groupdict() for m in r.finditer(info)][0] - # logger.debug("data %s" % match) + #logger.debug("data %s" % match) + + #if match['season'] is "": match['season'] = season + #if match['episode'] is "": match['episode'] = "0" + #logger.debug(match) str_lang = "" if match["lang"] is not None: @@ -436,18 +540,19 @@ def episodios(item): season = match['season'] episode = match['episode'] + logger.debug("title: " + title + " / url: " + url + " / calidad: " + item.quality + " / multi: " + str(multi) + " / Season: " + str(season) + " / EpisodeNumber: " + str(episode)) itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, quality=item.quality, multi=multi, contentSeason=season, contentEpisodeNumber=episode, infoLabels = infoLabels)) - # order list + #tmdb.set_infoLabels(itemlist, True) tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True) if len(itemlist) > 1: itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber))) if config.get_videolibrary_support() and len(itemlist) > 0: itemlist.append( - item.clone(title="[COLOR orange][B]Añadir esta serie a la videoteca[/B][/COLOR]", action="add_serie_to_library", extra="episodios")) + item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios", quality=calidad)) return itemlist diff --git a/plugin.video.alfa/channels/mispelisyseries.json b/plugin.video.alfa/channels/mispelisyseries.json index 9d5ae057..73b0db8b 100755 --- a/plugin.video.alfa/channels/mispelisyseries.json +++ b/plugin.video.alfa/channels/mispelisyseries.json @@ -9,7 +9,8 @@ "categories": [ "torrent", "movie", - "tvshow" + "tvshow", + "documentary" ], "settings": [ { diff --git a/plugin.video.alfa/channels/mispelisyseries.py b/plugin.video.alfa/channels/mispelisyseries.py index ba65968e..0e4d44cd 100644 --- a/plugin.video.alfa/channels/mispelisyseries.py +++ b/plugin.video.alfa/channels/mispelisyseries.py @@ -1,137 +1,75 
@@ -# -*- coding: utf-8 -*- +# -*- coding: utf-8 -*- import re -import urllib -import urlparse +from channelselector import get_thumb from core import httptools from core import scrapertools from core import servertools from core.item import Item -from platformcode import logger -from channelselector import get_thumb +from platformcode import config, logger +from core import tmdb host = 'http://mispelisyseries.com/' + def mainlist(item): logger.info() itemlist = [] - itemlist.append(Item(channel=item.channel, action="menu", title="Películas", url=host, - extra="Peliculas", folder=True, thumbnail=get_thumb('movies', auto=True))) + + thumb_pelis=get_thumb("channels_movie.png") + thumb_series=get_thumb("channels_tvshow.png") + thumb_search = get_thumb("search.png") + + itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=host, + extra="peliculas", thumbnail=thumb_pelis )) + + itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=host, extra="series", + thumbnail=thumb_series)) + + itemlist.append(Item(channel=item.channel, action="submenu", title="Documentales", url=host, extra="varios", + thumbnail=thumb_series)) itemlist.append( - Item(channel=item.channel, action="menu", title="Series", url=host, extra="Series", - folder=True, thumbnail=get_thumb('tvshows', auto=True))) - itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url=host + 'buscar', - thumbnail=get_thumb('search', auto=True))) + Item(channel=item.channel, action="search", title="Buscar", url=host + "buscar", thumbnail=thumb_search)) + return itemlist - -def menu(item): +def submenu(item): logger.info() itemlist = [] - data = httptools.downloadpage(item.url).data - # logger.info("data="+data) + data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) + data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") + data = data.replace("'", "\"").replace("/series\"", "/series/\"") #Compatibilidad con mispelisy.series.com - data = scrapertools.find_single_match(data, item.extra + "") - # logger.info("data="+data) + #patron = '
  • .*?
      (.*?)
    ' + patron = '
  • <.*?href="'+item.url+item.extra + '/">.*?(.*?)' #Filtrado por url, compatibilidad con mispelisy.series.com + #logger.debug("patron: " + patron + " / data: " + data) + if "pelisyseries.com" in host and item.extra == "varios": #compatibilidad con mispelisy.series.com + data = ' Documentales' + else: + data = scrapertools.get_match(data, patron) - patron = "
  • ]+>([^<]+)
  • " + patron = '<.*?href="([^"]+)".*?>([^>]+)' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, scrapedtitle in matches: - title = scrapedtitle - url = urlparse.urljoin(item.url, scrapedurl) - thumbnail = "" - plot = "" - itemlist.append(Item(channel=item.channel, action="lista", title=title, url=url, thumbnail=thumbnail, plot=plot, - folder=True)) - - - if title != "Todas las Peliculas": - itemlist.append( - Item(channel=item.channel, action="alfabetico", title=title + " [A-Z]", url=url, thumbnail=thumbnail, - plot=plot, folder=True)) - + title = scrapedtitle.strip() + url = scrapedurl + itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra="pelilist")) itemlist.append( - Item(channel=item.channel, action="alfabetico", title=title + " [A-Z]", url=url, thumbnail=thumbnail, - plot=plot, - folder=True)) - - if 'películas' in item.title.lower(): - new_item = item.clone(title='Peliculas 4K', url=host+'buscar', post='q=4k', action='listado2', - pattern='buscar-list') - itemlist.append(new_item) - + Item(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url, extra="pelilist")) + + if item.extra == "peliculas": + itemlist.append(Item(channel=item.channel, action="listado", title="Películas 4K", url=host + "peliculas-hd/4kultrahd/", extra="pelilist")) + itemlist.append( + Item(channel=item.channel, action="alfabeto", title="Películas 4K" + " [A-Z]", url=host + "peliculas-hd/4kultrahd/", extra="pelilist")) + return itemlist -def search(item, texto): - logger.info("search:" + texto) - # texto = texto.replace(" ", "+") - - #try: - item.post = "q=%s" % texto - item.pattern = "buscar-list" - itemlist = listado2(item) - - return itemlist - - # Se captura la excepción, para no interrumpir al buscador global si un canal falla - # except: - # import sys - # for line in sys.exc_info(): - # logger.error("%s" % line) - # return [] - -def newest(categoria): - itemlist = [] - item = Item() - try: - if categoria in ['peliculas', 'torrent']: - item.url = host+"peliculas" - - elif categoria == 'series': - item.url = host+"series" - - if categoria == '4k': - - item.url = Host + '/buscar' - - item.post = 'q=4k' - - item.pattern = 'buscar-list' - - action = listado2(item) - - else: - return [] - - itemlist = lista(item) - if itemlist[-1].title == ">> Página siguiente": - itemlist.pop() - - # Esta pagina coloca a veces contenido duplicado, intentamos descartarlo - dict_aux = {} - for i in itemlist: - if not i.url in dict_aux: - dict_aux[i.url] = i - else: - itemlist.remove(i) - - # Se captura la excepción, para no interrumpir al canal novedades si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("{0}".format(line)) - return [] - - # return dict_aux.values() - return itemlist - - -def alfabetico(item): +def alfabeto(item): logger.info() itemlist = [] @@ -148,93 +86,137 @@ def alfabetico(item): title = scrapedtitle.upper() url = scrapedurl - itemlist.append(Item(channel=item.channel, action="lista", title=title, url=url)) + itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra=item.extra)) return itemlist -def lista(item): +def listado(item): logger.info() itemlist = [] + url_next_page ='' - # Descarga la pagina - data = httptools.downloadpage(item.url, post=item.extra).data - # logger.info("data="+data) + data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) + #data = httptools.downloadpage(item.url).data + data = unicode(data, "iso-8859-1", 
errors="replace").encode("utf-8") + + logger.debug('item.modo: %s'%item.modo) + logger.debug('item.extra: %s'%item.extra) + + if item.modo != 'next' or item.modo =='': + logger.debug('item.title: %s'% item.title) + patron = '
      (.*?)
    ' + fichas = scrapertools.get_match(data, patron) + page_extra = item.extra + else: + fichas = data + page_extra = item.extra - bloque = scrapertools.find_single_match(data, '(?:' + #patron_ver = '
    ]+>.*?' + + #match_ver = scrapertools.find_single_match(data, patron_ver) + #match_descargar = scrapertools.find_single_match(data, patron_descargar) + + #patron = '
    0: + itemlist.append(item.clone(title="", action="", folder=False)) + itemlist.append(item.clone(title=" Enlaces Ver: ", action="", text_color="green", folder=False)) + + for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver: + if "Ver" in titulo: + servidor = servidor.replace("streamin", "streaminto") + titulo = title + mostrar_server = True + if config.get_setting("hidepremium"): + mostrar_server = servertools.is_server_enabled(servidor) + logger.debug("url: " + enlace + " / title: " + title + " / servidor: " + servidor + " / idioma: " + idioma) + if mostrar_server: + try: + devuelve = servertools.findvideosbyserver(enlace, servidor) + if devuelve: + enlace = devuelve[0][1] + itemlist.append( + Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo, quality=titulo, + fulltitle=titulo, url=enlace, thumbnail=logo, plot=item.plot, folder=False)) + except: + pass + + if len(enlaces_descargar) > 0: + itemlist.append(item.clone(title="", action="", folder=False)) + itemlist.append(item.clone(title=" Enlaces Descargar: ", action="", text_color="green", folder=False)) + + for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar: + if "Ver" not in titulo: + servidor = servidor.replace("uploaded", "uploadedto") + partes = enlace.split(" ") + titulo = "Partes " + p = 1 + logger.debug("url: " + enlace + " / title: " + title + " / servidor: " + servidor + " / idioma: " + idioma) + for enlace in partes: + parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " - " + title + p += 1 + mostrar_server = True + if config.get_setting("hidepremium"): + mostrar_server = servertools.is_server_enabled(servidor) + if mostrar_server: + try: + devuelve = servertools.findvideosbyserver(enlace, servidor) + if devuelve: + enlace = devuelve[0][1] + itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, quality=parte_titulo, + title=parte_titulo, fulltitle=parte_titulo, url=enlace, thumbnail=logo, + plot=item.plot, folder=False)) + except: + pass + + return itemlist def episodios(item): logger.info() itemlist = [] + infoLabels = item.infoLabels + data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data) + data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") + calidad = item.quality + + pattern = '
<ul class="%s">(.*?)</ul>
    ' % "pagination" # item.pattern + pagination = scrapertools.find_single_match(data, pattern) + if pagination: + pattern = '
  • Last<\/a>' + full_url = scrapertools.find_single_match(pagination, pattern) + url, last_page = scrapertools.find_single_match(full_url, r'(.*?\/pg\/)(\d+)') + list_pages = [item.url] + for x in range(2, int(last_page) + 1): + response = httptools.downloadpage('%s%s'% (url,x)) + if response.sucess: + list_pages.append("%s%s" % (url, x)) + else: + list_pages = [item.url] - # Descarga la pagina - data = httptools.downloadpage(item.url, post=item.extra).data - # logger.info("data="+data) + for index, page in enumerate(list_pages): + logger.debug("Loading page %s/%s url=%s" % (index, len(list_pages), page)) + data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data) + data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") + data = data.replace("chapters", "buscar-list") #Compatibilidad con mispelisy.series.com + pattern = '
<ul class="%s">(.*?)</ul>
    ' % "buscar-list" # item.pattern + data = scrapertools.get_match(data, pattern) + #logger.debug("data: " + data) - patron = '
    \d+)?)<.+?]+>(?P.*?)?<\/span>\s*Calidad\s*]+>" \ + "[\[]\s*(?P.*?)?\s*[\]]<\/span>" + if "Especial" in info: # Capitulos Especiales + pattern = ".*?[^>]+>.*?Temporada.*?\[.*?(?P\d+).*?\].*?Capitulo.*?\[\s*(?P\d+).*?\]?(?:.*?(?P\d+)?)<.+?]+>(?P.*?)?<\/span>\s*Calidad\s*]+>[\[]\s*(?P.*?)?\s*[\]]<\/span>" + logger.debug("patron: " + pattern) + logger.debug(info) + r = re.compile(pattern) + match = [m.groupdict() for m in r.finditer(info)][0] + + if match['season'] is None: match['season'] = season + if match['episode'] is None: match['episode'] = "0" + if match['quality']: item.quality = match['quality'] + + if match["episode2"]: + multi = True + title = "%s (%sx%s-%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2), + str(match["episode2"]).zfill(2), match["lang"], + match["quality"]) + else: + multi = False + title = "%s (%sx%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2), + match["lang"], match["quality"]) + + else: # old style + pattern = "\[(?P.*?)\].*?\[Cap.(?P\d+).*?(?P\d{2})(?:_(?P\d+)" \ + "(?P\d{2}))?.*?\].*?(?:\[(?P.*?)\])?" + logger.debug("patron: " + pattern) + logger.debug(info) + r = re.compile(pattern) + match = [m.groupdict() for m in r.finditer(info)][0] + #logger.debug("data %s" % match) + + #if match['season'] is "": match['season'] = season + #if match['episode'] is "": match['episode'] = "0" + #logger.debug(match) + + str_lang = "" + if match["lang"] is not None: + str_lang = "[%s]" % match["lang"] + + if match["season2"] and match["episode2"]: + multi = True + if match["season"] == match["season2"]: + + title = "%s (%sx%s-%s) %s[%s]" % (item.show, match["season"], match["episode"], + match["episode2"], str_lang, match["quality"]) + else: + title = "%s (%sx%s-%sx%s) %s[%s]" % (item.show, match["season"], match["episode"], + match["season2"], match["episode2"], str_lang, + match["quality"]) + else: + title = "%s (%sx%s) %s[%s]" % (item.show, match["season"], match["episode"], str_lang, + match["quality"]) + multi = False + + season = match['season'] + episode = match['episode'] + logger.debug("title: " + title + " / url: " + url + " / calidad: " + item.quality + " / multi: " + str(multi) + " / Season: " + str(season) + " / EpisodeNumber: " + str(episode)) + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, + quality=item.quality, multi=multi, contentSeason=season, + contentEpisodeNumber=episode, infoLabels = infoLabels)) + # order list + #tmdb.set_infoLabels(itemlist, True) + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True) + if len(itemlist) > 1: + itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber))) + + if config.get_videolibrary_support() and len(itemlist) > 0: itemlist.append( - Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumbnail, - plot=plot, folder=True)) - - next_page_url = scrapertools.find_single_match(data, "(.*?)
    ') - item.plot = scrapertools.htmlclean(item.plot).strip() - item.contentPlot = item.plot - al_url_fa = scrapertools.find_single_match(data, 'location\.href.*?=.*?"http:\/\/(?:tumejorserie|tumejorjuego).*?link=(.*?)"') - if al_url_fa == "": - al_url_fa = scrapertools.find_single_match(data, 'location\.href.*?=.*?"%s(.*?)" ' % host) - if al_url_fa != "": - al_url_fa = host + al_url_fa - logger.info("torrent=" + al_url_fa) - itemlist.append( - Item(channel=item.channel, action="play", server="torrent", title="Vídeo en torrent", fulltitle=item.title, - url=al_url_fa, thumbnail=servertools.guess_server_thumbnail("torrent"), plot=item.plot, folder=False, - parentContent=item)) + itemlist = listado(item) + if itemlist[-1].title == ">> Página siguiente": + itemlist.pop() + item.url = host+'series/' + itemlist.extend(listado(item)) + if itemlist[-1].title == ">> Página siguiente": + itemlist.pop() - patron = '
  • .*?
      (.*?)
    ' - patron = '
  • .*?
      (.*?)
    ' #Filtrado por url - data = scrapertools.get_match(data, patron) + patron = '
  • <.*?href="'+item.url+item.extra + '/">.*?(.*?)' #Filtrado por url, compatibilidad con mispelisy.series.com + #logger.debug("patron: " + patron + " / data: " + data) + if "pelisyseries.com" in host and item.extra == "varios": #compatibilidad con mispelisy.series.com + data = ' Documentales' + else: + data = scrapertools.get_match(data, patron) - patron = '([^>]+)' + patron = '<.*?href="([^"]+)".*?>([^>]+)' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, scrapedtitle in matches: @@ -92,15 +97,15 @@ def listado(item): url_next_page ='' data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) + #data = httptools.downloadpage(item.url).data data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") - #logger.debug(data) + logger.debug('item.modo: %s'%item.modo) logger.debug('item.extra: %s'%item.extra) if item.modo != 'next' or item.modo =='': logger.debug('item.title: %s'% item.title) patron = '
      (.*?)
    ' - logger.debug("patron=" + patron) fichas = scrapertools.get_match(data, patron) page_extra = item.extra else: @@ -109,11 +114,13 @@ def listado(item): patron = '(.*?)<\/b><\/font>') + real_title = scrapertools.find_single_match(title, r'(.*?)Temporada.*?<\/strong>') #series + if real_title == "": + real_title = scrapertools.find_single_match(title, r'(.*?)\[.*?]') #movies real_title = scrapertools.remove_htmltags(real_title).decode('iso-8859-1').encode('utf-8') + real_title = scrapertools.htmlclean(real_title) + calidad = scrapertools.find_single_match(title, r'.*?\s*Calidad.*?]+>[\[]\s*(?P.*?)\s*[\]]<\/span>') #series + if calidad == "": + calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])') #movies + year = scrapertools.find_single_match(thumb, r'-(\d{4})') + + # fix encoding for title title = scrapertools.htmlclean(title) - title = title.replace("�", "ñ") + title = title.replace("�", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng") + title = re.sub(r'(Calidad.*?\])', '', title) + + if real_title == "": + real_title = title + if calidad == "": + calidad = title + context = "movie" # no mostramos lo que no sean videos - if "/juego/" in url or "/varios/" in url: + if "juego/" in url: continue - if ".com/series" in url: + # Codigo para rescatar lo que se puede en pelisy.series.com de Series para la Videoteca. la URL apunta al capítulo y no a la Serie. Nombre de Serie frecuentemente en blanco. Se obtiene de Thumb, así como el id de la serie + if ("/serie" in url or "-serie" in url) and "pelisyseries.com" in host: + calidad_mps = "series/" + if "seriehd" in url: + calidad_mps = "series-hd/" + if "serievo" in url: + calidad_mps = "series-vo/" + if "serie-vo" in url: + calidad_mps = "series-vo/" + + real_title_mps = re.sub(r'.*?\/\d+_', '', thumb) + real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps) + + if "/0_" not in thumb: + serieid = scrapertools.find_single_match(thumb, r'.*?\/\w\/(?P\d+).*?.*') + if len(serieid) > 5: + serieid = "" + else: + serieid = "" + + url = host + calidad_mps + real_title_mps + "/" + serieid + + real_title_mps = real_title_mps.replace("-", " ") + #logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / real_title_mps: " + real_title_mps + " / calidad_mps : " + calidad_mps) + real_title = real_title_mps + + show = real_title - show = real_title + if ".com/serie" in url and "/miniseries" not in url: + + context = "tvshow" - itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb, - context=["buscar_trailer"], contentSerieName=show)) + itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb, quality=calidad, + show=show, extra="serie", context=["buscar_trailer"], contentType=context, contentTitle=real_title, contentSerieName=real_title, infoLabels= {'year':year})) else: - itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, - context=["buscar_trailer"])) - + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, quality=calidad, + show=show, context=["buscar_trailer"], contentType=context, contentTitle=real_title, infoLabels= {'year':year})) + + logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / show: " + show + " / calidad: " + calidad) + + tmdb.set_infoLabels(itemlist, True) + if post: - itemlist.append(item.clone(channel=item.channel, 
action="listado2", title=">> Página siguiente", + itemlist.append(item.clone(channel=item.channel, action="listado_busqueda", title=">> Página siguiente", thumbnail=get_thumb("next.png"))) return itemlist @@ -253,7 +314,6 @@ def listado2(item): def findvideos(item): logger.info() itemlist = [] - ## Cualquiera de las tres opciones son válidas # item.url = item.url.replace(".com/",".com/ver-online/") # item.url = item.url.replace(".com/",".com/descarga-directa/") @@ -263,32 +323,36 @@ def findvideos(item): data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") data = data.replace("$!", "#!").replace("'", "\"").replace("ñ", "ñ").replace("//pictures", "/pictures") - - title = scrapertools.find_single_match(data, "

    ([^<]+)<\/strong>[^<]+<\/h1>") - title += scrapertools.find_single_match(data, "

    [^<]+<\/strong>([^<]+)<\/h1>") - caratula = scrapertools.find_single_match(data, '
    .*?src="([^"]+)"') - - #
    Descarga tu Archivo torrent!
    + + title = scrapertools.find_single_match(data, "([^<]+)<\/strong>.*?<\/h1>") #corregido para adaptarlo a mispelisy.series.com + title += scrapertools.find_single_match(data, "[^<]+<\/strong>([^<]+)<\/h1>") #corregido para adaptarlo a mispelisy.series.com + #caratula = scrapertools.find_single_match(data, '
    .*?src="([^"]+)"') + caratula = scrapertools.find_single_match(data, ']+>.*?' #patron_ver = '
    ]+>.*?' @@ -309,37 +373,48 @@ def findvideos(item): patron = '
    <\/div[^<]+
    ([^<]+)?<\/div[^<]+
    ([^<]+)?' patron += '<\/div[^<]+
    ([^<]+)?<\/div[^<]+
    0: + itemlist.append(item.clone(title="", action="", folder=False)) + itemlist.append(item.clone(title=" Enlaces Ver: ", action="", text_color="green", folder=False)) for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver: if "Ver" in titulo: servidor = servidor.replace("streamin", "streaminto") - titulo = titulo + " [" + servidor + "]" + titulo = title mostrar_server = True if config.get_setting("hidepremium"): mostrar_server = servertools.is_server_enabled(servidor) + logger.debug("url: " + enlace + " / title: " + title + " / servidor: " + servidor + " / idioma: " + idioma) if mostrar_server: try: devuelve = servertools.findvideosbyserver(enlace, servidor) if devuelve: enlace = devuelve[0][1] itemlist.append( - Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo, - fulltitle=item.title, url=enlace, thumbnail=logo, plot=item.plot, folder=False)) + Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo, quality=titulo, + fulltitle=titulo, url=enlace, thumbnail=logo, plot=item.plot, folder=False)) except: pass + if len(enlaces_descargar) > 0: + itemlist.append(item.clone(title="", action="", folder=False)) + itemlist.append(item.clone(title=" Enlaces Descargar: ", action="", text_color="green", folder=False)) + for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar: if "Ver" not in titulo: servidor = servidor.replace("uploaded", "uploadedto") partes = enlace.split(" ") + titulo = "Partes " p = 1 + logger.debug("url: " + enlace + " / title: " + title + " / servidor: " + servidor + " / idioma: " + idioma) for enlace in partes: - parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " [" + servidor + "]" + parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " - " + title p += 1 mostrar_server = True if config.get_setting("hidepremium"): @@ -349,11 +424,12 @@ def findvideos(item): devuelve = servertools.findvideosbyserver(enlace, servidor) if devuelve: enlace = devuelve[0][1] - itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, - title=parte_titulo, fulltitle=item.title, url=enlace, thumbnail=logo, + itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, quality=parte_titulo, + title=parte_titulo, fulltitle=parte_titulo, url=enlace, thumbnail=logo, plot=item.plot, folder=False)) except: pass + return itemlist @@ -363,6 +439,8 @@ def episodios(item): infoLabels = item.infoLabels data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data) data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") + calidad = item.quality + pattern = '
<ul class="%s">(.*?)</ul>
    ' % "pagination" # item.pattern pagination = scrapertools.find_single_match(data, pattern) if pagination: @@ -381,22 +459,43 @@ def episodios(item): logger.debug("Loading page %s/%s url=%s" % (index, len(list_pages), page)) data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data) data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") - + data = data.replace("chapters", "buscar-list") #Compatibilidad con mispelisy.series.com pattern = '
<ul class="%s">(.*?)</ul>
    ' % "buscar-list" # item.pattern data = scrapertools.get_match(data, pattern) + #logger.debug("data: " + data) - pattern = ']*>
    ]+>(?P.*?)

    ' + if "pelisyseries.com" in host: + pattern = ']*>
    ]+>(?P.*?)?<\/h3>.*?<\/li>' + else: + pattern = ']*>]+>(?P.*?)?<\/h2>' matches = re.compile(pattern, re.DOTALL).findall(data) + #logger.debug("patron: " + pattern) + #logger.debug(matches) + + season = "1" for url, thumb, info in matches: + if "pelisyseries.com" in host: + interm = url + url = thumb + thumb = interm + if "\d+)?)<.+?]+>(?P.*?)\s*Calidad\s*]+>" \ - "[\[]\s*(?P.*?)\s*[\]]" + pattern = ".*?[^>]+>.*?Temporada\s*(?P\d+)?.*?Capitulo(?:s)?\s*(?P\d+)?" \ + "(?:.*?(?P\d+)?)<.+?]+>(?P.*?)?<\/span>\s*Calidad\s*]+>" \ + "[\[]\s*(?P.*?)?\s*[\]]<\/span>" + if "Especial" in info: # Capitulos Especiales + pattern = ".*?[^>]+>.*?Temporada.*?\[.*?(?P\d+).*?\].*?Capitulo.*?\[\s*(?P\d+).*?\]?(?:.*?(?P\d+)?)<.+?]+>(?P.*?)?<\/span>\s*Calidad\s*]+>[\[]\s*(?P.*?)?\s*[\]]<\/span>" + logger.debug("patron: " + pattern) + logger.debug(info) r = re.compile(pattern) match = [m.groupdict() for m in r.finditer(info)][0] + if match['season'] is None: match['season'] = season + if match['episode'] is None: match['episode'] = "0" + if match['quality']: item.quality = match['quality'] + if match["episode2"]: multi = True title = "%s (%sx%s-%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2), @@ -408,12 +507,17 @@ def episodios(item): match["lang"], match["quality"]) else: # old style - pattern = "\[(?P.*?)\].*?\[Cap.(?P\d+)(?P\d{2})(?:_(?P\d+)" \ + pattern = "\[(?P.*?)\].*?\[Cap.(?P\d+).*?(?P\d{2})(?:_(?P\d+)" \ "(?P\d{2}))?.*?\].*?(?:\[(?P.*?)\])?" - + logger.debug("patron: " + pattern) + logger.debug(info) r = re.compile(pattern) match = [m.groupdict() for m in r.finditer(info)][0] - # logger.debug("data %s" % match) + #logger.debug("data %s" % match) + + #if match['season'] is "": match['season'] = season + #if match['episode'] is "": match['episode'] = "0" + #logger.debug(match) str_lang = "" if match["lang"] is not None: @@ -436,18 +540,19 @@ def episodios(item): season = match['season'] episode = match['episode'] + logger.debug("title: " + title + " / url: " + url + " / calidad: " + item.quality + " / multi: " + str(multi) + " / Season: " + str(season) + " / EpisodeNumber: " + str(episode)) itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, quality=item.quality, multi=multi, contentSeason=season, contentEpisodeNumber=episode, infoLabels = infoLabels)) - # order list + #tmdb.set_infoLabels(itemlist, True) tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True) if len(itemlist) > 1: itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber))) if config.get_videolibrary_support() and len(itemlist) > 0: itemlist.append( - item.clone(title="[COLOR orange][B]Añadir esta serie a la videoteca[/B][/COLOR]", action="add_serie_to_library", extra="episodios")) + item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios", quality=calidad)) return itemlist @@ -458,7 +563,7 @@ def search(item, texto): try: item.post = "q=%s" % texto item.pattern = "buscar-list" - itemlist = listado2(item) + itemlist = listado_busqueda(item) return itemlist diff --git a/plugin.video.alfa/channels/torrentrapid.json b/plugin.video.alfa/channels/torrentrapid.json index 0362f46d..303149a0 100644 --- a/plugin.video.alfa/channels/torrentrapid.json +++ b/plugin.video.alfa/channels/torrentrapid.json @@ -10,7 +10,8 @@ "movie", "tvshow", "anime", - "torrent" + "torrent", + "documentary" ], "settings": [ { @@ -21,6 +22,22 @@ "enabled": true, "visible": true }, + { + "id": 
"include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Peliculas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_series", + "type": "bool", + "label": "Incluir en Novedades - Episodios de series", + "default": true, + "enabled": true, + "visible": true + }, { "id": "include_in_newest_torrent", "type": "bool", @@ -28,6 +45,14 @@ "default": true, "enabled": true, "visible": true + }, + { + "id": "include_in_newest_4k", + "type": "bool", + "label": "Incluir en Novedades - 4K", + "default": true, + "enabled": true, + "visible": true } ] } \ No newline at end of file diff --git a/plugin.video.alfa/channels/torrentrapid.py b/plugin.video.alfa/channels/torrentrapid.py index ae0e174f..fa93fce0 100644 --- a/plugin.video.alfa/channels/torrentrapid.py +++ b/plugin.video.alfa/channels/torrentrapid.py @@ -10,7 +10,7 @@ from core.item import Item from platformcode import config, logger from core import tmdb -host = 'http://torrentrapid.com/' # Cambiar manualmente "xx" en línea 287 ".com/xx/library" por tl para torrentrapid, tr para torrentrapid, d20 para descargas2020 +host = 'http://torrentrapid.com/' def mainlist(item): logger.info() @@ -40,12 +40,17 @@ def submenu(item): data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") + data = data.replace("'", "\"").replace("/series\"", "/series/\"") #Compatibilidad con mispelisy.series.com #patron = '
  • .*?
      (.*?)
    ' - patron = '
  • .*?
      (.*?)
    ' #Filtrado por url - data = scrapertools.get_match(data, patron) + patron = '
  • <.*?href="'+item.url+item.extra + '/">.*?(.*?)' #Filtrado por url, compatibilidad con mispelisy.series.com + #logger.debug("patron: " + patron + " / data: " + data) + if "pelisyseries.com" in host and item.extra == "varios": #compatibilidad con mispelisy.series.com + data = ' Documentales' + else: + data = scrapertools.get_match(data, patron) - patron = '([^>]+)' + patron = '<.*?href="([^"]+)".*?>([^>]+)' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, scrapedtitle in matches: @@ -92,15 +97,15 @@ def listado(item): url_next_page ='' data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) + #data = httptools.downloadpage(item.url).data data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") - #logger.debug(data) + logger.debug('item.modo: %s'%item.modo) logger.debug('item.extra: %s'%item.extra) if item.modo != 'next' or item.modo =='': logger.debug('item.title: %s'% item.title) patron = '
      (.*?)
    ' - logger.debug("patron=" + patron) fichas = scrapertools.get_match(data, patron) page_extra = item.extra else: @@ -109,11 +114,13 @@ def listado(item): patron = '(.*?)<\/b><\/font>') + real_title = scrapertools.find_single_match(title, r'(.*?)Temporada.*?<\/strong>') #series + if real_title == "": + real_title = scrapertools.find_single_match(title, r'(.*?)\[.*?]') #movies real_title = scrapertools.remove_htmltags(real_title).decode('iso-8859-1').encode('utf-8') + real_title = scrapertools.htmlclean(real_title) + calidad = scrapertools.find_single_match(title, r'.*?\s*Calidad.*?]+>[\[]\s*(?P.*?)\s*[\]]<\/span>') #series + if calidad == "": + calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])') #movies + year = scrapertools.find_single_match(thumb, r'-(\d{4})') + + # fix encoding for title title = scrapertools.htmlclean(title) - title = title.replace("�", "ñ") + title = title.replace("�", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng") + title = re.sub(r'(Calidad.*?\])', '', title) + + if real_title == "": + real_title = title + if calidad == "": + calidad = title + context = "movie" # no mostramos lo que no sean videos - if "/juego/" in url or "/varios/" in url: + if "juego/" in url: continue - if ".com/series" in url: + # Codigo para rescatar lo que se puede en pelisy.series.com de Series para la Videoteca. la URL apunta al capítulo y no a la Serie. Nombre de Serie frecuentemente en blanco. Se obtiene de Thumb, así como el id de la serie + if ("/serie" in url or "-serie" in url) and "pelisyseries.com" in host: + calidad_mps = "series/" + if "seriehd" in url: + calidad_mps = "series-hd/" + if "serievo" in url: + calidad_mps = "series-vo/" + if "serie-vo" in url: + calidad_mps = "series-vo/" + + real_title_mps = re.sub(r'.*?\/\d+_', '', thumb) + real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps) + + if "/0_" not in thumb: + serieid = scrapertools.find_single_match(thumb, r'.*?\/\w\/(?P\d+).*?.*') + if len(serieid) > 5: + serieid = "" + else: + serieid = "" + + url = host + calidad_mps + real_title_mps + "/" + serieid + + real_title_mps = real_title_mps.replace("-", " ") + #logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / real_title_mps: " + real_title_mps + " / calidad_mps : " + calidad_mps) + real_title = real_title_mps + + show = real_title - show = real_title + if ".com/serie" in url and "/miniseries" not in url: + + context = "tvshow" - itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb, - context=["buscar_trailer"], contentSerieName=show)) + itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb, quality=calidad, + show=show, extra="serie", context=["buscar_trailer"], contentType=context, contentTitle=real_title, contentSerieName=real_title, infoLabels= {'year':year})) else: - itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, - context=["buscar_trailer"])) - + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, quality=calidad, + show=show, context=["buscar_trailer"], contentType=context, contentTitle=real_title, infoLabels= {'year':year})) + + logger.debug("url: " + url + " / title: " + title + " / real_title: " + real_title + " / show: " + show + " / calidad: " + calidad) + + tmdb.set_infoLabels(itemlist, True) + if post: - itemlist.append(item.clone(channel=item.channel, 
action="listado2", title=">> Página siguiente", + itemlist.append(item.clone(channel=item.channel, action="listado_busqueda", title=">> Página siguiente", thumbnail=get_thumb("next.png"))) return itemlist @@ -253,7 +314,6 @@ def listado2(item): def findvideos(item): logger.info() itemlist = [] - ## Cualquiera de las tres opciones son válidas # item.url = item.url.replace(".com/",".com/ver-online/") # item.url = item.url.replace(".com/",".com/descarga-directa/") @@ -263,32 +323,36 @@ def findvideos(item): data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") data = data.replace("$!", "#!").replace("'", "\"").replace("ñ", "ñ").replace("//pictures", "/pictures") - - title = scrapertools.find_single_match(data, "

    ([^<]+)<\/strong>[^<]+<\/h1>") - title += scrapertools.find_single_match(data, "

    [^<]+<\/strong>([^<]+)<\/h1>") - caratula = scrapertools.find_single_match(data, '
    .*?src="([^"]+)"') - - #
    Descarga tu Archivo torrent!
    + + title = scrapertools.find_single_match(data, "([^<]+)<\/strong>.*?<\/h1>") #corregido para adaptarlo a mispelisy.series.com + title += scrapertools.find_single_match(data, "[^<]+<\/strong>([^<]+)<\/h1>") #corregido para adaptarlo a mispelisy.series.com + #caratula = scrapertools.find_single_match(data, '
    .*?src="([^"]+)"') + caratula = scrapertools.find_single_match(data, ']+>.*?' #patron_ver = '
    ]+>.*?' @@ -309,37 +373,48 @@ def findvideos(item): patron = '
    <\/div[^<]+
    ([^<]+)?<\/div[^<]+
    ([^<]+)?' patron += '<\/div[^<]+
    ([^<]+)?<\/div[^<]+
    0: + itemlist.append(item.clone(title="", action="", folder=False)) + itemlist.append(item.clone(title=" Enlaces Ver: ", action="", text_color="green", folder=False)) for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver: if "Ver" in titulo: servidor = servidor.replace("streamin", "streaminto") - titulo = titulo + " [" + servidor + "]" + titulo = title mostrar_server = True if config.get_setting("hidepremium"): mostrar_server = servertools.is_server_enabled(servidor) + logger.debug("url: " + enlace + " / title: " + title + " / servidor: " + servidor + " / idioma: " + idioma) if mostrar_server: try: devuelve = servertools.findvideosbyserver(enlace, servidor) if devuelve: enlace = devuelve[0][1] itemlist.append( - Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo, - fulltitle=item.title, url=enlace, thumbnail=logo, plot=item.plot, folder=False)) + Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo, quality=titulo, + fulltitle=titulo, url=enlace, thumbnail=logo, plot=item.plot, folder=False)) except: pass + if len(enlaces_descargar) > 0: + itemlist.append(item.clone(title="", action="", folder=False)) + itemlist.append(item.clone(title=" Enlaces Descargar: ", action="", text_color="green", folder=False)) + for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar: if "Ver" not in titulo: servidor = servidor.replace("uploaded", "uploadedto") partes = enlace.split(" ") + titulo = "Partes " p = 1 + logger.debug("url: " + enlace + " / title: " + title + " / servidor: " + servidor + " / idioma: " + idioma) for enlace in partes: - parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " [" + servidor + "]" + parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " - " + title p += 1 mostrar_server = True if config.get_setting("hidepremium"): @@ -349,11 +424,12 @@ def findvideos(item): devuelve = servertools.findvideosbyserver(enlace, servidor) if devuelve: enlace = devuelve[0][1] - itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, - title=parte_titulo, fulltitle=item.title, url=enlace, thumbnail=logo, + itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, quality=parte_titulo, + title=parte_titulo, fulltitle=parte_titulo, url=enlace, thumbnail=logo, plot=item.plot, folder=False)) except: pass + return itemlist @@ -363,6 +439,8 @@ def episodios(item): infoLabels = item.infoLabels data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data) data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") + calidad = item.quality + pattern = '
<ul class="%s">(.*?)</ul>
    ' % "pagination" # item.pattern pagination = scrapertools.find_single_match(data, pattern) if pagination: @@ -381,22 +459,43 @@ def episodios(item): logger.debug("Loading page %s/%s url=%s" % (index, len(list_pages), page)) data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data) data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") - + data = data.replace("chapters", "buscar-list") #Compatibilidad con mispelisy.series.com pattern = '
<ul class="%s">(.*?)</ul>
    ' % "buscar-list" # item.pattern data = scrapertools.get_match(data, pattern) + #logger.debug("data: " + data) - pattern = ']*>
    ]+>(?P.*?)

    ' + if "pelisyseries.com" in host: + pattern = ']*>
    ]+>(?P.*?)?<\/h3>.*?<\/li>' + else: + pattern = ']*>]+>(?P.*?)?<\/h2>' matches = re.compile(pattern, re.DOTALL).findall(data) + #logger.debug("patron: " + pattern) + #logger.debug(matches) + + season = "1" for url, thumb, info in matches: + if "pelisyseries.com" in host: + interm = url + url = thumb + thumb = interm + if "\d+)?)<.+?]+>(?P.*?)\s*Calidad\s*]+>" \ - "[\[]\s*(?P.*?)\s*[\]]" + pattern = ".*?[^>]+>.*?Temporada\s*(?P\d+)?.*?Capitulo(?:s)?\s*(?P\d+)?" \ + "(?:.*?(?P\d+)?)<.+?]+>(?P.*?)?<\/span>\s*Calidad\s*]+>" \ + "[\[]\s*(?P.*?)?\s*[\]]<\/span>" + if "Especial" in info: # Capitulos Especiales + pattern = ".*?[^>]+>.*?Temporada.*?\[.*?(?P\d+).*?\].*?Capitulo.*?\[\s*(?P\d+).*?\]?(?:.*?(?P\d+)?)<.+?]+>(?P.*?)?<\/span>\s*Calidad\s*]+>[\[]\s*(?P.*?)?\s*[\]]<\/span>" + logger.debug("patron: " + pattern) + logger.debug(info) r = re.compile(pattern) match = [m.groupdict() for m in r.finditer(info)][0] + if match['season'] is None: match['season'] = season + if match['episode'] is None: match['episode'] = "0" + if match['quality']: item.quality = match['quality'] + if match["episode2"]: multi = True title = "%s (%sx%s-%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2), @@ -408,12 +507,17 @@ def episodios(item): match["lang"], match["quality"]) else: # old style - pattern = "\[(?P.*?)\].*?\[Cap.(?P\d+)(?P\d{2})(?:_(?P\d+)" \ + pattern = "\[(?P.*?)\].*?\[Cap.(?P\d+).*?(?P\d{2})(?:_(?P\d+)" \ "(?P\d{2}))?.*?\].*?(?:\[(?P.*?)\])?" - + logger.debug("patron: " + pattern) + logger.debug(info) r = re.compile(pattern) match = [m.groupdict() for m in r.finditer(info)][0] - # logger.debug("data %s" % match) + #logger.debug("data %s" % match) + + #if match['season'] is "": match['season'] = season + #if match['episode'] is "": match['episode'] = "0" + #logger.debug(match) str_lang = "" if match["lang"] is not None: @@ -436,18 +540,19 @@ def episodios(item): season = match['season'] episode = match['episode'] + logger.debug("title: " + title + " / url: " + url + " / calidad: " + item.quality + " / multi: " + str(multi) + " / Season: " + str(season) + " / EpisodeNumber: " + str(episode)) itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, quality=item.quality, multi=multi, contentSeason=season, contentEpisodeNumber=episode, infoLabels = infoLabels)) - # order list + #tmdb.set_infoLabels(itemlist, True) tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True) if len(itemlist) > 1: itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber))) if config.get_videolibrary_support() and len(itemlist) > 0: itemlist.append( - item.clone(title="[COLOR orange][B]Añadir esta serie a la videoteca[/B][/COLOR]", action="add_serie_to_library", extra="episodios")) + item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios", quality=calidad)) return itemlist @@ -458,7 +563,7 @@ def search(item, texto): try: item.post = "q=%s" % texto item.pattern = "buscar-list" - itemlist = listado2(item) + itemlist = listado_busqueda(item) return itemlist From 3ff15fe156bbc4c660b64738cc10270639c5e4fc Mon Sep 17 00:00:00 2001 From: Kingbox <37674310+lopezvg@users.noreply.github.com> Date: Wed, 4 Apr 2018 11:28:27 +0200 Subject: [PATCH 04/13] Videolibray: permitir etiquetas-comentarios MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Permite añadir etiquetas o comentarios separadores en la lista de servidores, separando servidores Torrent 
de Ver Directo y Descargas. Actualmente llega a ser dificil de leer si hay muchos servidores --- plugin.video.alfa/channels/videolibrary.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugin.video.alfa/channels/videolibrary.py b/plugin.video.alfa/channels/videolibrary.py index 69b8382f..a23f3c2f 100644 --- a/plugin.video.alfa/channels/videolibrary.py +++ b/plugin.video.alfa/channels/videolibrary.py @@ -398,8 +398,8 @@ def findvideos(item): # Cambiarle el titulo a los servers añadiendoles el nombre del canal delante y # las infoLabels y las imagenes del item si el server no tiene for server in list_servers: - if not server.action: # Ignorar las etiquetas - continue + #if not server.action: # Ignorar/PERMITIR las etiquetas + # continue server.contentChannel = server.channel server.channel = "videolibrary" From beedfb6ab54721cee12980f54c831826e1692cf5 Mon Sep 17 00:00:00 2001 From: danielr460 Date: Wed, 4 Apr 2018 10:00:46 -0500 Subject: [PATCH 05/13] Animemovil: Solucionado renumbertools --- plugin.video.alfa/channels/animemovil.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/plugin.video.alfa/channels/animemovil.py b/plugin.video.alfa/channels/animemovil.py index 8d84eb5e..f5347e4e 100644 --- a/plugin.video.alfa/channels/animemovil.py +++ b/plugin.video.alfa/channels/animemovil.py @@ -204,14 +204,16 @@ def episodios(item): matches = scrapertools.find_multiple_matches(bloque, '
  • Date: Wed, 4 Apr 2018 10:03:23 -0500 Subject: [PATCH 06/13] SeriesLan: Arreglado renumbertools --- plugin.video.alfa/channels/serieslan.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/plugin.video.alfa/channels/serieslan.py b/plugin.video.alfa/channels/serieslan.py index 6a3e3279..fc33b1b8 100644 --- a/plugin.video.alfa/channels/serieslan.py +++ b/plugin.video.alfa/channels/serieslan.py @@ -71,11 +71,11 @@ def lista(item): context2 = autoplay.context context.extend(context2) - itemlist.append(item.clone(title=title, url=url, action="episodios", thumbnail=scrapedthumbnail, show=title, + itemlist.append(item.clone(title=title, url=url, action="episodios", thumbnail=scrapedthumbnail, show=title,contentSerieName=title, context=context)) if b<29: a=a+1 - url="https://serieslan.com/pag-"+str(a) + url=host+"/pag-"+str(a) if b>10: itemlist.append( Item(channel=item.channel, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=url, action="lista", page=0)) @@ -116,14 +116,14 @@ def episodios(item): for pos in name.split(pat): i = i + 1 total_episode += 1 - season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, total_episode) + season, episode = renumbertools.numbered_for_tratk(item.channel, item.contentSerieName, 1, total_episode) if len(name.split(pat)) == i: title += "%sx%s " % (season, str(episode).zfill(2)) else: title += "%sx%s_" % (season, str(episode).zfill(2)) else: total_episode += 1 - season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, total_episode) + season, episode = renumbertools.numbered_for_tratk(item.channel,item.contentSerieName, 1, total_episode) title += "%sx%s " % (season, str(episode).zfill(2)) From d427949252ce7d0655c45a0f8051809ddab99bc5 Mon Sep 17 00:00:00 2001 From: Kingbox <37674310+lopezvg@users.noreply.github.com> Date: Wed, 4 Apr 2018 18:50:10 +0200 Subject: [PATCH 07/13] Unifu permite los titulos en Series MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit En accion=Findvideos despreciaba el títulos de episodios de series. Solo daba la calidad, que es muy insuficiente para valorar el video que se va a ver --- plugin.video.alfa/platformcode/unify.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugin.video.alfa/platformcode/unify.py b/plugin.video.alfa/platformcode/unify.py index ecc63199..f83a4855 100644 --- a/plugin.video.alfa/platformcode/unify.py +++ b/plugin.video.alfa/platformcode/unify.py @@ -412,6 +412,7 @@ def title_format(item): # Compureba si estamos en findvideos, y si hay server, si es asi no se muestra el # titulo sino el server, en caso contrario se muestra el titulo normalmente. 
+ # MODIFICADO: muestra también los títulos para findvideos, sino la información es muy escasa #logger.debug('item.title antes de server: %s'%item.title) if item.action != 'play' and item.server: @@ -420,7 +421,7 @@ def title_format(item): if item.quality == 'default': quality = '' #logger.debug('language_color: %s'%language_color) - item.title = '%s %s' % (server, set_color(quality,'quality')) + item.title = '%s %s' % (server, item.title) #EL TITULO ES NECESARIO porque la calidad sola es insuficiente if lang: item.title = add_languages(item.title, simple_language) #logger.debug('item.title: %s' % item.title) From 74a8bfa5da5dfbab69a850229388ec0b4ed41d72 Mon Sep 17 00:00:00 2001 From: Kingbox <37674310+lopezvg@users.noreply.github.com> Date: Wed, 4 Apr 2018 20:11:25 +0200 Subject: [PATCH 08/13] Torrentrapid, Torrentlocura, Mispelisyseries, Descargas2020: mejoras en pantalla de Servidores MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mejora la legibilidad de los títulos en la pantalla de Servidores. Unido a mejoras en Unify y Videolibrary --- plugin.video.alfa/channels/descargas2020.py | 50 ++++++++++++++---- plugin.video.alfa/channels/mispelisyseries.py | 50 ++++++++++++++---- plugin.video.alfa/channels/torrentlocura.py | 50 ++++++++++++++---- plugin.video.alfa/channels/torrentrapid.py | 52 ++++++++++++++----- 4 files changed, 157 insertions(+), 45 deletions(-) diff --git a/plugin.video.alfa/channels/descargas2020.py b/plugin.video.alfa/channels/descargas2020.py index e6902ffd..7266760b 100644 --- a/plugin.video.alfa/channels/descargas2020.py +++ b/plugin.video.alfa/channels/descargas2020.py @@ -334,15 +334,27 @@ def findvideos(item): # escraped torrent url = scrapertools.find_single_match(data, patron) + title = re.sub(r'\s(\[.*?\])', ' ', title) #scrapea calidad y año + + if item.infoLabels['year']: #añadir el año para series, filtrado por Unify + year = '[%s]' % str(item.infoLabels['year']) + else: + year = "" + + if item.contentType == "episode": + item.contentType = "tvshow" #forzar contenido a "tvshow" para que Unify no destroce el título + if "Temp" in title and item.quality != "": #scrapear información duplicada en Series title = re.sub(r'Temp.*?\[', '[', title) title = re.sub(r'\[Cap.*?\]', '', title) - title = str(item.contentSeason) + "x" + str(item.contentEpisodeNumber) + " - " + item.contentTitle + ", " + title + title = '%sx%s - %s, %s, %s' % (str(item.contentSeason), str(item.contentEpisodeNumber), item.contentTitle, year, title) + + itemlist.append(item.clone(title=title, action="", folder=False)) if url != "": #Torrent itemlist.append( - Item(channel=item.channel, action="play", server="torrent", title=title, quality=title, fulltitle=title, - url=url, thumbnail=caratula, plot=item.plot, folder=False)) + Item(channel=item.channel, action="play", server="torrent", title=title, fulltitle=title, + url=url, thumbnail=caratula, plot=item.plot, infoLabels=item.infoLabels, folder=False)) logger.debug("url: " + url + " / title: " + title + " / calidad: " + item.quality + " / context: " + str(item.context)) @@ -380,8 +392,7 @@ def findvideos(item): #logger.debug(enlaces_ver) if len(enlaces_ver) > 0: - itemlist.append(item.clone(title="", action="", folder=False)) - itemlist.append(item.clone(title=" Enlaces Ver: ", action="", text_color="green", folder=False)) + itemlist.append(item.clone(title=" Enlaces Ver: ", action="", folder=False)) for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver: if "Ver" in titulo: @@ -397,14 
+408,13 @@ def findvideos(item): if devuelve: enlace = devuelve[0][1] itemlist.append( - Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo, quality=titulo, - fulltitle=titulo, url=enlace, thumbnail=logo, plot=item.plot, folder=False)) + Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo, + fulltitle=titulo, url=enlace, thumbnail=logo, plot=item.plot, infoLabels=item.infoLabels, folder=False)) except: pass if len(enlaces_descargar) > 0: - itemlist.append(item.clone(title="", action="", folder=False)) - itemlist.append(item.clone(title=" Enlaces Descargar: ", action="", text_color="green", folder=False)) + itemlist.append(item.clone(title=" Enlaces Descargar: ", action="", folder=False)) for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar: if "Ver" not in titulo: @@ -424,9 +434,9 @@ def findvideos(item): devuelve = servertools.findvideosbyserver(enlace, servidor) if devuelve: enlace = devuelve[0][1] - itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, quality=parte_titulo, + itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=parte_titulo, fulltitle=parte_titulo, url=enlace, thumbnail=logo, - plot=item.plot, folder=False)) + plot=item.plot, infoLabels=item.infoLabels, folder=False)) except: pass @@ -590,6 +600,24 @@ def newest(categoria): itemlist.extend(listado(item)) if itemlist[-1].title == ">> Página siguiente": itemlist.pop() + + if categoria == 'peliculas 4k': + item.url = host+'peliculas-hd/4kultrahd/' + itemlist.extend(listado(item)) + if itemlist[-1].title == ">> Página siguiente": + itemlist.pop() + + if categoria == 'anime': + item.url = host+'anime/' + itemlist.extend(listado(item)) + if itemlist[-1].title == ">> Página siguiente": + itemlist.pop() + + if categoria == 'documentales': + item.url = host+'documentales/' + itemlist.extend(listado(item)) + if itemlist[-1].title == ">> Página siguiente": + itemlist.pop() # Se captura la excepción, para no interrumpir al canal novedades si un canal falla except: diff --git a/plugin.video.alfa/channels/mispelisyseries.py b/plugin.video.alfa/channels/mispelisyseries.py index 0e4d44cd..14e37ff8 100644 --- a/plugin.video.alfa/channels/mispelisyseries.py +++ b/plugin.video.alfa/channels/mispelisyseries.py @@ -334,15 +334,27 @@ def findvideos(item): # escraped torrent url = scrapertools.find_single_match(data, patron) + title = re.sub(r'\s(\[.*?\])', ' ', title) #scrapea calidad y año + + if item.infoLabels['year']: #añadir el año para series, filtrado por Unify + year = '[%s]' % str(item.infoLabels['year']) + else: + year = "" + + if item.contentType == "episode": + item.contentType = "tvshow" #forzar contenido a "tvshow" para que Unify no destroce el título + if "Temp" in title and item.quality != "": #scrapear información duplicada en Series title = re.sub(r'Temp.*?\[', '[', title) title = re.sub(r'\[Cap.*?\]', '', title) - title = str(item.contentSeason) + "x" + str(item.contentEpisodeNumber) + " - " + item.contentTitle + ", " + title + title = '%sx%s - %s, %s, %s' % (str(item.contentSeason), str(item.contentEpisodeNumber), item.contentTitle, year, title) + + itemlist.append(item.clone(title=title, action="", folder=False)) if url != "": #Torrent itemlist.append( - Item(channel=item.channel, action="play", server="torrent", title=title, quality=title, fulltitle=title, - url=url, thumbnail=caratula, plot=item.plot, folder=False)) + 
Item(channel=item.channel, action="play", server="torrent", title=title, fulltitle=title, + url=url, thumbnail=caratula, plot=item.plot, infoLabels=item.infoLabels, folder=False)) logger.debug("url: " + url + " / title: " + title + " / calidad: " + item.quality + " / context: " + str(item.context)) @@ -380,8 +392,7 @@ def findvideos(item): #logger.debug(enlaces_ver) if len(enlaces_ver) > 0: - itemlist.append(item.clone(title="", action="", folder=False)) - itemlist.append(item.clone(title=" Enlaces Ver: ", action="", text_color="green", folder=False)) + itemlist.append(item.clone(title=" Enlaces Ver: ", action="", folder=False)) for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver: if "Ver" in titulo: @@ -397,14 +408,13 @@ def findvideos(item): if devuelve: enlace = devuelve[0][1] itemlist.append( - Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo, quality=titulo, - fulltitle=titulo, url=enlace, thumbnail=logo, plot=item.plot, folder=False)) + Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo, + fulltitle=titulo, url=enlace, thumbnail=logo, plot=item.plot, infoLabels=item.infoLabels, folder=False)) except: pass if len(enlaces_descargar) > 0: - itemlist.append(item.clone(title="", action="", folder=False)) - itemlist.append(item.clone(title=" Enlaces Descargar: ", action="", text_color="green", folder=False)) + itemlist.append(item.clone(title=" Enlaces Descargar: ", action="", folder=False)) for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar: if "Ver" not in titulo: @@ -424,9 +434,9 @@ def findvideos(item): devuelve = servertools.findvideosbyserver(enlace, servidor) if devuelve: enlace = devuelve[0][1] - itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, quality=parte_titulo, + itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=parte_titulo, fulltitle=parte_titulo, url=enlace, thumbnail=logo, - plot=item.plot, folder=False)) + plot=item.plot, infoLabels=item.infoLabels, folder=False)) except: pass @@ -590,6 +600,24 @@ def newest(categoria): itemlist.extend(listado(item)) if itemlist[-1].title == ">> Página siguiente": itemlist.pop() + + if categoria == 'peliculas 4k': + item.url = host+'peliculas-hd/4kultrahd/' + itemlist.extend(listado(item)) + if itemlist[-1].title == ">> Página siguiente": + itemlist.pop() + + if categoria == 'anime': + item.url = host+'anime/' + itemlist.extend(listado(item)) + if itemlist[-1].title == ">> Página siguiente": + itemlist.pop() + + if categoria == 'documentales': + item.url = host+'documentales/' + itemlist.extend(listado(item)) + if itemlist[-1].title == ">> Página siguiente": + itemlist.pop() # Se captura la excepción, para no interrumpir al canal novedades si un canal falla except: diff --git a/plugin.video.alfa/channels/torrentlocura.py b/plugin.video.alfa/channels/torrentlocura.py index 6dce5ad7..4b89efc7 100755 --- a/plugin.video.alfa/channels/torrentlocura.py +++ b/plugin.video.alfa/channels/torrentlocura.py @@ -334,15 +334,27 @@ def findvideos(item): # escraped torrent url = scrapertools.find_single_match(data, patron) + title = re.sub(r'\s(\[.*?\])', ' ', title) #scrapea calidad y año + + if item.infoLabels['year']: #añadir el año para series, filtrado por Unify + year = '[%s]' % str(item.infoLabels['year']) + else: + year = "" + + if item.contentType == "episode": + item.contentType = "tvshow" #forzar contenido a "tvshow" para que 
Unify no destroce el título + if "Temp" in title and item.quality != "": #scrapear información duplicada en Series title = re.sub(r'Temp.*?\[', '[', title) title = re.sub(r'\[Cap.*?\]', '', title) - title = str(item.contentSeason) + "x" + str(item.contentEpisodeNumber) + " - " + item.contentTitle + ", " + title + title = '%sx%s - %s, %s, %s' % (str(item.contentSeason), str(item.contentEpisodeNumber), item.contentTitle, year, title) + + itemlist.append(item.clone(title=title, action="", folder=False)) if url != "": #Torrent itemlist.append( - Item(channel=item.channel, action="play", server="torrent", title=title, quality=title, fulltitle=title, - url=url, thumbnail=caratula, plot=item.plot, folder=False)) + Item(channel=item.channel, action="play", server="torrent", title=title, fulltitle=title, + url=url, thumbnail=caratula, plot=item.plot, infoLabels=item.infoLabels, folder=False)) logger.debug("url: " + url + " / title: " + title + " / calidad: " + item.quality + " / context: " + str(item.context)) @@ -380,8 +392,7 @@ def findvideos(item): #logger.debug(enlaces_ver) if len(enlaces_ver) > 0: - itemlist.append(item.clone(title="", action="", folder=False)) - itemlist.append(item.clone(title=" Enlaces Ver: ", action="", text_color="green", folder=False)) + itemlist.append(item.clone(title=" Enlaces Ver: ", action="", folder=False)) for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver: if "Ver" in titulo: @@ -397,14 +408,13 @@ def findvideos(item): if devuelve: enlace = devuelve[0][1] itemlist.append( - Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo, quality=titulo, - fulltitle=titulo, url=enlace, thumbnail=logo, plot=item.plot, folder=False)) + Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo, + fulltitle=titulo, url=enlace, thumbnail=logo, plot=item.plot, infoLabels=item.infoLabels, folder=False)) except: pass if len(enlaces_descargar) > 0: - itemlist.append(item.clone(title="", action="", folder=False)) - itemlist.append(item.clone(title=" Enlaces Descargar: ", action="", text_color="green", folder=False)) + itemlist.append(item.clone(title=" Enlaces Descargar: ", action="", folder=False)) for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar: if "Ver" not in titulo: @@ -424,9 +434,9 @@ def findvideos(item): devuelve = servertools.findvideosbyserver(enlace, servidor) if devuelve: enlace = devuelve[0][1] - itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, quality=parte_titulo, + itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=parte_titulo, fulltitle=parte_titulo, url=enlace, thumbnail=logo, - plot=item.plot, folder=False)) + plot=item.plot, infoLabels=item.infoLabels, folder=False)) except: pass @@ -590,6 +600,24 @@ def newest(categoria): itemlist.extend(listado(item)) if itemlist[-1].title == ">> Página siguiente": itemlist.pop() + + if categoria == 'peliculas 4k': + item.url = host+'peliculas-hd/4kultrahd/' + itemlist.extend(listado(item)) + if itemlist[-1].title == ">> Página siguiente": + itemlist.pop() + + if categoria == 'anime': + item.url = host+'anime/' + itemlist.extend(listado(item)) + if itemlist[-1].title == ">> Página siguiente": + itemlist.pop() + + if categoria == 'documentales': + item.url = host+'documentales/' + itemlist.extend(listado(item)) + if itemlist[-1].title == ">> Página siguiente": + itemlist.pop() # Se captura la excepción, para no interrumpir 
al canal novedades si un canal falla except: diff --git a/plugin.video.alfa/channels/torrentrapid.py b/plugin.video.alfa/channels/torrentrapid.py index fa93fce0..830d9b2a 100644 --- a/plugin.video.alfa/channels/torrentrapid.py +++ b/plugin.video.alfa/channels/torrentrapid.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- +# -*- coding: utf-8 -*- import re @@ -334,15 +334,27 @@ def findvideos(item): # escraped torrent url = scrapertools.find_single_match(data, patron) + title = re.sub(r'\s(\[.*?\])', ' ', title) #scrapea calidad y año + + if item.infoLabels['year']: #añadir el año para series, filtrado por Unify + year = '[%s]' % str(item.infoLabels['year']) + else: + year = "" + + if item.contentType == "episode": + item.contentType = "tvshow" #forzar contenido a "tvshow" para que Unify no destroce el título + if "Temp" in title and item.quality != "": #scrapear información duplicada en Series title = re.sub(r'Temp.*?\[', '[', title) title = re.sub(r'\[Cap.*?\]', '', title) - title = str(item.contentSeason) + "x" + str(item.contentEpisodeNumber) + " - " + item.contentTitle + ", " + title + title = '%sx%s - %s, %s, %s' % (str(item.contentSeason), str(item.contentEpisodeNumber), item.contentTitle, year, title) + + itemlist.append(item.clone(title=title, action="", folder=False)) if url != "": #Torrent itemlist.append( - Item(channel=item.channel, action="play", server="torrent", title=title, quality=title, fulltitle=title, - url=url, thumbnail=caratula, plot=item.plot, folder=False)) + Item(channel=item.channel, action="play", server="torrent", title=title, fulltitle=title, + url=url, thumbnail=caratula, plot=item.plot, infoLabels=item.infoLabels, folder=False)) logger.debug("url: " + url + " / title: " + title + " / calidad: " + item.quality + " / context: " + str(item.context)) @@ -380,8 +392,7 @@ def findvideos(item): #logger.debug(enlaces_ver) if len(enlaces_ver) > 0: - itemlist.append(item.clone(title="", action="", folder=False)) - itemlist.append(item.clone(title=" Enlaces Ver: ", action="", text_color="green", folder=False)) + itemlist.append(item.clone(title=" Enlaces Ver: ", action="", folder=False)) for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver: if "Ver" in titulo: @@ -397,14 +408,13 @@ def findvideos(item): if devuelve: enlace = devuelve[0][1] itemlist.append( - Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo, quality=titulo, - fulltitle=titulo, url=enlace, thumbnail=logo, plot=item.plot, folder=False)) + Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo, + fulltitle=titulo, url=enlace, thumbnail=logo, plot=item.plot, infoLabels=item.infoLabels, folder=False)) except: pass if len(enlaces_descargar) > 0: - itemlist.append(item.clone(title="", action="", folder=False)) - itemlist.append(item.clone(title=" Enlaces Descargar: ", action="", text_color="green", folder=False)) + itemlist.append(item.clone(title=" Enlaces Descargar: ", action="", folder=False)) for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar: if "Ver" not in titulo: @@ -424,9 +434,9 @@ def findvideos(item): devuelve = servertools.findvideosbyserver(enlace, servidor) if devuelve: enlace = devuelve[0][1] - itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, quality=parte_titulo, + itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=parte_titulo, fulltitle=parte_titulo, url=enlace, thumbnail=logo, - 
plot=item.plot, folder=False)) + plot=item.plot, infoLabels=item.infoLabels, folder=False)) except: pass @@ -590,6 +600,24 @@ def newest(categoria): itemlist.extend(listado(item)) if itemlist[-1].title == ">> Página siguiente": itemlist.pop() + + if categoria == 'peliculas 4k': + item.url = host+'peliculas-hd/4kultrahd/' + itemlist.extend(listado(item)) + if itemlist[-1].title == ">> Página siguiente": + itemlist.pop() + + if categoria == 'anime': + item.url = host+'anime/' + itemlist.extend(listado(item)) + if itemlist[-1].title == ">> Página siguiente": + itemlist.pop() + + if categoria == 'documentales': + item.url = host+'documentales/' + itemlist.extend(listado(item)) + if itemlist[-1].title == ">> Página siguiente": + itemlist.pop() # Se captura la excepción, para no interrumpir al canal novedades si un canal falla except: From 535c0ff4a7eb194f1ef805eaa5b6ca5454969c26 Mon Sep 17 00:00:00 2001 From: Kingbox <37674310+lopezvg@users.noreply.github.com> Date: Thu, 5 Apr 2018 09:15:42 +0200 Subject: [PATCH 09/13] =?UTF-8?q?Clonaci=C3=B3n=20de=20Mispeliculasyseries?= =?UTF-8?q?,=20descargas2020,=20torrentrapid=20y=20torrentlocura=20sobre?= =?UTF-8?q?=20c=C3=B3digo=20mejorado=20de=20newpct1?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mejoras en la legibilidad de títulos en pantalla de servidores --- plugin.video.alfa/channels/descargas2020.py | 16 +++++++--------- plugin.video.alfa/channels/mispelisyseries.py | 16 +++++++--------- plugin.video.alfa/channels/torrentlocura.py | 16 +++++++--------- plugin.video.alfa/channels/torrentrapid.py | 18 ++++++++---------- 4 files changed, 29 insertions(+), 37 deletions(-) diff --git a/plugin.video.alfa/channels/descargas2020.py b/plugin.video.alfa/channels/descargas2020.py index 7266760b..904ce44e 100644 --- a/plugin.video.alfa/channels/descargas2020.py +++ b/plugin.video.alfa/channels/descargas2020.py @@ -334,23 +334,21 @@ def findvideos(item): # escraped torrent url = scrapertools.find_single_match(data, patron) - title = re.sub(r'\s(\[.*?\])', ' ', title) #scrapea calidad y año - if item.infoLabels['year']: #añadir el año para series, filtrado por Unify year = '[%s]' % str(item.infoLabels['year']) else: year = "" - - if item.contentType == "episode": - item.contentType = "tvshow" #forzar contenido a "tvshow" para que Unify no destroce el título - if "Temp" in title and item.quality != "": #scrapear información duplicada en Series + if item.contentType == "episode": #scrapear información duplicada en Series title = re.sub(r'Temp.*?\[', '[', title) title = re.sub(r'\[Cap.*?\]', '', title) - title = '%sx%s - %s, %s, %s' % (str(item.contentSeason), str(item.contentEpisodeNumber), item.contentTitle, year, title) - - itemlist.append(item.clone(title=title, action="", folder=False)) + title = '%sx%s - %s %s, %s' % (str(item.contentSeason), str(item.contentEpisodeNumber), item.contentTitle, year, title) + itemlist.append(item.clone(title=title, action="", folder=False)) #Título con todos los datos del vídeo + + if item.contentType != "episode": + title = re.sub(r'\s(\[.*?\])', ' ', title) #scrapea calidad en pelis + if url != "": #Torrent itemlist.append( Item(channel=item.channel, action="play", server="torrent", title=title, fulltitle=title, diff --git a/plugin.video.alfa/channels/mispelisyseries.py b/plugin.video.alfa/channels/mispelisyseries.py index 14e37ff8..0613dd36 100644 --- a/plugin.video.alfa/channels/mispelisyseries.py +++ b/plugin.video.alfa/channels/mispelisyseries.py @@ -334,23 +334,21 @@ def 
findvideos(item): # escraped torrent url = scrapertools.find_single_match(data, patron) - title = re.sub(r'\s(\[.*?\])', ' ', title) #scrapea calidad y año - if item.infoLabels['year']: #añadir el año para series, filtrado por Unify year = '[%s]' % str(item.infoLabels['year']) else: year = "" - - if item.contentType == "episode": - item.contentType = "tvshow" #forzar contenido a "tvshow" para que Unify no destroce el título - if "Temp" in title and item.quality != "": #scrapear información duplicada en Series + if item.contentType == "episode": #scrapear información duplicada en Series title = re.sub(r'Temp.*?\[', '[', title) title = re.sub(r'\[Cap.*?\]', '', title) - title = '%sx%s - %s, %s, %s' % (str(item.contentSeason), str(item.contentEpisodeNumber), item.contentTitle, year, title) - - itemlist.append(item.clone(title=title, action="", folder=False)) + title = '%sx%s - %s %s, %s' % (str(item.contentSeason), str(item.contentEpisodeNumber), item.contentTitle, year, title) + itemlist.append(item.clone(title=title, action="", folder=False)) #Título con todos los datos del vídeo + + if item.contentType != "episode": + title = re.sub(r'\s(\[.*?\])', ' ', title) #scrapea calidad en pelis + if url != "": #Torrent itemlist.append( Item(channel=item.channel, action="play", server="torrent", title=title, fulltitle=title, diff --git a/plugin.video.alfa/channels/torrentlocura.py b/plugin.video.alfa/channels/torrentlocura.py index 4b89efc7..adc94465 100755 --- a/plugin.video.alfa/channels/torrentlocura.py +++ b/plugin.video.alfa/channels/torrentlocura.py @@ -334,23 +334,21 @@ def findvideos(item): # escraped torrent url = scrapertools.find_single_match(data, patron) - title = re.sub(r'\s(\[.*?\])', ' ', title) #scrapea calidad y año - if item.infoLabels['year']: #añadir el año para series, filtrado por Unify year = '[%s]' % str(item.infoLabels['year']) else: year = "" - - if item.contentType == "episode": - item.contentType = "tvshow" #forzar contenido a "tvshow" para que Unify no destroce el título - if "Temp" in title and item.quality != "": #scrapear información duplicada en Series + if item.contentType == "episode": #scrapear información duplicada en Series title = re.sub(r'Temp.*?\[', '[', title) title = re.sub(r'\[Cap.*?\]', '', title) - title = '%sx%s - %s, %s, %s' % (str(item.contentSeason), str(item.contentEpisodeNumber), item.contentTitle, year, title) - - itemlist.append(item.clone(title=title, action="", folder=False)) + title = '%sx%s - %s %s, %s' % (str(item.contentSeason), str(item.contentEpisodeNumber), item.contentTitle, year, title) + itemlist.append(item.clone(title=title, action="", folder=False)) #Título con todos los datos del vídeo + + if item.contentType != "episode": + title = re.sub(r'\s(\[.*?\])', ' ', title) #scrapea calidad en pelis + if url != "": #Torrent itemlist.append( Item(channel=item.channel, action="play", server="torrent", title=title, fulltitle=title, diff --git a/plugin.video.alfa/channels/torrentrapid.py b/plugin.video.alfa/channels/torrentrapid.py index 830d9b2a..2ef3d953 100644 --- a/plugin.video.alfa/channels/torrentrapid.py +++ b/plugin.video.alfa/channels/torrentrapid.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- +# -*- coding: utf-8 -*- import re @@ -334,23 +334,21 @@ def findvideos(item): # escraped torrent url = scrapertools.find_single_match(data, patron) - title = re.sub(r'\s(\[.*?\])', ' ', title) #scrapea calidad y año - if item.infoLabels['year']: #añadir el año para series, filtrado por Unify year = '[%s]' % str(item.infoLabels['year']) else: 
year = "" - - if item.contentType == "episode": - item.contentType = "tvshow" #forzar contenido a "tvshow" para que Unify no destroce el título - if "Temp" in title and item.quality != "": #scrapear información duplicada en Series + if item.contentType == "episode": #scrapear información duplicada en Series title = re.sub(r'Temp.*?\[', '[', title) title = re.sub(r'\[Cap.*?\]', '', title) - title = '%sx%s - %s, %s, %s' % (str(item.contentSeason), str(item.contentEpisodeNumber), item.contentTitle, year, title) - - itemlist.append(item.clone(title=title, action="", folder=False)) + title = '%sx%s - %s %s, %s' % (str(item.contentSeason), str(item.contentEpisodeNumber), item.contentTitle, year, title) + itemlist.append(item.clone(title=title, action="", folder=False)) #Título con todos los datos del vídeo + + if item.contentType != "episode": + title = re.sub(r'\s(\[.*?\])', ' ', title) #scrapea calidad en pelis + if url != "": #Torrent itemlist.append( Item(channel=item.channel, action="play", server="torrent", title=title, fulltitle=title, From 0a121b7f73235fcd8cf5b7862da4048ef3949e24 Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Thu, 5 Apr 2018 14:52:25 -0500 Subject: [PATCH 10/13] kbagi: fix --- plugin.video.alfa/servers/kbagi.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/plugin.video.alfa/servers/kbagi.py b/plugin.video.alfa/servers/kbagi.py index b74162d1..4467e870 100644 --- a/plugin.video.alfa/servers/kbagi.py +++ b/plugin.video.alfa/servers/kbagi.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- +from channels import kbagi from core import httptools from core import jsontools from core import scrapertools @@ -8,15 +9,16 @@ from platformcode import logger def test_video_exists(page_url): logger.info("(page_url='%s')" % page_url) + domain = "diskokosmiko.mx" if "kbagi.com" in page_url: - from channels import kbagi - logueado, error_message = kbagi.login("kbagi.com") - if not logueado: - return False, error_message + domain = "kbagi.com" + logueado, error_message = kbagi.login(domain) + if not logueado: + return False, error_message data = httptools.downloadpage(page_url).data if ("File was deleted" or "Not Found" or "File was locked by administrator") in data: - return False, "[kbagi] El archivo no existe o ha sido borrado" + return False, "[%s] El archivo no existe o ha sido borrado" %domain return True, "" From c71b7a6c27c3bbc71ee6bffce05103eb1b3a709d Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Thu, 5 Apr 2018 14:53:22 -0500 Subject: [PATCH 11/13] httptools: update --- plugin.video.alfa/core/httptools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin.video.alfa/core/httptools.py b/plugin.video.alfa/core/httptools.py index d1a553e2..ffda92c4 100755 --- a/plugin.video.alfa/core/httptools.py +++ b/plugin.video.alfa/core/httptools.py @@ -25,7 +25,7 @@ ficherocookies = os.path.join(config.get_data_path(), "cookies.dat") # Headers por defecto, si no se especifica nada default_headers = dict() -default_headers["User-Agent"] = "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3163.100 Safari/537.36" +default_headers["User-Agent"] = "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3163.100 Safari/537.36" default_headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8" default_headers["Accept-Language"] = "es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3" default_headers["Accept-Charset"] = 
"UTF-8" From 745f5c7a242f793eb60a013e8574a6c8dc670371 Mon Sep 17 00:00:00 2001 From: Alfa <30527549+alfa-addon@users.noreply.github.com> Date: Thu, 5 Apr 2018 15:54:08 -0500 Subject: [PATCH 12/13] Update unify.py --- plugin.video.alfa/platformcode/unify.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/plugin.video.alfa/platformcode/unify.py b/plugin.video.alfa/platformcode/unify.py index f83a4855..ecc63199 100644 --- a/plugin.video.alfa/platformcode/unify.py +++ b/plugin.video.alfa/platformcode/unify.py @@ -412,7 +412,6 @@ def title_format(item): # Compureba si estamos en findvideos, y si hay server, si es asi no se muestra el # titulo sino el server, en caso contrario se muestra el titulo normalmente. - # MODIFICADO: muestra también los títulos para findvideos, sino la información es muy escasa #logger.debug('item.title antes de server: %s'%item.title) if item.action != 'play' and item.server: @@ -421,7 +420,7 @@ def title_format(item): if item.quality == 'default': quality = '' #logger.debug('language_color: %s'%language_color) - item.title = '%s %s' % (server, item.title) #EL TITULO ES NECESARIO porque la calidad sola es insuficiente + item.title = '%s %s' % (server, set_color(quality,'quality')) if lang: item.title = add_languages(item.title, simple_language) #logger.debug('item.title: %s' % item.title) From 58d8709534afc3a74acfb639b88fa054e0fee219 Mon Sep 17 00:00:00 2001 From: Alfa <30527549+alfa-addon@users.noreply.github.com> Date: Thu, 5 Apr 2018 16:05:18 -0500 Subject: [PATCH 13/13] v2.5.7 --- plugin.video.alfa/addon.xml | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/plugin.video.alfa/addon.xml b/plugin.video.alfa/addon.xml index 8510f408..5bd05fdc 100755 --- a/plugin.video.alfa/addon.xml +++ b/plugin.video.alfa/addon.xml @@ -1,5 +1,5 @@ - + @@ -19,12 +19,16 @@ [B]Estos son los cambios para esta versión:[/B] [COLOR green][B]Canales agregados y arreglos[/B][/COLOR] - » cinemahd » seriesdanko - » doramasmp4 » pelisplus - » descargas2020 + » diskokosmiko » gamovideo + » mispelisyseries » pelisplus + » seriespapaya » descargas2020 + » openload » torrentlocura + » torrentrapid » streamcloud + » danimados » animemovil + » serieslan ¤ arreglos internos - ¤ Gracias a la colaboración de @t1254362 en esta versión + ¤ Gracias a la colaboración de @pipcat y @lopezvg en ésta versión Navega con Kodi por páginas web para ver sus videos de manera fácil. Browse web pages using Kodi