From e20b32b7e96af6ed065f369aec3008aa22712c31 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Tue, 17 Oct 2017 16:58:57 -0500
Subject: [PATCH 01/14] pelismundo: codigo mejorado
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

pelismundo: código mejorado
---
 plugin.video.alfa/channels/pelismundo.py | 44 ++++++------------------
 1 file changed, 11 insertions(+), 33 deletions(-)

diff --git a/plugin.video.alfa/channels/pelismundo.py b/plugin.video.alfa/channels/pelismundo.py
index 2e89c06e..b7126ca1 100644
--- a/plugin.video.alfa/channels/pelismundo.py
+++ b/plugin.video.alfa/channels/pelismundo.py
@@ -12,15 +12,8 @@ from core import tmdb
 from core.item import Item
 from platformcode import config, logger
 
-__channel__='allcalidad'
-
-host = "http://www.pelismundo.com/"
-
-try:
-    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
-except:
-    __modo_grafico__ = True
-
+host = "http://www.pelismundo.com"
+idiomas = [["Castellano","ESP"],["Subtitulad","VOSE"],["Latino","LAT"]]
 
 def mainlist(item):
     logger.info()
@@ -80,10 +73,9 @@ def sub_search(item):
     patron = '(?s)href="([^"]+)".*?'
     patron += 'title="([^"]+)".*?'
     patron += 'src="([^"]+)".*?'
-    patron += 'Idioma.*?tag">([^<]+).*?'
-    patron += 'Calidad(.*?<)\/'
+    patron += 'Idioma(.*?)Cal'
+    patron += 'idad(.*?<)\/'
     match = scrapertools.find_multiple_matches(bloque, patron)
-    scrapertools.printMatches(match)
     for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedlanguages, scrapedquality in match:
         year = scrapertools.find_single_match(scrapedtitle, '[0-9]{4}')
         scrapedquality = scrapertools.find_single_match(scrapedquality, 'rel="tag">([^<]+)<')
@@ -93,21 +85,14 @@ def sub_search(item):
             scrapedtitle = scrapedtitle.replace(st, "")
         title = scrapedtitle
         if year:
-            title += " (" + year + ")"
+            title += "(" + year + ")"
         if scrapedquality:
             title += " (" + scrapedquality + ")"
-        patronidiomas = ''
         idiomas_disponibles = []
-        matchidioma = scrapertools.find_single_match(scrapedlanguages, 'Castellano')
-        if matchidioma:
-            idiomas_disponibles.append("ESP")
-        matchidioma = scrapertools.find_single_match(scrapedlanguages, 'Subtitulado')
-        if matchidioma:
-            idiomas_disponibles.append("VOSE")
-        matchidioma = scrapertools.find_single_match(scrapedlanguages, 'Latino')
-        if matchidioma:
-            idiomas_disponibles.append("LAT")
         idiomas_disponibles1 = ""
+        for lang in range(len(idiomas)):
+            if idiomas[lang][0] in scrapedlanguages:
+                idiomas_disponibles.append(idiomas[lang][1])
         if idiomas_disponibles:
             idiomas_disponibles1 = "[" + "/".join(idiomas_disponibles) + "]"
         title += " %s" %idiomas_disponibles1
@@ -171,18 +156,11 @@ def peliculas(item):
             title += " (" + year + ")"
         if scrapedquality:
             title += " (" + scrapedquality + ")"
-        patronidiomas = ''
         idiomas_disponibles = []
-        matchidioma = scrapertools.find_single_match(scrapedlanguages, 'Castellano')
-        if matchidioma:
-            idiomas_disponibles.append("ESP")
-        matchidioma = scrapertools.find_single_match(scrapedlanguages, 'Subtitulado')
-        if matchidioma:
-            idiomas_disponibles.append("VOSE")
-        matchidioma = scrapertools.find_single_match(scrapedlanguages, 'Latino')
-        if matchidioma:
-            idiomas_disponibles.append("LAT")
         idiomas_disponibles1 = ""
+        for lang in range(len(idiomas)):
+            if idiomas[lang][0] in scrapedlanguages:
+                idiomas_disponibles.append(idiomas[lang][1])
         if idiomas_disponibles:
            idiomas_disponibles1 = "[" + "/".join(idiomas_disponibles) + "]"
         title += " %s" %idiomas_disponibles1
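Note: PATCH 01 above (and PATCH 02 that follows) replaces three copy-pasted find_single_match blocks with a single language lookup table applied to the scraped HTML fragment. A minimal, self-contained sketch of that idea — the helper name and the sample scrapedlanguages string are hypothetical, not part of the patches:

    # -*- coding: utf-8 -*-
    # Sketch of the lookup-table approach used by PATCH 01/02; not part of the patch itself.
    idiomas = [["Castellano", "ESP"], ["Subtitulad", "VOSE"], ["Latino", "LAT"]]

    def etiquetas_idioma(scrapedlanguages):
        # Keep the short tag of every language name found in the scraped block.
        disponibles = [tag for nombre, tag in idiomas if nombre in scrapedlanguages]
        return "[" + "/".join(disponibles) + "]" if disponibles else ""

    # Hypothetical scraped fragment:
    print(etiquetas_idioma('<a rel="tag">Castellano</a> <a rel="tag">Latino</a>'))  # [ESP/LAT]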
From 0e7c8d22ef567722181c165e206bcfb2d6026759 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Wed, 18 Oct 2017 09:19:57 -0500
Subject: [PATCH 02/14] Update pelismundo.py

---
 plugin.video.alfa/channels/pelismundo.py | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/plugin.video.alfa/channels/pelismundo.py b/plugin.video.alfa/channels/pelismundo.py
index b7126ca1..d40d4e43 100644
--- a/plugin.video.alfa/channels/pelismundo.py
+++ b/plugin.video.alfa/channels/pelismundo.py
@@ -13,7 +13,7 @@ from core.item import Item
 from platformcode import config, logger
 
 host = "http://www.pelismundo.com"
-idiomas = [["Castellano","ESP"],["Subtitulad","VOSE"],["Latino","LAT"]]
+idiomas = {"Castellano":"CAST","Subtitulad":"VOSE","Latino":"LAT"}
 
 def mainlist(item):
     logger.info()
@@ -90,9 +90,9 @@ def sub_search(item):
             title += " (" + scrapedquality + ")"
         idiomas_disponibles = []
         idiomas_disponibles1 = ""
-        for lang in range(len(idiomas)):
-            if idiomas[lang][0] in scrapedlanguages:
-                idiomas_disponibles.append(idiomas[lang][1])
+        for lang in idiomas.keys():
+            if lang in scrapedlanguages:
+                idiomas_disponibles.append(idiomas[lang])
         if idiomas_disponibles:
             idiomas_disponibles1 = "[" + "/".join(idiomas_disponibles) + "]"
         title += " %s" %idiomas_disponibles1
@@ -157,10 +157,10 @@ def peliculas(item):
         if scrapedquality:
             title += " (" + scrapedquality + ")"
         idiomas_disponibles = []
+        for lang in idiomas.keys():
+            if lang in scrapedlanguages:
+                idiomas_disponibles.append(idiomas[lang])
         idiomas_disponibles1 = ""
-        for lang in range(len(idiomas)):
-            if idiomas[lang][0] in scrapedlanguages:
-                idiomas_disponibles.append(idiomas[lang][1])
         if idiomas_disponibles:
             idiomas_disponibles1 = "[" + "/".join(idiomas_disponibles) + "]"
         title += " %s" %idiomas_disponibles1

From 341953539eaeba7af52928212808ed62a98471d3 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Wed, 18 Oct 2017 09:29:02 -0500
Subject: [PATCH 03/14] Update pelismundo.py

---
 plugin.video.alfa/channels/pelismundo.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/plugin.video.alfa/channels/pelismundo.py b/plugin.video.alfa/channels/pelismundo.py
index d40d4e43..c87f783f 100644
--- a/plugin.video.alfa/channels/pelismundo.py
+++ b/plugin.video.alfa/channels/pelismundo.py
@@ -197,7 +197,7 @@ def findvideos(item):
         title = "Ver en: %s " + "(" + scrapedlanguage + ")"
         itemlist.append(item.clone(action = "play",
                                    title = title,
-                                   language = item.language,
+                                   language = scrapedlanguage,
                                    quality = item.quality,
                                    url = scrapedurl
                                    ))

From efa960bcb757fbd92a9b125ea94a08424f2740c0 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Wed, 18 Oct 2017 12:57:34 -0500
Subject: [PATCH 04/14] flashx fix

---
 plugin.video.alfa/servers/flashx.py | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/plugin.video.alfa/servers/flashx.py b/plugin.video.alfa/servers/flashx.py
index e8e2a385..7ce0c6b7 100644
--- a/plugin.video.alfa/servers/flashx.py
+++ b/plugin.video.alfa/servers/flashx.py
@@ -14,8 +14,7 @@ def test_video_exists(page_url):
     logger.info("(page_url='%s')" % page_url)
 
     data = httptools.downloadpage(page_url, cookies=False).data
-
-    if 'File Not Found' in data or 'file was deleted' in data:
+    if 'file was deleted' in data:
         return False, "[FlashX] El archivo no existe o ha sido borrado"
     elif 'Video is processing now' in data:
         return False, "[FlashX] El archivo se está procesando"
@@ -45,7 +44,7 @@ def get_video_url(page_url, premium=False, user="",
password="", video_password= headers['Accept'] = "*/*" headers['Host'] = "www.flashx.tv" - coding_url = 'https://www.flashx.tv/flashx.php?f=x&fxfx=6' + coding_url = 'https://www.flashx.tv/flashx.php?f=y&fxfx=6' headers['X-Requested-With'] = 'XMLHttpRequest' httptools.downloadpage(coding_url, headers=headers) @@ -56,7 +55,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= headers.pop('X-Requested-With') headers['Content-Type'] = 'application/x-www-form-urlencoded' - data = httptools.downloadpage('https://www.flashx.tv/dl?playnow', post, headers, replace_headers=True).data + data = httptools.downloadpage('https://www.flashx.tv/dl?playitnow', post, headers, replace_headers=True).data # Si salta aviso, se carga la pagina de comprobacion y luego la inicial # LICENSE GPL3, de alfa-addon: https://github.com/alfa-addon/ ES OBLIGATORIO AÑADIR ESTAS LÍNEAS @@ -64,7 +63,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= url_reload = scrapertools.find_single_match(data, 'try to reload the page.*?href="([^"]+)"') try: data = httptools.downloadpage(url_reload, cookies=False).data - data = httptools.downloadpage('https://www.flashx.tv/dl?playnow', post, headers, replace_headers=True).data + data = httptools.downloadpage('https://www.flashx.tv/dl?playitnow', post, headers, replace_headers=True).data # LICENSE GPL3, de alfa-addon: https://github.com/alfa-addon/ ES OBLIGATORIO AÑADIR ESTAS LÍNEAS except: pass From 5cb64e4b41b79cbc4f0951a302a3a2aa8b5461e3 Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Wed, 18 Oct 2017 15:31:52 -0500 Subject: [PATCH 05/14] hdfull fix marcar como visto --- plugin.video.alfa/channels/hdfull.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/plugin.video.alfa/channels/hdfull.py b/plugin.video.alfa/channels/hdfull.py index c319625a..9e4ad2f1 100644 --- a/plugin.video.alfa/channels/hdfull.py +++ b/plugin.video.alfa/channels/hdfull.py @@ -616,6 +616,8 @@ def findvideos(item): url_targets = item.url ## Vídeos + id = "" + type = "" if "###" in item.url: id = item.url.split("###")[1].split(";")[0] type = item.url.split("###")[1].split(";")[1] @@ -698,6 +700,9 @@ def findvideos(item): it2 = servertools.get_servers_itemlist(it2, lambda i: i.title % i.server.capitalize()) it2.sort(key=lambda it: (it.tipo1, it.idioma, it.server)) + for item in it2: + if "###" not in item.url: + item.url += "###" + id + ";" + type itemlist.extend(it1) itemlist.extend(it2) ## 2 = película @@ -707,7 +712,6 @@ def findvideos(item): action="add_pelicula_to_library", url=url_targets, thumbnail = item.thumbnail, fulltitle = item.contentTitle )) - return itemlist From cc4fc8cbdea4ad5f96dadbd575c1bfe1db0d43d3 Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Wed, 18 Oct 2017 17:29:30 -0500 Subject: [PATCH 06/14] Delete vixto.json --- plugin.video.alfa/channels/vixto.json | 100 -------------------------- 1 file changed, 100 deletions(-) delete mode 100755 plugin.video.alfa/channels/vixto.json diff --git a/plugin.video.alfa/channels/vixto.json b/plugin.video.alfa/channels/vixto.json deleted file mode 100755 index a23a7fa4..00000000 --- a/plugin.video.alfa/channels/vixto.json +++ /dev/null @@ -1,100 +0,0 @@ -{ - "id": "vixto", - "name": "Vixto", - "active": true, - "adult": false, - "language": ["cast", "lat"], - "banner": "vixto.png", - "thumbnail": "http://i.imgur.com/y4c4HT2.png", - "version": 1, - "changes": [ - { - "date": "15/03/2017", - 
"description": "limpieza código" - }, - { - "date": "16/02/2017", - "description": "Correccion para el apartado de series" - }, - { - "date": "12/11/2016", - "description": "Primera version, sustituye a oranline" - } - ], - "categories": [ - "movie", - "tvshow", - "vos" - ], - "settings": [ - { - "id": "include_in_newest_peliculas", - "type": "bool", - "label": "Incluir en Novedades - Películas", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "modo_grafico", - "type": "bool", - "label": "Buscar información extra", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "perfil", - "type": "list", - "label": "Perfil de color", - "default": 0, - "enabled": true, - "visible": true, - "lvalues": [ - "Perfil 3", - "Perfil 2", - "Perfil 1" - ] - }, - { - "id": "filterlanguages", - "type": "list", - "label": "Mostrar enlaces en idioma...", - "default": 3, - "enabled": true, - "visible": true, - "lvalues": [ - "VOSE", - "Latino", - "Español", - "No filtrar" - ] - }, - { - "id": "filterlinks", - "type": "list", - "label": "Mostrar enlaces de tipo...", - "default": 2, - "enabled": true, - "visible": true, - "lvalues": [ - "Solo Descarga", - "Solo Online", - "No filtrar" - ] - }, - { - "id": "orderlinks", - "type": "list", - "label": "Ordenar enlaces por...", - "default": 2, - "enabled": true, - "visible": true, - "lvalues": [ - "Servidor", - "Idioma", - "Más recientes" - ] - } - ] -} \ No newline at end of file From 88d26523cdb58b3ea4711656069960fa7bb02453 Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Wed, 18 Oct 2017 17:30:00 -0500 Subject: [PATCH 07/14] Delete vixto.py --- plugin.video.alfa/channels/vixto.py | 383 ---------------------------- 1 file changed, 383 deletions(-) delete mode 100755 plugin.video.alfa/channels/vixto.py diff --git a/plugin.video.alfa/channels/vixto.py b/plugin.video.alfa/channels/vixto.py deleted file mode 100755 index 6337d4d6..00000000 --- a/plugin.video.alfa/channels/vixto.py +++ /dev/null @@ -1,383 +0,0 @@ -# -*- coding: utf-8 -*- - -import re - -from core import httptools -from core import scrapertools -from core import servertools -from core import tmdb -from core.item import Item -from platformcode import config, logger - -# Configuracion del canal -__modo_grafico__ = config.get_setting('modo_grafico', "vixto") -__perfil__ = config.get_setting('perfil', "vixto") - -# Fijar perfil de color -perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'], - ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'], - ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']] -color1, color2, color3 = perfil[__perfil__] - -host = "http://www.vixto.net/" - - -def mainlist(item): - logger.info() - itemlist = list() - - itemlist.append(item.clone(title="Películas", text_color=color2, action="", - text_bold=True)) - itemlist.append(item.clone(action="listado", title=" Estrenos", text_color=color1, url=host, - thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/" - "0/Directors%20Chair.png")) - itemlist.append(item.clone(action="listado", title=" Novedades", text_color=color1, url=host, - thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/" - "0/Directors%20Chair.png")) - itemlist.append(item.clone(action="listado", title="Series - Novedades", text_color=color2, url=host, - thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/" - "0/TV%20Series.png", text_bold=True)) - - itemlist.append(item.clone(action="search", 
title="Buscar...", text_color=color3, - url="http://www.vixto.net/buscar?q=")) - - itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False)) - return itemlist - - -def configuracion(item): - from platformcode import platformtools - ret = platformtools.show_channel_settings() - platformtools.itemlist_refresh() - return ret - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = item.url + texto - try: - return busqueda(item) - # Se captura la excepción, para no interrumpir al buscador global si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("%{0}".format(line)) - return [] - - -def newest(categoria): - logger.info() - itemlist = list() - item = Item() - try: - if categoria == 'peliculas': - item.url = host - itemlist = listado(item) - - if itemlist[-1].action == "listado": - itemlist.pop() - item.title = "Estrenos" - itemlist.extend(listado(item)) - - # Se captura la excepción, para no interrumpir al canal novedades si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("{0}".format(line)) - return [] - - return itemlist - - -def listado(item): - logger.info() - itemlist = list() - - item.infoLabels['mediatype'] = "movie" - if "Estrenos" in item.title: - bloque_head = "ESTRENOS CARTELERA" - elif "Series" in item.title: - bloque_head = "RECIENTE SERIES" - item.infoLabels['mediatype'] = "tvshow" - else: - bloque_head = "RECIENTE PELICULAS" - - # Descarga la página - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |\s{2}", "", data) - - # Extrae las entradas (carpetas) - bloque = scrapertools.find_single_match(data, bloque_head + '\s*(.*?)') - patron = '
(.*?)
' \ - '(.*?).*?href.*?>(.*?)' - matches = scrapertools.find_multiple_matches(bloque, patron) - - for scrapedurl, scrapedthumbnail, calidad, idiomas, scrapedtitle in matches: - title = scrapedtitle - langs = [] - if 'idio idi1' in idiomas: - langs.append("VOS") - if 'idio idi2' in idiomas: - langs.append("LAT") - if 'idio idi4' in idiomas: - langs.append("ESP") - if langs: - title += " [%s]" % "/".join(langs) - if calidad: - title += " %s" % calidad - - filtro_thumb = scrapedthumbnail.replace("http://image.tmdb.org/t/p/w342", "") - filtro_list = {"poster_path": filtro_thumb} - filtro_list = filtro_list.items() - - if item.contentType == "tvshow": - new_item = item.clone(action="episodios", title=title, url=scrapedurl, thumbnail=scrapedthumbnail, - fulltitle=scrapedtitle, infoLabels={'filtro': filtro_list}, - contentTitle=scrapedtitle, context="buscar_trailer", text_color=color1, - show=scrapedtitle, text_bold=False) - else: - new_item = item.clone(action="findvideos", title=title, url=scrapedurl, thumbnail=scrapedthumbnail, - fulltitle=scrapedtitle, infoLabels={'filtro': filtro_list}, text_bold=False, - contentTitle=scrapedtitle, context="buscar_trailer", text_color=color1) - - itemlist.append(new_item) - - if item.action == "listado": - try: - tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) - except: - pass - - return itemlist - - -def busqueda(item): - logger.info() - itemlist = list() - - # Descarga la página - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |\s{2}", "", data) - - # Extrae las entradas (carpetas) - bloque = scrapertools.find_single_match(data, '

Peliculas

(.*?)') - bloque += scrapertools.find_single_match(data, '

Series

(.*?)') - - patron = '
Temporada:(.*?)') - matches = scrapertools.find_multiple_matches(bloque, 'href="([^"]+)">(.*?)') - - for scrapedurl, scrapedtitle in matches: - title = "Temporada %s" % scrapedtitle - - new_item = item.clone(action="", title=title, text_color=color2) - new_item.infoLabels["season"] = scrapedtitle - new_item.infoLabels["mediatype"] = "season" - data_season = httptools.downloadpage(scrapedurl).data - data_season = re.sub(r"\n|\r|\t| |\s{2}", "", data_season) - patron = '
  • .*?href="([^"]+)"(.*?)
    .*?href.*?>' \ - '(.*?)' - matches = scrapertools.find_multiple_matches(data_season, patron) - - elementos = [] - for url, status, title in matches: - if not "Enlaces Disponibles" in status: - continue - elementos.append(title) - item_epi = item.clone(action="findvideos", url=url, text_color=color1) - item_epi.infoLabels["season"] = scrapedtitle - episode = scrapertools.find_single_match(title, 'Capitulo (\d+)') - titulo = scrapertools.find_single_match(title, 'Capitulo \d+\s*-\s*(.*?)$') - item_epi.infoLabels["episode"] = episode - item_epi.infoLabels["mediatype"] = "episode" - item_epi.title = "%sx%s %s" % (scrapedtitle, episode.zfill(2), titulo) - - itemlist.insert(0, item_epi) - if elementos: - itemlist.insert(0, new_item) - - if item.infoLabels["tmdb_id"] and itemlist: - try: - tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) - except: - pass - - if itemlist: - if config.get_videolibrary_support(): - itemlist.append(Item(channel=item.channel, title="Añadir serie a la videoteca", text_color="green", - filtro=True, action="add_serie_to_library", fulltitle=item.fulltitle, - extra="episodios", url=item.url, infoLabels=item.infoLabels, show=item.show)) - else: - itemlist.append(item.clone(title="Serie sin episodios disponibles", action="", text_color=color3)) - return itemlist - - -def findvideos(item): - logger.info() - itemlist = list() - - try: - filtro_idioma = config.get_setting("filterlanguages", item.channel) - filtro_enlaces = config.get_setting("filterlinks", item.channel) - except: - filtro_idioma = 3 - filtro_enlaces = 2 - - dict_idiomas = {'Castellano': 2, 'Latino': 1, 'Subtitulada': 0} - - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |\s{2}", "", data) - - if not item.infoLabels["tmdb_id"]: - year = scrapertools.find_single_match(data, 'Lanzamiento.*?(\d{4})') - - if year != "": - item.infoLabels['filtro'] = "" - item.infoLabels['year'] = int(year) - - # Ampliamos datos en tmdb - try: - tmdb.set_infoLabels_item(item, __modo_grafico__) - except: - pass - - if not item.infoLabels['plot']: - plot = scrapertools.find_single_match(data, '

    (.*?)

    ') - item.infoLabels['plot'] = plot - - if filtro_enlaces != 0: - list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas, "Ver Online", item) - if list_enlaces: - itemlist.append(item.clone(action="", title="Enlaces Online", text_color=color1, - text_bold=True)) - itemlist.extend(list_enlaces) - if filtro_enlaces != 1: - list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas, "Descarga Directa", item) - if list_enlaces: - itemlist.append(item.clone(action="", title="Enlaces Descargas", text_color=color1, - text_bold=True)) - itemlist.extend(list_enlaces) - - # Opción "Añadir esta película a la videoteca de XBMC" - if itemlist and item.contentType == "movie": - contextual = config.is_xbmc() - itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="", - text_color="magenta", contextual=contextual)) - if item.extra != "findvideos": - if config.get_videolibrary_support(): - itemlist.append(Item(channel=item.channel, title="Añadir enlaces a la videoteca", text_color="green", - filtro=True, action="add_pelicula_to_library", fulltitle=item.fulltitle, - extra="findvideos", url=item.url, infoLabels=item.infoLabels, - contentType=item.contentType, contentTitle=item.contentTitle, show=item.show)) - elif not itemlist and item.contentType == "movie": - itemlist.append(item.clone(title="Película sin enlaces disponibles", action="", text_color=color3)) - - return itemlist - - -def bloque_enlaces(data, filtro_idioma, dict_idiomas, tipo, item): - logger.info() - - lista_enlaces = list() - bloque = scrapertools.find_single_match(data, tipo + '(.*?)') - patron = '\s*.*?([^<]+)' \ - '.*?(.*?)' - matches = scrapertools.find_multiple_matches(bloque, patron) - filtrados = [] - for language, scrapedurl, calidad, orden in matches: - language = language.strip() - server = scrapertools.find_single_match(scrapedurl, 'http(?:s|)://(?:www.|)(\w+).') - if server == "ul": - server = "uploadedto" - if server == "streamin": - server = "streaminto" - if server == "waaw": - server = "netutv" - - if servertools.is_server_enabled(server): - try: - servers_module = __import__("servers." 
+ server) - title = " Mirror en " + server + " (" + language + ") (Calidad " + calidad.strip() + ")" - if filtro_idioma == 3 or item.filtro: - lista_enlaces.append(item.clone(title=title, action="play", server=server, text_color=color2, - url=scrapedurl, idioma=language, orden=orden, language=language)) - else: - idioma = dict_idiomas[language] - if idioma == filtro_idioma: - lista_enlaces.append(item.clone(title=title, text_color=color2, action="play", - url=scrapedurl, server=server, idioma=language, orden=orden, - language=language)) - else: - if language not in filtrados: - filtrados.append(language) - except: - pass - - order = config.get_setting("orderlinks", item.channel) - if order == 0: - lista_enlaces.sort(key=lambda item: item.server) - elif order == 1: - lista_enlaces.sort(key=lambda item: item.idioma) - else: - lista_enlaces.sort(key=lambda item: item.orden, reverse=True) - - if filtro_idioma != 3: - if len(filtrados) > 0: - title = "Mostrar enlaces filtrados en %s" % ", ".join(filtrados) - lista_enlaces.append(item.clone(title=title, action="findvideos", url=item.url, text_color=color3, - filtro=True)) - - return lista_enlaces - - -def play(item): - logger.info() - itemlist = list() - enlace = servertools.findvideosbyserver(item.url, item.server) - itemlist.append(item.clone(url=enlace[0][1])) - - return itemlist From 4c7a349db2c6a1285203fd72cc746110e4e21ee9 Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Thu, 19 Oct 2017 08:25:39 -0500 Subject: [PATCH 08/14] Delete crimenes.json --- plugin.video.alfa/channels/crimenes.json | 37 ------------------------ 1 file changed, 37 deletions(-) delete mode 100755 plugin.video.alfa/channels/crimenes.json diff --git a/plugin.video.alfa/channels/crimenes.json b/plugin.video.alfa/channels/crimenes.json deleted file mode 100755 index 500ac3a4..00000000 --- a/plugin.video.alfa/channels/crimenes.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "id": "crimenes", - "name": "Crimenes Imperfectos", - "active": true, - "adult": false, - "language": ["cast"], - "banner": "crimenes.png", - "thumbnail": "crimenes.png", - "version": 1, - "changes": [ - { - "date": "19/06/2017", - "description": "correcion xml" - }, - { - "date": "15/03/2017", - "description": "limpieza código" - }, - { - "date": "01/07/2016", - "description": "Eliminado código innecesario." - } - ], - "categories": [ - "movie" - ], - "settings": [ - { - "id": "include_in_global_search", - "type": "bool", - "label": "Incluir en busqueda global", - "default": false, - "enabled": false, - "visible": false - } - ] -} \ No newline at end of file From 915952c85dd3dd99887e3c296e28805a6324ece3 Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Thu, 19 Oct 2017 08:25:54 -0500 Subject: [PATCH 09/14] Delete crimenes.py --- plugin.video.alfa/channels/crimenes.py | 167 ------------------------- 1 file changed, 167 deletions(-) delete mode 100755 plugin.video.alfa/channels/crimenes.py diff --git a/plugin.video.alfa/channels/crimenes.py b/plugin.video.alfa/channels/crimenes.py deleted file mode 100755 index 9615b336..00000000 --- a/plugin.video.alfa/channels/crimenes.py +++ /dev/null @@ -1,167 +0,0 @@ -# -*- coding: utf-8 -*- - -import re -import urlparse - -import xbmc -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import logger - - -# Main list manual - -def listav(item): - itemlist = [] - - data = scrapertools.cache_page(item.url) - - patronbloque = '
Date: Fri, 20 Oct 2017 09:13:35 -0500
Subject: [PATCH 11/14] downace, mensaje error de servidor

---
 plugin.video.alfa/servers/downace.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/plugin.video.alfa/servers/downace.py b/plugin.video.alfa/servers/downace.py
index c77d4c34..43fe5756 100644
--- a/plugin.video.alfa/servers/downace.py
+++ b/plugin.video.alfa/servers/downace.py
@@ -10,7 +10,8 @@ def test_video_exists(page_url):
     data = httptools.downloadpage(page_url).data
     if "no longer exists" in data:
         return False, "[Downace] El fichero ha sido borrado"
-
+    if "please+try+again+later." in data:
+        return False, "[Downace] Error de downace, no se puede generar el enlace al video"
     return True, ""

From 6bf0100f4172d2bd0350c7860c0e87828b381774 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Fri, 20 Oct 2017 09:26:47 -0500
Subject: [PATCH 12/14] downace: actualizado test_video_exists

---
 plugin.video.alfa/servers/downace.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/plugin.video.alfa/servers/downace.py b/plugin.video.alfa/servers/downace.py
index 43fe5756..c3f81e18 100644
--- a/plugin.video.alfa/servers/downace.py
+++ b/plugin.video.alfa/servers/downace.py
@@ -8,8 +8,8 @@ def test_video_exists(page_url):
     logger.info("(page_url='%s')" % page_url)
     data = httptools.downloadpage(page_url).data
-    if "no longer exists" in data:
-        return False, "[Downace] El fichero ha sido borrado"
+    if "no longer exists" in data or "to copyright issues" in data:
+        return False, "[Downace] El video ha sido borrado"
     if "please+try+again+later." in data:
         return False, "[Downace] Error de downace, no se puede generar el enlace al video"
     return True, ""

From 5ed94e84fca47e75acd3d76150b2f305eb960a51 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Fri, 20 Oct 2017 11:20:56 -0500
Subject: [PATCH 13/14] animeflv_me: reparado paginador

---
 plugin.video.alfa/channels/animeflv_me.py | 54 ++++-------------------
 1 file changed, 9 insertions(+), 45 deletions(-)

diff --git a/plugin.video.alfa/channels/animeflv_me.py b/plugin.video.alfa/channels/animeflv_me.py
index 52ba1639..b762c316 100755
--- a/plugin.video.alfa/channels/animeflv_me.py
+++ b/plugin.video.alfa/channels/animeflv_me.py
@@ -12,14 +12,14 @@ from core import servertools
 from core.item import Item
 from platformcode import config, logger
 
-CHANNEL_HOST = "http://animeflv.me/"
+CHANNEL_HOST = "http://animeflv.co"
 
 CHANNEL_DEFAULT_HEADERS = [
     ["User-Agent", "Mozilla/5.0"],
     ["Accept-Encoding", "gzip, deflate"],
     ["Referer", CHANNEL_HOST]
 ]
 
-REGEX_NEXT_PAGE = r"class='current'>\d+?
  • )(.+?)(?:)' REGEX_THUMB = r'src="(http://media.animeflv\.co/uploads/thumbs/[^"]+?)"' REGEX_PLOT = r'Línea de historia:

    (.*?)' @@ -61,14 +61,6 @@ def get_cookie_value(): header_string = "|User-Agent=Mozilla/5.0&Referer=http://animeflv.co&Cookie=" + \ get_cookie_value() -def __find_next_page(html): - """ - Busca el enlace a la pagina siguiente - """ - - return scrapertools.find_single_match(html, REGEX_NEXT_PAGE) - - def __extract_info_from_serie(html): title = scrapertools.find_single_match(html, REGEX_TITLE) title = clean_title(title) @@ -131,15 +123,15 @@ def mainlist(item): itemlist.append(Item(channel=item.channel, action="letras", title="Por orden alfabético")) itemlist.append(Item(channel=item.channel, action="generos", title="Por géneros", - url=urlparse.urljoin(CHANNEL_HOST, "ListadeAnime"))) + url= CHANNEL_HOST + "/ListadeAnime")) itemlist.append(Item(channel=item.channel, action="series", title="Por popularidad", - url=urlparse.urljoin(CHANNEL_HOST, "/ListadeAnime/MasVisto"))) + url=CHANNEL_HOST + "/ListadeAnime/MasVisto")) itemlist.append(Item(channel=item.channel, action="series", title="Novedades", - url=urlparse.urljoin(CHANNEL_HOST, "ListadeAnime/Nuevo"))) + url=CHANNEL_HOST + "/ListadeAnime/Nuevo")) itemlist.append(Item(channel=item.channel, action="series", title="Últimos", - url=urlparse.urljoin(CHANNEL_HOST, "ListadeAnime/LatestUpdate"))) + url=CHANNEL_HOST + "/ListadeAnime/LatestUpdate")) itemlist.append(Item(channel=item.channel, action="search", title="Buscar...", - url=urlparse.urljoin(CHANNEL_HOST, "Buscar?s="))) + url=CHANNEL_HOST + "/Buscar?s=")) itemlist = renumbertools.show_option(item.channel, itemlist) @@ -148,15 +140,11 @@ def mainlist(item): def letras(item): logger.info() - base_url = 'http://animeflv.co/ListadeAnime?c=' - itemlist = list() itemlist.append(Item(channel=item.channel, action="series", title="#", url=base_url + "#")) for letter in "ABCDEFGHIJKLMNOPQRSTUVWXYZ": - logger.debug("title=[%s], url=[%s], thumbnail=[]" % (letter, base_url + letter)) - itemlist.append(Item(channel=item.channel, action="series", title=letter, url=base_url + letter)) return itemlist @@ -172,8 +160,6 @@ def generos(item): list_genre = re.findall(REGEX_GENERO, html) for url, genero in list_genre: - logger.debug("title=[%s], url=[%s], thumbnail=[]" % (genero, url)) - itemlist.append(Item(channel=item.channel, action="series", title=genero, url=url)) return itemlist @@ -181,12 +167,9 @@ def generos(item): def search(item, texto): logger.info() - texto = texto.replace(" ", "%20") item.url = "%s%s" % (item.url, texto) - html = get_url_contents(item.url) - try: # Se encontro un solo resultado y se redicciono a la página de la serie if html.find('Ver') >= 0: @@ -198,9 +181,6 @@ def search(item, texto): items = [] for show in show_list: title, url, thumbnail, plot = show - - logger.debug("title=[%s], url=[%s], thumbnail=[%s]" % (title, url, thumbnail)) - items.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, plot=plot, show=title, viewmode="movies_with_plot", context=renumbertools.context(item))) except: @@ -214,35 +194,25 @@ def search(item, texto): def series(item): logger.info() - page_html = get_url_contents(item.url) - show_list = __find_series(page_html) - items = [] for show in show_list: title, url, thumbnail, plot = show - - logger.debug("title=[%s], url=[%s], thumbnail=[%s]" % (title, url, thumbnail)) - items.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, plot=plot, show=title, viewmode="movies_with_plot", context=renumbertools.context(item))) - url_next_page = 
__find_next_page(page_html) - + url_next_page = scrapertools.find_single_match(page_html, REGEX_NEXT_PAGE) if url_next_page: - items.append(Item(channel=item.channel, action="series", title=">> Página Siguiente", url=url_next_page)) + items.append(Item(channel=item.channel, action="series", title=">> Página Siguiente", url= CHANNEL_HOST + url_next_page)) return items def episodios(item): logger.info() - itemlist = [] - html_serie = get_url_contents(item.url) - info_serie = __extract_info_from_serie(html_serie) if info_serie[3]: plot = info_serie[3] @@ -250,11 +220,9 @@ def episodios(item): plot = '' episodes = re.findall(REGEX_EPISODE, html_serie, re.DOTALL) - es_pelicula = False for url, title, date in episodes: episode = scrapertools.find_single_match(title, r'Episodio (\d+)') - # El enlace pertenece a un episodio if episode: season = 1 @@ -268,9 +236,6 @@ def episodios(item): title = "%s (%s)" % (title, date) item.url = url es_pelicula = True - - logger.debug("title=[%s], url=[%s], thumbnail=[%s]" % (title, url, item.thumbnail)) - itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=item.thumbnail, plot=plot, show=item.show, fulltitle="%s %s" % (item.show, title))) @@ -318,7 +283,6 @@ def findvideos(item): videoitem.thumbnail = item.thumbnail regex_video_list = r'var part = \[([^\]]+)' - videos_html = scrapertools.find_single_match(iframe_html, regex_video_list) videos = re.findall('"([^"]+)"', videos_html, re.DOTALL) for quality_id, video_url in enumerate(videos): From 54c818984a0371fb31a7ca3e084e77a3c100a622 Mon Sep 17 00:00:00 2001 From: alfa-addon <inter95@protonmail.com> Date: Fri, 20 Oct 2017 22:00:22 -0400 Subject: [PATCH 14/14] v2.2.4 --- plugin.video.alfa/addon.xml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/plugin.video.alfa/addon.xml b/plugin.video.alfa/addon.xml index 7ce576a7..979c4786 100755 --- a/plugin.video.alfa/addon.xml +++ b/plugin.video.alfa/addon.xml @@ -1,5 +1,5 @@ <?xml version="1.0" encoding="UTF-8" standalone="yes"?> -<addon id="plugin.video.alfa" name="Alfa" version="2.2.3" provider-name="Alfa Addon"> +<addon id="plugin.video.alfa" name="Alfa" version="2.2.4" provider-name="Alfa Addon"> <requires> <import addon="xbmc.python" version="2.1.0"/> <import addon="script.module.libtorrent" optional="true"/> @@ -19,12 +19,12 @@ </assets> <news>[B]Estos son los cambios para esta versión:[/B] [COLOR green][B]Canales agregados y arreglos[/B][/COLOR] - » animeyt » pelismundo - » asialiveaction » animeflv_me - » newpct1 » wopelis - » gvideo » powvideo - ¤ arreglos internos - [COLOR green]Gracias a [COLOR yellow]Danielr460[/COLOR] y [COLOR yellow]robalo[/COLOR] por su colaboración en esta versión[/COLOR] + » playmax » playpornx + » canalporno » divxatope + » flashx » verpeliculasnuevas + » animeflv_me » hdfull + » pelismundo » downace + » gamovideo ¤ arreglos internos </news> <description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description> <summary lang="en">Browse web pages using Kodi</summary>
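Note: several server fixes in this series (flashx in PATCH 04, downace in PATCH 11/12) follow the same test_video_exists pattern: download the page once and match known error strings before trying to resolve a stream. A generic sketch of that pattern, assuming the same httptools/logger helpers the patches use; the marker/message table and the "[Servidor]" prefix are illustrative, not copied from any real server module:

    # -*- coding: utf-8 -*-
    # Illustrative sketch only; each real server module returns its own messages.
    from core import httptools
    from platformcode import logger

    ERROR_MARKERS = [
        ("file was deleted", "El archivo no existe o ha sido borrado"),
        ("no longer exists", "El video ha sido borrado"),
        ("please+try+again+later.", "No se puede generar el enlace al video"),
    ]

    def test_video_exists(page_url):
        logger.info("(page_url='%s')" % page_url)
        data = httptools.downloadpage(page_url).data
        # Report the first known error marker found in the downloaded page.
        for marker, message in ERROR_MARKERS:
            if marker in data:
                return False, "[Servidor] %s" % message
        return True, ""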