From c949b2780a5449d58956cbc2a164ab8ff0b24cc9 Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Wed, 18 Apr 2018 17:28:17 -0500 Subject: [PATCH 01/12] vidlox: updated pattern --- plugin.video.alfa/servers/vidlox.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin.video.alfa/servers/vidlox.json b/plugin.video.alfa/servers/vidlox.json index da5f5c58..73a2ae0a 100644 --- a/plugin.video.alfa/servers/vidlox.json +++ b/plugin.video.alfa/servers/vidlox.json @@ -4,7 +4,7 @@ "ignore_urls": [], "patterns": [ { - "pattern": "(https://vidlox.(?:tv|me)/embed-.*?.html)", + "pattern": "(?i)(https://vidlox.(?:tv|me)/embed-.*?.html)", "url": "\\1" } ] From 46b87fe9553f583de12efeb81df89c4c75d3973a Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Wed, 18 Apr 2018 17:30:24 -0500 Subject: [PATCH 02/12] downace: actualizado test_video_exists --- plugin.video.alfa/servers/downace.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plugin.video.alfa/servers/downace.py b/plugin.video.alfa/servers/downace.py index c3f81e18..7c649414 100644 --- a/plugin.video.alfa/servers/downace.py +++ b/plugin.video.alfa/servers/downace.py @@ -12,6 +12,8 @@ def test_video_exists(page_url): return False, "[Downace] El video ha sido borrado" if "please+try+again+later." in data: return False, "[Downace] Error de downace, no se puede generar el enlace al video" + if "File has been removed due to inactivity" in data: + return False, "[Downace] El archivo ha sido removido por inactividad" return True, "" From 4706be70123b0ca94b4f18da1a3759bb38d33c2e Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Thu, 19 Apr 2018 08:15:50 -0500 Subject: [PATCH 03/12] vertelenovelas: web sin videos --- .../channels/vertelenovelas.json | 22 ------------------- 1 file changed, 22 deletions(-) delete mode 100755 plugin.video.alfa/channels/vertelenovelas.json diff --git a/plugin.video.alfa/channels/vertelenovelas.json b/plugin.video.alfa/channels/vertelenovelas.json deleted file mode 100755 index c8dd2f2b..00000000 --- a/plugin.video.alfa/channels/vertelenovelas.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "id": "vertelenovelas", - "name": "Ver Telenovelas", - "active": true, - "adult": false, - "language": ["cast", "lat"], - "thumbnail": "vertelenovelas.png", - "banner": "vertelenovelas.png", - "categories": [ - "tvshow" - ], - "settings": [ - { - "id": "include_in_global_search", - "type": "bool", - "label": "Incluir en busqueda global", - "default": false, - "enabled": true, - "visible": true - } - ] -} \ No newline at end of file From b58717436d903d1f476865a5ecd6bb57bed4a288 Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Thu, 19 Apr 2018 08:16:08 -0500 Subject: [PATCH 04/12] vertelenovelas: web sin videos --- plugin.video.alfa/channels/vertelenovelas.py | 117 ------------------- 1 file changed, 117 deletions(-) delete mode 100755 plugin.video.alfa/channels/vertelenovelas.py diff --git a/plugin.video.alfa/channels/vertelenovelas.py b/plugin.video.alfa/channels/vertelenovelas.py deleted file mode 100755 index df0b926c..00000000 --- a/plugin.video.alfa/channels/vertelenovelas.py +++ /dev/null @@ -1,117 +0,0 @@ -# -*- coding: utf-8 -*- - -import re -import urlparse - -from core import httptools -from core import scrapertools -from core.item import Item -from platformcode import logger - - -def mainlist(item): - logger.info() - itemlist = [] - 
itemlist.append(Item(channel=item.channel, title="Ultimos capítulos", action="ultimos", url="http://www.vertelenovelas.cc/", - viewmode="movie")) - itemlist.append(Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = "http://www.vertelenovelas.cc/ajax/autocompletex.php?q=" + texto - try: - return series(item) - - # Se captura la excepciÛn, para no interrumpir al buscador global si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - - -def ultimos(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '' - matches = re.compile(patron, re.DOTALL).findall(data) - for match in matches: - title = scrapertools.find_single_match(match, '([^<]+)') - if title == "": - title = scrapertools.find_single_match(match, '([^<]+)') - url = urlparse.urljoin(item.url, scrapertools.find_single_match(match, '([^<]+)') - url = urlparse.urljoin(item.url, scrapertools.find_single_match(match, '
  • ([^<]+)' - matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedurl, scrapedtitle in matches: - title = scrapertools.htmlclean(scrapedtitle) - url = urlparse.urljoin(item.url, scrapedurl) - itemlist.append( - Item(channel=item.channel, action="findvideos", title=title, url=url, - folder=True, fulltitle=title)) - return itemlist - - -def findvideos(item): - logger.info() - data = httptools.downloadpage(item.url).data - pattern = 'data-id="([^"]+)"' - list_servers = re.compile(pattern, re.DOTALL).findall(data) - list_urls = [] - for _id in list_servers: - post = "id=%s" % _id - data = httptools.downloadpage("http://www.vertelenovelas.cc/goto/", post=post).data - list_urls.append(scrapertools.find_single_match(data, 'document\.location = "([^"]+)";')) - from core import servertools - itemlist = servertools.find_video_items(data=", ".join(list_urls)) - for videoitem in itemlist: - # videoitem.title = item.title - videoitem.channel = item.channel - return itemlist From b06627f8634f86ecb8911984d57a0f881e85801b Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Wed, 25 Apr 2018 10:42:23 -0500 Subject: [PATCH 05/12] tvvip: nuevo canal --- plugin.video.alfa/channels/tvvip.json | 23 + plugin.video.alfa/channels/tvvip.py | 666 ++++++++++++++++++++++++++ 2 files changed, 689 insertions(+) create mode 100644 plugin.video.alfa/channels/tvvip.json create mode 100644 plugin.video.alfa/channels/tvvip.py diff --git a/plugin.video.alfa/channels/tvvip.json b/plugin.video.alfa/channels/tvvip.json new file mode 100644 index 00000000..a81d7fef --- /dev/null +++ b/plugin.video.alfa/channels/tvvip.json @@ -0,0 +1,23 @@ +{ + "id": "tvvip", + "name": "TV-VIP", + "active": true, + "adult": false, + "language": ["cast"], + "thumbnail": "http://i.imgur.com/gNHVlI4.png", + "banner": "http://i.imgur.com/wyRk5AG.png", + "categories": [ + "movie", + "tvshow" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": "!eq(-1,'') + !eq(-2,'')", + "visible": true + } + ] +} diff --git a/plugin.video.alfa/channels/tvvip.py b/plugin.video.alfa/channels/tvvip.py new file mode 100644 index 00000000..4487b3bc --- /dev/null +++ b/plugin.video.alfa/channels/tvvip.py @@ -0,0 +1,666 @@ +# -*- coding: utf-8 -*- + +import os +import re +import sys +import unicodedata +import urllib +import time + +from core import channeltools +from core import httptools +from core import jsontools +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item +from platformcode import config, logger +from platformcode import platformtools + +host = "http://tv-vip.com" + +def mainlist(item): + logger.info() + item.viewmode = "movie" + itemlist = [] + + data = httptools.downloadpage(host + "/json/playlist/home/index.json") + + itemlist.append(Item(channel=item.channel, title="Películas", action="submenu", + thumbnail=host+"/json/playlist/peliculas/thumbnail.jpg", + fanart=host+"/json/playlist/peliculas/background.jpg")) + itemlist.append(Item(channel=item.channel, title="Series", action="submenu", + thumbnail=host+"/json/playlist/series/poster.jpg", + fanart=host+"/json/playlist/series/background.jpg")) + itemlist.append(Item(channel=item.channel, title="Versión Original", action="entradasconlistas", + url=host+"/json/playlist/version-original/index.json", + thumbnail=host+"/json/playlist/version-original/thumbnail.jpg", + 
fanart=host+"/json/playlist/version-original/background.jpg")) + itemlist.append(Item(channel=item.channel, title="Documentales", action="entradasconlistas", + url=host+"/json/playlist/documentales/index.json", + thumbnail=host+"/json/playlist/documentales/thumbnail.jpg", + fanart=host+"/json/playlist/documentales/background.jpg")) + itemlist.append(Item(channel=item.channel, title="Películas Infantiles", action="entradasconlistas", + url=host+"/json/playlist/peliculas-infantiles/index.json", + thumbnail=host+"/json/playlist/peliculas-infantiles/thumbnail.jpg", + fanart=host+"/json/playlist/peliculas-infantiles/background.jpg")) + itemlist.append(Item(channel=item.channel, title="Series Infantiles", action="entradasconlistas", + url=host+"/json/playlist/series-infantiles/index.json", + thumbnail=host+"/json/playlist/series-infantiles/thumbnail.jpg", + fanart=host+"/json/playlist/series-infantiles/background.jpg")) + itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", + thumbnail="http://i.imgur.com/gNHVlI4.png", fanart="http://i.imgur.com/9loVksV.png")) + + return itemlist + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "%20") + if item.title == "Buscar...": item.extra = "local" + item.url = host + "/video-prod/s/search?q=%s&n=100" % texto + try: + return busqueda(item, texto) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def busqueda(item, texto): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = jsontools.load(data) + for child in data["objectList"]: + infolabels = {} + infolabels['year'] = child['year'] + if child['tags']: infolabels['genre'] = ', '.join([x.strip() for x in child['tags']]) + infolabels['rating'] = child['rate'].replace(',', '.') + infolabels['votes'] = child['rateCount'] + if child['cast']: infolabels['cast'] = child['cast'].split(",") + infolabels['director'] = child['director'] + if 'playListChilds' not in child: + infolabels['plot'] = child['description'] + type = "repo" + fulltitle = child['name'] + title = child['name'] + infolabels['duration'] = child['duration'] + if child['height'] < 720: + quality = "[B] [SD][/B]" + elif child['height'] < 1080: + quality = "[B] [720p][/B]" + elif child['height'] < 2160: + quality = "[B] [1080p][/B]" + elif child['height'] >= 2160: + quality = "[B] [4k][/B]" + if child['name'] == "": + title = child['id'].rsplit(".", 1)[0] + else: + title = child['name'] + if child['year']: + title += " (" + child['year'] + ")" + title += quality + else: + type = "playlist" + infolabels['plot'] = "Contiene:\n" + "\n".join(child['playListChilds']) + "\n".join(child['repoChilds']) + fulltitle = child['id'] + title = "[COLOR red][LISTA][/COLOR] " + child['id'].replace('-', ' ').capitalize() + " ([COLOR gold]" + \ + str(child['number']) + "[/COLOR])" + # En caso de búsqueda global se filtran los resultados + if item.extra != "local": + if "+" in texto: texto = "|".join(texto.split("+")) + if not re.search(r'(?i)' + texto, title, flags=re.DOTALL): continue + url = host + "/json/%s/%s/index.json" % (type, child["id"]) + # Fanart + if child['hashBackground']: + fanart = host + "/json/%s/%s/background.jpg" % (type, child["id"]) + else: + fanart = host + "/json/%s/%s/thumbnail.jpg" % (type, child["id"]) + # Thumbnail + if child['hasPoster']: + thumbnail = host + "/json/%s/%s/poster.jpg" % (type, child["id"]) + else: + thumbnail = fanart + if 
type == 'playlist': + itemlist.insert(0, Item(channel=item.channel, action="entradasconlistas", title=title, + url=url, thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, + infoLabels=infolabels, viewmode="movie_with_plot", folder=True)) + else: + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, + thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, contentTitle=fulltitle, + context="05", infoLabels=infolabels, viewmode="movie_with_plot", folder=True)) + return itemlist + + +def submenu(item): + logger.info() + itemlist = [] + if item.title == "Series": + itemlist.append(Item(channel=item.channel, title="Nuevos Capítulos", action="episodios", + url=host+"/json/playlist/nuevos-capitulos/index.json", + thumbnail=host+"/json/playlist/nuevos-capitulos/background.jpg", + fanart=host+"/json/playlist/nuevos-capitulos/background.jpg")) + itemlist.append(Item(channel=item.channel, title="Más Vistas", action="series", + url=host+"/json/playlist/top-series/index.json", + thumbnail=host+"/playlist/top-series/thumbnail.jpg", + fanart=host+"/json/playlist/top-series/background.jpg", + extra1="Series")) + itemlist.append(Item(channel=item.channel, title="Últimas Series", action="series", + url=host+"/json/playlist/series/index.json", + thumbnail=item.thumbnail, fanart=item.fanart, extra1="Series")) + itemlist.append(Item(channel=item.channel, title="Lista de Series A-Z", action="series", + url=host+"/json/playlist/series/index.json", thumbnail=item.thumbnail, + fanart=item.fanart, extra1="Series")) + else: + itemlist.append(Item(channel=item.channel, title="Novedades", action="entradas", + url=host+"/json/playlist/000-novedades/index.json", + thumbnail=host+"/json/playlist/ultimas-peliculas/thumbnail.jpg", + fanart=host+"/json/playlist/ultimas-peliculas/background.jpg")) + itemlist.append(Item(channel=item.channel, title="Más vistas", action="entradas", + url=host+"/json/playlist/peliculas-mas-vistas/index.json", + thumbnail=host+"/json/playlist/peliculas-mas-vistas/thumbnail.jpg", + fanart=host+"/json/playlist/peliculas-mas-vistas/background.jpg")) + itemlist.append(Item(channel=item.channel, title="Categorías", action="cat", + url=host+"/json/playlist/peliculas/index.json", + thumbnail=item.thumbnail, fanart=item.fanart)) + itemlist.append(Item(channel=item.channel, title="Películas 3D", action="entradasconlistas", + url=host+"/json/playlist/3D/index.json", + thumbnail=host+"/json/playlist/3D/thumbnail.jpg", + fanart=host+"/json/playlist/3D/background.jpg")) + return itemlist + + +def cat(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = jsontools.load(data) + exception = ["peliculas-mas-vistas", "ultimas-peliculas"] + for child in data["sortedPlaylistChilds"]: + if child["id"] not in exception: + url = host + "/json/playlist/%s/index.json" % child["id"] + # Fanart + if child['hashBackground']: + fanart = host + "/json/playlist/%s/background.jpg" % child["id"] + else: + fanart = host + "/json/playlist/%s/thumbnail.jpg" % child["id"] + # Thumbnail + thumbnail = host + "/json/playlist/%s/thumbnail.jpg" % child["id"] + title = child['id'].replace('-', ' ').capitalize().replace("Manga", "Animación/Cine Oriental") + title += " ([COLOR gold]" + str(child['number']) + "[/COLOR])" + itemlist.append( + Item(channel=item.channel, action="entradasconlistas", title=title, url=url, + thumbnail=thumbnail, fanart=fanart, folder=True)) + return itemlist + + +def entradas(item): + logger.info() + itemlist = [] + infolabels = {} 
+ if item.title == "Nuevos Capítulos": + context = "5" + else: + context = "05" + data = httptools.downloadpage(item.url).data + data = jsontools.load(data) + for child in data["sortedRepoChilds"]: + infolabels['year'] = child['year'] + url = host + "/json/repo/%s/index.json" % child["id"] + thumbnail = "" + if child['hasPoster']: + thumbnail = host + "/json/repo/%s/poster.jpg" % child["id"] + if child['height'] < 720: + quality = "[B] [SD][/B]" + elif child['height'] < 1080: + quality = "[B] [720p][/B]" + elif child['height'] < 2160: + quality = "[B] [1080p][/B]" + elif child['height'] >= 2160: + quality = "[B] [4k][/B]" + fulltitle = child['name'] + title = child['name'] + if child['year']: + title += " (" + child['year'] + ")" + title += quality + itemlist.append(Item(channel=item.channel, action="findvideos", server="", title=title, url=url, + thumbnail=thumbnail, fulltitle=fulltitle, infoLabels=infolabels, + contentTitle=fulltitle, context=context)) + tmdb.set_infoLabels(itemlist) + return itemlist + + +def entradasconlistas(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = jsontools.load(data) + # Si hay alguna lista + contentSerie = False + contentList = False + if data['playListChilds']: + itemlist.append(Item(channel=item.channel, title="**LISTAS**", action="", text_color="red", text_blod=True, + folder=False)) + for child in data['sortedPlaylistChilds']: + infolabels = {} + infolabels['plot'] = "Contiene:\n" + "\n".join(child['playListChilds']) + "\n".join(child['repoChilds']) + if child['seasonNumber'] and not contentList and re.search(r'(?i)temporada', child['id']): + infolabels['season'] = child['seasonNumber'] + contentSerie = True + else: + contentSerie = False + contentList = True + title = child['id'].replace('-', ' ').capitalize() + " ([COLOR gold]" + str(child['number']) + "[/COLOR])" + url = host + "/json/playlist/%s/index.json" % child["id"] + thumbnail = host + "/json/playlist/%s/thumbnail.jpg" % child["id"] + if child['hashBackground']: + fanart = host + "/json/playlist/%s/background.jpg" % child["id"] + else: + fanart = host + "/json/playlist/%s/thumbnail.jpg" % child["id"] + itemlist.append(Item(channel=item.channel, action="entradasconlistas", title=title, + url=url, thumbnail=thumbnail, fanart=fanart, fulltitle=child['id'], + infoLabels=infolabels, viewmode="movie_with_plot")) + else: + contentList = True + + if data["sortedRepoChilds"] and len(itemlist) > 0: + itemlist.append(Item(channel=item.channel, title="**VÍDEOS**", action="", text_color="blue", text_blod=True, + folder=False)) + + for child in data["sortedRepoChilds"]: + infolabels = {} + infolabels['plot'] = child['description'] + infolabels['year'] = data['year'] + if child['tags']: infolabels['genre'] = ', '.join([x.strip() for x in child['tags']]) + infolabels['rating'] = child['rate'].replace(',', '.') + infolabels['votes'] = child['rateCount'] + infolabels['duration'] = child['duration'] + if child['cast']: infolabels['cast'] = child['cast'].split(",") + infolabels['director'] = child['director'] + url = host + "/json/repo/%s/index.json" % child["id"] + # Fanart + if child['hashBackground']: + fanart = host + "/json/repo/%s/background.jpg" % child["id"] + else: + fanart = host + "/json/repo/%s/thumbnail.jpg" % child["id"] + # Thumbnail + if child['hasPoster']: + thumbnail = host + "/json/repo/%s/poster.jpg" % child["id"] + else: + thumbnail = fanart + if child['height'] < 720: + quality = "[B] [SD][/B]" + elif child['height'] < 1080: + quality = "[B] 
[720p][/B]" + elif child['height'] < 2160: + quality = "[B] [1080p][/B]" + elif child['height'] >= 2160: + quality = "[B] [4k][/B]" + fulltitle = child['name'] + if child['name'] == "": + title = child['id'].rsplit(".", 1)[0] + else: + title = child['name'] + if child['year']: + title += " (" + child['year'] + ")" + title += quality + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, + thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, infoLabels=infolabels, + contentTitle=fulltitle, context="05", viewmode="movie_with_plot", folder=True)) + # Se añade item para añadir la lista de vídeos a la videoteca + if data['sortedRepoChilds'] and len(itemlist) > 0 and contentList: + if config.get_videolibrary_support(): + itemlist.append(Item(channel=item.channel, text_color="green", title="Añadir esta lista a la videoteca", + url=item.url, action="listas")) + elif contentSerie: + if config.get_videolibrary_support(): + itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url, + action="series_library", fulltitle=data['name'], show=data['name'], + text_color="green")) + + return itemlist + + +def series(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = jsontools.load(data) + exception = ["top-series", "nuevos-capitulos"] + for child in data["sortedPlaylistChilds"]: + if child["id"] not in exception: + infolabels = {} + infolabels['plot'] = child['description'] + infolabels['year'] = child['year'] + if child['tags']: infolabels['genre'] = ', '.join([x.strip() for x in child['tags']]) + infolabels['rating'] = child['rate'].replace(',', '.') + infolabels['votes'] = child['rateCount'] + if child['cast']: infolabels['cast'] = child['cast'].split(",") + infolabels['director'] = child['director'] + infolabels['mediatype'] = "episode" + if child['seasonNumber']: infolabels['season'] = child['seasonNumber'] + url = host + "/json/playlist/%s/index.json" % child["id"] + # Fanart + if child['hashBackground']: + fanart = host + "/json/playlist/%s/background.jpg" % child["id"] + else: + fanart = host + "/json/playlist/%s/thumbnail.jpg" % child["id"] + # Thumbnail + if child['hasPoster']: + thumbnail = host + "/json/playlist/%s/poster.jpg" % child["id"] + else: + thumbnail = fanart + if item.extra1 == "Series": + if child['name'] != "": + fulltitle = child['name'] + fulltitle = fulltitle.replace('-', '') + title = child['name'] + " (" + child['year'] + ")" + else: + title = fulltitle = child['id'].capitalize() + if "Temporada" not in title: + title += " [Temporadas: [COLOR gold]" + str(child['numberOfSeasons']) + "[/COLOR]]" + elif item.title == "Más Vistas": + title = title.replace("- Temporada", "--- Temporada") + else: + if data['name'] != "": + fulltitle = data['name'] + if child['seasonNumber']: + title = data['name'] + " --- Temporada " + child['seasonNumber'] + \ + " [COLOR gold](" + str(child['number']) + ")[/COLOR]" + else: + title = child['name'] + " [COLOR gold](" + str(child['number']) + ")[/COLOR]" + else: + fulltitle = data['id'] + if child['seasonNumber']: + title = data['id'].capitalize() + " --- Temporada " + child['seasonNumber'] + \ + " [COLOR gold](" + str(child['number']) + ")[/COLOR]" + else: + title = data['id'].capitalize() + " [COLOR gold](" + str(child['number']) + ")[/COLOR]" + + if not child['playListChilds']: + action = "episodios" + else: + action = "series" + itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, server="", + 
thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, infoLabels=infolabels, + contentSerieName=fulltitle, context="25", viewmode="movie_with_plot", folder=True)) + if len(itemlist) == len(data["sortedPlaylistChilds"]) and item.extra1 != "Series": + itemlist.sort(key=lambda item: item.title, reverse=True) + if config.get_videolibrary_support(): + itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url, + action="add_serie_to_library", show=data['name'], + text_color="green", extra="series_library")) + if item.title == "Últimas Series": return itemlist + if item.title == "Lista de Series A-Z": itemlist.sort(key=lambda item: item.fulltitle) + if data["sortedRepoChilds"] and len(itemlist) > 0: + itemlist.append(Item(channel=item.channel, title="**VÍDEOS RELACIONADOS/MISMA TEMÁTICA**", text_color="blue", + text_blod=True, action="", folder=False)) + for child in data["sortedRepoChilds"]: + infolabels = {} + if child['description']: + infolabels['plot'] = data['description'] + else: + infolabels['plot'] = child['description'] + infolabels['year'] = data['year'] + if not child['tags']: + infolabels['genre'] = ', '.join([x.strip() for x in data['tags']]) + else: + infolabels['genre'] = ', '.join([x.strip() for x in child['tags']]) + infolabels['rating'] = child['rate'].replace(',', '.') + infolabels['duration'] = child['duration'] + if child['cast']: infolabels['cast'] = child['cast'].split(",") + infolabels['director'] = child['director'] + url = host + "/json/repo/%s/index.json" % child["id"] + if child['hashBackground']: + fanart = host + "/json/repo/%s/background.jpg" % child["id"] + else: + fanart = host + "/json/repo/%s/thumbnail.jpg" % child["id"] + # Thumbnail + if child['hasPoster']: + thumbnail = host + "/json/repo/%s/poster.jpg" % child["id"] + else: + thumbnail = fanart + if child['height'] < 720: + quality = "[B] [SD][/B]" + elif child['height'] < 1080: + quality = "[B] [720p][/B]" + elif child['height'] < 2160: + quality = "[B] [1080p][/B]" + elif child['height'] >= 2160: + quality = "[B] [1080p][/B]" + fulltitle = child['name'] + if child['name'] == "": + title = child['id'].rsplit(".", 1)[0] + else: + title = child['name'] + if child['year']: + title += " (" + child['year'] + ")" + title += quality + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, + server="", thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, infoLabels=infolabels, + contentSerieName=fulltitle, context="25", viewmode="movie_with_plot", folder=True)) + if item.extra == "new": + itemlist.sort(key=lambda item: item.title, reverse=True) + return itemlist + + +def episodios(item): + logger.info() + itemlist = [] + # Redirección para actualización de videoteca + if item.extra == "series_library": + itemlist = series_library(item) + return itemlist + data = httptools.downloadpage(item.url).data + data = jsontools.load(data) + # Se prueba un método u otro porque algunas series no están bien listadas + if data["sortedRepoChilds"]: + for child in data["sortedRepoChilds"]: + if item.infoLabels: + item.infoLabels['duration'] = str(child['duration']) + item.infoLabels['season'] = str(data['seasonNumber']) + item.infoLabels['episode'] = str(child['episode']) + item.infoLabels['mediatype'] = "episode" + #contentTitle = item.fulltitle + "|" + str(data['seasonNumber']) + "|" + str(child['episode']) + # En caso de venir del apartado nuevos capítulos se redirige a la función series para mostrar los demás + if item.title == "Nuevos Capítulos": + 
url = host + "/json/playlist/%s/index.json" % child["season"] + action = "series" + extra = "new" + else: + url = host + "/json/repo/%s/index.json" % child["id"] + action = "findvideos" + extra = "" + if child['hasPoster']: + thumbnail = host + "/json/repo/%s/poster.jpg" % child["id"] + else: + thumbnail = host + "/json/repo/%s/thumbnail.jpg" % child["id"] + try: + title = fulltitle = child['name'].rsplit(" ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1] + except: + title = fulltitle = child['id'] + itemlist.append(item.clone(action=action, server="", title=title, url=url, thumbnail=thumbnail, + fanart=item.fanart, fulltitle=fulltitle, contentSerieName=fulltitle, context="35", + viewmode="movie", extra=extra, show=item.fulltitle, folder=True)) + else: + for child in data["repoChilds"]: + url = host + "/json/repo/%s/index.json" % child + if data['hasPoster']: + thumbnail = host + "/json/repo/%s/poster.jpg" % child + else: + thumbnail = host + "/json/repo/%s/thumbnail.jpg" % child + title = fulltitle = child.capitalize().replace('_', ' ') + itemlist.append(item.clone(action="findvideos", server="", title=title, url=url, thumbnail=thumbnail, + fanart=item.fanart, fulltitle=fulltitle, contentSerieName=item.fulltitle, + context="25", show=item.fulltitle, folder=True)) + # Opción de añadir a la videoteca en casos de series de una única temporada + if len(itemlist) > 0 and not "---" in item.title and item.title != "Nuevos Capítulos": + if config.get_videolibrary_support() and item.show == "": + if "-" in item.title: + show = item.title.split('-')[0] + else: + show = item.title.split('(')[0] + itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", text_color="green", + url=item.url, action="add_serie_to_library", show=show, extra="series_library")) + return itemlist + + +def series_library(item): + logger.info() + # Funcion unicamente para añadir/actualizar series a la libreria + lista_episodios = [] + show = item.show.strip() + data_serie = anti_cloudflare(item.url, host=host, headers=headers) + data_serie = jsontools.load(data_serie) + # Para series que en la web se listan divididas por temporadas + if data_serie["sortedPlaylistChilds"]: + for season_name in data_serie["sortedPlaylistChilds"]: + url_season = host + "/json/playlist/%s/index.json" % season_name['id'] + data = anti_cloudflare(url_season, host=host, headers=headers) + data = jsontools.load(data) + if data["sortedRepoChilds"]: + for child in data["sortedRepoChilds"]: + url = host + "/json/repo/%s/index.json" % child["id"] + fulltitle = child['name'].rsplit(" ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1] + try: + check_filename = scrapertools.get_season_and_episode(fulltitle) + except: + fulltitle += " " + str(data['seasonNumber']) + "x00" + lista_episodios.append(Item(channel=item.channel, action="findvideos", server="", + title=fulltitle, extra=url, url=item.url, fulltitle=fulltitle, + contentTitle=fulltitle, show=show)) + else: + for child in data["repoChilds"]: + url = host + "/json/repo/%s/index.json" % child + fulltitle = child.capitalize().replace('_', ' ') + try: + check_filename = scrapertools.get_season_and_episode(fulltitle) + except: + fulltitle += " " + str(data['seasonNumber']) + "x00" + lista_episodios.append(Item(channel=item.channel, action="findvideos", server="", + title=fulltitle, extra=url, url=item.url, contentTitle=fulltitle, + fulltitle=fulltitle, show=show)) + # Para series directas de una sola temporada + else: + data = data_serie + if data["sortedRepoChilds"]: + for 
child in data["sortedRepoChilds"]: + url = host + "/json/repo/%s/index.json" % child["id"] + fulltitle = child['name'].rsplit(" ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1] + try: + check_filename = scrapertools.get_season_and_episode(fulltitle) + except: + fulltitle += " 1x00" + lista_episodios.append(Item(channel=item.channel, action="findvideos", server="", title=fulltitle, + contentTitle=fulltitle, url=item.url, extra=url, fulltitle=fulltitle, + show=show)) + else: + for child in data["repoChilds"]: + url = host + "/json/repo/%s/index.json" % child + fulltitle = child.capitalize().replace('_', ' ') + try: + check_filename = scrapertools.get_season_and_episode(fulltitle) + except: + fulltitle += " 1x00" + lista_episodios.append(Item(channel=item.channel, action="findvideos", server="", title=fulltitle, + contentTitle=fulltitle, url=item.url, extra=url, fulltitle=fulltitle, + show=show)) + return lista_episodios + + +def findvideos(item): + logger.info() + itemlist = [] + # En caso de llamarse a la función desde una serie de la videoteca + if item.extra.startswith("http"): item.url = item.extra + data = httptools.downloadpage(item.url).data + data = jsontools.load(data) + id = urllib.quote(data['id']) + for child in data["profiles"].keys(): + videopath = urllib.quote(data["profiles"][child]['videoUri']) + for i in range(0, len(data["profiles"][child]['servers'])): + url = data["profiles"][child]['servers'][i]['url'] + videopath + size = " " + data["profiles"][child]["sizeHuman"] + resolution = " [" + (data["profiles"][child]['videoResolution']) + "]" + title = "Ver vídeo en " + resolution.replace('1920x1080', 'HD-1080p') + if i == 0: + title += size + " [COLOR purple]Mirror " + str(i + 1) + "[/COLOR]" + else: + title += size + " [COLOR green]Mirror " + str(i + 1) + "[/COLOR]" + # Para poner enlaces de mayor calidad al comienzo de la lista + if data["profiles"][child]["profileId"] == "default": + itemlist.insert(i, item.clone(action="play", server="directo", title=title, url=url, + viewmode="list", extra=id, folder=False)) + else: + itemlist.append(item.clone(action="play", server="directo", title=title, url=url, + viewmode="list", extra=id, folder=False)) + itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler", + text_color="magenta")) + if len(itemlist) > 0 and item.extra == "": + if config.get_videolibrary_support(): + itemlist.append(Item(channel=item.channel, title="Añadir enlaces a la videoteca", text_color="green", + url=item.url, action="add_pelicula_to_library", + infoLabels={'title':item.fulltitle}, extra="findvideos", fulltitle=item.fulltitle)) + return itemlist + + +def play(item): + logger.info() + itemlist = [] + uri = scrapertools.find_single_match(item.url, '(/transcoder[\w\W]+)') + uri_request = host + "/video-prod/s/uri?uri=%s&_=%s" % (uri, int(time.time())) + data = httptools.downloadpage(uri_request).data + data = jsontools.load(data) + url = item.url.replace(".tv-vip.com/transcoder/", ".tv-vip.info/c/transcoder/") + "?tt=" + str(data['tt']) + \ + "&mm=" + data['mm'] + "&bb=" + data['bb'] + itemlist.append(item.clone(action="play", server="directo", url=url, folder=False)) + return itemlist + + +def listas(item): + logger.info() + # Para añadir listas a la videoteca en carpeta CINE + itemlist = [] + data = anti_cloudflare(item.url, host=host, headers=headers) + data = jsontools.load(data) + head = header_string + get_cookie_value() + for child in data["sortedRepoChilds"]: + infolabels = {} + # Fanart + if 
child['hashBackground']: + fanart = host + "/json/repo/%s/background.jpg" % child["id"] + else: + fanart = host + "/json/repo/%s/thumbnail.jpg" % child["id"] + # Thumbnail + if child['hasPoster']: + thumbnail = host + "/json/repo/%s/poster.jpg" % child["id"] + else: + thumbnail = fanart + thumbnail += head + fanart += head + url = host + "/json/repo/%s/index.json" % child["id"] + if child['name'] == "": + title = scrapertools.slugify(child['id'].rsplit(".", 1)[0]) + else: + title = scrapertools.slugify(child['name']) + title = title.replace('-', ' ').replace('_', ' ').capitalize() + infolabels['title'] = title + try: + from core import videolibrarytools + new_item = item.clone(title=title, url=url, fulltitle=title, fanart=fanart, extra="findvideos", + thumbnail=thumbnail, infoLabels=infolabels, category="Cine") + videolibrarytools.library.add_movie(new_item) + error = False + except: + error = True + import traceback + logger.error(traceback.format_exc()) + if not error: + itemlist.append(Item(channel=item.channel, title='Lista añadida correctamente a la videoteca', + action="", folder=False)) + else: + itemlist.append(Item(channel=item.channel, title='ERROR. Han ocurrido uno o varios errores en el proceso', + action="", folder=False)) + + return itemlist From e0944e5e349bf7442b923ce5d5c489af50cea05b Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Wed, 25 Apr 2018 10:43:37 -0500 Subject: [PATCH 06/12] flashx --- plugin.video.alfa/servers/flashx.py | 60 ++--------------------------- 1 file changed, 4 insertions(+), 56 deletions(-) diff --git a/plugin.video.alfa/servers/flashx.py b/plugin.video.alfa/servers/flashx.py index 7ad3a1a7..e657ab05 100644 --- a/plugin.video.alfa/servers/flashx.py +++ b/plugin.video.alfa/servers/flashx.py @@ -18,66 +18,14 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): logger.info("url=" + page_url) - - data = httptools.downloadpage(page_url).data - - cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.bz/counter.cgi.*?[^(?:'|")]+)""") - cgi_counter = cgi_counter.replace("%0A","").replace("%22","") - httptools.downloadpage(cgi_counter, cookies=False) - - time.sleep(6) - - url_playitnow = "https://www.flashx.bz/dl?playitnow" - fid = scrapertools.find_single_match(data, 'input type="hidden" name="id" value="([^"]*)"') - fname = scrapertools.find_single_match(data, 'input type="hidden" name="fname" value="([^"]*)"') - fhash = scrapertools.find_single_match(data, 'input type="hidden" name="hash" value="([^"]*)"') - - headers = {'Content': 'application/x-www-form-urlencoded'} - post_parameters = { - "op": "download1", - "usr_login": "", - "id": fid, - "fname": fname, - "referer": "https://www.flashx.bz/", - "hash": fhash, - "imhuman": "Continue To Video" - } - data = httptools.downloadpage(url_playitnow, urllib.urlencode(post_parameters), headers=headers).data - - video_urls = [] - media_urls = scrapertools.find_multiple_matches(data, "{src: '([^']+)'.*?,label: '([^']+)'") - subtitle = "" - for media_url, label in media_urls: - if media_url.endswith(".srt") and label == "Spanish": - try: - from core import filetools - data = httptools.downloadpage(media_url) - subtitle = os.path.join(config.get_data_path(), 'sub_flashx.srt') - filetools.write(subtitle, data) - except: - import traceback - logger.info("Error al descargar el subtítulo: " + traceback.format_exc()) - - for media_url, label in media_urls: - if not 
media_url.endswith("png") and not media_url.endswith(".srt"): - video_urls.append(["." + media_url.rsplit('.', 1)[1] + " [flashx]", media_url, 0, subtitle]) - - for video_url in video_urls: - logger.info("%s - %s" % (video_url[0], video_url[1])) - - return video_urls - - -def get_video_url_anterior(page_url, premium=False, user="", password="", video_password=""): - logger.info("url=" + page_url) pfxfx = "" data = httptools.downloadpage(page_url, cookies=False).data data = data.replace("\n","") - cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.ws/counter.cgi.*?[^(?:'|")]+)""") + cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.bz/counter.cgi.*?[^(?:'|")]+)""") cgi_counter = cgi_counter.replace("%0A","").replace("%22","") - playnow = scrapertools.find_single_match(data, 'https://www.flashx.ws/dl[^"]+') + playnow = scrapertools.find_single_match(data, 'https://www.flashx.bz/dl[^"]+') # Para obtener el f y el fxfx - js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.ws/js\w+/c\w+.*?[^(?:'|")]+)""") + js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.bz/js\w+/c\w+.*?[^(?:'|")]+)""") data_fxfx = httptools.downloadpage(js_fxfx).data mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'","").replace(" ","") matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)') @@ -87,7 +35,7 @@ def get_video_url_anterior(page_url, premium=False, user="", password="", video_ logger.info("mfxfxfx2= %s" %pfxfx) if pfxfx == "": pfxfx = "ss=yes&f=fail&fxfx=6" - coding_url = 'https://www.flashx.ws/flashx.php?%s' %pfxfx + coding_url = 'https://www.flashx.bz/flashx.php?%s' %pfxfx # {f: 'y', fxfx: '6'} bloque = scrapertools.find_single_match(data, '(?s)Form method="POST" action(.*?)span') flashx_id = scrapertools.find_single_match(bloque, 'name="id" value="([^"]+)"') From 7c57c6bda7bf107a71708e592d8cd18eb75574a9 Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Wed, 25 Apr 2018 11:27:02 -0500 Subject: [PATCH 07/12] unshortenit: updated --- plugin.video.alfa/lib/unshortenit.py | 681 ++++++++++++++++++--------- 1 file changed, 454 insertions(+), 227 deletions(-) diff --git a/plugin.video.alfa/lib/unshortenit.py b/plugin.video.alfa/lib/unshortenit.py index 79631c65..f8591eed 100755 --- a/plugin.video.alfa/lib/unshortenit.py +++ b/plugin.video.alfa/lib/unshortenit.py @@ -1,227 +1,454 @@ -# -*- coding: utf-8 -*- - -try: - from selenium.webdriver import PhantomJS - from contextlib import closing - - linkbucks_support = True -except: - linkbucks_support = False -try: - from urllib.request import urlsplit, urlparse -except: - from urlparse import urlsplit, urlparse -import json -import os -import re -import time -from base64 import b64decode - -import requests - - -class UnshortenIt(object): - _headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8', - 'Accept-Encoding': 'gzip,deflate,sdch', - 'Accept-Language': 'en-US,en;q=0.8', - 'Connection': 'keep-alive', - 'User-Agent': 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.69 Safari/537.36'} - _adfly_regex = r'adf\.ly|q\.gs|j\.gs|u\.bb|ay\.gy' - _linkbucks_regex = 
r'linkbucks\.com|any\.gs|cash4links\.co|cash4files\.co|dyo\.gs|filesonthe\.net|goneviral\.com|megaline\.co|miniurls\.co|qqc\.co|seriousdeals\.net|theseblogs\.com|theseforums\.com|tinylinks\.co|tubeviral\.com|ultrafiles\.net|urlbeat\.net|whackyvidz\.com|yyv\.co' - _adfocus_regex = r'adfoc\.us' - _lnxlu_regex = r'lnx\.lu' - _shst_regex = r'sh\.st' - _this_dir, _this_filename = os.path.split(__file__) - _timeout = 10 - - def unshorten(self, uri, type=None, timeout=10): - domain = urlsplit(uri).netloc - self._timeout = timeout - - if re.search(self._adfly_regex, domain, re.IGNORECASE) or type == 'adfly': - return self._unshorten_adfly(uri) - if re.search(self._adfocus_regex, domain, re.IGNORECASE) or type == 'adfocus': - return self._unshorten_adfocus(uri) - if re.search(self._linkbucks_regex, domain, re.IGNORECASE) or type == 'linkbucks': - if linkbucks_support: - return self._unshorten_linkbucks(uri) - else: - return uri, 'linkbucks.com not supported. Install selenium package to add support.' - if re.search(self._lnxlu_regex, domain, re.IGNORECASE) or type == 'lnxlu': - return self._unshorten_lnxlu(uri) - if re.search(self._shst_regex, domain, re.IGNORECASE): - return self._unshorten_shst(uri) - - try: - # headers stop t.co from working so omit headers if this is a t.co link - if domain == 't.co': - r = requests.get(uri, timeout=self._timeout) - return r.url, r.status_code - # p.ost.im uses meta http refresh to redirect. - if domain == 'p.ost.im': - r = requests.get(uri, headers=self._headers, timeout=self._timeout) - uri = re.findall(r'.*url\=(.*?)\"\.*', r.text)[0] - return uri, 200 - r = requests.head(uri, headers=self._headers, timeout=self._timeout) - while True: - if 'location' in r.headers: - r = requests.head(r.headers['location']) - uri = r.url - else: - return r.url, r.status_code - - except Exception as e: - return uri, str(e) - - def _unshorten_adfly(self, uri): - - try: - r = requests.get(uri, headers=self._headers, timeout=self._timeout) - html = r.text - ysmm = re.findall(r"var ysmm =.*\;?", html) - - if len(ysmm) > 0: - ysmm = re.sub(r'var ysmm \= \'|\'\;', '', ysmm[0]) - - left = '' - right = '' - - for c in [ysmm[i:i + 2] for i in range(0, len(ysmm), 2)]: - left += c[0] - right = c[1] + right - - decoded_uri = b64decode(left.encode() + right.encode())[2:].decode() - - if re.search(r'go\.php\?u\=', decoded_uri): - decoded_uri = b64decode(re.sub(r'(.*?)u=', '', decoded_uri)).decode() - - return decoded_uri, r.status_code - else: - return uri, 'No ysmm variable found' - - except Exception as e: - return uri, str(e) - - def _unshorten_linkbucks(self, uri): - try: - with closing(PhantomJS( - service_log_path=os.path.dirname(os.path.realpath(__file__)) + '/ghostdriver.log')) as browser: - browser.get(uri) - - # wait 5 seconds - time.sleep(5) - - page_source = browser.page_source - - link = re.findall(r'skiplink(.*?)\>', page_source) - if link is not None: - link = re.sub(r'\shref\=|\"', '', link[0]) - if link == '': - return uri, 'Failed to extract link.' - return link, 200 - else: - return uri, 'Failed to extract link.' 
- - except Exception as e: - return uri, str(e) - - def _unshorten_adfocus(self, uri): - orig_uri = uri - try: - http_header = { - "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.46 Safari/535.11", - "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", - "Accept-Language": "nl-NL,nl;q=0.8,en-US;q=0.6,en;q=0.4", - "Cache-Control": "no-cache", - "Pragma": "no-cache" - } - - r = requests.get(uri, headers=http_header, timeout=self._timeout) - html = r.text - - adlink = re.findall("click_url =.*;", html) - - if len(adlink) > 0: - uri = re.sub('^click_url = "|"\;$', '', adlink[0]) - if re.search(r'http(s|)\://adfoc\.us/serve/skip/\?id\=', uri): - http_header = { - "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.46 Safari/535.11", - "Accept-Encoding": "gzip,deflate,sdch", - "Accept-Language": "en-US,en;,q=0.8", - "Connection": "keep-alive", - "Host": "adfoc.us", - "Cache-Control": "no-cache", - "Pragma": "no-cache", - "Referer": orig_uri, - } - r = requests.get(uri, headers=http_header, timeout=self._timeout) - - uri = r.url - return uri, r.status_code - else: - return uri, 'No click_url variable found' - except Exception as e: - return uri, str(e) - - def _unshorten_lnxlu(self, uri): - try: - r = requests.get(uri, headers=self._headers, timeout=self._timeout) - html = r.text - - code = re.findall('/\?click\=(.*)\."', html) - - if len(code) > 0: - payload = {'click': code[0]} - r = requests.get('http://lnx.lu/', params=payload, headers=self._headers, timeout=self._timeout) - return r.url, r.status_code - else: - return uri, 'No click variable found' - except Exception as e: - return uri, str(e) - - def _unshorten_shst(self, uri): - try: - r = requests.get(uri, headers=self._headers, timeout=self._timeout) - html = r.text - - session_id = re.findall(r'sessionId\:(.*?)\"\,', html) - if len(session_id) > 0: - session_id = re.sub(r'\s\"', '', session_id[0]) - - http_header = { - "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.46 Safari/535.11", - "Accept-Encoding": "gzip,deflate,sdch", - "Accept-Language": "en-US,en;,q=0.8", - "Connection": "keep-alive", - "Content-Type": "application/x-www-form-urlencoded", - "Host": "sh.st", - "Referer": uri, - "Origin": "http://sh.st", - "X-Requested-With": "XMLHttpRequest" - } - - time.sleep(5) - - payload = {'adSessionId': session_id, 'callback': 'c'} - r = requests.get('http://sh.st/shortest-url/end-adsession', params=payload, headers=http_header, - timeout=self._timeout) - response = r.content[6:-2].decode('utf-8') - - if r.status_code == 200: - resp_uri = json.loads(response)['destinationUrl'] - if resp_uri is not None: - uri = resp_uri - else: - return uri, 'Error extracting url' - else: - return uri, 'Error extracting url' - - return uri, r.status_code - - except Exception as e: - return uri, str(e) - - -def unshorten(uri, type=None, timeout=10): - unshortener = UnshortenIt() - return unshortener.unshorten(uri, type, timeout) +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +try: + from urllib.parse import urlsplit, urlparse, parse_qs, urljoin +except: + from urlparse import urlsplit, urlparse, parse_qs, urljoin + +import json +import os +import re +import time +import urllib +from base64 import b64decode +from platformcode import logger + +import xbmc + +from core import httptools + + +def find_in_text(regex, text, flags=re.IGNORECASE | re.DOTALL): + rec = 
re.compile(regex, flags=flags) + match = rec.search(text) + if not match: + return False + return match.group(1) + + +class UnshortenIt(object): + _adfly_regex = r'adf\.ly|q\.gs|j\.gs|u\.bb|ay\.gy|threadsphere\.bid|restorecosm\.bid' + _linkbucks_regex = r'linkbucks\.com|any\.gs|cash4links\.co|cash4files\.co|dyo\.gs|filesonthe\.net|goneviral\.com|megaline\.co|miniurls\.co|qqc\.co|seriousdeals\.net|theseblogs\.com|theseforums\.com|tinylinks\.co|tubeviral\.com|ultrafiles\.net|urlbeat\.net|whackyvidz\.com|yyv\.co' + _adfocus_regex = r'adfoc\.us' + _lnxlu_regex = r'lnx\.lu' + _shst_regex = r'sh\.st' + _hrefli_regex = r'href\.li' + _anonymz_regex = r'anonymz\.com' + _shrink_service_regex = r'shrink-service\.it' + _rapidcrypt_regex = r'rapidcrypt\.net' + + _maxretries = 5 + + _this_dir, _this_filename = os.path.split(__file__) + _timeout = 10 + + def unshorten(self, uri, type=None): + + domain = urlsplit(uri).netloc + + if not domain: + return uri, "No domain found in URI!" + + had_google_outbound, uri = self._clear_google_outbound_proxy(uri) + + if re.search(self._adfly_regex, domain, + re.IGNORECASE) or type == 'adfly': + return self._unshorten_adfly(uri) + if re.search(self._adfocus_regex, domain, + re.IGNORECASE) or type == 'adfocus': + return self._unshorten_adfocus(uri) + if re.search(self._linkbucks_regex, domain, + re.IGNORECASE) or type == 'linkbucks': + return self._unshorten_linkbucks(uri) + if re.search(self._lnxlu_regex, domain, + re.IGNORECASE) or type == 'lnxlu': + return self._unshorten_lnxlu(uri) + if re.search(self._shrink_service_regex, domain, re.IGNORECASE): + return self._unshorten_shrink_service(uri) + if re.search(self._shst_regex, domain, re.IGNORECASE): + return self._unshorten_shst(uri) + if re.search(self._hrefli_regex, domain, re.IGNORECASE): + return self._unshorten_hrefli(uri) + if re.search(self._anonymz_regex, domain, re.IGNORECASE): + return self._unshorten_anonymz(uri) + if re.search(self._rapidcrypt_regex, domain, re.IGNORECASE): + return self._unshorten_rapidcrypt(uri) + + return uri, 200 + + def unwrap_30x(self, uri, timeout=10): + + domain = urlsplit(uri).netloc + self._timeout = timeout + + loop_counter = 0 + try: + + if loop_counter > 5: + raise ValueError("Infinitely looping redirect from URL: '%s'" % + (uri,)) + + # headers stop t.co from working so omit headers if this is a t.co link + if domain == 't.co': + r = httptools.downloadpage(uri, timeout=self._timeout) + return r.url, r.code + # p.ost.im uses meta http refresh to redirect. + if domain == 'p.ost.im': + r = httptools.downloadpage(uri, timeout=self._timeout) + uri = re.findall(r'.*url\=(.*?)\"\.*', r.data)[0] + return uri, r.code + else: + + while True: + r = httptools.downloadpage( + uri, + timeout=self._timeout, + follow_redirects=False, + only_headers=True) + if not r.success: + return uri, -1 + + retries = 0 + if 'location' in r.headers and retries < self._maxretries: + r = httptools.downloadpage( + r.headers['location'], + follow_redirects=False, + only_headers=True) + uri = r.url + loop_counter += 1 + retries = retries + 1 + else: + return r.url, r.code + + except Exception as e: + return uri, str(e) + + def _clear_google_outbound_proxy(self, url): + ''' + So google proxies all their outbound links through a redirect so they can detect outbound links. + This call strips them out if they are present. + + This is useful for doing things like parsing google search results, or if you're scraping google + docs, where google inserts hit-counters on all outbound links. 
+ ''' + + # This is kind of hacky, because we need to check both the netloc AND + # part of the path. We could use urllib.parse.urlsplit, but it's + # easier and just as effective to use string checks. + if url.startswith("http://www.google.com/url?") or \ + url.startswith("https://www.google.com/url?"): + + qs = urlparse(url).query + query = parse_qs(qs) + + if "q" in query: # Google doc outbound links (maybe blogspot, too) + return True, query["q"].pop() + elif "url" in query: # Outbound links from google searches + return True, query["url"].pop() + else: + raise ValueError( + "Google outbound proxy URL without a target url ('%s')?" % + url) + + return False, url + + def _unshorten_adfly(self, uri): + logger.info() + try: + r = httptools.downloadpage( + uri, timeout=self._timeout, cookies=False) + html = r.data + logger.info("Intel33 %s" %html) + ysmm = re.findall(r"var ysmm =.*\;?", html) + + if len(ysmm) > 0: + ysmm = re.sub(r'var ysmm \= \'|\'\;', '', ysmm[0]) + + left = '' + right = '' + + for c in [ysmm[i:i + 2] for i in range(0, len(ysmm), 2)]: + left += c[0] + right = c[1] + right + + # Additional digit arithmetic + encoded_uri = list(left + right) + numbers = ((i, n) for i, n in enumerate(encoded_uri) if str.isdigit(n)) + for first, second in zip(numbers, numbers): + xor = int(first[1]) ^ int(second[1]) + if xor < 10: + encoded_uri[first[0]] = str(xor) + + decoded_uri = b64decode("".join(encoded_uri).encode())[16:-16].decode() + + if re.search(r'go\.php\?u\=', decoded_uri): + decoded_uri = b64decode(re.sub(r'(.*?)u=', '', decoded_uri)).decode() + + return decoded_uri, r.code + else: + return uri, 'No ysmm variable found' + + except Exception as e: + return uri, str(e) + + def _unshorten_linkbucks(self, uri): + ''' + (Attempt) to decode linkbucks content. HEAVILY based on the OSS jDownloader codebase. + This has necessidated a license change. + + ''' + + r = httptools.downloadpage(uri, timeout=self._timeout) + + firstGet = time.time() + + baseloc = r.url + + if "/notfound/" in r.url or \ + "(>Link Not Found<|>The link may have been deleted by the owner|To access the content, you must complete a quick survey\.)" in r.data: + return uri, 'Error: Link not found or requires a survey!' + + link = None + + content = r.data + + regexes = [ + r"
    .*?/a>.*?.*?[^<]+)", content) + if not scripts: + return uri, "No script bodies found?" + + js = False + + for script in scripts: + # cleanup + script = re.sub(r"[\r\n\s]+\/\/\s*[^\r\n]+", "", script) + if re.search(r"\s*var\s*f\s*=\s*window\['init'\s*\+\s*'Lb'\s*\+\s*'js'\s*\+\s*''\];[\r\n\s]+", script): + js = script + + if not js: + return uri, "Could not find correct script?" + + token = find_in_text(r"Token\s*:\s*'([a-f0-9]{40})'", js) + if not token: + token = find_in_text(r"\?t=([a-f0-9]{40})", js) + + assert token + + authKeyMatchStr = r"A(?:'\s*\+\s*')?u(?:'\s*\+\s*')?t(?:'\s*\+\s*')?h(?:'\s*\+\s*')?K(?:'\s*\+\s*')?e(?:'\s*\+\s*')?y" + l1 = find_in_text(r"\s*params\['" + authKeyMatchStr + r"'\]\s*=\s*(\d+?);", js) + l2 = find_in_text( + r"\s*params\['" + authKeyMatchStr + r"'\]\s*=\s?params\['" + authKeyMatchStr + r"'\]\s*\+\s*(\d+?);", + js) + + if any([not l1, not l2, not token]): + return uri, "Missing required tokens?" + + authkey = int(l1) + int(l2) + + p1_url = urljoin(baseloc, "/director/?t={tok}".format(tok=token)) + r2 = httptools.downloadpage(p1_url, timeout=self._timeout) + + p1_url = urljoin(baseloc, "/scripts/jquery.js?r={tok}&{key}".format(tok=token, key=l1)) + r2_1 = httptools.downloadpage(p1_url, timeout=self._timeout) + + time_left = 5.033 - (time.time() - firstGet) + xbmc.sleep(max(time_left, 0) * 1000) + + p3_url = urljoin(baseloc, "/intermission/loadTargetUrl?t={tok}&aK={key}&a_b=false".format(tok=token, + key=str(authkey))) + r3 = httptools.downloadpage(p3_url, timeout=self._timeout) + + resp_json = json.loads(r3.data) + if "Url" in resp_json: + return resp_json['Url'], r3.code + + return "Wat", "wat" + + def inValidate(self, s): + # Original conditional: + # (s == null || s != null && (s.matches("[\r\n\t ]+") || s.equals("") || s.equalsIgnoreCase("about:blank"))) + if not s: + return True + + if re.search("[\r\n\t ]+", s) or s.lower() == "about:blank": + return True + else: + return False + + def _unshorten_adfocus(self, uri): + orig_uri = uri + try: + + r = httptools.downloadpage(uri, timeout=self._timeout) + html = r.data + + adlink = re.findall("click_url =.*;", html) + + if len(adlink) > 0: + uri = re.sub('^click_url = "|"\;$', '', adlink[0]) + if re.search(r'http(s|)\://adfoc\.us/serve/skip/\?id\=', uri): + http_header = dict() + http_header["Host"] = "adfoc.us" + http_header["Referer"] = orig_uri + + r = httptools.downloadpage(uri, headers=http_header, timeout=self._timeout) + + uri = r.url + return uri, r.code + else: + return uri, 'No click_url variable found' + except Exception as e: + return uri, str(e) + + def _unshorten_lnxlu(self, uri): + try: + r = httptools.downloadpage(uri, timeout=self._timeout) + html = r.data + + code = re.findall('/\?click\=(.*)\."', html) + + if len(code) > 0: + payload = {'click': code[0]} + r = httptools.downloadpage( + 'http://lnx.lu?' 
+ urllib.urlencode(payload), + timeout=self._timeout) + return r.url, r.code + else: + return uri, 'No click variable found' + except Exception as e: + return uri, str(e) + + def _unshorten_shst(self, uri): + try: + r = httptools.downloadpage(uri, timeout=self._timeout) + html = r.data + + session_id = re.findall(r'sessionId\:(.*?)\"\,', html) + if len(session_id) > 0: + session_id = re.sub(r'\s\"', '', session_id[0]) + + http_header = dict() + http_header["Content-Type"] = "application/x-www-form-urlencoded" + http_header["Host"] = "sh.st" + http_header["Referer"] = uri + http_header["Origin"] = "http://sh.st" + http_header["X-Requested-With"] = "XMLHttpRequest" + + xbmc.sleep(5 * 1000) + + payload = {'adSessionId': session_id, 'callback': 'c'} + r = httptools.downloadpage( + 'http://sh.st/shortest-url/end-adsession?' + + urllib.urlencode(payload), + headers=http_header, + timeout=self._timeout) + response = r.data[6:-2].decode('utf-8') + + if r.code == 200: + resp_uri = json.loads(response)['destinationUrl'] + if resp_uri is not None: + uri = resp_uri + else: + return uri, 'Error extracting url' + else: + return uri, 'Error extracting url' + + return uri, r.code + + except Exception as e: + return uri, str(e) + + def _unshorten_hrefli(self, uri): + try: + # Extract url from query + parsed_uri = urlparse(uri) + extracted_uri = parsed_uri.query + if not extracted_uri: + return uri, 200 + # Get url status code + r = httptools.downloadpage( + extracted_uri, + timeout=self._timeout, + follow_redirects=False, + only_headers=True) + return r.url, r.code + except Exception as e: + return uri, str(e) + + def _unshorten_anonymz(self, uri): + # For the moment they use the same system as hrefli + return self._unshorten_hrefli(uri) + + def _unshorten_shrink_service(self, uri): + try: + r = httptools.downloadpage(uri, timeout=self._timeout, cookies=False) + html = r.data + + uri = re.findall(r"", html)[0] + + from core import scrapertools + uri = scrapertools.decodeHtmlentities(uri) + + uri = uri.replace("/", "/") \ + .replace(":", ":") \ + .replace(".", ".") \ + .replace("!", "!") \ + .replace("#", "#") \ + .replace("?", "?") \ + .replace("_", "_") + + return uri, r.code + + except Exception as e: + return uri, str(e) + + def _unshorten_rapidcrypt(self, uri): + try: + r = httptools.downloadpage(uri, timeout=self._timeout, cookies=False) + html = r.data + + uri = re.findall(r'Click to continue', html)[0] + + return uri, r.code + + except Exception as e: + return uri, str(e) + + +def unwrap_30x_only(uri, timeout=10): + unshortener = UnshortenIt() + uri, status = unshortener.unwrap_30x(uri, timeout=timeout) + return uri, status + + +def unshorten_only(uri, type=None, timeout=10): + unshortener = UnshortenIt() + uri, status = unshortener.unshorten(uri, type=type) + return uri, status + + +def unshorten(uri, type=None, timeout=10): + unshortener = UnshortenIt() + uri, status = unshortener.unshorten(uri, type=type) + if status == 200: + uri, status = unshortener.unwrap_30x(uri, timeout=timeout) + return uri, status From 7b86fce6bfc701115bcc7ffcba2cf886036570b7 Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Wed, 25 Apr 2018 11:35:46 -0500 Subject: [PATCH 08/12] Update unshortenit.py --- plugin.video.alfa/lib/unshortenit.py | 1 - 1 file changed, 1 deletion(-) diff --git a/plugin.video.alfa/lib/unshortenit.py b/plugin.video.alfa/lib/unshortenit.py index f8591eed..ee953a06 100755 --- a/plugin.video.alfa/lib/unshortenit.py +++ b/plugin.video.alfa/lib/unshortenit.py @@ 
-159,7 +159,6 @@ class UnshortenIt(object): r = httptools.downloadpage( uri, timeout=self._timeout, cookies=False) html = r.data - logger.info("Intel33 %s" %html) ysmm = re.findall(r"var ysmm =.*\;?", html) if len(ysmm) > 0: From 3700f4871ae2221dfce5b303aaa5fd388cd40c9a Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Wed, 25 Apr 2018 12:03:12 -0500 Subject: [PATCH 09/12] clipwatching: updated pattern --- plugin.video.alfa/servers/clipwatching.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin.video.alfa/servers/clipwatching.json b/plugin.video.alfa/servers/clipwatching.json index d02c62a8..1a7fd813 100644 --- a/plugin.video.alfa/servers/clipwatching.json +++ b/plugin.video.alfa/servers/clipwatching.json @@ -4,7 +4,7 @@ "ignore_urls": [], "patterns": [ { - "pattern": "clipwatching.com/(.*?).html", + "pattern": "clipwatching.com/(\\w+)", "url": "http://clipwatching.com/\\1.html" } ] From 3eb4cfd3a8e25457a0941af1c63f9e168ab6ffcc Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Wed, 25 Apr 2018 12:50:19 -0500 Subject: [PATCH 10/12] seriecanal: web no usada --- plugin.video.alfa/channels/seriecanal.json | 61 ---------------------- 1 file changed, 61 deletions(-) delete mode 100755 plugin.video.alfa/channels/seriecanal.json diff --git a/plugin.video.alfa/channels/seriecanal.json b/plugin.video.alfa/channels/seriecanal.json deleted file mode 100755 index b3166f5f..00000000 --- a/plugin.video.alfa/channels/seriecanal.json +++ /dev/null @@ -1,61 +0,0 @@ -{ - "id": "seriecanal", - "name": "Seriecanal", - "active": true, - "adult": false, - "language": ["cast"], - "thumbnail": "http://i.imgur.com/EwMK8Yd.png", - "banner": "seriecanal.png", - "categories": [ - "tvshow", - "vos" - ], - "settings": [ - { - "id": "include_in_global_search", - "type": "bool", - "label": "Incluir en busqueda global", - "default": false, - "enabled": true, - "visible": true - }, - { - "id": "modo_grafico", - "type": "bool", - "label": "Buscar información extra", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "user", - "type": "text", - "label": "Usuario", - "color": "0xFFd50b0b", - "enabled": true, - "visible": true - }, - { - "id": "password", - "type": "text", - "label": "Contraseña", - "color": "0xFFd50b0b", - "enabled": true, - "visible": true, - "hidden": true - }, - { - "id": "perfil", - "type": "list", - "label": "Perfil de color", - "default": 2, - "enabled": true, - "visible": true, - "lvalues": [ - "Perfil 3", - "Perfil 2", - "Perfil 1" - ] - } - ] -} \ No newline at end of file From 20d200257694ba41abbaef274954e3f88eb0c425 Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Wed, 25 Apr 2018 12:50:32 -0500 Subject: [PATCH 11/12] seriecanal: web no usada --- plugin.video.alfa/channels/seriecanal.py | 252 ----------------------- 1 file changed, 252 deletions(-) delete mode 100755 plugin.video.alfa/channels/seriecanal.py diff --git a/plugin.video.alfa/channels/seriecanal.py b/plugin.video.alfa/channels/seriecanal.py deleted file mode 100755 index 0ac2bfb4..00000000 --- a/plugin.video.alfa/channels/seriecanal.py +++ /dev/null @@ -1,252 +0,0 @@ -# -*- coding: utf-8 -*- - -import re -import urllib -import urlparse - -from core import scrapertools -from core import servertools -from platformcode import config, logger - -__modo_grafico__ = config.get_setting('modo_grafico', "seriecanal") -__perfil__ = config.get_setting('perfil', "descargasmix") - -# Fijar 
perfil de color -perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'], - ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'], - ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']] -color1, color2, color3 = perfil[__perfil__] - -URL_BASE = "http://www.seriecanal.com/" - - -def login(): - logger.info() - data = scrapertools.downloadpage(URL_BASE) - if "Cerrar Sesion" in data: - return True, "" - - usuario = config.get_setting("user", "seriecanal") - password = config.get_setting("password", "seriecanal") - if usuario == "" or password == "": - return False, 'Regístrate en www.seriecanal.com e introduce tus datos en "Configurar Canal"' - else: - post = urllib.urlencode({'username': usuario, 'password': password}) - data = scrapertools.downloadpage("http://www.seriecanal.com/index.php?page=member&do=login&tarea=acceder", - post=post) - if "Bienvenid@, se ha identificado correctamente en nuestro sistema" in data: - return True, "" - else: - return False, "Error en el login. El usuario y/o la contraseña no son correctos" - - -def mainlist(item): - logger.info() - itemlist = [] - item.text_color = color1 - - result, message = login() - if result: - itemlist.append(item.clone(action="series", title="Últimos episodios", url=URL_BASE)) - itemlist.append(item.clone(action="genero", title="Series por género")) - itemlist.append(item.clone(action="alfabetico", title="Series por orden alfabético")) - itemlist.append(item.clone(action="search", title="Buscar...")) - else: - itemlist.append(item.clone(action="", title=message, text_color="red")) - - itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False)) - - return itemlist - - -def configuracion(item): - from platformcode import platformtools - ret = platformtools.show_channel_settings() - platformtools.itemlist_refresh() - return ret - - -def search(item, texto): - logger.info() - item.url = "http://www.seriecanal.com/index.php?page=portada&do=category&method=post&category_id=0&order=" \ - "C_Create&view=thumb&pgs=1&p2=1" - try: - post = "keyserie=" + texto - item.extra = post - return series(item) - # Se captura la excepción, para no interrumpir al buscador global si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def genero(item): - logger.info() - itemlist = [] - data = scrapertools.downloadpage(URL_BASE) - data = scrapertools.find_single_match(data, '
(.*?)')
-    matches = scrapertools.find_multiple_matches(data, '([^"]+)')
-    for scrapedurl, scrapedtitle in matches:
-        scrapedtitle = scrapedtitle.capitalize()
-        url = urlparse.urljoin(URL_BASE, scrapedurl)
-        itemlist.append(item.clone(action="series", title=scrapedtitle, url=url))
-
-    return itemlist
-
-
-def alfabetico(item):
-    logger.info()
-    itemlist = []
-    data = scrapertools.downloadpage(URL_BASE)
-    data = scrapertools.find_single_match(data, '(.*?)')
-    matches = scrapertools.find_multiple_matches(data, '([^"]+)')
-    for scrapedurl, scrapedtitle in matches:
-        url = urlparse.urljoin(URL_BASE, scrapedurl)
-        itemlist.append(item.clone(action="series", title=scrapedtitle, url=url))
-    return itemlist
-
-
-def series(item):
-    logger.info()
-    itemlist = []
-    item.infoLabels = {}
-    item.text_color = color2
-
-    if item.extra != "":
-        data = scrapertools.downloadpage(item.url, post=item.extra)
-    else:
-        data = scrapertools.downloadpage(item.url)
-    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
-
-    patron = '([^"]+).*?([^"]+).*?' \
-             '(.*?)'
-
-    matches = scrapertools.find_multiple_matches(data, patron)
-
-    for scrapedthumbnail, scrapedurl, scrapedplot, scrapedtitle, scrapedtemp, scrapedepi in matches:
-        title = scrapedtitle + " - " + scrapedtemp + " - " + scrapedepi
-        url = urlparse.urljoin(URL_BASE, scrapedurl)
-        temporada = scrapertools.find_single_match(scrapedtemp, "(\d+)")
-        new_item = item.clone()
-        new_item.contentType = "tvshow"
-        if temporada != "":
-            new_item.infoLabels['season'] = temporada
-            new_item.contentType = "season"
-
-        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + scrapedthumbnail + "]")
-        itemlist.append(new_item.clone(action="findvideos", title=title, fulltitle=scrapedtitle, url=url,
-                                       thumbnail=scrapedthumbnail, plot=scrapedplot, contentTitle=scrapedtitle,
-                                       context=["buscar_trailer"], show=scrapedtitle))
-
-    try:
-        from core import tmdb
-        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
-    except:
-        pass
-    # Extra marca siguiente página
-    next_page = scrapertools.find_single_match(data, 'Episodio - Enlaces de Descarga(.*?)')
-    patron = '([^"]+)'
-    matches = scrapertools.find_multiple_matches(data_download, patron)
-    for scrapedurl, scrapedepi in matches:
-        new_item = item.clone()
-        if "Episodio" not in scrapedepi:
-            scrapedtitle = "[Torrent] Episodio " + scrapedepi
-        else:
-            scrapedtitle = "[Torrent] " + scrapedepi
-        scrapedtitle = scrapertools.htmlclean(scrapedtitle)
-
-        new_item.infoLabels['episode'] = scrapertools.find_single_match(scrapedtitle, "Episodio (\d+)")
-        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "]")
-        itemlist.append(new_item.clone(action="play", title=scrapedtitle, url=scrapedurl, server="torrent",
-                                       contentType="episode"))
-
-    # Busca en la seccion online
-    data_online = scrapertools.find_single_match(data, "Enlaces de Visionado Online(.*?)")
-    patron = '([^"]+)'
-    matches = scrapertools.find_multiple_matches(data_online, patron)
-
-    for scrapedurl, scrapedthumb, scrapedtitle in matches:
-        # Deshecha enlaces de trailers
-        scrapedtitle = scrapertools.htmlclean(scrapedtitle)
-        if (scrapedthumb != "images/series/youtube.png") & (scrapedtitle != "Trailer"):
-            new_item = item.clone()
-            server = scrapertools.find_single_match(scrapedthumb, "images/series/(.*?).png")
-            title = "[" + server.capitalize() + "]" + " " + scrapedtitle
-
-            new_item.infoLabels['episode'] = scrapertools.find_single_match(scrapedtitle, "Episodio (\d+)")
-            itemlist.append(new_item.clone(action="play", title=title, url=scrapedurl, contentType="episode"))
-
-    # Comprueba si hay otras temporadas
-    if not "No hay disponible ninguna Temporada adicional" in data:
-        data_temp = scrapertools.find_single_match(data, '(.*?)')
-        data_temp = re.sub(r"\n|\r|\t|\s{2}| ", "", data_temp)
-        patron = '
    ([^"]+)' - matches = scrapertools.find_multiple_matches(data_temp, patron) - for scrapedurl, scrapedtitle in matches: - new_item = item.clone() - url = urlparse.urljoin(URL_BASE, scrapedurl) - scrapedtitle = scrapedtitle.capitalize() - temporada = scrapertools.find_single_match(scrapedtitle, "Temporada (\d+)") - if temporada != "": - new_item.infoLabels['season'] = temporada - new_item.infoLabels['episode'] = "" - itemlist.append(new_item.clone(action="findvideos", title=scrapedtitle, url=url, text_color="red", - contentType="season")) - - try: - from core import tmdb - tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) - except: - pass - - new_item = item.clone() - if config.is_xbmc(): - new_item.contextual = True - itemlist.append(new_item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="", - text_color="magenta")) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - - if item.extra == "torrent": - itemlist.append(item.clone()) - else: - # Extrae url de enlace bit.ly - if item.url.startswith("http://bit.ly/"): - item.url = scrapertools.getLocationHeaderFromResponse(item.url) - video_list = servertools.findvideos(item.url) - if video_list: - url = video_list[0][1] - server = video_list[0][2] - itemlist.append(item.clone(server=server, url=url)) - - return itemlist From 513b93949e9e4c11d02bc129fef3eb932405cf93 Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Wed, 25 Apr 2018 14:13:58 -0500 Subject: [PATCH 12/12] Add files via upload --- plugin.video.alfa/channels/seriecanal.json | 61 +++++ plugin.video.alfa/channels/seriecanal.py | 252 +++++++++++++++++++++ 2 files changed, 313 insertions(+) create mode 100644 plugin.video.alfa/channels/seriecanal.json create mode 100644 plugin.video.alfa/channels/seriecanal.py diff --git a/plugin.video.alfa/channels/seriecanal.json b/plugin.video.alfa/channels/seriecanal.json new file mode 100644 index 00000000..b3166f5f --- /dev/null +++ b/plugin.video.alfa/channels/seriecanal.json @@ -0,0 +1,61 @@ +{ + "id": "seriecanal", + "name": "Seriecanal", + "active": true, + "adult": false, + "language": ["cast"], + "thumbnail": "http://i.imgur.com/EwMK8Yd.png", + "banner": "seriecanal.png", + "categories": [ + "tvshow", + "vos" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": true, + "visible": true + }, + { + "id": "modo_grafico", + "type": "bool", + "label": "Buscar información extra", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "user", + "type": "text", + "label": "Usuario", + "color": "0xFFd50b0b", + "enabled": true, + "visible": true + }, + { + "id": "password", + "type": "text", + "label": "Contraseña", + "color": "0xFFd50b0b", + "enabled": true, + "visible": true, + "hidden": true + }, + { + "id": "perfil", + "type": "list", + "label": "Perfil de color", + "default": 2, + "enabled": true, + "visible": true, + "lvalues": [ + "Perfil 3", + "Perfil 2", + "Perfil 1" + ] + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/seriecanal.py b/plugin.video.alfa/channels/seriecanal.py new file mode 100644 index 00000000..0ac2bfb4 --- /dev/null +++ b/plugin.video.alfa/channels/seriecanal.py @@ -0,0 +1,252 @@ +# -*- coding: utf-8 -*- + +import re +import urllib +import urlparse + +from core import scrapertools +from core import servertools +from platformcode import config, logger + +__modo_grafico__ = 
config.get_setting('modo_grafico', "seriecanal") +__perfil__ = config.get_setting('perfil', "descargasmix") + +# Fijar perfil de color +perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'], + ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'], + ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']] +color1, color2, color3 = perfil[__perfil__] + +URL_BASE = "http://www.seriecanal.com/" + + +def login(): + logger.info() + data = scrapertools.downloadpage(URL_BASE) + if "Cerrar Sesion" in data: + return True, "" + + usuario = config.get_setting("user", "seriecanal") + password = config.get_setting("password", "seriecanal") + if usuario == "" or password == "": + return False, 'Regístrate en www.seriecanal.com e introduce tus datos en "Configurar Canal"' + else: + post = urllib.urlencode({'username': usuario, 'password': password}) + data = scrapertools.downloadpage("http://www.seriecanal.com/index.php?page=member&do=login&tarea=acceder", + post=post) + if "Bienvenid@, se ha identificado correctamente en nuestro sistema" in data: + return True, "" + else: + return False, "Error en el login. El usuario y/o la contraseña no son correctos" + + +def mainlist(item): + logger.info() + itemlist = [] + item.text_color = color1 + + result, message = login() + if result: + itemlist.append(item.clone(action="series", title="Últimos episodios", url=URL_BASE)) + itemlist.append(item.clone(action="genero", title="Series por género")) + itemlist.append(item.clone(action="alfabetico", title="Series por orden alfabético")) + itemlist.append(item.clone(action="search", title="Buscar...")) + else: + itemlist.append(item.clone(action="", title=message, text_color="red")) + + itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False)) + + return itemlist + + +def configuracion(item): + from platformcode import platformtools + ret = platformtools.show_channel_settings() + platformtools.itemlist_refresh() + return ret + + +def search(item, texto): + logger.info() + item.url = "http://www.seriecanal.com/index.php?page=portada&do=category&method=post&category_id=0&order=" \ + "C_Create&view=thumb&pgs=1&p2=1" + try: + post = "keyserie=" + texto + item.extra = post + return series(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def genero(item): + logger.info() + itemlist = [] + data = scrapertools.downloadpage(URL_BASE) + data = scrapertools.find_single_match(data, '
')
+
+    matches = scrapertools.find_multiple_matches(data, '([^"]+)')
+    for scrapedurl, scrapedtitle in matches:
+        scrapedtitle = scrapedtitle.capitalize()
+        url = urlparse.urljoin(URL_BASE, scrapedurl)
+        itemlist.append(item.clone(action="series", title=scrapedtitle, url=url))
+
+    return itemlist
+
+
+def alfabetico(item):
+    logger.info()
+    itemlist = []
+    data = scrapertools.downloadpage(URL_BASE)
+    data = scrapertools.find_single_match(data, '(.*?)')
+
+    matches = scrapertools.find_multiple_matches(data, '([^"]+)')
+    for scrapedurl, scrapedtitle in matches:
+        url = urlparse.urljoin(URL_BASE, scrapedurl)
+        itemlist.append(item.clone(action="series", title=scrapedtitle, url=url))
+    return itemlist
+
+
+def series(item):
+    logger.info()
+    itemlist = []
+    item.infoLabels = {}
+    item.text_color = color2
+
+    if item.extra != "":
+        data = scrapertools.downloadpage(item.url, post=item.extra)
+    else:
+        data = scrapertools.downloadpage(item.url)
+    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
+
+    patron = '([^"]+).*?([^"]+).*?' \
+             '(.*?)'
+
+    matches = scrapertools.find_multiple_matches(data, patron)
+
+    for scrapedthumbnail, scrapedurl, scrapedplot, scrapedtitle, scrapedtemp, scrapedepi in matches:
+        title = scrapedtitle + " - " + scrapedtemp + " - " + scrapedepi
+        url = urlparse.urljoin(URL_BASE, scrapedurl)
+        temporada = scrapertools.find_single_match(scrapedtemp, "(\d+)")
+        new_item = item.clone()
+        new_item.contentType = "tvshow"
+        if temporada != "":
+            new_item.infoLabels['season'] = temporada
+            new_item.contentType = "season"
+
+        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + scrapedthumbnail + "]")
+        itemlist.append(new_item.clone(action="findvideos", title=title, fulltitle=scrapedtitle, url=url,
+                                       thumbnail=scrapedthumbnail, plot=scrapedplot, contentTitle=scrapedtitle,
+                                       context=["buscar_trailer"], show=scrapedtitle))
+
+    try:
+        from core import tmdb
+        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
+    except:
+        pass
+    # Extra marca siguiente página
+    next_page = scrapertools.find_single_match(data, 'Episodio - Enlaces de Descarga(.*?)')
+    patron = '([^"]+)'
+    matches = scrapertools.find_multiple_matches(data_download, patron)
+    for scrapedurl, scrapedepi in matches:
+        new_item = item.clone()
+        if "Episodio" not in scrapedepi:
+            scrapedtitle = "[Torrent] Episodio " + scrapedepi
+        else:
+            scrapedtitle = "[Torrent] " + scrapedepi
+        scrapedtitle = scrapertools.htmlclean(scrapedtitle)
+
+        new_item.infoLabels['episode'] = scrapertools.find_single_match(scrapedtitle, "Episodio (\d+)")
+        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "]")
+        itemlist.append(new_item.clone(action="play", title=scrapedtitle, url=scrapedurl, server="torrent",
+                                       contentType="episode"))
+
+    # Busca en la seccion online
+    data_online = scrapertools.find_single_match(data, "Enlaces de Visionado Online(.*?)")
+    patron = '([^"]+)'
+    matches = scrapertools.find_multiple_matches(data_online, patron)
+
+    for scrapedurl, scrapedthumb, scrapedtitle in matches:
+        # Deshecha enlaces de trailers
+        scrapedtitle = scrapertools.htmlclean(scrapedtitle)
+        if (scrapedthumb != "images/series/youtube.png") & (scrapedtitle != "Trailer"):
+            new_item = item.clone()
+            server = scrapertools.find_single_match(scrapedthumb, "images/series/(.*?).png")
+            title = "[" + server.capitalize() + "]" + " " + scrapedtitle
+
+            new_item.infoLabels['episode'] = scrapertools.find_single_match(scrapedtitle, "Episodio (\d+)")
+            itemlist.append(new_item.clone(action="play", title=title, url=scrapedurl, contentType="episode"))
+
+    # Comprueba si hay otras temporadas
+    if not "No hay disponible ninguna Temporada adicional" in data:
+        data_temp = scrapertools.find_single_match(data, '