From 8ff7249ed45ff972622243b571398c5908ce7e49 Mon Sep 17 00:00:00 2001
From: Intel1
Date: Fri, 7 Sep 2018 11:15:10 -0500
Subject: [PATCH] Assorted changes, round 1

cineasiaenlinea: removed, site no longer exists
repelis: updated
zentorrents: removed, site unstable
clipwatching: fix test_video_exists
thevid: new server
vevio: new server
thevideome: pattern updated
---
 .../channels/cineasiaenlinea.json             |   61 -
 plugin.video.alfa/channels/cineasiaenlinea.py |  177 --
 plugin.video.alfa/channels/repelis.py         |   34 +-
 plugin.video.alfa/channels/zentorrents.json   |   24 -
 plugin.video.alfa/channels/zentorrents.py     | 1419 -----------------
 plugin.video.alfa/servers/clipwatching.py     |    2 +-
 plugin.video.alfa/servers/thevid.json         |   42 +
 plugin.video.alfa/servers/thevid.py           |   30 +
 plugin.video.alfa/servers/thevideome.json     |    2 +-
 plugin.video.alfa/servers/vevio.json          |   42 +
 plugin.video.alfa/servers/vevio.py            |   29 +
 11 files changed, 168 insertions(+), 1694 deletions(-)
 delete mode 100755 plugin.video.alfa/channels/cineasiaenlinea.json
 delete mode 100755 plugin.video.alfa/channels/cineasiaenlinea.py
 delete mode 100755 plugin.video.alfa/channels/zentorrents.json
 delete mode 100755 plugin.video.alfa/channels/zentorrents.py
 create mode 100644 plugin.video.alfa/servers/thevid.json
 create mode 100644 plugin.video.alfa/servers/thevid.py
 create mode 100644 plugin.video.alfa/servers/vevio.json
 create mode 100644 plugin.video.alfa/servers/vevio.py

diff --git a/plugin.video.alfa/channels/cineasiaenlinea.json b/plugin.video.alfa/channels/cineasiaenlinea.json
deleted file mode 100755
index 68ea28e2..00000000
--- a/plugin.video.alfa/channels/cineasiaenlinea.json
+++ /dev/null
@@ -1,61 +0,0 @@
[61-line channel manifest deleted in full: id "cineasiaenlinea", name "CineAsiaEnLinea", movie/vos categories, settings for modo_grafico, include_in_global_search, include_in_newest_peliculas, include_in_newest_terror and a color-profile list]

diff --git a/plugin.video.alfa/channels/cineasiaenlinea.py b/plugin.video.alfa/channels/cineasiaenlinea.py
deleted file mode 100755
index 968b9095..00000000
--- a/plugin.video.alfa/channels/cineasiaenlinea.py
+++ /dev/null
@@ -1,177 +0,0 @@
[177-line channel module deleted in full: scraper for http://www.cineasiaenlinea.com/ with mainlist, configuracion, search, newest, peliculas, indices and findvideos actions built on httptools, scrapertools, servertools and tmdb]

diff --git a/plugin.video.alfa/channels/repelis.py b/plugin.video.alfa/channels/repelis.py
index ca9118b4..2a6e756d 100644
--- a/plugin.video.alfa/channels/repelis.py
+++ b/plugin.video.alfa/channels/repelis.py
@@ -14,11 +14,12 @@ from core import scrapertools
 from core import servertools
 from core import tmdb
 from core.item import Item
-from platformcode import config, logger
+from lib import jsunpack
+from platformcode import config, logger, platformtools
 
 idio = {'es-mx': 'LAT','es-es': 'ESP','en': 'VO'}
-cali = {'poor': 'SD','low': 'SD','high': 'HD'}
+cali = {'poor': 'SD','low': 'SD','medium': 'HD','high': 'HD'}
 
 list_language = idio.values()
 list_quality = ["SD","HD"]
@@ -44,9 +45,17 @@ def mainlist(item):
     itemlist.append(Item(channel = item.channel, title = "Por género", action = "generos", url = host, extra = "Genero", thumbnail = get_thumb("genres", auto = True) ))
     itemlist.append(Item(channel = item.channel, title = ""))
     itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "/search?term=", thumbnail = get_thumb("search", auto = True)))
+    itemlist.append(item.clone(title="Configurar canal...", text_color="gold", action="configuracion", folder=False))
     autoplay.show_option(item.channel, itemlist)
     return itemlist
 
+
+def configuracion(item):
+    ret = platformtools.show_channel_settings()
+    platformtools.itemlist_refresh()
+    return ret
+
+
 def destacadas(item):
     logger.info()
     itemlist = []
@@ -178,12 +187,10 @@ def findvideos(item):
     dict = jsontools.load(bloque)
     urlx = httptools.downloadpage(host + dict[0]["url"])  # the full page must be downloaded so the Cloudflare check can pass
     for datos in dict:
-        url1 = httptools.downloadpage(host + datos["url"], follow_redirects=False, only_headers=True).headers.get("location", "")
-        titulo = "Ver en: %s (" + cali[datos["quality"]] + ") (" + idio[datos["audio"]] + ")"
-        text_color = "white"
-        if "youtube" in url1:
-            titulo = "Ver trailer: %s"
-            text_color = "yellow"
+        url1 = datos["url"]
+        hostname = scrapertools.find_single_match(datos["hostname"].replace("www.",""), "(.*?)\.")
+        if hostname == "my": hostname = "mailru"
+        titulo = "Ver en: " + hostname.capitalize() + " (" + cali[datos["quality"]] + ") (" + idio[datos["audio"]] + ")"
         itemlist.append( item.clone(channel = item.channel,
                          action = "play",
                          title = titulo,
                          url = url1
                          ))
-    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
     itemlist.sort(key=lambda it: (it.language, it.server))
     tmdb.set_infoLabels(itemlist, __modo_grafico__)
     # Required by FilterTools
@@ -217,5 +223,11 @@
 
 def play(item):
-    item.thumbnail = item.contentThumbnail
-    return [item]
+    itemlist = []
+    url1 = httptools.downloadpage(host + item.url, follow_redirects=False, only_headers=True).headers.get("location", "")
+    if "storage" in url1:
+        url1 = scrapertools.find_single_match(url1, "src=(.*mp4)").replace("%3A",":").replace("%2F","/")
+    itemlist.append(item.clone(url=url1))
+    itemlist = servertools.get_servers_itemlist(itemlist)
+    itemlist[0].thumbnail = item.contentThumbnail
+    return itemlist
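
Note on the repelis play() rework above: hoster resolution now happens at playback time. findvideos() keeps the site-internal link, and play() expands it into the real hoster URL by reading the Location header of the redirect without ever following it. A minimal standalone sketch of that technique, with urllib2 standing in for core.httptools (follow_redirects=False, only_headers=True) and an illustrative URL:

    # Read a bounce URL's Location header without following the redirect,
    # mirroring what repelis.play() does through httptools.
    # Sketch only: urllib2 stands in for core.httptools.
    import urllib2

    class NoRedirect(urllib2.HTTPRedirectHandler):
        def redirect_request(self, req, fp, code, msg, headers, newurl):
            return None  # refuse to follow; the 30x then surfaces as HTTPError

    def resolve_location(url):
        opener = urllib2.build_opener(NoRedirect)
        try:
            opener.open(url)
        except urllib2.HTTPError as e:  # carries the redirect response headers
            return e.headers.get("Location", "")
        return ""  # no redirect happened

    # resolve_location("https://repelis.example/goto/123") -> hoster URL (illustrative)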
diff --git a/plugin.video.alfa/channels/zentorrents.json b/plugin.video.alfa/channels/zentorrents.json
deleted file mode 100755
index fa567d66..00000000
--- a/plugin.video.alfa/channels/zentorrents.json
+++ /dev/null
@@ -1,24 +0,0 @@
[24-line channel manifest deleted in full: id "zentorrents", name "Zentorrent", already marked "active": false, torrent/movie/tvshow categories, single include_in_global_search setting]

diff --git a/plugin.video.alfa/channels/zentorrents.py b/plugin.video.alfa/channels/zentorrents.py
deleted file mode 100755
index c633b14a..00000000
--- a/plugin.video.alfa/channels/zentorrents.py
+++ /dev/null
@@ -1,1419 +0,0 @@
[1419-line channel module deleted in full: torrent scraper for http://www.zentorrents.com/ with mainlist, search/buscador, peliculas, fanart, findvideos, play, info and info_capitulos actions pulling artwork and ratings from TMDb, TheTVDB, fanart.tv and FilmAffinity via Bing; also a custom xbmcgui TextBox2 info dialog, a mechanize-based browser() helper, bencode tokenize/decode helpers and a convert_size() utility]

diff --git a/plugin.video.alfa/servers/clipwatching.py b/plugin.video.alfa/servers/clipwatching.py
index 839c7290..2362fc4b 100644
--- a/plugin.video.alfa/servers/clipwatching.py
+++ b/plugin.video.alfa/servers/clipwatching.py
@@ -9,7 +9,7 @@ from platformcode import logger, config
 def test_video_exists(page_url):
     logger.info("(page_url='%s')" % page_url)
     data = httptools.downloadpage(page_url).data
-    if "File Not Found" in data:
+    if "File Not Found" in data or "File was deleted" in data:
         return False, config.get_localized_string(70292) % "ClipWatching"
     return True, ""
 
diff --git a/plugin.video.alfa/servers/thevid.json b/plugin.video.alfa/servers/thevid.json
new file mode 100644
index 00000000..e90af13e
--- /dev/null
+++ b/plugin.video.alfa/servers/thevid.json
@@ -0,0 +1,42 @@
+{
+    "active": true,
+    "find_videos": {
+        "ignore_urls": [],
+        "patterns": [
+            {
+                "pattern": "(thevid.net/e/\\w+)",
+                "url": "https://\\1"
+            }
+        ]
+    },
+    "free": true,
+    "id": "thevid",
+    "name": "thevid",
+    "settings": [
+        {
+            "default": false,
+            "enabled": true,
+            "id": "black_list",
+            "label": "@60654",
+            "type": "bool",
+            "visible": true
+        },
+        {
+            "default": 0,
+            "enabled": true,
+            "id": "favorites_servers_list",
+            "label": "@60655",
+            "lvalues": [
+                "No",
+                "1",
+                "2",
+                "3",
+                "4",
+                "5"
+            ],
+            "type": "list",
+            "visible": false
+        }
+    ],
+    "thumbnail": ""
+}
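
The find_videos block in a server manifest like thevid.json above is a regex-to-URL rewrite table: each pattern is matched against scraped page data and its capture groups are substituted into url to build the canonical embed link. A minimal sketch of that resolution step (the helper name is hypothetical; the real dispatch presumably lives in core.servertools):

    # Hypothetical re-implementation of the pattern/url rewrite performed with
    # manifest entries; the entry values are copied from thevid.json above.
    import re

    THEVID_ENTRY = {"pattern": r"(thevid.net/e/\w+)", "url": r"https://\1"}

    def resolve_embeds(data, entry):
        return [m.expand(entry["url"]) for m in re.finditer(entry["pattern"], data)]

    print resolve_embeds("... http://thevid.net/e/abc123 ...", THEVID_ENTRY)
    # ['https://thevid.net/e/abc123']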
"id": "favorites_servers_list", + "label": "@60655", + "lvalues": [ + "No", + "1", + "2", + "3", + "4", + "5" + ], + "type": "list", + "visible": false + } + ], + "thumbnail": "" +} diff --git a/plugin.video.alfa/servers/thevid.py b/plugin.video.alfa/servers/thevid.py new file mode 100644 index 00000000..8d9320bc --- /dev/null +++ b/plugin.video.alfa/servers/thevid.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- + +from core import httptools +from core import scrapertools +from lib import jsunpack +from platformcode import logger, config + + +def test_video_exists(page_url): + logger.info("(page_url='%s')" % page_url) + data = httptools.downloadpage(page_url).data + if "Video not found..." in data: + return False, config.get_localized_string(70292) % "Thevid" + return True, "" + + +def get_video_url(page_url, user="", password="", video_password=""): + logger.info("(page_url='%s')" % page_url) + data = httptools.downloadpage(page_url).data + packed = scrapertools.find_multiple_matches(data, "(?s)") + for pack in packed: + unpacked = jsunpack.unpack(pack) + if "file" in unpacked: + videos = scrapertools.find_multiple_matches(unpacked, 'file.="(//[^"]+)') + video_urls = [] + for video in videos: + video = "https:" + video + video_urls.append(["mp4 [Thevid]", video]) + logger.info("Url: %s" % videos) + return video_urls diff --git a/plugin.video.alfa/servers/thevideome.json b/plugin.video.alfa/servers/thevideome.json index 568f0c90..4fb0f381 100755 --- a/plugin.video.alfa/servers/thevideome.json +++ b/plugin.video.alfa/servers/thevideome.json @@ -4,7 +4,7 @@ "ignore_urls": [], "patterns": [ { - "pattern": "(?:thevideo.me|tvad.me|thevid.net|thevideo.ch|thevideo.us)/(?:embed-|)([A-z0-9]+)", + "pattern": "(?:thevideo.me|tvad.me|thevideo.ch|thevideo.us)/(?:embed-|)([A-z0-9]+)", "url": "https://thevideo.me/embed-\\1.html" } ] diff --git a/plugin.video.alfa/servers/vevio.json b/plugin.video.alfa/servers/vevio.json new file mode 100644 index 00000000..d91e95bf --- /dev/null +++ b/plugin.video.alfa/servers/vevio.json @@ -0,0 +1,42 @@ +{ + "active": true, + "find_videos": { + "ignore_urls": [], + "patterns": [ + { + "pattern": "(vev.io/embed/[A-z0-9]+)", + "url": "https://\\1" + } + ] + }, + "free": true, + "id": "vevio", + "name": "vevio", + "settings": [ + { + "default": false, + "enabled": true, + "id": "black_list", + "label": "@60654", + "type": "bool", + "visible": true + }, + { + "default": 0, + "enabled": true, + "id": "favorites_servers_list", + "label": "@60655", + "lvalues": [ + "No", + "1", + "2", + "3", + "4", + "5" + ], + "type": "list", + "visible": false + } + ], + "thumbnail": "https://s8.postimg.cc/opp2c3p6d/vevio1.png" +} diff --git a/plugin.video.alfa/servers/vevio.py b/plugin.video.alfa/servers/vevio.py new file mode 100644 index 00000000..3f74f993 --- /dev/null +++ b/plugin.video.alfa/servers/vevio.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- + +import urllib +from core import httptools +from core import scrapertools +from platformcode import logger, config + + +def test_video_exists(page_url): + logger.info("(page_url='%s')" % page_url) + data = httptools.downloadpage(page_url).data + if "File was deleted" in data or "Page Cannot Be Found" in data or "Video not found" in data: + return False, "[vevio] El archivo ha sido eliminado o no existe" + return True, "" + + +def get_video_url(page_url, premium=False, user="", password="", video_password=""): + logger.info("url=" + page_url) + video_urls = [] + post = {} + post = urllib.urlencode(post) + url = page_url + data = 
diff --git a/plugin.video.alfa/servers/vevio.json b/plugin.video.alfa/servers/vevio.json
new file mode 100644
index 00000000..d91e95bf
--- /dev/null
+++ b/plugin.video.alfa/servers/vevio.json
@@ -0,0 +1,42 @@
+{
+    "active": true,
+    "find_videos": {
+        "ignore_urls": [],
+        "patterns": [
+            {
+                "pattern": "(vev.io/embed/[A-z0-9]+)",
+                "url": "https://\\1"
+            }
+        ]
+    },
+    "free": true,
+    "id": "vevio",
+    "name": "vevio",
+    "settings": [
+        {
+            "default": false,
+            "enabled": true,
+            "id": "black_list",
+            "label": "@60654",
+            "type": "bool",
+            "visible": true
+        },
+        {
+            "default": 0,
+            "enabled": true,
+            "id": "favorites_servers_list",
+            "label": "@60655",
+            "lvalues": [
+                "No",
+                "1",
+                "2",
+                "3",
+                "4",
+                "5"
+            ],
+            "type": "list",
+            "visible": false
+        }
+    ],
+    "thumbnail": "https://s8.postimg.cc/opp2c3p6d/vevio1.png"
+}

diff --git a/plugin.video.alfa/servers/vevio.py b/plugin.video.alfa/servers/vevio.py
new file mode 100644
index 00000000..3f74f993
--- /dev/null
+++ b/plugin.video.alfa/servers/vevio.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+
+import urllib
+from core import httptools
+from core import scrapertools
+from platformcode import logger, config
+
+
+def test_video_exists(page_url):
+    logger.info("(page_url='%s')" % page_url)
+    data = httptools.downloadpage(page_url).data
+    if "File was deleted" in data or "Page Cannot Be Found" in data or "Video not found" in data:
+        return False, "[vevio] El archivo ha sido eliminado o no existe"
+    return True, ""
+
+
+def get_video_url(page_url, premium=False, user="", password="", video_password=""):
+    logger.info("url=" + page_url)
+    video_urls = []
+    post = urllib.urlencode({})  # empty body; the API endpoint expects a POST
+    video_id = scrapertools.find_single_match(page_url, "embed/([A-z0-9]+)")
+    data = httptools.downloadpage("https://vev.io/api/serve/video/" + video_id, post=post).data
+    bloque = scrapertools.find_single_match(data, 'qualities":\{(.*?)\}')
+    matches = scrapertools.find_multiple_matches(bloque, '"([^"]+)":"([^"]+)')
+    for res, media_url in matches:
+        video_urls.append(
+            [scrapertools.get_filename_from_url(media_url)[-4:] + " (" + res + ") [vevio.me]", media_url])
+    return video_urls
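
The vev.io response that vevio.get_video_url() scrapes with regexes is JSON, so the qualities map could equally be read with the json module. A hedged sketch of the same request; the response shape ({"qualities": {...}}) is inferred from the regexes above, not from any vev.io documentation:

    # Same API call as vevio.py, parsed as JSON instead of with regexes.
    # Assumption: the endpoint returns {"qualities": {"<res>": "<mp4 url>", ...}}.
    import json
    import urllib
    import urllib2

    def vevio_qualities(video_id):
        req = urllib2.Request("https://vev.io/api/serve/video/" + video_id,
                              data=urllib.urlencode({}))  # empty body still forces a POST
        return json.loads(urllib2.urlopen(req).read()).get("qualities", {})

    # vevio_qualities("abc123") -> {"720": "https://...mp4", ...} (illustrative)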