From ed41edc22d2476b146540835e10e12526d068d44 Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Fri, 9 Feb 2018 12:18:01 -0500 Subject: [PATCH 01/11] tmdb: fix tvshow not found --- plugin.video.alfa/core/tmdb.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/plugin.video.alfa/core/tmdb.py b/plugin.video.alfa/core/tmdb.py index d945e003..a318cc7d 100755 --- a/plugin.video.alfa/core/tmdb.py +++ b/plugin.video.alfa/core/tmdb.py @@ -1258,13 +1258,13 @@ class Tmdb(object): self.temporada[numtemporada] = {"status_code": 15, "status_message": "Failed"} self.temporada[numtemporada] = {"episodes": {}} - # if "status_code" in self.temporada[numtemporada]: - # # Se ha producido un error - # msg = "La busqueda de " + buscando + " no dio resultados." - # msg += "\nError de tmdb: %s %s" % ( - # self.temporada[numtemporada]["status_code"], self.temporada[numtemporada]["status_message"]) - # logger.debug(msg) - # self.temporada[numtemporada] = {"episodes": {}} + if "status_code" in self.temporada[numtemporada]: + #Se ha producido un error + msg = "La busqueda de " + buscando + " no dio resultados." + msg += "\nError de tmdb: %s %s" % ( + self.temporada[numtemporada]["status_code"], self.temporada[numtemporada]["status_message"]) + logger.debug(msg) + self.temporada[numtemporada] = {"episodes": {}} return self.temporada[numtemporada] From 16e62184a857555075e07a79c82a0866604fc5c6 Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Sat, 10 Feb 2018 09:07:26 -0500 Subject: [PATCH 02/11] peliculasnu: web no existe --- plugin.video.alfa/channels/peliculasnu.json | 76 --------------------- 1 file changed, 76 deletions(-) delete mode 100755 plugin.video.alfa/channels/peliculasnu.json diff --git a/plugin.video.alfa/channels/peliculasnu.json b/plugin.video.alfa/channels/peliculasnu.json deleted file mode 100755 index ba170ee8..00000000 --- a/plugin.video.alfa/channels/peliculasnu.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "id": "peliculasnu", - "name": "Peliculas.Nu", - "language": ["cast", "lat"], - "active": true, - "adult": false, - "thumbnail": "http://i.imgur.com/2iupwXE.png", - "banner": "peliculasnu.png", - "categories": [ - "movie", - "vos" - ], - "settings": [ - { - "id": "include_in_global_search", - "type": "bool", - "label": "Incluir en busqueda global", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "include_in_newest_peliculas", - "type": "bool", - "label": "Incluir en Novedades - Películas", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "include_in_newest_terror", - "type": "bool", - "label": "Incluir en Novedades - terror", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "include_in_newest_castellano", - "type": "bool", - "label": "Incluir en Novedades - Castellano", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "include_in_newest_latino", - "type": "bool", - "label": "Incluir en Novedades - Latino", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "modo_grafico", - "type": "bool", - "label": "Buscar información extra", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "perfil", - "type": "list", - "label": "Perfil de color", - "default": 2, - "enabled": true, - "visible": true, - "llvalues": [ - "Perfil 3", - "Perfil 2", - "Perfil 1" - ] - } - ] -} \ No newline at end of file From 726d79e2e226f5a5177a5ddb702ab865642838c4 Mon Sep 17 00:00:00 2001 
From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Sat, 10 Feb 2018 09:07:40 -0500 Subject: [PATCH 03/11] peliculasnu: web no existe --- plugin.video.alfa/channels/peliculasnu.py | 284 ---------------------- 1 file changed, 284 deletions(-) delete mode 100644 plugin.video.alfa/channels/peliculasnu.py diff --git a/plugin.video.alfa/channels/peliculasnu.py b/plugin.video.alfa/channels/peliculasnu.py deleted file mode 100644 index 10f9f5a6..00000000 --- a/plugin.video.alfa/channels/peliculasnu.py +++ /dev/null @@ -1,284 +0,0 @@ -# -*- coding: utf-8 -*- - -import urllib - -from core import httptools -from core import jsontools -from core import scrapertools -from core import servertools -from core import tmdb -from core.item import Item -from platformcode import config, logger - -__modo_grafico__ = config.get_setting("modo_grafico", "peliculasnu") -__perfil__ = config.get_setting("perfil", "peliculasnu") - -# Fijar perfil de color -perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'], - ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'], - ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']] -color1, color2, color3 = perfil[__perfil__] -host = "http://peliculas.nu/" - - -def mainlist(item): - logger.info() - itemlist = [] - item.text_color = color1 - - itemlist.append(item.clone(title="Novedades", action="entradas", url=host, fanart="http://i.imgur.com/c3HS8kj.png")) - itemlist.append(item.clone(title="Más Vistas", action="entradas", url=host + "mas-vistas", - fanart="http://i.imgur.com/c3HS8kj.png")) - itemlist.append(item.clone(title="Mejor Valoradas", action="entradas", url=host + "mejor-valoradas", - fanart="http://i.imgur.com/c3HS8kj.png")) - item.text_color = color2 - itemlist.append(item.clone(title="En Español", action="entradas", url=host + "?s=Español", - fanart="http://i.imgur.com/c3HS8kj.png")) - itemlist.append(item.clone(title="En Latino", action="entradas", url=host + "?s=Latino", - fanart="http://i.imgur.com/c3HS8kj.png")) - itemlist.append( - item.clone(title="En VOSE", action="entradas", url=host + "?s=VOSE", fanart="http://i.imgur.com/c3HS8kj.png")) - item.text_color = color3 - itemlist.append(item.clone(title="Por género", action="indices", fanart="http://i.imgur.com/c3HS8kj.png")) - itemlist.append(item.clone(title="Por letra", action="indices", fanart="http://i.imgur.com/c3HS8kj.png")) - - itemlist.append(item.clone(title="", action="")) - itemlist.append(item.clone(title="Buscar...", action="search")) - itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False)) - - return itemlist - - -def configuracion(item): - from platformcode import platformtools - ret = platformtools.show_channel_settings() - platformtools.itemlist_refresh() - return ret - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - try: - item.url = "%s?s=%s" % (host, texto) - item.action = "entradas" - return entradas(item) - # Se captura la excepción, para no interrumpir al buscador global si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def newest(categoria): - logger.info() - itemlist = [] - item = Item() - try: - if categoria == "peliculas": - item.url = host - elif categoria == "terror": - item.url = host+"terror/" - elif categoria == 'castellano': - item.url = host + "?s=Español" - elif categoria == 'latino': - item.url = host + "?s=Latino" - - - item.from_newest = True - item.action = "entradas" - itemlist = entradas(item) - - if itemlist[-1].action == 
"entradas": - itemlist.pop() - - # Se captura la excepción, para no interrumpir al canal novedades si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - return itemlist - - -def entradas(item): - logger.info() - itemlist = [] - - data = httptools.downloadpage(item.url).data - patron = '
.*?href="([^"]+)".*?src="([^"]+)".*?class="Title">([^<]+)<.*?' \
              '.*?"Date AAIco-date_range">(\d+).*?class="Qlty">([^<]+)<.*?
    15: - itemlist.append(item.clone(title=">> Página Siguiente", extra="next", text_color=color3)) - elif item.extra == "next": - next_page = scrapertools.find_single_match(data, '(.*?)') - matches = scrapertools.find_multiple_matches(bloque, '([^<]+)') - for scrapedurl, scrapedtitle in matches: - itemlist.append(item.clone(action=action, url=scrapedurl, title=scrapedtitle)) - - return itemlist - - -def findvideos(item): - logger.info() - itemlist = [] - - tmdb.set_infoLabels_item(item, __modo_grafico__) - data = httptools.downloadpage(item.url).data - - if not item.infoLabels["plot"]: - item.infoLabels["plot"] = scrapertools.find_single_match(data, '

.*?(.*?)
    ') - fanart = scrapertools.find_single_match(data, '|\}\)\))') - if not packed: - packed = data - data_js = jsunpack.unpack(packed) - - subtitle = scrapertools.find_single_match(data_js, 'tracks:\[\{"file":"([^"]+)"') - patron = '{"file":\s*"([^"]+)","label":\s*"([^"]+)","type":\s*"video/([^"]+)"' - matches = scrapertools.find_multiple_matches(data_js, patron) - for url, calidad, extension in matches: - url = url.replace(",", "%2C") - title = ".%s %s [directo]" % (extension, calidad) - itemlist.insert(0, [title, url, 0, subtitle]) - else: - return [item] - - return itemlist From 923d4109ca59133fa461996292180aaa86a92daf Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Sat, 10 Feb 2018 09:33:12 -0500 Subject: [PATCH 04/11] seriesblanco: adicionado tmdb --- plugin.video.alfa/channels/seriesblanco.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/plugin.video.alfa/channels/seriesblanco.py b/plugin.video.alfa/channels/seriesblanco.py index 23f89af6..529d9fe1 100644 --- a/plugin.video.alfa/channels/seriesblanco.py +++ b/plugin.video.alfa/channels/seriesblanco.py @@ -10,6 +10,7 @@ from core import scrapertoolsV2 from core import servertools from core.item import Item from platformcode import config, logger +from core import tmdb from channels import autoplay @@ -108,7 +109,7 @@ def extract_series_from_data(item, data): context.extend(context2) itemlist.append(item.clone(title=name, url=urlparse.urljoin(HOST, url), - action=action, show=name, + action=action, show=name, contentSerieName=name, thumbnail=img, context=context)) @@ -121,6 +122,7 @@ def extract_series_from_data(item, data): # logger.debug("Adding previous page item") itemlist.append(item.clone(title="<< Anterior", extra=item.extra - 1)) + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) return itemlist @@ -189,7 +191,8 @@ def search(item, texto): for url, img, title in shows: title = title.strip() itemlist.append(item.clone(title=title, url=urlparse.urljoin(HOST, url), action="episodios", show=title, - thumbnail=img, context=filtertools.context(item, list_idiomas, CALIDADES))) + thumbnail=img, context=filtertools.context(item, list_idiomas, CALIDADES), + contentSerieName=title)) # Se captura la excepción, para no interrumpir al buscador global si un canal falla except: @@ -222,12 +225,18 @@ def episodios(item): re.findall("banderas/([^\.]+)", flags, re.MULTILINE)]) filter_lang = idiomas.replace("[", "").replace("]", "").split(" ") display_title = "%s - %s %s" % (item.show, title, idiomas) + + season_episode = scrapertoolsV2.get_season_and_episode(title).split('x') + item.infoLabels['season']= season_episode[0] + item.infoLabels['episode'] = season_episode[1] # logger.debug("Episode found %s: %s" % (display_title, urlparse.urljoin(HOST, url))) itemlist.append(item.clone(title=display_title, url=urlparse.urljoin(HOST, url), action="findvideos", plot=plot, fanart=fanart, language=filter_lang)) + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) itemlist = filtertools.get_links(itemlist, item, list_idiomas, CALIDADES) + if config.get_videolibrary_support() and len(itemlist) > 0: itemlist.append( item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios")) From da8246b56d0beaa00612f3290345da74e320685e Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Sat, 10 Feb 2018 14:39:26 -0500 Subject: [PATCH 05/11] rapidvideo: update --- plugin.video.alfa/servers/rapidvideo.py | 20 
+++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/plugin.video.alfa/servers/rapidvideo.py b/plugin.video.alfa/servers/rapidvideo.py index b92df605..57ff5c27 100755 --- a/plugin.video.alfa/servers/rapidvideo.py +++ b/plugin.video.alfa/servers/rapidvideo.py @@ -35,11 +35,17 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= data = httptools.downloadpage(page_url).data patron = 'https://www.rapidvideo.com/e/[^"]+' match = scrapertools.find_multiple_matches(data, patron) - for url1 in match: - res = scrapertools.find_single_match(url1, '=(\w+)') - data = httptools.downloadpage(url1).data - url = scrapertools.find_single_match(data, 'source src="([^"]+)') - ext = scrapertools.get_filename_from_url(url)[-4:] - video_urls.append(['%s %s [rapidvideo]' % (ext, res), url]) - + if match: + for url1 in match: + res = scrapertools.find_single_match(url1, '=(\w+)') + data = httptools.downloadpage(url1).data + url = scrapertools.find_single_match(data, 'source src="([^"]+)') + ext = scrapertools.get_filename_from_url(url)[-4:] + video_urls.append(['%s %s [rapidvideo]' % (ext, res), url]) + else: + patron = 'data-setup.*?src="([^"]+)".*?' + patron += 'type="([^"]+)"' + match = scrapertools.find_multiple_matches(data, patron) + for url, ext in match: + video_urls.append(['%s [rapidvideo]' % (ext), url]) return video_urls From 430c6793b54fa00912777c5ff9756d6d3b33aa43 Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Tue, 13 Feb 2018 10:10:54 -0500 Subject: [PATCH 06/11] kbagi: fix colecciones --- plugin.video.alfa/channels/kbagi.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/plugin.video.alfa/channels/kbagi.py b/plugin.video.alfa/channels/kbagi.py index 325c9547..1fbd4ceb 100644 --- a/plugin.video.alfa/channels/kbagi.py +++ b/plugin.video.alfa/channels/kbagi.py @@ -134,14 +134,14 @@ def listado(item): data = re.sub(r"\n|\r|\t|\s{2}| |
    ", "", data) folder = filetools.join(config.get_data_path(), 'thumbs_kbagi') - patron = '
(.*?)'
+    patron = 'data-file-id(.*?)'
     bloques = scrapertools.find_multiple_matches(data, patron)
     for block in bloques:
         if "adult_info" in block and not adult_content:
             continue
-        size = scrapertools.find_single_match(block, '([^<]+)')
-        scrapedurl, scrapedtitle = scrapertools.find_single_match(block,
-                                                                  ([^<]+)<')
+        size = scrapertools.find_single_match(block, '([^<]+)')
+        patron = 'class="name">
    ([^<]+)<' + scrapedurl, scrapedtitle = scrapertools.find_single_match(block, patron) scrapedthumbnail = scrapertools.find_single_match(block, "background-image:url\('([^']+)'") if scrapedthumbnail: try: @@ -161,7 +161,6 @@ def listado(item): else: scrapedthumbnail = item.extra + "/img/file_types/gallery/movie.png" - scrapedurl = item.extra + scrapedurl title = "%s (%s)" % (scrapedtitle, size) if "adult_info" in block: From 3e89264230e30668231fb7867779b4619eefa867 Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Tue, 13 Feb 2018 10:27:21 -0500 Subject: [PATCH 07/11] Update kbagi.py --- plugin.video.alfa/channels/kbagi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin.video.alfa/channels/kbagi.py b/plugin.video.alfa/channels/kbagi.py index 1fbd4ceb..c356cc38 100644 --- a/plugin.video.alfa/channels/kbagi.py +++ b/plugin.video.alfa/channels/kbagi.py @@ -185,7 +185,7 @@ def listado(item): itemlist.append(new_item) - next_page = scrapertools.find_single_match(data, '
Date: Fri, 16 Feb 2018 08:50:11 -0500
Subject: [PATCH 09/11] doomtv: fix

---
 plugin.video.alfa/channels/doomtv.py | 36 +++++++++++++++++++---------
 1 file changed, 25 insertions(+), 11 deletions(-)

diff --git a/plugin.video.alfa/channels/doomtv.py b/plugin.video.alfa/channels/doomtv.py
index a5f1bd9c..62aa0f89 100644
--- a/plugin.video.alfa/channels/doomtv.py
+++ b/plugin.video.alfa/channels/doomtv.py
@@ -7,6 +7,7 @@ import urlparse
 from channels import autoplay
 from channels import filtertools
 from core import httptools
+from core import jsontools
 from core import scrapertools
 from core import servertools
 from core import tmdb
 from platformcode import config, logger
@@ -219,23 +220,38 @@ def newest(categoria):
     return itemlist
+def get_vip(item, url):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(url+'/videocontent').data
+    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+    video_id = scrapertools.find_single_match(data, 'id=videoInfo >(.*?)')
+    new_url = 'https://v.d0stream.com/api/videoinfo/%s?src-url=https://Fv.d0stream.com' % video_id
+    json_data = httptools.downloadpage(new_url).data
+    dict_data = jsontools.load(json_data)
+    sources = dict_data['sources']
+
+    for vip_item in sources['mp4_cdn']:
+        vip_url= vip_item['url']
+        vip_quality = vip_item['label']
+        title ='%s [%s]' % (item.title, vip_quality)
+        itemlist.append(item.clone(title = title, url=vip_url, action='play', quality=vip_quality, server='directo'))
+
+    return itemlist
+
 def findvideos(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url).data
     data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
-    player_vip = scrapertools.find_single_match(data, 'src=(https:\/\/content.jwplatform.com\/players.*?js)')
-    data_m3u8 = httptools.downloadpage(player_vip, headers= {'referer':item.url}).data
-    data_m3u8 = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data_m3u8)
-    url_m3u8 = scrapertools.find_single_match(data_m3u8,',sources:.*?file: (.*?),')
-    itemlist.append(item.clone(url=url_m3u8, action='play'))
+    player_vip = scrapertools.find_single_match(data, 'class=movieplay>
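The get_vip() helper introduced in the doomtv patch above walks the JSON returned by the d0stream videoinfo endpoint and turns every entry under sources['mp4_cdn'] into a playable item. Below is a minimal, self-contained sketch of that parsing step; the sample payload shape and the build_vip_entries name are illustrative assumptions inferred from the fields the patch reads, not part of the Alfa code.

# Sketch only: mirrors the mp4_cdn parsing done by get_vip() in the doomtv patch.
# The payload layout ({"sources": {"mp4_cdn": [{"url": ..., "label": ...}]}}) is
# assumed from the keys the patch accesses; function and variable names are hypothetical.
import json


def build_vip_entries(json_text, base_title):
    """Return (title, url, quality) tuples, one per mp4_cdn source."""
    data = json.loads(json_text)
    entries = []
    for source in data.get("sources", {}).get("mp4_cdn", []):
        url = source.get("url")
        quality = source.get("label", "")
        if not url:
            continue
        entries.append(("%s [%s]" % (base_title, quality), url, quality))
    return entries


if __name__ == "__main__":
    sample = '{"sources": {"mp4_cdn": [{"url": "http://example.com/v.mp4", "label": "720p"}]}}'
    print(build_vip_entries(sample, "Example movie"))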