From c43162cbc2234c94388ac6c134bae494d5550e62 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Sat, 28 Oct 2017 08:40:11 -0500
Subject: [PATCH 1/5] flashx: lo dicho!!!
---
plugin.video.alfa/servers/flashx.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/plugin.video.alfa/servers/flashx.py b/plugin.video.alfa/servers/flashx.py
index 7326085a..fd654da7 100644
--- a/plugin.video.alfa/servers/flashx.py
+++ b/plugin.video.alfa/servers/flashx.py
@@ -33,11 +33,11 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
'Cookie': ''}
data = httptools.downloadpage(page_url, headers=headers, replace_headers=True).data
data = data.replace("\n","")
- cgi_counter = scrapertools.find_single_match(data, '(?is)src=.(https://www.flashx.tv/counter.cgi.*?fx=[0-9a-zA-Z=]+)')
+ cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.tv/counter.cgi.*?[^'"]+)""")
cgi_counter = cgi_counter.replace("%0A","").replace("%22","")
playnow = scrapertools.find_single_match(data, 'https://www.flashx.tv/dl[^"]+')
# Para obtener el f y el fxfx
- js_fxfx = scrapertools.find_single_match(data, '(?is)src=.(https://www.flashx.tv/js/code.js.*?=[0-9]+)')
+ js_fxfx = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.tv/js/code.js.*?[^'"]+)""")
data_fxfx = httptools.downloadpage(js_fxfx).data
mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'","").replace(" ","")
matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
From 9a1effbe25a432ca1598b9ec73390ac01e55940a Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Sat, 28 Oct 2017 10:58:15 -0500
Subject: [PATCH 2/5] =?UTF-8?q?cinetux:=20mostrar=20cantidad=20de=20pel?=
=?UTF-8?q?=C3=ADculas?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
plugin.video.alfa/channels/cinetux.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/plugin.video.alfa/channels/cinetux.py b/plugin.video.alfa/channels/cinetux.py
index a2c86f67..3bcc11e5 100644
--- a/plugin.video.alfa/channels/cinetux.py
+++ b/plugin.video.alfa/channels/cinetux.py
@@ -28,9 +28,9 @@ def mainlist(item):
itemlist = []
item.viewmode = viewmode
- data = httptools.downloadpage(CHANNEL_HOST).data
- total = scrapertools.find_single_match(data, "TENEMOS\s(.*?)")
- titulo = "Peliculas"
+ data = httptools.downloadpage(CHANNEL_HOST + "pelicula").data
+ total = scrapertools.find_single_match(data, "Películas\s*([\d.,]+)")
+ titulo = "Peliculas (%s)" %total
itemlist.append(item.clone(title=titulo, text_color=color2, action="", text_bold=True))
itemlist.append(item.clone(action="peliculas", title=" Novedades", url=CHANNEL_HOST + "pelicula",
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres"
From fc58c717eb70943bddf415f0cc5611a94a2a1d22 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Sat, 28 Oct 2017 11:17:48 -0500
Subject: [PATCH 3/5] plusdede: actualizado findvideos
---
plugin.video.alfa/channels/plusdede.py | 61 +-------------------------
1 file changed, 2 insertions(+), 59 deletions(-)
diff --git a/plugin.video.alfa/channels/plusdede.py b/plugin.video.alfa/channels/plusdede.py
index 227023c6..a1467884 100644
--- a/plugin.video.alfa/channels/plusdede.py
+++ b/plugin.video.alfa/channels/plusdede.py
@@ -25,7 +25,6 @@ color1, color2, color3 = ['0xFFB10021', '0xFFB10021', '0xFFB10004']
def login():
url_origen = "https://www.plusdede.com/login?popup=1"
data = httptools.downloadpage(url_origen, follow_redirects=True).data
- logger.debug("dataPLUSDEDE=" + data)
if re.search(r'(?i)%s' % config.get_setting("plusdedeuser", "plusdede"), data):
return True
@@ -34,12 +33,10 @@ def login():
post = "_token=" + str(token) + "&email=" + str(
config.get_setting("plusdedeuser", "plusdede")) + "&password=" + str(
config.get_setting("plusdedepassword", "plusdede")) + "&app=2131296469"
- # logger.debug("dataPLUSDEDE_POST="+post)
url = "https://www.plusdede.com/"
headers = {"Referer": url, "X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": token}
data = httptools.downloadpage("https://www.plusdede.com/login", post=post, headers=headers,
replace_headers=False).data
- logger.debug("PLUSDEDE_DATA=" + data)
if "redirect" in data:
return True
else:
@@ -183,7 +180,6 @@ def generos(item):
tipo = item.url.replace("https://www.plusdede.com/", "")
# Descarga la pagina
data = httptools.downloadpage(item.url).data
- logger.debug("data=" + data)
# Extrae las entradas (carpetas)
data = scrapertools.find_single_match(data,
@@ -198,7 +194,6 @@ def generos(item):
plot = ""
# https://www.plusdede.com/pelis?genre_id=1
url = "https://www.plusdede.com/" + tipo + "?genre_id=" + id_genere
- logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot,
fulltitle=title))
@@ -229,11 +224,9 @@ def buscar(item):
# Descarga la pagina
headers = {"X-Requested-With": "XMLHttpRequest"}
data = httptools.downloadpage(item.url, headers=headers).data
- logger.debug("data=" + data)
# Extrae las entradas (carpetas)
json_object = jsontools.load(data)
- logger.debug("content=" + json_object["content"])
data = json_object["content"]
return parse_mixed_results(item, data)
@@ -248,7 +241,6 @@ def parse_mixed_results(item, data):
patron += '.*?
([^<]+)
+'
patron += '.*? ([^<]+)
'
matches = re.compile(patron, re.DOTALL).findall(data)
- logger.debug("PARSE_DATA:" + data)
if item.tipo == "lista":
following = scrapertools.find_single_match(data, '')
data_id = scrapertools.find_single_match(data, 'data-model="10" data-id="([^"]+)">')
@@ -286,7 +278,6 @@ def parse_mixed_results(item, data):
sectionStr = "docu"
referer = urlparse.urljoin(item.url, scrapedurl)
url = urlparse.urljoin(item.url, scrapedurl)
- logger.debug("PELII_title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
if item.tipo != "series":
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, extra=referer, url=url,
thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, fanart=fanart,
@@ -294,7 +285,6 @@ def parse_mixed_results(item, data):
else:
referer = item.url
url = urlparse.urljoin(item.url, scrapedurl)
- logger.debug("SERIE_title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
if item.tipo != "pelis":
itemlist.append(Item(channel=item.channel, action="episodios", title=title, extra=referer, url=url,
thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, show=title, fanart=fanart,
@@ -304,7 +294,6 @@ def parse_mixed_results(item, data):
'
')
if next_page != "":
url = urlparse.urljoin("https://www.plusdede.com", next_page).replace("amp;", "")
- logger.debug("URL_SIGUIENTE:" + url)
itemlist.append(
Item(channel=item.channel, action="pag_sig", token=item.token, title=">> Página siguiente",
extra=item.extra, url=url))
@@ -323,7 +312,6 @@ def siguientes(item): # No utilizada
# Descarga la pagina
data = httptools.downloadpage(item.url).data
- logger.debug("data=" + data)
# Extrae las entradas (carpetas)
bloque = scrapertools.find_single_match(data, '
Siguiendo
(.*?)
')
@@ -358,7 +346,6 @@ def siguientes(item): # No utilizada
Item(channel=item.channel, action="episodio", title=title, url=url, thumbnail=thumbnail, plot=plot,
fulltitle=title, show=title, fanart=fanart, extra=session + "|" + episode))
- logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
return itemlist
@@ -369,7 +356,6 @@ def episodio(item):
# Descarga la pagina
data = httptools.downloadpage(item.url).data
- # logger.debug("data="+data)
session = str(int(item.extra.split("|")[0]))
episode = str(int(item.extra.split("|")[1]))
@@ -377,7 +363,6 @@ def episodio(item):
matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data)
for bloque_episodios in matchestemporadas:
- logger.debug("bloque_episodios=" + bloque_episodios)
# Extrae los episodios
patron = '(\s*
\s*
]*>]*>[^<]*]*>[^<]*]*>
)?'
@@ -401,7 +386,6 @@ def episodio(item):
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
fulltitle=title, fanart=item.fanart, show=item.show))
- logger.debug("Abrimos title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist2 = []
for capitulo in itemlist:
@@ -415,11 +399,9 @@ def peliculas(item):
# Descarga la pagina
headers = {"X-Requested-With": "XMLHttpRequest"}
data = httptools.downloadpage(item.url, headers=headers).data
- logger.debug("data_DEF_PELICULAS=" + data)
# Extrae las entradas (carpetas)
json_object = jsontools.load(data)
- logger.debug("html=" + json_object["content"])
data = json_object["content"]
return parse_mixed_results(item, data)
@@ -432,24 +414,18 @@ def episodios(item):
# Descarga la pagina
idserie = ''
data = httptools.downloadpage(item.url).data
- # logger.debug("dataEPISODIOS="+data)
patrontemporada = '
').replace(
" ", "").replace("\n", "")
- logger.debug("calidad_video=" + calidad_video)
calidad_audio = scrapertools.find_single_match(match,
'
(.*?)
').replace(
" ", "").replace("\n", "")
- logger.debug("calidad_audio=" + calidad_audio)
thumb_servidor = scrapertools.find_single_match(match, '

')
- logger.debug("thumb_servidor=" + thumb_servidor)
nombre_servidor = scrapertools.find_single_match(thumb_servidor, "hosts/([^\.]+).png")
- logger.debug("nombre_servidor=" + nombre_servidor)
if jdown != '':
title = "Download " + nombre_servidor + " (" + idioma + ") (Calidad " + calidad_video.strip() + ", audio " + calidad_audio.strip() + ")"
@@ -696,7 +652,6 @@ def findvideos(item, verTodos=False):
url = urlparse.urljoin(item.url, scrapertools.find_single_match(match, 'href="([^"]+)"'))
thumbnail = thumb_servidor
plot = ""
- logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
if sortlinks > 0:
# orden1 para dejar los "downloads" detras de los "ver" al ordenar
# orden2 segun configuración
@@ -788,13 +743,10 @@ def play(item):
headers = {'Referer': item.extra}
data = httptools.downloadpage(item.url, headers=headers).data
- # logger.debug("dataLINK="+data)
url = scrapertools.find_single_match(data,
'
')
url = urlparse.urljoin("https://www.plusdede.com", url)
- # logger.debug("DATA_LINK_FINAL:"+url)
- logger.debug("URL_PLAY:" + url)
headers = {'Referer': item.url}
media_url = httptools.downloadpage(url, headers=headers, follow_redirects=False).headers.get("location")
# logger.info("media_url="+media_url)
@@ -808,7 +760,6 @@ def play(item):
videoitem.channel = item.channel
# Marcar como visto
- logger.debug(item)
checkseen(item)
return itemlist
@@ -827,7 +778,6 @@ def checkseen(item):
tipo_str = "pelis"
headers = {"Referer": "https://www.plusdede.com/" + tipo_str, "X-Requested-With": "XMLHttpRequest",
"X-CSRF-TOKEN": item.token}
- logger.debug("Entrando a checkseen " + url_temp + item.token)
data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers, replace_headers=True).data
return True
@@ -836,7 +786,6 @@ def infosinopsis(item):
logger.info()
data = httptools.downloadpage(item.url).data
- logger.debug("SINOPSISdata=" + data)
scrapedtitle = scrapertools.find_single_match(data, '([^<]+)
')
scrapedvalue = scrapertools.find_single_match(data, '([^<]+)')
@@ -845,11 +794,8 @@ def infosinopsis(item):
scrapedduration = scrapertools.htmlclean(scrapertools.find_single_match(data,
'Duración\s*([^<]+)
').strip().replace(
" ", "").replace("\n", ""))
- logger.debug(scrapedduration)
scrapedplot = scrapertools.find_single_match(data, '([^<]+)
Género\s*
')
- logger.debug("generos=" + generos)
scrapedgenres = re.compile('
([^<]+)', re.DOTALL).findall(generos)
scrapedcasting = re.compile(
'
([^<]+)
\s*
\s*([^<]+)
',
@@ -954,7 +900,6 @@ def plusdede_check(item):
if item.tipo_esp == "lista":
url_temp = "https://www.plusdede.com/listas/addmediapopup/" + item.tipo + "/" + item.idtemp + "?popup=1"
data = httptools.downloadpage(url_temp).data
- logger.debug("DATA_CHECK_LISTA:" + data)
patron = '
+'
patron += '.*?
([^<]+)+'
@@ -986,8 +931,6 @@ def plusdede_check(item):
"X-CSRF-TOKEN": item.token}
data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers,
replace_headers=True).data.strip()
- logger.debug("URL_PLUSDEDECHECK_DATA=" + url_temp + " ITEM:TIPO=" + item.tipo)
- logger.debug("PLUSDEDECHECK_DATA=" + data)
dialog = platformtools
dialog.ok = platformtools.dialog_ok
if data == "1":
@@ -1002,4 +945,4 @@ def plusdede_check(item):
elif item.tipo_esp == "add_list":
dialog.ok('SUCCESS', 'Añadido a la lista!')
else:
- dialog.ok('ERROR', 'No se pudo realizar la acción!')
\ No newline at end of file
+ dialog.ok('ERROR', 'No se pudo realizar la acción!')
From ae7a4a8d837f307b423a0a11026ac0b8a1e0f140 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Sat, 28 Oct 2017 11:26:08 -0500
Subject: [PATCH 4/5] rapidvideo: actualizado test_video_exists
---
plugin.video.alfa/servers/rapidvideo.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/plugin.video.alfa/servers/rapidvideo.py b/plugin.video.alfa/servers/rapidvideo.py
index a0e591e4..a6584eef 100755
--- a/plugin.video.alfa/servers/rapidvideo.py
+++ b/plugin.video.alfa/servers/rapidvideo.py
@@ -23,6 +23,8 @@ def test_video_exists(page_url):
if "Object not found" in response.data:
return False, "[Rapidvideo] El archivo no existe o ha sido borrado"
+ if response.code == 500:
+ return False, "[Rapidvideo] Error de servidor, inténtelo más tarde."
return True, ""
From b0b4b218f0e0bb9d4ed7192dc6f256ebb91d9ec2 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Sat, 28 Oct 2017 20:55:12 -0500
Subject: [PATCH 5/5] animemovil: fast fix
---
plugin.video.alfa/channels/animemovil.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/plugin.video.alfa/channels/animemovil.py b/plugin.video.alfa/channels/animemovil.py
index a33e40a8..fe586fa3 100644
--- a/plugin.video.alfa/channels/animemovil.py
+++ b/plugin.video.alfa/channels/animemovil.py
@@ -86,7 +86,7 @@ def recientes(item):
tipo = "tvshow"
show = contentTitle
action = "episodios"
- context = renumbertools.context
+ context = renumbertools.context(item)
if item.extra == "recientes":
action = "findvideos"
context = ""
@@ -96,7 +96,7 @@ def recientes(item):
action = "peliculas"
if not thumb.startswith("http"):
thumb = "http:%s" % thumb
-
+ action ="findvideos"
infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(item.clone(action=action, title=title, url=url, thumbnail=thumb, text_color=color3,
contentTitle=contentTitle, contentSerieName=show, infoLabels=infoLabels,