.+?-(.+?)-(.+?)<\/div><.+? src="(.+?)"')
    for server, quality, url in itemla:
-        if "Calidad Alta" in quality:
-            quality = "HQ"
        if "HQ" in quality:
            quality = "HD"
+        if "Calidad Alta" in quality:
+            quality = "HQ"
        if " Calidad media - Carga mas rapido" in quality:
            quality = "360p"
        server = server.lower().strip()
@@ -160,6 +170,7 @@ def findvideos(item):
            server = 'rapidvideo'
        if "netu" in server:
            server = 'netutv'
+        url = googl(url)
        itemlist.append(item.clone(url=url, action="play", server=server, contentQuality=quality,
                                   thumbnail=scrapedthumbnail, plot=scrapedplot,
                                   title="Enlace encontrado en: %s [%s]" % (server.capitalize(), quality)))
diff --git a/plugin.video.alfa/channels/hdfull.py b/plugin.video.alfa/channels/hdfull.py
index 1a311969..9e4ad2f1 100644
--- a/plugin.video.alfa/channels/hdfull.py
+++ b/plugin.video.alfa/channels/hdfull.py
@@ -1,4 +1,3 @@
-
# -*- coding: utf-8 -*-
import base64
@@ -16,7 +15,6 @@ from platformcode import platformtools
host = "http://hdfull.tv"
-A_A = {'User-Agent':'Mozilla/5.0 AppLeWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 SaFAri/537.36'}
if config.get_setting('hdfulluser', 'hdfull'):
    account = True
else:
@@ -30,7 +28,7 @@ def settingCanal(item):
def login():
    logger.info()
-    data = agrupa_datos(httptools.downloadpage(host, headers=A_A).data)
+    data = agrupa_datos(httptools.downloadpage(host).data)
    patron = '.__csrf_magic. value="(sid:[^"]+)"'
    sid = scrapertools.find_single_match(data, patron)
@@ -39,7 +37,7 @@ def login():
        'hdfull') + "&password=" + config.get_setting(
        'hdfullpassword', 'hdfull') + "&action=login"
-    httptools.downloadpage(host, post=post, headers=A_A)
+    httptools.downloadpage(host, post=post)
def mainlist(item):
@@ -139,7 +137,7 @@ def menuseries(item):
def search(item, texto):
    logger.info()
-    data = agrupa_datos(httptools.downloadpage(host, headers=A_A).data)
+    data = agrupa_datos(httptools.downloadpage(host).data)
    sid = scrapertools.get_match(data, '.__csrf_magic. value="(sid:[^"]+)"')
    item.extra = urllib.urlencode({'__csrf_magic': sid}) + '&menu=search&query=' + texto
@@ -175,7 +173,7 @@ def items_usuario(item):
    itemlist = []
    ## Carga estados
-    status = jsontools.load(httptools.downloadpage(host + '/a/status/all', headers=A_A).data)
+    status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
    ## Fichas usuario
    url = item.url.split("?")[0]
@@ -189,7 +187,7 @@ def items_usuario(item):
    next_page = url + "?" + post
    ## Carga las fichas de usuario
-    data = httptools.downloadpage(url, post=post, headers=A_A).data
+    data = httptools.downloadpage(url, post=post).data
    fichas_usuario = jsontools.load(data)
    for ficha in fichas_usuario:
@@ -257,7 +255,7 @@ def listado_series(item):
    itemlist = []
-    data = agrupa_datos(httptools.downloadpage(item.url, headers=A_A).data)
+    data = agrupa_datos(httptools.downloadpage(item.url).data)
patron = '
'
    matches = re.compile(patron, re.DOTALL).findall(data)
@@ -277,10 +275,10 @@ def fichas(item):
    textoidiomas=''
    infoLabels=dict()
    ## Carga estados
-    status = jsontools.load(httptools.downloadpage(host + '/a/status/all', headers=A_A).data)
+    status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
    if item.title == "Buscar...":
-        data = agrupa_datos(httptools.downloadpage(item.url, post=item.extra, headers=A_A).data)
+        data = agrupa_datos(httptools.downloadpage(item.url, post=item.extra).data)
s_p = scrapertools.get_match(data, '
(.*?)