@@ -1,4 +1,3 @@
 # -*- coding: utf-8 -*-

 import base64
@@ -16,7 +15,6 @@ from platformcode import platformtools
 host = "http://hdfull.tv"

-A_A = {'User-Agent':'Mozilla/5.0 AppLeWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 SaFAri/537.36'}

 if config.get_setting('hdfulluser', 'hdfull'):
     account = True
 else:
@@ -30,7 +28,7 @@ def settingCanal(item):
 def login():
     logger.info()

-    data = agrupa_datos(httptools.downloadpage(host, headers=A_A).data)
+    data = agrupa_datos(httptools.downloadpage(host).data)

     patron = "<input type='hidden' name='__csrf_magic' value=\"([^\"]+)\" />"
     sid = scrapertools.find_single_match(data, patron)
@@ -39,7 +37,7 @@ def login():
         'hdfull') + "&password=" + config.get_setting(
         'hdfullpassword', 'hdfull') + "&action=login"

-    httptools.downloadpage(host, post=post, headers=A_A)
+    httptools.downloadpage(host, post=post)


 def mainlist(item):
@@ -139,7 +137,7 @@ def menuseries(item):
 def search(item, texto):
     logger.info()

-    data = agrupa_datos(httptools.downloadpage(host, headers=A_A).data)
+    data = agrupa_datos(httptools.downloadpage(host).data)

     sid = scrapertools.get_match(data, '.__csrf_magic. value="(sid:[^"]+)"')
     item.extra = urllib.urlencode({'__csrf_magic': sid}) + '&menu=search&query=' + texto
@@ -175,7 +173,7 @@ def items_usuario(item):
     itemlist = []
     ## Load statuses
-    status = jsontools.load(httptools.downloadpage(host + '/a/status/all', headers=A_A).data)
+    status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)

     ## User cards
     url = item.url.split("?")[0]
@@ -189,7 +187,7 @@ def items_usuario(item):
     next_page = url + "?" + post

     ## Load the user cards
-    data = httptools.downloadpage(url, post=post, headers=A_A).data
+    data = httptools.downloadpage(url, post=post).data
     fichas_usuario = jsontools.load(data)

     for ficha in fichas_usuario:
@@ -257,7 +255,7 @@ def listado_series(item):
     itemlist = []

-    data = agrupa_datos(httptools.downloadpage(item.url, headers=A_A).data)
+    data = agrupa_datos(httptools.downloadpage(item.url).data)

     patron = '<div class="list-item"><a href="([^"]+)"[^>]+>([^<]+)</a></div>'
     matches = re.compile(patron, re.DOTALL).findall(data)
@@ -277,10 +275,10 @@ def fichas(item):
     textoidiomas=''
     infoLabels=dict()
     ## Load statuses
-    status = jsontools.load(httptools.downloadpage(host + '/a/status/all', headers=A_A).data)
+    status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)

     if item.title == "Buscar...":
-        data = agrupa_datos(httptools.downloadpage(item.url, post=item.extra, headers=A_A).data)
+        data = agrupa_datos(httptools.downloadpage(item.url, post=item.extra).data)
         s_p = scrapertools.get_match(data, '<h3 class="section-title">(.*?)<div id="footer-wrapper">').split(
             '<h3 class="section-title">')
@@ -292,7 +290,7 @@ def fichas(item):
         else:
             data = s_p[0] + s_p[1]
     else:
-        data = agrupa_datos(httptools.downloadpage(item.url, headers=A_A).data)
+        data = agrupa_datos(httptools.downloadpage(item.url).data)

     data = re.sub(
         r'<div class="span-6[^<]+<div class="item"[^<]+' + \
@@ -364,12 +362,11 @@ def fichas(item):
 def episodios(item):
     logger.info()
-    # A_F = L_A
     id = "0"
     itemlist = []

     ## Load statuses
-    status = jsontools.load(httptools.downloadpage(host + '/a/status/all', headers=A_A).data)
+    status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)

     url_targets = item.url
@@ -379,7 +376,7 @@ def episodios(item):
         item.url = item.url.split("###")[0]

     ## Seasons
-    data = agrupa_datos(httptools.downloadpage(item.url, headers=A_A).data)
+    data = agrupa_datos(httptools.downloadpage(item.url).data)

     if id == "0":
         ## Get the series id from the page when coming from listado_series
@@ -413,7 +410,7 @@ def episodios(item):
     for scrapedurl in matches:

         ## Episodes
-        data = agrupa_datos(httptools.downloadpage(scrapedurl, headers=A_A).data)
+        data = agrupa_datos(httptools.downloadpage(scrapedurl).data)

         sid = scrapertools.get_match(data, "<script>var sid = '(\d+)'")
         ssid = scrapertools.get_match(scrapedurl, "temporada-(\d+)")
@@ -421,7 +418,7 @@ def episodios(item):
         url = host + "/a/episodes"

-        data = httptools.downloadpage(url, post=post, headers=A_A).data
+        data = httptools.downloadpage(url, post=post).data

         episodes = jsontools.load(data)
@@ -483,9 +480,10 @@ def episodios(item):
 def novedades_episodios(item):
     logger.info()

     itemlist = []
     ## Load statuses
-    status = jsontools.load(httptools.downloadpage(host + '/a/status/all', headers=A_A).data)
+    status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)

     ## Episodes
     url = item.url.split("?")[0]
@@ -497,7 +495,7 @@ def novedades_episodios(item):
     post = post.replace("start=" + old_start, "start=" + start)
     next_page = url + "?" + post

-    data = httptools.downloadpage(url, post=post, headers=A_A).data
+    data = httptools.downloadpage(url, post=post).data

     episodes = jsontools.load(data)
@@ -569,7 +567,7 @@ def generos(item):
     itemlist = []

-    data = agrupa_datos(httptools.downloadpage(item.url, headers=A_A).data)
+    data = agrupa_datos(httptools.downloadpage(item.url).data)
     data = scrapertools.find_single_match(data, '<li class="dropdown"><a href="http://hdfull.tv/peliculas"(.*?)</ul>')

     patron = '<li><a href="([^"]+)">([^<]+)</a></li>'
@@ -588,10 +586,10 @@ def generos(item):
 def generos_series(item):
     logger.info()
     # A_F= L_A

     itemlist = []

-    data = agrupa_datos(httptools.downloadpage(item.url, headers=A_A).data)
+    data = agrupa_datos(httptools.downloadpage(item.url).data)
     data = scrapertools.find_single_match(data, '<li class="dropdown"><a href="http://hdfull.tv/series"(.*?)</ul>')

     patron = '<li><a href="([^"]+)">([^<]+)</a></li>'
@@ -614,7 +612,7 @@ def findvideos(item):
     it1 = []
     it2 = []
     ## Load statuses
-    status = jsontools.load(httptools.downloadpage(host + '/a/status/all', headers=A_A).data)
+    status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
     url_targets = item.url

     ## Videos
@@ -643,10 +641,10 @@ def findvideos(item):
         it1.append(Item(channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets,
                         thumbnail=item.thumbnail, show=item.show, folder=True))

-    data_js = httptools.downloadpage("http://hdfull.tv/templates/hdfull/js/jquery.hdfull.view.min.js", headers=A_A).data
+    data_js = httptools.downloadpage("http://hdfull.tv/templates/hdfull/js/jquery.hdfull.view.min.js").data
     key = scrapertools.find_single_match(data_js, 'JSON.parse\(atob.*?substrings\((.*?)\)')

-    data_js = httptools.downloadpage("http://hdfull.tv/js/providers.js", headers=A_A).data
+    data_js = httptools.downloadpage("http://hdfull.tv/js/providers.js").data
     try:
         data_js = jhexdecode(data_js)
     except:
@@ -659,7 +657,7 @@ def findvideos(item):
         data_js = re.sub(r':(function.*?\})', r':"\g<1>"', decode_aa)
         data_js = re.sub(r':(var[^,]+),', r':"\g<1>",', data_js)

-    data = agrupa_datos(httptools.downloadpage(item.url, headers=A_A).data)
+    data = agrupa_datos(httptools.downloadpage(item.url).data)
     data_obf = scrapertools.find_single_match(data, "var ad\s*=\s*'([^']+)'")
     data_decrypt = jsontools.load(obfs(base64.b64decode(data_obf), 126 - int(key)))
@@ -724,7 +722,7 @@ def play(item):
         type = item.url.split("###")[1].split(";")[1]
         item.url = item.url.split("###")[0]
         post = "target_id=%s&target_type=%s&target_status=1" % (id, type)
-        data = httptools.downloadpage(host + "/a/status", post=post, headers=A_A).data
+        data = httptools.downloadpage(host + "/a/status", post=post).data

     devuelve = servertools.findvideosbyserver(item.url, item.server)
     if devuelve:
@@ -787,7 +785,7 @@ def set_status(item):
         path = "/a/favorite"
         post = "like_id=" + id + "&like_type=" + type + "&like_comment=&vote=-1"

-    data = httptools.downloadpage(host + path, post=post, headers=A_A).data
+    data = httptools.downloadpage(host + path, post=post).data

     title = "[COLOR green][B]OK[/B][/COLOR]"