Merge remote-tracking branch 'alfa-addon/master'

unknown committed 2017-11-12 20:01:35 -03:00
7 changed files with 59 additions and 58 deletions

View File

@@ -132,6 +132,16 @@ def episodios(item):
     return itemlist


+def googl(url):
+    logger.info()
+    a=url.split("/")
+    link=a[3]
+    link="http://www.trueurl.net/?q=http%3A%2F%2Fgoo.gl%2F"+link+"&lucky=on&Uncloak=Find+True+URL"
+    data_other = httptools.downloadpage(link).data
+    data_other = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data_other)
+    patron='<td class="withbg">Destination URL<\/td><td><A title="(.+?)"'
+    trueurl = scrapertools.find_single_match(data_other, patron)
+    return trueurl


 def findvideos(item):
     logger.info()
@@ -147,10 +157,10 @@ def findvideos(item):
     scrapedthumbnail = scrapertools.find_single_match(data, '<div class="caracteristicas"><img src="([^<]+)">')
     itemla = scrapertools.find_multiple_matches(data_vid, '<div class="serv">.+?-(.+?)-(.+?)<\/div><.+? src="(.+?)"')
     for server, quality, url in itemla:
-        if "Calidad Alta" in quality:
-            quality = "HQ"
         if "HQ" in quality:
             quality = "HD"
+        if "Calidad Alta" in quality:
+            quality = "HQ"
         if " Calidad media - Carga mas rapido" in quality:
             quality = "360p"
         server = server.lower().strip()
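Note: moving the "Calidad Alta" check below the "HQ" check fixes a double-mapping bug. With the old order, a label containing "Calidad Alta" was first rewritten to "HQ" and then immediately matched by the "HQ" check, ending up as "HD"; with the new order it stays "HQ", and only labels that really said "HQ" are promoted to "HD".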
@@ -160,6 +170,7 @@ def findvideos(item):
             server = 'rapidvideo'
         if "netu" in server:
             server = 'netutv'
+            url = googl(url)
         itemlist.append(item.clone(url=url, action="play", server=server, contentQuality=quality,
                                    thumbnail=scrapedthumbnail, plot=scrapedplot,
                                    title="Enlace encontrado en: %s [%s]" % (server.capitalize(), quality)))

View File

@@ -1,4 +1,3 @@
-
 # -*- coding: utf-8 -*-
 import base64
@@ -16,7 +15,7 @@ from platformcode import platformtools
 host = "http://hdfull.tv"

-A_A = {'User-Agent':'Mozilla/5.0 AppLeWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 SaFAri/537.36'}

 if config.get_setting('hdfulluser', 'hdfull'):
     account = True
 else:
@@ -30,7 +28,7 @@ def settingCanal(item):
 def login():
     logger.info()

-    data = agrupa_datos(httptools.downloadpage(host, headers=A_A).data)
+    data = agrupa_datos(httptools.downloadpage(host).data)

     patron = "<input type='hidden' name='__csrf_magic' value=\"([^\"]+)\" />"
     sid = scrapertools.find_single_match(data, patron)
@@ -39,7 +37,7 @@ def login():
         'hdfull') + "&password=" + config.get_setting(
         'hdfullpassword', 'hdfull') + "&action=login"

-    httptools.downloadpage(host, post=post, headers=A_A)
+    httptools.downloadpage(host, post=post)


 def mainlist(item):
@@ -139,7 +137,7 @@ def menuseries(item):
 def search(item, texto):
     logger.info()

-    data = agrupa_datos(httptools.downloadpage(host, headers=A_A).data)
+    data = agrupa_datos(httptools.downloadpage(host).data)
     sid = scrapertools.get_match(data, '.__csrf_magic. value="(sid:[^"]+)"')

     item.extra = urllib.urlencode({'__csrf_magic': sid}) + '&menu=search&query=' + texto
@@ -175,7 +173,7 @@ def items_usuario(item):
     itemlist = []

     ## Carga estados
-    status = jsontools.load(httptools.downloadpage(host + '/a/status/all', headers=A_A).data)
+    status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)

     ## Fichas usuario
     url = item.url.split("?")[0]
@@ -189,7 +187,7 @@ def items_usuario(item):
         next_page = url + "?" + post

     ## Carga las fichas de usuario
-    data = httptools.downloadpage(url, post=post, headers=A_A).data
+    data = httptools.downloadpage(url, post=post).data
     fichas_usuario = jsontools.load(data)

     for ficha in fichas_usuario:
@@ -257,7 +255,7 @@ def listado_series(item):
     itemlist = []

-    data = agrupa_datos(httptools.downloadpage(item.url, headers=A_A).data)
+    data = agrupa_datos(httptools.downloadpage(item.url).data)

     patron = '<div class="list-item"><a href="([^"]+)"[^>]+>([^<]+)</a></div>'
     matches = re.compile(patron, re.DOTALL).findall(data)
@@ -277,10 +275,10 @@ def fichas(item):
     textoidiomas=''
     infoLabels=dict()

     ## Carga estados
-    status = jsontools.load(httptools.downloadpage(host + '/a/status/all', headers=A_A).data)
+    status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)

     if item.title == "Buscar...":
-        data = agrupa_datos(httptools.downloadpage(item.url, post=item.extra, headers=A_A).data)
+        data = agrupa_datos(httptools.downloadpage(item.url, post=item.extra).data)
         s_p = scrapertools.get_match(data, '<h3 class="section-title">(.*?)<div id="footer-wrapper">').split(
             '<h3 class="section-title">')
@@ -292,7 +290,7 @@ def fichas(item):
         else:
             data = s_p[0] + s_p[1]
     else:
-        data = agrupa_datos(httptools.downloadpage(item.url, headers=A_A).data)
+        data = agrupa_datos(httptools.downloadpage(item.url).data)

     data = re.sub(
         r'<div class="span-6[^<]+<div class="item"[^<]+' + \
@@ -364,12 +362,11 @@ def fichas(item):
 def episodios(item):
     logger.info()
-    # A_F = L_A
     id = "0"
     itemlist = []

     ## Carga estados
-    status = jsontools.load(httptools.downloadpage(host + '/a/status/all', headers=A_A).data)
+    status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)

     url_targets = item.url
@@ -379,7 +376,7 @@ def episodios(item):
         item.url = item.url.split("###")[0]

     ## Temporadas
-    data = agrupa_datos(httptools.downloadpage(item.url, headers=A_A).data)
+    data = agrupa_datos(httptools.downloadpage(item.url).data)

     if id == "0":
         ## Se saca el id de la serie de la página cuando viene de listado_series
@@ -413,7 +410,7 @@ def episodios(item):
     for scrapedurl in matches:
         ## Episodios
-        data = agrupa_datos(httptools.downloadpage(scrapedurl, headers=A_A).data)
+        data = agrupa_datos(httptools.downloadpage(scrapedurl).data)

         sid = scrapertools.get_match(data, "<script>var sid = '(\d+)'")
         ssid = scrapertools.get_match(scrapedurl, "temporada-(\d+)")
@@ -421,7 +418,7 @@ def episodios(item):
         url = host + "/a/episodes"

-        data = httptools.downloadpage(url, post=post, headers=A_A).data
+        data = httptools.downloadpage(url, post=post).data

         episodes = jsontools.load(data)
@@ -483,9 +480,10 @@ def episodios(item):
 def novedades_episodios(item):
     logger.info()

     itemlist = []

     ## Carga estados
-    status = jsontools.load(httptools.downloadpage(host + '/a/status/all', headers=A_A).data)
+    status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)

     ## Episodios
     url = item.url.split("?")[0]
@@ -497,7 +495,7 @@ def novedades_episodios(item):
         post = post.replace("start=" + old_start, "start=" + start)
         next_page = url + "?" + post

-    data = httptools.downloadpage(url, post=post, headers=A_A).data
+    data = httptools.downloadpage(url, post=post).data

     episodes = jsontools.load(data)
@@ -569,7 +567,7 @@ def generos(item):
     itemlist = []

-    data = agrupa_datos(httptools.downloadpage(item.url, headers=A_A).data)
+    data = agrupa_datos(httptools.downloadpage(item.url).data)
     data = scrapertools.find_single_match(data, '<li class="dropdown"><a href="http://hdfull.tv/peliculas"(.*?)</ul>')

     patron = '<li><a href="([^"]+)">([^<]+)</a></li>'
@@ -588,10 +586,10 @@ def generos(item):
 def generos_series(item):
     logger.info()
-    # A_F= L_A
     itemlist = []

-    data = agrupa_datos(httptools.downloadpage(item.url, headers=A_A).data)
+    data = agrupa_datos(httptools.downloadpage(item.url).data)
     data = scrapertools.find_single_match(data, '<li class="dropdown"><a href="http://hdfull.tv/series"(.*?)</ul>')

     patron = '<li><a href="([^"]+)">([^<]+)</a></li>'
@@ -614,7 +612,7 @@ def findvideos(item):
     it1 = []
     it2 = []

     ## Carga estados
-    status = jsontools.load(httptools.downloadpage(host + '/a/status/all', headers=A_A).data)
+    status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)

     url_targets = item.url

     ## Vídeos
@@ -643,10 +641,10 @@ def findvideos(item):
         it1.append(Item(channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets,
                         thumbnail=item.thumbnail, show=item.show, folder=True))

-    data_js = httptools.downloadpage("http://hdfull.tv/templates/hdfull/js/jquery.hdfull.view.min.js", headers=A_A).data
+    data_js = httptools.downloadpage("http://hdfull.tv/templates/hdfull/js/jquery.hdfull.view.min.js").data
     key = scrapertools.find_single_match(data_js, 'JSON.parse\(atob.*?substrings\((.*?)\)')

-    data_js = httptools.downloadpage("http://hdfull.tv/js/providers.js", headers=A_A).data
+    data_js = httptools.downloadpage("http://hdfull.tv/js/providers.js").data
     try:
         data_js = jhexdecode(data_js)
     except:
@@ -659,7 +657,7 @@ def findvideos(item):
         data_js = re.sub(r':(function.*?\})', r':"\g<1>"', decode_aa)
         data_js = re.sub(r':(var[^,]+),', r':"\g<1>",', data_js)

-    data = agrupa_datos(httptools.downloadpage(item.url, headers=A_A).data)
+    data = agrupa_datos(httptools.downloadpage(item.url).data)
     data_obf = scrapertools.find_single_match(data, "var ad\s*=\s*'([^']+)'")
     data_decrypt = jsontools.load(obfs(base64.b64decode(data_obf), 126 - int(key)))
@@ -724,7 +722,7 @@ def play(item):
         type = item.url.split("###")[1].split(";")[1]
         item.url = item.url.split("###")[0]
         post = "target_id=%s&target_type=%s&target_status=1" % (id, type)
-        data = httptools.downloadpage(host + "/a/status", post=post, headers=A_A).data
+        data = httptools.downloadpage(host + "/a/status", post=post).data

     devuelve = servertools.findvideosbyserver(item.url, item.server)
     if devuelve:
@@ -787,7 +785,7 @@ def set_status(item):
         path = "/a/favorite"
         post = "like_id=" + id + "&like_type=" + type + "&like_comment=&vote=-1"

-    data = httptools.downloadpage(host + path, post=post, headers=A_A).data
+    data = httptools.downloadpage(host + path, post=post).data

     title = "[COLOR green][B]OK[/B][/COLOR]"

View File

@@ -29,7 +29,8 @@ list_servers = ['powvideo',
                 'nowvideo',
                 'gamovideo',
                 'kingvid',
-                'vidabc'
+                'vidabc',
+                'streamixcloud'
                 ]
@@ -308,11 +309,11 @@ def findvideos(item):
     for i in range(len(list_links)):
         a=list_links[i].title
-        b=a.lstrip('Ver en')
+        b=a[a.find("en") + 2:]
         c=b.split('[')
         d=c[0].rstrip( )
         d=d.lstrip( )
-        list_links[i].server=d
+        list_links[i].server=d.replace("streamix", "streamixcloud")

     list_links = servertools.get_servers_itemlist(list_links)
     autoplay.start(list_links, item)
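Note: b=a.lstrip('Ver en') was replaced because str.lstrip strips any leading run of the given characters ('V', 'e', 'r', 'n', space), not the literal prefix "Ver en", so server names beginning with those letters were mangled. A quick illustration of the difference:

    a = "Ver en netu"
    print(a.lstrip('Ver en'))    # -> "tu": 'n', 'e' and ' ' are all in the strip set
    print(a[a.find("en") + 2:])  # -> " netu": slice after the first "en"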

View File

@@ -67,10 +67,12 @@ def lista(item):
         title = name
         url = host + link
         scrapedthumbnail = host + img
-        context1=[renumbertools.context(item), autoplay.context]
+        context = renumbertools.context(item)
+        context2 = autoplay.context
+        context.extend(context2)
         itemlist.append(item.clone(title=title, url=url, action="episodios", thumbnail=scrapedthumbnail, show=title,
-                                   context=context1))
-    logger.info("gasdfsa "+str(b))
+                                   context=context))
     if b<29:
         a=a+1
         url="https://serieslan.com/pag-"+str(a)
@@ -136,7 +138,7 @@ def episodios(item):
                               thumbnail=scrapedthumbnail))

     if config.get_videolibrary_support() and len(itemlist) > 0:
-        itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
+        itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]", url=item.url,
                              action="add_serie_to_library", extra="episodios", show=show))

     return itemlist
@@ -201,17 +203,3 @@ def findvideos(item):
     else:
         return []
-
-
-def play(item):
-    logger.info()
-    itemlist = []
-
-    # Buscamos video por servidor ...
-    devuelve = servertools.findvideosbyserver(item.url, item.server)
-    if not devuelve:
-        # ...sino lo encontramos buscamos en todos los servidores disponibles
-        devuelve = servertools.findvideos(item.url, skip=True)
-    if devuelve:
-        # logger.debug(devuelve)
-        itemlist.append(Item(channel=item.channel, title=item.contentTitle, action="play", server=devuelve[0][2],
-                             url=devuelve[0][1], thumbnail=item.thumbnail, folder=False))
-    return itemlist

View File

@@ -23,8 +23,8 @@ ficherocookies = os.path.join(config.get_data_path(), "cookies.dat")

 # Headers por defecto, si no se especifica nada
 default_headers = dict()
-default_headers["User-Agent"] = "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0"
-default_headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
+default_headers["User-Agent"] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36"
+default_headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8"
 default_headers["Accept-Language"] = "es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3"
 default_headers["Accept-Charset"] = "UTF-8"
 default_headers["Accept-Encoding"] = "gzip"

View File

@@ -412,8 +412,12 @@ class SettingsWindow(xbmcgui.WindowXMLDialog):
             self.addControl(control)
             control.setVisible(False)
             control.setLabel(c["label"])
-            control.setText(self.values[c["id"]])
+            # frodo fix
+            s = self.values[c["id"]]
+            if s is None:
+                s = ''
+            control.setText(s)
+            # control.setText(self.values[c["id"]])
             control.setWidth(self.controls_width - 5)
             control.setHeight(self.height_control)
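Note: the "frodo fix" above guards setText against None, since settings with no stored value load as None and the xbmcgui text control chokes on a non-string (the name suggests this bit old XBMC "Frodo" builds). Assuming the stored value is always a string or None, the same guard can be written in one line:

    control.setText(self.values[c["id"]] or '')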

View File

@@ -7,13 +7,12 @@ from core import scrapertools
 from lib import jsunpack
 from platformcode import logger

-headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:58.0) Gecko/20100101 ' \
-           'Firefox/58.0'}
+headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:58.0) Gecko/20100101 Firefox/58.0'}


 def test_video_exists(page_url):
     logger.info("(page_url='%s')" % page_url)

-    data = httptools.downloadpage(page_url, headers=headers).data
+    data = httptools.downloadpage(page_url).data

     if "File was deleted" in data or "Not Found" in data or "File was locked by administrator" in data:
         return False, "[Gamovideo] El archivo no existe o ha sido borrado"
@@ -25,7 +24,7 @@ def test_video_exists(page_url):
 def get_video_url(page_url, premium=False, user="", password="", video_password=""):
     logger.info("(page_url='%s')" % page_url)

-    data = httptools.downloadpage(page_url, headers=headers).data
+    data = httptools.downloadpage(page_url).data
     packer = scrapertools.find_single_match(data,
                                             "<script type='text/javascript'>(eval.function.p,a,c,k,e,d..*?)</script>")
     if packer != "":
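Note: the regex above captures a Dean Edwards "p,a,c,k,e,d" packed script, which the jsunpack helper imported at the top of the file decodes. A hedged usage sketch, assuming jsunpack.unpack(packed_source) returns the unpacked JavaScript as in the common Python port:

    if packer != "":
        unpacked = jsunpack.unpack(packer)  # eval(function(p,a,c,k,e,d)...) -> plain JS
        # the direct video URL can then be scraped from the unpacked source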