Merge pull request #479 from pipcat/master
Fixes in servers and channels
@@ -81,6 +81,11 @@ def addFavourite(item):
         platformtools.dialog_notification('Enlace repetido', 'Ya tienes este enlace en la carpeta')
         return False
 
+    # If it is a movie, fill in tmdb info unless tmdb_plus_info is enabled
+    if item.contentType == 'movie' and not config.get_setting('tmdb_plus_info', default=False):
+        from core import tmdb
+        tmdb.set_infoLabels(item, True)  # fetch more data in a "second pass" (actors, duration, ...)
+
     # Save
     alfav.user_favorites[i_perfil]['items'].append(item.tourl())
     alfav.save()
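Note on the block added above: the extra tmdb lookup only runs for movies, and only when the user has not enabled tmdb_plus_info (which would already fetch the full data elsewhere). A stand-alone sketch of the same guard, using placeholder stand-ins rather than alfa's real config/tmdb APIs:

# Hypothetical stand-ins; only the guard pattern mirrors the diff above.
settings = {'tmdb_plus_info': False}

def get_setting(name, default=False):
    return settings.get(name, default)

content_type = 'movie'
if content_type == 'movie' and not get_setting('tmdb_plus_info', default=False):
    print('second pass: fetch actors, duration, ... from tmdb')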
@@ -450,7 +450,7 @@ def findvideos(item):
     if scrapedurl.startswith("https://cloud.pelispedia.vip/html5.php") or scrapedurl.startswith("https://cloud.pelispedia.stream/html5.php"):
         parms = dict(re.findall('[&|\?]{1}([^=]*)=([^&]*)', scrapedurl))
         for cal in ['360', '480', '720', '1080']:
-            if parms[cal]:
+            if cal in parms:
                 url_v = 'https://pelispedia.video/v.php?id=%s&sub=%s&active=%s' % (parms[cal], parms['sub'], cal)
                 title = "Ver video en [HTML5 " + cal + "p]"
                 new_item = item.clone(title=title, url=url_v, action="play", referer=item.url)
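This one-line change fixes a crash: indexing the dict with a missing quality key raises KeyError, while the membership test simply skips it. A minimal reproduction with a hypothetical html5.php link:

import re

# Hypothetical scraped link offering only two of the four qualities.
scrapedurl = 'https://cloud.pelispedia.vip/html5.php?360=aaa&720=bbb&sub=es'

parms = dict(re.findall('[&|\?]{1}([^=]*)=([^&]*)', scrapedurl))
# parms == {'360': 'aaa', '720': 'bbb', 'sub': 'es'}

for cal in ['360', '480', '720', '1080']:
    if cal in parms:  # the old `if parms[cal]:` raised KeyError on '480' and '1080'
        print(cal, parms[cal])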
@@ -505,7 +505,8 @@ def play(item):
 
     elif item.url.startswith("https://load.pelispedia.vip/embed/"):
         # 1- Download
-        data, ck = gktools.get_data_and_cookie(item)
+        # ~ data, ck = gktools.get_data_and_cookie(item)
+        data, ck_sucuri, ck_cfduid = obtener_data_cookies(item.url, item.referer)
 
         # 2- Compute data
         gsv = scrapertools.find_single_match(data, '<meta name="google-site-verification" content="([^"]*)"')
@@ -519,7 +520,8 @@ def play(item):
         url = item.url.replace('/embed/', '/stream/') + '/' + token
 
         # 3- Download the page
-        data = gktools.get_data_with_cookie(url, ck, item.url)
+        # ~ data = gktools.get_data_with_cookie(url, ck, item.url)
+        data, ck_sucuri, ck_cfduid = obtener_data_cookies(url, item.url, ck_sucuri, ck_cfduid)
 
         # 4- Extract links
         url = scrapertools.find_single_match(data, '<meta (?:name|property)="og:url" content="([^"]+)"')
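Quick check of the og:url extraction above: the (?:name|property) alternation accepts both meta-attribute spellings. A stand-alone version with re and a hypothetical snippet of page data:

import re

data = '<meta property="og:url" content="https://load.pelispedia.vip/stream/abc/xyz">'
m = re.search('<meta (?:name|property)="og:url" content="([^"]+)"', data)
print(m.group(1) if m else '')  # -> https://load.pelispedia.vip/stream/abc/xyz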
@@ -556,6 +558,39 @@ def obtener_data(url, referer=''):
 
     return data
 
+def obtener_data_cookies(url, referer='', ck_sucuri = '', ck_cfduid = ''):
+
+    headers = {}
+    if referer != '': headers['Referer'] = referer
+    if ck_sucuri != '' and ck_cfduid != '': headers['Cookie'] = ck_sucuri + '; __cfduid=' + ck_cfduid
+    elif ck_sucuri != '': headers['Cookie'] = ck_sucuri
+    elif ck_cfduid != '': headers['Cookie'] = '__cfduid=%s' % ck_cfduid
+
+    resp = httptools.downloadpage(url, headers=headers, cookies=False)
+    if ck_cfduid == '': ck_cfduid = obtener_cfduid(resp.headers)
+
+    if "Javascript is required" in resp.data:
+        ck_sucuri = decodificar_cookie(resp.data)
+        logger.info("Javascript is required. Cookie necesaria %s" % ck_sucuri)
+
+        headers['Cookie'] = ck_sucuri
+        if ck_cfduid != '': headers['Cookie'] += '; __cfduid=%s' % ck_cfduid
+
+        resp = httptools.downloadpage(url, headers=headers, cookies=False)
+        if ck_cfduid == '': ck_cfduid = obtener_cfduid(resp.headers)
+
+    return resp.data, ck_sucuri, ck_cfduid
+
+
+def obtener_cfduid(headers):
+    ck_name = '__cfduid'
+    ck_value = ''
+    for h in headers:
+        ck = scrapertools.find_single_match(headers[h], '%s=([^;]*)' % ck_name)
+        if ck:
+            ck_value = ck
+            break
+    return ck_value
 
 
 def rshift(val, n): return val>>n if val >= 0 else (val+0x100000000)>>n
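The rshift helper in the trailing context emulates JavaScript's unsigned 32-bit right shift (>>>), presumably for the token arithmetic above; Python's plain >> keeps the sign for negative numbers. A quick sanity check:

def rshift(val, n): return val>>n if val >= 0 else (val+0x100000000)>>n

assert rshift(8, 2) == 2              # positive values behave like plain >>
assert rshift(-1, 0) == 0xFFFFFFFF    # -1 reinterpreted as unsigned 32-bit
assert rshift(-2, 1) == 0x7FFFFFFF    # matches JavaScript (-2 >>> 1)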
@@ -111,12 +111,22 @@ def search_section(item, data, sectionType):
 
     itemlist = []
     for url, thumbnail, title in sectionResultsRE:
+        filtro_list = {"poster_path": scrapertools.find_single_match(thumbnail, "w\w+(/\w+.....)")}
+
         newitem = item.clone(action = "seasons" if sectionType == "series" else "findvideos",
                              title = title,
                              thumbnail = thumbnail,
-                             url = url)
-        if sectionType == "series":
-            newitem.show = title;
+                             url = url,
+                             infoLabels = {'filtro': filtro_list.items(), 'year': '-'})
+
+        if sectionType == 'series':
+            newitem.show = title
+            newitem.contentType = 'tvshow'
+            newitem.contentSerieName = title
+        else:
+            newitem.contentType = 'movie'
+            newitem.contentTitle = title
 
         itemlist.append(newitem)
 
     return itemlist
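The new filtro regex keeps just the poster filename from the tmdb thumbnail URL, so the filter works regardless of size-variant prefixes like w185 or w300. A stand-alone check with re against a hypothetical thumbnail:

import re

thumbnail = 'https://image.tmdb.org/t/p/w300/kqjL17yufvn9OVLyXYpvtyrFfak.jpg'
m = re.search("w\w+(/\w+.....)", thumbnail)
print(m.group(1) if m else '')  # -> /kqjL17yufvn9OVLyXYpvtyrFfak.jpg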
@@ -6,6 +6,10 @@
+    {
+      "pattern": "https://cloudvideo.tv/embed-([a-z0-9]+).html",
+      "url": "https://cloudvideo.tv/embed-\\1.html"
+    },
     {
       "pattern": "https://cloudvideo.tv/([a-z0-9]+)",
       "url": "https://cloudvideo.tv/embed-\\1.html"
     }
   ]
 },
@@ -4,8 +4,8 @@
   "ignore_urls": [],
   "patterns": [
     {
-      "pattern": "flashx.co/([A-z0-9]+)\\.jsp",
-      "url": "https://www.flashx.co/\\1.jsp"
+      "pattern": "flashx.co/([A-z0-9]+)\\.(jsp|html)",
+      "url": "https://www.flashx.co/\\1.\\2"
     },
     {
       "pattern": "flashx.(?:tv|pw|ws|sx|to)/(?:embed.php\\?c=|embed-|playvid-|)([A-z0-9]+)",
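These server JSON entries pair a detection regex with a rewrite template; capture groups from "pattern" are substituted into "url". A minimal sketch of the mechanics (assumed behaviour for illustration, not alfa's actual loader code):

import re

pattern = r"flashx.co/([A-z0-9]+)\.(jsp|html)"
url_tpl = r"https://www.flashx.co/\1.\2"

data = 'mirror at flashx.co/AbC123.html'   # hypothetical page text
for m in re.finditer(pattern, data):
    print(m.expand(url_tpl))               # -> https://www.flashx.co/AbC123.html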
plugin.video.alfa/servers/flix555.json (new file, 42 lines)
@@ -0,0 +1,42 @@
{
  "active": true,
  "find_videos": {
    "ignore_urls": [],
    "patterns": [
      {
        "pattern": "flix555.com/([A-z0-9]+)",
        "url": "https://flix555.com/\\1"
      }
    ]
  },
  "free": true,
  "id": "flix555",
  "name": "flix555",
  "settings": [
    {
      "default": false,
      "enabled": true,
      "id": "black_list",
      "label": "@60654",
      "type": "bool",
      "visible": true
    },
    {
      "default": 0,
      "enabled": true,
      "id": "favorites_servers_list",
      "label": "@60655",
      "lvalues": [
        "No",
        "1",
        "2",
        "3",
        "4",
        "5"
      ],
      "type": "list",
      "visible": false
    }
  ],
  "thumbnail": "https://flix555.com/img/logo.png"
}
plugin.video.alfa/servers/flix555.py (new file, 48 lines)
@@ -0,0 +1,48 @@
# -*- coding: utf-8 -*-

import re, base64, urllib, time

from core import httptools, scrapertools
from lib import jsunpack
from platformcode import logger, platformtools


def test_video_exists(page_url):
    resp = httptools.downloadpage(page_url)
    if resp.code == 404 or '<b>File Not Found</b>' in resp.data:
        return False, "[flix555] El video no está disponible"
    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(page_url).data
    # ~ logger.info(data)

    post = {}
    inputs = scrapertools.find_multiple_matches(data, '<input type="(?:hidden|submit)" name="([^"]*)" value="([^"]*)"')
    for nom, valor in inputs: post[nom] = valor
    post = urllib.urlencode(post)
    # ~ logger.info(post)

    espera = scrapertools.find_single_match(data, '<span id="cxc">(\d+)</span>')
    platformtools.dialog_notification('Cargando flix555', 'Espera de %s segundos requerida' % espera)
    time.sleep(int(espera))

    data = httptools.downloadpage(page_url, post=post).data
    # ~ logger.info(data)

    packed = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>(eval.*?)</script>")
    unpacked = jsunpack.unpack(packed)
    # ~ logger.info(unpacked)

    matches = scrapertools.find_multiple_matches(unpacked, 'file\s*:\s*"([^"]*)"\s*,\s*label\s*:\s*"([^"]*)"')
    if matches:
        for url, lbl in matches:
            if not url.endswith('.srt'):
                itemlist.append(['[%s]' % lbl, url])

    return itemlist
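For context, the final step of get_video_url pulls file/label pairs out of the unpacked jwplayer setup and drops subtitle tracks. A self-contained sketch with re and a hypothetical unpacked string:

import re

# Hypothetical result of jsunpack.unpack(packed)
unpacked = 'setup({sources:[{file:"https://host/v.mp4",label:"720p"},{file:"https://host/es.srt",label:"ES"}]})'

matches = re.findall('file\s*:\s*"([^"]*)"\s*,\s*label\s*:\s*"([^"]*)"', unpacked)
itemlist = [['[%s]' % lbl, url] for url, lbl in matches if not url.endswith('.srt')]
print(itemlist)  # -> [['[720p]', 'https://host/v.mp4']]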