@@ -206,11 +206,12 @@ def episodesxseasons(item):
     return itemlist


 def findvideos(item):
+    from lib.generictools import privatedecrypt
     logger.info()
     from lib import jsunpack

     itemlist = []

     data = get_source(item.url)

     patron = "onclick=clickLink\(this, '([^']+)', '([^']+)', '([^']+)'\);>"
@@ -218,18 +219,8 @@ def findvideos(item):
     headers = {'referer': item.url}
     for url, quality, language in matches:

-        data = httptools.downloadpage(url, headers=headers, follow_redirects=False).data
-        data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
-        packed = scrapertools.find_single_match(data, '(eval\(.*?);var')
-        unpacked = jsunpack.unpack(packed)
-        server = scrapertools.find_single_match(unpacked, "src:.'(http://\D+)/")
-        id = scrapertools.find_single_match(unpacked, "src:.'http://\D+/.*?description:.'(.*?).'")
-        if server == '':
-            if 'powvideo' in unpacked:
-                id = scrapertools.find_single_match(unpacked, ",description:.'(.*?).'")
-                server = 'https://powvideo.net'
-        url = '%s/%s' % (server, id)
-        if server != '' and id != '':
+        url = privatedecrypt(url, headers)
+        if url != '':
             language = IDIOMAS[language]
             if quality.lower() == 'premium':
                 quality = '720p'
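For reference, the `patron` regex in this hunk pulls the target URL, the quality tag and the language code out of the page's onclick=clickLink(...) handlers; each tuple is then resolved through generictools.privatedecrypt (an Alfa helper whose internals are not shown here). A minimal standalone sketch of just the extraction step; the sample markup and values below are invented for illustration:

# Standalone sketch: extract (url, quality, language) tuples the way the
# channel's "patron" regex does. The sample markup below is made up.
import re

sample_html = (
    "<a onclick=clickLink(this, 'https://example.org/go/abc123', "
    "'premium', 'latino');>Ver</a>"
)

patron = r"onclick=clickLink\(this, '([^']+)', '([^']+)', '([^']+)'\);>"
for url, quality, language in re.findall(patron, sample_html):
    # In the channel, `url` would now be passed to privatedecrypt()
    # together with the {'referer': item.url} header shown in the diff.
    print(url, quality, language)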
@@ -46,7 +46,7 @@ def mainlist(item):

     itemlist.append(item.clone(title="Por Año",
                                action="seccion",
-                               url=host + '/peliculas/2017/',
+                               url=host + '/peliculas/2019/',
                                thumbnail=get_thumb('year', auto=True),
                                seccion='anios'
                                ))
@@ -60,7 +60,7 @@ def mainlist(item):

     itemlist.append(item.clone(title="Buscar",
                                action="search",
-                               url=host + '/api/elastic/suggest?query=',
+                               url=host + '/api/suggest?query=',
                                thumbnail=get_thumb('search', auto=True)
                                ))

@@ -185,14 +185,16 @@ def seccion(item):
 def busqueda(item):
     logger.info()
     itemlist = []
-    data = httptools.downloadpage(item.url).data
+    headers = {'referer':host, 'X-Requested-With': 'XMLHttpRequest'}
+    data = httptools.downloadpage(item.url, headers=headers).data
+    logger.debug(data)
     dict_data = jsontools.load(data)
-    resultados = dict_data['result'][0]['options']
+    resultados = dict_data['suggest']['result'][0]['options']

     for resultado in resultados:
         if 'title' in resultado['_source']:
             title = resultado['_source']['title']
-            thumbnail = 'http://s3.amazonaws.com/pelisfox' + '/' + resultado['_source']['cover']
+            thumbnail = 'https://static.pelisfox.tv/static/movie' + '/' + resultado['_source']['cover']
             plot = resultado['_source']['sinopsis']
             url = host + resultado['_source']['url'] + '/'

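The search hunk above switches busqueda() to the site's suggest API and digs one level deeper into the JSON ('suggest' -> 'result'[0] -> 'options'). A standalone sketch of that parsing step; the payload is invented to mirror the keys the channel reads, and the real response may carry more fields:

# Standalone sketch: parse a response shaped like the one busqueda() expects.
import json

sample_response = json.dumps({
    "suggest": {
        "result": [{
            "options": [{
                "_source": {
                    "title": "Example Movie",
                    "cover": "example-movie.jpg",
                    "sinopsis": "Sample plot text.",
                    "url": "/pelicula/example-movie"
                }
            }]
        }]
    }
})

dict_data = json.loads(sample_response)
for resultado in dict_data['suggest']['result'][0]['options']:
    src = resultado['_source']
    # Thumbnail base taken from the new line in the diff.
    thumbnail = 'https://static.pelisfox.tv/static/movie' + '/' + src['cover']
    print(src['title'], thumbnail, src['url'])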
@@ -226,7 +226,6 @@ def findvideos(item):
     matches = re.compile(patron, re.DOTALL).findall(data)

     for video_url in matches:
-
         language = 'latino'
         if not config.get_setting('unify'):
             title = ' [%s]' % language.capitalize()
@@ -235,34 +234,39 @@ def findvideos(item):

         if 'pelisplus.net' in video_url:
             referer = video_url
-            post = {'r':item.url}
+            post = {'r':item.url, 'd': 'www.pelisplus.net'}
             post = urllib.urlencode(post)
-            video_url = video_url.replace('/v/', '/api/sources/')
+            video_url = video_url.replace('/v/', '/api/source/')
             url_data = httptools.downloadpage(video_url, post=post, headers={'Referer':referer}).data
             patron = '"file":"([^"]+)","label":"([^"]+)"'
             matches = re.compile(patron, re.DOTALL).findall(url_data)
             for url, quality in matches:
-                url = url.replace('\/', '/')
+                url = 'https://www.pelisplus.net' + url.replace('\/', '/')
                 itemlist.append(
                     Item(channel=item.channel, title='%s' + title, url=url, action='play', language=IDIOMAS[language],
                          quality=quality, infoLabels=item.infoLabels))

         else:
-            url_data = get_source(video_url)
-            url = scrapertools.find_single_match(url_data, '<iframe src="([^"]+)"')
-            if 'server' in url:
-                hidden_data = get_source(hidden_url)
-                url = scrapertools.find_single_match(hidden_data, '<iframe src="([^"]+)"')
+            if not 'vidoza' in video_url:
+                url_data = get_source(video_url)
+
+            if 'vidoza' not in video_url:
+                url = scrapertools.find_single_match(url_data, '<iframe src="([^"]+)"')
+            else:
+                url = video_url
-            if not 'server' in url:
-                url = url
-            if 'pelishd.net' in url:
-                vip_data = httptools.downloadpage(url, headers={'Referer':item.url}, follow_redirects=False).data
-                dejuiced = generictools.dejuice(vip_data)
-                url = scrapertools.find_single_match(dejuiced, '"file":"([^"]+)"')

+            if 'pelishd' in url:
+                vip_data = httptools.downloadpage(url, headers={'Referer':item.url}, follow_redirects=False)
+                try:
+                    dejuiced = generictools.dejuice(vip_data.data)
+                    url = scrapertools.find_single_match(dejuiced, '"file":"([^"]+)"')
+                except:
+                    pass

-            if url != '':
+            if url != '' and 'rekovers' not in url:
                 itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', language=IDIOMAS[language],
                                      infoLabels=item.infoLabels))

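The pelisplus branch above now POSTs {'r': item.url, 'd': 'www.pelisplus.net'} to the site's /api/source/ endpoint and scrapes the file/label pairs out of the response with a regex. A standalone sketch of just that parsing half; the response body below is invented, and the real request also needs the Referer header shown in the diff:

# Standalone sketch of the file/label parsing used in the pelisplus branch.
import re

sample_body = '{"data":[{"file":"\\/redirect\\/abc123.mp4","label":"720p"}]}'

patron = '"file":"([^"]+)","label":"([^"]+)"'
for url, quality in re.compile(patron, re.DOTALL).findall(sample_body):
    # Unescape the JSON slashes and prepend the host, as the diff does.
    url = 'https://www.pelisplus.net' + url.replace('\\/', '/')
    print(quality, url)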
@@ -154,14 +154,15 @@ def search(item, texto):
 def findvideos(item):
     logger.info()
     itemlist = []
-    servers = {'netu': 'http://hqq.tv/player/embed_player.php?vid=',
+    servers = {'netu': 'https://hqq.tv/player/embed_player.php?vid=',
                'open': 'https://openload.co/embed/',
                'netv': 'http://goo.gl/',
                'gamo': 'http://gamovideo.com/embed-',
                'powvideo': 'http://powvideo.net/embed-',
                'play': 'http://streamplay.to/embed-',
                'vido': 'http://vidoza.net/embed-',
-               'net': 'http://hqq.tv/player/embed_player.php?vid='
+               'net': 'https://hqq.tv/player/embed_player.php?vid=',
+               'ntu': 'https://hqq.tv/player/embed_player.php?vid='
                }
     data = get_source(item.url)
     noemitido = scrapertools.find_single_match(data, '<p><img src=(http://darkiller.com/images/subiendo.png) border=0\/><\/p>')
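The code that consumes the `servers` dict is outside this hunk, so the following is only an illustrative sketch of how such a lookup table is typically used: the short key scraped from the page selects an embed prefix and the video id is appended. The key and id below are invented:

# Illustrative sketch only; the real lookup logic lives outside this hunk.
servers = {'gamo': 'http://gamovideo.com/embed-',
           'netu': 'https://hqq.tv/player/embed_player.php?vid='}

scraped_key, video_id = 'gamo', 'abc123'
embed_url = servers[scraped_key] + video_id
print(embed_url)  # http://gamovideo.com/embed-abc123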
@@ -80,9 +80,6 @@ def select_menu(item):
     itemlist.append(Item(channel=item.channel, title='Por Año', action='section', url=url,
                          thumbnail=get_thumb('year', auto=True), type='all'))

-    itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=url + 'ajax/1/?q=',
-                         thumbnail=get_thumb("search", auto=True), type=item.type))
-
     return itemlist

 def sub_menu(item):
@@ -121,6 +118,8 @@ def sub_menu(item):
                          url=url + '?q=%s+subtitulado' % link_type, action='list_all',
                          thumbnail=get_thumb('vose', auto=True), type=item.type, send_lang='VOSE',
                          link_type=link_type))
+    itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=url + '?q=',
+                         thumbnail=get_thumb("search", auto=True), type=item.type, link_type=link_type))

     return itemlist

plugin.video.alfa/servers/uqload.json (new file, 42 lines)
@@ -0,0 +1,42 @@
+{
+    "active": true,
+    "find_videos": {
+        "ignore_urls": [],
+        "patterns": [
+            {
+                "pattern": "https://uqload.com/embed-([a-z0-9]+).html",
+                "url": "https://uqload.com/embed-\\1.html"
+            }
+        ]
+    },
+    "free": true,
+    "id": "uqload",
+    "name": "uqload",
+    "settings": [
+        {
+            "default": false,
+            "enabled": true,
+            "id": "black_list",
+            "label": "@60654",
+            "type": "bool",
+            "visible": true
+        },
+        {
+            "default": 0,
+            "enabled": true,
+            "id": "favorites_servers_list",
+            "label": "@60655",
+            "lvalues": [
+                "No",
+                "1",
+                "2",
+                "3",
+                "4",
+                "5"
+            ],
+            "type": "list",
+            "visible": false
+        }
+    ],
+    "thumbnail": "https://uqload.com/img/logo.png?v=0"
+}
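The find_videos block gives Alfa's generic resolver a regex to recognise uqload links and a template to rebuild the canonical embed URL from the captured id. A rough sketch of how such a pattern/url pair can be applied; the page fragment is invented, and the exact way servertools consumes these fields may differ:

# Sketch: apply the pattern/url pair from uqload.json to a page fragment.
import re

pattern = r"https://uqload.com/embed-([a-z0-9]+).html"
url_template = "https://uqload.com/embed-\\1.html"

page = '<iframe src="https://uqload.com/embed-a1b2c3d4e5.html" frameborder="0">'
for video_id in re.findall(pattern, page):
    # Substitute the captured id back into the "url" template.
    print(url_template.replace("\\1", video_id))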
plugin.video.alfa/servers/uqload.py (new file, 34 lines)
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+# --------------------------------------------------------
+# Conector Uqload By Alfa development Group
+# --------------------------------------------------------
+
+import re
+from core import httptools
+from platformcode import logger
+
+
+def test_video_exists(page_url):
+    logger.info("(page_url='%s')" % page_url)
+
+    data = httptools.downloadpage(page_url)
+
+    if data.code == 404:
+        return False, "[Uqload] El archivo no existe o ha sido borrado"
+
+    return True, ""
+
+
+def get_video_url(page_url, premium=False, user="", password="", video_password=""):
+    logger.info("url=" + page_url)
+
+    video_urls = []
+    data = httptools.downloadpage(page_url).data
+    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+    patron = 'sources:.?\["([^"]+)"\]'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+
+    for url in matches:
+        video_urls.append(["[uqload]", url])
+
+    return video_urls
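get_video_url() relies on the sources: [...] array that uqload embeds in its player page. A quick offline check of that regex against an invented page fragment; the player call and mp4 URL below are made up:

# Offline check of the `patron` used in get_video_url(); the fragment is invented.
import re

fragment = 'var player = new Clappr.Player({sources: ["https://m180.uqload.com/abcd/v.mp4"],'
patron = r'sources:.?\["([^"]+)"\]'
print(re.findall(patron, fragment))  # ['https://m180.uqload.com/abcd/v.mp4']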