playview + new servers + fixes
65  plugin.video.alfa/channels/playview.json  Normal file
@@ -0,0 +1,65 @@
{
    "id": "playview",
    "name": "Playview",
    "active": true,
    "adult": false,
    "language": [
        "lat", "cast"
    ],
    "thumbnail": "https://s15.postimg.cc/pkcz7kda3/playview.png",
    "banner": "",
    "version": 1,
    "categories": [
        "movie",
        "tvshow",
        "anime"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": false,
            "visible": false
        },
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostrar enlaces en idioma...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No filtrar",
                "Cast",
                "Lat",
                "VOSE"
            ]
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Incluir en Novedades - Peliculas",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_infantiles",
            "type": "bool",
            "label": "Incluir en Novedades - Infantiles",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_terror",
            "type": "bool",
            "label": "Incluir en Novedades - terror",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
317  plugin.video.alfa/channels/playview.py  Normal file
@@ -0,0 +1,317 @@
# -*- coding: utf-8 -*-
# -*- Channel Playview -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import re
import urllib

from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb


IDIOMAS = {'Latino': 'Lat', 'Español': 'Cast', 'Subtitulado': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = ['HD 1080p', 'HD 720p', 'DVDRIP', 'CAM']
list_servers = ['openload', 'vidoza', 'clipwatching', 'fastplay', 'flashx', 'gamovideo', 'powvideo', 'streamango',
                'streamcherry', 'rapidvideo']

host = 'https://playview.io/'


def mainlist(item):
    logger.info()

    autoplay.init(item.channel, list_servers, list_quality)

    itemlist = []

    itemlist.append(Item(channel=item.channel, title='Películas', action='submenu', type='movie',
                         thumbnail=get_thumb('movies', auto=True)))
    itemlist.append(Item(channel=item.channel, title='Series', action='submenu', type='tvshow',
                         thumbnail=get_thumb('tvshows', auto=True)))
    itemlist.append(Item(channel=item.channel, title='Anime', action='list_all', url=host + 'anime-online',
                         type='tvshow', first=0, thumbnail=get_thumb('anime', auto=True)))
    itemlist.append(Item(channel=item.channel, title='Buscar', action='search', url=host + 'search/',
                         thumbnail=get_thumb('search', auto=True)))

    autoplay.show_option(item.channel, itemlist)

    return itemlist


def submenu(item):
    logger.info()

    itemlist = []
    if item.type == 'movie':
        itemlist.append(
            Item(channel=item.channel, title='Todas', action='list_all', url=host + 'peliculas-online', type='movie',
                 first=0, thumbnail=get_thumb('all', auto=True)))
        itemlist.append(
            Item(channel=item.channel, title='Generos', action='genres', thumbnail=get_thumb('genres', auto=True)))
    else:
        itemlist.append(
            Item(channel=item.channel, title='Todas', action='list_all', url=host + 'series-online', type='tvshow',
                 first=0, thumbnail=get_thumb('all', auto=True)))
        itemlist.append(
            Item(channel=item.channel, title='Series Animadas', action='list_all', url=host + 'series-animadas-online',
                 type='tvshow', first=0, thumbnail=get_thumb('animacion', auto=True)))

    return itemlist


def get_source(url):
    logger.info()
    data = httptools.downloadpage(url).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data


def genres(item):
    logger.info()

    itemlist = []

    data = get_source(host)
    patron = '<li value=(\d+)><a href=(.*?)>(.*?)</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for value, url, title in matches:
        if value not in ['1', '4', '22', '23', '24']:
            if value == '20':
                title = 'Familiar'
            itemlist.append(Item(channel=item.channel, title=title, action='list_all', url=url, type='Movie', first=0))

    return sorted(itemlist, key=lambda i: i.title)


def list_all(item):
    logger.info()

    itemlist = []
    next = False

    data = get_source(item.url)
    patron = 'spotlight_container>.*?image lazy data-original=(.*?)>.*?<div class=spotlight_title>(.*?)<'
    patron += '(.*?) sres>(\d{4})<.*?playLink href=(.*?)>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    first = item.first
    last = first + 19
    if last > len(matches):
        last = len(matches)
        next = True

    for scrapedthumbnail, scrapedtitle, type_data, year, scrapedurl in matches[first:last]:

        url = scrapedurl
        title = scrapedtitle
        season = scrapertools.find_single_match(type_data, 'class=title-season>Temporada<.*?> (\d+) <')
        episode = scrapertools.find_single_match(type_data, 'class=title-season>Episodio<.*?> (\d+) <')
        if season != '' or episode != '':
            item.type = 'tvshow'
        else:
            item.type = 'movie'

        new_item = Item(channel=item.channel, title=title, url=url, thumbnail=scrapedthumbnail, type=item.type,
                        infoLabels={'year': year})

        if item.type == 'tvshow':
            new_item.action = 'episodios'
            new_item.contentSerieName = scrapedtitle
            season = season.strip()
            episode = episode.strip()
            if season == '':
                if 'Anime' in item.title:
                    season = 1
                else:
                    season = scrapertools.find_single_match(url, '.*?temp-(\d+)')
                new_item.contentSeasonNumber = season
            else:
                new_item.contentSeasonNumber = season

            if episode != '':
                new_item.contentEpisodeNumber = episode

            if season != '' and episode != '':
                new_item.title = '%s %sx%s' % (new_item.title, season, episode)
            elif episode == '':
                new_item.title = '%s Temporada %s' % (new_item.title, season)
        else:
            new_item.action = 'findvideos'
            new_item.contentTitle = scrapedtitle
        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Paginación

    if not next:
        url_next_page = item.url
        first = last
    else:
        url_next_page = scrapertools.find_single_match(data, "<a href=([^ ]+) class=page-link aria-label=Next>")
        first = 0

    if url_next_page:
        itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all', first=first))
    return itemlist


def get_data(post):
    logger.info()

    post = urllib.urlencode(post)
    data = httptools.downloadpage(host + 'playview', post=post).data

    return data


def episodios(item):
    logger.info()

    itemlist = []
    data = get_source(item.url)
    try:
        id, type = scrapertools.find_single_match(data, 'data-id=(\d+) data-type=(.*?) ')
        post = {'set': 'LoadOptionsEpisode', 'action': 'EpisodeList', 'id': id, 'type': '1'}
        data = get_data(post)
        patron = 'data-episode="(\d+)".*?title="(.*?)"'
        matches = re.compile(patron, re.DOTALL).findall(data)
        infoLabels = item.infoLabels
        for episode, title in matches:
            post = {'set': 'LoadOptionsEpisode', 'action': 'Step1', 'id': id, 'type': '1',
                    'episode': episode}
            season = scrapertools.find_single_match(item.url, '.*?temp-(\d+)')
            if season == '':
                season = 1
            infoLabels['season'] = season
            infoLabels['episode'] = episode
            if title[0].isdigit():
                title = '%sx%s' % (season, title)
            else:
                title = '%sx%s - %s' % (season, episode, title)
            itemlist.append(Item(channel=item.channel, title=title, contentSeasonNumber=season,
                                 contentEpisodeNumber=episode, action='findvideos', post=post, type=item.type,
                                 infoLabels=infoLabels))

        tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

        if config.get_videolibrary_support() and len(itemlist) > 0:
            itemlist.append(
                Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
                     action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName, ))
    except:
        pass

    return itemlist


def findvideos(item):
    logger.info()
    itemlist = []
    set_mode = 'LoadOptions'
    episode = ''
    if item.type == 'tvshow':
        post = item.post
        id = post['id']
        episode = post['episode']
        type = post['type']
        set_mode = 'LoadOptionsEpisode'

    else:
        data = get_source(item.url)
        try:
            id, type = scrapertools.find_single_match(data, 'data-id=(\d+) data-type=(.*?) ')
            post = {'set': set_mode, 'action': 'Step1', 'id': id, 'type': type}
        except:
            pass

    try:
        data = get_data(post)
        patron = 'data-quality="(.*?)"'
        matches = re.compile(patron, re.DOTALL).findall(data)

        for quality in matches:
            post = {'set': set_mode, 'action': 'Step2', 'id': id, 'type': type, 'quality': quality, 'episode': episode}
            data = get_data(post)
            patron = 'getplayer" data-id="(\d+)"> <h4>(.*?)</h4>.*?title="(.*?)"'
            matches = re.compile(patron, re.DOTALL).findall(data)

            for video_id, language, server in matches:
                post = {'set': set_mode, 'action': 'Step3', 'id': video_id, 'type': type}
                data = get_data(post)
                url = scrapertools.find_single_match(data, '<iframe class="embed.*?src="(.*?)"')
                if 'clipwatching' in url:
                    url = url.replace('https://clipwatching.com/embed-', '')
                title = '%s [%s] [%s]'
                quality = quality.replace('(', '').replace(')', '')
                if url != '':
                    itemlist.append(Item(channel=item.channel, title=title, language=IDIOMAS[language], url=url,
                                         action='play', quality=quality, infoLabels=item.infoLabels))
        itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % (i.server.capitalize(), i.quality,
                                                                                   i.language))

        itemlist = sorted(itemlist, key=lambda i: i.language)

        # Requerido para FilterTools

        itemlist = filtertools.get_links(itemlist, item, list_language)

        # Requerido para AutoPlay

        autoplay.start(itemlist, item)

        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos' and type == 'Movie':
            itemlist.append(
                Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                     url=item.url, action="add_pelicula_to_library", extra="findvideos",
                     contentTitle=item.contentTitle))
        return itemlist
    except:
        return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    item.first = 0

    if texto != '':
        return list_all(item)


def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    item.type = 'movie'
    item.first = 0
    try:
        if categoria == 'peliculas':
            item.url = host + 'peliculas-online'
        elif categoria == 'infantiles':
            item.url = host + 'peliculas-online/animacion'
        elif categoria == 'terror':
            item.url = host + 'peliculas-online/terror'

        itemlist = list_all(item)
        if itemlist[-1].title == 'Siguiente >>':
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
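Note on the option flow: findvideos() resolves playable links by chaining three POSTs against host + 'playview' (Step1 lists the available qualities, Step2 lists the language/server options for a quality, Step3 returns the embed iframe). The stand-alone sketch below mirrors that flow outside the addon; it assumes the requests library and that the endpoint still answers these set/action fields the way the scraper expects. Inside the addon the same requests go through get_data(), i.e. httptools.downloadpage(host + 'playview', post=...).

```python
# Stand-alone sketch of the Step1/Step2/Step3 flow used by findvideos().
# Assumes the `requests` package and that https://playview.io/playview still
# accepts these set/action fields; IDs come from the page's data-id/data-type.
import re
import requests

HOST = 'https://playview.io/'

def resolve_options(content_id, content_type, episode=''):
    endpoint = HOST + 'playview'
    step1 = requests.post(endpoint, data={'set': 'LoadOptions', 'action': 'Step1',
                                          'id': content_id, 'type': content_type}).text
    for quality in re.findall(r'data-quality="(.*?)"', step1):
        step2 = requests.post(endpoint, data={'set': 'LoadOptions', 'action': 'Step2',
                                              'id': content_id, 'type': content_type,
                                              'quality': quality, 'episode': episode}).text
        options = re.findall(r'getplayer" data-id="(\d+)"> <h4>(.*?)</h4>.*?title="(.*?)"',
                             step2, re.DOTALL)
        for video_id, language, server in options:
            step3 = requests.post(endpoint, data={'set': 'LoadOptions', 'action': 'Step3',
                                                  'id': video_id, 'type': content_type}).text
            embed = re.search(r'<iframe class="embed.*?src="(.*?)"', step3, re.DOTALL)
            if embed:
                yield quality, language, server, embed.group(1)
```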
plugin.video.alfa/servers/clipwatching.py
@@ -3,6 +3,7 @@
 from core import httptools
 from core import scrapertools
 from platformcode import logger
+from lib import jsunpack
 
 
 def test_video_exists(page_url):
@@ -16,8 +17,10 @@ def test_video_exists(page_url):
 def get_video_url(page_url, user="", password="", video_password=""):
     logger.info("(page_url='%s')" % page_url)
     data = httptools.downloadpage(page_url).data
+    packed = scrapertools.find_single_match(data, "text/javascript'>(.*?)\s*</script>")
+    unpacked = jsunpack.unpack(packed)
     video_urls = []
-    videos = scrapertools.find_multiple_matches(data, 'file:"([^"]+).*?label:"([^"]+)')
+    videos = scrapertools.find_multiple_matches(unpacked, 'file:"([^"]+).*?label:"([^"]+)')
     for video, label in videos:
         video_urls.append([label + " [clipwatching]", video])
     logger.info("Url: %s" %videos)
42  plugin.video.alfa/servers/cloudvideo.json  Normal file
@@ -0,0 +1,42 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "https://cloudvideo.tv/embed-([a-z0-9]+).html",
                "url": "https://cloudvideo.tv/embed-\\1.html"
            }
        ]
    },
    "free": true,
    "id": "cloudvideo",
    "name": "cloudvideo",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "Incluir en lista negra",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "Incluir en lista de favoritos",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "https://cloudvideo.tv/static/img/logo5.png"
}
43  plugin.video.alfa/servers/cloudvideo.py  Normal file
@@ -0,0 +1,43 @@
# Conector Cloudvideo By Alfa development Group
# --------------------------------------------------------

import re
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)

    data = httptools.downloadpage(page_url)

    if data.code == 404:
        return False, "[Cloud] El archivo no existe o ha sido borrado"

    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)

    video_urls = []
    data = httptools.downloadpage(page_url).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    enc_data = scrapertools.find_single_match(data, "type='text/javascript'>(.*?)</script>")
    dec_data = jsunpack.unpack(enc_data)
    sources = scrapertools.find_single_match(dec_data, "sources:\[(.*?)]")
    patron = "{file:(.*?)}"
    matches = re.compile(patron, re.DOTALL).findall(sources)
    scrapertools.printMatches(matches)
    for url in matches:
        quality = 'm3u8'
        video_url = url
        if 'label' in url:
            url = url.split(',')
            video_url = url[0]
            quality = url[1].replace('label:', '')
        video_urls.append(['cloudvideo [%s]' % quality, video_url])

    return video_urls
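cloudvideo.py relies on jsunpack to undo the packed eval() block and then pulls each {file:...} entry out of the sources array. As a reference for that second step, the same extraction run on a fabricated, already-unpacked string (the real payload comes from jsunpack.unpack on the page data, with quotes already stripped by the re.sub above):

```python
# Illustration only: dec_data below is a made-up stand-in for what
# jsunpack.unpack() returns after the connector strips quotes from the page.
import re

dec_data = ('jwplayer(vplayer).setup({sources:[{file:https://example.invalid/v.m3u8},'
            '{file:https://example.invalid/v.mp4,label:720p}],image:poster.jpg})')

sources = re.search(r"sources:\[(.*?)]", dec_data).group(1)
for entry in re.findall(r"\{file:(.*?)\}", sources):
    quality, video_url = 'm3u8', entry
    if 'label' in entry:
        video_url, quality = entry.split(',')[0], entry.split(',')[1].replace('label:', '')
    print(['cloudvideo [%s]' % quality, video_url])
# -> ['cloudvideo [m3u8]', 'https://example.invalid/v.m3u8']
# -> ['cloudvideo [720p]', 'https://example.invalid/v.mp4']
```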
42  plugin.video.alfa/servers/filevideo.json  Normal file
@@ -0,0 +1,42 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "https://www.filevideo.net/embed-(?:embed-|)([A-z0-9]+)",
                "url": "http://filevideo.net/embed-\\1.html"
            }
        ]
    },
    "free": true,
    "id": "filevideo",
    "name": "filevideo",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "Incluir en lista negra",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "Incluir en lista de favoritos",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "https://s15.postimg.cc/b7jj9dbbf/filevideo.png"
}
42  plugin.video.alfa/servers/filevideo.py  Normal file
@@ -0,0 +1,42 @@
# -*- coding: utf-8 -*-

from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)

    data = httptools.downloadpage(page_url).data

    if "Not Found" in data or "File was deleted" in data:
        return False, "[Filevideo] El fichero no existe o ha sido borrado"

    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)

    data = httptools.downloadpage(page_url).data
    enc_data = scrapertools.find_single_match(data, "type='text/javascript'>(eval.*?)\s*</script>")
    dec_data = jsunpack.unpack(enc_data)

    video_urls = []
    media_urls = scrapertools.find_multiple_matches(dec_data, '\{file\s*:\s*"([^"]+)",label\s*:\s*"([^"]+)"\}')
    for media_url, label in media_urls:
        ext = scrapertools.get_filename_from_url(media_url)[-4:]
        video_urls.append(["%s %sp [filevideo]" % (ext, label), media_url])

    video_urls.reverse()
    m3u8 = scrapertools.find_single_match(dec_data, '\{file\:"(.*?.m3u8)"\}')
    if m3u8:
        title = video_urls[-1][0].split(" ", 1)[1]
        video_urls.insert(0, [".m3u8 %s" % title, m3u8])

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
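The list ordering in filevideo's get_video_url is easy to misread: labelled qualities are collected, the list is reversed so the highest resolution ends up first, and an optional .m3u8 master is then pushed to the front reusing the label of the last (lowest) entry. A small stand-alone illustration with placeholder URLs:

```python
# Placeholder URLs; this only demonstrates how the final list is ordered.
video_urls = [[".mp4 360p [filevideo]", "http://example.invalid/360.mp4"],
              [".mp4 720p [filevideo]", "http://example.invalid/720.mp4"]]

video_urls.reverse()                                     # highest quality first
m3u8 = "http://example.invalid/master.m3u8"
if m3u8:
    title = video_urls[-1][0].split(" ", 1)[1]           # "360p [filevideo]"
    video_urls.insert(0, [".m3u8 %s" % title, m3u8])     # m3u8 entry offered first

print([label for label, url in video_urls])
# ['.m3u8 360p [filevideo]', '.mp4 720p [filevideo]', '.mp4 360p [filevideo]']
```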
42  plugin.video.alfa/servers/upvid.json  Normal file
@@ -0,0 +1,42 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "https://upvid.co/embed-([a-z0-9]+).html",
                "url": "https://upvid.co/embed-\\1.html"
            }
        ]
    },
    "free": true,
    "id": "upvid",
    "name": "upvid",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "Incluir en lista negra",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "Incluir en lista de favoritos",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "https://s15.postimg.cc/gz0tee0gb/zt_Oi_E6_S-_400x400.jpg"
}
83  plugin.video.alfa/servers/upvid.py  Normal file
@@ -0,0 +1,83 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector UpVID By Alfa development Group
# --------------------------------------------------------

import re
import urllib
from core import httptools
from core import scrapertools
from platformcode import logger

import sys, os
import re, base64
from lib.aadecode import decode as aadecode


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)

    data = httptools.downloadpage(page_url)

    if data.code == 404:
        return False, "[upvid] El archivo no existe o ha sido borrado"

    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)

    video_urls = []
    headers = {'referer': page_url}

    for i in range(0, 3):
        data = httptools.downloadpage(page_url, headers=headers).data
        data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
        if '<input type=hidden' in data:
            break
        else:
            page_url = scrapertools.find_single_match(data, "iframe src=(.*?) scrolling")


    # logger.debug(data)
    # decodificar función para obtener función y clave
    # ------------------------------------------------
    code = re.findall('<script>\s*゚ω゚(.*?)</script>', data, flags=re.DOTALL)[0]
    text_decode = aadecode(code)
    funcion, clave = re.findall("func\.innerHTML = (\w*)\('([^']*)', ", text_decode, flags=re.DOTALL)[0]

    # decodificar javascript en campos html hidden
    # --------------------------------------------
    oculto = re.findall('<input type=hidden value=([^ ]+) id=func', data, flags=re.DOTALL)[0]
    funciones = resuelve(clave, base64.b64decode(oculto))

    oculto = re.findall('<input type=hidden value=([^ ]+) id=code', data, flags=re.DOTALL)[0]
    codigo = resuelve(clave, base64.b64decode(oculto))

    url, type = scrapertools.find_single_match(funciones, "setAttribute\('src', '(.*?)'\);\s.*?type', 'video/(.*?)'")

    video_urls.append(['upvid [%s]' % type, url])

    return video_urls


def resuelve(r, o):
    a = ''
    n = 0
    e = range(256)
    for f in range(256):
        n = (n + e[f] + ord(r[(f % len(r))])) % 256
        t = e[f]
        e[f] = e[n]
        e[n] = t
    f = 0
    n = 0
    for h in range(len(o)):
        f = (f + 1) % 256
        n = (n + e[f]) % 256
        t = e[f]
        e[f] = e[n]
        e[n] = t
        a += chr(ord(o[h]) ^ e[(e[f] + e[n]) % 256])
    return a
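resuelve() is a line-by-line port of RC4: the first loop is the key-scheduling algorithm, the second generates the keystream and XORs it over the base64-decoded payload. Since RC4 is its own inverse, applying the function twice with the same key must return the original text, which gives a quick sanity check outside Kodi (portable copy of the function, no addon imports):

```python
# RC4 as implemented by resuelve(); list(range(...)) keeps it Python 2/3 portable.
def resuelve(r, o):
    a = ''
    n = 0
    e = list(range(256))
    for f in range(256):                       # key-scheduling (KSA)
        n = (n + e[f] + ord(r[f % len(r)])) % 256
        e[f], e[n] = e[n], e[f]
    f = n = 0
    for h in range(len(o)):                    # keystream generation + XOR (PRGA)
        f = (f + 1) % 256
        n = (n + e[f]) % 256
        e[f], e[n] = e[n], e[f]
        a += chr(ord(o[h]) ^ e[(e[f] + e[n]) % 256])
    return a

assert resuelve('clave', resuelve('clave', 'texto oculto')) == 'texto oculto'
```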
42  plugin.video.alfa/servers/vidzella.json  Normal file
@@ -0,0 +1,42 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "https://vidzella.me/e/([a-zA-Z0-9]+)",
                "url": "https://vidzella.me/e/\\1"
            }
        ]
    },
    "free": true,
    "id": "vidzella",
    "name": "vidzella",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "Incluir en lista negra",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "Incluir en lista de favoritos",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "https://s15.postimg.cc/albqao5pn/vidzella.png"
}
33  plugin.video.alfa/servers/vidzella.py  Normal file
@@ -0,0 +1,33 @@
# Conector Vidzella By Alfa development Group
# --------------------------------------------------------

import re
from core import httptools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)

    data = httptools.downloadpage(page_url)

    if data.code == 404:
        return False, "[Vidzella] El archivo no existe o ha sido borrado"

    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)

    video_urls = []
    data = httptools.downloadpage(page_url).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    logger.debug(data)
    patron = "src=([^ ]+) type='.*?/(.*?)'"
    matches = re.compile(patron, re.DOTALL).findall(data)

    for url, type in matches:
        video_urls.append(['vidzella %s' % type, url])

    return video_urls
42  plugin.video.alfa/servers/vivo.json  Normal file
@@ -0,0 +1,42 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "https://vivo.sx/([a-zA-Z0-9]+)",
                "url": "https://vivo.sx/\\1"
            }
        ]
    },
    "free": true,
    "id": "vivo",
    "name": "vivo",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "Incluir en lista negra",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "Incluir en lista de favoritos",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "https://s15.postimg.cc/oiyhtpdqj/vivo.png"
}
39  plugin.video.alfa/servers/vivo.py  Normal file
@@ -0,0 +1,39 @@
# Conector Vivo By Alfa development Group
# --------------------------------------------------------

import re
import base64
from core import httptools
from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)

    data = httptools.downloadpage(page_url)

    if data.code == 404:
        return False, "[Vivo] El archivo no existe o ha sido borrado"

    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)

    video_urls = []
    data = httptools.downloadpage(page_url).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)

    enc_data = scrapertools.find_single_match(data, "Core.InitializeStream \('(.*?)'\)")
    logger.debug(enc_data)
    dec_data = base64.b64decode(enc_data)

    logger.debug(dec_data)

    for url in eval(dec_data):
        video_urls.append(['vivo', url])

    return video_urls
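vivo.py expects Core.InitializeStream('...') to carry a base64-encoded list literal, which it decodes and evals into the stream URLs. A sketch of that decode step on a fabricated payload; ast.literal_eval is used here as a safer stand-in for eval on a plain list (the connector itself calls eval):

```python
# Fabricated payload: a base64-encoded list literal, shaped like what the
# connector extracts from Core.InitializeStream('...').
import ast
import base64

enc_data = base64.b64encode(b'["https://example.invalid/stream.mp4"]').decode()

dec_data = base64.b64decode(enc_data).decode()
video_urls = [['vivo', url] for url in ast.literal_eval(dec_data)]
print(video_urls)  # [['vivo', 'https://example.invalid/stream.mp4']]
```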
plugin.video.alfa/servers/watchvideo.json
@@ -6,6 +6,10 @@
             {
                 "pattern": "watchvideo.us/(?:embed-|)([A-z0-9]+)",
                 "url": "http://watchvideo.us/embed-\\1.html"
-            }
+            },
+            {
+                "pattern": "watchvideo17.us/(?:embed-|)([A-z0-9]+)",
+                "url": "http://watchvideo.us/embed-\\1.html"
+            }
         ]
     },
plugin.video.alfa/servers/watchvideo.py
@@ -2,6 +2,7 @@
 
 from core import httptools
 from core import scrapertools
+from lib import jsunpack
 from platformcode import logger
 
 
@@ -20,15 +21,17 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     logger.info("url=" + page_url)
 
     data = httptools.downloadpage(page_url).data
+    enc_data = scrapertools.find_single_match(data, "type='text/javascript'>(eval.*?)\s*</script>")
+    dec_data = jsunpack.unpack(enc_data)
 
     video_urls = []
-    media_urls = scrapertools.find_multiple_matches(data, '\{file\s*:\s*"([^"]+)",label\s*:\s*"([^"]+)"\}')
+    media_urls = scrapertools.find_multiple_matches(dec_data, '\{file\s*:\s*"([^"]+)",label\s*:\s*"([^"]+)"\}')
     for media_url, label in media_urls:
         ext = scrapertools.get_filename_from_url(media_url)[-4:]
         video_urls.append(["%s %sp [watchvideo]" % (ext, label), media_url])
 
     video_urls.reverse()
-    m3u8 = scrapertools.find_single_match(data, '\{file\:"(.*?.m3u8)"\}')
+    m3u8 = scrapertools.find_single_match(dec_data, '\{file\:"(.*?.m3u8)"\}')
     if m3u8:
         title = video_urls[-1][0].split(" ", 1)[1]
         video_urls.insert(0, [".m3u8 %s" % title, m3u8])