Merge remote-tracking branch 'alfa-addon/master' into fixes
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.5.18" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.5.19" provider-name="Alfa Addon">
    <requires>
        <import addon="xbmc.python" version="2.1.0"/>
        <import addon="script.module.libtorrent" optional="true"/>
@@ -19,15 +19,13 @@
    </assets>
    <news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
» grantorrent » descargas2020
» torrentlocura » torrentrapid
» tumejortorrent » tvsinpagar
» mispelisyseries » vidoza
» streamplay » powvideo
» streamcloud
» cinecalidad » animemovil
» seriesverde » animejl
» pelisipad » gmobi
» inkapelis » pelisgratis
» pelispedia » animeyt
» qserie
¤ arreglos internos

¤ Agradecimientos al equipo iSOD, @alaquepasa por colaborar con ésta versión.

    </news>
    <description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>

30  plugin.video.alfa/channels/animejl.json  Normal file
@@ -0,0 +1,30 @@
{
    "id": "animejl",
    "name": "AnimeJL",
    "active": true,
    "adult": false,
    "language": ["cast", "lat"],
    "thumbnail": "https://www.animejl.net/img/Logo.png",
    "banner": "",
    "categories": [
        "anime"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_anime",
            "type": "bool",
            "label": "Incluir en Novedades - Episodios de anime",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
198  plugin.video.alfa/channels/animejl.py  Normal file
@@ -0,0 +1,198 @@
# -*- coding: utf-8 -*-
# -*- Channel AnimeJL -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import re

from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
from channelselector import get_thumb


host = 'https://www.animejl.net/'


def mainlist(item):
    logger.info()

    itemlist = []

    itemlist.append(Item(channel=item.channel, title="Nuevos Episodios", action="new_episodes",
                         thumbnail=get_thumb('new_episodes', auto=True), url=host))

    itemlist.append(Item(channel=item.channel, title="Todas", action="list_all", thumbnail=get_thumb('all', auto=True),
                         url=host + 'animes'))

    itemlist.append(Item(channel=item.channel, title="Series", action="list_all",
                         thumbnail=get_thumb('tvshows', auto=True), url=host + 'animes?type%5B%5D=1&order=default'))

    itemlist.append(Item(channel=item.channel, title="Películas", action="list_all",
                         thumbnail=get_thumb('movies', auto=True), url=host + 'animes?type%5B%5D=2&order=default'))

    itemlist.append(
        Item(channel=item.channel, title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
             url=host + 'animes?q='))

    return itemlist


def get_source(url):
    logger.info()
    data = httptools.downloadpage(url).data
    # Aplana el HTML: quita saltos de línea, espacios repetidos, comillas y paréntesis
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}|"|\(|\)', "", data)
    return data


def new_episodes(item):
    logger.info()

    itemlist = []

    data = get_source(item.url)
    data = scrapertools.find_single_match(data, "<h2>Últimos episodios</h2>.*?</ul>")
    patron = "<li><a href='(.*?)' class.*?<img src='(.*?)' alt='(.*?)'></span><span class='Capi'>(.*?)</span>"

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedepi in matches:
        url = host + scrapedurl
        thumbnail = host + scrapedthumbnail
        title = '%s %s' % (scrapedtitle, scrapedepi)
        itemlist.append(Item(channel=item.channel, action='findvideos',
                             title=title,
                             url=url,
                             thumbnail=thumbnail,
                             contentSerieName=scrapedtitle,
                             ))

    return itemlist


def list_all(item):
    logger.info()

    itemlist = []

    data = get_source(item.url)
    patron = "<article class='Anime alt B'><a href='(.*?)'>.*?class=.*?<img src='(.*?)' alt='(.*?)'>"
    patron += "</figure><span class='Type .*?'>(.*?)</span>.*?star.*?<p>(.*?)</p>"

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle, type, plot in matches:
        url = host + scrapedurl
        thumbnail = host + scrapedthumbnail
        title = scrapedtitle
        season = ''
        if 'season' in scrapedtitle.lower():
            season = scrapertools.find_single_match(scrapedtitle, 'season (\d+)')
            scrapedtitle = scrapertools.find_single_match(scrapedtitle, '(.*?) season')

        new_item = Item(channel=item.channel, action='episodios',
                        title=title,
                        url=url,
                        thumbnail=thumbnail,
                        contentSerieName=scrapedtitle,
                        plot=plot,
                        type=item.type,
                        infoLabels={}
                        )
        if type.lower() == 'anime':
            new_item.contentSerieName = scrapedtitle
            new_item.contentSeasonNumber = season
        else:
            new_item.contentTitle = scrapedtitle

        itemlist.append(new_item)

    # Paginacion
    next_page = scrapertools.find_single_match(data,
                                               "<li><a href='([^']+)'><span>»</span></a></li></ul>")
    if next_page != '':
        itemlist.append(Item(channel=item.channel,
                             action="list_all",
                             title=">> Página siguiente",
                             url=host + next_page,
                             thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'
                             ))

    return itemlist


def episodios(item):
    logger.info()
    itemlist = []

    base_data = get_source(item.url)
    data = scrapertools.find_single_match(base_data, '<div class=Title>Lista de episodios</div>.*?</ul>')
    if data == '':
        data = scrapertools.find_single_match(base_data, '<div class=Title>Formatos disponibles</div>.*?</ul>')

    if 'listepisodes' in data.lower():
        patron = "<li><a href='(.*?)' class.*?>(.*?)<i class='fa-eye-slash'></i></a></li>"
    elif 'listcaps' in data.lower():
        patron = "<a href=(.*?)>.*?alt=(.*?)>"
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        title = scrapedtitle.strip()
        # Localiza el número de episodio al final del título
        n = 0
        for char in title[::-1]:
            n += 1
            if char == ' ':
                break
        episode = title[-n:]
        episode = scrapertools.find_single_match(episode, r' (\d+)')

        url = host + scrapedurl
        itemlist.append(Item(channel=item.channel, title='Episodio %s' % episode, thumbnail=item.thumbnail, url=url,
                             action='findvideos'))
    if item.type.lower() != 'anime' and len(itemlist) == 1:
        return findvideos(itemlist[0])
    else:
        return itemlist[::-1]


def findvideos(item):
    logger.info()

    itemlist = []
    data = get_source(item.url)
    itemlist.extend(servertools.find_video_items(data=data))

    for videoitem in itemlist:
        videoitem.title = '[%s]' % videoitem.server.capitalize()

    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    try:
        if texto != '':
            return list_all(item)
        else:
            return []
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == 'anime':
            item.url = host
            itemlist = new_episodes(item)
        if itemlist[-1].title == '>> Página siguiente':
            itemlist.pop()
        return itemlist
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
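A note on how these functions get called: as far as one can tell from the channel files in this commit, Alfa's launcher resolves each returned Item's `action` string to a function of the same name inside the channel module, which is why `mainlist` above only emits Items whose `action` matches a function defined in this file (`new_episodes`, `list_all`, `search`, ...). A minimal sketch of that dispatch, where `run_action` is an illustrative name rather than Alfa's real entry point:

# Illustrative sketch only; Alfa's real launcher lives in platformcode and handles many more cases.
import importlib

def run_action(item):
    channel = importlib.import_module('channels.' + item.channel)  # e.g. channels.animejl
    handler = getattr(channel, item.action)  # the action string names a function in the module
    return handler(item)  # each handler returns the next list of Items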
@@ -11,6 +11,7 @@ from core.item import Item
from platformcode import platformtools, config, logger


__modo_grafico__ = config.get_setting('modo_grafico', 'animemovil')
__perfil__ = ''

@@ -28,6 +29,7 @@ host = "http://animemovil.com"

def mainlist(item):
    logger.info()

    itemlist = []

    itemlist.append(Item(channel=item.channel, action="recientes", title="Episodios Recientes", thumbnail=item.thumbnail,
@@ -48,6 +50,8 @@ def mainlist(item):
    itemlist.append(item.clone(title="Configurar canal", action="openconfig", text_color=color5, folder=False))
    if renumbertools.context:
        itemlist = renumbertools.show_option(item.channel, itemlist)

    return itemlist

@@ -280,71 +284,42 @@ def findvideos(item):

    data = httptools.downloadpage(item.url).data
    data = re.sub(r'\n|\s{2,}', '', data)
    strm_id = scrapertools.find_single_match(data, '"id": (.*?),')
    streams = scrapertools.find_single_match(data, '"stream": (.*?)};')
    dict_strm = jsontools.load(streams)

    akiba_url = scrapertools.find_single_match(data, '<div class="x-link"><a href="(.*?)"')
    url = httptools.downloadpage('http:' + akiba_url, follow_redirects=False).headers.get('location')
    title = '%s (%s)' % (item.title, 'akiba')
    itemlist.append(item.clone(title=title, url=url, action='play'))
    base_url = 'http:%s%s/' % (dict_strm['accessPoint'], strm_id)
    for server in dict_strm['servers']:
        expire = dict_strm['expire']
        signature = dict_strm['signature']
        last_modify = dict_strm['last_modify']
        callback = 'playerWeb'

    info = scrapertools.find_single_match(data, 'episodio_info=(.*?);')
    dict_info = jsontools.load(info)

    servers = dict_info['stream']['servers']
    id = dict_info['id']
    access_point = dict_info['stream']['accessPoint']
    expire = dict_info['stream']['expire']
    callback = dict_info['stream']['callback']
    signature = dict_info['stream']['signature']
    last_modify = dict_info['stream']['last_modify']

    for server in servers:
        stream_info = 'http:%s/%s/%s?expire=%s&callback=%s&signature=%s&last_modify=%s' % \
                      (access_point, id, server, expire, callback, signature, last_modify)

        try:
            dict_stream = jsontools.load(httptools.downloadpage(stream_info).data)
            if dict_stream['status']:
                kind = dict_stream['result']['kind']
                if kind == 'iframe':
                    url = dict_stream['result']['src']
                    title = '%s (%s)' % (item.title, server)
                elif kind == 'jwplayer':
                    url_style = dict_stream['result']['setup']
                    if server != 'rin':

                        if 'playlist' in url_style:
                            part = 1
                            for media_list in url_style['playlist']:
                                url = media_list['file']
                                title = '%s (%s) - parte %s' % (item.title, server, part)
                                itemlist.append(item.clone(title=title, url=url, action='play'))
                                part += 1
                        else:
                            url = url_style['file']
                            title = '%s (%s)' % (item.title, server)
                    else:
                        src_list = url_style['sources']
                        for source in src_list:
                            url = source['file']
                            quality = source['label']
                            title = '%s [%s](%s)' % (item.title, quality, server)
                            itemlist.append(item.clone(title=title, url=url, action='play'))

                elif kind == 'javascript':
                    if 'jsCode' in dict_stream['result']:
                        jscode = dict_stream['result']['jsCode']
                        url = scrapertools.find_single_match(jscode, 'xmlhttp.open\("GET", "(.*?)"')
                        title = '%s (%s)' % (item.title, server)
        strm_url = base_url + '%s?expire=%s&callback=%s&signature=%s&last_modify=%s' % (server, expire, callback,
                                                                                        signature, last_modify)

        strm_data = httptools.downloadpage(strm_url).data
        strm_data = scrapertools.unescape(strm_data)
        title = '%s'
        language = ''
        if server not in ['fire', 'meph']:
            urls = scrapertools.find_multiple_matches(strm_data, '"(?:file|src)"*?:.*?"(.*?)"')
            for url in urls:
                if url != '':
                    itemlist.append(item.clone(title=title, url=url, action='play'))
        except:
            pass
    itemlist = servertools.get_servers_itemlist(itemlist)
                    url = url.replace ('\\/','/')
                    itemlist.append(Item(channel=item.channel, title=title, url=url, action='play'))
        elif server in ['fire', 'mpeh']:
            url = scrapertools.find_single_match(strm_data, 'xmlhttp.open(\"GET\", \"(.*?)\"')
            if url != '':
                url = url.replace('\\/', '/')
                itemlist.append(Item(channel=item.channel, title=url, url=url, action='play'))
        else:
            continue


    servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server)
    return itemlist


def newest(categoria):
    logger.info()
    item = Item()

@@ -11,6 +11,8 @@ from core.item import Item
from core import tmdb
from platformcode import config, logger

import gktools

__modo_grafico__ = config.get_setting('modo_grafico', 'animeyt')

HOST = "http://animeyt.tv/"
@@ -138,7 +140,7 @@ def episodios(item):
    matches = scrapertools.find_multiple_matches(data, patron)

    for url, scrapedtitle, episode in matches:


        season = 1
        episode = int(episode)
        season, episode = renumbertools.numbered_for_tratk(item.channel, scrapedtitle, season, episode)
@@ -158,16 +160,20 @@ def findvideos(item):
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)

    patron = 'Player\("(.*?)"'
    # ~ patron = 'Player\("(.*?)"'
    patron = 'iframe src="([^"]*)"'

    matches = scrapertools.find_multiple_matches(data, patron)

    for url in matches:
        if "cldup" in url:
            title = "Opcion Cldup"
        if "chumi" in url:
            title = "Opcion Chumi"
        itemlist.append(item.clone(channel=item.channel, folder=False, title=title, action="play", url=url))
        title = scrapertools.find_single_match(url, '/([^\.]*)\.php\?')
        # ~ title = 'PDT'
        # ~ if "cldup" in url:
        # ~     title = "Opcion Cldup"
        # ~ if "chumi" in url:
        # ~     title = "Opcion Chumi"
        if title == 'rakuten':  # de momento es el único resuelto
            itemlist.append(item.clone(channel=item.channel, folder=False, title=title, action="play", url=url, referer=item.url))

    if item.extra != "library":
        if config.get_videolibrary_support() and item.extra:
@@ -176,16 +182,18 @@ def findvideos(item):
    return itemlist


def player(item):
def play(item):
    logger.info()
    itemlist = []

    if 'https://s2.animeyt.tv/rakuten.php?' in item.url:
        itemlist = gktools.gk_play(item)

    data = httptools.downloadpage(item.url, add_referer=True).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)

    url = scrapertools.find_single_match(data, 'sources: \[{file:\'(.*?)\'')

    itemlist = servertools.find_video_items(data=data)

    # PENDIENTE ANALIZAR DEMÁS CASOS...
    # ~ else:
    # ~     headers = {'Referer': item.referer}
    # ~     resp = httptools.downloadpage(item.url, headers=headers, cookies=False)
    # ~     with open('animeyt-play-%s.html' % item.title, 'w') as f: f.write(resp.data); f.close()

    return itemlist

@@ -1,7 +1,7 @@
{
    "id": "bajui",
    "name": "Bajui",
    "active": true,
    "active": false,
    "adult": false,
    "language": ["cast"],
    "thumbnail": "bajui.png",

@@ -67,7 +67,7 @@ def mainlist(item):
    itemlist.append(
        item.clone(title="CineCalidad Portugues",
                   action="submenu",
                   host="http://cinemaqualidade.to/",
                   host="http://www.cinemaqualidade.to/",
                   thumbnail=thumbbr,
                   extra="filmes",
                   ))
@@ -81,7 +81,7 @@ def submenu(item):
    idioma = 'peliculas'
    idioma2 = "destacada"
    host = item.host
    if item.host == "http://cinemaqualidade.to/":
    if item.host == "http://www.cinemaqualidade.to/":
        idioma = "filmes"
        idioma2 = "destacado"
    logger.info()

@@ -1,14 +1,13 @@
{
    "id": "gmobi",
    "name": "gmobi",
    "name": "GNULA.mobi",
    "active": true,
    "adult": false,
    "language": ["cast", "lat"],
    "thumbnail": "http://gnula.mobi/wp-content/uploads/2016/08/Untitled-6.png",
    "thumbnail": "http://www.gnula.mobi/wp-content/uploads/2018/05/Captura-1.png?%3E",
    "banner": "",
    "categories": [
        "movie",
        "adult"
        "movie"
    ],
    "settings": [
        {

@@ -12,7 +12,7 @@ from core import tmdb
from core.item import Item
from platformcode import logger

host = 'http://gnula.mobi/'
host = 'http://www.gnula.mobi/'
def mainlist(item):
    logger.info()
    itemlist = list()
@@ -68,17 +68,24 @@ def peliculas(item):
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<div class="col-mt-5 postsh">.*?href="(.*?)" title="(.*?)".*?under-title">(.*?)<.*?src="(.*?)"'
    patron = '<div class="col-mt-5 postsh">.*?href="(.*?)" title="(.*?)".*?<.*?src="(.*?)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedyear, scrapedtitle, scrapedthumbnail in matches:
        year = scrapertools.find_single_match(scrapedyear, r'.*?\((\d{4})\)')
        itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, fulltitle = scrapedtitle, url=scrapedurl,
                             thumbnail=scrapedthumbnail, infoLabels={'year': year}))
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        filter_thumb = scrapedthumbnail.replace("http://image.tmdb.org/t/p/w300", "")
        filter_list = {"poster_path": filter_thumb}
        filter_list = filter_list.items()
        itemlist.append(Item(channel=item.channel,
                             action="findvideos",
                             title=scrapedtitle,
                             fulltitle = scrapedtitle,
                             url=scrapedurl,
                             thumbnail=scrapedthumbnail,
                             infoLabels={'filtro': filter_list}))

    tmdb.set_infoLabels(itemlist, True)
    next_page_url = scrapertools.find_single_match(data, '<link rel="next" href="(.*?)"')
    if next_page_url != "":
        next_page_url = item.url + next_page_url
        next_page_url = next_page_url
        itemlist.append(item.clone(action="peliculas", title="Siguiente >>", text_color="yellow",
                                   url=next_page_url))

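The rewritten `peliculas` no longer scrapes the year; instead it passes `infoLabels={'filtro': ...}` so that `tmdb.set_infoLabels` can identify the movie by its TMDB poster path. Judging from this usage, the filter is a list of key/value pairs compared against each TMDB search result; a sketch of that matching idea (hypothetical, not Alfa's actual tmdb code):

# Hypothetical illustration of how a 'filtro' list could select the right TMDB result.
def pick_result(results, filtro):
    # filtro: list of (key, expected_value) pairs, e.g. [("poster_path", "/abc123.jpg")]
    for res in results:
        if all(res.get(k) == v for k, v in filtro):
            return res
    return results[0] if results else None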
@@ -4,7 +4,7 @@
    "active": true,
    "adult": false,
    "language": ["cast", "lat"],
    "thumbnail": "http://i.imgur.com/I7MxHZI.png",
    "thumbnail": "https://www.inkapelis.com/wp-content/uploads/2016/07/logitoinkapelis-min.png",
    "banner": "inkapelis.png",
    "categories": [
        "movie",
@@ -35,6 +35,22 @@
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_castellano",
            "type": "bool",
            "label": "Incluir en Novedades - castellano",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_latino",
            "type": "bool",
            "label": "Incluir en Novedades - latino",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "modo_grafico",
            "type": "bool",
@@ -56,6 +72,20 @@
                "Perfil 1"
            ]
        },
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostrar enlaces en idioma...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No filtrar",
                "LAT",
                "CAST",
                "VOSE"
            ]
        },
        {
            "id": "filtro_defecto_peliculas",
            "type": "label",

@@ -9,6 +9,10 @@ from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
from channels import filtertools
from channels import autoplay


__modo_grafico__ = config.get_setting("modo_grafico", "inkapelis")
__perfil__ = config.get_setting("perfil", "inkapelis")

@@ -19,14 +23,34 @@ perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E'],
color1, color2, color3, color4 = perfil[__perfil__]


IDIOMAS = {'Latino': 'LAT', 'Español': 'CAST', 'Subtitulado': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = ['Cam', 'TSHQ', 'Dvdrip', 'Blurayrip', 'HD Rip 320p', 'hd rip 320p', 'HD Real 720p', 'Full HD 1080p']
list_servers = ['openload', 'gamovideo', 'streamplay', 'streamango', 'vidoza']


def mainlist(item):
    logger.info()

    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = []

    itemlist.append(item.clone(title="Novedades", action="entradas", url="http://www.inkapelis.com/",
                               extra="Novedades", text_color=color1, thumbnail=get_thumb('newest', auto=True)))
    itemlist.append(item.clone(title="Estrenos", action="entradas", url="http://www.inkapelis.com/genero/estrenos/",
                               text_color=color1, thumbnail=get_thumb('premieres', auto=True)))
    #itemlist.append(item.clone(title="Estrenos", action="entradas", url="http://www.inkapelis.com/genero/estrenos/",
    #                           text_color=color1, thumbnail=get_thumb('premieres', auto=True)))
    itemlist.append(item.clone(title="Castellano", action="entradas",
                               url="https://www.inkapelis.com/?anio=&genero=&calidad=&idioma=Castellano&s=",
                               extra="Buscar", text_color=color1, thumbnail=get_thumb('espanolas', auto=True)))

    itemlist.append(item.clone(title="Latino", action="entradas",
                               url="https://www.inkapelis.com/?anio=&genero=&calidad=&idioma=Latino&s=",
                               extra="Buscar", text_color=color1, thumbnail=get_thumb('latino', auto=True)))

    itemlist.append(item.clone(title="VOSE", action="entradas",
                               url="https://www.inkapelis.com/?anio=&genero=&calidad=&idioma=Subtitulada&s=",
                               extra="Buscar", text_color=color1, thumbnail=get_thumb('newest', auto=True)))

    itemlist.append(item.clone(title="Géneros", action="generos", url="http://www.inkapelis.com/", text_color=color1,
                               thumbnail=get_thumb('genres', auto=True),))
    itemlist.append(item.clone(title="Buscar...", action="", text_color=color1))
@@ -43,6 +67,9 @@ def mainlist(item):
    itemlist.append(
        new_item.clone(action="filtro", title=title, url="http://www.inkapelis.com/?s=", text_color=color2))
    itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))

    autoplay.show_option(item.channel, itemlist)

    return itemlist


@@ -66,6 +93,16 @@ def newest(categoria):

    if categoria == "terror":
        item.url = "https://www.inkapelis.com/genero/terror/"
        item.action = "entradas"

    if categoria == "castellano":
        item.url = "https://www.inkapelis.com/?anio=&genero=&calidad=&idioma=Castellano&s="
        item.extra = "Buscar"
        item.action = "entradas"

    if categoria == "latino":
        item.url = "https://www.inkapelis.com/?anio=&genero=&calidad=&idioma=Latino&s="
        item.extra = "Buscar"
        item.action = "entradas"
    itemlist = entradas(item)

    if itemlist[-1].action == "entradas":
@@ -246,8 +283,6 @@ def entradas(item):
    thumbnail = scrapedthumbnail.replace("w185", "original")
    title = scrapedtitle
    calidad = calidad.strip()
    if calidad:
        title += " [" + calidad + "]"

    itemlist.append(item.clone(action="findvideos", title=title, url=scrapedurl, thumbnail=thumbnail,
                               contentTitle=scrapedtitle, fulltitle=scrapedtitle,
@@ -272,6 +307,10 @@ def entradas(item):
    if category == "Eroticas +18":
        continue
    idioma = idioma.strip()
    if idioma in IDIOMAS:
        idioma = IDIOMAS[idioma]
    else:
        idioma = IDIOMAS['Subtitulado']
    calidad = calidad.strip()
    scrapedtitle = scrapedtitle.replace("Ver Pelicula ", "")
    title = scrapedtitle
@@ -358,13 +397,17 @@ def findvideos(item):
    patron = '<td><a href="([^"]+)".*?title="([^"]+)".*?<td>([^"]+)<\/td><td>([^"]+)<\/td>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url, server, idioma, calidad in matches:
        if idioma in IDIOMAS:
            idioma = IDIOMAS[idioma]
        else:
            idioma = IDIOMAS['Subtitulado']
        if server == "Embed":
            server = "Nowvideo"
        if server == "Ul":
            server = "Uploaded"
        title = "%s [%s][%s]" % (server, idioma, calidad)
        itemlist.append(item.clone(action="play", title=title, url=url, language = idioma, quality = calidad,
                                   server = server))
        itemlist.append(item.clone(action="play", title=title, url=url, language=idioma, quality=calidad,
                                   server=server, infoLabels=item.infoLabels))

    patron = 'id="(embed[0-9]*)".*?<div class="calishow">(.*?)<.*?src="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
@@ -374,8 +417,15 @@ def findvideos(item):
    title = "Directo"
    idioma = scrapertools.find_single_match(data, 'href="#%s".*?>([^<]+)<' % id_embed)
    title = "%s [%s][%s]" % (title.capitalize(), idioma, calidad)
    itemlist.append(item.clone(action="play", title=title, url=url, language = idioma, quality = calidad,
                               server = server))
    itemlist.append(item.clone(action="play", title=title, url=url, language=idioma, quality=calidad,
                               server=server))
    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    if itemlist:
        if not config.get_setting('menu_trailer', item.channel):
@@ -402,4 +452,7 @@ def play(item):
    else:
        itemlist = servertools.find_video_items(data=item.url)

    for videoitem in itemlist:
        videoitem.infoLabels = item.infoLabels

    return itemlist

@@ -2,7 +2,7 @@
    "id": "lacajita",
    "name": "LaCajita",
    "language": ["cast", "lat"],
    "active": true,
    "active": false,
    "adult": false,
    "thumbnail": "http://i.imgur.com/LVdupxc.png",
    "categories": [
@@ -66,4 +66,4 @@
            "visible": true
        }
    ]
}
}

@@ -10,26 +10,8 @@ from core import tmdb
from core import jsontools
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb

tgenero = {"Comedia": "https://s7.postimg.cc/ne9g9zgwb/comedia.png",
           "Suspense": "https://s13.postimg.cc/wmw6vl1cn/suspenso.png",
           "Drama": "https://s16.postimg.cc/94sia332d/drama.png",
           "Acción": "https://s3.postimg.cc/y6o9puflv/accion.png",
           "Aventura": "https://s10.postimg.cc/6su40czih/aventura.png",
           "Romance": "https://s15.postimg.cc/fb5j8cl63/romance.png",
           "Animación": "https://s13.postimg.cc/5on877l87/animacion.png",
           "Ciencia ficción": "https://s9.postimg.cc/diu70s7j3/cienciaficcion.png",
           "Terror": "https://s7.postimg.cc/yi0gij3gb/terror.png",
           "Documental": "https://s16.postimg.cc/7xjj4bmol/documental.png",
           "Música": "https://s29.postimg.cc/bbxmdh9c7/musical.png",
           "Fantasía": "https://s13.postimg.cc/65ylohgvb/fantasia.png",
           "Misterio": "https://s1.postimg.cc/w7fdgf2vj/misterio.png",
           "Crimen": "https://s4.postimg.cc/6z27zhirx/crimen.png",
           "Familia": "https://s7.postimg.cc/6s7vdhqrf/familiar.png",
           "Guerra": "https://s4.postimg.cc/n1h2jp2jh/guerra.png",
           "Western": "https://s23.postimg.cc/lzyfbjzhn/western.png",
           "Historia": "https://s15.postimg.cc/fmc050h1n/historia.png"
           }

thumbletras = {'#': 'https://s32.postimg.cc/drojt686d/image.png',
               'a': 'https://s32.postimg.cc/llp5ekfz9/image.png',
@@ -73,53 +55,46 @@ def mainlist(item):

    itemlist.append(item.clone(title="Estrenos",
                               action="lista",
                               thumbnail='https://s21.postimg.cc/fy69wzm93/estrenos.png',
                               fanart='https://s21.postimg.cc/fy69wzm93/estrenos.png',
                               thumbnail=get_thumb('premieres', auto=True),
                               url=host + 'estrenos'
                               ))

    itemlist.append(item.clone(title="Todas",
                               action="lista",
                               thumbnail='https://s18.postimg.cc/fwvaeo6qh/todas.png',
                               fanart='https://s18.postimg.cc/fwvaeo6qh/todas.png',
                               thumbnail=get_thumb('all', auto=True),
                               url=host
                               ))

    itemlist.append(item.clone(title="Generos",
                               action="seccion",
                               url=host,
                               thumbnail='https://s3.postimg.cc/5s9jg2wtf/generos.png',
                               fanart='https://s3.postimg.cc/5s9jg2wtf/generos.png',
                               thumbnail=get_thumb('genres', auto=True),
                               extra='generos'
                               ))

    itemlist.append(item.clone(title="Alfabetico",
                               action="seccion",
                               url=host,
                               thumbnail='https://s17.postimg.cc/fwi1y99en/a-z.png',
                               fanart='https://s17.postimg.cc/fwi1y99en/a-z.png',
                               thumbnail=get_thumb('alphabet', auto=True),
                               extra='a-z'
                               ))

    itemlist.append(item.clone(title="Mas Vistas",
                               action="lista",
                               thumbnail='https://s9.postimg.cc/wmhzu9d7z/vistas.png',
                               fanart='https://s9.postimg.cc/wmhzu9d7z/vistas.png',
                               thumbnail=get_thumb('more watched', auto=True),
                               url=host + 'peliculas-mas-vistas'
                               ))

    itemlist.append(item.clone(title="Mas Votadas",
                               action="lista",
                               thumbnail='https://s7.postimg.cc/9kg1nthzf/votadas.png',
                               fanart='https://s7.postimg.cc/9kg1nthzf/votadas.png',
                               thumbnail=get_thumb('more voted', auto=True),
                               url=host + 'peliculas-mas-votadas'
                               ))

    itemlist.append(item.clone(title="Buscar",
                               action="search",
                               url=host + '?s=',
                               thumbnail='https://s30.postimg.cc/pei7txpa9/buscar.png',
                               fanart='https://s30.postimg.cc/pei7txpa9/buscar.png'
                               thumbnail=get_thumb('search', auto=True)
                               ))

    return itemlist
@@ -178,7 +153,7 @@ def seccion(item):
    itemlist = []
    data = get_source(item.url)
    if item.extra == 'generos':
        patron = '<li class=cat-item cat-item-.*?><a href=(.*?)>(.*?)<\/li>'
        patron = '<li class=cat-item cat-item-.*?><a href=(.*?)>(.*?)</a><\/li>'
    elif item.extra == 'a-z':
        patron = '<li><a href=(.*?)>(\w|#)<\/a><\/li>'
    matches = re.compile(patron, re.DOTALL).findall(data)
@@ -187,12 +162,8 @@ def seccion(item):
    url = scrapedurl
    thumbnail = ''
    if item.extra == 'generos':
        title = re.sub(r'<\/a> \(\d+\)', '', scrapedtitle)
        cantidad = re.findall(r'.*?<\/a> \((\d+)\)', scrapedtitle)
        th_title = title
        title = title + ' (' + cantidad[0] + ')'
        if th_title in tgenero:
            thumbnail = tgenero[th_title]
        #cantidad = re.findall(r'.*?<\/a> \((\d+)\)', scrapedtitle)
        title = scrapedtitle
    else:
        title = scrapedtitle
        if title.lower() in thumbletras:

@@ -434,6 +434,7 @@ def nuevos_cap(item):

    data = httptools.downloadpage(item.url).data
    data = jsontools.load(data)
    logger.debug(data)
    capitulos = []
    if "Nuevas" in item.title:
        for child in data["b"]:
@@ -455,13 +456,15 @@ def nuevos_cap(item):
            infoLabels['season'] = int(season)
        except:
            infoLabels['season'] = 0

        if "Nuevos" in item.title:
            if not child['episode']:
                episode = scrapertools.find_single_match(child['name'], '\d+x(\d+)')
                if not episode:
                    episode = "0"
                infoLabels['episode'] = int(episode)
            elif "al" in child['episode']:
                episode = "0"
                infoLabels['episode'] = int(episode)
            else:
                infoLabels['episode'] = int(child['episode'])
            infoLabels['mediatype'] = "episode"

@@ -16,6 +16,8 @@ from platformcode import config, logger
from platformcode import platformtools
from core import filetools

import gktools

__channel__ = "pelispedia"

CHANNEL_HOST = "http://www.pelispedia.tv/"
@@ -510,98 +512,12 @@ def findvideos(item):

def play(item):
    logger.info("url=%s" % item.url)
    itemlist = []

    if item.url.startswith("https://pelispedia.video/v.php"):
        itemlist = gktools.gk_play(item)

        headers = {'Referer': item.referer}
        resp = httptools.downloadpage(item.url, headers=headers, cookies=False)

        for h in resp.headers:
            ck = scrapertools.find_single_match(resp.headers[h], '__cfduid=([^;]*)')
            if ck:
                gsv = scrapertools.find_single_match(resp.data, '<meta name="google-site-verification" content="([^"]*)"')
                token = generar_token(gsv, 'b0a8c83650f18ccc7c87b16e3c460474'+'yt'+'b0a8c83650f18ccc7c87b16e3c460474'+'2653')
                playparms = scrapertools.find_single_match(resp.data, 'Play\("([^"]*)","([^"]*)","([^"]*)"')
                if playparms:
                    link = playparms[0]
                    subtitle = '' if playparms[1] == '' or playparms[2] == '' else playparms[2] + playparms[1] + '.srt'
                else:
                    link = scrapertools.find_single_match(item.url, 'id=([^;]*)')
                    subtitle = ''
                # ~ logger.info("gsv: %s token: %s ck: %s link: %s" % (gsv, token, ck, link))

                post = "link=%s&token=%s" % (link, token)
                headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Cookie': '__cfduid=' + ck}
                data = httptools.downloadpage("https://pelispedia.video/plugins/gkpedia.php", post=post, headers=headers, cookies=False).data

                mp4 = scrapertools.find_single_match(data, '"link":"([^"]*)')
                if mp4:
                    mp4 = mp4.replace('\/', '/')
                    if 'chomikuj.pl/' in mp4: mp4 += "|Referer=%s" % item.referer
                    itemlist.append(['.mp4', mp4, 0, subtitle])

                break


    elif item.url.startswith("https://load.pelispedia.vip/embed/"):

        headers = {'Referer': item.referer}
        resp = httptools.downloadpage(item.url, headers=headers, cookies=False)

        for h in resp.headers:
            ck = scrapertools.find_single_match(resp.headers[h], '__cfduid=([^;]*)')
            if ck:
                gsv = scrapertools.find_single_match(resp.data, '<meta name="google-site-verification" content="([^"]*)"')
                token = generar_token(gsv, '4fe554b59d760c9986c903b07af8b7a4'+'yt'+'4fe554b59d760c9986c903b07af8b7a4'+'785446346')
                url = item.url.replace('/embed/', '/stream/') + '/' + token
                # ~ logger.info("gsv: %s token: %s ck: %s" % (gsv, token, ck))

                headers = {'Referer': item.url, 'Cookie': '__cfduid=' + ck}
                data = httptools.downloadpage(url, headers=headers, cookies=False).data

                url = scrapertools.find_single_match(data, '<meta (?:name|property)="og:url" content="([^"]+)"')
                srv = scrapertools.find_single_match(data, '<meta (?:name|property)="og:sitename" content="([^"]+)"')
                if srv == '' and 'rapidvideo.com/' in url: srv = 'rapidvideo'

                if url != '' and srv != '':
                    itemlist.append(item.clone(url=url, server=srv.lower()))

                elif '<title>Vidoza</title>' in data or '|fastplay|' in data:
                    if '|fastplay|' in data:
                        packed = scrapertools.find_single_match(data, "<script type='text/javascript'>(eval\(.*?)</script>")
                        from lib import jsunpack
                        data = jsunpack.unpack(packed)
                        data = data.replace("\\'", "'")

                    matches = scrapertools.find_multiple_matches(data, 'file\s*:\s*"([^"]+)"\s*,\s*label\s*:\s*"([^"]+)"')
                    subtitle = ''
                    for fil, lbl in matches:
                        if fil.endswith('.srt') and not fil.endswith('empty.srt'):
                            subtitle = fil
                            if not subtitle.startswith('http'):
                                domi = scrapertools.find_single_match(data, 'aboutlink\s*:\s*"([^"]*)')
                                subtitle = domi + subtitle
                            break

                    for fil, lbl in matches:
                        if not fil.endswith('.srt'):
                            itemlist.append([lbl, fil, 0, subtitle])

                break


    else:
        itemlist = servertools.find_video_items(data=item.url)
        for videoitem in itemlist:
            videoitem.title = item.title
            videoitem.channel = __channel__

    logger.info("retorna itemlist: %s" % itemlist)
    return itemlist



# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

def obtener_data(url, referer=''):
@@ -665,82 +581,3 @@ def decodificar_cookie(data):

    g = eval(l1)
    return eval(l2).replace(";path=/;max-age=86400", "")

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

def evpKDF(passwd, salt, key_size=8, iv_size=4, iterations=1, hash_algorithm="md5"):
    import hashlib
    target_key_size = key_size + iv_size
    derived_bytes = ""
    number_of_derived_words = 0
    block = None
    hasher = hashlib.new(hash_algorithm)
    while number_of_derived_words < target_key_size:
        if block is not None:
            hasher.update(block)

        hasher.update(passwd)
        hasher.update(salt)
        block = hasher.digest()
        hasher = hashlib.new(hash_algorithm)

        for i in range(1, iterations):
            hasher.update(block)
            block = hasher.digest()
            hasher = hashlib.new(hash_algorithm)

        derived_bytes += block[0: min(len(block), (target_key_size - number_of_derived_words) * 4)]

        number_of_derived_words += len(block)/4

    return {
        "key": derived_bytes[0: key_size * 4],
        "iv": derived_bytes[key_size * 4:]
    }

def obtener_cripto(password, plaintext):
    import os, base64, json
    SALT_LENGTH = 8
    BLOCK_SIZE = 16
    KEY_SIZE = 32

    salt = os.urandom(SALT_LENGTH)
    iv = os.urandom(BLOCK_SIZE)

    paddingLength = 16 - (len(plaintext) % 16)
    paddedPlaintext = plaintext+chr(paddingLength)*paddingLength

    kdf = evpKDF(password, salt)

    try:  # Intentar con librería AES del sistema
        from Crypto.Cipher import AES
        cipherSpec = AES.new(kdf['key'], AES.MODE_CBC, iv)
    except:  # Si falla intentar con librería del addon
        import jscrypto
        cipherSpec = jscrypto.new(kdf['key'], jscrypto.MODE_CBC, iv)
    ciphertext = cipherSpec.encrypt(paddedPlaintext)

    return json.dumps({'ct': base64.b64encode(ciphertext), 'iv': iv.encode("hex"), 's': salt.encode("hex")}, sort_keys=True, separators=(',', ':'))

def generar_token(gsv, pwd):
    txt = obtener_cripto(pwd, gsv)

    _0x382d28 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'

    valors = [0, 0, 0]
    cicle = 0
    retorn = ''
    for ch in txt:
        valors[cicle] = ord(ch)
        cicle += 1
        if cicle == 3:
            primer = _0x382d28[valors[0] >> 0x2]
            segon = _0x382d28[((valors[0] & 0x3) << 0x4) | (valors[1] >> 0x4)]
            tercer = _0x382d28[((valors[1] & 0xf) << 0x2) | (valors[2] >> 0x6)]
            quart = _0x382d28[valors[2] & 0x3f]
            retorn += primer + segon + tercer + quart

            valors = [0, 0, 0]
            cicle = 0

    return retorn

@@ -215,8 +215,7 @@ def generos(item):

    for scrapedtitle, scrapedurl in matches:
        url = urlparse.urljoin(item.url, scrapedurl)
        title = scrapedtitle.decode('cp1252')
        title = title.encode('utf-8')
        title = scrapedtitle.capitalize()
        if title.lower() in tgenero:
            thumbnail = tgenero[title.lower()]
            fanart = tgenero[title.lower()]
@@ -225,7 +224,7 @@ def generos(item):
            fanart = ''
        plot = ''
        itemlist.append(
            Item(channel=item.channel, action="todas", title=title.lower(), fulltitle=item.fulltitle, url=url,
            Item(channel=item.channel, action="todas", title=title, fulltitle=item.fulltitle, url=url,
                 thumbnail=thumbnail, plot=plot, fanart=fanart))

    return itemlist
@@ -241,19 +240,12 @@ def ultimas(item):

    for scrapedtitle, scrapedurl in matches:
        url = urlparse.urljoin(item.url, scrapedurl)
        data = httptools.downloadpage(scrapedurl).data
        thumbnail = scrapertools.get_match(data, '<link rel="image_src" href="([^"]+)"/>')
        realplot = scrapertools.find_single_match(data, '<p itemprop="articleBody">([^<]+)<\/p> ')
        plot = scrapertools.remove_htmltags(realplot)
        inutil = re.findall(r' Temporada \d', scrapedtitle)
        title = scrapedtitle
        title = scrapertools.decodeHtmlentities(title)
        realtitle = scrapedtitle.replace(inutil[0], '')
        fanart = 'https://s22.postimg.cc/cb7nmhwv5/ultimas.png'
        season = scrapertools.find_single_match(scrapedtitle, 'Temporada (\d+)')
        title = scrapertools.find_single_match(scrapedtitle, '(.*?) Temporada')
        itemlist.append(
            Item(channel=item.channel, action="temporadas", title=title, url=url, thumbnail=thumbnail, plot=plot,
                 fanart=fanart, contentSerieName=realtitle))

            Item(channel=item.channel, action="temporadas", title=scrapedtitle, url=url, contentSerieName=title,
                 contentSeasonNumber=season, infoLabels={'season': season}))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist


@@ -290,42 +282,38 @@ def lasmas(item):

    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    realplot = ''
    base_data = httptools.downloadpage(item.url).data
    base_data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", base_data)
    thumbnail = ''
    if item.extra == 'letras':
        patron = '<li><a href="([^"]+)" title="Series que comienzan con.*?">([^<]+)</a></li>'
        data = base_data
        patron = '<li><a href=([^ ]+) title=Series que comienzan con.*?>([^<]+)</a></li>'
    else:
        patron = '<a href="([^"]+)" title="([^V]+)' + item.extra + '.*?">'
        if item.extra == 'Vista':
            type = 'Vistas'
        else:
            type = 'Votadas'
        data = scrapertools.find_single_match(base_data, 'white>Las m.s %s <span(.*?)</a>(?:</div>|</center>)' %
                                              type)
        patron = '<a href=([^ ]+) title=(.*?) ' + item.extra

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        url = urlparse.urljoin(item.url, scrapedurl)
        if item.extra != 'letras':
            data = httptools.downloadpage(scrapedurl).data
            thumbnail = scrapertools.get_match(data, '<link rel="image_src" href="([^"]+)"/>')
            realplot = scrapertools.find_single_match(data, '<p itemprop="articleBody">([^<]+)<\/p> ')
            plot = scrapertools.remove_htmltags(realplot)
            action = 'temporadas'
        else:
            if scrapedtitle.lower() in thumbletras:
                thumbnail = thumbletras[scrapedtitle.lower()]
            else:
                thumbnail = ''
            plot = ''
            action = 'todas'
        title = scrapedtitle.replace(': ', '')
        title = scrapertools.decodeHtmlentities(title)
        if item.extra == 'letras':
            fanart = 'https://s17.postimg.cc/fwi1y99en/a-z.png'
        elif item.extra == 'Vista':
            fanart = 'https://s9.postimg.cc/wmhzu9d7z/vistas.png'
        else:
            fanart = ''

        itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail, plot=plot,
                             fanart=fanart, contentSerieName=scrapedtitle))

        itemlist.append(Item(channel=item.channel, action=action, title=title, url=url,
                             thumbnail=thumbnail, contentSerieName=scrapedtitle))
    if item.extra != 'letras':
        tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist


@@ -162,14 +162,14 @@ def seasons(item):
                             title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                             url=item.url,
                             action="add_serie_to_library",
                             extra="all_episodes",
                             extra="episodios",
                             contentSerieName=item.contentSerieName,
                             ))

    return itemlist


def epidodios(item):
def episodios(item):
    logger.info()
    itemlist = []
    templist = seasons(item)

@@ -291,9 +291,25 @@ def settings(item):


def submenu_tools(item):
    import os
    from core import filetools
    logger.info()
    itemlist = list()

    # Herramientas de testeo masivo
    test_path = os.path.join(config.get_runtime_path(), "channels/test.py")

    if filetools.exists(test_path):
        itemlist.append(Item(channel=CHANNELNAME, title="Herramientas de Testeo masivo", action="", folder=False,
                             thumbnail=get_thumb("channels.png")))
        itemlist.append(Item(title='- Testear canales ...', channel="test", action="channel_test_selected"))
        itemlist.append(Item(title='- Testear servidores ...', channel="test", action="server_test_selected"))
        itemlist.append(Item(title='- Testear novedades!', channel="test", action="news_test_all"))
        itemlist.append(Item(title='- Upload tests to web!', channel="test", action="web_update_tests"))
        itemlist.append(
            Item(channel=CHANNELNAME, action="", title="", folder=False, thumbnail=get_thumb("setting_0.png")))


    itemlist.append(Item(channel=CHANNELNAME, title="Herramientas de canales", action="", folder=False,
                         thumbnail=get_thumb("channels.png")))
    itemlist.append(Item(channel=CHANNELNAME, title=" Comprobar archivos *_data.json", action="conf_tools",
@@ -651,3 +667,4 @@ def overwrite_tools(item):
            logger.error(message)

    p_dialog2.close()

297  plugin.video.alfa/lib/gktools.py  Normal file
@@ -0,0 +1,297 @@
# -*- coding: utf-8 -*-

'''
Características "GK":
- Utiliza una cookie __cfduid
- Utiliza meta name="google-site-verification" como texto a encriptar
- La clave para encriptar se calcula en los js
- Se calcula un token criptográfico en función del texto y clave

A partir de aquí 2 opciones:

a) Si la url indica que hay un /embed/
   - se cambia /embed/ por /stream/ y se añade /token
   - se descarga la página, dónde se pueden extraer los videos

b) Sino (enlaces directos)
   - se busca un identificador
   - si hay una llamada a Play() en el js, el id se saca de allí
   - sino el id puede estar en la url
   - con el identificador y el token se llama a un php (gkpluginsphp, gkpedia)
   - el php devuelve la lista de enlaces a los videos

Notas:
- Creado a partir de lo visto en pelispedia y animeyt, que utilizan este sistema.
- Para otros canales habrá que añadir sus datos en las funciones calcular_*
  o hacer que estas funciones puedan extraer lo necesario de los js.
'''

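Channel-side, the whole mechanism is consumed with a single call, as the animeyt.py and pelispedia.py hunks above show: the channel's `play()` hands the protected link (plus its referer) to `gk_play` and gets back playable entries. A sketch of that call site, with the URL prefix taken from the animeyt hunk:

# Channel-side usage sketch (compare animeyt.py play() above); gk_play reads item.url and item.referer.
import gktools

def play(item):
    if item.url.startswith('https://s2.animeyt.tv/rakuten.php?'):
        return gktools.gk_play(item)  # Items and/or [label, url, 0, subtitle] entries
    return []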
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

import urlparse
from core import httptools
from core import scrapertools
# ~ from platformcode import logger

def gk_play(item):

    itemlist = []

    # Descargar para tratar header y data por separado
    # ------------------------------------------------
    headers = {'Referer': item.referer}
    resp = httptools.downloadpage(item.url, headers=headers, cookies=False)
    # ~ with open('gk_play1.html', 'w') as f: f.write(resp.data); f.close()

    # Obtener cookie __cfduid
    # -----------------------
    for h in resp.headers:
        ck = scrapertools.find_single_match(resp.headers[h], '__cfduid=([^;]*)')
        if ck:
            break
    if not ck: return itemlist

    # Extraer datos y calcular token
    # ------------------------------
    gsv = scrapertools.find_single_match(resp.data, '<meta name="google-site-verification" content="([^"]*)"')
    if not gsv: return itemlist

    suto = calcular_sutorimux(item.url)  # valor que se calcula en función del dominio
    sufijo = calcular_sufijo(item.url)  # valor predeterminado que se establece en el código js

    token = generar_token(gsv, suto+'yt'+suto+sufijo)


    # Descargar y extraer videos
    # --------------------------

    if '/embed/' in item.url:
        url = item.url.replace('/embed/', '/stream/') + '/' + token
        headers = {'Referer': item.url, 'Cookie': '__cfduid=' + ck}
        data = httptools.downloadpage(url, headers=headers, cookies=False).data
        # ~ with open('gk_play2.html', 'w') as f: f.write(resp.data); f.close()

        # Extraer enlaces de la respuesta
        # -------------------------------
        url = scrapertools.find_single_match(data, '<meta (?:name|property)="og:url" content="([^"]+)"')
        srv = scrapertools.find_single_match(data, '<meta (?:name|property)="og:sitename" content="([^"]+)"')
        if srv == '' and 'rapidvideo.com/' in url: srv = 'rapidvideo'

        if url != '' and srv != '':
            itemlist.append(item.clone(url=url, server=srv.lower()))

        elif '<title>Vidoza</title>' in data or '|fastplay|' in data:
            if '|fastplay|' in data:
                packed = scrapertools.find_single_match(data, "<script type='text/javascript'>(eval\(.*?)</script>")
                from lib import jsunpack
                data = jsunpack.unpack(packed)
                data = data.replace("\\'", "'")

            matches = scrapertools.find_multiple_matches(data, 'file\s*:\s*"([^"]+)"\s*,\s*label\s*:\s*"([^"]+)"')
            subtitle = ''
            for fil, lbl in matches:
                if fil.endswith('.srt') and not fil.endswith('empty.srt'):
                    subtitle = fil
                    if not subtitle.startswith('http'):
                        domi = scrapertools.find_single_match(data, 'aboutlink\s*:\s*"([^"]*)')
                        subtitle = domi + subtitle
                    break

            for fil, lbl in matches:
                if not fil.endswith('.srt'):
                    itemlist.append([lbl, fil, 0, subtitle])


    else:
        playparms = scrapertools.find_single_match(resp.data, 'Play\("([^"]*)","([^"]*)","([^"]*)"')
        if playparms:
            link = playparms[0]
            subtitle = '' if playparms[1] == '' or playparms[2] == '' else playparms[2] + playparms[1] + '.srt'
        else:
            subtitle = ''
            link = scrapertools.find_single_match(resp.data, 'Play\("([^"]*)"')
            if not link:
                link = scrapertools.find_single_match(item.url, 'id=([^;]*)')

        if link:
            # ~ logger.info('%s %s %s' % (item.url, link, token))
            url_gk = calcular_url_gk(item.url)

            post = "link=%s&token=%s" % (link, token)
            headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Cookie': '__cfduid=' + ck}

            data = httptools.downloadpage(url_gk, post=post, headers=headers, cookies=False).data
            # ~ with open('gk_play3.html', 'w') as f: f.write(resp.data); f.close()

            # Extraer enlaces de la respuesta
            # -------------------------------
            matches = scrapertools.find_multiple_matches(data, '"link"\s*:\s*"([^"]*)"\s*,\s*"label"\s*:\s*"([^"]*)"\s*,\s*"type"\s*:\s*"([^"]*)"')
            if matches:
                for url, lbl, typ in matches:
                    itemlist.append(['[%s][%s]' % (typ, lbl), corregir_url(url, item.referer), 0, subtitle])
            else:
                url = scrapertools.find_single_match(data, '"link"\s*:\s*"([^"]*)"')
                if url:
                    itemlist.append(['.mp4', corregir_url(url, item.referer), 0, subtitle])


    return itemlist


# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

# Correcciones en las urls finales obtenidas
# ------------------------------------------
def corregir_url(url, referer):
    url = url.replace('\/', '/')
    if 'chomikuj.pl/' in url: url += "|Referer=%s" % referer
    return url


# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~


# Generar un token válido a partir de un texto y una clave
# --------------------------------------------------------

# gsv: google-site-verification, obtenido de '<meta name="google-site-verification" content="([^"]*)"'
# pwd: Password
def generar_token(gsv, pwd):
    txt = obtener_cripto(pwd, gsv)

    _0x382d28 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'

    valors = [0, 0, 0]
    cicle = 0
    retorn = ''
    for ch in txt:
        valors[cicle] = ord(ch)
        cicle += 1
        if cicle == 3:
            primer = _0x382d28[valors[0] >> 0x2]
            segon = _0x382d28[((valors[0] & 0x3) << 0x4) | (valors[1] >> 0x4)]
            tercer = _0x382d28[((valors[1] & 0xf) << 0x2) | (valors[2] >> 0x6)]
            quart = _0x382d28[valors[2] & 0x3f]
            retorn += primer + segon + tercer + quart

            valors = [0, 0, 0]
            cicle = 0

    return retorn

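The loop in `generar_token` is a hand-rolled base64 encoder: `_0x382d28` is the standard base64 alphabet, each complete group of 3 bytes of the JSON produced by `obtener_cripto` becomes 4 output characters, and since output is only emitted when `cicle` reaches 3, any trailing 1- or 2-byte remainder is silently dropped instead of being padded with '='. Under that reading it is equivalent to:

# Equivalent to the manual loop above (Python 2, as in the rest of this module).
import base64

def generar_token_b64(gsv, pwd):
    txt = obtener_cripto(pwd, gsv)
    txt = txt[:len(txt) // 3 * 3]  # keep only complete 3-byte groups, as the loop does
    return base64.b64encode(txt)   # input is a multiple of 3, so no '=' padding appears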
def obtener_cripto(password, plaintext):
    import os, base64, json
    SALT_LENGTH = 8
    BLOCK_SIZE = 16
    KEY_SIZE = 32

    salt = os.urandom(SALT_LENGTH)
    iv = os.urandom(BLOCK_SIZE)

    paddingLength = 16 - (len(plaintext) % 16)
    paddedPlaintext = plaintext+chr(paddingLength)*paddingLength

    kdf = evpKDF(password, salt)

    try:  # Intentar con librería AES del sistema
        from Crypto.Cipher import AES
        cipherSpec = AES.new(kdf['key'], AES.MODE_CBC, iv)
    except:  # Si falla intentar con librería del addon
        import jscrypto
        cipherSpec = jscrypto.new(kdf['key'], jscrypto.MODE_CBC, iv)
    ciphertext = cipherSpec.encrypt(paddedPlaintext)

    return json.dumps({'ct': base64.b64encode(ciphertext), 'iv': iv.encode("hex"), 's': salt.encode("hex")}, sort_keys=True, separators=(',', ':'))


def evpKDF(passwd, salt, key_size=8, iv_size=4, iterations=1, hash_algorithm="md5"):
    import hashlib
    target_key_size = key_size + iv_size
    derived_bytes = ""
    number_of_derived_words = 0
    block = None
    hasher = hashlib.new(hash_algorithm)
    while number_of_derived_words < target_key_size:
        if block is not None:
            hasher.update(block)

        hasher.update(passwd)
        hasher.update(salt)
        block = hasher.digest()
        hasher = hashlib.new(hash_algorithm)

        for i in range(1, iterations):
            hasher.update(block)
            block = hasher.digest()
            hasher = hashlib.new(hash_algorithm)

        derived_bytes += block[0: min(len(block), (target_key_size - number_of_derived_words) * 4)]

        number_of_derived_words += len(block)/4

    return {
        "key": derived_bytes[0: key_size * 4],
        "iv": derived_bytes[key_size * 4:]
    }

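`evpKDF` appears to be a port of CryptoJS's `EvpKDF` (OpenSSL's `EVP_BytesToKey` with MD5 and a single iteration), which is the default CryptoJS uses to stretch a passphrase and salt into an AES key and IV; that compatibility would be what lets the sites' JavaScript decrypt `obtener_cripto`'s output. With the default word sizes (8 + 4 words of 4 bytes) it derives a 32-byte key and a 16-byte IV:

# Assuming the CryptoJS-compatible reading above (Python 2 strings as byte strings).
kdf = evpKDF("secret-passphrase", "8bytsalt")  # obtener_cripto calls it with an 8-byte salt
assert len(kdf['key']) == 32 and len(kdf['iv']) == 16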
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

# Valores extraídos de los js para los dominios tratados (pendiente automatizar!)
# Ej: https://pelispedia.video/plugins/gkpluginsphp.js?v=3.3
# Ej: https://s2.animeyt.tv/rakuten/plugins/rakuten676.js?v=200000000

def calcular_sutorimux(url):
    dominio = urlparse.urlparse(url).netloc
    claves = {
        'pelispedia.video': 'b0a8c83650f18ccc7c87b16e3c460474',
        'load.pelispedia.vip': '4fe554b59d760c9986c903b07af8b7a4',

        's1.animeyt.tv': '0cdf0d0302091bc22a0afdc3f13c0773',
        's2.animeyt.tv': '079c3ee3ca289af95d819d93b852ed94',
        's3.animeyt.tv': '6c21a435bce9f5926d26db567fee1241',
        's4.animeyt.tv': '38546fb4797f2f7c5b6690a5b4a47e34',
        's10.animeyt.tv': 'be88e4cc014c0ae6f9f2d1f947b3b23b',
        's.animeyt.tv': '49f911abffe682820dc5b54777713974',
        'server.animeyt.tv': '2c60637d7f7aa54225c20aea61a2b468',
        'api.animeyt.tv': '54092dea9fd2e163aaa59ad0c4351866',
    }
    return '' if dominio not in claves else claves[dominio]


def calcular_sufijo(url):
    dominio = urlparse.urlparse(url).netloc
    claves = {
        'pelispedia.video': '2653',
        'load.pelispedia.vip': '785446346',

        's1.animeyt.tv': '',
        's2.animeyt.tv': '3497510',
        's3.animeyt.tv': '',
        's4.animeyt.tv': '',
        's10.animeyt.tv': '',
        's.animeyt.tv': '',
        'server.animeyt.tv': '',
        'api.animeyt.tv': '',
    }
    return '' if dominio not in claves else claves[dominio]


def calcular_url_gk(url):
    dominio = urlparse.urlparse(url).netloc
    claves = {
        'pelispedia.video': 'https://pelispedia.video/plugins/cloupedia.php',  # plugins/gkpedia.php
        'load.pelispedia.vip': '',

        's1.animeyt.tv': '',
        's2.animeyt.tv': 'https://s2.animeyt.tv/rakuten/plugins/gkpluginsphp.php',
        's3.animeyt.tv': '',
        's4.animeyt.tv': '',
        's10.animeyt.tv': '',
        's.animeyt.tv': '',
        'server.animeyt.tv': '',
        'api.animeyt.tv': '',
    }
    return '' if dominio not in claves else claves[dominio]
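These three lookup tables are the only per-site configuration: each maps a player host to a constant lifted from that site's JavaScript, with '' meaning "not resolved yet". Reading values straight off the tables above:

# Values taken directly from the dictionaries above (Python 2 print statements, as in this codebase).
print calcular_sutorimux('https://load.pelispedia.vip/embed/abc')  # -> '4fe554b59d760c9986c903b07af8b7a4'
print calcular_sufijo('https://load.pelispedia.vip/embed/abc')     # -> '785446346'
print calcular_url_gk('https://s2.animeyt.tv/rakuten.php?id=x')    # -> 'https://s2.animeyt.tv/rakuten/plugins/gkpluginsphp.php'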
@@ -493,6 +493,20 @@ def set_context_commands(item, parent_item):
                             (sys.argv[0], item.clone(channel="favorites", action="addFavourite",
                                                      from_channel=item.channel,
                                                      from_action=item.action).tourl())))
    # Herramientas de desarrollador
    from core import filetools

    test_path = os.path.join(config.get_runtime_path(), "channels/test.py")

    if parent_item.action == 'filterchannels' and item.action == 'mainlist' and filetools.exists(test_path):

        channel_parameters = channeltools.get_channel_parameters(item.channel)
        context_commands.append(("TESTEAR ESTE CANAL",
                                 "XBMC.RunPlugin(%s?%s)" %
                                 (sys.argv[0],
                                  Item(channel='test', action='channel_test',
                                       config=channel_parameters['channel']).tourl())))

    # Buscar en otros canales
    if item.contentType in ['movie', 'tvshow'] and item.channel != 'search':
        # Buscar en otros canales