Merge branch 'master' into Fixes
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="1.7.0" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="1.9.0" provider-name="Alfa Addon">
    <requires>
        <import addon="xbmc.python" version="2.1.0"/>
        <import addon="script.module.libtorrent" optional="true"/>
@@ -19,8 +19,10 @@
    </assets>
    <news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Arreglos[/B][/COLOR]
[I]- arreglo sección adultos
[I]- flashx
[I]- serieslan
- streamplay
- descargasmix
- canalpelis - Canal nuevo
- fixes internos[/I]

[COLOR green]Gracias a [COLOR yellow][B]msdos[/B][/COLOR] por su colaboración en esta versión[/COLOR]
21  plugin.video.alfa/channels/autoplay.py  Executable file → Normal file
@@ -98,11 +98,14 @@ def start(itemlist, item):
    favorite_servers = []
    favorite_quality = []

    # Guarda el valor actual de "Accion al seleccionar vídeo:" en preferencias
    user_config_setting = config.get_setting("default_action")
    # Guarda el valor actual de "Accion y Player Mode" en preferencias
    user_config_setting_action = config.get_setting("default_action")
    user_config_setting_player = config.get_setting("player_mode")
    # Habilita la accion "Ver en calidad alta" (si el servidor devuelve más de una calidad p.e. gdrive)
    if user_config_setting != 2:
    if user_config_setting_action != 2:
        config.set_setting("default_action", 2)
    if user_config_setting_player != 0:
        config.set_setting("player_mode", 0)

    # Informa que AutoPlay esta activo
    platformtools.dialog_notification('AutoPlay Activo', '', sound=False)
@@ -274,9 +277,11 @@ def start(itemlist, item):
        platformtools.dialog_notification("AutoPlay", "Nueva Calidad/Servidor disponible en la "
                                          "configuracion", sound=False)

    # Restaura si es necesario el valor previo de "Accion al seleccionar vídeo:" en preferencias
    if user_config_setting != 2:
        config.set_setting("default_action", user_config_setting)
    # Restaura si es necesario el valor previo de "Accion y Player Mode" en preferencias
    if user_config_setting_action != 2:
        config.set_setting("default_action", user_config_setting_action)
    if user_config_setting_player != 0:
        config.set_setting("player_mode", user_config_setting_player)

    # devuelve la lista de enlaces para la eleccion manual
    return itemlist
@@ -524,12 +529,16 @@ def save(item, dict_data_saved):
    # Obtiene el nodo AUTOPLAY desde el json
    autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY')

    new_config = dict_data_saved
    if not new_config['active']:
        new_config['language']=0
    channel_node = autoplay_node.get(item.from_channel)
    config.set_setting("filter_languages", dict_data_saved.pop("language"), item.from_channel)
    channel_node['settings'] = dict_data_saved

    result, json_data = jsontools.update_node(autoplay_node, 'autoplay', 'AUTOPLAY')


    return result
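Note: the autoplay.py hunks above all follow one pattern — snapshot the user's preferences, force the values AutoPlay needs while it runs, and restore the originals afterwards. A minimal sketch of that pattern, using only the config.get_setting / config.set_setting calls visible in the diff (the try/finally wrapper is illustrative, not the addon's actual control flow):

    # Sketch only: save, override, restore (names taken from the hunks above)
    user_config_setting_action = config.get_setting("default_action")
    user_config_setting_player = config.get_setting("player_mode")
    try:
        # values AutoPlay expects while it is choosing and playing a link
        if user_config_setting_action != 2:
            config.set_setting("default_action", 2)
        if user_config_setting_player != 0:
            config.set_setting("player_mode", 0)
        # ... AutoPlay logic runs here ...
    finally:
        # put the user's original preferences back
        if user_config_setting_action != 2:
            config.set_setting("default_action", user_config_setting_action)
        if user_config_setting_player != 0:
            config.set_setting("player_mode", user_config_setting_player)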
55  plugin.video.alfa/channels/canalpelis.json  Normal file
@@ -0,0 +1,55 @@
{
    "id": "canalpelis",
    "name": "CanalPelis",
    "active": true,
    "adult": false,
    "language": "es",
    "fanart": "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/canalpelisbg.jpg",
    "thumbnail": "http://www.canalpelis.com/wp-content/uploads/2016/11/logo_web.gif",
    "banner": "",
    "version": 1,
    "changes": [
        {
            "date": "15/08/17",
            "description": "Nuevo Canal"
        }
    ],
    "categories": [
        "latino",
        "movie",
        "tvshow",
        "vos"
    ],
    "settings": [
        {
            "id": "modo_grafico",
            "type": "bool",
            "label": "Buscar información extra",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "perfil",
            "type": "list",
            "label": "Perfil de color",
            "default": 3,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "Sin color",
                "Perfil 3",
                "Perfil 2",
                "Perfil 1"
            ]
        },
        {
            "id": "orden_episodios",
            "type": "bool",
            "label": "Mostrar los episodios de las series en orden descendente",
            "default": false,
            "enabled": true,
            "visible": true
        }
    ]
}
377  plugin.video.alfa/channels/canalpelis.py  Executable file
@@ -0,0 +1,377 @@
# -*- coding: utf-8 -*-
# -*- Channel CanalPelis -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import re
import sys
import urllib
import urlparse

from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from core import channeltools
from core import tmdb
from platformcode import config, logger

__channel__ = "canalpelis"

host = "http://www.canalpelis.com/"

try:
    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
    __perfil__ = int(config.get_setting('perfil', __channel__))
except:
    __modo_grafico__ = True
    __perfil__ = 0

# Fijar perfil de color
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
          ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
          ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
if __perfil__ < 3:
    color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
    color1 = color2 = color3 = color4 = color5 = ""

headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
           ['Referer', host]]


parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']

thumbnail = "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/%s.png"


def mainlist(item):
    logger.info()
    itemlist = []

    itemlist.append(item.clone(title="Peliculas", action="peliculas",thumbnail=thumbnail % 'peliculas',
                               text_blod=True, page=0, viewcontent='movies',
                               url=host + 'movies/', viewmode="movie_with_plot"))

    itemlist.append(item.clone(title="Géneros", action="generos",thumbnail=thumbnail % 'generos',
                               text_blod=True, page=0, viewcontent='movies',
                               url=host + 'genre/', viewmode="movie_with_plot"))

    itemlist.append(item.clone(title="Año de Estreno", action="year_release",
                               text_blod=True, page=0, viewcontent='movies',
                               url=host + 'release/', viewmode="movie_with_plot"))

    itemlist.append(item.clone(title="Buscar", action="search",thumbnail=thumbnail % 'busqueda',
                               text_blod=True, url=host, page=0))

    itemlist.append(item.clone(title="Series", action="series", extra='serie', url=host + 'tvshows/',
                               viewmode="movie_with_plot", text_blod=True, viewcontent='movies',
                               thumbnail=thumbnail % 'series', page=0))

    return itemlist


def search(item, texto):
    logger.info()

    texto = texto.replace(" ", "+")
    item.url = urlparse.urljoin(item.url, "?s={0}".format(texto))

    try:
        return sub_search(item)

    # Se captura la excepción, para no interrumpir al buscador global si un canal falla
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []


def sub_search(item):
    logger.info()

    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    # logger.info(data)
    patron = '<div class="thumbnail animation-2"><a href="([^"]+)">.*?'  # url
    patron += '<img src="([^"]+)" alt="([^"]+)" />.*?'  # img and title
    patron += '<span class="([^"]+)".*?'  # tipo
    patron += '<span class="year">([^<]+)</span>'  # year
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle, tipo, year in matches:

        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, contentTitle=scrapedtitle,
                                   action="findvideos", infoLabels={"year": year},
                                   thumbnail=scrapedthumbnail, text_color=color3, page=0))

    paginacion = scrapertools.find_single_match(
        data, '<a class="page larger" href="([^"]+)">\d+</a>')

    if paginacion:
        itemlist.append(Item(channel=item.channel, action="sub_search",
                             title="» Siguiente »", url=paginacion))

    tmdb.set_infoLabels(itemlist)

    return itemlist


def peliculas(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", data)
    # logger.info(data)

    patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?'  # img, title.strip()
    patron += '<span class="icon-star2"></span>(.*?)/div>.*?'  # rating
    patron += '<span class="quality">([^<]+)</span>.*?'  # calidad
    patron += '<a href="([^"]+)"><div class="see"></div>.*?'  # url
    patron += '<span>(\d+)</span>'  # year

    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedthumbnail, scrapedtitle, rating, calidad, scrapedurl, year in matches[item.page:item.page + 20]:
        if 'Próximamente' not in calidad:
            scrapedtitle = scrapedtitle.replace('Ver ', '').strip()
            contentTitle = scrapedtitle.partition(':')[0].partition(',')[0]
            title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (
                scrapedtitle, year, calidad)

            itemlist.append(item.clone(channel=__channel__, action="findvideos", text_color=color3,
                                       url=scrapedurl, infoLabels={'year': year, 'rating': rating},
                                       contentTitle=contentTitle, thumbnail=scrapedthumbnail,
                                       title=title, context="buscar_trailer"))

    tmdb.set_infoLabels(itemlist, __modo_grafico__)
    tmdb.set_infoLabels(itemlist, __modo_grafico__)

    if item.page + 20 < len(matches):
        itemlist.append(item.clone(page=item.page + 20,
                                   title="» Siguiente »", text_color=color3))
    else:
        next_page = scrapertools.find_single_match(
            data, "<span class=\"current\">\d+</span><a href='([^']+)'")

        if next_page:
            itemlist.append(item.clone(url=next_page, page=0,
                                       title="» Siguiente »", text_color=color3))

    for item in itemlist:
        if item.infoLabels['plot'] == '':
            data = httptools.downloadpage(item.url).data
            data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
            item.fanart = scrapertools.find_single_match(
                data, "<meta property='og:image' content='([^']+)' />")
            item.fanart = item.fanart.replace('w780', 'original')
            item.plot = scrapertools.find_single_match(data, '</span></h4><p>([^*]+)</p><h4')
            item.plot = scrapertools.htmlclean(item.plot)
            item.infoLabels['director'] = scrapertools.find_single_match(
                data, '<div class="name"><a href="[^"]+">([^<]+)</a>')
            item.infoLabels['genre'] = scrapertools.find_single_match(
                data, 'rel="tag">[^<]+</a><a href="[^"]+" rel="tag">([^<]+)</a>')

    return itemlist


def generos(item):
    logger.info()
    itemlist = []

    data = scrapertools.cache_page(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    # logger.info(data)
    # url, title, cantidad
    patron = '<li class="cat-item cat-item-[^"]+"><a href="([^"]+)" title="[^"]+">([^<]+)</a> <i>([^<]+)</i></li>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, cantidad in matches:
        if cantidad != '0' and scrapedtitle != '# Próximamente':
            title = "%s (%s)" % (scrapedtitle, cantidad)
            itemlist.append(item.clone(channel=item.channel, action="peliculas", title=title, page=0,
                                       url=scrapedurl, text_color=color3, viewmode="movie_with_plot"))

    return itemlist


def year_release(item):
    logger.info()
    itemlist = []

    data = scrapertools.cache_page(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    # logger.info(data)
    patron = '<li><a href="([^"]+)">([^<]+)</a></li>'  # url, title
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:

        itemlist.append(item.clone(channel=item.channel, action="peliculas", title=scrapedtitle, page=0,
                                   url=scrapedurl, text_color=color3, viewmode="movie_with_plot", extra='next'))

    return itemlist


def series(item):
    logger.info()

    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    # logger.info(datas)

    patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?<a href="([^"]+)">'

    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
        scrapedtitle = scrapedtitle.replace('Ver ', '').replace(
            ' Online HD', '').replace('ver ', '').replace(' Online', '')
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="temporadas",
                                   contentSerieName=scrapedtitle, show=scrapedtitle,
                                   thumbnail=scrapedthumbnail, contentType='tvshow'))

    url_next_page = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')

    tmdb.set_infoLabels(itemlist, __modo_grafico__)
    tmdb.set_infoLabels(itemlist, __modo_grafico__)

    if url_next_page:
        itemlist.append(Item(channel=__channel__, action="series",
                             title="» Siguiente »", url=url_next_page))

    return itemlist


def temporadas(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    datas = re.sub(r"\n|\r|\t| |<br>", "", data)
    # logger.info(datas)
    patron = '<span class="title">([^<]+)<i>.*?'  # numeros de temporadas
    patron += '<img src="([^"]+)"></a></div>'  # capitulos

    matches = scrapertools.find_multiple_matches(datas, patron)
    if len(matches) > 1:
        for scrapedseason, scrapedthumbnail in matches:
            scrapedseason = " ".join(scrapedseason.split())
            temporada = scrapertools.find_single_match(scrapedseason, '(\d+)')
            new_item = item.clone(action="episodios", season=temporada, thumbnail=scrapedthumbnail)
            new_item.infoLabels['season'] = temporada
            new_item.extra = ""
            itemlist.append(new_item)

        tmdb.set_infoLabels(itemlist, __modo_grafico__)
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
        for i in itemlist:
            i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle'])
            if i.infoLabels['title']:
                # Si la temporada tiene nombre propio añadirselo al titulo del item
                i.title += " - %s" % (i.infoLabels['title'])
            if i.infoLabels.has_key('poster_path'):
                # Si la temporada tiene poster propio remplazar al de la serie
                i.thumbnail = i.infoLabels['poster_path']

        itemlist.sort(key=lambda it: it.title)

        return itemlist
    else:
        return episodios(item)


def episodios(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    datas = re.sub(r"\n|\r|\t| |<br>", "", data)
    # logger.info(datas)
    patron = '<div class="imagen"><a href="([^"]+)">.*?'  # url cap, img
    patron += '<div class="numerando">(.*?)</div>.*?'  # numerando cap
    patron += '<a href="[^"]+">([^<]+)</a>'  # title de episodios

    matches = scrapertools.find_multiple_matches(datas, patron)

    for scrapedurl, scrapedtitle, scrapedname in matches:
        scrapedtitle = scrapedtitle.replace('--', '0')
        patron = '(\d+) - (\d+)'
        match = re.compile(patron, re.DOTALL).findall(scrapedtitle)
        season, episode = match[0]

        if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season):
            continue

        title = "%sx%s: %s" % (season, episode.zfill(2), scrapertools.unescape(scrapedname))
        new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title,
                              contentType="episode")
        if 'infoLabels' not in new_item:
            new_item.infoLabels = {}

        new_item.infoLabels['season'] = season
        new_item.infoLabels['episode'] = episode.zfill(2)

        itemlist.append(new_item)

    # TODO no hacer esto si estamos añadiendo a la videoteca
    if not item.extra:
        # Obtenemos los datos de todos los capitulos de la temporada mediante multihilos
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
        for i in itemlist:
            if i.infoLabels['title']:
                # Si el capitulo tiene nombre propio añadirselo al titulo del item
                i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels[
                    'episode'], i.infoLabels['title'])
            if i.infoLabels.has_key('poster_path'):
                # Si el capitulo tiene imagen propia remplazar al poster
                i.thumbnail = i.infoLabels['poster_path']

        itemlist.sort(key=lambda it: int(it.infoLabels['episode']),
                      reverse=config.get_setting('orden_episodios', __channel__))

    # Opción "Añadir esta serie a la videoteca"
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
                             action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
                             text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))

    return itemlist


def findvideos(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data

    data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", data)
    patron = '<div id="option-(\d+)" class="play-box-iframe.*?src="([^"]+)" frameborder="0" scrolling="no" allowfullscreen></iframe>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for option, url in matches:
        lang = scrapertools.find_single_match(
            data, '<li><a class="options" href="#option-%s"><b class="icon-play_arrow"><\/b> (.*?)<span class="dt_flag">' % option)
        lang = lang.replace('Español ', '').replace('B.S.O. ', '')

        server = servertools.get_server_from_url(url)
        title = "%s [COLOR yellow](%s) (%s)[/COLOR]" % (item.contentTitle, server.title(), lang)
        itemlist.append(item.clone(action='play', url=url, title=title, extra1=title,
                                   server=server, text_color=color3))

    itemlist.append(Item(channel=item.channel,
                         title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                         url=item.url, action="add_pelicula_to_library",
                         thumbnail='https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/libreria.png',
                         extra="findvideos", contentTitle=item.contentTitle))

    return itemlist
8  plugin.video.alfa/channels/descargasmix.json  Executable file → Normal file
@@ -59,6 +59,14 @@
                "Perfil 2",
                "Perfil 1"
            ]
        },
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": true,
            "visible": true
        }
    ]
}
54  plugin.video.alfa/channels/serieslan.py  Executable file → Normal file
@@ -85,14 +85,14 @@ def episodios(item):
            total_episode += 1
            season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, total_episode)
            if len(name.split(pat)) == i:
                title += "{0}x{1:02d} ".format(season, episode)
                title += "%sx%s " % (season, str(episode).zfill(2))
            else:
                title += "{0}x{1:02d}_".format(season, episode)
                title += "%sx%s_" % (season, str(episode).zfill(2))
        else:
            total_episode += 1
            season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, total_episode)

            title += "{0}x{1:02d} ".format(season, episode)
            title += "%sx%s " % (season, str(episode).zfill(2))

        url = host + "/" + link
        if "disponible" in link:
@@ -110,21 +110,17 @@ def episodios(item):
    return itemlist


# def getUrlVideo(item):
def findvideos(item):
    ## Kodi 17+
    ## Openload as default server
    logger.info()

    import base64

    itemlist = []

    ## Urls
    urlServer = "https://openload.co/embed/%s/"
    urlApiGetKey = "https://serieslan.com/idv.php?i=%s"
    url_server = "https://openload.co/embed/%s/"
    url_api_get_key = "https://serieslan.com/ide.php?i=%s&k=%s"

    ## JS
    def txc(key, str):
    def txc(key, _str):
        s = range(256)
        j = 0
        res = ''
@@ -135,13 +131,13 @@ def findvideos(item):
            s[j] = x
        i = 0
        j = 0
        for y in range(len(str)):
        for y in range(len(_str)):
            i = (i + 1) % 256
            j = (j + s[i]) % 256
            x = s[i]
            s[i] = s[j]
            s[j] = x
            res += chr(ord(str[y]) ^ s[(s[i] + s[j]) % 256])
            res += chr(ord(_str[y]) ^ s[(s[i] + s[j]) % 256])
        return res

    data = httptools.downloadpage(item.url).data
@@ -151,20 +147,26 @@ def findvideos(item):
        '<div id="tab-1" class="tab-content current">.+?<img src="([^"]*)">')
    show = scrapertools.find_single_match(data, '<span>Episodio: <\/span>([^"]*)<\/p><p><span>Idioma')
    thumbnail = host + thumbnail
    data = httptools.downloadpage(urlApiGetKey % idv, headers={'Referer': item.url}).data
    video_url = urlServer % (txc(ide, base64.decodestring(data)))
    server = "openload"
    if " SUB" in item.title:
        lang = "VOS"
    elif " Sub" in item:
        lang = "VOS"
    else:
        lang = "Latino"
    title = "Enlace encontrado en " + server + " [" + lang + "]"
    itemlist.append(Item(channel=item.channel, action="play", title=title, show=show, url=video_url, plot=item.plot,
                         thumbnail=thumbnail, server=server, folder=False))
    data = httptools.downloadpage(url_api_get_key % (idv, ide), headers={'Referer': item.url}).data
    data = eval(data)

    return itemlist

    if type(data) == list:
        logger.debug("inside")
        video_url = url_server % (txc(ide, base64.decodestring(data[2])))
        server = "openload"
        if " SUB" in item.title:
            lang = "VOS"
        elif " Sub" in item:
            lang = "VOS"
        else:
            lang = "Latino"
        title = "Enlace encontrado en " + server + " [" + lang + "]"
        itemlist.append(Item(channel=item.channel, action="play", title=title, show=show, url=video_url, plot=item.plot,
                             thumbnail=thumbnail, server=server, folder=False))

        return itemlist
    else:
        return []
def play(item):
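Note: the txc helper touched by these hunks (the str → _str rename) is an RC4-style cipher — a key-scheduling pass over a 256-entry state table followed by a keystream XOR over the input. The diff only shows it in fragments, so here is a minimal self-contained sketch of the same idea (a reconstruction for reference, not a verbatim copy of serieslan.py):

    def txc(key, _str):
        # key scheduling: permute a 256-entry state table using the key
        s = list(range(256))
        j = 0
        for i in range(256):
            j = (j + s[i] + ord(key[i % len(key)])) % 256
            s[i], s[j] = s[j], s[i]
        # keystream generation: XOR each input byte with a generated byte
        res = ''
        i = j = 0
        for y in range(len(_str)):
            i = (i + 1) % 256
            j = (j + s[i]) % 256
            s[i], s[j] = s[j], s[i]
            res += chr(ord(_str[y]) ^ s[(s[i] + s[j]) % 256])
        return res

In the new findvideos() it is keyed with ide and fed base64.decodestring(data[2]) from the ide.php response to build the final Openload embed URL.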
@@ -143,6 +143,13 @@ def render_items(itemlist, parent_item):
        # TODO: ¿Se puede eliminar esta linea? yo no he visto que haga ningun efecto.
        xbmcplugin.setPluginFanart(int(sys.argv[1]), os.path.join(config.get_runtime_path(), "fanart.jpg"))

        # Esta opcion es para poder utilizar el xbmcplugin.setResolvedUrl()
        # if item.isPlayable == True or (config.get_setting("player_mode") == 1 and item.action == "play"):
        if config.get_setting("player_mode") == 1 and item.action == "play":
            if item.folder:
                item.folder = False
            listitem.setProperty('IsPlayable', 'true')

        # Añadimos los infoLabels
        set_infolabels(listitem, item)
@@ -640,7 +647,7 @@ def get_dialogo_opciones(item, default_action, strm):
        # "Añadir a videoteca"
        opciones.append(config.get_localized_string(30161))

    if default_action == "3":
    if default_action == 3:
        seleccion = len(opciones) - 1

    # Busqueda de trailers en youtube
@@ -800,11 +807,16 @@ def set_player(item, xlistitem, mediaurl, view, strm):
        # Reproduce
        xbmc_player = xbmc.Player()
        xbmc_player.play(playlist, xlistitem)

    # elif config.get_setting("player_mode") == 1 or item.isPlayable:
    elif config.get_setting("player_mode") == 1:
        logger.info("mediaurl :" + mediaurl)
        logger.info("Tras setResolvedUrl")
        xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, xbmcgui.ListItem(path=mediaurl))
        # si es un archivo de la videoteca enviar a marcar como visto
        if strm or item.strm_path:
            from platformcode import xbmc_videolibrary
            xbmc_videolibrary.mark_auto_as_watched(item)
        xlistitem.setPath(mediaurl)
        xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, xlistitem)

    elif config.get_setting("player_mode") == 2:
        xbmc.executebuiltin("PlayMedia(" + mediaurl + ")")
0  plugin.video.alfa/resources/settings.xml  Executable file → Normal file
4  plugin.video.alfa/servers/streamplay.py  Executable file → Normal file
@@ -17,7 +17,8 @@ def test_video_exists(page_url):
    data = httptools.downloadpage(page_url, headers={'Referer': referer}).data
    if data == "File was deleted":
        return False, "[Streamplay] El archivo no existe o ha sido borrado"

    elif "Video is processing now" in data:
        return False, "[Streamplay] El archivo se está procesando"
    return True, ""
@@ -26,7 +27,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
    referer = re.sub(r"embed-|player-", "", page_url)[:-5]
    data = httptools.downloadpage(page_url, headers={'Referer': referer}).data

    matches = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>(eval.*?)</script>")
    data = jsunpack.unpack(matches).replace("\\", "")
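Note: this is the usual Dean Edwards p.a.c.k.e.r. flow — the page embeds an eval(function(p,a,c,k,e,d)...) blob, which jsunpack.unpack() expands into plain JavaScript held in data. The stream URL is then typically scraped out of that unpacked text further down in streamplay.py (not shown in this hunk); roughly, and purely as an illustration with an assumed regex:

    # illustrative only: pull a direct stream URL out of the unpacked player setup
    media_url = scrapertools.find_single_match(data, 'file\s*:\s*"([^"]+)"')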