Merge remote-tracking branch 'alfa-addon/master'

This commit is contained in:
unknown
2018-08-16 08:23:58 -03:00
86 changed files with 4948 additions and 2279 deletions

View File

@@ -1,10 +1,12 @@
# -*- coding: utf-8 -*-
import os
import random
import re
import threading
import time
import traceback
from platformcode import platformtools
from BaseHTTPServer import HTTPServer
from HTTPWebSocketsHandler import HTTPWebSocketsHandler

View File

@@ -8,6 +8,8 @@ import sys
import threading
import time
from functools import wraps
# Requerido para el ejecutable en windows
import SimpleHTTPServer
sys.dont_write_bytecode = True
from platformcode import config

5
mediaserver/genera.bat Normal file
View File

@@ -0,0 +1,5 @@
REM Builds the files for the Alfa Mediaserver Windows executable.
REM Step 1: run py2exe (see setup.py) bundling the listed packages into dist\.
python setup.py py2exe -p channels,servers,lib,platformcode
REM Step 2: copy the data directories py2exe does not package.
REM xcopy flags: /y overwrite without prompting, /s recurse subdirectories,
REM /i assume the destination is a directory (create it if missing).
xcopy lib dist\lib /y /s /i
xcopy platformcode dist\platformcode /y /s /i
xcopy resources dist\resources /y /s /i

View File

@@ -14,7 +14,7 @@ settings_dic = {}
adult_setting = {}
def get_addon_version(linea_inicio=0, total_lineas=2):
def get_addon_version(linea_inicio=0, total_lineas=2, with_fix=False):
'''
Devuelve el número de de versión del addon, obtenido desde el archivo addon.xml
'''
@@ -271,6 +271,14 @@ def get_localized_string(code):
return dev
def get_localized_category(categ):
    """Return the localized display name for a category id.

    Unknown category ids are returned unchanged.
    """
    # Map every known category id to its localized string resource.
    localized = {'movie': get_localized_string(30122), 'tvshow': get_localized_string(30123),
                 'anime': get_localized_string(30124), 'documentary': get_localized_string(30125),
                 'vos': get_localized_string(30136), 'adult': get_localized_string(30126),
                 'direct': get_localized_string(30137), 'torrent': get_localized_string(70015)}
    # Fall back to the raw id when no translation exists.
    return localized.get(categ, categ)
def get_videolibrary_path():
value = get_setting("videolibrarypath")
if value == "":

View File

@@ -7,12 +7,12 @@ import os
from inspect import isclass
from controller import Controller
from platformcode import logger
from platformcode import config, logger
def load_controllers():
controllers = []
path = os.path.split(__file__)[0]
path = os.path.join(config.get_runtime_path(),"platformcode\controllers")
for fname in os.listdir(path):
mod, ext = os.path.splitext(fname)
fname = os.path.join(path, fname)

18
mediaserver/setup.py Normal file
View File

@@ -0,0 +1,18 @@
# setup.py
# Builds the Alfa mediaserver Windows executable using py2exe.
# Build command: python setup.py py2exe -p channels,servers,lib,platformcode
from distutils.core import setup
import glob
import py2exe  # importing registers the "py2exe" distutils command

# Python packages compiled into the executable bundle.
PACKAGES = ['channels', 'servers', 'lib', 'platformcode', 'platformcode/controllers']

# Non-code resources copied next to the executable (Windows-style glob paths).
DATA_FILES = [
    ("channels", glob.glob("channels\\*.py")),
    ("channels", glob.glob("channels\\*.json")),
    ("servers", glob.glob("servers\\*.py")),
    ("servers", glob.glob("servers\\*.json")),
    ("", glob.glob("addon.xml")),
]

setup(
    packages=PACKAGES,
    data_files=DATA_FILES,
    console=["alfa.py"],  # console (not GUI) entry point
)

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.6.1" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.7" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,12 +19,12 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
¤ dospelis ¤ goodpelis
¤ pelisr ¤ sonpelis
¤ jawcloud ¤ estream
¤ pelisplusco ¤ hdfull
¤ cinemahd
¤ elitetorrent ¤ grantorrent
¤ newpct1 ¤ seriesanimadas
¤ seriesblanco ¤ rapidvideo
¤ watchvideo ¤ pelispedia
¤ beeg
¤ arreglos internos
¤ Agradecimientos a @angedam por colaborar en ésta versión

View File

@@ -1,39 +0,0 @@
{
"id": "alltorrent",
"name": "Alltorrent",
"active": false,
"adult": false,
"language": ["cast"],
"thumbnail": "altorrent.png",
"fanart": "altorrent.jpg",
"categories": [
"torrent",
"movie"
],
"settings": [
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_torrent",
"type": "bool",
"label": "Incluir en Novedades - Torrent",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,314 +0,0 @@
# -*- coding: utf-8 -*-
import re
import sys
import urllib
import urlparse
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import tmdb
from lib import generictools
host = 'http://alltorrent.net/'
__modo_grafico__ = config.get_setting('modo_grafico', 'alltorrent')
def mainlist(item):
    """Build the channel's root menu: quality-filtered movie listings plus search entries."""
    logger.info()
    itemlist = []

    thumb_pelis = get_thumb("channels_movie.png")
    thumb_pelis_hd = get_thumb("channels_movie_hd.png")
    thumb_series = get_thumb("channels_tvshow.png")
    thumb_series_hd = get_thumb("channels_tvshow_hd.png")
    thumb_buscar = get_thumb("search.png")

    # (title, url, thumbnail) for every "listado" browse entry, in menu order.
    browse_entries = [
        ("[COLOR springgreen][B]Todas Las Películas[/B][/COLOR]", host, thumb_pelis),
        ("[COLOR springgreen] Incluyen 1080p[/COLOR]", host + "rezolucia/1080p/", thumb_pelis_hd),
        ("[COLOR springgreen] Incluyen 720p[/COLOR]", host + "rezolucia/720p/", thumb_pelis_hd),
        ("[COLOR springgreen] Incluyen Hdrip[/COLOR]", host + "rezolucia/hdrip/", thumb_pelis),
        ("[COLOR springgreen] Incluyen 3D[/COLOR]", host + "rezolucia/3d/", thumb_pelis_hd),
    ]
    for menu_title, menu_url, menu_thumb in browse_entries:
        itemlist.append(item.clone(title=menu_title, action="listado", url=menu_url,
                                   thumbnail=menu_thumb, extra="pelicula"))

    # (title, extra) for the search entries; "extra" selects the search mode.
    search_entries = [
        ("[COLOR floralwhite][B]Buscar[/B][/COLOR]", "titulo"),
        ("[COLOR oldlace] Por Título[/COLOR]", "titulo"),
        ("[COLOR oldlace] Por Año[/COLOR]", "año"),
        ("[COLOR oldlace] Por Rating Imdb[/COLOR]", "rating"),
    ]
    for menu_title, search_mode in search_entries:
        itemlist.append(item.clone(title=menu_title, action="search",
                                   thumbnail=thumb_buscar, extra=search_mode))

    return itemlist
def listado(item):
    # Scrape one listing page: download it, parse every movie entry, detect
    # quality/language hints, enrich the whole list through TMDB and finally
    # append a pagination item when the site offers a next page.
    logger.info()
    itemlist = []
    # Download the page (failures are swallowed; the empty-data checks below handle them)
    data = ''
    try:
        data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    except:
        pass
    if not data and item.extra != "año":  # If the site is down, exit without raising
        logger.error("ERROR 01: LISTADO: La Web no responde o ha cambiado de URL: " + item.url + " / DATA: " + data)
        itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: LISTADO:. La Web no responde o ha cambiado de URL. Si la Web está activa, reportar el error con el log'))
        return itemlist  # no more data: something failed, render what we have
    elif not data and item.extra == "año":  # a year with no results yields no data; avoid raising an error
        return itemlist
    patron = '<div class="browse-movie-wrap col-xs-10 col-sm-4 col-md-5 col-lg-4"><a href="([^"]+)".*?src="([^"]+)".*?alt="([^"]+)".*?rel="tag">([^"]+)<\/a>\s?<\/div><div class="[^"]+">(.*?)<\/div><\/div><\/div>'
    #data = scrapertools.find_single_match(data, patron)
    matches = re.compile(patron, re.DOTALL).findall(data)
    if not matches and not '<ul class="tsc_pagination tsc_pagination' in data:  # error
        item = generictools.web_intervenida(item, data)  # Check whether the site has been taken down
        if item.intervencion:  # It has been shut down by court order
            item, itemlist = generictools.post_tmdb_episodios(item, itemlist)  # Render the takedown notice
            return itemlist  # Bail out
        logger.error("ERROR 02: LISTADO: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
        itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: LISTADO: Ha cambiado la estructura de la Web. Reportar el error con el log'))
        return itemlist  # no more data: something failed, render what we have
    #logger.debug("PATRON: " + patron)
    #logger.debug(matches)
    #logger.debug(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedqualities in matches:
        item_local = item.clone()  # Work on a copy of Item
        title_subs = []
        title = re.sub('\r\n', '', scrapedtitle).decode('utf8').strip()
        item_local.url = scrapedurl
        item_local.thumbnail = scrapedthumbnail
        # Collect the qualities announced for this entry (each one links to a .torrent)
        scrapedtorrent = ''
        if scrapedqualities:
            patron_quality = '<a href="([^"]+)"\s?rel="[^"]+"\s?title="[^"]+">(.*?)<\/a>'
            matches_quality = re.compile(patron_quality, re.DOTALL).findall(scrapedqualities)
            quality = ''
            for scrapedtorrent, scrapedquality in matches_quality:
                # Normalize truncated quality labels ("HDr" -> "HDrip", "720" -> "720p", ...)
                quality_inter = scrapedquality
                quality_inter = re.sub('HDr$', 'HDrip', quality_inter)
                quality_inter = re.sub('720$', '720p', quality_inter)
                quality_inter = re.sub('1080$', '1080p', quality_inter)
                if quality:
                    quality += ', %s' % quality_inter
                else:
                    quality = quality_inter
            if quality:
                item_local.quality = quality
        item_local.language = []  # Detect the language from whatever hints we can find
        if "latino" in scrapedtorrent.lower() or "latino" in item.url or "latino" in title.lower():
            item_local.language += ["LAT"]
        if "ingles" in scrapedtorrent.lower() or "ingles" in item.url or "vose" in scrapedurl or "vose" in item.url:
            if "VOSE" in scrapedtorrent.lower() or "sub" in title.lower() or "vose" in scrapedurl or "vose" in item.url:
                item_local.language += ["VOS"]
            else:
                item_local.language += ["VO"]
        if "dual" in scrapedtorrent.lower() or "dual" in title.lower():
            item_local.language[0:0] = ["DUAL"]
        # Strip release-tag clutter from the title
        title = title.replace("Dual", "").replace("dual", "").replace("Subtitulada", "").replace("subtitulada", "").replace("Subt", "").replace("subt", "").replace("Sub", "").replace("sub", "").replace("(Proper)", "").replace("(proper)", "").replace("Proper", "").replace("proper", "").replace("#", "").replace("(Latino)", "").replace("Latino", "")
        title = title.replace("- HDRip", "").replace("(HDRip)", "").replace("- Hdrip", "").replace("(microHD)", "").replace("(DVDRip)", "").replace("(HDRip)", "").replace("(BR-LINE)", "").replace("(HDTS-SCREENER)", "").replace("(BDRip)", "").replace("(BR-Screener)", "").replace("(DVDScreener)", "").replace("TS-Screener", "").replace(" TS", "").replace(" Ts", "")
        title = re.sub(r'\??\s?\d*?\&.*', '', title).title().strip()
        item_local.from_title = title  # Keep this tag for possible later title disambiguation
        item_local.contentType = "movie"
        item_local.contentTitle = title
        item_local.extra = "peliculas"
        item_local.action = "findvideos"
        item_local.title = title.strip()
        item_local.infoLabels['year'] = "-"
        # String comparison works here because scrapedyear is a 4-digit string
        if scrapedyear >= "1900" and scrapedyear <= "2040":
            title_subs += [scrapedyear]
        itemlist.append(item_local.clone())  # Render this entry
    #if not item.category:  # If this field is missing, this is the first pass of a global search
    #    return itemlist    # Return without the polishing phase to save time
    # Pass the complete itemlist through TMDB
    tmdb.set_infoLabels(itemlist, True)
    # Polish the titles obtained from TMDB
    item, itemlist = generictools.post_tmdb_listado(item, itemlist)
    # Extract the paginator
    patron = '<li><a href="[^"]+">(\d+)<\/a><\/li>'  # total number of pages
    patron += '<li><a href="([^"]+\/page\/(\d+)\/)"\s?rel="[^"]+">Página siguiente[^<]+<\/a><\/li><\/ul><\/div><\/ul>'  # next-page url
    url_next = ''
    if scrapertools.find_single_match(data, patron):
        last_page, url_next, next_num = scrapertools.find_single_match(data, patron)
    if url_next:
        if last_page:
            title = '[COLOR gold]Página siguiente >>[/COLOR] %s de %s' % (int(next_num) - 1, last_page)
        else:
            title = '[COLOR gold]Página siguiente >>[/COLOR] %s' % (int(next_num) - 1)
        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url_next, extra=item.extra))
    return itemlist
def findvideos(item):
    """List playable links for one movie: every .torrent quality plus a direct server.

    Renders one item per torrent quality found on the page, then probes the
    direct (openload) link and appends it only when it is not confirmed dead.
    """
    logger.info()
    itemlist = []
    # Download the page (failures are swallowed; the empty-data check handles them)
    data = ''
    try:
        data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    except:
        pass
    if not data:
        logger.error("ERROR 01: FINDVIDEOS: La Web no responde o la URL es erronea: " + item.url + " / DATA: " + data)
        itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: FINDVIDEOS:. La Web no responde o la URL es erronea. Si la Web está activa, reportar el error con el log'))
        return itemlist  # no more data: something failed, render what we have
    patron = 'id="modal-quality-\w+"><span>(.*?)</span>.*?class="quality-size">(.*?)</p>.*?href="([^"]+)"'  # the .torrent links
    matches = re.compile(patron, re.DOTALL).findall(data)
    if not matches:  # error
        logger.error("ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
        itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web. Verificar en la Web y reportar el error con el log'))
        return itemlist  # no more data: something failed, render what we have
    # Build the generic video header title with all the TMDB information
    item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)
    #logger.debug("PATRON: " + patron)
    #logger.debug(matches)
    for scrapedquality, scrapedsize, scrapedtorrent in matches:  # one torrent per quality
        # Work on a copy of Item
        item_local = item.clone()
        item_local.quality = scrapedquality
        if item.infoLabels['duration']:
            item_local.quality += scrapertools.find_single_match(item.quality, '(\s\[.*?\])')  # Copy the duration
        # Append the size for every entry
        item_local.quality = '%s [%s]' % (item_local.quality, scrapedsize)  # Add size at the end of quality
        item_local.quality = item_local.quality.replace("G", "G ").replace("M", "M ")  # Avoid the Unify reserved word
        # Render the torrent link
        item_local.url = scrapedtorrent
        item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language))  # Torrent title
        item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title)  # Drop empty tags
        item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title)  # Drop empty colors
        item_local.alive = "??"  # Link quality not verified
        item_local.action = "play"  # Play the video
        item_local.server = "torrent"  # Torrent server
        itemlist.append(item_local.clone())  # Render this entry
        #logger.debug(item_local)
    # Now handle the direct-server link
    item_local = item.clone()
    servidor = 'openload'
    item_local.quality = ''
    if item.infoLabels['duration']:
        item_local.quality = scrapertools.find_single_match(item.quality, '(\s\[.*?\])')  # Copy the duration
    enlace = scrapertools.find_single_match(data, 'button-green-download-big".*?href="([^"]+)"><span class="icon-play">')
    if enlace:
        try:
            devuelve = servertools.findvideosbyserver(enlace, servidor)  # does the link exist?
            if devuelve:
                enlace = devuelve[0][1]  # Keep the resolved link
                item_local.alive = "??"  # Assume the link is doubtful by default
                # Verify the link the same way check_list_links(itemlist) does per server
                item_local.alive = servertools.check_video_link(enlace, servidor, timeout=5)  # is the link alive?
                # Dead links are skipped
                if item_local.alive == "??":  # doubtful
                    item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][%s][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (servidor.capitalize(), item_local.quality, str(item_local.language))
                elif item_local.alive.lower() == "no":  # Dead: prepare it, but do not render it
                    item_local.title = '[COLOR red][%s][/COLOR] [COLOR yellow][%s][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.alive, servidor.capitalize(), item_local.quality, str(item_local.language))
                    # FIX: was `title`, an undefined name in this scope (NameError,
                    # silently swallowed by the bare except, so this line never logged)
                    logger.debug(item_local.alive + ": ALIVE / " + item.title + " / " + servidor + " / " + enlace)
                    raise  # deliberate: jump to the except so the dead link is not appended
                else:  # It is alive
                    item_local.title = '[COLOR yellow][%s][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (servidor.capitalize(), item_local.quality, str(item_local.language))
                # Prepare the remaining Item fields to play the video directly
                item_local.action = "play"
                item_local.server = servidor
                item_local.url = enlace
                item_local.title = item_local.title.replace("[]", "").strip()
                item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip()
                item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title).strip()
                itemlist.append(item_local.clone())
                #logger.debug(item_local)
        except:
            pass
    return itemlist
def actualizar_titulos(item):
    """Refresh the item's title via generictools (which uses tmdb.find_and_set_infoLabels)."""
    logger.info()
    # Delegate the TMDB lookup and title rewrite to the shared helper,
    # then hand control back to the channel's next action.
    return generictools.update_title(item)
def search(item, texto):
    """Build the search URL for the selected mode and run the listing.

    item.extra selects the mode: "titulo" (free-text title), "año" (year);
    anything else is treated as an IMDB-rating search.

    Returns the scraped itemlist, or None when *texto* is empty (original
    contract kept for callers that skip empty searches).
    """
    logger.info()
    texto = texto.replace(" ", "+")
    if item.extra == "titulo":
        item.url = host + "?s=" + texto
    elif item.extra == "año":
        item.url = host + "weli/" + texto + "/"
    else:
        # FIX: this line was `item.extra == "imdb"` — a no-op comparison where
        # an assignment was intended, so the mode tag was never recorded.
        item.extra = "imdb"
        item.url = host + "imdb/" + texto + "/"
    if texto != '':
        return listado(item)
def newest(categoria):
    """Feed the global "Novedades" (newest) menu.

    Returns the latest items for *categoria* ('torrent' is the only category
    this channel serves). Any failure returns [] so one broken channel does
    not interrupt the aggregated newest listing.
    """
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == 'torrent':
            item.url = host
            item.extra = "peliculas"
            # NOTE(review): the channel id elsewhere is "alltorrent" (singular);
            # confirm whether "alltorrents" here is intentional.
            item.channel = "alltorrents"
            itemlist = listado(item)
            # Drop the trailing pagination entry if present.
            # FIX: the previous exact comparison (== "Página siguiente >>") never
            # matched, because listado() decorates the title with [COLOR ...]
            # tags and a page counter, so the pagination item was never removed.
            if itemlist and "Página siguiente >>" in itemlist[-1].title:
                itemlist.pop()
    # Catch everything so a failing channel does not break the newest menu
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist

View File

@@ -1,7 +1,7 @@
{
"id": "animeflv_me",
"name": "Animeflv.ME",
"active": true,
"active": false,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "http://i.imgur.com/x9AdvBx.png",

View File

@@ -8,6 +8,7 @@
"banner": "https://imgur.com/B1IOAu4.png",
"categories": [
"movie",
"tvshow"
"tvshow",
"vos"
]
}

View File

@@ -6,6 +6,7 @@ import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
@@ -17,15 +18,21 @@ def mainlist(item):
itemlist = list()
itemlist.append(Item(channel=item.channel, action="estrenos", title="Estrenos", url=host))
itemlist.append(Item(channel=item.channel, action="lista", title="Peliculas",
url=urlparse.urljoin(host, "p/peliculas.html")))
url=urlparse.urljoin(host, "p/peliculas.html"), type='pl', first=0))
itemlist.append(Item(channel=item.channel, action="lista", title="Series",
url=urlparse.urljoin(host, "p/series.html")))
itemlist.append(Item(channel=item.channel, action="category", title="Orden Alfabético", url=host))
itemlist.append(Item(channel=item.channel, action="category", title="Géneros", url=host))
itemlist.append(Item(channel=item.channel, action="category", title="Año de Estreno", url=host))
#itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=urlparse.urljoin(host, "/search?q=")))
url=urlparse.urljoin(host, "p/series.html"), type='sr', first=0))
itemlist.append(Item(channel=item.channel, action="category", title="Géneros", url=host, cat='genre'))
itemlist.append(Item(channel=item.channel, action="category", title="Calidad", url=host, cat='quality'))
itemlist.append(Item(channel=item.channel, action="category", title="Orden Alfabético", url=host, cat='abc'))
itemlist.append(Item(channel=item.channel, action="category", title="Año de Estreno", url=host, cat='year'))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+"/search?q="))
return itemlist
@@ -34,155 +41,175 @@ def category(item):
itemlist = list()
data = httptools.downloadpage(host).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron_generos = "<h2 class='title'>"+item.title+"<\/h2><div class='.+?'><ul class='.+?'><(.+?)><\/ul><\/div>"
data_generos = scrapertools.find_single_match(data, patron_generos)
patron = "<a href='(.+?)'>(.+?)<\/a>"
matches = scrapertools.find_multiple_matches(data_generos, patron)
for scrapedurl, scrapedtitle in matches:
if item.cat == 'abc':
data = scrapertools.find_single_match(data, '<span>Orden Alfabético</span>.*?</ul>')
elif item.cat == 'genre':
data = scrapertools.find_single_match(data, '<span>Géneros</span>.*?</ul>')
elif item.cat == 'year':
data = scrapertools.find_single_match(data, '<span>Año</span>.*?</ul>')
elif item.cat == 'quality':
data = scrapertools.find_single_match(data, '<span>Calidad</span>.*?</ul>')
patron = "<li>([^<]+)<a href='([^']+)'>"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle, scrapedurl in matches:
if scrapedtitle != 'Próximas Películas':
itemlist.append(item.clone(action='lista', title=scrapedtitle, url=host+scrapedurl))
itemlist.append(item.clone(action='lista', title=scrapedtitle, url=host+scrapedurl, type='cat', first=0))
return itemlist
def search_results(item):
    """Parse the search-results page and return one Item per hit (movie or serie)."""
    logger.info()
    results = []

    page = httptools.downloadpage(item.url).data
    page = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", page)

    # Capture groups: type, thumbnail, year, title, url
    pattern = ('<span class=.post-labels.>([^<]+)</span>.*?class="poster-bg" src="([^"]+)"/>.*?<h4>.*?'
               ">(\d{4})</a>.*?<h6>([^<]+)<a href='([^']+)")
    for kind, thumb, year, name, url in scrapertools.find_multiple_matches(page, pattern):
        entry = Item(channel=item.channel,
                     title="%s [%s]" % (name, year),
                     url=url,
                     thumbnail=thumb)
        if kind.strip() == 'Serie':
            # Series expand into their episode list
            entry.contentSerieName = name
            entry.action = 'episodios'
            entry.type = 'sr'
        else:
            # Movies go straight to findvideos
            entry.contentTitle = name
            entry.action = 'findvideos'
            entry.type = 'pl'
        results.append(entry)
    return results
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return lista(item)
return search_results(item)
def estrenos(item):
    # List the "Estrenos" (new releases) widget scraped from the site's home page.
    logger.info()
    itemlist = list()
    data = httptools.downloadpage(host).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    # Isolate the home-page widget (id HTML9) that holds the releases
    patron_estre = "<div class='widget HTML' data-version='1' id='HTML9'><h2 class='title'>(.+?)<\/a><\/li><\/ul>"
    data_estre = scrapertools.find_single_match(data, patron_estre)
    patron = '<i class="([^"]+)"><\/i><div class="calidad">.+?' # serie or pelicula
    patron +='<img src="([^"]+)"\/>' # scrapedthumbnail
    patron +='<h4>([^"]+)<\/h4>.+?' # scrapedtitle
    patron +='<a href="([^"]+)">' # scrapedurl
    matches = scrapertools.find_multiple_matches(data_estre, patron)
    for scrapedtype, scrapedthumbnail,scrapedtitle,scrapedurl in matches:
        title = "%s [%s]" % (scrapedtitle, scrapedtype)
        if scrapedtype == "pelicula":
            # Movies go straight to findvideos
            itemlist.append(item.clone(title=title, url=host+scrapedurl, action="findvideos", extra=scrapedtype,
                                       show=scrapedtitle, thumbnail=scrapedthumbnail, contentType="movie",
                                       context=["buscar_trailer"]))
        else:
            # Series are expanded through their chapter list
            itemlist.append(item.clone(title=title, url=host+scrapedurl, show=scrapedtitle,
                                       thumbnail=scrapedthumbnail, action="capitulos"))
    return itemlist
def capitulos(item):
def episodios(item):
logger.info()
itemlist = list()
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron_datos='<div class="output">(.+?)><\/section>'
data_caps = scrapertools.find_single_match(data, patron_datos)
patron_caps='<img alt=".+?" src="(.+?)"\/><a href="http:\/\/bit.ly\/(.+?)"'
matches = scrapertools.find_multiple_matches(data_caps, patron_caps)
cap=0
for scrapedthumbnail,scrapedurl in matches:
link = scrapedurl
cap=cap+1
link="http://www.trueurl.net/?q=http%3A%2F%2Fbit.ly%2F"+link+"&lucky=on&Uncloak=Find+True+URL"
data_other = httptools.downloadpage(link).data
data_other = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data_other)
patron='<A title="http:\/\/privatelink.de\/\?(.+?)"'
url = scrapertools.find_single_match(data_other, patron)
title="%s%s - %s" % (title,str(cap).zfill(2),item.show)
itemlist.append(item.clone(action='findvideos', title=title,
url=url,show=item.show,thumbnail=scrapedthumbnail))
patron ='<div id="ep(\d+)" class="eps"> <section class="section-post online"><div class="player">.*?'
patron += 'src="([^"]+)"/><a href="([^"]+)" target='
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedepi, scrapedthumbnail, scrapedurl in matches:
url = scrapedurl
title="1x%s - %s" % (scrapedepi, item.contentSerieName)
itemlist.append(item.clone(action='findvideos', title=title, url=url, thumbnail=scrapedthumbnail, type=item.type,
infoLabels=item.infoLabels))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show))
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]",
url=item.url, action="add_serie_to_library", extra="episodios",
contentSerieName=item.contentSerieName))
return itemlist
def bitly(item):
    # Resolve the page's obfuscated bit.ly shortlink into the real video URL
    # and return a clone of the item pointing at it, ready for findvideos.
    logger.info()
    itemlist = list()  # NOTE(review): unused local; kept as-is (doc-only pass)
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    # Grab the bit.ly id embedded in the page
    patron = '<a href="http:\/\/bit.ly\/(.+?)"'
    link = scrapertools.find_single_match(data, patron)
    # Uncloak the shortened link through trueurl.net instead of following redirects
    link="http://www.trueurl.net/?q=http%3A%2F%2Fbit.ly%2F"+link+"&lucky=on&Uncloak=Find+True+URL"
    data_other = httptools.downloadpage(link).data
    data_other = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data_other)
    # The real target is wrapped in a privatelink.de indirection
    patron='<A title="http:\/\/privatelink.de\/\?(.+?)"'
    url = scrapertools.find_single_match(data_other, patron)
    # Anything that is not a movie is treated as a serie
    if item.contentType=="movie":
        contentType="movie"
    else:
        contentType="serie"
    item=(item.clone(action='findvideos',url=url,show=item.show, thumbnail=item.thumbnail, contentType=contentType))
    return item
def lista(item):
logger.info()
next = True
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<i class="(.+?)"><\/i>' # scrapedtype
patron +='<div class="calidad">(.+?)<\/div>' # scrapedquality
patron += '<img src="(.+?)"\/>' # scrapedthumbnail
patron += '<h4>(.+?)<\/h4>' # scrapedtitle
patron += "<h5>(.+?)<\/h5>" # scrapedyear
patron += '<a href="(.+?)"' # scrapedurl
#patron += "<\/a>.+?<div class='item-snippet'>(.+?)<" # scrapedplot
if item.title!="Prueba":
pat='<div id="tab-1"><ul class="post-gallery">(.+?)<\/ul><\/div>'
data=scrapertools.find_single_match(data, pat)
data = scrapertools.find_single_match(data, "itemprop='headline'>.*?</h2>.*?</ul>")
patron = '<span class="([^"]+)">.*?<figure class="poster-bg"><header><span>(\d{4})</span></header><img src="([^"]+)" />'
patron += '<footer>(.*?)</footer></figure><h6>([^<]+)</h6><a href="([^"]+)"></a>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedtype,scrapedquality,scrapedthumbnail,scrapedtitle,scrapedyear,scrapedurl in matches:
first = int(item.first)
last = first + 19
if last > len(matches):
last = len(matches)
next = False
for scrapedtype, scrapedyear, scrapedthumbnail, scrapedquality, scrapedtitle ,scrapedurl in matches[first:last]:
patron_quality="<span>(.+?)</span>"
quality = scrapertools.find_multiple_matches(scrapedquality, patron_quality)
qual=""
for calidad in quality:
qual=qual+"["+calidad+"] "
title="%s [%s] %s" % (scrapedtitle,scrapedyear,qual)
if item.title =="Series":
itemlist.append(item.clone(title=title, url=host+scrapedurl, extra=scrapedtitle, plot=scrapedtitle,
show=scrapedtitle, thumbnail=scrapedthumbnail, contentType="serie", action="capitulos"))
elif scrapedtype != 'serie':
itemlist.append(
item.clone(title=title, url=host+scrapedurl, action="findvideos", extra=scrapedtype, plot=scrapedtitle,
show=scrapedtitle, thumbnail=scrapedthumbnail, contentType="movie", context=["buscar_trailer"]))
new_item= Item(channel=item.channel, title=title, url=host+scrapedurl, thumbnail=scrapedthumbnail,
type=scrapedtype, infoLabels={'year':scrapedyear})
if scrapedtype.strip() == 'sr':
new_item.contentSerieName = scrapedtitle
new_item.action = 'episodios'
else:
new_item.contentTitle = scrapedtitle
new_item.action = 'findvideos'
# Paginacion
patron_genero = '<h1>([^"]+)<\/h1>'
genero = scrapertools.find_single_match(data, patron_genero)
if genero == "Romance" or genero == "Drama":
patron = "<a rel='nofollow' class=previouspostslink' href='([^']+)'>Siguiente "
else:
patron = "<span class='current'>.+?href='(.+?)'>"
if scrapedtype == item.type or item.type == 'cat':
itemlist.append(new_item)
next_page_url = scrapertools.find_single_match(data, patron)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
#pagination
url_next_page = item.url
first = last
if next:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='lista', first=first))
if next_page_url != "":
item.url = next_page_url
itemlist.append(Item(channel=item.channel, action="lista", title=">> Página siguiente", url=next_page_url,
thumbnail='https://s32.postimg.cc/4zppxf5j9/siguiente.png'))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
if item.extra == 'pelicula':
item = bitly(item)
dl_links = []
data = httptools.downloadpage(item.url).data
itemlist.extend(servertools.find_video_items(data=data))
show = item.show
for videoitem in itemlist:
videoitem.channel = item.channel
if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentType=="movie" and item.contentChannel!='videolibrary':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=show))
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
return itemlist
### obtiene los gvideo
patron = 'class="Button Sm fa fa-download mg"></a><a target="_blank" rel="nofollow" href="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for dl_url in matches:
g_data = httptools.downloadpage(dl_url).data
video_id = scrapertools.find_single_match(g_data, 'jfk-button jfk-button-action" href="([^"]+)">')
g_url = '%s%s' % ('https://drive.google.com', video_id)
g_url = g_url.replace('&amp;', '&')
g_data = httptools.downloadpage(g_url, follow_redirects=False, only_headers=True).headers
url = g_data['location']
dl_links.append(Item(channel=item.channel, title='%s', url=url, action='play', infoLabels=item.infoLabels))
if item.type == 'pl':
new_url = scrapertools.find_single_match(data, '<div class="player">.*?<a href="([^"]+)" target')
data = httptools.downloadpage(new_url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<li class="btn.*?" data-video="([^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(data)
for video_id in matches:
url_data = httptools.downloadpage('https://tinyurl.com/%s' % video_id, follow_redirects=False)
url = url_data.headers['location']
itemlist.append(Item(channel=item.channel, title = '%s', url=url, action='play', infoLabels=item.infoLabels))
itemlist.extend(dl_links)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
return itemlist

View File

@@ -8,9 +8,12 @@ from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
from platformcode import launcher
from time import sleep
__channel__ = "autoplay"
PLAYED = False
autoplay_node = {}
@@ -80,23 +83,33 @@ def start(itemlist, item):
'''
logger.info()
global PLAYED
global autoplay_node
PLAYED = False
base_item = item
if not config.is_xbmc():
#platformtools.dialog_notification('AutoPlay ERROR', 'Sólo disponible para XBMC/Kodi')
return itemlist
global autoplay_node
if not autoplay_node:
# Obtiene el nodo AUTOPLAY desde el json
autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY')
if not item.channel in autoplay_node:
channel_id = item.channel
if item.channel == 'videolibrary':
autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY')
channel_id = item.contentChannel
if not channel_id in autoplay_node or not autoplay_node['status']:
return itemlist
# Agrega servidores y calidades que no estaban listados a autoplay_node
new_options = check_value(item.channel, itemlist)
new_options = check_value(channel_id, itemlist)
# Obtiene el nodo del canal desde autoplay_node
channel_node = autoplay_node.get(item.channel, {})
channel_node = autoplay_node.get(channel_id, {})
# Obtiene los ajustes des autoplay para este canal
settings_node = channel_node.get('settings', {})
@@ -166,7 +179,7 @@ def start(itemlist, item):
item.context.append({"title": "Configurar AutoPlay",
"action": "autoplay_config",
"channel": "autoplay",
"from_channel": item.channel})
"from_channel": channel_id})
# Si no tiene calidad definida le asigna calidad 'default'
if item.quality == '':
@@ -237,7 +250,6 @@ def start(itemlist, item):
# Se prepara el plan b, en caso de estar activo se agregan los elementos no favoritos al final
plan_b = settings_node['plan_b']
ready = False
text_b = ''
if plan_b:
autoplay_list.extend(autoplay_b)
@@ -246,7 +258,7 @@ def start(itemlist, item):
if autoplay_list or (plan_b and autoplay_b):
played = False
#played = False
max_intentos = 5
max_intentos_servers = {}
@@ -260,9 +272,8 @@ def start(itemlist, item):
# Si no es un elemento favorito si agrega el texto plan b
if autoplay_elem['videoitem'].type_b:
text_b = '(Plan B)'
if not platformtools.is_playing() and not played:
if not platformtools.is_playing() and not PLAYED:
videoitem = autoplay_elem['videoitem']
logger.debug('videoitem %s' % videoitem)
if videoitem.server.lower() not in max_intentos_servers:
max_intentos_servers[videoitem.server.lower()] = max_intentos
@@ -280,7 +291,7 @@ def start(itemlist, item):
# Intenta reproducir los enlaces
# Si el canal tiene metodo play propio lo utiliza
channel = __import__('channels.%s' % item.channel, None, None, ["channels.%s" % item.channel])
channel = __import__('channels.%s' % channel_id, None, None, ["channels.%s" % channel_id])
if hasattr(channel, 'play'):
resolved_item = getattr(channel, 'play')(videoitem)
if len(resolved_item) > 0:
@@ -293,12 +304,12 @@ def start(itemlist, item):
# Verifica si el item viene de la videoteca
try:
if item.contentChannel =='videolibrary':
if base_item.contentChannel =='videolibrary':
# Marca como visto
from platformcode import xbmc_videolibrary
xbmc_videolibrary.mark_auto_as_watched(item)
xbmc_videolibrary.mark_auto_as_watched(base_item)
# Rellena el video con los datos del item principal y reproduce
play_item = item.clone(url=videoitem)
play_item = base_item.clone(url=videoitem)
platformtools.play_video(play_item.url, autoplay=True)
else:
# Si no viene de la videoteca solo reproduce
@@ -308,7 +319,7 @@ def start(itemlist, item):
try:
if platformtools.is_playing():
played = True
PLAYED = True
break
except:
logger.debug(str(len(autoplay_list)))
@@ -340,7 +351,6 @@ def start(itemlist, item):
if user_config_setting_player != 0:
config.set_setting("player_mode", user_config_setting_player)
# devuelve la lista de enlaces para la eleccion manual
return itemlist
@@ -516,7 +526,7 @@ def autoplay_config(item):
default = num - 1
if default > len(server_list) - 1:
default = 0
set_servers = {"id": "server_%s" % num, "label": u" \u2665 " +"Servidor Favorito %s" % num,
set_servers = {"id": "server_%s" % num, "label": u" \u2665 Servidor Favorito %s" % num,
"color": "0xfffcab14", "type": "list", "default": default,
"enabled": "eq(-%s,true)+eq(-%s,true)" % (pos1, num), "visible": True,
"lvalues": server_list}
@@ -548,7 +558,7 @@ def autoplay_config(item):
if default > len(quality_list) - 1:
default = 0
set_quality = {"id": "quality_%s" % num, "label": u" \u2665 " + "Calidad Favorita %s" % num,
set_quality = {"id": "quality_%s" % num, "label": u" \u2665 Calidad Favorita %s" % num,
"color": "0xfff442d9", "type": "list", "default": default,
"enabled": "eq(-%s,true)+eq(-%s,true)" % (pos1, num), "visible": True,
"lvalues": quality_list}
@@ -633,7 +643,7 @@ def get_languages(channel):
return list_language
def is_active():
def is_active(channel):
'''
Devuelve un booleano q indica si esta activo o no autoplay en el canal desde el que se llama
@@ -650,17 +660,17 @@ def is_active():
autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY')
# Obtine el canal desde el q se hace la llamada
import inspect
module = inspect.getmodule(inspect.currentframe().f_back)
canal = module.__name__.split('.')[1]
logger.debug(canal)
#import inspect
#module = inspect.getmodule(inspect.currentframe().f_back)
#canal = module.__name__.split('.')[1]
canal = channel
# Obtiene el nodo del canal desde autoplay_node
channel_node = autoplay_node.get(canal, {})
# Obtiene los ajustes des autoplay para este canal
settings_node = channel_node.get('settings', {})
# Obtiene el nodo del canal desde autoplay_node
channel_node = autoplay_node.get(canal, {})
# Obtiene los ajustes des autoplay para este canal
settings_node = channel_node.get('settings', {})
return settings_node.get('active', False)
return settings_node.get('active', False)
def reset(item, dict):
@@ -674,3 +684,34 @@ def reset(item, dict):
platformtools.dialog_notification('AutoPlay', '%s: Los datos fueron reiniciados' % item.category)
return
def set_status(status):
logger.info()
# Obtiene el nodo AUTOPLAY desde el json
autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY')
autoplay_node['status'] = status
result, json_data = jsontools.update_node(autoplay_node, 'autoplay', 'AUTOPLAY')
def play_multi_channel(item, itemlist):
logger.info()
global PLAYED
actual_channel = ''
channel_videos = []
video_dict = dict()
set_status(True)
for video_item in itemlist:
if video_item.contentChannel != actual_channel:
actual_channel = video_item.contentChannel
else:
channel_videos.append(video_item)
video_dict[actual_channel] = channel_videos
for channel, videos in video_dict.items():
if not PLAYED:
item.contentChannel = channel
if is_active(channel):
logger.debug('esta activo en %s' % channel)
start(videos, item)
else:
break

View File

@@ -53,8 +53,8 @@ def mainlist(item):
itemlist = []
itemlist.append(Item(channel=item.channel, action="videos", title="Útimos videos", url=url_api + "/index/main/0/pc",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="listcategorias", title="Listado categorias Populares",
url=url_api + "/index/main/0/pc", extra="popular"))
#itemlist.append(Item(channel=item.channel, action="listcategorias", title="Listado categorias Populares",
# url=url_api + "/index/main/0/pc", extra="popular"))
itemlist.append(Item(channel=item.channel, action="listcategorias", title="Listado categorias completo",
url=url_api + "/index/main/0/pc", extra="nonpopular"))
itemlist.append(
@@ -74,7 +74,7 @@ def videos(item):
title = Video["title"]
itemlist.append(
Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot="", show="",
folder=True))
folder=True, contentType="movie"))
# Paginador
Actual = int(scrapertools.get_match(item.url, url_api + '/index/[^/]+/([0-9]+)/pc'))
@@ -93,10 +93,11 @@ def listcategorias(item):
data = scrapertools.cache_page(item.url)
JSONData = json.load(data)
for Tag in JSONData["tags"][item.extra]:
url = url_api + "/index/tag/0/pc?tag=" + Tag
title = Tag
title = title[:1].upper() + title[1:]
#for Tag in JSONData["tags"][item.extra]:
for Tag in JSONData["tags"]:
url = url_api + "/index/tag/0/pc?tag=" + Tag["tag"]
title = '%s - %s' % (str(Tag["tag"]), str(Tag["videos"]))
#title = title[:1].upper() + title[1:]
itemlist.append(
Item(channel=item.channel, action="videos", title=title, url=url, folder=True, viewmode="movie"))
@@ -108,6 +109,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = item.url % (texto)
try:
return videos(item)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla

View File

@@ -274,8 +274,6 @@ def findvideos(item):
item.title = re.sub('\s\[\d+,?\d*?\s\w[b|B]s\]', '', item.title) #Quitamos size de título, si lo traía
item.title = '%s [%s]' % (item.title, size) #Agregamos size al final del título
item.quality = re.sub('\s\[\d+,?\d*?\s\w[b|B]s\]', '', item.quality) #Quitamos size de calidad, si lo traía
item.quality = '%s [%s]' % (item.quality, size) #Agregamos size al final de calidad
item.quality = item.quality.replace("G", "G ").replace("M", "M ") #Se evita la palabra reservada en Unify
patron_t = '<div class="enlace_descarga".*?<a href="(.*?\.torrent)"'
link_torrent = scrapertools.find_single_match(data, patron_t)
@@ -301,6 +299,10 @@ def findvideos(item):
#Llamamos al método para crear el título general del vídeo, con toda la información obtenida de TMDB
item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)
if size:
item.quality = '%s [%s]' % (item.quality, size) #Agregamos size al final de calidad
item.quality = item.quality.replace("G", "G ").replace("M", "M ") #Se evita la palabra reservada en Unify
#Generamos una copia de Item para trabajar sobre ella
item_local = item.clone()

View File

@@ -10,7 +10,7 @@ from core import tmdb
from core.item import Item
from platformcode import config, logger
HOST = 'http://estrenosli.org/'
HOST = 'http://estrenosby.net/' # 'http://estrenosli.org/'
parameters = channeltools.get_channel_parameters('estrenosgo')
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
@@ -138,7 +138,7 @@ def findvideos(item):
list_opciones = []
IDIOMAS = {"banderita1": "Español", "banderita2": "VOSE", "banderita3": "Latino"}
url = "http://estrenosli.org/ver-online-" + item.url
url = HOST + "ver-online-" + item.url
data = httptools.downloadpage(url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

View File

@@ -7,7 +7,8 @@
"thumbnail": "gnula.png",
"banner": "gnula.png",
"categories": [
"movie"
"movie",
"vos"
],
"settings": [
{

View File

@@ -17,13 +17,13 @@ def mainlist(item):
itemlist = []
itemlist.append(Item(channel=item.channel, title="Estrenos", action="peliculas",
url= host +"peliculas-online/lista-de-peliculas-online-parte-1/", viewmode="movie",
thumbnail=get_thumb('premieres', auto=True),))
thumbnail=get_thumb('premieres', auto=True), first=0))
itemlist.append(
Item(channel=item.channel, title="Generos", action="generos", url= host + "generos/lista-de-generos/",
thumbnail=get_thumb('genres', auto=True),))
itemlist.append(Item(channel=item.channel, title="Recomendadas", action="peliculas",
url= host + "peliculas-online/lista-de-peliculas-recomendadas/", viewmode="movie",
thumbnail=get_thumb('recomended', auto=True),))
thumbnail=get_thumb('recomended', auto=True), first=0))
itemlist.append(Item(channel = item.channel, action = ""))
itemlist.append(
Item(channel=item.channel, title="Buscar", action="search", url = host_search,
@@ -35,14 +35,9 @@ def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
data = httptools.downloadpage(host).data
url_cse = scrapertools.find_single_match(data, '<form action="([^"]+)"') + "?"
bloque = scrapertools.find_single_match(data, '<form action=.*?</form>').replace('name="q"', "")
matches = scrapertools.find_multiple_matches(bloque, 'name="([^"]+).*?value="([^"]+)')
post = "q=" + texto + "&"
for name, value in matches:
post += name + "=" + value + "&"
data = httptools.downloadpage(url_cse + post).data
cse_token = scrapertools.find_single_match(data, "var cse_token='([^']+)'")
cxv = scrapertools.find_single_match(data, 'cx" value="([^"]+)"')
data = httptools.downloadpage("https://cse.google.es/cse.js?hpg=1&cx=%s" %cxv).data
cse_token = scrapertools.find_single_match(data, 'cse_token": "([^"]+)"')
item.url = host_search %(texto, cse_token)
try:
return sub_search(item)
@@ -99,7 +94,8 @@ def generos(item):
action = 'peliculas',
title = title,
url = url,
viewmode = "movie"))
viewmode = "movie",
first=0))
itemlist = sorted(itemlist, key=lambda item: item.title)
return itemlist
@@ -107,11 +103,18 @@ def generos(item):
def peliculas(item):
logger.info()
itemlist = []
next = True
data = httptools.downloadpage(item.url).data
patron = '<a class="Ntooltip" href="([^"]+)">([^<]+)<span><br[^<]+'
patron += '<img src="([^"]+)"></span></a>(.*?)<br'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, resto in matches:
first = item.first
last = first + 19
if last > len(matches):
last = len(matches)
next = False
for scrapedurl, scrapedtitle, scrapedthumbnail, resto in matches[first:last]:
language = []
plot = scrapertools.htmlclean(resto).strip()
languages = scrapertools.find_multiple_matches(plot, r'\((V.)\)')
@@ -132,6 +135,13 @@ def peliculas(item):
language=language,
quality=quality
))
#paginacion
url_next_page = item.url
first = last
if next:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='peliculas', first=first))
return itemlist
@@ -149,7 +159,7 @@ def findvideos(item):
cuenta = 0
for datos in bloque:
cuenta = cuenta + 1
patron = '<em>(opción %s.*?)</em>' %cuenta
patron = '<em>((?:opción|opción) %s.*?)</em>' %cuenta
scrapedopcion = scrapertools.find_single_match(data, patron)
titulo_opcion = "(" + scrapertools.find_single_match(scrapedopcion, "op.*?, (.*)").upper() + ")"
if "TRAILER" in titulo_opcion or titulo_opcion == "()":

View File

@@ -460,10 +460,18 @@ def findvideos(item):
item_local.quality = quality + tiempo
if "temporada" in temp_epi.lower():
item_local.quality = '%s [Temporada]' % item_local.quality
#Añadimos la duración, que estará en item.quility
if scrapertools.find_single_match(item.quality, '(\[\d+:\d+)'): #si ya tiene la duración, la ponemos
item_local.quality = '%s [%s h]' % (item_local.quality, scrapertools.find_single_match(item.quality, '(\d+:\d+)'))
#if size and item_local.contentType != "episode":
if size:
size = size.replace(".", ",").replace("B,", " B").replace("b,", " b")
item_local.quality = '%s [%s]' % (item_local.quality, size)
if item_local.action == 'show_result': #Viene de una búsqueda global
channel = item_local.channel.capitalize()
if item_local.from_channel:
channel = item_local.from_channel.capitalize()
item_local.quality = '[COLOR yellow][%s][/COLOR] %s' % (channel, item_local.quality)
#Salvamos la url del .torrent
if scrapedurl:

View File

@@ -37,9 +37,9 @@ ACTION_MOVE_UP = 3
set_animation = False
xinfoplus_set = config.get_setting("infoplus_set")
if xinfoplus_set == "Sin animación":
if xinfoplus_set == config.get_localized_string(70129):
set_animation = False
if xinfoplus_set == "Con animación":
if xinfoplus_set == config.get_localized_string(70130):
set_animation = True
def start(item, recomendaciones=[], from_window=False):
@@ -400,7 +400,7 @@ class main(xbmcgui.WindowDialog):
self.plot.autoScroll(11000, 6000, 30000)
except:
xbmc.executebuiltin(
'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000, "http://i.imgur.com/mHgwcn3.png")')
config.get_localized_string(70500))
self.plot.setText(dhe(self.infoLabels.get("plot", "")))
xbmc.sleep(200)
@@ -842,7 +842,7 @@ class related(xbmcgui.WindowDialog):
self.info_peli.autoScroll(7000, 6000, 30000)
except:
xbmc.executebuiltin(
'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000, "http://i.imgur.com/mHgwcn3.png")')
config.get_localized_string(70500))
self.info_peli.setText(self.info)
if set_animation:
self.info_peli.setAnimations(
@@ -859,9 +859,9 @@ class related(xbmcgui.WindowDialog):
('WindowClose', 'effect=zoom end=0% time=1000 condition=true',)])
if self.infoLabels.get("status") == "Ended" and self.item.contentType != "movie":
status = "[COLOR aquamarine][B]Finalizada %s[/B][/COLOR]"
status = config.get_localized_string(70515)
elif self.infoLabels.get("status") and self.item.contentType != "movie":
status = "[COLOR aquamarine][B]En emisión %s[/B][/COLOR]"
status = config.get_localized_string(70516)
else:
status = "[COLOR aquamarine][B]%s[/B][/COLOR]"
@@ -891,7 +891,7 @@ class related(xbmcgui.WindowDialog):
self.gt_peli = xbmcgui.ControlTextBox(210, 385, 1100, 60, self.fonts["12"])
self.addControl(self.gt_peli)
self.gt_peli.setText("[COLOR limegreen][B]Género: [/B][/COLOR]")
self.gt_peli.setText("[COLOR limegreen][B]%s[/B][/COLOR]" % config.get_localized_string(70499))
if set_animation:
self.gt_peli.setAnimations(
[('conditional', 'effect=slide start=0,-7000 delay=5750 time=700 condition=true tween=circle easing=in',),
@@ -907,7 +907,7 @@ class related(xbmcgui.WindowDialog):
self.pt_peli = xbmcgui.ControlTextBox(210, 410, 307, 60, self.fonts["12"])
self.addControl(self.pt_peli)
self.pt_peli.setText("[COLOR limegreen][B]Productora: [/B][/COLOR]")
self.pt_peli.setText("[COLOR limegreen][B]%s[/B][/COLOR]" % config.get_localized_string(70498))
if set_animation:
self.pt_peli.setAnimations(
[('conditional', 'effect=slide start=0,-7000 delay=5700 time=700 condition=true tween=circle easing=in',),
@@ -1238,7 +1238,7 @@ class Busqueda(xbmcgui.WindowXMLDialog):
else:
self.getControl(1).setLabel(config.get_localized_string(60494))
self.getControl(5).setLabel("[COLOR tomato][B]Cerrar[/B][/COLOR]")
self.getControl(5).setLabel(config.get_localized_string(60495))
self.control_list.reset()
items = []
for item_l in self.lista:
@@ -1577,7 +1577,7 @@ class ActorInfo(xbmcgui.WindowDialog):
self.info_actor.autoScroll(7000, 6000, 30000)
except:
xbmc.executebuiltin(
'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000, "http://i.imgur.com/mHgwcn3.png")')
config.get_localized_string(70500))
self.info_actor.setText(
"[COLOR coral][B]%s[/B][/COLOR]" % actor_tmdb.result.get("biography", config.get_localized_string(60504)))
@@ -1601,7 +1601,7 @@ class ActorInfo(xbmcgui.WindowDialog):
else:
self.titulos.append([entradas["id"], entradas.get("title", entradas.get("original_title", "")), thumb])
self.dialog.update(40, '[COLOR rosybrown]Obteniendo filmografía...[/COLOR]')
self.dialog.update(40, config.get_localized_string(60505))
self.mas_pelis = 8
self.idps = []
self.botones = []
@@ -1673,7 +1673,7 @@ class ActorInfo(xbmcgui.WindowDialog):
self.botones.append(self.btn_right)
xbmc.sleep(200)
self.dialog.update(80, '[COLOR plum]Recopilando imágenes...[/COLOR]')
self.dialog.update(80, config.get_localized_string(60506))
self.images = []
for images in actor_tmdb.result.get("images", {}).get("profiles", []):
imagen = "https://image.tmdb.org/t/p/original" + images["file_path"]

View File

@@ -252,10 +252,8 @@ def ultimas(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
data = data.decode('cp1252')
realplot = ''
patron = '<a href="([^"]+)" title="([^"]+)"><img src="([^"]+)" alt=.*? style="width:105px; height:160px; ' \
'border:1px solid #999"\/><\/a>'
patron = '<a href="([^"]+)" title="([^"]+)"> <img src="([^"]+)".*?solid'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -312,10 +310,7 @@ def letras(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
data = data.decode('cp1252')
data = scrapertools.find_single_match(data, '<\/form><\/table><\/div>.*?<\/ul>')
patron = '<li><a href="(.*?)" title="Letra.*?">(.*?)<\/a><\/li>'
patron = '<li><a href="([^"]+)" title="Letra.*?">([^<]+)<\/a><\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
@@ -356,36 +351,40 @@ def findvideos(item):
logger.info()
itemlist = []
new_url = get_link(get_source(item.url))
new_url = get_link(get_source(new_url))
video_id = scrapertools.find_single_match(new_url, 'http.*?h=(\w+)')
new_url = '%s%s' % (host, 'playeropstream/api.php')
post = {'h': video_id}
post = urllib.urlencode(post)
data = httptools.downloadpage(new_url, post=post).data
json_data = jsontools.load(data)
url = json_data['url']
server = servertools.get_server_from_url(url)
title = '%s [%s]' % (server, item.language)
itemlist.append(Item(channel=item.channel, title=title, url=url, action='play', language=item.language,
server=server, infoLabels=item.infoLabels))
try:
new_url = get_link(get_source(item.url))
new_url = get_link(get_source(new_url))
video_id = scrapertools.find_single_match(new_url, 'http.*?h=(\w+)')
new_url = '%s%s' % (host, 'playeropstream/api.php')
post = {'h': video_id}
post = urllib.urlencode(post)
data = httptools.downloadpage(new_url, post=post).data
json_data = jsontools.load(data)
url = json_data['url']
server = servertools.get_server_from_url(url)
title = '%s [%s]' % (server, item.language)
itemlist.append(Item(channel=item.channel, title=title, url=url, action='play', language=item.language,
server=server, infoLabels=item.infoLabels))
# Requerido para FilterTools
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
# Requerido para AutoPlay
autoplay.start(itemlist, item)
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle=item.contentTitle
))
except:
pass
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle=item.contentTitle
))
return itemlist

View File

@@ -41,8 +41,8 @@
"visible": true,
"lvalues": [
"Torrentrapid",
"Torrentlocura",
"Tumejortorrent",
"Torrentlocura",
"Tvsinpagar",
"Descargas2020",
"Mispelisyseries"
@@ -52,7 +52,7 @@
"id": "clonenewpct1_channels_list",
"type": "text",
"label": "Lista de clones de NewPct1 y orden de uso",
"default": "('1', 'torrentrapid', 'http://torrentrapid.com/', 'movie, tvshow, season, episode', 'serie_episodios'), ('1', 'torrentlocura', 'http://torrentlocura.com/', 'movie, tvshow, season, episode', ''), ('1', 'tumejortorrent', 'http://tumejortorrent.com/', 'movie, tvshow, season, episode', ''), ('1', 'tvsinpagar', 'http://www.tvsinpagar.com/', 'tvshow, season, episode', ''), ('1', 'descargas2020', 'http://descargas2020.com/', 'movie, tvshow, season, episode', ''), ('1', 'mispelisyseries', 'http://mispelisyseries.com/', 'movie', 'search, listado_busqueda')",
"default": "('1', 'torrentrapid', 'http://torrentrapid.com/', 'movie, tvshow, season, episode', 'serie_episodios'), ('1', 'tumejortorrent', 'http://tumejortorrent.com/', 'movie, tvshow, season, episode', ''), ('1', 'torrentlocura', 'http://torrentlocura.com/', 'movie, tvshow, season, episode', ''), ('1', 'tvsinpagar', 'http://www.tvsinpagar.com/', 'tvshow, season, episode', ''), ('1', 'descargas2020', 'http://descargas2020.com/', 'movie, tvshow, season, episode', ''), ('1', 'mispelisyseries', 'http://mispelisyseries.com/', 'movie', 'search, listado_busqueda')",
"enabled": true,
"visible": false
},

View File

@@ -373,6 +373,7 @@ def listado(item):
elif item.extra == "series" and not "/miniseries" in item.url:
item.action = "episodios"
item.contentType = "tvshow"
item.season_colapse = True
pag = True
elif item.extra == "varios" or "/miniseries" in item.url:
item.action = "findvideos"
@@ -604,8 +605,10 @@ def listado(item):
if item_local.contentType == "movie":
year = scrapertools.find_single_match(scrapedurl, r'(\d{4})')
if year >= "1900" and year <= "2040" and year != "2020":
title_subs += [year]
item_local.infoLabels['year'] = '-'
item_local.infoLabels['year'] = year
#title_subs += [year]
else:
item_local.infoLabels['year'] = '-'
#Guarda la variable temporal que almacena la info adicional del título a ser restaurada después de TMDB
item_local.title_subs = title_subs
@@ -827,7 +830,7 @@ def listado_busqueda(item):
title_lista += [scrapedurl_alt]
else:
title_lista += [scrapedurl]
if "juego/" in scrapedurl or "xbox" in scrapedurl.lower() or "xbox" in scrapedtitle.lower() or "windows" in scrapedtitle.lower() or "windows" in calidad.lower() or "nintendo" in scrapedtitle.lower() or "xbox" in calidad.lower() or "epub" in calidad.lower() or "pdf" in calidad.lower() or "pcdvd" in calidad.lower() or "crack" in calidad.lower(): # no mostramos lo que no sean videos
if ("juego/" in scrapedurl or "xbox" in scrapedurl.lower()) and not "/serie" in scrapedurl or "xbox" in scrapedtitle.lower() or "windows" in scrapedtitle.lower() or "windows" in calidad.lower() or "nintendo" in scrapedtitle.lower() or "xbox" in calidad.lower() or "epub" in calidad.lower() or "pdf" in calidad.lower() or "pcdvd" in calidad.lower() or "crack" in calidad.lower(): # no mostramos lo que no sean videos
continue
cnt_title += 1 # Sería una línea real más para Itemlist
@@ -863,7 +866,7 @@ def listado_busqueda(item):
item_local.url = url
item_local.extra2 = 'serie_episodios' #Creamos acción temporal excluyente para otros clones
if item_local.category == 'Mispelisyseries': #Esta web no gestiona bien el cambio de episodio a Serie
pattern = 'class="btn-torrent">.*?window.location.href = "(.*?)";' #Patron para .torrent
pattern = 'class="btn-torrent">.*?window.location.href = "([^"]+)";' #Patron para .torrent
#Como no hay datos consistentes, llamamos al método de fail_over para que encuentre un canal que esté activo y pueda gestionar el cambio de episodio por serie
item_local, data_serie = generictools.fail_over_newpct1(item_local, pattern)
else:
@@ -872,7 +875,7 @@ def listado_busqueda(item):
except:
pass
pattern = 'class="btn-torrent">.*?window.location.href = "(.*?)";' #Patron para .torrent
pattern = 'class="btn-torrent">.*?window.location.href = "([^"]+)";' #Patron para .torrent
if not data_serie or (not scrapertools.find_single_match(data_serie, pattern) and not '<h3><strong>( 0 ) Resultados encontrados </strong>' in data and not '<ul class="noticias-series"></ul></form></div><!-- end .page-box -->' in data):
logger.error("ERROR 01: LISTADO_BUSQUEDA: La Web no responde o ha cambiado de URL: " + item_local.url + " / DATA: " + data_serie)
#Si no hay datos consistentes, llamamos al método de fail_over para que encuentre un canal que esté activo y pueda gestionar el cambio de episodio por serie
@@ -892,7 +895,7 @@ def listado_busqueda(item):
item_local.url = item_local.url.replace('/series/', '/series-vo/')
#item_local.url = re.sub(r'\/\d+$', '/', item_local.url) #Quitamos el ID de la serie por compatib.
if item_local.url:
title_subs += ["Episodio %sx%s" % (scrapertools.find_single_match(url, '\/temp.*?-(\d+)\/cap.*?-(\d+)\/'))]
title_subs += ["Episodio %sx%s" % (scrapertools.find_single_match(url, '\/temp.*?-(\d+)-?\/cap.*?-(\d+(?:-al-\d+)?)-?\/'))]
url = item_local.url
except:
pass
@@ -900,8 +903,8 @@ def listado_busqueda(item):
if item.extra == "novedades" and "/serie" in url:
if not item_local.url or episodio_serie == 0:
item_local.url = url
if scrapertools.find_single_match(url, '\/temp.*?-(\d+)\/cap.*?-(\d+)\/'):
title_subs += ["Episodio %sx%s" % (scrapertools.find_single_match(url, '\/temp.*?-(\d+)\/cap.*?-(\d+)\/'))]
if scrapertools.find_single_match(url, '\/temp.*?-(\d+)-?\/cap.*?-(\d+(?:-al-\d+)?)-?\/'):
title_subs += ["Episodio %sx%s" % (scrapertools.find_single_match(url, '\/temp.*?-(\d+)-?\/cap.*?-(\d+(?:-al-\d+)?)-?\/'))]
else:
title_subs += ["Episodio 1x01"]
@@ -909,6 +912,7 @@ def listado_busqueda(item):
if (".com/serie" in url or "/serie" in url or "-serie" in url) and not "/miniseries" in url and (not "/capitulo" in url or "pelisyseries.com" in item_local.channel_host): #Series
item_local.action = "episodios"
item_local.contentType = "tvshow"
item_local.season_colapse = True
item_local.extra = "series"
elif "varios/" in url or "/miniseries" in url: #Documentales y varios
item_local.action = "findvideos"
@@ -963,6 +967,7 @@ def listado_busqueda(item):
title = re.sub(r' - [t|T]emp\w+.*?\d+', '', title)
title = re.sub(r' [t|T]emp.*?\d+[x|X]\d+', '', title)
title = re.sub(r' [t|T]emp.*?\d+', '', title)
title = re.sub(r' [c|C]ap.*?\d+ al \d+', '', title)
title = re.sub(r' [c|C]ap.*?\d+', '', title)
if "audio" in title.lower(): #Reservamos info de audio para después de TMDB
title_subs += ['[%s]' % scrapertools.find_single_match(title, r'(\[[a|A]udio.*?\])')]
@@ -1071,8 +1076,10 @@ def listado_busqueda(item):
year = ""
year = str(year)
if year >= "1900" and year <= "2040" and year != "2020":
title_subs += [year]
item_local.infoLabels['year'] = '-'
item_local.infoLabels['year'] = year
#title_subs += [year]
else:
item_local.infoLabels['year'] = '-'
#Guarda la variable temporal que almacena la info adicional del título a ser restaurada después de TMDB
item_local.title_subs = title_subs
@@ -1531,24 +1538,40 @@ def episodios(item):
item, data = generictools.fail_over_newpct1(item, verify_fo)
#Limpiamos num. Temporada y Episodio que ha podido quedar por Novedades
season_display = 0
if item.contentSeason:
if item.season_colapse: #Si viene del menú de Temporadas...
season_display = item.contentSeason #... salvamos el num de sesión a pintar
item.from_num_season_colapse = season_display
del item.season_colapse
item.contentType = "tvshow"
if item.from_title_season_colapse:
item.title = item.from_title_season_colapse
del item.from_title_season_colapse
if item.infoLabels['title']:
del item.infoLabels['title']
del item.infoLabels['season']
if item.contentEpisodeNumber:
del item.infoLabels['episode']
if season_display == 0 and item.from_num_season_colapse:
season_display = item.from_num_season_colapse
# Obtener la información actualizada de la Serie. TMDB es imprescindible para Videoteca
if not item.infoLabels['tmdb_id']:
tmdb.set_infoLabels(item, True)
try:
tmdb.set_infoLabels(item, True) #TMDB de cada Temp
except:
pass
modo_ultima_temp_alt = modo_ultima_temp
if item.ow_force == "1": #Si hay un traspaso de canal o url, se actualiza todo
if item.ow_force == "1": #Si hay un traspaso de canal o url, se actualiza todo
modo_ultima_temp_alt = False
max_temp = 1
if item.infoLabels['number_of_seasons']:
max_temp = item.infoLabels['number_of_seasons']
y = []
if modo_ultima_temp_alt and item.library_playcounts: #Averiguar cuantas temporadas hay en Videoteca
if modo_ultima_temp_alt and item.library_playcounts: #Averiguar cuantas temporadas hay en Videoteca
patron = 'season (\d+)'
matches = re.compile(patron, re.DOTALL).findall(str(item.library_playcounts))
for x in matches:
@@ -1664,9 +1687,7 @@ def episodios(item):
estado = True #Buena calidad de datos por defecto
if "<span" in info: # new style
pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)?.*?Capitulo(?:s)?\s*(?P<episode>\d+)?" \
"(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>" \
"[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
pattern = "[^>]+>.*?Temporada\s*(?:<span[^>]+>\[\s?)?(?P<season>\d+)?.*?Capitulo(?:s)?\s*(?:<span[^>]+>\[\s?)?(?P<episode>\d+)?(?:.*?(?P<episode2>\d+)?)<.*?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
if not scrapertools.find_single_match(info, pattern):
if "especial" in info.lower(): # Capitulos Especiales
pattern = ".*?[^>]+>.*?Temporada.*?\[.*?(?P<season>\d+).*?\].*?Capitulo.*?\[\s*(?P<episode>\d+).*?\]?(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
@@ -1768,11 +1789,19 @@ def episodios(item):
break #Sale del bucle actual del FOR de episodios por página
#if ('%sx%s' % (str(item_local.contentSeason), str(item_local.contentEpisodeNumber).zfill(2))) in item.library_playcounts:
# continue
if season_display > 0:
if item_local.contentSeason > season_display:
continue
elif item_local.contentSeason < season_display:
break
if item_local.active:
del item_local.active
if item_local.contentTitle:
del item_local.infoLabels['title']
if item_local.season_colapse:
del item_local.season_colapse
item_local.context = "['buscar_trailer']"
item_local.action = "findvideos"
item_local.contentType = "episode"
@@ -1780,19 +1809,25 @@ def episodios(item):
itemlist.append(item_local.clone())
#logger.debug(item_local)
data = ''
if len(itemlist) > 1:
itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber))) #clasificamos
if item.season_colapse and not item.add_videolibrary: #Si viene de listado, mostramos solo Temporadas
item, itemlist = generictools.post_tmdb_seasons(item, itemlist)
# Pasada por TMDB y clasificación de lista por temporada y episodio
tmdb.set_infoLabels(itemlist, True)
if not item.season_colapse: #Si no es pantalla de Temporadas, pintamos todo
# Pasada por TMDB y clasificación de lista por temporada y episodio
tmdb.set_infoLabels(itemlist, True)
#Llamamos al método para el maquillaje de los títulos obtenidos desde TMDB
item, itemlist = generictools.post_tmdb_episodios(item, itemlist)
#logger.debug(item)
#Llamamos al método para el maquillaje de los títulos obtenidos desde TMDB
item, itemlist = generictools.post_tmdb_episodios(item, itemlist)
return itemlist

View File

@@ -1,7 +1,7 @@
{
"id": "peliculasrey",
"name": "peliculasrey",
"active": true,
"active": false,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "peliculasrey.png",

View File

@@ -0,0 +1,73 @@
{
"id": "peliculasyseries",
"name": "PeliculasySeries",
"active": true,
"adult": false,
"language": ["lat", "cast"],
"thumbnail": "https://s22.postimg.cc/xy1burkep/peliculasyseries.png",
"banner": "",
"categories": [
"movie",
"tvshow",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino",
"Castellano",
"VOSE",
"VOS",
"VO"
]
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verificar si los enlaces existen",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
}
]
}

View File

@@ -0,0 +1,345 @@
# -*- coding: utf-8 -*-
# -*- Channel PeliculasySeries -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
import base64
from channelselector import get_thumb
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from lib import jsunpack
from core.item import Item
from channels import filtertools
from channels import autoplay
from platformcode import config, logger
IDIOMAS = {'la': 'Latino', 'lat':'Latino', 'cas':'Castellano','es': 'Castellano', 'vs': 'VOSE', 'vos':'VOSE', 'vo':'VO',
'ori':'VO', 'so':'VOS', 'sor':'VOS'}
list_language = IDIOMAS.values()
list_quality = ['TS','Screener','DVDRip','HDRip', 'HDTV', 'micro720', 'micro1080']
list_servers = ['openload', 'rapidvideo', 'powvideo', 'gamovideo', 'streamplay', 'flashx', 'clipwatching', 'vidoza',
'thevideome']
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'peliculasyseries')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'peliculasyseries')
host = 'https://peliculasyseries.org/'
def mainlist(item):
    """Root menu of the PeliculasySeries channel: movies, series and search."""
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)

    menu = [
        Item(channel=item.channel, title='Peliculas', action='menu_movies',
             thumbnail=get_thumb('movies', auto=True)),
        Item(channel=item.channel, title='Series', url=host + 'series', action='list_all',
             type='tvshows', thumbnail=get_thumb('tvshows', auto=True)),
        item.clone(title="Buscar", action="search", url=host + 'buscar/q/',
                   thumbnail=get_thumb("search", auto=True), extra='movie'),
    ]

    # Offer the language/quality filter and autoplay toggles on the root menu
    menu = filtertools.show_option(menu, item.channel, list_language, list_quality)
    autoplay.show_option(item.channel, menu)
    return menu
def menu_movies(item):
    """Sub-menu for the movies section: full catalogue and per-genre listing."""
    logger.info()
    entries = [
        Item(channel=item.channel, title='Todas', url=host + 'movies', action='list_all',
             thumbnail=get_thumb('all', auto=True), type='movies'),
        Item(channel=item.channel, title='Genero', action='section',
             thumbnail=get_thumb('genres', auto=True), type='movies'),
    ]
    return entries
def get_source(url):
    """Download *url* and strip newline/tab/&nbsp;/<br>/multi-space noise so the
    scraping regexes can match on a single flattened line."""
    logger.info()
    raw = httptools.downloadpage(url).data
    return re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", raw)
def get_language(lang_data):
    """Extract language label(s) from a scraped HTML fragment.

    Returns a list of display labels (values of IDIOMAS) when *lang_data*
    carries class attributes, or the cleaned-up string itself otherwise.
    """
    logger.info()
    language = []
    lang_data = lang_data.replace('language-ES', '').replace('medium', '').replace('serie', '').replace('-','')
    if 'class' in lang_data:
        lang_list = scrapertools.find_multiple_matches(lang_data, 'class=" ([^"]+)"')
    else:
        # No class attributes: the fragment is already the language code/label
        return lang_data.strip()
    for lang in lang_list:
        # Map unknown codes to the generic 'VOS' label. The original assigned
        # lang = 'VOS' and then indexed IDIOMAS[lang], which raised KeyError
        # because 'VOS' is a value of IDIOMAS, not a key.
        label = IDIOMAS.get(lang, 'VOS')
        # Deduplicate on the label, so synonym codes ('la'/'lat') add it once
        if label not in language:
            language.append(label)
    return language
def section(item):
    """List the movie genres scraped from the site's dropdown menu."""
    logger.info()
    itemlist = []
    seen = []
    data = get_source(host)
    # Narrow the page down to the genres dropdown before matching entries
    data = scrapertools.find_single_match(data, 'data-toggle="dropdown">Géneros.*?multi-column-dropdown">.*?"clearfix"')
    if 'Genero' in item.title:
        entries = re.compile('<li><a href="([^"]+)">([^<]+)</a>', re.DOTALL).findall(data)
        for genre_url, genre_title in entries:
            if genre_title in seen:
                continue
            seen.append(genre_title)
            itemlist.append(Item(channel=item.channel, url=genre_url, title=genre_title,
                                 action='list_all', type=item.type))
    return itemlist
def list_all(item):
    """Scrape a catalogue page and build movie or tv-show items.

    item.type selects the regex to use ('movies' or 'tvshows'); a
    'Siguiente >>' item is appended when a next-page link is found.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    if item.type == 'movies':
        # Movie card: url, poster, title, quality tag, language flags, year
        patron = '<div class="col-md-2 w3l-movie-gride-agile"><a href="([^"]+)" class=".*?">'
        patron += '<img src="([^"]+)" title="([^"]+)" class="img-responsive".*?'
        patron += '<div class="calidad" >([^<]+)</div> <div class="audio-info">'
        patron += '(.*?)<div class="w3l-action-icon">.*?<p>([^<]+)</p>'
        matches = re.compile(patron, re.DOTALL).findall(data)
        for scrapedurl, scrapedthumbnail, scrapedtitle, quality, lang_data, year in matches:
            title = '%s [%s] [%s]' % (scrapedtitle, year, quality)
            # Collapse any screener variant into a single quality label
            if 'screener' in quality.lower():
                quality = 'Screener'
            contentTitle = scrapedtitle
            thumbnail = scrapedthumbnail
            url = scrapedurl
            language = get_language(lang_data)
            itemlist.append(item.clone(action='findvideos',
                                       title=title,
                                       url=url,
                                       thumbnail=thumbnail,
                                       contentTitle=contentTitle,
                                       language=language,
                                       quality=quality,
                                       infoLabels={'year':year}))
    elif item.type == 'tvshows':
        # Series cards carry no quality/language info: url, poster, title, year
        patron = '<div class="col-md-2 w3l-movie-gride-agile"><a href="([^"]+)" class=".*?">'
        patron += '<img src="([^"]+)" title="([^"]+)" class="img-responsive".*?<p>([^<]+)</p>'
        matches = re.compile(patron, re.DOTALL).findall(data)
        for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
            title = scrapedtitle
            contentSerieName = scrapedtitle
            thumbnail = scrapedthumbnail
            url = scrapedurl
            itemlist.append(item.clone(action='seasons',
                                       title=title,
                                       url=url,
                                       thumbnail=thumbnail,
                                       contentSerieName=contentSerieName,
                                       context=filtertools.context(item, list_language, list_quality),
                                       infoLabels={'year':year}))
    # Enrich the scraped items with TMDB metadata (posters, plots, ids)
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    # Paginación
    url_next_page = scrapertools.find_single_match(data,"<a class='last' href='([^']+)'>»</a>")
    if url_next_page:
        itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
    return itemlist
def seasons(item):
    """List the seasons of a series; appends the 'add to videolibrary' entry."""
    logger.info()
    itemlist=[]
    data=get_source(item.url)
    patron='<a href="([^"]+)"><img class="thumb-item" src="([^"]+)" alt="[^"]+" >'
    patron += '<div class="season-item">Temporada (\d+)</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    infoLabels = item.infoLabels
    for scrapedurl, scrapedthumbnail, season in matches:
        # NOTE(review): the same infoLabels dict is mutated on every iteration;
        # this is only safe if Item copies infoLabels at construction — verify.
        infoLabels['season']=season
        title = 'Temporada %s' % season
        itemlist.append(Item(channel=item.channel, title=title, url=scrapedurl, action='episodesxseasons',
                             thumbnail=scrapedthumbnail, infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Offer the videolibrary shortcut only when something was scraped
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
                 action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
    return itemlist
def episodios(item):
    """Flatten every season into one episode list (videolibrary entry point)."""
    logger.info()
    all_episodes = []
    for season_item in seasons(item):
        all_episodes.extend(episodesxseasons(season_item))
    return all_episodes
def episodesxseasons(item):
    """List the episodes of one season (season number taken from infoLabels)."""
    logger.info()
    itemlist = []
    data=get_source(item.url)
    # The season number is interpolated into the regex to match only this season
    patron ='class="row-serie-item"><a href="([^"]+)">.*?<img class="episode-thumb-item" src="([^"]+)" alt="([^"]+)" >'
    patron += '<divclass="audio-info-series">(.*?)<div class="episode-item">%s+x(\d+)</div>' % item.infoLabels['season']
    matches = re.compile(patron, re.DOTALL).findall(data)
    infoLabels = item.infoLabels
    for scrapedurl, scrapedthumbnail, scrapedtitle, lang_data, scrapedepisode in matches:
        # NOTE(review): shared infoLabels dict mutated per iteration; safe only
        # if Item copies infoLabels at construction — verify.
        infoLabels['episode'] = scrapedepisode
        url = scrapedurl
        language = get_language(lang_data)
        title = '%sx%s - %s %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle, language)
        itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos',
                             thumbnail=scrapedthumbnail, language=language, infoLabels=infoLabels))
    # Apply the user's language filter, then enrich with TMDB metadata
    itemlist = filtertools.get_links(itemlist, item, list_language)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def findvideos(item):
    """Scrape the playable links of a movie/episode and resolve their servers."""
    logger.info()
    from lib import generictools
    itemlist = []
    data = get_source(item.url)
    patron = '<div class="available-source" ><div class="([^"]+)">.*?'
    patron += 'data-data="([^"]+)".*?<span class="quality-text">([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for lang_data, scrapedurl, quality in matches:
        lang = get_language(lang_data)
        # get_language may return an IDIOMAS key, a label, or a list of labels;
        # .get() avoids the original's KeyError when it is not a key.
        if not isinstance(lang, list):
            lang = IDIOMAS.get(lang, lang)
        if 'screener' in quality.lower():
            quality = 'Screener'
        # Placeholder filled later by get_servers_itemlist with
        # (server, quality, language)
        title = '%s [%s] [%s]'
        # The link is base64-encoded after a one-character prefix
        url = base64.b64decode(scrapedurl[1:])
        itemlist.append(
            Item(channel=item.channel, url=url, title=title, action='play', quality=quality, language=lang,
                 infoLabels=item.infoLabels))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.quality, x.language))
    # Requerido para Filtrar enlaces
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language, list_quality)
    # Requerido para AutoPlay
    autoplay.start(itemlist, item)
    itemlist = sorted(itemlist, key=lambda it: it.language)
    if item.contentType != 'episode':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                     action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
    return itemlist
def search(item, texto):
    """Append the query to the search URL and run it; empty query yields []."""
    logger.info()
    query = texto.replace(" ", "+")
    item.url = item.url + query
    return search_results(item) if query != '' else []
def search_results(item):
    """Parse the search results page into movie/series items."""
    logger.info()
    itemlist=[]
    data=get_source(item.url)
    patron = '<li class="search-results-item media-item" .*?<a href="([^"]+)" title="([^"]+)">.*?'
    patron += '<img class="content" src="([^"]+)" .*?>(Pelicula|Serie) del año([^<]+)</p>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumb, content_type, year in matches:
        title = scrapedtitle
        # A missing year is normalized to a dash so TMDB lookups don't choke
        if len(year)==0:
            year = '-'
        url = scrapedurl
        thumbnail = scrapedthumb
        # Series URLs contain '/serie'; everything else is treated as a movie
        if not '/serie' in url:
            action = 'findvideos'
        else:
            action = 'seasons'
        new_item=Item(channel=item.channel, title=title, url=url, thumbnail=thumbnail, action=action,
                      infoLabels={'year':year})
        # Movies use contentTitle, series use contentSerieName for TMDB matching
        if new_item.action == 'findvideos':
            new_item.contentTitle = new_item.title
        else:
            new_item.contentSerieName = new_item.title
        itemlist.append(new_item)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def newest(categoria):
    """Entry point for the global 'Novedades' sections.

    Builds the listing for *categoria* ('peliculas' or 'infantiles') and
    returns [] on any scraping error so a broken channel never breaks the
    aggregated news view (the bare except is deliberate best-effort).
    """
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria in ['peliculas']:
            item.url = host + 'movies'
        elif categoria == 'infantiles':
            item.url = host + 'genero/animation/'
        item.type = 'movies'
        itemlist = list_all(item)
        # Drop the pagination pseudo-item; the emptiness guard avoids the
        # original IndexError on itemlist[-1] when nothing was scraped
        if itemlist and itemlist[-1].title == 'Siguiente >>':
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist

View File

@@ -143,7 +143,8 @@ def listado_genero(item):
itemlist = []
data = httptools.downloadpage(item.url).data
# ~ data = httptools.downloadpage(item.url).data
data = obtener_data(item.url)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
if item.extra == "movies":
@@ -180,7 +181,8 @@ def listado_anio(item):
itemlist = []
data = httptools.downloadpage(item.url).data
# ~ data = httptools.downloadpage(item.url).data
data = obtener_data(item.url)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
if item.extra == "movies":

View File

@@ -72,7 +72,7 @@ def menu_movies(item):
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
@@ -93,9 +93,9 @@ def section(item):
duplicados=[]
data = get_source(host+'/'+item.type)
if 'Genero' in item.title:
patron = '<li class=cat-item cat-item-\d+><a href=(.*?) >(.*?)/i>'
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)" >(.*?)</i'
elif 'Año' in item.title:
patron = '<li><a href=(.*?release.*?)>(.*?)</a>'
patron = '<li><a href="(.*?release.*?)">([^<]+)<'
elif 'Calidad' in item.title:
patron = 'menu-item-object-dtquality menu-item-\d+><a href=(.*?)>(.*?)</a>'
@@ -105,8 +105,8 @@ def section(item):
title = scrapedtitle
plot=''
if 'Genero' in item.title:
quantity = scrapertools.find_single_match(scrapedtitle,'</a> <i>(.*?)<')
title = scrapertools.find_single_match(scrapedtitle,'(.*?)</')
quantity = scrapertools.find_single_match(scrapedtitle,'</a> <i>([^<]+)<')
title = scrapertools.find_single_match(scrapedtitle,'([^<]+)</')
title = title
plot = '%s elementos' % quantity.replace('.','')
else:
@@ -124,33 +124,31 @@ def list_all(item):
itemlist = []
data = get_source(item.url)
if item.type == 'movies':
patron = '<article id=post-\d+ class=item movies><div class=poster><img src=(.*?) alt=(.*?)>.*?quality>(.*?)'
patron += '</span><\/div><a href=(.*?)>.*?<\/h3><span>(.*?)<\/span><\/div>.*?flags(.*?)metadata'
patron = '<article id="post-\d+" class="item movies"><div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?'
patron += '"quality">([^<]+)</span><\/div><a href="([^"]+)">.*?</h3>.*?<span>([^<]+)</'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedtitle, quality, scrapedurl, year, lang_data in matches:
for scrapedthumbnail, scrapedtitle, quality, scrapedurl, year in matches:
title = '%s [%s] [%s]' % (scrapedtitle, year, quality)
contentTitle = scrapedtitle
thumbnail = scrapedthumbnail
url = scrapedurl
language = get_language(lang_data)
#language = get_language(lang_data)
itemlist.append(item.clone(action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
contentTitle=contentTitle,
language=language,
quality=quality,
infoLabels={'year':year}))
elif item.type == 'tvshows':
patron = '<article id=post-\d+ class=item tvshows><div class=poster><img src=(.*?) alt=(.*?)>.*?'
patron += '<a href=(.*?)>.*?<\/h3><span>(.*?)<\/span><\/div>'
patron = '<article id="post-\d+" class="item tvshows"><div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?'
patron += '<a href="([^"]+)">.*?<span>(\d{4})<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedtitle, scrapedurl, year in matches:
@@ -170,7 +168,7 @@ def list_all(item):
# Paginación
#url_next_page = scrapertools.find_single_match(data,"<a class='arrow_pag' href=([^>]+)><i id='nextpagination'")
url_next_page = scrapertools.find_single_match(data,"<link rel=next href=([^ ]+) />")
url_next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^ ]+)" />')
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
@@ -216,7 +214,7 @@ def episodesxseasons(item):
itemlist = []
data=get_source(item.url)
patron='class=numerando>%s - (\d+)</div><div class=episodiotitle><a href=(.*?)>(.*?)<' % item.infoLabels['season']
patron='class="numerando">%s - (\d+)</div><div class="episodiotitle"><a href="([^"]+)">([^<]+)<' % item.infoLabels['season']
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
@@ -239,10 +237,10 @@ def findvideos(item):
from lib import generictools
itemlist = []
data = get_source(item.url)
patron = 'id=option-(\d+).*?rptss src=(.*?) frameborder'
patron = 'id="option-(\d+).*?rptss" src="([^"]+)" frameborder'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, scrapedurl in matches:
lang = scrapertools.find_single_match(data, 'href=#option-%s>.*?/flags/(.*?).png' % option)
lang = scrapertools.find_single_match(data, 'href="#option-%s">.*?/flags/(.*?).png' % option)
quality = ''
if lang not in IDIOMAS:
lang = 'en'
@@ -306,7 +304,8 @@ def search_results(item):
itemlist=[]
data=get_source(item.url)
patron = '<article>.*?<a href=(.*?)><img src=(.*?) alt=(.*?) />.*?meta.*?year>(.*?)<(.*?)<p>(.*?)</p>'
patron = '<article>.*?<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)" />.*?"meta".*?'
patron += '"year">([^<]+)<(.*?)<p>([^<]+)</p>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumb, scrapedtitle, year, lang_data, scrapedplot in matches:

View File

@@ -0,0 +1,53 @@
{
"id": "seriesanimadas",
"name": "SeriesAnimadas",
"active": true,
"adult": false,
"language": ["lat"],
"thumbnail": "https://s22.postimg.cc/3lcxb3qfl/logo.png",
"banner": "",
"categories": [
"tvshow",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"VOSE"
]
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verificar si los enlaces existen",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
}
]
}

View File

@@ -0,0 +1,301 @@
# -*- coding: utf-8 -*-
# -*- Channel SeriesAnimadas -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
import base64
from channelselector import get_thumb
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from lib import jsunpack
from core.item import Item
from channels import filtertools
from channels import autoplay
from platformcode import config, logger
IDIOMAS = {'latino': 'LAT', 'subtitulado':'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = [
'directo',
'openload',
]
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'seriesanimadas')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'seriesanimadas')
host = 'https://www.seriesanimadas.net/'
def mainlist(item):
    """Root menu of the SeriesAnimadas channel: new episodes, latest, all, search."""
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    menu = [
        Item(channel=item.channel, title='Nuevos Capitulos', url=host, action='new_episodes',
             type='tvshows', thumbnail=get_thumb('new_episodes', auto=True)),
        Item(channel=item.channel, title='Ultimas', url=host + 'series?', action='list_all',
             type='tvshows', thumbnail=get_thumb('last', auto=True)),
        Item(channel=item.channel, title='Todas', url=host + 'series?', action='list_all',
             type='tvshows', thumbnail=get_thumb('all', auto=True)),
        Item(channel=item.channel, title="Buscar", action="search", url=host + 'search?s=',
             thumbnail=get_thumb("search", auto=True)),
    ]
    autoplay.show_option(item.channel, menu)
    return menu
def menu_movies(item):
    """Movie sub-menu (all / by genre / by year).

    NOTE(review): this menu is not referenced from mainlist and no `section`
    action exists in this channel's visible code — it looks like dead code
    copied from another channel; confirm before relying on it.
    """
    logger.info()
    itemlist=[]
    itemlist.append(Item(channel=item.channel, title='Todas', url=host + 'movies', action='list_all',
                         thumbnail=get_thumb('all', auto=True), type='movies'))
    itemlist.append(Item(channel=item.channel, title='Genero', action='section',
                         thumbnail=get_thumb('genres', auto=True), type='movies'))
    itemlist.append(Item(channel=item.channel, title='Por Año', action='section',
                         thumbnail=get_thumb('year', auto=True), type='movies'))
    return itemlist
def get_source(url):
    """Fetch *url* and flatten the HTML by removing whitespace noise so the
    channel's single-line regexes can match it."""
    logger.info()
    page = httptools.downloadpage(url).data
    page = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", page)
    return page
def list_all(item):
    """Scrape a catalogue page into series items.

    Entries tagged 'Anime' have a single implicit season (go straight to the
    episode list); 'Serie' entries go through season selection first.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = '<article class=".*?">.*?<a href="([^"]+)".*?<img src="([^"]+)" alt="([^"]+)">.*?'
    patron +='<span class="year">(\d{4})</span>.*?<span class="(?:animes|tvs)">([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, year, scrapedtype in matches:
        title = scrapedtitle
        thumbnail = scrapedthumbnail
        url = scrapedurl
        # 'Anime' pages list episodes directly; 'Serie' pages list seasons
        if scrapedtype == 'Anime':
            action = 'episodesxseasons'
        elif scrapedtype == 'Serie':
            action = 'seasons'
        new_item = Item(channel=item.channel,
                        action=action,
                        title=title,
                        url=url,
                        contentSerieName=scrapedtitle,
                        thumbnail=thumbnail,
                        type=scrapedtype,
                        infoLabels={'year':year})
        itemlist.append(new_item)
    # Enrich the scraped items with TMDB metadata
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    # Paginación
    url_next_page = scrapertools.find_single_match(data,'li><a href="([^"]+)" rel="next">&raquo;</a>')
    if url_next_page:
        itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
    return itemlist
def seasons(item):
    """List the seasons of a 'Serie' entry; appends the videolibrary shortcut."""
    logger.info()
    itemlist=[]
    data=get_source(item.url)
    patron='<li class="gridseason"><a href="([^"]+)"><span class="title">Temporada (\d+)</span>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    infoLabels = item.infoLabels
    for scrapedurl, season in matches:
        # NOTE(review): shared infoLabels dict mutated per iteration; safe only
        # if Item copies infoLabels at construction — verify.
        infoLabels['season']=season
        title = 'Temporada %s' % season
        itemlist.append(Item(channel=item.channel, title=title, url=scrapedurl, action='episodesxseasons',
                             infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Offer the videolibrary shortcut only when something was scraped
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
                 action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
    return itemlist
def episodios(item):
    """Collect every episode of every season (used by the videolibrary)."""
    logger.info()
    episodes = []
    for season_item in seasons(item):
        episodes.extend(episodesxseasons(season_item))
    return episodes
def episodesxseasons(item):
    """List the episodes of one season (or of the whole anime).

    Episode numbers are derived from list position, counting down from
    len(matches) — this assumes the page lists newest episodes first
    (NOTE(review): verify against the live site). The list is reversed at
    the end so it is returned in ascending order.
    """
    logger.info()
    itemlist = []
    data=get_source(item.url)
    patron='<a href="([^"]+)" title="([^"]+)">'
    matches = re.compile(patron, re.DOTALL).findall(data)
    infoLabels = item.infoLabels
    # Animes have no season pages, so they are pinned to season 1
    if item.type == 'Anime':
        season = '1'
    else:
        season = item.infoLabels['season']
    episode = len(matches)
    for scrapedurl, scrapedtitle in matches:
        infoLabels['episode'] = episode
        url = scrapedurl
        title = scrapedtitle.replace(' online', '')
        title = '%sx%s - %s' % (season, episode, title)
        itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', infoLabels=infoLabels))
        episode -= 1
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist[::-1]
def new_episodes(item):
    """List the most recently released episodes shown on the home page."""
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = '<article class="contenedor">.*?<a href="([^"]+)" title=".*?">.*?data-src="([^"]+)" alt="([^"]+)">'
    for ep_url, ep_thumb, ep_title in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(Item(channel=item.channel, title=ep_title, url=ep_url,
                             thumbnail=ep_thumb, action='findvideos'))
    return itemlist
def findvideos(item):
    """Scrape the playable links of an episode and resolve their servers.

    The language of each link is looked up separately by the 1-based option
    index, so the pairing relies on the page keeping video[] entries and
    #optionN tabs in the same order.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = 'video\[\d+\] = .*?src="([^"]+)".*?;'
    matches = re.compile(patron, re.DOTALL).findall(data)
    option = 1
    for scrapedurl in matches:
        # Language label of this option's tab; unknown labels default to VOSE
        lang = scrapertools.find_single_match(data, '"#option%s">([^<]+)<' % str(option)).strip()
        lang = lang.lower()
        if lang not in IDIOMAS:
            lang = 'subtitulado'
        quality = ''
        # Placeholder filled later by get_servers_itemlist with (server, language)
        title = '%s %s'
        # Redirector links need one extra request to reach the final URL
        if 'redirector' in scrapedurl:
            url_data = httptools.downloadpage(scrapedurl).data
            url = scrapertools.find_single_match(url_data,'window.location.href = "([^"]+)";')
        else:
            url = scrapedurl
        if url != '':
            itemlist.append(
                Item(channel=item.channel, url=url, title=title, action='play', quality=quality,
                     language=IDIOMAS[lang], infoLabels=item.infoLabels))
        option += 1
    itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
    # Requerido para Filtrar enlaces
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Requerido para AutoPlay
    autoplay.start(itemlist, item)
    itemlist = sorted(itemlist, key=lambda it: it.language)
    if item.contentType != 'episode':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                     action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
    return itemlist
def search(item, texto):
    """Append the query to the search URL and run it; empty query yields []."""
    logger.info()
    query = texto.replace(" ", "+")
    item.url = item.url + query
    return search_results(item) if query != '' else []
def search_results(item):
    """Parse the search results page into series items."""
    logger.info()
    results = []
    data = get_source(item.url)
    patron = '<div class="search-results__img"><a href="([^"]+)" title=".*?"><img src="([^"]+)".*?'
    patron += '<h2>([^<]+)</h2></a><div class="description">([^<]+)</div>'
    for found_url, found_thumb, found_title, found_plot in re.compile(patron, re.DOTALL).findall(data):
        results.append(Item(channel=item.channel, title=found_title, url=found_url,
                            contentSerieName=found_title, thumbnail=found_thumb,
                            plot=found_plot, action='seasons'))
    tmdb.set_infoLabels_itemlist(results, seekTmdb=True)
    return results
def newest(categoria):
    """Entry point for the global 'Novedades' sections.

    Builds the listing for *categoria* ('peliculas', 'infantiles' or 'terror')
    and returns [] on any scraping error so a broken channel never breaks the
    aggregated news view (the bare except is deliberate best-effort).
    """
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria in ['peliculas']:
            item.url = host + 'movies/'
        elif categoria == 'infantiles':
            item.url = host + 'genre/animacion/'
        elif categoria == 'terror':
            item.url = host + 'genre/terror/'
        item.type = 'movies'
        itemlist = list_all(item)
        # Drop the pagination pseudo-item; the emptiness guard avoids the
        # original IndexError on itemlist[-1] when nothing was scraped
        if itemlist and itemlist[-1].title == 'Siguiente >>':
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist

View File

@@ -15,7 +15,7 @@ from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = 'http://seriesblanco.xyz/'
host = 'http://seriesblanco.org/'
IDIOMAS = {'es': 'Cast', 'la': 'Lat', 'vos': 'VOSE', 'vo': 'VO'}
list_language = IDIOMAS.values()
@@ -40,7 +40,7 @@ def mainlist(item):
title="Todas",
action="list_all",
thumbnail=get_thumb('all', auto=True),
url=host + 'listado/',
url=host + 'lista-de-series/',
))
itemlist.append(Item(channel=item.channel,
@@ -54,11 +54,12 @@ def mainlist(item):
title="A - Z",
action="section",
thumbnail=get_thumb('alphabet', auto=True),
url=host+'listado/', ))
url=host+'lista-de-series/', ))
itemlist.append(Item(channel=item.channel,
title="Buscar",
action="search",
url=host+"?s=",
thumbnail=get_thumb('search', auto=True)))
itemlist = filtertools.show_option(itemlist, item.channel, list_language, list_quality)
@@ -69,7 +70,7 @@ def mainlist(item):
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def list_all(item):
@@ -79,13 +80,13 @@ def list_all(item):
data = get_source(item.url)
contentSerieName = ''
patron = "<div style='float.*?<a href='(.*?)'>.*?src='(.*?)' title='(.*?)'"
patron = '<div style="float.*?<a href="([^"]+)">.*?src="([^"]+)".*?data-original-title="([^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = host + scrapedurl
url = scrapedurl
thumbnail = scrapedthumbnail
title = scrapertools.decodeHtmlentities(scrapedtitle)
@@ -103,13 +104,12 @@ def list_all(item):
# #Paginacion
if itemlist != []:
base_page = scrapertools.find_single_match(item.url,'(.*?)?')
next_page = scrapertools.find_single_match(data, '</span><a href=?pagina=2>>></a>')
next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
if next_page != '':
itemlist.append(Item(channel=item.channel,
action="lista",
action="list_all",
title='Siguiente >>>',
url=base_page+next_page,
url=next_page,
thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
))
return itemlist
@@ -120,17 +120,14 @@ def section(item):
itemlist = []
data = get_source(item.url)
if item.title == 'Generos':
patron = '<li><a href=([^ ]+)><i class=fa fa-bookmark-o></i> (.*?)</a></li>'
patron = '<li><a href="([^ ]+)"><i class="fa fa-bookmark-o"></i> ([^<]+)</a></li>'
elif item.title == 'A - Z':
patron = "<a dir='ltr' href=(.*?) class='label label-primary'>(.*?)</a>"
patron = '<a dir="ltr" href="([^"]+)" class="label label-primary">([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
if item.title == 'Generos':
url = host + scrapedurl
else:
url = scrapedurl
url = scrapedurl
title = scrapedtitle
itemlist.append(Item(channel=item.channel,
action='list_all',
@@ -143,7 +140,7 @@ def seasons(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<span itemprop=seasonNumber class=fa fa-arrow-down>.*?Temporada (\d+) '
patron = '<span itemprop="seasonNumber" class="fa fa-arrow-down">.*?Temporada (\d+) '
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels=item.infoLabels
for scrapedseason in matches:
@@ -184,14 +181,13 @@ def episodesxseason(item):
logger.info()
itemlist = []
data = get_source(item.url)
season = item.contentSeasonNumber
season_data = scrapertools.find_single_match(data, '<div id=collapse%s.*?panel-primary' % season)
patron = "<td><a href='([^ ]+)'.*?itemprop='episodeNumber'>%s+x(\d+)</span> - (.*?) </a>.*?(/banderas.*?)</td>" % season
season_data = scrapertools.find_single_match(data, '<div id="collapse%s".*?</tbody>' % season)
patron = '<td><a href="([^ ]+)".*?itemprop="episodeNumber">%sx(\d+)</span> (.*?) </a>.*?<td>(.*?)</td>' % season
matches = re.compile(patron, re.DOTALL).findall(season_data)
infoLabels = item.infoLabels
for scrapedurl, scraped_episode, scrapedtitle, lang_data in matches:
url = host + scrapedurl
url = scrapedurl
title = '%sx%s - %s' % (season, scraped_episode, scrapedtitle.strip())
infoLabels['episode'] = scraped_episode
thumbnail = item.thumbnail
@@ -217,18 +213,19 @@ def new_episodes(item):
data = get_source(item.url)
patron = "padding-left:15px'><a href=(.*?) >.*?src='(.*?)' title=.*? alt='(.*?) (\d+x\d+)'.*?"
patron += "<span class='strong'>.*?</span>(.*?)</button></a></div>"
data = scrapertools.find_single_match(data,
'<center>Series Online : Capítulos estrenados recientemente</center>.*?</ul>')
patron = '<li><h6.*?src="([^"]+)".*?href="([^"]+)">.*?src="([^"]+)".*? data-original-title=" (\d+x\d+).*?'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedinfo, lang_data in matches:
for lang_data, scrapedurl, scrapedthumbnail, scrapedinfo, in matches:
url = host+scrapedurl
thumbnail = scrapedthumbnail
scrapedinfo = scrapedinfo.split('x')
season = scrapedinfo[0]
episode = scrapedinfo[1]
scrapedtitle = scrapertools.find_single_match(url, 'capitulo/([^/]+)/').replace("-", " ")
title = '%s - %sx%s' % (scrapedtitle, season, episode )
title, language = add_language(title, lang_data)
itemlist.append(Item(channel=item.channel,
@@ -244,7 +241,7 @@ def new_episodes(item):
def add_language(title, string):
logger.info()
languages = scrapertools.find_multiple_matches(string, '/banderas/(.*?).png')
languages = scrapertools.find_multiple_matches(string, '/language/(.*?).png')
language = []
for lang in languages:
@@ -269,34 +266,38 @@ def findvideos(item):
itemlist = []
data = get_source(item.url)
patron = "<a href=([^ ]+) target=_blank><img src='/servidores/(.*?).(?:png|jpg)'.*?sno.*?"
patron += "<span>(.*?)<.*?(/banderas.*?)td"
patron = '<imgsrc="([^"]+)".*?<a class="open-link" data-enlace="([^"]+)".*?<td>([^<]+)</td>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, server, quality, lang_data in matches:
title = server.capitalize()
for lang_data, scrapedurl, quality in matches:
encrypted=False
title = '%s'
if quality == '':
quality = 'SD'
title = '%s [%s]' % (title, quality)
title, language = add_language(title, lang_data)
thumbnail = item.thumbnail
enlace_id, serie_id, se, ep = scrapertools.find_single_match(scrapedurl,'enlace(\d+)/(\d+)/(\d+)/(\d+)/')
url = host + 'ajax/load_enlace.php?serie=%s&temp=%s&cap=%s&id=%s' % (serie_id, se, ep, enlace_id)
url = scrapedurl
if 'streamcrypt' in url:
url = url.replace('https://streamcrypt', 'https://www.streamcrypt')
temp_data = httptools.downloadpage(url, follow_redirects=False, only_headers=True)
if 'location' in temp_data.headers:
url = temp_data.headers['location']
else:
continue
itemlist.append(Item(channel=item.channel,
title=title,
url=url,
action="play",
thumbnail=thumbnail,
server=server,
quality=quality,
language=language,
encrypted=encrypted,
infoLabels=item.infoLabels
))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
@@ -307,25 +308,14 @@ def findvideos(item):
return sorted(itemlist, key=lambda it: it.language)
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url, follow_redirects=False).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.infoLabels = item.infoLabels
return itemlist
def search_results(item):
logger.info()
itemlist = []
data = httptools.downloadpage(host + 'finder.php', post=item.post).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = "<a href='(.*?)'>.*?src=(.*?) style.*?value=(.*?)>"
data = get_source(item.url)
patron = '<div style="float.*?<a href="([^"]+)">.*?src="([^"]+)".*?alt="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -344,10 +334,10 @@ def search_results(item):
def search(item, texto):
logger.info()
import urllib
if texto != '':
post = {'query':texto}
post = urllib.urlencode(post)
item.post = post
texto = texto.replace(" ", "+")
item.url = item.url + texto
return search_results(item)
if texto != '':
return list_all(item)
else:
return []

View File

@@ -10,6 +10,7 @@ from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import filtertools
from channels import autoplay
IDIOMAS = {'latino': 'Latino'}
@@ -97,7 +98,6 @@ def episodios(item):
patron_caps = '<li><span>Capitulo (\d+).*?</span><a href="(.*?)">(.*?)</a></li>'
matches = scrapertools.find_multiple_matches(data, patron_caps)
# data_info = scrapertools.find_single_match(data, '<div class="info">.+?<\/div><\/div>')
patron_info = '<img src="([^"]+)">.+?</span>(.*?)</p>.*?<h2>Reseña:</h2><p>(.*?)</p>'
scrapedthumbnail, show, scrapedplot = scrapertools.find_single_match(data, patron_info)
scrapedthumbnail = host + scrapedthumbnail
@@ -142,66 +142,73 @@ def episodios(item):
return itemlist
def findvideos(item):
logger.info()
import base64
logger.info()
itemlist = []
url_server = "https://openload.co/embed/%s/"
url_api_get_key = "https://serieslan.com/idx.php?i=%s&k=%s"
def txc(key, _str):
s = range(256)
j = 0
res = ''
for i in range(256):
j = (j + s[i] + ord(key[i % len(key)])) % 256
x = s[i]
s[i] = s[j]
s[j] = x
i = 0
j = 0
for y in range(len(_str)):
i = (i + 1) % 256
j = (j + s[i]) % 256
x = s[i]
s[i] = s[j]
s[j] = x
res += chr(ord(_str[y]) ^ s[(s[i] + s[j]) % 256])
return res
data = httptools.downloadpage(item.url).data
pattern = "<script type=.+?>.+?\['(.+?)','(.+?)','.+?'\]"
idv, ide = scrapertools.find_single_match(data, pattern)
thumbnail = scrapertools.find_single_match(data,
'<div id="tab-1" class="tab-content current">.+?<img src="([^"]*)">')
show = scrapertools.find_single_match(data, '<span>Episodio: <\/span>([^"]*)<\/p><p><span>Idioma')
thumbnail = host + thumbnail
data = httptools.downloadpage(url_api_get_key % (idv, ide), headers={'Referer': item.url}).data
data = eval(data)
if type(data) == list:
video_url = url_server % (txc(ide, base64.decodestring(data[2])))
server = "openload"
if " SUB" in item.title:
lang = "VOS"
elif " Sub" in item:
lang = "VOS"
else:
lang = "Latino"
title = "Enlace encontrado en " + server + " [" + lang + "]"
if item.contentChannel=='videolibrary':
itemlist.append(item.clone(channel=item.channel, action="play", url=video_url,
thumbnail=thumbnail, server=server, folder=False))
else:
itemlist.append(Item(channel=item.channel, action="play", title=title, show=show, url=video_url, plot=item.plot,
thumbnail=thumbnail, server=server, folder=False))
_sa = scrapertools.find_single_match(data, 'var _sa = (true|false);')
_sl = scrapertools.find_single_match(data, 'var _sl = ([^;]+);')
sl = eval(_sl)
autoplay.start(itemlist, item)
return itemlist
else:
return []
buttons = scrapertools.find_multiple_matches(data, '<button href="" class="selop" sl="([^"]+)">([^<]+)</button>')
for id, title in buttons:
new_url = golink(int(id), _sa, sl)
data = httptools.downloadpage(new_url).data
_x0x = scrapertools.find_single_match(data, 'var x0x = ([^;]+);')
x0x = eval(_x0x)
url = resolve(x0x[4], base64.b64decode(x0x[1]))
if 'download' in url:
url = url.replace('download', 'preview')
title = '%s'
itemlist.append(Item(channel=item.channel, title=title, url=url, action='play', language='latino',
infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
return itemlist
def golink (num, sa, sl):
    """Build the intermediate "/el/" link URL for server option *num*.

    *sa* is the string 'true'/'false' scraped from the page (selects the
    mirror host) and *sl* is the decoded parameter list scraped from the
    page's ``var _sl`` (indexes 0..3 are opaque site tokens).
    """
    import urllib
    # Fixed offsets into sl[2]; one character is picked at each offset,
    # shifted by the option number, to form an anti-scraping checksum.
    b = [3, 10, 5, 22, 31]
    d = ''
    for i in range(len(b)):
        d += sl[2][b[i]+num:b[i]+num+1]
    # Mirror selection: 'true' -> viteca.stream, otherwise serieslan.com.
    SVR = "https://viteca.stream" if sa == 'true' else "http://serieslan.com"
    # Only the first option (num == 0) carries the extra encoded path token;
    # '/' is swapped for '><' before quoting, presumably to match the
    # server-side decoder -- TODO confirm against the live site.
    TT = "/" + urllib.quote_plus(sl[3].replace("/", "><")) if num == 0 else ""
    return SVR + "/el/" + sl[0] + "/" + sl[1] + "/" + str(num) + "/" + sl[2] + d + TT
def resolve(value1, value2):
    """Decrypt *value2* with RC4 using *value1* as the key.

    Plain RC4: key-scheduling (KSA) followed by the pseudo-random
    generation algorithm (PRGA), XOR-ing the keystream into the input.
    Returns a str of the same length as *value2*. Because RC4 is
    symmetric, applying it twice with the same key round-trips.
    """
    # KSA: permute the 0..255 state array with the key bytes.
    # list() is required so the state is mutable on Python 3 as well
    # (range() returned a list on Python 2 but an immutable range object
    # on Python 3, where lista[i] = ... would raise TypeError).
    lista = list(range(256))
    j = 0
    for i in range(256):
        j = (j + lista[i] + ord(value1[i % len(value1)])) % 256
        lista[i], lista[j] = lista[j], lista[i]
    # PRGA: generate the keystream and XOR it with each input character.
    reto = ''
    m = 0
    j = 0
    for i in range(len(value2)):
        m = (m + 1) % 256
        j = (j + lista[m]) % 256
        lista[m], lista[j] = lista[j], lista[m]
        reto += chr(ord(value2[i]) ^ lista[(lista[m] + lista[j]) % 256])
    return reto

View File

@@ -1,37 +0,0 @@
{
"id": "seriesverde",
"name": "SeriesVerde",
"active": false,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "https://s33.postimg.cc/96dhv4trj/seriesverde.png",
"banner": "",
"categories": [
"tvshow"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Cast",
"Lat",
"VOSE",
"VO"
]
}
]
}

View File

@@ -1,321 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel SeriesVerde -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
# Base URL of the site; every relative scraped link is joined against it.
host = 'http://seriesverde.com/'
# Flag code (from /banderas/<code>.png images) -> display label, used by add_language().
IDIOMAS = {'es': 'Cast', 'la': 'Lat', 'vos': 'VOSE', 'vo': 'VO'}
# Language labels offered to filtertools for link filtering.
list_language = IDIOMAS.values()
# Qualities and hosters this channel announces to filtertools/autoplay.
list_quality = ['SD', 'Micro-HD-720p', '720p', 'HDitunes', 'Micro-HD-1080p' ]
list_servers = ['powvideo','yourupload', 'openload', 'gamovideo', 'flashx', 'clipwatching', 'streamango', 'streamcloud']
def mainlist(item):
    """Build the channel root menu: all shows, genres, A-Z index and search."""
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)

    # (title, action, thumb id, url); the search entry carries no url.
    entries = [
        ("Todas", "list_all", "all", host + 'listado/'),
        ("Generos", "section", "genres", host),
        ("A - Z", "section", "alphabet", host + 'listado/'),
        ("Buscar", "search", "search", None),
    ]
    itemlist = []
    for entry_title, entry_action, thumb_id, entry_url in entries:
        kwargs = dict(channel=item.channel,
                      title=entry_title,
                      action=entry_action,
                      thumbnail=get_thumb(thumb_id, auto=True))
        if entry_url is not None:
            kwargs['url'] = entry_url
        itemlist.append(Item(**kwargs))

    # Let filtertools and autoplay append their configuration entries.
    itemlist = filtertools.show_option(itemlist, item.channel, list_language, list_quality)
    autoplay.show_option(item.channel, itemlist)
    return itemlist
def get_source(url):
    """Download *url* and strip markup noise so scraping regexes stay simple."""
    logger.info()
    raw = httptools.downloadpage(url).data
    # Remove quotes, newlines, tabs, &nbsp;, <br> and runs of whitespace in
    # a single pass before pattern matching.
    return re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", raw)
def list_all(item):
    """List every show on the index page at item.url.

    Each match becomes a 'seasons' item; a 'Siguiente >>>' pagination item
    is appended when the page advertises a second page.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    contentSerieName = ''
    patron = "<div style='float.*?<a href='(.*?)'>.*?src='(.*?)' title='(.*?)'"
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        url = host + scrapedurl
        thumbnail = scrapedthumbnail
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(Item(channel=item.channel,
                             action='seasons',
                             title=title,
                             url=url,
                             thumbnail=thumbnail,
                             contentTitle=scrapedtitle,
                             contentSerieName=contentSerieName,
                             context=filtertools.context(item, list_language, list_quality),
                             ))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Paginacion
    if itemlist != []:
        # NOTE(review): the pattern '(.*?)?' always matches the empty string,
        # so base_page is '' and the next-page URL is next_page alone --
        # TODO confirm the intended base-URL pattern against the live site.
        base_page = scrapertools.find_single_match(item.url, '(.*?)?')
        next_page = scrapertools.find_single_match(data, '</span><a href=?pagina=2>>></a>')
        if next_page != '':
            # Fixed: action was "lista", which does not exist in this channel;
            # pagination must re-enter list_all itself.
            itemlist.append(Item(channel=item.channel,
                                 action="list_all",
                                 title='Siguiente >>>',
                                 url=base_page + next_page,
                                 thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
                                 ))
    return itemlist
def section(item):
    """List genre or alphabetical index entries, each linking to list_all."""
    logger.info()
    data = get_source(item.url)
    if item.title == 'Generos':
        patron = '<li><a href=([^ ]+) rel=nofollow><i class=fa fa-bookmark-o></i> (.*?)</a></li>'
    elif item.title == 'A - Z':
        patron = "<a dir='ltr' href=(.*?) class='label label-success'>(.*?)</a>"
    found = re.compile(patron, re.DOTALL).findall(data)
    itemlist = []
    for scrapedurl, scrapedtitle in found:
        # Genre links are site-relative; the A-Z index already has full URLs.
        full_url = host + scrapedurl if item.title == 'Generos' else scrapedurl
        itemlist.append(Item(channel=item.channel,
                             action='list_all',
                             title=scrapedtitle,
                             url=full_url
                             ))
    return itemlist
def seasons(item):
    """List the seasons of the show at item.url as 'episodesxseason' items.

    Also appends an 'add to videolibrary' entry when the platform supports it.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = '<span itemprop=seasonNumber class=fa fa-arrow-down>.*?Temporada (\d+) '
    matches = re.compile(patron, re.DOTALL).findall(data)
    # NOTE(review): this is the same dict object on every appended Item, so
    # infoLabels['season'] ends up as the LAST season for all of them until
    # tmdb.set_infoLabels_itemlist refreshes each item -- TODO confirm.
    infoLabels=item.infoLabels
    for scrapedseason in matches:
        # Every season points back at the show page; episodesxseason picks
        # the right collapse<N> block out of it.
        url = item.url
        title = 'Temporada %s' % scrapedseason
        contentSeasonNumber = scrapedseason
        infoLabels['season'] = contentSeasonNumber
        thumbnail = item.thumbnail
        itemlist.append(Item(channel=item.channel,
                             action="episodesxseason",
                             title=title,
                             url=url,
                             thumbnail=thumbnail,
                             contentSeasonNumber=contentSeasonNumber,
                             infoLabels=infoLabels
                             ))
    # Enrich every season item with TMDB metadata.
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel,
                             title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                             url=item.url,
                             action="add_serie_to_library",
                             extra="episodios",
                             contentSerieName=item.contentSerieName,
                             ))
    return itemlist
def episodios(item):
    """Return every episode of the show by flattening all of its seasons."""
    logger.info()
    episodes = []
    for season_item in seasons(item):
        episodes.extend(episodesxseason(season_item))
    return episodes
def episodesxseason(item):
    """List the episodes of the season item.contentSeasonNumber.

    The show page holds all seasons; the relevant one lives inside the
    matching <div id=collapseN> block, from which each episode row yields
    its URL, number, title and language-flag images.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    season = item.contentSeasonNumber
    # Narrow the page to this season's collapsible panel before matching.
    season_data = scrapertools.find_single_match(data, '<div id=collapse%s.*?panel-success' % season)
    patron = "<td><a href='([^ ]+)'.*?itemprop='episodeNumber'>%s+x(\d+)</span> - (.*?) </a>.*?(/banderas.*?)</td>" % season
    matches = re.compile(patron, re.DOTALL).findall(season_data)
    # NOTE(review): same dict object shared by all appended Items, so
    # infoLabels['episode'] holds the last episode for every item until
    # tmdb refreshes them -- TODO confirm.
    infoLabels = item.infoLabels
    for scrapedurl, scraped_episode, scrapedtitle, lang_data in matches:
        url = host + scrapedurl
        title = '%sx%s - %s' % (season, scraped_episode, scrapedtitle.strip())
        infoLabels['episode'] = scraped_episode
        thumbnail = item.thumbnail
        # Decorate the title with [language] tags from the flag images.
        title, language = add_language(title, lang_data)
        itemlist.append(Item(channel=item.channel,
                             action="findvideos",
                             title=title,
                             url=url,
                             thumbnail=thumbnail,
                             language=language,
                             infoLabels=infoLabels
                             ))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def add_language(title, string):
    """Append [language] tags to *title* from the flag images in *string*.

    Returns (decorated_title, language) where language is a plain string
    when exactly one flag was found but a LIST of strings when several
    were -- callers (filtertools / sorting) tolerate the mixed type.
    """
    logger.info()
    # Flag image names encode the language code, e.g. /banderas/es.png.
    languages = scrapertools.find_multiple_matches(string, '/banderas/(.*?).png')
    language = []
    for lang in languages:
        # Unknown codes (and Japanese audio) are treated as subtitled VO.
        if 'jap' in lang or lang not in IDIOMAS:
            lang = 'vos'
        if len(languages) == 1:
            language = IDIOMAS[lang]
            title = '%s [%s]' % (title, language)
        else:
            language.append(IDIOMAS[lang])
            title = '%s [%s]' % (title, IDIOMAS[lang])
    return title, language
def findvideos(item):
    """List the hoster links for the episode at item.url.

    Each table row yields the hoster name (from its /servidores/ icon), the
    quality text and the language flags; the real link is fetched later by
    play() through the site's ajax/load_enlace.php endpoint.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = "<a href=([^ ]+) target=_blank><img src='/servidores/(.*?).(?:png|jpg)'.*?sno.*?"
    patron += "<span>(.*?)<.*?(/banderas.*?)td"
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, server, quality, lang_data in matches:
        title = server.capitalize()
        # Rows without an explicit quality default to SD.
        if quality == '':
            quality = 'SD'
        title = '%s [%s]' % (title, quality)
        title, language = add_language(title, lang_data)
        thumbnail = item.thumbnail
        # The scraped href encodes enlace/serie/season/episode ids; rebuild
        # the ajax URL that resolves to the actual hoster page.
        enlace_id, serie_id, se, ep = scrapertools.find_single_match(scrapedurl,'enlace(\d+)/(\d+)/(\d+)/(\d+)/')
        url = host + 'ajax/load_enlace.php?serie=%s&temp=%s&cap=%s&id=%s' % (serie_id, se, ep, enlace_id)
        itemlist.append(Item(channel=item.channel,
                             title=title,
                             url=url,
                             action="play",
                             thumbnail=thumbnail,
                             server=server,
                             quality=quality,
                             language=language,
                             infoLabels=item.infoLabels
                             ))
    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Requerido para AutoPlay
    autoplay.start(itemlist, item)
    return sorted(itemlist, key=lambda it: it.language)
def play(item):
    """Resolve the intermediate link page at item.url into playable items."""
    logger.info()
    # Fetch the ajax enlace page without following redirects and let
    # servertools pick out any recognised hoster links.
    page = httptools.downloadpage(item.url, follow_redirects=False).data
    videos = servertools.find_video_items(data=page)
    for video in videos:
        video.infoLabels = item.infoLabels
    return videos
def search_results(item):
    """POST item.post to the site's finder.php and list matching shows."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(host + 'finder.php', post=item.post).data
    # Strip quotes/whitespace like get_source() does, so the patron matches.
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    patron = "<a href='(.*?)'>.*?src=(.*?) style.*?value=(.*?)>"
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumb, scrapedtitle in matches:
        itemlist.append(Item(channel=item.channel,
                             title=scrapedtitle,
                             url=host+scrapedurl,
                             action="seasons",
                             thumbnail=scrapedthumb,
                             contentSerieName=scrapedtitle,
                             context=filtertools.context(item, list_language, list_quality)
                             ))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def search(item, texto):
    """Channel search entry point.

    Encodes *texto* as the POST body expected by the site's finder and
    delegates to search_results(). Returns an empty list for an empty
    query (the original fell through and returned None, which callers
    iterating the result cannot handle).
    """
    logger.info()
    if texto != '':
        import urllib
        # finder.php expects a POSTed 'query' field.
        item.post = urllib.urlencode({'query': texto})
        return search_results(item)
    return []

View File

@@ -293,15 +293,22 @@ def submenu_tools(item):
logger.info()
itemlist = list()
#Herramientas de testeo masivo
# Herramientas personalizadas
import os
test_path = os.path.join(config.get_runtime_path(), "channels/test.py")
if filetools.exists(test_path):
itemlist.append(Item(title='Testear canales y servidores ...', channel="test", action="mainlist"))
itemlist.append(
Item(channel=CHANNELNAME, action="", title="", folder=False, thumbnail=get_thumb("setting_0.png")))
channel_custom = os.path.join(config.get_runtime_path(), 'channels', 'custom.py')
if not filetools.exists(channel_custom):
user_custom = os.path.join(config.get_data_path(), 'custom.py')
if filetools.exists(user_custom):
filetools.copy(user_custom, channel_custom, silent=True)
if filetools.exists(channel_custom):
itemlist.append(Item(channel='custom', action='mainlist', title='Custom Channel'))
itemlist.append(Item(channel=CHANNELNAME, action="check_quickfixes", folder=False,
title="Comprobar actualizaciones urgentes", plot="Versión actual: %s" % config.get_addon_version() ))
itemlist.append(Item(channel=CHANNELNAME, action="", title="", folder=False,
thumbnail=get_thumb("setting_0.png")))
itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60564), action="", folder=False,
thumbnail=get_thumb("channels.png")))
itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60565), action="conf_tools",
@@ -322,6 +329,13 @@ def submenu_tools(item):
return itemlist
def check_quickfixes(item):
logger.info()
from platformcode import updater
return updater.check_addon_updates(verbose=True)
def conf_tools(item):
logger.info()
@@ -553,7 +567,7 @@ def channels_onoff(item):
# Diálogo para pre-seleccionar
# ----------------------------
preselecciones = ['Pre-seleccionar activados actualmente', 'Pre-seleccionar todos', 'No pre-seleccionar ninguno']
preselecciones = [config.get_localized_string(70517), config.get_localized_string(70518), config.get_localized_string(70519)]
ret = platformtools.dialog_select(config.get_localized_string(60545), preselecciones)
if ret == -1: return False # pedido cancel
if ret == 2: preselect = []

View File

@@ -3,6 +3,7 @@
import re
import urlparse
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import logger
@@ -16,7 +17,6 @@ def mainlist(item):
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar",
url="http://www.submityourflicks.com/index.php?mode=search&q=%s&submit=Search"))
return itemlist
@@ -37,18 +37,6 @@ def search(item, texto):
def videos(item):
logger.info()
itemlist = []
'''
<div class="item-block item-normal col" >
<div class="inner-block">
<a href="http://www.submityourflicks.com/1846642-my-hot-wife-bending-over-and-getting-her-cunt-reamed.html" title="My hot wife bending over and getting her cunt reamed..">
<span class="image">
<script type='text/javascript'>stat['56982c566d05c'] = 0;
pic['56982c566d05c'] = new Array();
pics['56982c566d05c'] = new Array(1, 1, 1, 1, 1, 1, 1, 1, 1, 1);</script>
<img src="
'''
data = scrapertools.downloadpageGzip(item.url)
patron = '<div class="item-block[^<]+'
patron += '<div class="inner-block[^<]+'
@@ -56,34 +44,26 @@ def videos(item):
patron += '<span class="image".*?'
patron += '<img src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
title = scrapedtitle
url = scrapedurl
thumbnail = scrapedthumbnail.replace(" ", "%20")
plot = ""
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
folder=False))
next_page_url = scrapertools.find_single_match(data, "<a href='([^']+)' class=\"next\">NEXT</a>")
if next_page_url != "":
url = urlparse.urljoin(item.url, next_page_url)
itemlist.append(Item(channel=item.channel, action="videos", title=">> Página siguiente", url=url, folder=True,
viewmode="movie"))
return itemlist
def play(item):
logger.info()
data = scrapertools.cache_page(item.url)
media_url = scrapertools.find_single_match(data, 'file\:\s*"([^"]+)"')
data = httptools.downloadpage(item.url).data
media_url = "https:" + scrapertools.find_single_match(data, 'source src="([^"]+)"')
itemlist = []
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=media_url,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
thumbnail=item.thumbnail, show=item.title, server="directo", folder=False))
return itemlist

View File

@@ -46,7 +46,7 @@ def buscartrailer(item, trailers=[]):
item.contentTitle = item.contentTitle.strip()
elif keyboard:
fulltitle = re.sub('\[\/*(B|I|COLOR)\s*[^\]]*\]', '', item.fulltitle.strip())
item.contentTitle = platformtools.dialog_input(default=fulltitle, heading="Introduce el título a buscar")
item.contentTitle = platformtools.dialog_input(default=fulltitle, heading=config.get_localized_string(70505))
if item.contentTitle is None:
item.contentTitle = fulltitle
else:
@@ -86,21 +86,21 @@ def buscartrailer(item, trailers=[]):
title = "[COLOR green]%s[/COLOR]"
else:
title = "%s"
itemlist.append(item.clone(title=title % "Búsqueda en Youtube", action="youtube_search",
itemlist.append(item.clone(title=title % config.get_localized_string(70507), action="youtube_search",
text_color="green"))
itemlist.append(item.clone(title=title % "Búsqueda en Filmaffinity",
itemlist.append(item.clone(title=title % config.get_localized_string(70024),
action="filmaffinity_search", text_color="green"))
# Si se trata de una serie, no se incluye la opción de buscar en Abandomoviez
if not item.show and not item.infoLabels['tvshowtitle']:
itemlist.append(item.clone(title=title % "Búsqueda en Abandomoviez",
itemlist.append(item.clone(title=title % config.get_localized_string(70508),
action="abandomoviez_search", text_color="green"))
itemlist.append(item.clone(title=title % "Búsqueda en Jayhap (Youtube, Vimeo & Dailymotion)",
itemlist.append(item.clone(title=title % config.get_localized_string(70509),
action="jayhap_search", text_color="green"))
if item.contextual:
global window_select, result
select = Select("DialogSelect.xml", config.get_runtime_path(), item=item, itemlist=itemlist,
caption="Buscando: " + item.contentTitle)
caption=config.get_localized_string(70506) + item.contentTitle)
window_select.append(select)
select.doModal()
@@ -177,11 +177,11 @@ def youtube_search(item):
'Siguiente')
if next_page != "":
next_page = urlparse.urljoin("https://www.youtube.com", next_page)
itemlist.append(item.clone(title=">> Siguiente", action="youtube_search", extra="youtube", page=next_page,
itemlist.append(item.clone(title=config.get_localized_string(70502), action="youtube_search", extra="youtube", page=next_page,
thumbnail="", text_color=""))
if not itemlist:
itemlist.append(item.clone(title="La búsqueda no ha dado resultados (%s)" % titulo,
itemlist.append(item.clone(title=config.get_localized_string(70501) % titulo,
action="", thumbnail="", text_color=""))
if keyboard:
@@ -189,7 +189,7 @@ def youtube_search(item):
title = "[COLOR green]%s[/COLOR]"
else:
title = "%s"
itemlist.append(item.clone(title=title % "Búsqueda Manual en Youtube", action="manual_search",
itemlist.append(item.clone(title=title % config.get_localized_string(70510), action="manual_search",
text_color="green", thumbnail="", extra="youtube"))
return itemlist
@@ -232,11 +232,11 @@ def abandomoviez_search(item):
next_page = scrapertools.find_single_match(data, '<a href="([^"]+)">Siguiente')
if next_page != "":
next_page = urlparse.urljoin("http://www.abandomoviez.net/%s" % item.prefix, next_page)
itemlist.append(item.clone(title=">> Siguiente", action="abandomoviez_search", page=next_page, thumbnail="",
itemlist.append(item.clone(title=config.get_localized_string(70502), action="abandomoviez_search", page=next_page, thumbnail="",
text_color=""))
if not itemlist:
itemlist.append(item.clone(title="La búsqueda no ha dado resultados", action="", thumbnail="",
itemlist.append(item.clone(title=config.get_localized_string(70501), action="", thumbnail="",
text_color=""))
if keyboard:
@@ -244,7 +244,7 @@ def abandomoviez_search(item):
title = "[COLOR green]%s[/COLOR]"
else:
title = "%s"
itemlist.append(item.clone(title=title % "Búsqueda Manual en Abandomoviez",
itemlist.append(item.clone(title=title % config.get_localized_string(70511),
action="manual_search", thumbnail="", text_color="green", extra="abandomoviez"))
return itemlist
@@ -256,13 +256,13 @@ def search_links_abando(item):
data = scrapertools.downloadpage(item.url)
itemlist = []
if "Lo sentimos, no tenemos trailer" in data:
itemlist.append(item.clone(title="No hay ningún vídeo disponible", action="", text_color=""))
itemlist.append(item.clone(title=config.get_localized_string(70503), action="", text_color=""))
else:
if item.contextual:
progreso = platformtools.dialog_progress("Buscando en abandomoviez", "Cargando trailers...")
progreso = platformtools.dialog_progress(config.get_localized_string(70512), config.get_localized_string(70504))
progreso.update(10)
i = 0
message = "Cargando trailers..."
message = config.get_localized_string(70504)
patron = '<div class="col-md-3 col-xs-6"><a href="([^"]+)".*?' \
'Images/(\d+).gif.*?</div><small>(.*?)</small>'
matches = scrapertools.find_multiple_matches(data, patron)
@@ -304,7 +304,7 @@ def search_links_abando(item):
title = "[COLOR green]%s[/COLOR]"
else:
title = "%s"
itemlist.append(item.clone(title=title % "Búsqueda Manual en Abandomoviez",
itemlist.append(item.clone(title=title % config.get_localized_string(70511),
action="manual_search", thumbnail="", text_color="green", extra="abandomoviez"))
return itemlist
@@ -349,11 +349,11 @@ def filmaffinity_search(item):
next_page = scrapertools.find_single_match(data, '<a href="([^"]+)">&gt;&gt;</a>')
if next_page != "":
next_page = urlparse.urljoin("http://www.filmaffinity.com/es/", next_page)
itemlist.append(item.clone(title=">> Siguiente", page=next_page, action="filmaffinity_search", thumbnail="",
itemlist.append(item.clone(title=config.get_localized_string(70502), page=next_page, action="filmaffinity_search", thumbnail="",
text_color=""))
if not itemlist:
itemlist.append(item.clone(title="La búsqueda no ha dado resultados (%s)" % item.contentTitle,
itemlist.append(item.clone(title=config.get_localized_string(70501) % item.contentTitle,
action="", thumbnail="", text_color=""))
if keyboard:
@@ -361,7 +361,7 @@ def filmaffinity_search(item):
title = "[COLOR green]%s[/COLOR]"
else:
title = "%s"
itemlist.append(item.clone(title=title % "Búsqueda Manual en Filmaffinity",
itemlist.append(item.clone(title=title % config.get_localized_string(70513),
action="manual_search", text_color="green", thumbnail="", extra="filmaffinity"))
return itemlist
@@ -373,7 +373,7 @@ def search_links_filmaff(item):
itemlist = []
data = scrapertools.downloadpage(item.url)
if not '<a class="lnkvvid"' in data:
itemlist.append(item.clone(title="No hay ningún vídeo disponible", action="", text_color=""))
itemlist.append(item.clone(title=config.get_localized_string(70503), action="", text_color=""))
else:
patron = '<a class="lnkvvid".*?<b>(.*?)</b>.*?iframe.*?src="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
@@ -402,7 +402,7 @@ def search_links_filmaff(item):
title = "[COLOR green]%s[/COLOR]"
else:
title = "%s"
itemlist.append(item.clone(title=title % "Búsqueda Manual en Filmaffinity",
itemlist.append(item.clone(title=title % config.get_localized_string(70513),
action="manual_search", thumbnail="", text_color="green", extra="filmaffinity"))
return itemlist
@@ -437,14 +437,14 @@ def jayhap_search(item):
text_color="white"))
if not itemlist:
itemlist.append(item.clone(title="La búsqueda no ha dado resultados (%s)" % item.contentTitle,
itemlist.append(item.clone(title=config.get_localized_string(70501) % item.contentTitle,
action="", thumbnail="", text_color=""))
else:
tokens = data['tokens']
tokens['yt_token'] = tokens.pop('youtube')
tokens['vm_token'] = tokens.pop('vimeo')
tokens['dm_token'] = tokens.pop('dailymotion')
itemlist.append(item.clone(title=">> Siguiente", page=tokens, action="jayhap_search", extra="jayhap",
itemlist.append(item.clone(title=config.get_localized_string(70502), page=tokens, action="jayhap_search", extra="jayhap",
thumbnail="", text_color=""))
if keyboard:
@@ -452,7 +452,7 @@ def jayhap_search(item):
title = "[COLOR green]%s[/COLOR]"
else:
title = "%s"
itemlist.append(item.clone(title=title % "Búsqueda Manual en Jayhap", action="manual_search",
itemlist.append(item.clone(title=title % config.get_localized_string(70514), action="manual_search",
text_color="green", thumbnail="", extra="jayhap"))
return itemlist
@@ -485,7 +485,7 @@ try:
except:
pass
self.getControl(1).setLabel("[COLOR orange]" + self.caption + "[/COLOR]")
self.getControl(5).setLabel("[COLOR tomato][B]Cerrar[/B][/COLOR]")
self.getControl(5).setLabel(config.get_localized_string(60495))
self.items = []
for item in self.itemlist:
item_l = xbmcgui.ListItem(item.title)

View File

@@ -14,56 +14,56 @@
{
"id": "tmdb",
"type": "list",
"label": "Idioma de búsqueda en TMDB",
"default": 7,
"label": "@70418",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"Alemán",
"Francés",
"Portugués",
"Italiano",
"Español Latino",
"Catalán",
"Inglés",
"Castellano"
"@70419",
"@70420",
"@70421",
"@70422",
"@70423",
"@70424",
"@70425",
"@70014"
]
},
{
"id": "tmdb_alternativo",
"type": "list",
"label": "Idioma alternativo para TMDB (No sinopsis idioma principal)",
"label": "@70426",
"default": 6,
"enabled": true,
"visible": true,
"lvalues": [
"Alemán",
"Francés",
"Portugués",
"Italiano",
"Español Latino",
"Catalán",
"Inglés",
"Castellano"
"@70419",
"@70420",
"@70421",
"@70422",
"@70423",
"@70424",
"@70425",
"@70014"
]
},
{
"id": "imdb",
"type": "list",
"label": "Idioma de los títulos en IMDB",
"label": "@70427",
"color": "0xFFE0F04B",
"default": 7,
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"Alemán",
"Francés",
"Portugués",
"Italiano",
"Español Latino",
"Catalán",
"Inglés",
"Castellano"
"@70419",
"@70420",
"@70421",
"@70422",
"@70423",
"@70424",
"@70425",
"@70014"
]
},
{
@@ -76,24 +76,24 @@
{
"id": "filmaff",
"type": "list",
"label": "Sitio Web Filmaffinity",
"label": "@70428",
"color": "0xFF25AA48",
"default": 5,
"enabled": true,
"visible": true,
"lvalues": [
"Colombia",
"Chile",
"Argentina",
"México",
"US/UK",
"España"
"@70429",
"@70430",
"@70431",
"@70432",
"@70433",
"@70434"
]
},
{
"id": "usuariofa",
"type": "text",
"label": "Usuario Filmaffinity (Opcional)",
"label": "@70435",
"color": "0xFFd50b0b",
"default": "",
"enabled": true,
@@ -102,7 +102,7 @@
{
"id": "passfa",
"type": "text",
"label": "Contraseña Filmaffinity",
"label": "@70436",
"color": "0xFFd50b0b",
"default": "",
"enabled": "!eq(-1,'')",
@@ -112,17 +112,17 @@
{
"id": "orderfa",
"type": "list",
"label": "Ordenar listas personales de Filmaffinity por:",
"label": "@70437",
"color": "0xFF25AA48",
"default": 0,
"enabled": "!eq(-1,'')",
"visible": true,
"lvalues": [
"Posición",
"Título",
"Año",
"Voto",
"Nota media"
"@70438",
"@60230",
"@70042",
"@70439",
"@70440"
]
},
{
@@ -135,7 +135,7 @@
{
"id": "usuariomal",
"type": "text",
"label": "Usuario MyAnimeList (Opcional)",
"label": "@70441",
"color": "0xFF25AA48",
"default": "",
"enabled": true,
@@ -144,7 +144,7 @@
{
"id": "passmal",
"type": "text",
"label": "Contraseña MyAnimeList",
"label": "@70442",
"color": "0xFF25AA48",
"default": "",
"enabled": "!eq(-1,'')",
@@ -154,7 +154,7 @@
{
"id": "adult_mal",
"type": "bool",
"label": "Mostrar Hentais en MyAnimeList",
"label": "@70443",
"color": "0xFFd50b0b",
"default": false,
"enabled": true,
@@ -163,15 +163,15 @@
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"label": "@60666",
"default": 2,
"enabled": true,
"visible": true,
"lvalues": [
"Perfil 3",
"Perfil 2",
"Perfil 1",
"Ninguno"
"@70444",
"@70445",
"@70446",
"@59992"
]
}
]

View File

@@ -61,7 +61,7 @@ def mainlist(item):
url='&title_type=tv_series,tv_special,mini_series',
thumbnail=images_predef + "0/TV%20Series.png"))
itemlist.append(
item.clone(title="Trakt.tv", action="trakt", text_color=color2, thumbnail="http://i.imgur.com/5sQjjuk.png"))
item.clone(title=config.get_localized_string(70415), action="trakt", text_color=color2, thumbnail="http://i.imgur.com/5sQjjuk.png"))
itemlist.append(
item.clone(title=config.get_localized_string(70026), action="mal", text_color=color2, thumbnail="http://i.imgur.com/RhsYWmd.png"))
itemlist.append(item.clone(title="", action=""))
@@ -255,7 +255,7 @@ def trakt(item):
page = "?page=1&limit=20&extended=full"
if not item.extra:
item.extra = "movie"
itemlist.append(item.clone(title=config.get_localized_string(70137), action="", text_color=color2))
itemlist.append(item.clone(title="[COLOR yellow][B]%s[/B][/COLOR]" % config.get_localized_string(70416), action=""))
itemlist.append(item.clone(title=config.get_localized_string(70049), action="acciones_trakt", url="movies/popular%s" % page))
itemlist.append(
item.clone(title=config.get_localized_string(70050), action="acciones_trakt", url="movies/trending%s" % page))
@@ -265,7 +265,7 @@ def trakt(item):
if token_auth:
itemlist.append(item.clone(title=config.get_localized_string(70052), action="acciones_trakt",
url="recommendations/movies?limit=100&extended=full", pagina=0))
itemlist.append(item.clone(title=config.get_localized_string(30123), action="", text_color=color2))
itemlist.append(item.clone(title="[COLOR yellow][B]%s[/B][/COLOR]" % config.get_localized_string(70417), action="",))
item.extra = "show"
itemlist.append(item.clone(title=config.get_localized_string(70049), action="acciones_trakt", url="shows/popular%s" % page))
itemlist.append(item.clone(title=config.get_localized_string(70050), action="acciones_trakt", url="shows/trending%s" % page))
@@ -275,7 +275,7 @@ def trakt(item):
if token_auth:
itemlist.append(item.clone(title=config.get_localized_string(70052), action="acciones_trakt",
url="recommendations/shows?limit=100&extended=full", pagina=0))
itemlist.append(item.clone(title=config.get_localized_string(70048), text_color=color2, extra="cuenta"))
itemlist.append(item.clone(title="[COLOR red][B]%s[/B][/COLOR]" % config.get_localized_string(70048), extra="cuenta"))
else:
item.extra = "movie"
# Se comprueba si existe un token guardado y sino se ejecuta el proceso de autentificación
@@ -450,7 +450,7 @@ def listado_tmdb(item):
else:
# Si es una búsqueda de personas se incluye en el título y fanart una película por la que es conocido
known_for = ob_tmdb.results[i].get("known_for")
type = item.search['type']
type=item.type
if known_for:
from random import randint
random = randint(0, len(known_for) - 1)
@@ -783,7 +783,7 @@ def filtro(item):
dict_values = None
list_controls.append({'id': 'years', 'label': 'Año', 'enabled': True, 'color': '0xFFCC2EFA',
list_controls.append({'id': 'years', 'label': config.get_localized_string(60232), 'enabled': True, 'color': '0xFFCC2EFA',
'type': 'list', 'default': -1, 'visible': True})
list_controls[0]['lvalues'] = []
valores['years'] = []
@@ -791,7 +791,7 @@ def filtro(item):
for i in range(1900, year + 1):
list_controls[0]['lvalues'].append(str(i))
valores['years'].append(str(i))
list_controls[0]['lvalues'].append('Cualquiera')
list_controls[0]['lvalues'].append(config.get_localized_string(70450))
valores['years'].append('')
if "Personalizado" in item.title:
@@ -805,7 +805,7 @@ def filtro(item):
lista = jsontools.load(httptools.downloadpage(url, cookies=False).data)["genres"]
if lista:
list_controls.append({'id': 'labelgenre', 'enabled': True, 'type': 'label', 'default': None,
'label': 'Selecciona uno, ninguno o más de un género',
'label': config.get_localized_string(70451),
'visible': True, 'color': '0xFFC52020'})
for l in lista:
list_controls.append({'id': 'genre' + str(l["id"]), 'label': l["name"], 'enabled': True,
@@ -813,11 +813,11 @@ def filtro(item):
except:
pass
list_controls.append({'id': 'orden', 'label': 'Ordenar por', 'enabled': True, 'color': '0xFF25AA48',
list_controls.append({'id': 'orden', 'label': config.get_localized_string(70455), 'enabled': True, 'color': '0xFF25AA48',
'type': 'list', 'default': -1, 'visible': True})
orden = ['Popularidad Desc', 'Popularidad Asc', 'Año Desc', 'Año Asc', 'Valoración Desc', 'Valoración Asc']
orden = [config.get_localized_string(70456), config.get_localized_string(70457), config.get_localized_string(70458), config.get_localized_string(70459), config.get_localized_string(70460), config.get_localized_string(70461)]
if item.extra == "movie":
orden.extend(['Título [A-Z]', 'Título [Z-A]'])
orden.extend([config.get_localized_string(70462), config.get_localized_string(70463)])
orden_tmdb = ['popularity.desc', 'popularity.asc', 'release_date.desc', 'release_date.asc',
'vote_average.desc', 'vote_average.asc', 'original_title.asc', 'original_title.desc']
valores['orden'] = []
@@ -828,10 +828,10 @@ def filtro(item):
list_controls.append({'id': 'espacio', 'label': '', 'enabled': False,
'type': 'label', 'default': None, 'visible': True})
list_controls.append({'id': 'save', 'label': 'Establecer como filtro por defecto', 'enabled': True,
list_controls.append({'id': 'save', 'label': config.get_localized_string(70464), 'enabled': True,
'type': 'bool', 'default': False, 'visible': True})
else:
list_controls.append({'id': 'keyword', 'label': 'Palabra Clave', 'enabled': True,
list_controls.append({'id': 'keyword', 'label': config.get_localized_string(70465), 'enabled': True,
'type': 'text', 'default': '', 'visible': True})
item.valores = valores
@@ -1005,12 +1005,12 @@ def filtro_imdb(item):
if valores_guardados:
dict_values = valores_guardados
list_controls.append({'id': 'title', 'label': 'Título', 'enabled': True,
list_controls.append({'id': 'title', 'label': config.get_localized_string(60320), 'enabled': True,
'type': 'text', 'default': '', 'visible': True})
list_controls.append({'id': 'yearsdesde', 'label': 'Año desde:', 'enabled': True, 'color': '0xFFCC2EFA',
list_controls.append({'id': 'yearsdesde', 'label': config.get_localized_string(70452), 'enabled': True, 'color': '0xFFCC2EFA',
'type': 'list', 'default': -1, 'visible': True})
list_controls.append({'id': 'yearshasta', 'label': 'Año hasta:', 'enabled': True, 'color': '0xFF2ECCFA',
list_controls.append({'id': 'yearshasta', 'label': config.get_localized_string(70453), 'enabled': True, 'color': '0xFF2ECCFA',
'type': 'list', 'default': -1, 'visible': True})
list_controls[1]['lvalues'] = []
list_controls[2]['lvalues'] = []
@@ -1020,23 +1020,23 @@ def filtro_imdb(item):
list_controls[1]['lvalues'].append(str(i))
list_controls[2]['lvalues'].append(str(i))
valores['years'].append(str(i))
list_controls[1]['lvalues'].append('Cualquiera')
list_controls[2]['lvalues'].append('Cualquiera')
list_controls[1]['lvalues'].append(config.get_localized_string(70450))
list_controls[2]['lvalues'].append(config.get_localized_string(70450))
valores['years'].append('')
try:
generos_spa = {'Action': 'Acción', 'Adventure': 'Aventura', 'Animation': 'Animación', 'Biography': 'Biografía',
'Comedy': 'Comedia', 'Crime': 'Crimen', 'Documentary': 'Documental', 'Family': 'Familia',
'Fantasy': 'Fantástico', 'Film-Noir': 'Cine Negro', 'Game-Show': 'Concursos',
'History': 'Historia', 'Horror': 'Terror', 'Music': 'Música', 'Mistery': 'Intriga',
'News': 'Noticias', 'Reality-TV': 'Reality', 'Sci-Fi': 'Ciencia Ficción', 'Sport': 'Deportes',
'Talk-Show': 'Entrevistas', 'War': 'Cine Bélico'}
generos_spa = {'Action': config.get_localized_string(70394), 'Adventure': config.get_localized_string(60267), 'Animation': config.get_localized_string(60268), 'Biography': config.get_localized_string(70403),
'Comedy': config.get_localized_string(60270), 'Crime': config.get_localized_string(60271), 'Documentary': config.get_localized_string(70396), 'Family': config.get_localized_string(70399),
'Fantasy': config.get_localized_string(60274), 'Film-Noir': config.get_localized_string(70400), 'Game-Show': config.get_localized_string(70401),
'History': config.get_localized_string(70405), 'Horror': config.get_localized_string(70013), 'Music': config.get_localized_string(70404), 'Mistery': config.get_localized_string(70402),
'News': config.get_localized_string(60279), 'Reality-TV': config.get_localized_string(70406), 'Sci-Fi': config.get_localized_string(70397), 'Sport': config.get_localized_string(70395),
'Talk-Show': config.get_localized_string(70398), 'War': config.get_localized_string(70407)}
data = httptools.downloadpage("http://www.imdb.com/search/title", cookies=False).data
bloque = scrapertools.find_single_match(data, '<h3>Genres</h3>(.*?)</table>')
matches = scrapertools.find_multiple_matches(bloque, ' value="([^"]+)"\s*>\s*<label.*?>([^<]+)<')
if matches:
list_controls.append({'id': 'labelgenre', 'enabled': True, 'type': 'label', 'visible': True,
'label': 'Selecciona uno, ninguno o más de un género', 'color': '0xFFC52020'})
'label': config.get_localized_string(70451), 'color': '0xFFC52020'})
lista = []
for valor, titulo in matches:
titulo = generos_spa.get(titulo, titulo)
@@ -1054,10 +1054,10 @@ def filtro_imdb(item):
bloque = scrapertools.find_single_match(data, '<h3>Countries</h3>(.*?)Less-Common')
matches = scrapertools.find_multiple_matches(bloque, ' value="([^"]+)"\s*>([^<]+)<')
if matches:
list_controls.append({'id': 'pais', 'label': 'País', 'enabled': True, 'color': '0xFFFF8000',
list_controls.append({'id': 'pais', 'label': config.get_localized_string(70466), 'enabled': True, 'color': '0xFFFF8000',
'type': 'list', 'default': -1, 'visible': True})
list_controls[-1]['lvalues'] = []
list_controls[-1]['lvalues'].append('Cualquiera')
list_controls[-1]['lvalues'].append(config.get_localized_string(70450))
valores['pais'] = []
valores['pais'].append('')
for valor, titulo in matches:
@@ -1067,13 +1067,10 @@ def filtro_imdb(item):
except:
pass
list_controls.append({'id': 'votos', 'label': 'Número mínimo de votos', 'enabled': True,
'type': 'text', 'default': '10000', 'visible': True, 'color': '0xFFF4FA58'})
list_controls.append({'id': 'orden', 'label': 'Ordenar por', 'enabled': True, 'color': '0xFF25AA48',
list_controls.append({'id': 'orden', 'label': config.get_localized_string(70455), 'enabled': True, 'color': '0xFF25AA48',
'type': 'list', 'default': -1, 'visible': True})
orden = ['Popularidad Desc', 'Popularidad Asc', 'Año Desc', 'Año Asc', 'Valoración Desc', 'Valoración Asc',
'Título [A-Z]', 'Título [Z-A]']
orden = [config.get_localized_string(70456), config.get_localized_string(70457), config.get_localized_string(70458), config.get_localized_string(70459), config.get_localized_string(70460), config.get_localized_string(70461),
config.get_localized_string(70462), config.get_localized_string(70463)]
orden_imdb = ['moviemeter,asc', 'moviemeter,desc', 'year,desc', 'year,asc',
'user_rating,desc', 'user_rating,asc', 'alpha,asc', 'alpha,desc']
@@ -1083,7 +1080,7 @@ def filtro_imdb(item):
list_controls[-1]['lvalues'].insert(0, tipo_orden)
valores['orden'].insert(0, orden_imdb[i])
list_controls.append({'id': 'save', 'label': 'Establecer como filtro por defecto', 'enabled': True,
list_controls.append({'id': 'save', 'label': config.get_localized_string(70464), 'enabled': True,
'type': 'bool', 'default': False, 'visible': True})
item.valores = valores
@@ -1130,12 +1127,12 @@ def indices_imdb(item):
itemlist = []
from datetime import datetime
if config.get_localized_string(70032) in item.title:
generos_spa = {'Action': 'Accion', 'Adventure': 'Aventura', 'Animation': 'Animacion', 'Biography': 'Biografía',
'Comedy': 'Comedia', 'Crime': 'Crimen', 'Documentary': 'Documental', 'Family': 'Familia',
'Fantasy': 'Fantasia', 'Film-Noir': 'Cine Negro', 'Game-Show': 'Concursos',
'History': 'Historia', 'Horror': 'Terror', 'Music': 'Música', 'Mistery': 'Intriga',
'News': 'Noticias', 'Reality-TV': 'Reality', 'Sci-Fi': 'Ciencia Ficcion', 'Sport': 'Deportes',
'Talk-Show': 'Entrevistas', 'War': 'Cine Bélico'}
generos_spa = {'Action': config.get_localized_string(70394), 'Adventure': config.get_localized_string(60267), 'Animation': config.get_localized_string(60268), 'Biography': config.get_localized_string(70403), 'Thriller': config.get_localized_string(70410),
'Comedy': config.get_localized_string(60270), 'Crime': config.get_localized_string(60271), 'Documentary': config.get_localized_string(70396), 'Family': config.get_localized_string(70399), 'Romance': config.get_localized_string(70409),
'Fantasy': config.get_localized_string(60274), 'Film-Noir': config.get_localized_string(70400), 'Game-Show': config.get_localized_string(70401), 'Drama': config.get_localized_string(70412), 'Western': config.get_localized_string(70411),
'History': config.get_localized_string(70405), 'Horror': config.get_localized_string(70013), 'Music': config.get_localized_string(70404), 'Musical': config.get_localized_string(70408),'Mystery': config.get_localized_string(70402),
'News': config.get_localized_string(60279), 'Reality-TV': config.get_localized_string(70406), 'Sci-Fi': config.get_localized_string(70397), 'Sport': config.get_localized_string(70395),
'Talk-Show': config.get_localized_string(70398), 'War': config.get_localized_string(70407)}
data = httptools.downloadpage("http://www.imdb.com/search/title", cookies=False).data
bloque = scrapertools.find_single_match(data, '<h3>Genres</h3>(.*?)</table>')
matches = scrapertools.find_multiple_matches(bloque, ' value="([^"]+)"\s*>\s*<label.*?>([^<]+)<')
@@ -1616,9 +1613,9 @@ def filtro_fa(item):
if valores_guardados:
dict_values = valores_guardados
list_controls.append({'id': 'yearsdesde', 'label': 'Año desde:', 'enabled': True,
list_controls.append({'id': 'yearsdesde', 'label': config.get_localized_string(70452), 'enabled': True,
'type': 'list', 'default': -1, 'visible': True})
list_controls.append({'id': 'yearshasta', 'label': 'Año hasta:', 'enabled': True,
list_controls.append({'id': 'yearshasta', 'label': config.get_localized_string(70453), 'enabled': True,
'type': 'list', 'default': -1, 'visible': True})
list_controls[0]['lvalues'] = []
list_controls[1]['lvalues'] = []
@@ -1628,8 +1625,8 @@ def filtro_fa(item):
list_controls[0]['lvalues'].append(str(i))
list_controls[1]['lvalues'].append(str(i))
valores['years'].append(str(i))
list_controls[0]['lvalues'].append('Cualquiera')
list_controls[1]['lvalues'].append('Cualquiera')
list_controls[0]['lvalues'].append(config.get_localized_string(70450))
list_controls[1]['lvalues'].append(config.get_localized_string(70450))
valores['years'].append('')
data = httptools.downloadpage("http://m.filmaffinity.com/%s/topgen.php" % langf).data
@@ -1640,7 +1637,7 @@ def filtro_fa(item):
bloque = scrapertools.find_single_match(data, 'name="genre">.*?</option>(.*?)</select>')
matches = scrapertools.find_multiple_matches(bloque, '<option value="([^"]+)">([^<]+)</option>')
if matches:
list_controls.append({'id': 'genero', 'label': 'Selecciona un género', 'enabled': True,
list_controls.append({'id': 'genero', 'label': config.get_localized_string(70467), 'enabled': True,
'type': 'list', 'default': -1, 'visible': True})
list_controls[2]['lvalues'] = []
list_controls[2]['lvalues'].append("Todos")
@@ -1659,7 +1656,7 @@ def filtro_fa(item):
bloque = scrapertools.find_single_match(data, 'name="country">.*?</option>(.*?)</select>')
matches = scrapertools.find_multiple_matches(bloque, '<option value="([^"]+)"\s*>([^<]+)</option>')
if matches:
list_controls.append({'id': 'pais', 'label': 'País', 'enabled': True,
list_controls.append({'id': 'pais', 'label': config.get_localized_string(70466), 'enabled': True,
'type': 'list', 'default': -1, 'visible': True})
list_controls[-1]['lvalues'] = []
list_controls[-1]['lvalues'].append('Todos')
@@ -1673,7 +1670,7 @@ def filtro_fa(item):
list_controls.append({'id': 'espacio', 'label': '', 'enabled': False,
'type': 'label', 'default': None, 'visible': True})
list_controls.append({'id': 'save', 'label': 'Establecer como filtro por defecto', 'enabled': True,
list_controls.append({'id': 'save', 'label': config.get_localized_string(70464), 'enabled': True,
'type': 'bool', 'default': False, 'visible': True})
item.valores = valores
@@ -1863,7 +1860,7 @@ def votar_fa(item):
dict_values = None
if item.voto:
dict_values = {'voto': item.voto}
list_controls.append({'id': 'voto', 'label': 'Indica tu voto:', 'enabled': True,
list_controls.append({'id': 'voto', 'label': config.get_localized_string(70468), 'enabled': True,
'type': 'list', 'default': 0, 'visible': True})
list_controls[0]['lvalues'] = ['No vista']
valores['voto'] = ["-1"]
@@ -2184,9 +2181,9 @@ def acciones_trakt(item):
elif data and not item.url.endswith("lists"):
data = jsontools.load(data)
if data and "page=1" in item.url and item.order:
valores = {'rank': 'Por defecto', 'added': 'Añadido', 'title': 'Título', 'released': 'Estreno',
'runtime': 'Duración', 'popularity': 'Popularidad', 'percentage': 'Valoración',
'votes': 'Votos', 'asc': 'ascendente', 'desc': 'descendente'}
valores = {'rank': config.get_localized_string(70003), 'added': config.get_localized_string(70469), 'title': config.get_localized_string(60320), 'released': config.get_localized_string(70470),
'runtime': config.get_localized_string(70471), 'popularity': config.get_localized_string(70472), 'percentage': config.get_localized_string(70473),
'votes': config.get_localized_string(70474), 'asc': config.get_localized_string(70475), 'desc': config.get_localized_string(70476)}
orden = valores[item.order] + " " + valores[item.how]
itemlist.append(item.clone(title=config.get_localized_string(70349) % orden, action="order_list",
text_color=color4))
@@ -2267,13 +2264,13 @@ def order_list(item):
dict_values = {'orderby': valores1.index(item.order), 'orderhow': valores2.index(item.how)}
list_controls.append({'id': 'orderby', 'label': 'Ordenar por:', 'enabled': True,
list_controls.append({'id': 'orderby', 'label': config.get_localized_string(70455), 'enabled': True,
'type': 'list', 'default': 0, 'visible': True})
list_controls.append({'id': 'orderhow', 'label': 'De forma:', 'enabled': True,
'type': 'list', 'default': 0, 'visible': True})
list_controls[0]['lvalues'] = ['Por defecto', 'Añadido', 'Título', 'Estreno', 'Duración', 'Popularidad',
'Valoración', 'Votos']
list_controls[1]['lvalues'] = ['Ascendente', 'Descendente']
list_controls[0]['lvalues'] = [config.get_localized_string(70003), config.get_localized_string(70469), config.get_localized_string(60230), config.get_localized_string(70470), config.get_localized_string(70471), config.get_localized_string(70472),
config.get_localized_string(70473), config.get_localized_string(70474)]
list_controls[1]['lvalues'] = [config.get_localized_string(70477), config.get_localized_string(70478)]
return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values,
caption=config.get_localized_string(70320), item=item, callback='order_trakt')
@@ -2494,8 +2491,8 @@ def detalles_mal(item):
itemlist.append(
item.clone(title=config.get_localized_string(70321) % score, action="menu_mal", contentTitle=title_mal))
elif item.login and config.is_xbmc():
status = {'1': 'Viendo Actualmente', '2': 'Completados', '3': 'En pausa', '4': 'Descartados',
'6': 'Previstos para ver'}
status = {'1': config.get_localized_string(70479), '2': config.get_localized_string(70480), '3': config.get_localized_string(70384), '4': config.get_localized_string(70385),
'6': config.get_localized_string(70481)}
estado = scrapertools.find_single_match(data, 'myinfo_updateInfo".*?option selected="selected" value="(\d+)"')
try:
estado = status[estado]
@@ -2995,7 +2992,7 @@ def info_anidb(item, itemlist, url):
if abrev != title:
title += " [%s]" % abrev
estado = estado.replace("complete", config.get_localized_string(70378)).replace("finished", config.get_localized_string(70379)) \
.replace("stalled", "Pausa").replace("dropped", "Abandonada")
.replace("stalled", config.get_localized_string(70380)).replace("dropped", config.get_localized_string(70381))
title += " [COLOR %s](%s)[/COLOR] %s/%s [%s]" % (color6, estado, epis, epi_total, source)
itemlist.append(Item(channel=item.channel, title=title, infoLabels=infoLabels, action="",
thumbnail=thumbnail, text_color=color4))
@@ -3012,22 +3009,22 @@ def filtro_mal(item):
if valores_guardados:
dict_values = valores_guardados
list_controls.append({'id': 'keyword', 'label': 'Palabra Clave', 'enabled': True,
list_controls.append({'id': 'keyword', 'label': config.get_localized_string(70465), 'enabled': True,
'type': 'text', 'default': '', 'visible': True})
list_controls.append({'id': 'tipo', 'label': 'Tipo', 'enabled': True,
list_controls.append({'id': 'tipo', 'label': config.get_localized_string(70482), 'enabled': True,
'type': 'list', 'default': -1, 'visible': True})
list_controls[1]['lvalues'] = ['Especial', 'OVA', 'Película', 'Serie', 'Cualquiera']
list_controls[1]['lvalues'] = [config.get_localized_string(70483), config.get_localized_string(70484), config.get_localized_string(60244), config.get_localized_string(70136), config.get_localized_string(70450)]
valores["tipo"] = ['4', '2', '3', '1', '0']
list_controls.append({'id': 'valoracion', 'label': 'Valoración', 'enabled': True,
list_controls.append({'id': 'valoracion', 'label': config.get_localized_string(70473), 'enabled': True,
'type': 'list', 'default': -1, 'visible': True})
list_controls[2]['lvalues'] = ['(1) Grotesca', '(2) Horrible', '(3) Muy mala', '(4) Mala',
'(5) Regular', '(6) Pasable', '(7) Buena', '(8) Muy buena',
'(9) Genial', '(10) Obra maestra', 'Cualquiera']
list_controls[2]['lvalues'] = [config.get_localized_string(70486), config.get_localized_string(70487), config.get_localized_string(70488), config.get_localized_string(70489),
config.get_localized_string(70490), config.get_localized_string(70491), config.get_localized_string(70492), config.get_localized_string(70493),
config.get_localized_string(70494), config.get_localized_string(70495), config.get_localized_string(70450)]
valores["valoracion"] = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '0']
list_controls.append({'id': 'estado', 'label': 'Estado', 'enabled': True,
list_controls.append({'id': 'estado', 'label': config.get_localized_string(70485), 'enabled': True,
'type': 'list', 'default': -1, 'visible': True})
list_controls[3]['lvalues'] = ['Por estrenar', 'En emisión', 'Terminada', 'Cualquiera']
list_controls[3]['lvalues'] = ['Por estrenar', config.get_localized_string(60264), config.get_localized_string(70379), config.get_localized_string(70450)]
valores["estado"] = ['3', '1', '2', '0']
try:
@@ -3037,7 +3034,7 @@ def filtro_mal(item):
generos = scrapertools.find_multiple_matches(data, patron)
if generos:
list_controls.append({'id': 'labelgenre', 'enabled': True, 'type': 'label', 'default': None,
'label': 'Selecciona uno, ninguno o más de un género',
'label': config.get_localized_string(70451),
'visible': True, 'color': '0xFFC52020'})
for value, genre in generos:
list_controls.append({'id': 'genre' + value, 'label': genre, 'enabled': True,
@@ -3047,7 +3044,7 @@ def filtro_mal(item):
list_controls.append({'id': 'espacio', 'label': '', 'enabled': False,
'type': 'label', 'default': None, 'visible': True})
list_controls.append({'id': 'save', 'label': 'Establecer como filtro por defecto', 'enabled': True,
list_controls.append({'id': 'save', 'label': config.get_localized_string(70464), 'enabled': True,
'type': 'bool', 'default': False, 'visible': True})
item.valores = valores
@@ -3231,8 +3228,8 @@ def menu_mal(item):
data = httptools.downloadpage(item.url).data
try:
status = {'1': 'Viendo Actualmente', '2': 'Completados', '3': 'En pausa', '4': 'Descartados',
'6': 'Previstos para ver'}
status = {'1': config.get_localized_string(70479), '2': config.get_localized_string(70480), '3': config.get_localized_string(70384), '4': config.get_localized_string(70385),
'6': config.get_localized_string(70481)}
button, estado = scrapertools.find_single_match(data,
'myinfo_updateInfo"(.*?)>.*?option selected="selected" value="(\d+)"')
if "disabled" in button:
@@ -3249,9 +3246,9 @@ def menu_mal(item):
if "lista" in title_estado:
item.lista = True
itemlist.append(item.clone(title=config.get_localized_string(70390) % (item.contentTitle, title_estado), action=""))
status = {'1': 'Viendo Actualmente', '2': 'Completados', '3': 'En pausa', '4': 'Descartados',
'6': 'Previstos para ver'}
itemlist.append(item.clone(title="Anime: %s%s" % (item.contentTitle, title_estado), action=""))
status = {'1': config.get_localized_string(70479), '2': config.get_localized_string(70480), '3': config.get_localized_string(70384), '4': config.get_localized_string(70385),
'6': config.get_localized_string(70481)}
for key, value in status.items():
if not value in title_estado:
itemlist.append(

63
plugin.video.alfa/channels/vepelis.py Executable file → Normal file
View File

@@ -2,10 +2,12 @@
import re
import urlparse
import urllib
from core import scrapertools
from core import httptools
from core import servertools
from core import jsontools
from core import tmdb
from core.item import Item
from platformcode import config, logger
@@ -81,24 +83,6 @@ def listarpeliculas(item):
return itemlist
def findvideos(item):
logger.info()
# Descarga la página
itemlist = []
data = httptools.downloadpage(item.url).data
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
videoitem.channel = item.channel
videoitem.quality = item.quality
videoitem.language = item.language
videoitem.action = 'play'
return itemlist
def generos(item):
logger.info()
itemlist = []
@@ -230,6 +214,46 @@ def listado2(item):
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def get_link(data):
new_url = scrapertools.find_single_match(data, '(?:IFRAME|iframe) src="([^"]+)" scrolling')
return new_url
def findvideos(item):
logger.info()
host = 'https://www.locopelis.tv/'
itemlist = []
new_url = get_link(get_source(item.url))
new_url = get_link(get_source(new_url))
video_id = scrapertools.find_single_match(new_url, 'http.*?h=(\w+)')
new_url = '%s%s' % (host, 'playeropstream/api.php')
post = {'h': video_id}
post = urllib.urlencode(post)
data = httptools.downloadpage(new_url, post=post).data
json_data = jsontools.load(data)
url = json_data['url']
server = servertools.get_server_from_url(url)
title = '%s' % server
itemlist.append(Item(channel=item.channel, title=title, url=url, action='play',
server=server, infoLabels=item.infoLabels))
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle = item.fulltitle
))
return itemlist
def search(item, texto):
logger.info()
itemlist = []
@@ -277,3 +301,6 @@ def newest(categoria):
return []
return itemlist

View File

@@ -44,7 +44,9 @@
"00:00",
"04:00",
"08:00",
"12:00"
"12:00",
"16:00",
"20:00"
]
},
{
@@ -303,6 +305,12 @@
"default": true,
"enabled": "eq(-1,TheTvDB.com)",
"visible": true
},
{
"id": "verify_playcount",
"type": "bool",
"label": "Verificación de los contadores de vídeos vistos/no vistos (desmarcar para verificar)",
"default": false
}
]
}

View File

@@ -127,19 +127,23 @@ def list_tvshows(item):
pass
head_nfo, item_tvshow = videolibrarytools.read_nfo(tvshow_path)
item_tvshow.title = item_tvshow.contentTitle
item_tvshow.path = raiz
item_tvshow.nfo = tvshow_path
# Menu contextual: Marcar como visto/no visto
visto = item_tvshow.library_playcounts.get(item_tvshow.contentTitle, 0)
item_tvshow.infoLabels["playcount"] = visto
if visto > 0:
texto_visto = config.get_localized_string(60020)
contador = 0
else:
texto_visto = config.get_localized_string(60021)
contador = 1
try: #A veces da errores aleatorios, por no encontrar el .nfo. Probablemente problemas de timing
item_tvshow.title = item_tvshow.contentTitle
item_tvshow.path = raiz
item_tvshow.nfo = tvshow_path
# Menu contextual: Marcar como visto/no visto
visto = item_tvshow.library_playcounts.get(item_tvshow.contentTitle, 0)
item_tvshow.infoLabels["playcount"] = visto
if visto > 0:
texto_visto = config.get_localized_string(60020)
contador = 0
else:
texto_visto = config.get_localized_string(60021)
contador = 1
except:
logger.error('No encuentra: ' + str(tvshow_path))
continue
# Menu contextual: Buscar automáticamente nuevos episodios o no
if item_tvshow.active and int(item_tvshow.active) > 0:
@@ -319,6 +323,7 @@ def get_episodes(item):
def findvideos(item):
from channels import autoplay
logger.info()
# logger.debug("item:\n" + item.tostring('\n'))
@@ -326,6 +331,9 @@ def findvideos(item):
list_canales = {}
item_local = None
# Desactiva autoplay
autoplay.set_status(False)
if not item.contentTitle or not item.strm_path:
logger.debug("No se pueden buscar videos por falta de parametros")
return []
@@ -465,6 +473,8 @@ def findvideos(item):
itemlist.append(server)
# return sorted(itemlist, key=lambda it: it.title.lower())
autoplay.play_multi_channel(item, itemlist)
return itemlist
@@ -541,6 +551,70 @@ def update_tvshow(item):
p_dialog.close()
def verify_playcount_series(item, path):
    """
    Review and repair the PlayCount of a series whose entries have drifted out of
    sync with the real list of episode files in its folder.

    Missing episode, season or series entries are created with an "unwatched"
    mark (0).  Season/series counters are then re-verified via
    check_season_playcount().

    @param item: item of the series being verified
    @param path: folder of the series inside the videolibrary
    @return: tuple (item, estado) — `estado` is True when the caller should keep
             (or flip) the "verify_playcount" setting in videolibrary.py, so the
             repair pass runs once and then disables itself.
    """
    logger.info()
    #logger.debug("item:\n" + item.tostring('\n'))

    # If the verification has never been run, force it now; otherwise report
    # False so the caller can switch the one-shot setting off.
    estado = config.get_setting("verify_playcount", "videolibrary")
    if not estado or estado == False:
        estado = True
    else:
        estado = False

    if item.contentType == 'movie':                         # Series only
        return (item, False)

    if filetools.exists(path):
        nfo_path = filetools.join(path, "tvshow.nfo")
        head_nfo, it = videolibrarytools.read_nfo(nfo_path) # Read the series .nfo
        if not hasattr(it, 'library_playcounts') or not it.library_playcounts:
            # The .nfo has no playcount dict at all: create an empty one
            logger.error('** No tiene PlayCount')
            it.library_playcounts = {}

        # Episode files present in the series folder
        raiz, carpetas_series, ficheros = filetools.walk(path).next()

        estado_update = False
        for i in ficheros:
            if not i.endswith('.strm'):
                continue
            season_episode = scrapertools.get_season_and_episode(i)
            if not season_episode:
                # The file name does not carry season/episode numbers
                continue
            season, episode = season_episode.split("x")

            if season_episode not in it.library_playcounts:     # Episode missing
                it.library_playcounts.update({season_episode: 0})
                estado_update = True
            if 'season %s' % season not in it.library_playcounts:  # Season missing
                it.library_playcounts.update({'season %s' % season: 0})
                estado_update = True
            if it.contentSerieName not in it.library_playcounts:   # Series missing
                # FIX: insert under the same key the membership test checks
                # (it.contentSerieName).  The original inserted
                # item.contentSerieName, so whenever the two names differed the
                # test stayed true forever and the entry went in under the
                # wrong name.
                it.library_playcounts.update({it.contentSerieName: 0})
                estado_update = True

        if estado_update:
            logger.error('** Estado de actualización: ' + str(estado) + ' / PlayCount: ' + str(it.library_playcounts))
            estado = estado_update

        # If every episode of a season is marked, mark the season as well
        for key, value in it.library_playcounts.iteritems():
            if key.startswith("season"):
                season = scrapertools.find_single_match(key, r'season (\d+)')  # Season number
                it = check_season_playcount(it, season)

        # Persist the changes into the series .nfo
        if filetools.write(nfo_path, head_nfo + it.tojson()):
            return (it, estado)

    return (item, False)
def mark_content_as_watched2(item):
logger.info()
# logger.debug("item:\n" + item.tostring('\n'))

View File

@@ -1,48 +0,0 @@
{
"id": "yaske",
"name": "Yaske",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"banner": "yaske.png",
"fanart": "https://github.com/master-1970/resources/raw/master/images/fanart/yaske.png",
"thumbnail": "yaske.png",
"categories": [
"direct",
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,349 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urllib
import unicodedata
from core import channeltools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
idiomas1 = {"/es.png":"CAST","/en_es.png":"VOSE","/la.png":"LAT","/en.png":"ENG"}
domain = "yaske.ro"
HOST = "http://www." + domain
HOST_MOVIES = "http://peliculas." + domain + "/now_playing/"
HOST_TVSHOWS = "http://series." + domain + "/popular/"
HOST_TVSHOWS_TPL = "http://series." + domain + "/tpl"
parameters = channeltools.get_channel_parameters('yaske')
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
color1, color2, color3 = ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E']
def mainlist(item):
    """Root menu of the channel: movie entries first, then tv-show entries."""
    logger.info()
    item.url = HOST
    item.text_color = color2
    item.fanart = fanart_host
    thumb_tpl = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/verdes/%s.png"

    itemlist = [
        # --- Movies section ---
        item.clone(title="Peliculas", text_bold=True, viewcontent='movies',
                   thumbnail=thumb_tpl % 'novedades', viewmode="movie_with_plot"),
        item.clone(title=" Novedades", action="peliculas", viewcontent='movies',
                   url=HOST_MOVIES,
                   thumbnail=thumb_tpl % 'novedades', viewmode="movie_with_plot"),
        item.clone(title=" Estrenos", action="peliculas",
                   url=HOST + "/premiere", thumbnail=thumb_tpl % 'estrenos'),
        item.clone(title=" Género", action="menu_buscar_contenido",
                   thumbnail=thumb_tpl % 'generos', viewmode="thumbnails", url=HOST),
        item.clone(title=" Buscar película", action="search",
                   thumbnail=thumb_tpl % 'buscar', type="movie"),
        # --- TV-shows section ---
        item.clone(title="Series", text_bold=True, viewcontent='movies',
                   thumbnail=thumb_tpl % 'novedades', viewmode="movie_with_plot"),
        item.clone(title=" Novedades", action="series", viewcontent='movies',
                   url=HOST_TVSHOWS,
                   thumbnail=thumb_tpl % 'novedades', viewmode="movie_with_plot"),
        item.clone(title=" Buscar serie", action="search",
                   thumbnail=thumb_tpl % 'buscar', type="tvshow"),
    ]
    return itemlist
def series(item):
    """List the tv-shows of the current results page and append pagination."""
    logger.info()
    itemlist = []

    # Work out the url to fetch: strip any existing "page=" marker and bump the
    # counter; the very first call carries no "page=" parameter at all.
    base_url = scrapertools.find_single_match(item.url, '(.*?).page=')
    current = scrapertools.find_single_match(item.url, 'page=([0-9]+)')
    if current:
        page = int(current) + 1
    else:
        page = 1
        base_url = item.url
    separator = "&" if "search" in item.url else "?"
    url_p = base_url + "%spage=%s" % (separator, page)

    data = httptools.downloadpage(url_p).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

    patron = ('(?s)class="post-item-image btn-play-item".*?'
              'href="(http://series[^"]+)">.*?'
              '<img data-original="([^"]+)".*?'
              'glyphicon-play-circle"></i>([^<]+).*?'
              'glyphicon-calendar"></i>([^<]+).*?'
              'text-muted f-14">(.*?)</h3')
    for scrapedurl, scrapedthumbnail, scrapedepisodes, year, scrapedtitle in \
            scrapertools.find_multiple_matches(data, patron):
        year = year.strip()
        contentSerieName = scrapertools.htmlclean(scrapedtitle.strip())
        title = "%s (%s)" % (contentSerieName, scrapedepisodes)
        if "series" in scrapedurl:
            itemlist.append(Item(channel=item.channel, action="temporadas", title=title, url=scrapedurl,
                                 thumbnail=scrapedthumbnail, contentSerieName=contentSerieName,
                                 infoLabels={"year": year}, text_color=color1))

    # Basic metadata for every show, fetched with multiple threads
    tmdb.set_infoLabels(itemlist, True)

    # Pagination entry, only when something was listed and a "next" link exists
    if itemlist and scrapertools.find_single_match(data, 'href="([^"]+)">\s*&raquo;'):
        itemlist.append(
            Item(channel=item.channel, action="series", title=">> Página siguiente", thumbnail=thumbnail_host,
                 url=url_p, folder=True, text_color=color3, text_bold=True))
    return itemlist
def temporadas(item):
    """List every season of a tv-show; optionally offer adding it to the library."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = ('media-object" src="([^"]+).*?'
              'media-heading">([^<]+).*?'
              '<code>(.*?)</div>')
    # The show id is part of the url and does not change per season
    show_id = scrapertools.find_single_match(item.url, "yaske.ro/([0-9]+)")
    for scrapedthumbnail, scrapedtitle, scrapedcapitulos in \
            scrapertools.find_multiple_matches(data, patron):
        season = scrapertools.find_single_match(scrapedtitle, "[0-9]+")
        title = scrapedtitle + " (%s)" % scrapedcapitulos.replace("</code>", "").replace("\n", "")
        # POST payload the site template endpoint expects for one season
        post = urllib.urlencode({"data[season]": season, "data[id]": show_id,
                                 "name": "list_episodes", "both": "0", "type": "template"})
        item.infoLabels["season"] = season
        itemlist.append(item.clone(action="capitulos", post=post, title=title,
                                   url=HOST_TVSHOWS_TPL))
    tmdb.set_infoLabels(itemlist)
    if config.get_videolibrary_support():
        itemlist.append(Item(channel=item.channel, title=""))
        itemlist.append(item.clone(action="add_serie_to_library",
                                   channel=item.channel,
                                   extra="episodios",
                                   title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                                   url=item.url))
    return itemlist
def episodios(item):
    """Flatten all seasons into a single episode list (used by the videolibrary)."""
    logger.info()
    itemlist = []
    for season_item in temporadas(item):
        itemlist.extend(capitulos(season_item))
    return itemlist
def capitulos(item):
    """List the episodes of one season (fetched via the season-template POST)."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url, post=item.post).data.replace("<wbr>", "")
    patron = ('href=."([^"]+).*?'
              'media-heading.">([^<]+).*?'
              'fecha de emisi.*?: ([^<]+)')
    for scrapedurl, scrapedtitle, scrapeddate in \
            scrapertools.find_multiple_matches(data, patron):
        title = scrapedtitle + " (%s)" % scrapeddate
        episode = scrapertools.find_single_match(scrapedurl, "capitulo-([0-9]+)")
        # Query for the mirror aggregator: series name plus first word of the title
        query = item.contentSerieName + " " + scrapertools.find_single_match(title, "\w+")
        item.infoLabels["episode"] = episode
        itemlist.append(item.clone(action="findvideos",
                                   title=title.decode("unicode-escape"),
                                   query=query.replace(" ", "+"),
                                   url=scrapedurl.replace("\\", "")))
    tmdb.set_infoLabels(itemlist)
    return itemlist
def search(item, texto):
    """Channel search entry point; dispatches to movies or tv-shows by item.type."""
    logger.info()
    try:
        item.url = HOST + "/search/?query=" + texto.replace(' ', '+')
        item.extra = ""
        if item.type == "movie":
            itemlist = peliculas(item)
        else:
            itemlist = series(item)
        # Sort alphabetically, keeping the pagination entry (if any) at the end
        if itemlist[-1].title == ">> Página siguiente":
            pagination = itemlist.pop()
            itemlist = sorted(itemlist, key=lambda it: it.contentTitle)
            itemlist.append(pagination)
        else:
            itemlist = sorted(itemlist, key=lambda it: it.contentTitle)
        return itemlist
    except:
        # Also catches the IndexError of an empty result set
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def newest(categoria):
    """Entry point for the global "news" section; latest movies per category."""
    logger.info()
    item = Item()
    category_urls = {'peliculas': HOST,
                     'infantiles': HOST + "/genre/16/",
                     'terror': HOST + "/genre/27/"}
    try:
        if categoria not in category_urls:
            return []
        item.url = category_urls[categoria]
        itemlist = peliculas(item)
        # Drop the pagination entry: "news" lists are not paginated
        if itemlist[-1].title == ">> Página siguiente":
            itemlist.pop()
    # Catch everything so a failing channel does not break the news section
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist
def peliculas(item):
    """List the movies of the current results page, with language tags and pagination."""
    logger.info()
    itemlist = []

    # Work out the url to fetch: strip any existing "page=" marker and bump the
    # counter; the very first call carries no "page=" parameter at all.
    base_url = scrapertools.find_single_match(item.url, '(.*?).page=')
    current = scrapertools.find_single_match(item.url, 'page=([0-9]+)')
    if current:
        page = int(current) + 1
    else:
        page = 1
        base_url = item.url
    separator = "&" if "search" in item.url else "?"
    url_p = base_url + "%spage=%s" % (separator, page)

    data = httptools.downloadpage(url_p).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

    patron = ('(?s)class="post-item-image btn-play-item".*?'
              'href="([^"]+)">.*?'
              '<img data-original="([^"]+)".*?'
              'glyphicon-calendar"></i>([^<]+).*?'
              'post(.*?)</div.*?'
              'text-muted f-14">(.*?)</h3')
    for scrapedurl, scrapedthumbnail, year, idiomas, scrapedtitle in \
            scrapertools.find_multiple_matches(data, patron):
        query = scrapertools.find_single_match(scrapedurl, 'yaske.ro/[0-9]+/(.*?)/').replace("-", "+")
        year = year.strip()
        # Map the language flag images onto their tags (CAST/VOSE/LAT/ENG)
        flags = scrapertools.find_multiple_matches(idiomas, '<img src="([^"]+)"')
        idiomas_disponibles = []
        for flag in flags:
            for sufijo in idiomas1.keys():
                if flag.endswith(sufijo):
                    idiomas_disponibles.append(idiomas1[sufijo])
        if idiomas_disponibles:
            idiomas_disponibles = "[" + "/".join(idiomas_disponibles) + "]"
        contentTitle = scrapertools.htmlclean(scrapedtitle.strip())
        title = "%s %s" % (contentTitle, idiomas_disponibles)
        itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
                             thumbnail=scrapedthumbnail, contentTitle=contentTitle, query=query,
                             infoLabels={"year": year}, text_color=color1))

    # Basic metadata for every movie
    tmdb.set_infoLabels(itemlist)

    # Pagination entry, only when something was listed and a "next" link exists
    if itemlist and scrapertools.find_single_match(data, 'href="([^"]+)">\s*&raquo;'):
        itemlist.append(
            Item(channel=item.channel, action="peliculas", title=">> Página siguiente", thumbnail=thumbnail_host,
                 url=url_p, folder=True, text_color=color3, text_bold=True))
    return itemlist
def menu_buscar_contenido(item):
    """Build the genre menu scraped from the host's "Generos" block."""
    logger.info(item)
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Keep only the genres <ul> so the link pattern doesn't match the whole page
    data = scrapertools.find_single_match(data, 'Generos.*?</ul>')
    for scrapedurl, scrapedtitle in \
            scrapertools.find_multiple_matches(data, 'href="([^"]+)">([^<]+)'):
        itemlist.append(Item(channel=item.channel,
                             action="peliculas",
                             title=scrapedtitle,
                             url=HOST + scrapedurl,
                             text_color=color1,
                             contentType='movie',
                             folder=True,
                             viewmode="movie_with_plot"))
    return itemlist
def findvideos(item):
    """Resolve the mirrors of a movie/episode through the olimpo.link aggregator."""
    logger.info()
    itemlist = []
    sublist = []
    data = httptools.downloadpage(item.url).data
    mserver = scrapertools.find_single_match(data, '(?s)id="online".*?server="([^"]+)"')
    if not item.query:
        # Derive the aggregator query from the movie url slug
        item.query = scrapertools.find_single_match(item.url, "peliculas.*?/[0-9]+/([^/]+)").replace("-", "+")
    url_m = "http://olimpo.link/?q=%s&server=%s" % (item.query, mserver)
    patron = ('class="favicon.*?domain=(?:www\.|)([^\.]+).*?text-overflow.*?href="([^"]+).*?'
              '\[([^\]]+)\].*?\[([^\]]+)\]')
    data = httptools.downloadpage(url_m).data
    matches = scrapertools.find_multiple_matches(data, patron)
    page = 2
    # Walk every result page until one comes back empty
    while matches:
        for server, url, idioma, calidad in matches:
            if "drive" in server:
                server = "gvideo"
            sublist.append(item.clone(action="play", url=url, folder=False, text_color=color1,
                                      quality=calidad.strip(), language=idioma.strip(),
                                      server=server,
                                      title="Ver en %s %s" % (server, calidad)))
        data = httptools.downloadpage(url_m + "&page=%s" % page).data
        matches = scrapertools.find_multiple_matches(data, patron)
        page += 1
    # Group mirrors by language, sorted by quality+server inside each group
    sublist.sort(key=lambda it: it.quality + it.server)
    for idioma in ["Español", "Latino", "Ingles - Sub Español", "Ingles"]:
        lista_idioma = [it for it in sublist if it.language == idioma]
        if lista_idioma:
            itemlist.append(item.clone(title=idioma, folder=False, infoLabels="",
                                       text_color=color2, text_bold=True, thumbnail=thumbnail_host))
            itemlist.extend(lista_idioma)
    tmdb.set_infoLabels(itemlist, True)
    # Prepend "search trailer" and append "add to videolibrary" entries
    if itemlist and item.extra != "library":
        title = "%s [Buscar trailer]" % (item.contentTitle)
        itemlist.insert(0, item.clone(channel="trailertools", action="buscartrailer",
                                      text_color=color3, title=title, viewmode="list"))
        if config.get_videolibrary_support():
            itemlist.append(Item(channel=item.channel, title="Añadir película a la videoteca",
                                 action="add_pelicula_to_library", url=item.url, text_color="green",
                                 contentTitle=item.contentTitle, extra="library", thumbnail=thumbnail_host))
    return itemlist
def play(item):
    """Follow the olimpo.link iframe redirect and hand the final url to servertools."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    enlace = "http://olimpo.link" + scrapertools.find_single_match(data, '<iframe src="([^"]+)')
    # The "&ge=1" request answers with a Location header pointing at the real video
    item.url = httptools.downloadpage(enlace + "&ge=1", follow_redirects=False,
                                      only_headers=True).headers.get("location", "")
    return servertools.get_servers_itemlist([item.clone(server="")])

View File

@@ -63,6 +63,11 @@ def getchanneltypes(view="thumb_"):
# Lista de categorias
channel_types = ["movie", "tvshow", "anime", "documentary", "vos", "direct", "torrent"]
dict_types_lang = {'movie': config.get_localized_string(30122), 'tvshow': config.get_localized_string(30123),
'anime': config.get_localized_string(30124), 'documentary': config.get_localized_string(30125),
'vos': config.get_localized_string(30136), 'adult': config.get_localized_string(30126),
'direct': config.get_localized_string(30137)}
if config.get_setting("adult_mode") != 0:
channel_types.append("adult")
@@ -77,7 +82,6 @@ def getchanneltypes(view="thumb_"):
viewmode="thumbnails"))
for channel_type in channel_types:
logger.info("channel_type=%s" % channel_type)
title = config.get_localized_category(channel_type)
itemlist.append(Item(title=title, channel="channelselector", action="filterchannels", category=title,
channel_type=channel_type, viewmode="thumbnails",
@@ -169,10 +173,11 @@ def filterchannels(category, view="thumb_"):
context.append({"title": "Configurar canal", "channel": "setting", "action": "channel_config",
"config": channel_parameters["channel"]})
channel_info = set_channel_info(channel_parameters)
# Si ha llegado hasta aquí, lo añade
channelslist.append(Item(title=channel_parameters["title"], channel=channel_parameters["channel"],
action="mainlist", thumbnail=channel_parameters["thumbnail"],
fanart=channel_parameters["fanart"], category=channel_parameters["title"],
fanart=channel_parameters["fanart"], plot=channel_info, category=channel_parameters["title"],
language=channel_parameters["language"], viewmode="list", context=context))
except:
@@ -232,3 +237,34 @@ def get_thumb(thumb_name, view="thumb_", auto=False):
media_path = os.path.join(resource_path, icon_pack_name)
return os.path.join(media_path, view + thumb_name)
def set_channel_info(parameters):
    """Compose the plot text (content types + languages) shown for a channel."""
    logger.info()
    lang_dict = {'lat': 'Latino', 'cast': 'Castellano', '*': 'Latino, Castellano, VOSE, VO'}
    is_adult = parameters['adult']
    categories = parameters['categories']

    # Build the language summary; adult channels advertise none
    language = ''
    for lang in parameters['language']:
        # A "vos" channel advertises every language at once
        if 'vos' in categories:
            lang = '*'
        if lang in lang_dict and not is_adult:
            if language == '' or language == '*':
                language = lang_dict[lang]
            else:
                language = '%s, %s' % (language, lang_dict[lang])
        if lang == '*':
            break

    # Localized, comma-separated list of the channel's content categories
    content = ', '.join(config.get_localized_category(cat) for cat in categories)

    return '[COLOR yellow]Tipo de contenido:[/COLOR] %s\n\n[COLOR yellow]Idiomas:[/COLOR] %s' % (content, language)

View File

@@ -3,6 +3,24 @@
# httptools
# --------------------------------------------------------------------------------
# Fix para error de validación del certificado del tipo:
# [downloadpage] Response code: <urlopen error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:661)>
# [downloadpage] Response error: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:661)
# Fix desde la página: https://stackoverflow.com/questions/27835619/urllib-and-ssl-certificate-verify-failed-error
#-----------------------------------------------------------------------
import ssl

# SECURITY NOTE: this globally disables HTTPS certificate validation for the
# whole process, to work around CERTIFICATE_VERIFY_FAILED errors on installs
# that ship without usable CA bundles.  Every https connection made afterwards
# can be intercepted (MITM) — a deliberate, known trade-off here.
# On legacy Pythons the attribute does not exist because certificates were
# never verified by default, so there is nothing to patch.
_create_unverified_https_context = getattr(ssl, "_create_unverified_context", None)
if _create_unverified_https_context is not None:
    ssl._create_default_https_context = _create_unverified_https_context
import inspect
import cookielib
import gzip

View File

@@ -8,6 +8,7 @@ import os
import re
import time
import urlparse
import filetools
from core import httptools
from core import jsontools
@@ -451,8 +452,9 @@ def get_server_parameters(server):
# Debriders
elif os.path.isfile(os.path.join(config.get_runtime_path(), "servers", "debriders", server + ".json")):
path = os.path.join(config.get_runtime_path(), "servers", "debriders", server + ".json")
import filetools
#
#Cuando no está bien definido el server en el canal (no existe conector), muestra error por no haber "path" y se tiene que revisar el canal
#
data = filetools.read(path)
dict_server = jsontools.load(data)
@@ -466,13 +468,6 @@ def get_server_parameters(server):
if type(dict_server[k]) == str:
dict_server[k] = [dict_server[k]]
# if not dict_server.has_key(k) or dict_server[k] == "":
# dict_server[k] = []
# elif type(dict_server[k]) == dict:
# dict_server[k] = dict_server[k]["value"]
# if type(dict_server[k]) == str:
# dict_server[k] = [dict_server[k]]
if "find_videos" in dict_server:
dict_server['find_videos']["patterns"] = dict_server['find_videos'].get("patterns", list())
dict_server['find_videos']["ignore_urls"] = dict_server['find_videos'].get("ignore_urls", list())
@@ -495,7 +490,6 @@ def get_server_parameters(server):
def get_server_json(server_name):
# logger.info("server_name=" + server_name)
import filetools
try:
server_path = filetools.join(config.get_runtime_path(), "servers", server_name + ".json")
if not filetools.exists(server_path):

View File

@@ -1350,7 +1350,7 @@ class Tmdb(object):
if "status_code" in self.temporada[numtemporada]:
#Se ha producido un error
msg = "La busqueda de " + buscando + " no dio resultados."
msg = config.get_localized_string(70496) + buscando + config.get_localized_string(70497)
msg += "\nError de tmdb: %s %s" % (
self.temporada[numtemporada]["status_code"], self.temporada[numtemporada]["status_message"])
logger.debug(msg)

View File

@@ -39,11 +39,11 @@ def auth_trakt():
else:
itemlist = []
title = "Accede a esta página: %s" % item.verify_url
title = config.get_localized_string(60248) % item.verify_url
itemlist.append(item.clone(title=title, action=""))
title = "Ingresa este código y acepta: %s" % item.user_code
title = config.get_localized_string(60249) % item.user_code
itemlist.append(item.clone(title=title, action=""))
title = "Una vez hecho, pulsa aquí!"
title = config.get_localized_string(60250)
itemlist.append(item.clone(title=title, action="token_trakt"))
return itemlist
except:
@@ -71,11 +71,11 @@ def token_trakt(item):
data = jsontools.load(data)
else:
import time
dialog_auth = platformtools.dialog_progress("Sincronizar con Trakt. No cierres esta ventana",
"1. Entra en la siguiente url: %s" % item.verify_url,
"2. Ingresa este código en la página y acepta: %s"
dialog_auth = platformtools.dialog_progress(config.get_localized_string(60251),
config.get_localized_string(60252) % item.verify_url,
config.get_localized_string(60253)
% item.user_code,
"3. Espera a que se cierre esta ventana")
config.get_localized_string(60254))
# Generalmente cada 5 segundos se intenta comprobar si el usuario ha introducido el código
while True:
@@ -107,7 +107,7 @@ def token_trakt(item):
config.set_setting("token_trakt", token, "trakt")
config.set_setting("refresh_token_trakt", refresh, "trakt")
if not item.folder:
platformtools.dialog_notification("Éxito", "Cuenta vinculada correctamente")
platformtools.dialog_notification(config.get_localized_string(60255), config.get_localized_string(60256))
if config.is_xbmc():
import xbmc
xbmc.executebuiltin("Container.Refresh")
@@ -117,14 +117,14 @@ def token_trakt(item):
import traceback
logger.error(traceback.format_exc())
if not item.folder:
return platformtools.dialog_notification("Error", "Fallo en el proceso de vinculación")
return platformtools.dialog_notification(config.get_localized_string(60527), config.get_localized_string(60258))
token = ""
itemlist = []
if token:
itemlist.append(item.clone("Cuenta vinculada con éxito", action=""))
itemlist.append(item.clone(config.get_localized_string(60256), action=""))
else:
itemlist.append(item.clone("Fallo en el proceso de vinculación", action=""))
itemlist.append(item.clone(config.get_localized_string(60260), action=""))
return itemlist
@@ -276,10 +276,7 @@ def ask_install_script():
from platformcode import platformtools
respuesta = platformtools.dialog_yesno("Alfa", "Puedes instalar el script de Trakt a continuacíon, "
"una vez instalado y configurado lo que "
"veas se sincronizara con tu cuenta automaticamente.",
"¿Deseas continuar?")
respuesta = platformtools.dialog_yesno(config.get_localized_string(20000), config.get_localized_string(70521))
if respuesta:
xbmc.executebuiltin("InstallAddon(script.trakt)")
return

View File

@@ -215,6 +215,8 @@ def save_tvshow(item, episodelist):
@return: el número de episodios sobreescritos
@rtype fallidos: int
@return: el número de episodios fallidos o -1 si ha fallado toda la serie
@rtype path: str
@return: directorio serie
"""
logger.info()
# logger.debug(item.tostring('\n'))
@@ -223,7 +225,7 @@ def save_tvshow(item, episodelist):
# Si llegados a este punto no tenemos titulo o code, salimos
if not (item.contentSerieName or item.infoLabels['code']) or not item.channel:
logger.debug("NO ENCONTRADO contentSerieName NI code")
return 0, 0, -1 # Salimos sin guardar
return 0, 0, -1, path # Salimos sin guardar
scraper_return = scraper.find_and_set_infoLabels(item)
# Llegados a este punto podemos tener:
@@ -234,7 +236,7 @@ def save_tvshow(item, episodelist):
# TODO de momento si no hay resultado no añadimos nada,
# aunq podriamos abrir un cuadro para introducir el identificador/nombre a mano
logger.debug("NO ENCONTRADO EN SCRAPER O NO TIENE code")
return 0, 0, -1
return 0, 0, -1, path
_id = item.infoLabels['code'][0]
@@ -311,7 +313,7 @@ def save_tvshow(item, episodelist):
if not episodelist:
# La lista de episodios esta vacia
return 0, 0, 0
return 0, 0, 0, path
# Guardar los episodios
'''import time
@@ -402,7 +404,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
for i, e in enumerate(scraper.sort_episode_list(new_episodelist)):
if not silent:
p_dialog.update(int(math.ceil((i + 1) * t)), 'Añadiendo episodio...', e.title)
p_dialog.update(int(math.ceil((i + 1) * t)), config.get_localized_string(60064), e.title)
season_episode = "%sx%s" % (e.contentSeason, str(e.contentEpisodeNumber).zfill(2))
strm_path = filetools.join(path, "%s.strm" % season_episode)
@@ -619,6 +621,7 @@ def add_tvshow(item, channel=None):
# Obtiene el listado de episodios
itemlist = getattr(channel, item.action)(item)
insertados, sobreescritos, fallidos, path = save_tvshow(item, itemlist)
if not insertados and not sobreescritos and not fallidos:

View File

@@ -75,6 +75,9 @@ def update_title(item):
else:
item.add_videolibrary = True #Estamos Añadiendo a la Videoteca. Indicador para control de uso de los Canales
if item.add_videolibrary:
if item.season_colapse: del item.season_colapse
if item.from_num_season_colapse: del item.from_num_season_colapse
if item.from_title_season_colapse: del item.from_title_season_colapse
if item.contentType == "movie":
if item.from_title_tmdb: #Si se salvó el título del contenido devuelto por TMDB, se restaura.
item.title = item.from_title_tmdb
@@ -169,11 +172,11 @@ def update_title(item):
if new_item.infoLabels['rating']: #Actualizamos en Rating en el título
try:
rating_old = ''
if new_item.infoLabels['rating'] and new_item.infoLabels['rating'] != '0.0':
if new_item.infoLabels['rating'] and new_item.infoLabels['rating'] != 0.0:
rating_old = float(new_item.infoLabels['rating'])
rating_old = round(rating_old, 1)
rating_new = ''
if item.infoLabels['rating'] and item.infoLabels['rating'] != '0.0':
if item.infoLabels['rating'] and item.infoLabels['rating'] != 0.0:
rating_new = float(item.infoLabels['rating'])
rating_new = round(rating_new, 1)
item.title = item.title.replace("[" + str(rating_old) + "]", "[" + str(rating_new) + "]")
@@ -290,15 +293,30 @@ def post_tmdb_listado(item, itemlist):
if item_local.infoLabels['rating'] and item_local.infoLabels['rating'] != 0.0:
rating = float(item_local.infoLabels['rating'])
rating = round(rating, 1)
if rating == 0.0:
rating = ''
except:
pass
# Si TMDB no ha encontrado el vídeo limpiamos el año
if item_local.infoLabels['year'] == "-":
item_local.infoLabels['year'] = ''
item_local.infoLabels['aired'] = ''
# Si TMDB no ha encontrado nada y hemos usado el año de la web, lo intentamos sin año
if not item_local.infoLabels['tmdb_id']:
if item_local.infoLabels['year']: #lo intentamos de nuevo solo si había año, puede que erroneo
year = item_local.infoLabels['year'] #salvamos el año por si no tiene éxito la nueva búsqueda
item_local.infoLabels['year'] = "-" #reseteo el año
try:
tmdb.set_infoLabels(item_local, True) #pasamos otra vez por TMDB
except:
pass
if not item_local.infoLabels['tmdb_id']: #ha tenido éxito?
item_local.infoLabels['year'] = year #no, restauramos el año y lo dejamos ya
# Para Episodios, tomo el año de exposición y no el de inicio de la serie
elif item_local.infoLabels['aired']:
if item_local.infoLabels['aired']:
item_local.infoLabels['year'] = scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})')
# Preparamos el título para series, con los núm. de temporadas, si las hay
@@ -396,6 +414,160 @@ def post_tmdb_listado(item, itemlist):
return (item, itemlist)
def post_tmdb_seasons(item, itemlist):
logger.info()
"""
Pasada para gestión del menú de Temporadas de una Serie
La clave de activación de este método es la variable item.season_colapse que pone el canal en el Item de Listado.
Esta variable tendrá que desaparecer cuando se aña a la Videoteca para que se analicen los episodios de la forma tradicional
Repasa todos los episodios producidos en itemlist por "episodios" del canal para extraer las temporadas. Pone un título para Todas la Temps.
Crea un menú con las diferentes temporadas, así como con los títulos de Actualización de Título y de Añadir a Videoteca
Si ha habido un Fail-over o una Intervención Judicial, también lo anuncia
La llamada al método desde Episodios, antes de pasar Itemlist pot TMDB, es:
from lib import generictools
item, itemlist = generictools.post_tmdb_seasons(item, itemlist)
Si solo hay una temporada, devuelte el itemlist original para que se pinten los episodios de la forma tradicional
"""
#logger.debug(item)
season = 0
itemlist_temporadas = []
itemlist_fo = []
#Restauramos valores si ha habido fail-over
channel_alt = ''
if item.channel == channel_py:
if item.channel_alt:
channel_alt = item.category
item.category = item.channel_alt.capitalize()
del item.channel_alt
else:
if item.channel_alt:
channel_alt = item.channel
item.channel = item.channel_alt
item.category = item.channel_alt.capitalize()
del item.channel_alt
if item.url_alt:
item.url = item.url_alt
del item.url_alt
# Primero creamos un título para TODAS las Temporadas
# Pasada por TMDB a Serie, para datos adicionales
try:
tmdb.set_infoLabels(item, True) #TMDB de cada Temp
except:
pass
item_season = item.clone()
if item_season.season_colapse: #Quitamos el indicador de listado por Temporadas
del item_season.season_colapse
title = '** Todas las Temporadas' #Agregamos título de TODAS las Temporadas (modo tradicional)
if item_season.infoLabels['number_of_episodes']: #Ponemos el núm de episodios de la Serie
title += ' [%s epi]' % str(item_season.infoLabels['number_of_episodes'])
rating = '' #Ponemos el rating, si es diferente del de la Serie
if item_season.infoLabels['rating'] and item_season.infoLabels['rating'] != 0.0:
try:
rating = float(item_season.infoLabels['rating'])
rating = round(rating, 1)
except:
pass
if rating and rating == 0.0:
rating = ''
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
title = '%s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (title, str(item_season.infoLabels['year']), rating, item_season.quality, str(item_season.language))
else: #Lo arreglamos un poco para Unify
title = title.replace('[', '-').replace(']', '-').replace('.', ',').strip()
title = title.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
itemlist_temporadas.append(item_season.clone(title=title, from_title_season_colapse=item.title))
#Repasamos todos los episodios para detectar las diferentes temporadas
for item_local in itemlist:
if item_local.contentSeason != season:
season = item_local.contentSeason #Si se detecta una temporada distinta se prepara un título
item_season = item.clone()
item_season.contentSeason = item_local.contentSeason #Se pone el núm de Temporada para obtener mejores datos de TMDB
item_season.title = 'Temporada %s' % item_season.contentSeason
itemlist_temporadas.append(item_season.clone(from_title_season_colapse=item.title))
#Si hay más de una temporada se sigue, si no se devuelve el Itemlist original
if len(itemlist_temporadas) > 2:
for item_local in itemlist_temporadas:
if "** Todas las Temporadas" in item_local.title: #Si es el título de TODAS las Temporadas, lo ignoramos
continue
# Pasada por TMDB a las Temporada
try:
tmdb.set_infoLabels(item_local, True) #TMDB de cada Temp
except:
pass
if item_local.infoLabels['temporada_air_date']: #Fecha de emisión de la Temp
item_local.title += ' [%s]' % str(scrapertools.find_single_match(str(item_local.infoLabels['temporada_air_date']), r'\/(\d{4})'))
#rating = '' #Ponemos el rating, si es diferente del de la Serie
#if item_local.infoLabels['rating'] and item_local.infoLabels['rating'] != 0.0:
# try:
# rating = float(item_local.infoLabels['rating'])
# rating = round(rating, 1)
# except:
# pass
#if rating and rating > 0.0:
# item_local.title += ' [%s]' % str(rating)
if item_local.infoLabels['temporada_num_episodios']: #Núm. de episodios de la Temp
item_local.title += ' [%s epi]' % str(item_local.infoLabels['temporada_num_episodios'])
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
item_local.title = '%s [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.title, item_local.quality, str(item_local.language))
else: #Lo arreglamos un poco para Unify
item_local.title = item_local.title.replace('[', '-').replace(']', '-').replace('.', ',').strip()
item_local.title = item_local.title.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
#logger.debug(item_local)
else: #Si hay más de una temporada se sigue, si no se devuelve el Itemlist original
if item.season_colapse:
del item.season_colapse
return (item, itemlist)
#Permitimos la actualización de los títulos, bien para uso inmediato, o para añadir a la videoteca
itemlist_temporadas.append(item.clone(title="** [COLOR yelow]Actualizar Títulos - vista previa videoteca[/COLOR] **", action="actualizar_titulos", tmdb_stat=False, from_action=item.action, from_title_tmdb=item.title, from_update=True))
#Es un canal estándar, sólo una linea de Añadir a Videoteca
title = ''
if item.infoLabels['status'] and item.infoLabels['status'].lower() == "ended":
title += ' [TERMINADA]'
itemlist_temporadas.append(item_season.clone(title="[COLOR yellow]Añadir esta serie a videoteca-[/COLOR]" + title, action="add_serie_to_library", extra="episodios", add_menu=True))
#Si intervención judicial, alerto!!!
if item.intervencion:
for clone_inter, autoridad in item.intervencion:
thumb_intervenido = get_thumb(autoridad)
itemlist_fo.append(item.clone(action='', title="[COLOR yellow]" + clone_inter.capitalize() + ': [/COLOR]' + intervenido_judicial + '. Reportar el problema en el foro', thumbnail=thumb_intervenido))
del item.intervencion
#Si ha habido fail-over, lo comento
if channel_alt:
itemlist_fo.append(item.clone(action='', title="[COLOR yellow]" + channel_alt.capitalize() + '[/COLOR] [ALT ] en uso'))
itemlist_fo.append(item.clone(action='', title="[COLOR yellow]" + item.category.capitalize() + '[/COLOR] inaccesible'))
if len(itemlist_fo) > 0:
itemlist_temporadas = itemlist_fo + itemlist_temporadas
return (item, itemlist_temporadas)
def post_tmdb_episodios(item, itemlist):
logger.info()
itemlist_fo = []
@@ -463,6 +635,8 @@ def post_tmdb_episodios(item, itemlist):
del item.title_from_channel
if item.ow_force:
del item.ow_force
if item.season_colapse:
del item.season_colapse
for item_local in itemlist: #Recorremos el Itemlist generado por el canal
if item_local.add_videolibrary:
@@ -491,6 +665,8 @@ def post_tmdb_episodios(item, itemlist):
del item_local.intervencion
if item_local.ow_force:
del item_local.ow_force
if item_local.season_colapse:
del item_local.season_colapse
#logger.debug(item_local)
#Ajustamos el nombre de la categoría si es un clone de NewPct1
@@ -548,6 +724,8 @@ def post_tmdb_episodios(item, itemlist):
if item_local.infoLabels['rating'] and item_local.infoLabels['rating'] != 0.0:
rating = float(item_local.infoLabels['rating'])
rating = round(rating, 1)
if rating == 0.0:
rating = ''
except:
pass
@@ -708,7 +886,7 @@ def post_tmdb_findvideos(item, itemlist):
En Itemlist devuelve un Item con el pseudotítulo. Ahí el canal irá agregando el resto.
"""
#logger.debug(item)
logger.debug(item)
#Creción de título general del vídeo a visualizar en Findvideos
itemlist = []
@@ -733,7 +911,10 @@ def post_tmdb_findvideos(item, itemlist):
# tmdb.set_infoLabels(item, True)
#elif (not item.infoLabels['tvdb_id'] and item.contentType == 'episode') or item.contentChannel == "videolibrary":
# tmdb.set_infoLabels(item, True)
tmdb.set_infoLabels(item, True)
try:
tmdb.set_infoLabels(item, True) #TMDB de cada Temp
except:
pass
#Restauramos la información de max num. de episodios por temporada despues de TMDB
try:
if item.infoLabels['temporada_num_episodios']:
@@ -762,6 +943,8 @@ def post_tmdb_findvideos(item, itemlist):
if item.infoLabels['rating'] and item.infoLabels['rating'] != 0.0:
rating = float(item.infoLabels['rating'])
rating = round(rating, 1)
if rating == 0.0:
rating = ''
except:
pass
@@ -795,8 +978,8 @@ def post_tmdb_findvideos(item, itemlist):
tiempo_final = tiempo_final / 60 #Lo transformo a minutos
horas = tiempo_final / 60 #Lo transformo a horas
resto = tiempo_final - (horas * 60) #guardo el resto de minutos de la hora
if not scrapertools.find_single_match(item.quality, '(\[\d+:\d+\])'): #si ya tiene la duración, pasamos
item.quality += ' [%s:%s]' % (str(horas).zfill(2), str(resto).zfill(2)) #Lo agrego a Calidad del Servidor
if not scrapertools.find_single_match(item.quality, '(\[\d+:\d+)'): #si ya tiene la duración, pasamos
item.quality += ' [COLOR white][%s:%s h]' % (str(horas).zfill(2), str(resto).zfill(2)) #Lo agrego a Calidad del Servidor
except:
pass
@@ -849,6 +1032,14 @@ def post_tmdb_findvideos(item, itemlist):
#Pintamos el pseudo-título con toda la información disponible del vídeo
itemlist.append(item.clone(action="", server = "", title=title_gen)) #Título con todos los datos del vídeo
if item.action == 'show_result': #Viene de una búsqueda global
channel = item.channel.capitalize()
if item.from_channel == channel_py or item.channel == channel_py:
channel = item.category
elif item.from_channel:
channel = item.from_channel.capitalize()
item.quality = '[COLOR yellow][%s][/COLOR] %s' % (channel, item.quality)
#agregamos la opción de Añadir a Videoteca para péliculas (no series)
if item.contentType == 'movie' and item.contentChannel != "videolibrary":
#Permitimos la actualización de los títulos, bien para uso inmediato, o para añadir a la videoteca
@@ -989,11 +1180,13 @@ def fail_over_newpct1(item, patron, patron2=None, timeout=None):
#Cargamos en .json del canal para ver las listas de valores en settings
fail_over = channeltools.get_channel_json(channel_py)
for settings in fail_over['settings']: #Se recorren todos los settings
if settings['id'] == "clonenewpct1_channels_list": #Encontramos en setting
if settings['id'] == "clonenewpct1_channels_list": #Encontramos en setting
fail_over = settings['default'] #Carga lista de clones
break
fail_over_list = ast.literal_eval(fail_over)
if item.from_channel: #Desde search puede venir con el nombre de canal equivocado
item.channel = item.from_channel
#Recorremos el Array identificando el canal que falla
for active, channel, channel_host, contentType, action_excluded in fail_over_list:
if item.channel == channel_py:
@@ -1013,6 +1206,7 @@ def fail_over_newpct1(item, patron, patron2=None, timeout=None):
break
if not channel_failed:
logger.error('Patrón: ' + str(patron) + ' / fail_over_list: ' + str(fail_over_list))
logger.error(item)
return (item, data) #Algo no ha funcionado, no podemos hacer nada

View File

@@ -12,11 +12,10 @@ import re
import time
import urllib
from base64 import b64decode
from platformcode import logger
import xbmc
from core import httptools
from platformcode import config
def find_in_text(regex, text, flags=re.IGNORECASE | re.DOTALL):
@@ -28,24 +27,30 @@ def find_in_text(regex, text, flags=re.IGNORECASE | re.DOTALL):
class UnshortenIt(object):
_adfly_regex = r'adf\.ly|q\.gs|j\.gs|u\.bb|ay\.gy|threadsphere\.bid|restorecosm\.bid|clearload\.bid'
_adfly_regex = r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|microify\.com|threadsphere\.bid|clearload\.bid'
_linkbucks_regex = r'linkbucks\.com|any\.gs|cash4links\.co|cash4files\.co|dyo\.gs|filesonthe\.net|goneviral\.com|megaline\.co|miniurls\.co|qqc\.co|seriousdeals\.net|theseblogs\.com|theseforums\.com|tinylinks\.co|tubeviral\.com|ultrafiles\.net|urlbeat\.net|whackyvidz\.com|yyv\.co'
_adfocus_regex = r'adfoc\.us'
_lnxlu_regex = r'lnx\.lu'
_shst_regex = r'sh\.st|gestyy\.com'
_shst_regex = r'sh\.st|festyy\.com|ceesty\.com'
_hrefli_regex = r'href\.li'
_anonymz_regex = r'anonymz\.com'
_shrink_service_regex = r'shrink-service\.it'
_rapidcrypt_regex = r'rapidcrypt\.net'
_maxretries = 5
_this_dir, _this_filename = os.path.split(__file__)
_timeout = 10
def unshorten(self, uri, type=None):
domain = urlsplit(uri).netloc
if not domain:
return uri, "No domain found in URI!"
had_google_outbound, uri = self._clear_google_outbound_proxy(uri)
if re.search(self._adfly_regex, domain,
re.IGNORECASE) or type == 'adfly':
return self._unshorten_adfly(uri)
@@ -68,15 +73,15 @@ class UnshortenIt(object):
return self._unshorten_anonymz(uri)
if re.search(self._rapidcrypt_regex, domain, re.IGNORECASE):
return self._unshorten_rapidcrypt(uri)
return uri, 200
def unwrap_30x(self, uri, timeout=10):
domain = urlsplit(uri).netloc
self._timeout = timeout
loop_counter = 0
try:
if loop_counter > 5:
raise ValueError("Infinitely looping redirect from URL: '%s'" %
(uri,))
# headers stop t.co from working so omit headers if this is a t.co link
if domain == 't.co':
r = httptools.downloadpage(uri, timeout=self._timeout)
@@ -86,29 +91,28 @@ class UnshortenIt(object):
r = httptools.downloadpage(uri, timeout=self._timeout)
uri = re.findall(r'.*url\=(.*?)\"\.*', r.data)[0]
return uri, r.code
else:
while True:
retries = 0
while True:
r = httptools.downloadpage(
uri,
timeout=self._timeout,
follow_redirects=False)
if not r.sucess:
return uri, -1
if 'location' in r.headers and retries < self._maxretries:
r = httptools.downloadpage(
uri,
timeout=self._timeout,
follow_redirects=False,
only_headers=True)
if not r.success:
return uri, -1
retries = 0
if 'location' in r.headers and retries < self._maxretries:
r = httptools.downloadpage(
r.headers['location'],
follow_redirects=False,
only_headers=True)
uri = r.url
loop_counter += 1
retries = retries + 1
else:
return r.url, r.code
r.headers['location'],
follow_redirects=False)
uri = r.url
retries += 1
else:
return r.url, r.code
except Exception as e:
return uri, str(e)
def _clear_google_outbound_proxy(self, url):
'''
So google proxies all their outbound links through a redirect so they can detect outbound links.
@@ -117,13 +121,16 @@ class UnshortenIt(object):
This is useful for doing things like parsing google search results, or if you're scraping google
docs, where google inserts hit-counters on all outbound links.
'''
# This is kind of hacky, because we need to check both the netloc AND
# part of the path. We could use urllib.parse.urlsplit, but it's
# easier and just as effective to use string checks.
if url.startswith("http://www.google.com/url?") or \
url.startswith("https://www.google.com/url?"):
qs = urlparse(url).query
query = parse_qs(qs)
if "q" in query: # Google doc outbound links (maybe blogspot, too)
return True, query["q"].pop()
elif "url" in query: # Outbound links from google searches
@@ -132,10 +139,11 @@ class UnshortenIt(object):
raise ValueError(
"Google outbound proxy URL without a target url ('%s')?" %
url)
return False, url
def _unshorten_adfly(self, uri):
logger.info()
try:
r = httptools.downloadpage(
uri, timeout=self._timeout, cookies=False)
@@ -144,11 +152,14 @@ class UnshortenIt(object):
if len(ysmm) > 0:
ysmm = re.sub(r'var ysmm \= \'|\'\;', '', ysmm[0])
left = ''
right = ''
for c in [ysmm[i:i + 2] for i in range(0, len(ysmm), 2)]:
left += c[0]
right = c[1] + right
# Additional digit arithmetic
encoded_uri = list(left + right)
numbers = ((i, n) for i, n in enumerate(encoded_uri) if str.isdigit(n))
@@ -156,9 +167,12 @@ class UnshortenIt(object):
xor = int(first[1]) ^ int(second[1])
if xor < 10:
encoded_uri[first[0]] = str(xor)
decoded_uri = b64decode("".join(encoded_uri).encode())[16:-16].decode()
if re.search(r'go\.php\?u\=', decoded_uri):
decoded_uri = b64decode(re.sub(r'(.*?)u=', '', decoded_uri)).decode()
return decoded_uri, r.code
else:
return uri, 'No ysmm variable found'
@@ -170,15 +184,25 @@ class UnshortenIt(object):
'''
(Attempt) to decode linkbucks content. HEAVILY based on the OSS jDownloader codebase.
This has necessidated a license change.
'''
if config.is_xbmc():
import xbmc
r = httptools.downloadpage(uri, timeout=self._timeout)
firstGet = time.time()
baseloc = r.url
if "/notfound/" in r.url or \
"(>Link Not Found<|>The link may have been deleted by the owner|To access the content, you must complete a quick survey\.)" in r.data:
return uri, 'Error: Link not found or requires a survey!'
link = None
content = r.data
regexes = [
r"<div id=\"lb_header\">.*?/a>.*?<a.*?href=\"(.*?)\".*?class=\"lb",
r"AdBriteInit\(\"(.*?)\"\)",
@@ -187,49 +211,69 @@ class UnshortenIt(object):
r"src=\"http://static\.linkbucks\.com/tmpl/mint/img/lb\.gif\" /></a>.*?<a href=\"(.*?)\"",
r"id=\"content\" src=\"([^\"]*)",
]
for regex in regexes:
if self.inValidate(link):
link = find_in_text(regex, content)
if self.inValidate(link):
match = find_in_text(r"noresize=\"[0-9+]\" src=\"(http.*?)\"", content)
if match:
link = find_in_text(r"\"frame2\" frameborder.*?src=\"(.*?)\"", content)
if self.inValidate(link):
scripts = re.findall("(<script type=\"text/javascript\">[^<]+</script>)", content)
if not scripts:
return uri, "No script bodies found?"
js = False
for script in scripts:
# cleanup
script = re.sub(r"[\r\n\s]+\/\/\s*[^\r\n]+", "", script)
if re.search(r"\s*var\s*f\s*=\s*window\['init'\s*\+\s*'Lb'\s*\+\s*'js'\s*\+\s*''\];[\r\n\s]+", script):
js = script
if not js:
return uri, "Could not find correct script?"
token = find_in_text(r"Token\s*:\s*'([a-f0-9]{40})'", js)
if not token:
token = find_in_text(r"\?t=([a-f0-9]{40})", js)
assert token
authKeyMatchStr = r"A(?:'\s*\+\s*')?u(?:'\s*\+\s*')?t(?:'\s*\+\s*')?h(?:'\s*\+\s*')?K(?:'\s*\+\s*')?e(?:'\s*\+\s*')?y"
l1 = find_in_text(r"\s*params\['" + authKeyMatchStr + r"'\]\s*=\s*(\d+?);", js)
l2 = find_in_text(
r"\s*params\['" + authKeyMatchStr + r"'\]\s*=\s?params\['" + authKeyMatchStr + r"'\]\s*\+\s*(\d+?);",
js)
if any([not l1, not l2, not token]):
return uri, "Missing required tokens?"
authkey = int(l1) + int(l2)
p1_url = urljoin(baseloc, "/director/?t={tok}".format(tok=token))
r2 = httptools.downloadpage(p1_url, timeout=self._timeout)
p1_url = urljoin(baseloc, "/scripts/jquery.js?r={tok}&{key}".format(tok=token, key=l1))
r2_1 = httptools.downloadpage(p1_url, timeout=self._timeout)
r2 = httptools.downloadpage(p1_url, timeout=self._timeout)
time_left = 5.033 - (time.time() - firstGet)
xbmc.sleep(max(time_left, 0) * 1000)
if config.is_xbmc():
xbmc.sleep(max(time_left, 0) * 1000)
else:
time.sleep(5 * 1000)
p3_url = urljoin(baseloc, "/intermission/loadTargetUrl?t={tok}&aK={key}&a_b=false".format(tok=token,
key=str(authkey)))
r3 = httptools.downloadpage(p3_url, timeout=self._timeout)
resp_json = json.loads(r3.data)
if "Url" in resp_json:
return resp_json['Url'], r3.code
return "Wat", "wat"
def inValidate(self, s):
@@ -237,23 +281,30 @@ class UnshortenIt(object):
# (s == null || s != null && (s.matches("[\r\n\t ]+") || s.equals("") || s.equalsIgnoreCase("about:blank")))
if not s:
return True
if re.search("[\r\n\t ]+", s) or s.lower() == "about:blank":
return True
else:
return False
def _unshorten_adfocus(self, uri):
orig_uri = uri
try:
r = httptools.downloadpage(uri, timeout=self._timeout)
html = r.data
adlink = re.findall("click_url =.*;", html)
if len(adlink) > 0:
uri = re.sub('^click_url = "|"\;$', '', adlink[0])
if re.search(r'http(s|)\://adfoc\.us/serve/skip/\?id\=', uri):
http_header = dict()
http_header["Host"] = "adfoc.us"
http_header["Referer"] = orig_uri
r = httptools.downloadpage(uri, headers=http_header, timeout=self._timeout)
uri = r.url
return uri, r.code
else:
@@ -283,16 +334,23 @@ class UnshortenIt(object):
try:
r = httptools.downloadpage(uri, timeout=self._timeout)
html = r.data
session_id = re.findall(r'sessionId\:(.*?)\"\,', html)
if len(session_id) > 0:
session_id = re.sub(r'\s\"', '', session_id[0])
http_header = dict()
http_header["Content-Type"] = "application/x-www-form-urlencoded"
http_header["Host"] = "sh.st"
http_header["Referer"] = uri
http_header["Origin"] = "http://sh.st"
http_header["X-Requested-With"] = "XMLHttpRequest"
xbmc.sleep(5 * 1000)
if config.is_xbmc():
xbmc.sleep(5 * 1000)
else:
time.sleep(5 * 1000)
payload = {'adSessionId': session_id, 'callback': 'c'}
r = httptools.downloadpage(
'http://sh.st/shortest-url/end-adsession?' +
@@ -300,6 +358,7 @@ class UnshortenIt(object):
headers=http_header,
timeout=self._timeout)
response = r.data[6:-2].decode('utf-8')
if r.code == 200:
resp_uri = json.loads(response)['destinationUrl']
if resp_uri is not None:
@@ -325,8 +384,7 @@ class UnshortenIt(object):
r = httptools.downloadpage(
extracted_uri,
timeout=self._timeout,
follow_redirects=False,
only_headers=True)
follow_redirects=False)
return r.url, r.code
except Exception as e:
return uri, str(e)
@@ -339,9 +397,12 @@ class UnshortenIt(object):
try:
r = httptools.downloadpage(uri, timeout=self._timeout, cookies=False)
html = r.data
uri = re.findall(r"<input type='hidden' name='\d+' id='\d+' value='([^']+)'>", html)[0]
from core import scrapertools
uri = scrapertools.decodeHtmlentities(uri)
uri = uri.replace("&sol;", "/") \
.replace("&colon;", ":") \
.replace("&period;", ".") \
@@ -349,6 +410,7 @@ class UnshortenIt(object):
.replace("&num;", "#") \
.replace("&quest;", "?") \
.replace("&lowbar;", "_")
return uri, r.code
except Exception as e:
@@ -358,7 +420,9 @@ class UnshortenIt(object):
try:
r = httptools.downloadpage(uri, timeout=self._timeout, cookies=False)
html = r.data
uri = re.findall(r'<a class="button" href="([^"]+)">Click to continue</a>', html)[0]
uri = re.findall(r'<a class="push_button blue" href=([^>]+)>', html)[0]
return uri, r.code
except Exception as e:
@@ -379,6 +443,7 @@ def unshorten_only(uri, type=None, timeout=10):
def unshorten(uri, type=None, timeout=10):
unshortener = UnshortenIt()
uri, status = unshortener.unwrap_30x(uri, timeout=timeout)
uri, status = unshortener.unshorten(uri, type=type)
if status == 200:
uri, status = unshortener.unwrap_30x(uri, timeout=timeout)

View File

@@ -15,27 +15,26 @@ __settings__ = xbmcaddon.Addon(id="plugin.video." + PLUGIN_NAME)
__language__ = __settings__.getLocalizedString
def get_addon_version(linea_inicio=0, total_lineas=2):
def get_addon_version(with_fix=True):
'''
Devuelve el número de de versión del addon, obtenido desde el archivo addon.xml
Devuelve el número de versión del addon, y opcionalmente número de fix si lo hay
'''
return __settings__.getAddonInfo('version')
path = os.path.join(get_runtime_path(), "addon.xml")
f = open(path, "rb")
data = []
for x, line in enumerate(f):
if x < linea_inicio: continue
if len(data) == total_lineas: break
data.append(line)
f.close()
data1 = "".join(data)
# <addon id="plugin.video.alfa" name="Alfa" version="2.5.21" provider-name="Alfa Addon">
aux = re.findall('<addon id="plugin.video.alfa" name="Alfa" version="([^"]+)"', data1, re.MULTILINE | re.DOTALL)
version = "???"
if len(aux) > 0:
version = aux[0]
return version
if with_fix:
return __settings__.getAddonInfo('version') + get_addon_version_fix()
else:
return __settings__.getAddonInfo('version')
def get_addon_version_fix():
    '''
    Return the hot-fix suffix for the installed addon version.

    Reads 'last_fix.json' from the addon runtime path (it is stored there, not
    in the data path, so it is wiped automatically on a full version upgrade)
    and extracts the "fix_version" number written by the updater.

    @return: '.fixN' when a fix number is recorded, otherwise '' (also on any
             read/parse problem — this is deliberately best-effort).
    @rtype: str
    '''
    try:
        # File written by the updater with the user's currently applied fix
        last_fix_json = os.path.join(get_runtime_path(), 'last_fix.json')
        if os.path.exists(last_fix_json):
            # 'with' closes the file even on error; no explicit close() needed
            with open(last_fix_json, 'r') as f:
                data = f.read()
            # Raw string so \s and \d are real regex escapes, not str escapes
            fix = re.findall(r'"fix_version"\s*:\s*(\d+)', data)
            if fix:
                return '.fix%s' % fix[0]
    except Exception:
        # Any failure (missing helper, bad JSON, I/O error) means "no fix"
        pass
    return ''
def get_platform(full_version=False):
"""

View File

@@ -1047,14 +1047,14 @@ def play_torrent(item, xlistitem, mediaurl):
# Plugins externos
if seleccion > 1:
mediaurl = urllib.quote_plus(item.url)
if "quasar" in torrent_options[seleccion][1] and item.infoLabels['tmdb_id']: #Llamada con más parámetros para completar el título
if item.contentType == 'episode':
if ("quasar" in torrent_options[seleccion][1] or "elementum" in torrent_options[seleccion][1]) and item.infoLabels['tmdb_id']: #Llamada con más parámetros para completar el título
if item.contentType == 'episode' and "elementum" not in torrent_options[seleccion][1]:
mediaurl += "&episode=%s&library=&season=%s&show=%s&tmdb=%s&type=episode" % (item.infoLabels['episode'], item.infoLabels['season'], item.infoLabels['tmdb_id'], item.infoLabels['tmdb_id'])
else:
elif item.contentType == 'movie':
mediaurl += "&library=&tmdb=%s&type=movie" % (item.infoLabels['tmdb_id'])
xbmc.executebuiltin("PlayMedia(" + torrent_options[seleccion][1] % mediaurl + ")")
if "quasar" in torrent_options[seleccion][1]: #Seleccionamos que clientes torrent soportamos
if "quasar" in torrent_options[seleccion][1] or "elementum" in torrent_options[seleccion][1]: #Seleccionamos que clientes torrent soportamos
if item.strm_path: #Sólo si es de Videoteca
import time
time_limit = time.time() + 150 #Marcamos el timepo máx. de buffering

View File

@@ -0,0 +1,145 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------------
# Updater (kodi)
# --------------------------------------------------------------------------------
import os
import time
import threading
from platformcode import config, logger, platformtools
from core import httptools
from core import jsontools
from core import downloadtools
from core import ziptools
from core import filetools
def check_addon_init():
    '''
    Start the background service that periodically checks for addon fixes.

    Spawns a daemon-style thread running an infinite monitor loop (on Kodi);
    if threading is unavailable, falls back to a single synchronous check.
    Reads settings 'addon_update_timer' (hours between checks, 0 disables)
    and 'addon_update_message' (show notifications).
    '''
    logger.info()

    # Monitor subtask: wakes up every X hours to check for addon FIXES
    def check_addon_monitor():
        logger.info()

        # Read the update interval and whether notifications are wanted
        try:
            timer = int(config.get_setting('addon_update_timer'))          # Interval between updates, from Alfa settings
            if timer <= 0:
                return                                                     # 0 means updates are disabled
            verbose = config.get_setting('addon_update_message')
        except:
            timer = 12                                                     # Default: every 12 hours
            verbose = False                                                # Default: no notifications
        timer = timer * 3600                                               # Convert hours to seconds

        if config.get_platform(True)['num_version'] >= 14:                 # On Kodi, launch the monitor loop
            import xbmc
            monitor = xbmc.Monitor()
        else:                                                              # Not Kodi: run one check and exit
            check_addon_updates(verbose)                                   # Run the update check
            return

        while not monitor.abortRequested():                                # Loop until Kodi shuts down
            check_addon_updates(verbose)                                   # Run the update check
            if monitor.waitForAbort(timer):                                # Sleep the programmed time or until Kodi aborts
                break                                                      # Kodi is shutting down, leave
        return

    # Launch the fix-update service in its own thread
    try:
        threading.Thread(target=check_addon_monitor).start()               # Independent thread, lives until Kodi exits
        time.sleep(5)                                                      # Let the first check finish...
    except:                                                                # If threading fails, run the check once inline
        try:
            timer = int(config.get_setting('addon_update_timer'))          # Interval between updates, from Alfa settings
            if timer <= 0:
                return                                                     # 0 means updates are disabled
            verbose = config.get_setting('addon_update_message')
        except:
            verbose = False                                                # Default: no notifications
            pass
        check_addon_updates(verbose)                                       # Run the update check
        time.sleep(5)                                                      # Let the first check finish...
    return
def check_addon_updates(verbose=False):
    '''
    Check the Alfa update server for hot-fixes and apply them if pending.

    Downloads a JSON manifest, compares it against the installed addon
    version and the locally recorded fix level, and when a newer fix exists
    downloads and unzips it over the addon directory, then records the
    applied fix in 'last_fix.json'.

    @param verbose: show Kodi notifications about the outcome
    @type verbose: bool
    @return: True if an update was applied, False otherwise (including errors)
    @rtype: bool
    '''
    logger.info()

    ADDON_UPDATES_JSON = 'http://extra.alfa-addon.com/addon_updates/updates.json'
    ADDON_UPDATES_ZIP = 'http://extra.alfa-addon.com/addon_updates/updates.zip'

    try:
        last_fix_json = os.path.join(config.get_runtime_path(), 'last_fix.json')   # Info about the user's applied fix version
        # Stored in get_runtime_path instead of get_data_path so it is removed on version upgrade

        # Download the JSON with the available updates
        # -----------------------------------------------
        data = httptools.downloadpage(ADDON_UPDATES_JSON, timeout=2).data
        if data == '':
            logger.info('No se encuentran actualizaciones del addon')
            if verbose:
                platformtools.dialog_notification('Alfa ya está actualizado', 'No hay ninguna actualización urgente')
            return False

        data = jsontools.load(data)
        if 'addon_version' not in data or 'fix_version' not in data:
            logger.info('No hay actualizaciones del addon')
            if verbose:
                platformtools.dialog_notification('Alfa ya está actualizado', 'No hay ninguna actualización urgente')
            return False

        # Compare the user's installed version with the update's version
        # --------------------------------------------------------------------------------
        current_version = config.get_addon_version(with_fix=False)
        if current_version != data['addon_version']:
            logger.info('No hay actualizaciones para la versión %s del addon' % current_version)
            if verbose:
                platformtools.dialog_notification('Alfa ya está actualizado', 'No hay ninguna actualización urgente')
            return False

        if os.path.exists(last_fix_json):
            lastfix = jsontools.load(filetools.read(last_fix_json))
            if lastfix['addon_version'] == data['addon_version'] and lastfix['fix_version'] == data['fix_version']:
                logger.info('Ya está actualizado con los últimos cambios. Versión %s.fix%d' % (data['addon_version'], data['fix_version']))
                if verbose:
                    platformtools.dialog_notification('Alfa ya está actualizado', 'Versión %s.fix%d' % (data['addon_version'], data['fix_version']))
                return False

        # Download the zip with the updates
        # -------------------------------------
        localfilename = os.path.join(config.get_data_path(), 'temp_updates.zip')
        if os.path.exists(localfilename): os.remove(localfilename)

        downloadtools.downloadfile(ADDON_UPDATES_ZIP, localfilename, silent=True)

        # Unzip it inside the addon directory
        # ---------------------------------
        unzipper = ziptools.ziptools()
        unzipper.extract(localfilename, config.get_runtime_path())

        # Delete the downloaded zip
        # ------------------------
        os.remove(localfilename)

        # Record which fix version is now applied
        # -----------------------------------------
        if 'files' in data: data.pop('files', None)
        filetools.write(last_fix_json, jsontools.dump(data))

        logger.info('Addon actualizado correctamente a %s.fix%d' % (data['addon_version'], data['fix_version']))
        if verbose:
            platformtools.dialog_notification('Alfa actualizado a', 'Versión %s.fix%d' % (data['addon_version'], data['fix_version']))
        return True

    except:
        # Best-effort: any failure (network, zip, disk) is logged, never raised
        logger.error('Error al comprobar actualizaciones del addon!')
        if verbose:
            platformtools.dialog_notification('Alfa actualizaciones', 'Error al comprobar actualizaciones')
        return False

View File

@@ -1114,7 +1114,7 @@ msgid "Alfa"
msgstr ""
msgctxt "#60262"
msgid "You can install the Trakt script below, once installed and configured what you see will be automatically synchronized with your account."
msgid "You can install the Trakt script below."
msgstr ""
msgctxt "#60263"
@@ -1886,7 +1886,7 @@ msgid "To report a problem on'http://alfa-addon.com' you need to:|the version yo
msgstr ""
msgctxt "#60468"
msgid "You can find our Telegram channel at @StreamOnDemandOfficial\nSe you have doubts you can write to us in the Telegram group: https://bit.ly/2I3kRwF"
msgid "You can find our Telegram channel at @alfa_addon\nIf you have doubts you can write to us in the Telegram group: https://t.me/alfa_addon"
msgstr ""
msgctxt "#60469"
@@ -1894,7 +1894,7 @@ msgid "Uploading new data"
msgstr ""
msgctxt "#60470"
msgid "Buscando en Tmdb......."
msgid "Searching in Tmdb......."
msgstr ""
msgctxt "#60471"
@@ -2766,7 +2766,7 @@ msgid "Beginning"
msgstr ""
msgctxt "#70013"
msgid "Terror"
msgid "Horror"
msgstr ""
msgctxt "#70014"
@@ -2902,7 +2902,7 @@ msgid "Search Movies/TV Shows"
msgstr ""
msgctxt "#70047"
msgid " Search by director"
msgid "Search by director"
msgstr ""
msgctxt "#70048"
@@ -3857,14 +3857,6 @@ msgctxt "#70288"
msgid "Configure Downloads"
msgstr ""
msgctxt "#70289"
msgid "Alfa\nCorrected an error in the adult section, the password has been reset to "
msgstr " "
msgctxt "#70290"
msgid "default, you will have to change it again if you want.\ n Type 's', if you have understood it: "
msgstr " "
msgctxt "#70291"
msgid "Error, during conversion"
msgstr ""
@@ -4277,4 +4269,521 @@ msgstr ""
msgctxt "#70392"
msgid "Rate with a [COLOR %s]%s[/ COLOR]"
msgstr ""
msgstr ""
msgctxt "#70393"
msgid "[%s]: Select the correct %s "
msgstr ""
msgctxt "#70394"
msgid "Action"
msgstr ""
msgctxt "#70395"
msgid "Sport"
msgstr ""
msgctxt "#70396"
msgid "Documentary"
msgstr ""
msgctxt "#70397"
msgid "Science fiction"
msgstr ""
msgctxt "#70398"
msgid "Talk Show"
msgstr ""
msgctxt "#70399"
msgid "Family"
msgstr ""
msgctxt "#70400"
msgid "Film-Noir"
msgstr ""
msgctxt "#70401"
msgid "Game-Show"
msgstr ""
msgctxt "#70402"
msgid "Mystery"
msgstr ""
msgctxt "#70403"
msgid "Biography"
msgstr ""
msgctxt "#70404"
msgid "Music"
msgstr ""
msgctxt "#70405"
msgid "History"
msgstr ""
msgctxt "#70406"
msgid "Reality-TV"
msgstr ""
msgctxt "#70407"
msgid "War"
msgstr ""
msgctxt "#70408"
msgid "Musical"
msgstr ""
msgctxt "#70409"
msgid "Romance"
msgstr ""
msgctxt "#70410"
msgid "Thriller"
msgstr "Thriller"
msgctxt "#70411"
msgid "Western"
msgstr ""
msgctxt "#70412"
msgid "Drama"
msgstr ""
msgctxt "#70413"
msgid "2. Enter this code on the page and click Allow: %s"
msgstr ""
msgctxt "#70414"
msgid "Authentication. Do not close this window!!"
msgstr ""
msgctxt "#70415"
msgid "Trakt.tv"
msgstr ""
msgctxt "#70416"
msgid "=== Movies ==="
msgstr ""
msgctxt "#70417"
msgid "=== TV Shows ==="
msgstr ""
msgctxt "#70418"
msgid "Search language in TMDB"
msgstr ""
msgctxt "#70419"
msgid "German"
msgstr ""
msgctxt "#70420"
msgid "French"
msgstr ""
msgctxt "#70421"
msgid "Portuguese"
msgstr ""
msgctxt "#70422"
msgid "Italian"
msgstr ""
msgctxt "#70423"
msgid "Spanish Latin"
msgstr ""
msgctxt "#70424"
msgid "Catalan"
msgstr ""
msgctxt "#70425"
msgid "English"
msgstr ""
msgctxt "#70426"
msgid "Alternative language for TMDB (No main language synopsis)"
msgstr ""
msgctxt "#70427"
msgid "Language of titles in IMDB"
msgstr ""
msgctxt "#70428"
msgid "Filmaffinity website"
msgstr ""
msgctxt "#70429"
msgid "Colombia"
msgstr ""
msgctxt "#70430"
msgid "Chile"
msgstr ""
msgctxt "#70431"
msgid "Argentina"
msgstr ""
msgctxt "#70432"
msgid "Mexico"
msgstr ""
msgctxt "#70433"
msgid "US/UK"
msgstr ""
msgctxt "#70434"
msgid "Spain"
msgstr ""
msgctxt "#70435"
msgid "User Filmaffinity (Optional)"
msgstr ""
msgctxt "#70436"
msgid "Password Filmaffinity"
msgstr ""
msgctxt "#70437"
msgid "Order personal lists of Filmaffinity by:"
msgstr ""
msgctxt "#70438"
msgid "Position"
msgstr ""
msgctxt "#70439"
msgid "Vote"
msgstr ""
msgctxt "#70440"
msgid "Average grade"
msgstr ""
msgctxt "#70441"
msgid "User MyAnimeList (Optional)"
msgstr ""
msgctxt "#70442"
msgid "Password MyAnimeList"
msgstr ""
msgctxt "#70443"
msgid "Show Hentai in MyAnimeList"
msgstr ""
msgctxt "#70444"
msgid "Profile 3"
msgstr ""
msgctxt "#70445"
msgid "Profile 2"
msgstr ""
msgctxt "#70446"
msgid "Profile 1"
msgstr ""
msgctxt "#70447"
msgid "[%s] The file has been deleted"
msgstr ""
msgctxt "#70448"
msgid "[%s] The file is still in process"
msgstr ""
msgctxt "#70449"
msgid ""
msgstr ""
msgctxt "#70450"
msgid "Anyone"
msgstr ""
msgctxt "#70451"
msgid "Select one, none or more than one gender"
msgstr ""
msgctxt "#70452"
msgid "Year from"
msgstr ""
msgctxt "#70453"
msgid "Year until"
msgstr ""
msgctxt "#70454"
msgid "Minimum number of votes"
msgstr ""
msgctxt "#70455"
msgid "Order by"
msgstr ""
msgctxt "#70456"
msgid "Popularity Desc"
msgstr ""
msgctxt "#70457"
msgid "Popularity Asc"
msgstr ""
msgctxt "#70458"
msgid "Year Desc"
msgstr ""
msgctxt "#70459"
msgid "Year Asc"
msgstr ""
msgctxt "#70460"
msgid "Desc Rating"
msgstr ""
msgctxt "#70461"
msgid "Asc Rating"
msgstr ""
msgctxt "#70462"
msgid "Title [A-Z]"
msgstr ""
msgctxt "#70463"
msgid "Title [Z-A]"
msgstr ""
msgctxt "#70464"
msgid "Set as default filter"
msgstr ""
msgctxt "#70465"
msgid "Key word"
msgstr ""
msgctxt "#70466"
msgid "Country"
msgstr ""
msgctxt "#70467"
msgid "Select a genre"
msgstr ""
msgctxt "#70468"
msgid "Indicate your vote"
msgstr ""
msgctxt "#70469"
msgid "Added"
msgstr ""
msgctxt "#70470"
msgid "Premiere"
msgstr ""
msgctxt "#70471"
msgid "Duration"
msgstr ""
msgctxt "#70472"
msgid "Popularity"
msgstr ""
msgctxt "#70473"
msgid "Rating"
msgstr ""
msgctxt "#70474"
msgid "Votes"
msgstr ""
msgctxt "#70475"
msgid "upward"
msgstr ""
msgctxt "#70476"
msgid "falling"
msgstr ""
msgctxt "#70477"
msgid "Upward"
msgstr ""
msgctxt "#70478"
msgid "Falling"
msgstr ""
msgctxt "#70479"
msgid "Currently watching"
msgstr ""
msgctxt "#70480"
msgid "Completed"
msgstr ""
msgctxt "#70481"
msgid "Anticipated to see"
msgstr ""
msgctxt "#70482"
msgid "Kind"
msgstr ""
msgctxt "#70483"
msgid "Special"
msgstr ""
msgctxt "#70484"
msgid "OVA"
msgstr ""
msgctxt "#70485"
msgid "Status"
msgstr ""
msgctxt "#70486"
msgid "(1) Grotesque"
msgstr ""
msgctxt "#70487"
msgid "(2) Horrible"
msgstr ""
msgctxt "#70488"
msgid "(3) Very bad"
msgstr ""
msgctxt "#70489"
msgid "(4) Bad"
msgstr ""
msgctxt "#70490"
msgid "(5) Regular"
msgstr ""
msgctxt "#70491"
msgid "(6) Pasable"
msgstr ""
msgctxt "#70492"
msgid "(7) Good"
msgstr ""
msgctxt "#70493"
msgid "(8) Very good"
msgstr ""
msgctxt "#70494"
msgid "(9) Genial"
msgstr ""
msgctxt "#70495"
msgid "(10) Masterpiece"
msgstr ""
msgctxt "#70496"
msgid "The search for "
msgstr ""
msgctxt "#70497"
msgid " did not match."
msgstr ""
msgctxt "#70498"
msgid "Producer: "
msgstr ""
msgctxt "#70499"
msgid "Genre: "
msgstr ""
msgctxt "#70500"
msgid "Notification([COLOR red][B]Update Kodi to its latest version[/B][/COLOR], [COLOR skyblue]for best info[/COLOR],8000, \"http://i.imgur.com/mHgwcn3.png\")"
msgstr ""
msgctxt "#70501"
msgid "Search did not match (%s)"
msgstr ""
msgctxt "#70502"
msgid ">> Next"
msgstr ""
msgctxt "#70503"
msgid "There is no available video"
msgstr ""
msgctxt "#70504"
msgid "Loading trailers..."
msgstr ""
msgctxt "#70505"
msgid "Enter the title to search"
msgstr ""
msgctxt "#70506"
msgid "Searching: "
msgstr ""
msgctxt "#70507"
msgid "Search in Youtube"
msgstr ""
msgctxt "#70508"
msgid "Search in Abandomoviez"
msgstr ""
msgctxt "#70509"
msgid "Search in Jayhap (Youtube, Vimeo & Dailymotion)"
msgstr ""
msgctxt "#70510"
msgid "Manual Search in Youtube"
msgstr ""
msgctxt "#70511"
msgid "Manual Search in Abandomoviez"
msgstr ""
msgctxt "#70512"
msgid "Searching in abandomoviez"
msgstr ""
msgctxt "#70513"
msgid "Manual Searching in Filmaffinity"
msgstr ""
msgctxt "#70514"
msgid "Manual Search in Jayhap"
msgstr ""
msgctxt "#70515"
msgid "[COLOR aquamarine][B]Completed %s[/B][/COLOR]"
msgstr ""
msgctxt "#70516"
msgid "[COLOR aquamarine][B]In progress %s[/B][/COLOR]"
msgstr ""
msgctxt "#70517"
msgid "Pre-selected currently activated"
msgstr ""
msgctxt "#70518"
msgid "Pre-select all"
msgstr ""
msgctxt "#70519"
msgid "Do not pre-select any"
msgstr ""
msgctxt "#70520"
msgid "AutoPlay allows auto to reproduce the links directly, based on the configuration of your \nfavorite servers and qualities. "
msgstr ""
msgctxt "#70521"
msgid "You can install the Trakt script below, \nonce installed and configured what \nyou see will be synchronized with your account automatically. \nDo you want to continue?"
msgstr ""

View File

@@ -1102,8 +1102,8 @@ msgid "Alfa"
msgstr "Alfa"
msgctxt "#60262"
msgid "You can install the Trakt script below, once installed and configured what you see will be automatically synchronized with your account."
msgstr "Puoi installare lo script Trakt qui sotto, una volta installato e configurato ciò che vedi sarà sincronizzato automaticamente con il tuo account."
msgid "You can install the Trakt script below."
msgstr "Puoi installare lo script Trakt qui sotto."
msgctxt "#60263"
msgid "Do you want to continue?"
@@ -1874,8 +1874,8 @@ msgid "To report a problem on'http://alfa-addon.com' you need to:|the version yo
msgstr "Per segnalare un problema su 'http://alfa-addon.com' è necessario:|la versione che si sta usando di Alpha.|La versione che si sta usando di kodi, mediaserver, ecc.|la versione e il nome del sistema operativo che si sta usando.|Il nome della skin (nel caso in cui si stia usando Kodi) e se l'utilizzo della skin predefinita ha risolto il problema.|La descrizione del problema e tutti i casi di test.Per attivare il log in modalità dettagliata, andare su:|Configurazione.|Preferenze.|Nella scheda Generale - Selezionare l'opzione: Genera log dettagliato Il file di log dettagliato si trova nel seguente percorso: \n\n%s"
msgctxt "#60468"
msgid "You can find our Telegram channel at @StreamOnDemandOfficial\nSe you have doubts you can write to us in the Telegram group: https://bit.ly/2I3kRwF"
msgstr "Puoi trovare il nostro canale Telegram in @StreamOnDemandOfficial\nSe hai dubbi puoi scriverci nel gruppo Telegram: https://bit.ly/2I3kRwF"
msgid "You can find our Telegram channel at @alfa_addon\nIf you have doubts you can write to us in the Forum: http://alfa-addon.com"
msgstr "Scopri le novità, i suggerimenti o le opzioni che non conosci su Telegram: @alfa_addon.\nSe hai problemi o dubbi, puoi rivolgerti al Forum: http://alfa-addon.com"
msgctxt "#60469"
msgid "Uploading new data"
@@ -2754,7 +2754,7 @@ msgid "Beginnin"
msgstr "Inizio"
msgctxt "#70013"
msgid "Terror"
msgid "Horror"
msgstr "Orrore"
msgctxt "#70014"
@@ -2890,7 +2890,7 @@ msgid "Search Movies/TV Shows"
msgstr "Cerca Film/Serie TV"
msgctxt "#70047"
msgid " Search by director"
msgid "Search by director"
msgstr "Cerca per regista"
msgctxt "#70048"
@@ -3841,14 +3841,6 @@ msgctxt "#70288"
msgid "Configure Downloads"
msgstr "Configura Downloads"
msgctxt "#70289"
msgid "Alfa\nCorrected an error in the adult section, the password has been reset to "
msgstr "Alfa\nCorretto un errore nella sezione adulti, la password è sta resettata a quella di "
msgctxt "#70290"
msgid "default, you will have to change it again if you want.\ n Type 's', if you have understood it: "
msgstr "default, puoi cambiarla di nuovo se vuoi.\n Scrivi 's', se hai capito: "
msgctxt "#70291"
msgid "Error, during conversion"
msgstr "Errore, in conversione"
@@ -3997,7 +3989,7 @@ msgstr "[FA] Aggiungi o rimuovi da un elenco utenti"
msgctxt "#70327"
msgid "It's part of: %s"
msgstr "E'parte di: %s"
msgstr "Fa parte di: %s"
msgctxt "#70328"
msgid "%ss similarar"
@@ -4261,4 +4253,525 @@ msgstr "Aggiungi alla lista %s"
msgctxt "#70392"
msgid "Rate with a [COLOR %s]%s[/ COLOR]"
msgstr "Dai un punteggio con un [COLOR %s]%s[/COLOR]"
msgstr "Dai un punteggio con un [COLOR %s]%s[/COLOR]"
msgctxt "#70393"
msgid "[%s]: Select the correct %s "
msgstr "[%s]: Seleziona la %s corretta"
msgctxt "#70394"
msgid "Action"
msgstr "Azione"
msgctxt "#70395"
msgid "Sport"
msgstr "Sport"
msgctxt "#70396"
msgid "Documentary"
msgstr "Documentari"
msgctxt "#70397"
msgid "Science fiction"
msgstr "Fantascienza"
msgctxt "#70398"
msgid "Talk Show"
msgstr "Talk Show"
msgctxt "#70399"
msgid "Family"
msgstr "Famiglia"
msgctxt "#70400"
msgid "Film-Noir"
msgstr "Film Noir"
msgctxt "#70401"
msgid "Game-Show"
msgstr "Game Show"
msgctxt "#70402"
msgid "Mystery"
msgstr "Mistero"
msgctxt "#70403"
msgid "Biography"
msgstr "Biografia"
msgctxt "#70404"
msgid "Music"
msgstr "Musica"
msgctxt "#70405"
msgid "History"
msgstr "Storico"
msgctxt "#70406"
msgid "Reality-TV"
msgstr "Reality Show"
msgctxt "#70407"
msgid "War"
msgstr "Guerra"
msgctxt "#70408"
msgid "Musical"
msgstr "Musical"
msgctxt "#70409"
msgid "Romance"
msgstr "Romantico"
msgctxt "#70410"
msgid "Thriller"
msgstr "Thriller"
msgctxt "#70411"
msgid "Western"
msgstr "Western"
msgctxt "#70412"
msgid "Drama"
msgstr "Drammatico"
msgctxt "#70413"
msgid "2. Enter this code on the page and click Allow: %s"
msgstr "2. Inserisci questo codice nella pagina e premi Allow: %s"
msgctxt "#70414"
msgid "Authentication. Do not close this window!!"
msgstr "Autenticazione. Non chiudere questa finestra!!"
msgctxt "#70415"
msgid "Trakt.tv"
msgstr "Trakt.tv"
msgctxt "#70416"
msgid "=== Movies ==="
msgstr "=== Film ==="
msgctxt "#70417"
msgid "=== TV Shows ==="
msgstr "=== Serie TV ==="
msgctxt "#70418"
msgid "Search language in TMDB"
msgstr "Lingua di ricerca in TMDB"
msgctxt "#70419"
msgid "German"
msgstr "Tedesco"
msgctxt "#70420"
msgid "French"
msgstr "Francese"
msgctxt "#70421"
msgid "Portuguese"
msgstr "Portoghese"
msgctxt "#70422"
msgid "Italian"
msgstr "Italiano"
msgctxt "#70423"
msgid "Spanish Latin"
msgstr "Spagnolo Latino"
msgctxt "#70424"
msgid "Catalan"
msgstr "Catalano"
msgctxt "#70425"
msgid "English"
msgstr "Inglese"
msgctxt "#70426"
msgid "Alternative language for TMDB (No main language synopsis)"
msgstr "Linguaggio alternativo per TMDB (sinossi senza lingua principale)"
msgctxt "#70427"
msgid "Language of titles in IMDB"
msgstr "Lingua per i titoli in IMDB"
msgctxt "#70428"
msgid "Filmaffinity website"
msgstr "Sito Web Filmaffinity"
msgctxt "#70429"
msgid "Colombia"
msgstr "Colombia"
msgctxt "#70430"
msgid "Chile"
msgstr "Cile"
msgctxt "#70431"
msgid "Argentina"
msgstr "Argentina"
msgctxt "#70432"
msgid "Mexico"
msgstr "Messico"
msgctxt "#70433"
msgid "US/UK"
msgstr "US/UK"
msgctxt "#70434"
msgid "Spain"
msgstr "Spagna"
msgctxt "#70435"
msgid "User Filmaaffinity (Optional)"
msgstr "Username Filmaffinity (Opzionale)"
msgctxt "#70436"
msgid "Password Filmaffinity"
msgstr "Password Filmaffinity"
msgctxt "#70437"
msgid "Order personal lists of Filmaffinity by:"
msgstr "Ordina liste personali di Filmaffinity per:"
msgctxt "#70438"
msgid "Position"
msgstr "Posizione"
msgctxt "#70439"
msgid "Vote"
msgstr "Voto"
msgctxt "#70440"
msgid "Average grade"
msgstr "Valutazione media"
msgctxt "#70441"
msgid "User MyAnimeList (Optional)"
msgstr "Username MyAnimeList (Opzionale)"
msgctxt "#70442"
msgid "Password MyAnimeList"
msgstr "Password MyAnimeList"
msgctxt "#70443"
msgid "Show Hentai in MyAnimeList"
msgstr "Mostra Hentai in MyAnimeList"
msgctxt "#70444"
msgid "Profile 3"
msgstr "Profilo 3"
msgctxt "#70445"
msgid "Profile 2"
msgstr "Profilo 2"
msgctxt "#70446"
msgid "Profile 1"
msgstr "Profilo 1"
msgctxt "#70447"
msgid "[%s] The file has been deleted"
msgstr "[%s] Il file è stato cancellato"
msgctxt "#70448"
msgid "[%s] The file is still in process"
msgstr "[%s] Il file è ancora in elaborazione"
msgctxt "#70449"
msgid "[%s] The file does not exist or has been deleted"
msgstr "[%s] Il file non esiste oppure è stato cancellato"
msgctxt "#70450"
msgid "Anyone"
msgstr "Qualsiasi"
msgctxt "#70451"
msgid "Select one, none or more than one gender"
msgstr "Seleziona uno, nessuno o più di un genere"
msgctxt "#70452"
msgid "Year from"
msgstr "Anno da:"
msgctxt "#70453"
msgid "Year until"
msgstr "Anno fino a:"
msgctxt "#70454"
msgid "Minimum number of votes"
msgstr "Numero minimo di voti"
msgctxt "#70455"
msgid "Order by"
msgstr "Ordina per"
msgctxt "#70456"
msgid "Popularity Desc"
msgstr "Popolarità Desc"
msgctxt "#70457"
msgid "Popularity Asc"
msgstr "Popolarità Asc"
msgctxt "#70458"
msgid "Year Desc"
msgstr "Anno Desc"
msgctxt "#70459"
msgid "Year Asc"
msgstr "Anno Asc"
msgctxt "#70460"
msgid "Desc Rating"
msgstr "Rating Desc"
msgctxt "#70461"
msgid "Asc Rating"
msgstr "Rating Asc"
msgctxt "#70462"
msgid "Title [A-Z]"
msgstr "Titolo [A-Z]"
msgctxt "#70463"
msgid "Title [Z-A]"
msgstr "Titolo [Z-A]"
msgctxt "#70464"
msgid "Set as default filter"
msgstr "Impostare come filtro di default"
msgctxt "#70465"
msgid "Key word"
msgstr "Parola Chiave"
msgctxt "#70466"
msgid "Country"
msgstr "Nazione"
msgctxt "#70467"
msgid "Select a genre"
msgstr "Seleziona un genere"
msgctxt "#70468"
msgid "Indicate your vote"
msgstr "Indica il tuo voto:"
msgctxt "#70469"
msgid "Added"
msgstr "Aggiunto"
msgctxt "#70470"
msgid "Premiere"
msgstr "Premiere"
msgctxt "#70471"
msgid "Duration"
msgstr "Durata"
msgctxt "#70472"
msgid "Popularity"
msgstr "Popolarità"
msgctxt "#70473"
msgid "Rating"
msgstr "Rating"
msgctxt "#70474"
msgid "Votes"
msgstr "Voti"
msgctxt "#70475"
msgid "upward"
msgstr "ascendente"
msgctxt "#70476"
msgid "falling"
msgstr "discendente"
msgctxt "#70477"
msgid "Upward"
msgstr "Ascendente"
msgctxt "#70478"
msgid "Falling"
msgstr "Discendente"
msgctxt "#70479"
msgid "Currently watching"
msgstr "In visione attualmente"
msgctxt "#70480"
msgid "Completed"
msgstr "Completati"
msgctxt "#70481"
msgid "Anticipated to see"
msgstr "Da vedersi"
msgctxt "#70482"
msgid "Kind"
msgstr "Tipo"
msgctxt "#70483"
msgid "Special"
msgstr "Special"
msgctxt "#70484"
msgid "OVA"
msgstr "OVA"
msgctxt "#70485"
msgid "Status"
msgstr "Stato"
msgctxt "#70486"
msgid "(1) Grotesque"
msgstr "(1) Grottesco"
msgctxt "#70487"
msgid "(2) Horrible"
msgstr "(2) Orribile"
msgctxt "#70488"
msgid "(3) Very bad"
msgstr "(3) Bruttissimo"
msgctxt "#70489"
msgid "(4) Bad"
msgstr "(4) Brutto"
msgctxt "#70490"
msgid "(5) Regular"
msgstr "(5) Regolare"
msgctxt "#70491"
msgid "(6) Pasable"
msgstr "(6) Passabile"
msgctxt "#70492"
msgid "(7) Good"
msgstr "(7) Buono"
msgctxt "#70493"
msgid "(8) Very good"
msgstr "(8) Ottimo"
msgctxt "#70494"
msgid "(9) Genial"
msgstr "(9) Geniale"
msgctxt "#70495"
msgid "(10) Masterpiece"
msgstr "(10) Capolavoro"
msgctxt "#70496"
msgid "The search for "
msgstr "La ricerca di "
msgctxt "#70497"
msgid " did not match."
msgstr " non ha dato risultati."
msgctxt "#70498"
msgid "Producer: "
msgstr "Produzione: "
msgctxt "#70499"
msgid "Genre: "
msgstr "Genere: "
msgctxt "#70500"
msgid "Notification([COLOR red][B]Update Kodi to its latest version[/B][/COLOR], [COLOR skyblue]for best info[/COLOR],8000, \"http://i.imgur.com/mHgwcn3.png\")"
msgstr "Notifica([COLOR red][B]Aggiorna Kodi alla sua ultima versione[/B][/COLOR], [COLOR skyblue]per migliori info[/COLOR],8000, \"http://i.imgur.com/mHgwcn3.png\")"
msgctxt "#70501"
msgid "Search did not match (%s)"
msgstr "La ricerca non ha dato risultati (%s)"
msgctxt "#70502"
msgid ">> Next"
msgstr ">> Successivo"
msgctxt "#70503"
msgid "There is no available video"
msgstr "Nessun video disponibile"
msgctxt "#70504"
msgid "Loading trailers..."
msgstr "Caricamento trailers..."
msgctxt "#70505"
msgid "Enter the title to search"
msgstr "Inserisci il titolo da cercare"
msgctxt "#70506"
msgid "Searching: "
msgstr "Ricerca: "
msgctxt "#70507"
msgid "Search in Youtube"
msgstr "Ricerca in Youtube"
msgctxt "#70508"
msgid "Search in Abandomoviez"
msgstr "Ricerca in Abandomoviez"
msgctxt "#70509"
msgid "Search in Jayhap (Youtube, Vimeo & Dailymotion)"
msgstr "Ricerca in Jayhap (Youtube, Vimeo & Dailymotion)"
msgctxt "#70510"
msgid "Manual Search in Youtube"
msgstr "Ricerca Manuale in Youtube"
msgctxt "#70511"
msgid "Manual Search in Abandomoviez"
msgstr "Ricerca Manuale in Abandomoviez"
msgctxt "#70512"
msgid "Searching in abandomoviez"
msgstr "Ricerca in abandomoviez"
msgctxt "#70513"
msgid "Manual Searching in Filmaffinity"
msgstr "Ricerca Manuale in Filmaffinity"
msgctxt "#70514"
msgid "Manual Search in Jayhap"
msgstr "Ricerca Manuale in Jayhap"
msgctxt "#70515"
msgid "[COLOR aquamarine][B]Completed %s[/B][/COLOR]"
msgstr "[COLOR aquamarine][B]Completata %s[/B][/COLOR]"
msgctxt "#70516"
msgid "[COLOR aquamarine][B]In progress %s[/B][/COLOR]"
msgstr "[COLOR aquamarine][B]In Corso %s[/B][/COLOR]"
msgctxt "#70517"
msgid "Pre-selected currently activated"
msgstr "Pre-selezionati attualmente attivi"
msgctxt "#70518"
msgid "Pre-select all"
msgstr "Pre-seleziona tutti"
msgctxt "#70519"
msgid "Do not pre-select any"
msgstr "Non pre-selezionare nessuno"
msgctxt "#70520"
msgid "AutoPlay allows auto to reproduce the links directly, based on the configuration of your \nfavorite servers and qualities. "
msgstr "AutoPlay consente di riprodurre automaticamente i collegamenti direttamente, in base alla configurazione dei tuoi \nservers e qualità preferite. "
msgctxt "#70521"
msgid "You can install the Trakt script below, \nonce installed and configured what \nyou see will be synchronized with your account automatically. \nDo you want to continue?"
msgstr "Puoi installare lo script Trakt qui sotto, \nuna volta installato e configurato ciò che \nvedrai verrà sincronizzato automaticamente con il tuo account. \nVuoi continuare?"

View File

@@ -479,7 +479,7 @@ msgstr "Elegir canales incluidos en la búsqueda"
msgctxt "#59995"
msgid "Saved Searches"
msgstr "Búsquedasguardadas"
msgstr "Búsquedas guardadas"
msgctxt "#59996"
msgid "Delete saved searches"
@@ -1114,8 +1114,8 @@ msgid "Alfa"
msgstr "Alfa"
msgctxt "#60262"
msgid "You can install the Trakt script below, once installed and configured what you see will be automatically synchronized with your account."
msgstr "Puedes instalar el script de Trakt a continuacíon, una vez instalado y configurado lo que veas se sincronizara con tu cuenta automaticamente."
msgid "You can install the Trakt script below."
msgstr "Puedes instalar el script de Trakt a continuación."
msgctxt "#60263"
msgid "Do you want to continue?"
@@ -1882,11 +1882,11 @@ msgid "Yes, the option to display merged or split \nresults by channels can be f
msgstr "Sì. La opcion de mostrar los resultados juntos \no divididos por canales se encuentra en \n'setting'>'Ajustes del buscador global'> \n'Otros ajustes'. \n¿Deseas acceder a ahora dichos ajustes?"
msgctxt "#60467"
msgid "To report a problem on'http://alfa-addon.com' you need to:|the version you're using of Alpha.|The version you're using of kodi, mediaserver, etc.|the version and name of the operating system you're using.|The name of the skin (in case you're using Kodi) and whether using the default skin has solved the problem.|Description of the problem and any test cases.To activate the log in detailed mode, go to:|Configuration.|Preferences.|In the General tab - Check the option: Generate detailed log. The detailed log file can be found in the following path: \n\n%s"
msgid "To report a problem on'http://alfa-addon.com' you need to:|the version you're using of Alfa.|The version you're using of kodi, mediaserver, etc.|the version and name of the operating system you're using.|The name of the skin (in case you're using Kodi) and whether using the default skin has solved the problem.|Description of the problem and any test cases.To activate the log in detailed mode, go to:|Configuration.|Preferences.|In the General tab - Check the option: Generate detailed log. The detailed log file can be found in the following path: \n\n%s"
msgstr "Para reportar un problema en 'http://alfa-addon.com' es necesario:\n - Versión que usas de Alfa.\n - Versión que usas de kodi, mediaserver, etc.\n - Versión y nombre del sistema operativo que usas.\n - Nombre del skin (en el caso que uses Kodi) y si se te ha resuelto el problema al usar el skin por defecto.\n - Descripción del problema y algún caso de prueba.\n - Agregar el log en modo detallado, una vez hecho esto, zipea el log y lo puedes adjuntar en un post.\n\nPara activar el log en modo detallado, ingresar a:\n - Configuración.\n - Preferencias.\n - En la pestaña General - Marcar la opción: Generar log detallado.\n\nEl archivo de log detallado se encuentra en la siguiente ruta: \n\n%s"
msgctxt "#60468"
msgid "You can find our Telegram channel at @StreamOnDemandOfficial\nSe you have doubts you can write to us in the Telegram group: https://bit.ly/2I3kRwF"
msgid "You can find our Telegram channel at @alfa_addon\nIf you have doubts you can write to us in the Forum: http://alfa-addon.com"
msgstr "Entérate de novedades, consejos u opciones que desconoces en Telegram: @alfa_addon.\nSi tienes problemas o dudas, puedes acudir al Foro: http://alfa-addon.com"
msgctxt "#60469"
@@ -2766,7 +2766,7 @@ msgid "Beginning"
msgstr "Inìcio"
msgctxt "#70013"
msgid "Terror"
msgid "Horror"
msgstr "Terror"
msgctxt "#70014"
@@ -2846,7 +2846,7 @@ msgid "Genres"
msgstr "Géneros"
msgctxt "#70033"
msgid "Actors / Actresses by popularity"
msgid "Actors/Actresses by popularity"
msgstr "Actores/Actrices por popularidad"
msgctxt "#70034"
@@ -2858,12 +2858,12 @@ msgid "Search %s"
msgstr "Buscar %s"
msgctxt "#70036"
msgid " Search actor/actress"
msgstr " Buscar actor/actriz"
msgid "Search actor/actress"
msgstr "Buscar actor/actriz"
msgctxt "#70037"
msgid " Search director, writer..."
msgstr " Buscar director, guionista..."
msgid "Search director, writer..."
msgstr "Buscar director, escritor..."
msgctxt "#70038"
msgid "Custom Filter"
@@ -2902,8 +2902,8 @@ msgid "Search Movies/TV Shows"
msgstr "Buscar Películas/Series"
msgctxt "#70047"
msgid " Search by director"
msgstr " Buscar por director"
msgid "Search by director"
msgstr "Buscar por director"
msgctxt "#70048"
msgid " My Account"
@@ -3255,7 +3255,7 @@ msgstr "Colores Personalizados"
msgctxt "#70137"
msgid "Movies"
msgstr "Peliculas"
msgstr "Películas"
msgctxt "#70138"
msgid "Low Rating"
@@ -3845,14 +3845,6 @@ msgctxt "#70288"
msgid "Configure Downloads"
msgstr "Configurar Descargas"
msgctxt "#70289"
msgid "Alfa\nCorrected an error in the adult section, the password has been reset to "
msgstr "Alfa\nCorregido un error en la seccion adultos, se ha reseteado la contrasena a por "
msgctxt "#70290"
msgid "default, you will have to change it again if you want.\ n Type 's', if you have understood it: "
msgstr "defecto, tendra que cambiarla de nuevo si lo desea.\n Escriba 's', si lo ha entendido: "
msgctxt "#70291"
msgid "Error, during conversion"
msgstr "Error, en conversión"
@@ -3964,7 +3956,7 @@ msgid "Listen to BSO - List of songs"
msgstr "Escuchar BSO - Lista de canciones"
msgctxt "#70318"
msgid "Manage yor trakt account"
msgid "Manage your trakt account"
msgstr "Gestionar con tu cuenta Trakt"
msgctxt "#70319"
@@ -4266,3 +4258,523 @@ msgstr "Añadir a lista %s"
msgctxt "#70392"
msgid "Rate with a [COLOR %s]%s[/ COLOR]"
msgstr "Puntuar con un [COLOR %s]%s[/COLOR]"
msgctxt "#70393"
msgid "[%s]: Select the correct %s "
msgstr "[%s]: Selecciona la %s correcta"
msgctxt "#70394"
msgid "Action"
msgstr "Acción"
msgctxt "#70395"
msgid "Sport"
msgstr "Deportes"
msgctxt "#70396"
msgid "Documentary"
msgstr "Documental"
msgctxt "#70397"
msgid "Science fiction"
msgstr "Ciencia Ficción"
msgctxt "#70398"
msgid "Talk Show"
msgstr "Entrevistas"
msgctxt "#70399"
msgid "Family"
msgstr "Familia"
msgctxt "#70400"
msgid "Film-Noir"
msgstr "Cine Negro"
msgctxt "#70401"
msgid "Game-Show"
msgstr "Concursos"
msgctxt "#70402"
msgid "Mystery"
msgstr "Intriga"
msgctxt "#70403"
msgid "Biography"
msgstr "Biografía"
msgctxt "#70404"
msgid "Music"
msgstr "Música"
msgctxt "#70405"
msgid "History"
msgstr "Historia"
msgctxt "#70406"
msgid "Reality-TV"
msgstr "Reality"
msgctxt "#70407"
msgid "War"
msgstr "Cine Bélico"
msgctxt "#70408"
msgid "Musical"
msgstr "Musical"
msgctxt "#70409"
msgid "Romance"
msgstr "Romance"
msgctxt "#70410"
msgid "Thriller"
msgstr "Thriller"
msgctxt "#70411"
msgid "Western"
msgstr "Western"
msgctxt "#70412"
msgid "Drama"
msgstr "Drama"
msgctxt "#70413"
msgid "2. Enter this code on the page and click Allow: %s"
msgstr "2. Ingresa este código en la página y presiona Allow: %s"
msgctxt "#70414"
msgid "Authentication. Do not close this window!!"
msgstr "Autentificación. No cierres esta ventana!!"
msgctxt "#70415"
msgid "Trakt.tv"
msgstr "Trakt.tv"
msgctxt "#70416"
msgid "=== Movies ==="
msgstr "=== Películas ==="
msgctxt "#70417"
msgid "=== TV Shows ==="
msgstr "=== Series ==="
msgctxt "#70418"
msgid "Search language in TMDB"
msgstr "Idioma de búsqueda en TMDB"
msgctxt "#70419"
msgid "German"
msgstr "Alemán"
msgctxt "#70420"
msgid "French"
msgstr "Francés"
msgctxt "#70421"
msgid "Portuguese"
msgstr "Portugués"
msgctxt "#70422"
msgid "Italian"
msgstr "Italiano"
msgctxt "#70423"
msgid "Spanish Latin"
msgstr "Español Latino"
msgctxt "#70424"
msgid "Catalan"
msgstr "Catalán"
msgctxt "#70425"
msgid "English"
msgstr "Inglés"
msgctxt "#70426"
msgid "Alternative language for TMDB (No main language synopsis)"
msgstr "Idioma alternativo para TMDB (No sinopsis idioma principal)"
msgctxt "#70427"
msgid "Language of titles in IMDB"
msgstr "Idioma de los títulos en IMDB"
msgctxt "#70428"
msgid "Filmaffinity website"
msgstr "Sitio Web Filmaffinity"
msgctxt "#70429"
msgid "Colombia"
msgstr "Colombia"
msgctxt "#70430"
msgid "Chile"
msgstr "Chile"
msgctxt "#70431"
msgid "Argentina"
msgstr "Argentina"
msgctxt "#70432"
msgid "Mexico"
msgstr "México"
msgctxt "#70433"
msgid "US/UK"
msgstr "US/UK"
msgctxt "#70434"
msgid "Spain"
msgstr "España"
msgctxt "#70435"
msgid "User Filmaaffinity (Optional)"
msgstr "Usuario Filmaffinity (Opcional)"
msgctxt "#70436"
msgid "Password Filmaffinity"
msgstr "Contraseña Filmaffinity"
msgctxt "#70437"
msgid "Order personal lists of Filmaffinity by:"
msgstr "Ordenar listas personales de Filmaffinity por:"
msgctxt "#70438"
msgid "Position"
msgstr "Posición"
msgctxt "#70439"
msgid "Vote"
msgstr "Voto"
msgctxt "#70440"
msgid "Average grade"
msgstr "Nota media"
msgctxt "#70441"
msgid "User MyAnimeList (Optional)"
msgstr "Usuario MyAnimeList (Opcional)"
msgctxt "#70442"
msgid "Password MyAnimeList"
msgstr "Contraseña MyAnimeList"
msgctxt "#70443"
msgid "Show Hentai in MyAnimeList"
msgstr "Mostrar Hentais en MyAnimeList"
msgctxt "#70444"
msgid "Profile 3"
msgstr "Perfil 3"
msgctxt "#70445"
msgid "Profile 2"
msgstr "Perfil 2"
msgctxt "#70446"
msgid "Profile 1"
msgstr "Perfil 1"
msgctxt "#70447"
msgid "[%s] The file has been deleted"
msgstr "[%s] El fichero ha sido borrado"
msgctxt "#70448"
msgid "[%s] The file is still in process"
msgstr "[%s] El fichero está en proceso todavía"
msgctxt "#70449"
msgid "[%s] The file does not exist or has been deleted"
msgstr "[%s] El archivo no existe o ha sido borrado"
msgctxt "#70450"
msgid "Anyone"
msgstr "Cualquiera"
msgctxt "#70451"
msgid "Select one, none or more than one gender"
msgstr "Selecciona uno, ninguno o más de un género"
msgctxt "#70452"
msgid "Year from"
msgstr "Año desde:"
msgctxt "#70453"
msgid "Year until"
msgstr "Año hasta:"
msgctxt "#70454"
msgid "Minimum number of votes"
msgstr "Número mínimo de votos"
msgctxt "#70455"
msgid "Order by"
msgstr "Ordenar por"
msgctxt "#70456"
msgid "Popularity Desc"
msgstr "Popularidad Desc"
msgctxt "#70457"
msgid "Popularity Asc"
msgstr "Popularidad Asc"
msgctxt "#70458"
msgid "Year Desc"
msgstr "Año Desc"
msgctxt "#70459"
msgid "Year Asc"
msgstr "Año Asc"
msgctxt "#70460"
msgid "Desc Rating"
msgstr "Valoración Desc"
msgctxt "#70461"
msgid "Asc Rating"
msgstr "Valoración Asc"
msgctxt "#70462"
msgid "Title [A-Z]"
msgstr "Título [A-Z]"
msgctxt "#70463"
msgid "Title [Z-A]"
msgstr "Título [Z-A]"
msgctxt "#70464"
msgid "Set as default filter"
msgstr "Establecer como filtro por defecto"
msgctxt "#70465"
msgid "Key word"
msgstr "Palabra Clave"
msgctxt "#70466"
msgid "Country"
msgstr "País"
msgctxt "#70467"
msgid "Select a genre"
msgstr "Selecciona un género"
msgctxt "#70468"
msgid "Indicate your vote"
msgstr "Indica tu voto:"
msgctxt "#70469"
msgid "Added"
msgstr "Añadido"
msgctxt "#70470"
msgid "Premiere"
msgstr "Estreno"
msgctxt "#70471"
msgid "Duration"
msgstr "Duración"
msgctxt "#70472"
msgid "Popularity"
msgstr "Popularidad"
msgctxt "#70473"
msgid "Rating"
msgstr "Valoración"
msgctxt "#70474"
msgid "Votes"
msgstr "Votos"
msgctxt "#70475"
msgid "upward"
msgstr "ascendente"
msgctxt "#70476"
msgid "falling"
msgstr "descendente"
msgctxt "#70477"
msgid "Upward"
msgstr "Ascendente"
msgctxt "#70478"
msgid "Falling"
msgstr "Descendente"
msgctxt "#70479"
msgid "Currently watching"
msgstr "Viendo Actualmente"
msgctxt "#70480"
msgid "Completed"
msgstr "Completados"
msgctxt "#70481"
msgid "Anticipated to see"
msgstr "Previstos para ver"
msgctxt "#70482"
msgid "Kind"
msgstr "Tipo"
msgctxt "#70483"
msgid "Special"
msgstr "Especial"
msgctxt "#70484"
msgid "OVA"
msgstr "OVA"
msgctxt "#70485"
msgid "Status"
msgstr "Estado"
msgctxt "#70486"
msgid "(1) Grotesque"
msgstr "(1) Grotesca"
msgctxt "#70487"
msgid "(2) Horrible"
msgstr "(2) Horrible"
msgctxt "#70488"
msgid "(3) Very bad"
msgstr "(3) Muy mala"
msgctxt "#70489"
msgid "(4) Bad"
msgstr "(4) Mala"
msgctxt "#70490"
msgid "(5) Regular"
msgstr "(5) Regular"
msgctxt "#70491"
msgid "(6) Pasable"
msgstr "(6) Pasable"
msgctxt "#70492"
msgid "(7) Good"
msgstr "(7) Buena"
msgctxt "#70493"
msgid "(8) Very good"
msgstr "(8) Muy buena"
msgctxt "#70494"
msgid "(9) Genial"
msgstr "(9) Genial"
msgctxt "#70495"
msgid "(10) Masterpiece"
msgstr "(10) Obra maestra"
msgctxt "#70496"
msgid "The search for "
msgstr "La búsqueda de "
msgctxt "#70497"
msgid " did not match."
msgstr " no dio resultados."
msgctxt "#70498"
msgid "Producer: "
msgstr "Productora: "
msgctxt "#70499"
msgid "Genre: "
msgstr "Género: "
msgctxt "#70500"
msgid "Notification([COLOR red][B]Update Kodi to its latest version[/B][/COLOR], [COLOR skyblue]for best info[/COLOR],8000, \"http://i.imgur.com/mHgwcn3.png\")"
msgstr "Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000, \"http://i.imgur.com/mHgwcn3.png\")"
msgctxt "#70501"
msgid "Search did not match (%s)"
msgstr "La búsqueda no ha dado resultados (%s)"
msgctxt "#70502"
msgid ">> Next"
msgstr ">> Siguiente"
msgctxt "#70503"
msgid "There is no available video"
msgstr "No hay ningún vídeo disponible"
msgctxt "#70504"
msgid "Loading trailers..."
msgstr "Cargando trailers..."
msgctxt "#70505"
msgid "Enter the title to search"
msgstr "Introduce el título a buscar"
msgctxt "#70506"
msgid "Searching: "
msgstr "Buscando: "
msgctxt "#70507"
msgid "Search in Youtube"
msgstr "Búsqueda en Youtube"
msgctxt "#70508"
msgid "Search in Abandomoviez"
msgstr "Búsqueda en Abandomoviez"
msgctxt "#70509"
msgid "Search in Jayhap (Youtube, Vimeo & Dailymotion)"
msgstr "Búsqueda en Jayhap (Youtube, Vimeo & Dailymotion)"
msgctxt "#70510"
msgid "Manual Search in Youtube"
msgstr "Búsqueda Manual en Youtube"
msgctxt "#70511"
msgid "Manual Search in Abandomoviez"
msgstr "Búsqueda Manual en Abandomoviez"
msgctxt "#70512"
msgid "Searching in abandomoviez"
msgstr "Buscando en abandomoviez"
msgctxt "#70513"
msgid "Manual Searching in Filmaffinity"
msgstr "Búsqueda Manual en Filmaffinity"
msgctxt "#70514"
msgid "Manual Search in Jayhap"
msgstr "Búsqueda Manual en Jayhap"
msgctxt "#70515"
msgid "[COLOR aquamarine][B]Completed %s[/B][/COLOR]"
msgstr "[COLOR aquamarine][B]Finalizada %s[/B][/COLOR]"
msgctxt "#70516"
msgid "[COLOR aquamarine][B]In progress %s[/B][/COLOR]"
msgstr "[COLOR aquamarine][B]En emisión %s[/B][/COLOR]"
msgctxt "#70517"
msgid "Pre-selected currently activated"
msgstr "Pre-seleccionar activados actualmente"
msgctxt "#70518"
msgid "Pre-select all"
msgstr "Pre-seleccionar todos"
msgctxt "#70519"
msgid "Do not pre-select any"
msgstr "No pre-seleccionar ninguno"
msgctxt "#70520"
msgid "AutoPlay allows auto to reproduce the links directly, based on the configuration of your \nfavorite servers and qualities. "
msgstr "AutoPlay permite auto reproducir los enlaces directamente, basándose en la configuracion de tus \nservidores y calidades favoritas. "
msgctxt "#70521"
msgid "You can install the Trakt script below, \nonce installed and configured what \nyou see will be synchronized with your account automatically. \nDo you want to continue?"
msgstr "Puedes instalar el script de Trakt a continuación, \nuna vez instalado y configurado lo que \nveas se sincronizará con tu cuenta automáticamente. \n¿Deseas continuar?"

View File

@@ -479,7 +479,7 @@ msgstr "Elegir canales incluidos en la búsqueda"
msgctxt "#59995"
msgid "Saved Searches"
msgstr "Búsquedasguardadas"
msgstr "Búsquedas guardadas"
msgctxt "#59996"
msgid "Delete saved searches"
@@ -1114,8 +1114,8 @@ msgid "Alfa"
msgstr "Alfa"
msgctxt "#60262"
msgid "You can install the Trakt script below, once installed and configured what you see will be automatically synchronized with your account."
msgstr "Puedes instalar el script de Trakt a continuacíon, una vez instalado y configurado lo que veas se sincronizara con tu cuenta automaticamente."
msgid "You can install the Trakt script below."
msgstr "Puedes instalar el script de Trakt a continuación."
msgctxt "#60263"
msgid "Do you want to continue?"
@@ -1882,11 +1882,11 @@ msgid "Yes, the option to display merged or split \nresults by channels can be f
msgstr "Sì. La opcion de mostrar los resultados juntos \no divididos por canales se encuentra en \n'setting'>'Ajustes del buscador global'> \n'Otros ajustes'. \n¿Deseas acceder a ahora dichos ajustes?"
msgctxt "#60467"
msgid "To report a problem on'http://alfa-addon.com' you need to:|the version you're using of Alpha.|The version you're using of kodi, mediaserver, etc.|the version and name of the operating system you're using.|The name of the skin (in case you're using Kodi) and whether using the default skin has solved the problem.|Description of the problem and any test cases.To activate the log in detailed mode, go to:|Configuration.|Preferences.|In the General tab - Check the option: Generate detailed log. The detailed log file can be found in the following path: \n\n%s"
msgid "To report a problem on'http://alfa-addon.com' you need to:|the version you're using of Alfa.|The version you're using of kodi, mediaserver, etc.|the version and name of the operating system you're using.|The name of the skin (in case you're using Kodi) and whether using the default skin has solved the problem.|Description of the problem and any test cases.To activate the log in detailed mode, go to:|Configuration.|Preferences.|In the General tab - Check the option: Generate detailed log. The detailed log file can be found in the following path: \n\n%s"
msgstr "Para reportar un problema en 'http://alfa-addon.com' es necesario:\n - Versión que usas de Alfa.\n - Versión que usas de kodi, mediaserver, etc.\n - Versión y nombre del sistema operativo que usas.\n - Nombre del skin (en el caso que uses Kodi) y si se te ha resuelto el problema al usar el skin por defecto.\n - Descripción del problema y algún caso de prueba.\n - Agregar el log en modo detallado, una vez hecho esto, zipea el log y lo puedes adjuntar en un post.\n\nPara activar el log en modo detallado, ingresar a:\n - Configuración.\n - Preferencias.\n - En la pestaña General - Marcar la opción: Generar log detallado.\n\nEl archivo de log detallado se encuentra en la siguiente ruta: \n\n%s"
msgctxt "#60468"
msgid "You can find our Telegram channel at @StreamOnDemandOfficial\nSe you have doubts you can write to us in the Telegram group: https://bit.ly/2I3kRwF"
msgid "You can find our Telegram channel at @alfa_addon\nIf you have doubts you can write to us in the Forum: http://alfa-addon.com"
msgstr "Entérate de novedades, consejos u opciones que desconoces en Telegram: @alfa_addon.\nSi tienes problemas o dudas, puedes acudir al Foro: http://alfa-addon.com"
msgctxt "#60469"
@@ -2766,7 +2766,7 @@ msgid "Beginning"
msgstr "Inìcio"
msgctxt "#70013"
msgid "Terror"
msgid "Horror"
msgstr "Terror"
msgctxt "#70014"
@@ -2846,7 +2846,7 @@ msgid "Genres"
msgstr "Géneros"
msgctxt "#70033"
msgid "Actors / Actresses by popularity"
msgid "Actors/Actresses by popularity"
msgstr "Actores/Actrices por popularidad"
msgctxt "#70034"
@@ -2858,12 +2858,12 @@ msgid "Search %s"
msgstr "Buscar %s"
msgctxt "#70036"
msgid " Search actor/actress"
msgstr " Buscar actor/actriz"
msgid "Search actor/actress"
msgstr "Buscar actor/actriz"
msgctxt "#70037"
msgid " Search director, writer..."
msgstr " Buscar director, guionista..."
msgid "Search director, writer..."
msgstr "Buscar director, escritor..."
msgctxt "#70038"
msgid "Custom Filter"
@@ -2902,8 +2902,8 @@ msgid "Search Movies/TV Shows"
msgstr "Buscar Películas/Series"
msgctxt "#70047"
msgid " Search by director"
msgstr " Buscar por director"
msgid "Search by director"
msgstr "Buscar por director"
msgctxt "#70048"
msgid " My Account"
@@ -3255,7 +3255,7 @@ msgstr "Colores Personalizados"
msgctxt "#70137"
msgid "Movies"
msgstr "Peliculas"
msgstr "Películas"
msgctxt "#70138"
msgid "Low Rating"
@@ -3845,14 +3845,6 @@ msgctxt "#70288"
msgid "Configure Downloads"
msgstr "Configurar Descargas"
msgctxt "#70289"
msgid "Alfa\nCorrected an error in the adult section, the password has been reset to "
msgstr "Alfa\nCorregido un error en la seccion adultos, se ha reseteado la contrasena a por "
msgctxt "#70290"
msgid "default, you will have to change it again if you want.\ n Type 's', if you have understood it: "
msgstr "defecto, tendra que cambiarla de nuevo si lo desea.\n Escriba 's', si lo ha entendido: "
msgctxt "#70291"
msgid "Error, during conversion"
msgstr "Error, en conversión"
@@ -3964,7 +3956,7 @@ msgid "Listen to BSO - List of songs"
msgstr "Escuchar BSO - Lista de canciones"
msgctxt "#70318"
msgid "Manage yor trakt account"
msgid "Manage your trakt account"
msgstr "Gestionar con tu cuenta Trakt"
msgctxt "#70319"
@@ -4266,3 +4258,523 @@ msgstr "Añadir a lista %s"
msgctxt "#70392"
msgid "Rate with a [COLOR %s]%s[/ COLOR]"
msgstr "Puntuar con un [COLOR %s]%s[/COLOR]"
msgctxt "#70393"
msgid "[%s]: Select the correct %s "
msgstr "[%s]: Selecciona la %s correcta"
msgctxt "#70394"
msgid "Action"
msgstr "Acción"
msgctxt "#70395"
msgid "Sport"
msgstr "Deportes"
msgctxt "#70396"
msgid "Documentary"
msgstr "Documental"
msgctxt "#70397"
msgid "Science fiction"
msgstr "Ciencia Ficción"
msgctxt "#70398"
msgid "Talk Show"
msgstr "Entrevistas"
msgctxt "#70399"
msgid "Family"
msgstr "Familia"
msgctxt "#70400"
msgid "Film-Noir"
msgstr "Cine Negro"
msgctxt "#70401"
msgid "Game-Show"
msgstr "Concursos"
msgctxt "#70402"
msgid "Mystery"
msgstr "Intriga"
msgctxt "#70403"
msgid "Biography"
msgstr "Biografía"
msgctxt "#70404"
msgid "Music"
msgstr "Música"
msgctxt "#70405"
msgid "History"
msgstr "Historia"
msgctxt "#70406"
msgid "Reality-TV"
msgstr "Reality"
msgctxt "#70407"
msgid "War"
msgstr "Cine Bélico"
msgctxt "#70408"
msgid "Musical"
msgstr "Musical"
msgctxt "#70409"
msgid "Romance"
msgstr "Romance"
msgctxt "#70410"
msgid "Thriller"
msgstr "Thriller"
msgctxt "#70411"
msgid "Western"
msgstr "Western"
msgctxt "#70412"
msgid "Drama"
msgstr "Drama"
msgctxt "#70413"
msgid "2. Enter this code on the page and click Allow: %s"
msgstr "2. Ingresa este código en la página y presiona Allow: %s"
msgctxt "#70414"
msgid "Authentication. Do not close this window!!"
msgstr "Autentificación. No cierres esta ventana!!"
msgctxt "#70415"
msgid "Trakt.tv"
msgstr "Trakt.tv"
msgctxt "#70416"
msgid "=== Movies ==="
msgstr "=== Películas ==="
msgctxt "#70417"
msgid "=== TV Shows ==="
msgstr "=== Series ==="
msgctxt "#70418"
msgid "Search language in TMDB"
msgstr "Idioma de búsqueda en TMDB"
msgctxt "#70419"
msgid "German"
msgstr "Alemán"
msgctxt "#70420"
msgid "French"
msgstr "Francés"
msgctxt "#70421"
msgid "Portuguese"
msgstr "Portugués"
msgctxt "#70422"
msgid "Italian"
msgstr "Italiano"
msgctxt "#70423"
msgid "Spanish Latin"
msgstr "Español Latino"
msgctxt "#70424"
msgid "Catalan"
msgstr "Catalán"
msgctxt "#70425"
msgid "English"
msgstr "Inglés"
msgctxt "#70426"
msgid "Alternative language for TMDB (No main language synopsis)"
msgstr "Idioma alternativo para TMDB (No sinopsis idioma principal)"
msgctxt "#70427"
msgid "Language of titles in IMDB"
msgstr "Idioma de los títulos en IMDB"
msgctxt "#70428"
msgid "Filmaffinity website"
msgstr "Sitio Web Filmaffinity"
msgctxt "#70429"
msgid "Colombia"
msgstr "Colombia"
msgctxt "#70430"
msgid "Chile"
msgstr "Chile"
msgctxt "#70431"
msgid "Argentina"
msgstr "Argentina"
msgctxt "#70432"
msgid "Mexico"
msgstr "México"
msgctxt "#70433"
msgid "US/UK"
msgstr "US/UK"
msgctxt "#70434"
msgid "Spain"
msgstr "España"
msgctxt "#70435"
msgid "User Filmaaffinity (Optional)"
msgstr "Usuario Filmaffinity (Opcional)"
msgctxt "#70436"
msgid "Password Filmaffinity"
msgstr "Contraseña Filmaffinity"
msgctxt "#70437"
msgid "Order personal lists of Filmaffinity by:"
msgstr "Ordenar listas personales de Filmaffinity por:"
msgctxt "#70438"
msgid "Position"
msgstr "Posición"
msgctxt "#70439"
msgid "Vote"
msgstr "Voto"
msgctxt "#70440"
msgid "Average grade"
msgstr "Nota media"
msgctxt "#70441"
msgid "User MyAnimeList (Optional)"
msgstr "Usuario MyAnimeList (Opcional)"
msgctxt "#70442"
msgid "Password MyAnimeList"
msgstr "Contraseña MyAnimeList"
msgctxt "#70443"
msgid "Show Hentai in MyAnimeList"
msgstr "Mostrar Hentais en MyAnimeList"
msgctxt "#70444"
msgid "Profile 3"
msgstr "Perfil 3"
msgctxt "#70445"
msgid "Profile 2"
msgstr "Perfil 2"
msgctxt "#70446"
msgid "Profile 1"
msgstr "Perfil 1"
msgctxt "#70447"
msgid "[%s] The file has been deleted"
msgstr "[%s] El fichero ha sido borrado"
msgctxt "#70448"
msgid "[%s] The file is still in process"
msgstr "[%s] El fichero está en proceso todavía"
msgctxt "#70449"
msgid "[%s] The file does not exist or has been deleted"
msgstr "[%s] El archivo no existe o ha sido borrado"
msgctxt "#70450"
msgid "Anyone"
msgstr "Cualquiera"
msgctxt "#70451"
msgid "Select one, none or more than one gender"
msgstr "Selecciona uno, ninguno o más de un género"
msgctxt "#70452"
msgid "Year from"
msgstr "Año desde:"
msgctxt "#70453"
msgid "Year until"
msgstr "Año hasta:"
msgctxt "#70454"
msgid "Minimum number of votes"
msgstr "Número mínimo de votos"
msgctxt "#70455"
msgid "Order by"
msgstr "Ordenar por"
msgctxt "#70456"
msgid "Popularity Desc"
msgstr "Popularidad Desc"
msgctxt "#70457"
msgid "Popularity Asc"
msgstr "Popularidad Asc"
msgctxt "#70458"
msgid "Year Desc"
msgstr "Año Desc"
msgctxt "#70459"
msgid "Year Asc"
msgstr "Año Asc"
msgctxt "#70460"
msgid "Desc Rating"
msgstr "Valoración Desc"
msgctxt "#70461"
msgid "Asc Rating"
msgstr "Valoración Asc"
msgctxt "#70462"
msgid "Title [A-Z]"
msgstr "Título [A-Z]"
msgctxt "#70463"
msgid "Title [Z-A]"
msgstr "Título [Z-A]"
msgctxt "#70464"
msgid "Set as default filter"
msgstr "Establecer como filtro por defecto"
msgctxt "#70465"
msgid "Key word"
msgstr "Palabra Clave"
msgctxt "#70466"
msgid "Country"
msgstr "País"
msgctxt "#70467"
msgid "Select a genre"
msgstr "Selecciona un género"
msgctxt "#70468"
msgid "Indicate your vote"
msgstr "Indica tu voto:"
msgctxt "#70469"
msgid "Added"
msgstr "Añadido"
msgctxt "#70470"
msgid "Premiere"
msgstr "Estreno"
msgctxt "#70471"
msgid "Duration"
msgstr "Duración"
msgctxt "#70472"
msgid "Popularity"
msgstr "Popularidad"
msgctxt "#70473"
msgid "Rating"
msgstr "Valoración"
msgctxt "#70474"
msgid "Votes"
msgstr "Votos"
msgctxt "#70475"
msgid "upward"
msgstr "ascendente"
msgctxt "#70476"
msgid "falling"
msgstr "descendente"
msgctxt "#70477"
msgid "Upward"
msgstr "Ascendente"
msgctxt "#70478"
msgid "Falling"
msgstr "Descendente"
msgctxt "#70479"
msgid "Currently watching"
msgstr "Viendo Actualmente"
msgctxt "#70480"
msgid "Completed"
msgstr "Completados"
msgctxt "#70481"
msgid "Anticipated to see"
msgstr "Previstos para ver"
msgctxt "#70482"
msgid "Kind"
msgstr "Tipo"
msgctxt "#70483"
msgid "Special"
msgstr "Especial"
msgctxt "#70484"
msgid "OVA"
msgstr "OVA"
msgctxt "#70485"
msgid "Status"
msgstr "Estado"
msgctxt "#70486"
msgid "(1) Grotesque"
msgstr "(1) Grotesca"
msgctxt "#70487"
msgid "(2) Horrible"
msgstr "(2) Horrible"
msgctxt "#70488"
msgid "(3) Very bad"
msgstr "(3) Muy mala"
msgctxt "#70489"
msgid "(4) Bad"
msgstr "(4) Mala"
msgctxt "#70490"
msgid "(5) Regular"
msgstr "(5) Regular"
msgctxt "#70491"
msgid "(6) Pasable"
msgstr "(6) Pasable"
msgctxt "#70492"
msgid "(7) Good"
msgstr "(7) Buena"
msgctxt "#70493"
msgid "(8) Very good"
msgstr "(8) Muy buena"
msgctxt "#70494"
msgid "(9) Genial"
msgstr "(9) Genial"
msgctxt "#70495"
msgid "(10) Masterpiece"
msgstr "(10) Obra maestra"
msgctxt "#70496"
msgid "The search for "
msgstr "La búsqueda de "
msgctxt "#70497"
msgid " did not match."
msgstr " no dio resultados."
msgctxt "#70497"
msgid " did not match."
msgstr " no dio resultados."
msgctxt "#70498"
msgid "Producer: "
msgstr "Productora: "
msgctxt "#70499"
msgid "Genre: "
msgstr "Género: "
msgctxt "#70500"
msgid "Notification([COLOR red][B]Update Kodi to its latest version[/B][/COLOR], [COLOR skyblue]for best info[/COLOR],8000, \"http://i.imgur.com/mHgwcn3.png\")"
msgstr "Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000, \"http://i.imgur.com/mHgwcn3.png\")"
msgctxt "#70501"
msgid "Search did not match (%s)"
msgstr "La búsqueda no ha dado resultados (%s)"
msgctxt "#70502"
msgid ">> Next"
msgstr ">> Siguiente"
msgctxt "#70503"
msgid "There is no available video"
msgstr "No hay ningún vídeo disponible"
msgctxt "#70504"
msgid "Loading trailers..."
msgstr "Cargando trailers..."
msgctxt "#70505"
msgid "Enter the title to search"
msgstr "Introduce el título a buscar"
msgctxt "#70506"
msgid "Searching: "
msgstr "Buscando: "
msgctxt "#70507"
msgid "Search in Youtube"
msgstr "Búsqueda en Youtube"
msgctxt "#70508"
msgid "Search in Abandomoviez"
msgstr "Búsqueda en Abandomoviez"
msgctxt "#70509"
msgid "Search in Jayhap (Youtube, Vimeo & Dailymotion)"
msgstr "Búsqueda en Jayhap (Youtube, Vimeo & Dailymotion)"
msgctxt "#70510"
msgid "Manual Search in Youtube"
msgstr "Búsqueda Manual en Youtube"
msgctxt "#70511"
msgid "Manual Search in Abandomoviez"
msgstr "Búsqueda Manual en Abandomoviez"
msgctxt "#70512"
msgid "Searching in abandomoviez"
msgstr "Buscando en abandomoviez"
msgctxt "#70513"
msgid "Manual Searching in Filmaffinity"
msgstr "Búsqueda Manual en Filmaffinity"
msgctxt "#70514"
msgid "Manual Search in Jayhap"
msgstr "Búsqueda Manual en Jayhap"
msgctxt "#70515"
msgid "[COLOR aquamarine][B]Completed %s[/B][/COLOR]"
msgstr "[COLOR aquamarine][B]Finalizada %s[/B][/COLOR]"
msgctxt "#70516"
msgid "[COLOR aquamarine][B]In progress %s[/B][/COLOR]"
msgstr "[COLOR aquamarine][B]En emisión %s[/B][/COLOR]"
msgctxt "#70517"
msgid "Pre-selected currently activated"
msgstr "Pre-seleccionar activados actualmente"
msgctxt "#70518"
msgid "Pre-select all"
msgstr "Pre-seleccionar todos"
msgctxt "#70519"
msgid "Do not pre-select any"
msgstr "No pre-seleccionar ninguno"
msgctxt "#70520"
msgid "AutoPlay allows auto to reproduce the links directly, based on the configuration of your \nfavorite servers and qualities. "
msgstr "AutoPlay permite auto reproducir los enlaces directamente, basándose en la configuracion de tus \nservidores y calidades favoritas. "
msgctxt "#70521"
msgid "You can install the Trakt script below, \nonce installed and configured what \nyou see will be synchronized with your account automatically. \nDo you want to continue?"
msgstr "Puedes instalar el script de Trakt a continuación, \nuna vez instalado y configurado lo que \nveas se sincronizará con tu cuenta automáticamente. \n¿Deseas continuar?"

View File

@@ -479,7 +479,7 @@ msgstr "Elegir canales incluidos en la búsqueda"
msgctxt "#59995"
msgid "Saved Searches"
msgstr "Búsquedasguardadas"
msgstr "Búsquedas guardadas"
msgctxt "#59996"
msgid "Delete saved searches"
@@ -1114,8 +1114,8 @@ msgid "Alfa"
msgstr "Alfa"
msgctxt "#60262"
msgid "You can install the Trakt script below, once installed and configured what you see will be automatically synchronized with your account."
msgstr "Puedes instalar el script de Trakt a continuacíon, una vez instalado y configurado lo que veas se sincronizara con tu cuenta automaticamente."
msgid "You can install the Trakt script below."
msgstr "Puedes instalar el script de Trakt a continuación."
msgctxt "#60263"
msgid "Do you want to continue?"
@@ -1882,11 +1882,11 @@ msgid "Yes, the option to display merged or split \nresults by channels can be f
msgstr "Sì. La opcion de mostrar los resultados juntos \no divididos por canales se encuentra en \n'setting'>'Ajustes del buscador global'> \n'Otros ajustes'. \n¿Deseas acceder a ahora dichos ajustes?"
msgctxt "#60467"
msgid "To report a problem on'http://alfa-addon.com' you need to:|the version you're using of Alpha.|The version you're using of kodi, mediaserver, etc.|the version and name of the operating system you're using.|The name of the skin (in case you're using Kodi) and whether using the default skin has solved the problem.|Description of the problem and any test cases.To activate the log in detailed mode, go to:|Configuration.|Preferences.|In the General tab - Check the option: Generate detailed log. The detailed log file can be found in the following path: \n\n%s"
msgid "To report a problem on'http://alfa-addon.com' you need to:|the version you're using of Alfa.|The version you're using of kodi, mediaserver, etc.|the version and name of the operating system you're using.|The name of the skin (in case you're using Kodi) and whether using the default skin has solved the problem.|Description of the problem and any test cases.To activate the log in detailed mode, go to:|Configuration.|Preferences.|In the General tab - Check the option: Generate detailed log. The detailed log file can be found in the following path: \n\n%s"
msgstr "Para reportar un problema en 'http://alfa-addon.com' es necesario:\n - Versión que usas de Alfa.\n - Versión que usas de kodi, mediaserver, etc.\n - Versión y nombre del sistema operativo que usas.\n - Nombre del skin (en el caso que uses Kodi) y si se te ha resuelto el problema al usar el skin por defecto.\n - Descripción del problema y algún caso de prueba.\n - Agregar el log en modo detallado, una vez hecho esto, zipea el log y lo puedes adjuntar en un post.\n\nPara activar el log en modo detallado, ingresar a:\n - Configuración.\n - Preferencias.\n - En la pestaña General - Marcar la opción: Generar log detallado.\n\nEl archivo de log detallado se encuentra en la siguiente ruta: \n\n%s"
msgctxt "#60468"
msgid "You can find our Telegram channel at @StreamOnDemandOfficial\nSe you have doubts you can write to us in the Telegram group: https://bit.ly/2I3kRwF"
msgid "You can find our Telegram channel at @alfa_addon\nIf you have doubts you can write to us in the Forum: http://alfa-addon.com"
msgstr "Entérate de novedades, consejos u opciones que desconoces en Telegram: @alfa_addon.\nSi tienes problemas o dudas, puedes acudir al Foro: http://alfa-addon.com"
msgctxt "#60469"
@@ -2766,7 +2766,7 @@ msgid "Beginning"
msgstr "Inìcio"
msgctxt "#70013"
msgid "Terror"
msgid "Horror"
msgstr "Terror"
msgctxt "#70014"
@@ -2846,7 +2846,7 @@ msgid "Genres"
msgstr "Géneros"
msgctxt "#70033"
msgid "Actors / Actresses by popularity"
msgid "Actors/Actresses by popularity"
msgstr "Actores/Actrices por popularidad"
msgctxt "#70034"
@@ -2858,12 +2858,12 @@ msgid "Search %s"
msgstr "Buscar %s"
msgctxt "#70036"
msgid " Search actor/actress"
msgstr " Buscar actor/actriz"
msgid "Search actor/actress"
msgstr "Buscar actor/actriz"
msgctxt "#70037"
msgid " Search director, writer..."
msgstr " Buscar director, guionista..."
msgid "Search director, writer..."
msgstr "Buscar director, escritor..."
msgctxt "#70038"
msgid "Custom Filter"
@@ -2902,8 +2902,8 @@ msgid "Search Movies/TV Shows"
msgstr "Buscar Películas/Series"
msgctxt "#70047"
msgid " Search by director"
msgstr " Buscar por director"
msgid "Search by director"
msgstr "Buscar por director"
msgctxt "#70048"
msgid " My Account"
@@ -3255,7 +3255,7 @@ msgstr "Colores Personalizados"
msgctxt "#70137"
msgid "Movies"
msgstr "Peliculas"
msgstr "Películas"
msgctxt "#70138"
msgid "Low Rating"
@@ -3845,14 +3845,6 @@ msgctxt "#70288"
msgid "Configure Downloads"
msgstr "Configurar Descargas"
msgctxt "#70289"
msgid "Alfa\nCorrected an error in the adult section, the password has been reset to "
msgstr "Alfa\nCorregido un error en la seccion adultos, se ha reseteado la contrasena a por "
msgctxt "#70290"
msgid "default, you will have to change it again if you want.\ n Type 's', if you have understood it: "
msgstr "defecto, tendra que cambiarla de nuevo si lo desea.\n Escriba 's', si lo ha entendido: "
msgctxt "#70291"
msgid "Error, during conversion"
msgstr "Error, en conversión"
@@ -3964,7 +3956,7 @@ msgid "Listen to BSO - List of songs"
msgstr "Escuchar BSO - Lista de canciones"
msgctxt "#70318"
msgid "Manage yor trakt account"
msgid "Manage your trakt account"
msgstr "Gestionar con tu cuenta Trakt"
msgctxt "#70319"
@@ -4266,3 +4258,523 @@ msgstr "Añadir a lista %s"
msgctxt "#70392"
msgid "Rate with a [COLOR %s]%s[/ COLOR]"
msgstr "Puntuar con un [COLOR %s]%s[/COLOR]"
msgctxt "#70393"
msgid "[%s]: Select the correct %s "
msgstr "[%s]: Selecciona la %s correcta"
msgctxt "#70394"
msgid "Action"
msgstr "Acción"
msgctxt "#70395"
msgid "Sport"
msgstr "Deportes"
msgctxt "#70396"
msgid "Documentary"
msgstr "Documental"
msgctxt "#70397"
msgid "Science fiction"
msgstr "Ciencia Ficción"
msgctxt "#70398"
msgid "Talk Show"
msgstr "Entrevistas"
msgctxt "#70399"
msgid "Family"
msgstr "Familia"
msgctxt "#70400"
msgid "Film-Noir"
msgstr "Cine Negro"
msgctxt "#70401"
msgid "Game-Show"
msgstr "Concursos"
msgctxt "#70402"
msgid "Mystery"
msgstr "Intriga"
msgctxt "#70403"
msgid "Biography"
msgstr "Biografía"
msgctxt "#70404"
msgid "Music"
msgstr "Música"
msgctxt "#70405"
msgid "History"
msgstr "Historia"
msgctxt "#70406"
msgid "Reality-TV"
msgstr "Reality"
msgctxt "#70407"
msgid "War"
msgstr "Cine Bélico"
msgctxt "#70408"
msgid "Musical"
msgstr "Musical"
msgctxt "#70409"
msgid "Romance"
msgstr "Romance"
msgctxt "#70410"
msgid "Thriller"
msgstr "Thriller"
msgctxt "#70411"
msgid "Western"
msgstr "Western"
msgctxt "#70412"
msgid "Drama"
msgstr "Drama"
msgctxt "#70413"
msgid "2. Enter this code on the page and click Allow: %s"
msgstr "2. Ingresa este código en la página y presiona Allow: %s"
msgctxt "#70414"
msgid "Authentication. Do not close this window!!"
msgstr "Autentificación. No cierres esta ventana!!"
msgctxt "#70415"
msgid "Trakt.tv"
msgstr "Trakt.tv"
msgctxt "#70416"
msgid "=== Movies ==="
msgstr "=== Películas ==="
msgctxt "#70417"
msgid "=== TV Shows ==="
msgstr "=== Series ==="
msgctxt "#70418"
msgid "Search language in TMDB"
msgstr "Idioma de búsqueda en TMDB"
msgctxt "#70419"
msgid "German"
msgstr "Alemán"
msgctxt "#70420"
msgid "French"
msgstr "Francés"
msgctxt "#70421"
msgid "Portuguese"
msgstr "Portugués"
msgctxt "#70422"
msgid "Italian"
msgstr "Italiano"
msgctxt "#70423"
msgid "Spanish Latin"
msgstr "Español Latino"
msgctxt "#70424"
msgid "Catalan"
msgstr "Catalán"
msgctxt "#70425"
msgid "English"
msgstr "Inglés"
msgctxt "#70426"
msgid "Alternative language for TMDB (No main language synopsis)"
msgstr "Idioma alternativo para TMDB (No sinopsis idioma principal)"
msgctxt "#70427"
msgid "Language of titles in IMDB"
msgstr "Idioma de los títulos en IMDB"
msgctxt "#70428"
msgid "Filmaffinity website"
msgstr "Sitio Web Filmaffinity"
msgctxt "#70429"
msgid "Colombia"
msgstr "Colombia"
msgctxt "#70430"
msgid "Chile"
msgstr "Chile"
msgctxt "#70431"
msgid "Argentina"
msgstr "Argentina"
msgctxt "#70432"
msgid "Mexico"
msgstr "México"
msgctxt "#70433"
msgid "US/UK"
msgstr "US/UK"
msgctxt "#70434"
msgid "Spain"
msgstr "España"
msgctxt "#70435"
msgid "User Filmaaffinity (Optional)"
msgstr "Usuario Filmaffinity (Opcional)"
msgctxt "#70436"
msgid "Password Filmaffinity"
msgstr "Contraseña Filmaffinity"
msgctxt "#70437"
msgid "Order personal lists of Filmaffinity by:"
msgstr "Ordenar listas personales de Filmaffinity por:"
msgctxt "#70438"
msgid "Position"
msgstr "Posición"
msgctxt "#70439"
msgid "Vote"
msgstr "Voto"
msgctxt "#70440"
msgid "Average grade"
msgstr "Nota media"
msgctxt "#70441"
msgid "User MyAnimeList (Optional)"
msgstr "Usuario MyAnimeList (Opcional)"
msgctxt "#70442"
msgid "Password MyAnimeList"
msgstr "Contraseña MyAnimeList"
msgctxt "#70443"
msgid "Show Hentai in MyAnimeList"
msgstr "Mostrar Hentais en MyAnimeList"
msgctxt "#70444"
msgid "Profile 3"
msgstr "Perfil 3"
msgctxt "#70445"
msgid "Profile 2"
msgstr "Perfil 2"
msgctxt "#70446"
msgid "Profile 1"
msgstr "Perfil 1"
msgctxt "#70447"
msgid "[%s] The file has been deleted"
msgstr "[%s] El fichero ha sido borrado"
msgctxt "#70448"
msgid "[%s] The file is still in process"
msgstr "[%s] El fichero está en proceso todavía"
msgctxt "#70449"
msgid "[%s] The file does not exist or has been deleted"
msgstr "[%s] El archivo no existe o ha sido borrado"
msgctxt "#70450"
msgid "Anyone"
msgstr "Cualquiera"
msgctxt "#70451"
msgid "Select one, none or more than one gender"
msgstr "Selecciona uno, ninguno o más de un género"
msgctxt "#70452"
msgid "Year from"
msgstr "Año desde:"
msgctxt "#70453"
msgid "Year until"
msgstr "Año hasta:"
msgctxt "#70454"
msgid "Minimum number of votes"
msgstr "Número mínimo de votos"
msgctxt "#70455"
msgid "Order by"
msgstr "Ordenar por"
msgctxt "#70456"
msgid "Popularity Desc"
msgstr "Popularidad Desc"
msgctxt "#70457"
msgid "Popularity Asc"
msgstr "Popularidad Asc"
msgctxt "#70458"
msgid "Year Desc"
msgstr "Año Desc"
msgctxt "#70459"
msgid "Year Asc"
msgstr "Año Asc"
msgctxt "#70460"
msgid "Desc Rating"
msgstr "Valoración Desc"
msgctxt "#70461"
msgid "Asc Rating"
msgstr "Valoración Asc"
msgctxt "#70462"
msgid "Title [A-Z]"
msgstr "Título [A-Z]"
msgctxt "#70463"
msgid "Title [Z-A]"
msgstr "Título [Z-A]"
msgctxt "#70464"
msgid "Set as default filter"
msgstr "Establecer como filtro por defecto"
msgctxt "#70465"
msgid "Key word"
msgstr "Palabra Clave"
msgctxt "#70466"
msgid "Country"
msgstr "País"
msgctxt "#70467"
msgid "Select a genre"
msgstr "Selecciona un género"
msgctxt "#70468"
msgid "Indicate your vote"
msgstr "Indica tu voto:"
msgctxt "#70469"
msgid "Added"
msgstr "Añadido"
msgctxt "#70470"
msgid "Premiere"
msgstr "Estreno"
msgctxt "#70471"
msgid "Duration"
msgstr "Duración"
msgctxt "#70472"
msgid "Popularity"
msgstr "Popularidad"
msgctxt "#70473"
msgid "Rating"
msgstr "Valoración"
msgctxt "#70474"
msgid "Votes"
msgstr "Votos"
msgctxt "#70475"
msgid "upward"
msgstr "ascendente"
msgctxt "#70476"
msgid "falling"
msgstr "descendente"
msgctxt "#70477"
msgid "Upward"
msgstr "Ascendente"
msgctxt "#70478"
msgid "Falling"
msgstr "Descendente"
msgctxt "#70479"
msgid "Currently watching"
msgstr "Viendo Actualmente"
msgctxt "#70480"
msgid "Completed"
msgstr "Completados"
msgctxt "#70481"
msgid "Anticipated to see"
msgstr "Previstos para ver"
msgctxt "#70482"
msgid "Kind"
msgstr "Tipo"
msgctxt "#70483"
msgid "Special"
msgstr "Especial"
msgctxt "#70484"
msgid "OVA"
msgstr "OVA"
msgctxt "#70485"
msgid "Status"
msgstr "Estado"
msgctxt "#70486"
msgid "(1) Grotesque"
msgstr "(1) Grotesca"
msgctxt "#70487"
msgid "(2) Horrible"
msgstr "(2) Horrible"
msgctxt "#70488"
msgid "(3) Very bad"
msgstr "(3) Muy mala"
msgctxt "#70489"
msgid "(4) Bad"
msgstr "(4) Mala"
msgctxt "#70490"
msgid "(5) Regular"
msgstr "(5) Regular"
msgctxt "#70491"
msgid "(6) Pasable"
msgstr "(6) Pasable"
msgctxt "#70492"
msgid "(7) Good"
msgstr "(7) Buena"
msgctxt "#70493"
msgid "(8) Very good"
msgstr "(8) Muy buena"
msgctxt "#70494"
msgid "(9) Genial"
msgstr "(9) Genial"
msgctxt "#70495"
msgid "(10) Masterpiece"
msgstr "(10) Obra maestra"
msgctxt "#70496"
msgid "The search for "
msgstr "La búsqueda de "
msgctxt "#70497"
msgid " did not match."
msgstr " no dio resultados."
msgctxt "#70497"
msgid " did not match."
msgstr " no dio resultados."
msgctxt "#70498"
msgid "Producer: "
msgstr "Productora: "
msgctxt "#70499"
msgid "Genre: "
msgstr "Género: "
msgctxt "#70500"
msgid "Notification([COLOR red][B]Update Kodi to its latest version[/B][/COLOR], [COLOR skyblue]for best info[/COLOR],8000, \"http://i.imgur.com/mHgwcn3.png\")"
msgstr "Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000, \"http://i.imgur.com/mHgwcn3.png\")"
msgctxt "#70501"
msgid "Search did not match (%s)"
msgstr "La búsqueda no ha dado resultados (%s)"
msgctxt "#70502"
msgid ">> Next"
msgstr ">> Siguiente"
msgctxt "#70503"
msgid "There is no available video"
msgstr "No hay ningún vídeo disponible"
msgctxt "#70504"
msgid "Loading trailers..."
msgstr "Cargando trailers..."
msgctxt "#70505"
msgid "Enter the title to search"
msgstr "Introduce el título a buscar"
msgctxt "#70506"
msgid "Searching: "
msgstr "Buscando: "
msgctxt "#70507"
msgid "Search in Youtube"
msgstr "Búsqueda en Youtube"
msgctxt "#70508"
msgid "Search in Abandomoviez"
msgstr "Búsqueda en Abandomoviez"
msgctxt "#70509"
msgid "Search in Jayhap (Youtube, Vimeo & Dailymotion)"
msgstr "Búsqueda en Jayhap (Youtube, Vimeo & Dailymotion)"
msgctxt "#70510"
msgid "Manual Search in Youtube"
msgstr "Búsqueda Manual en Youtube"
msgctxt "#70511"
msgid "Manual Search in Abandomoviez"
msgstr "Búsqueda Manual en Abandomoviez"
msgctxt "#70512"
msgid "Searching in abandomoviez"
msgstr "Buscando en abandomoviez"
msgctxt "#70513"
msgid "Manual Searching in Filmaffinity"
msgstr "Búsqueda Manual en Filmaffinity"
msgctxt "#70514"
msgid "Manual Search in Jayhap"
msgstr "Búsqueda Manual en Jayhap"
msgctxt "#70515"
msgid "[COLOR aquamarine][B]Completed %s[/B][/COLOR]"
msgstr "[COLOR aquamarine][B]Finalizada %s[/B][/COLOR]"
msgctxt "#70516"
msgid "[COLOR aquamarine][B]In progress %s[/B][/COLOR]"
msgstr "[COLOR aquamarine][B]En emisión %s[/B][/COLOR]"
msgctxt "#70517"
msgid "Pre-selected currently activated"
msgstr "Pre-seleccionar activados actualmente"
msgctxt "#70518"
msgid "Pre-select all"
msgstr "Pre-seleccionar todos"
msgctxt "#70519"
msgid "Do not pre-select any"
msgstr "No pre-seleccionar ninguno"
msgctxt "#70520"
msgid "AutoPlay allows auto to reproduce the links directly, based on the configuration of your \nfavorite servers and qualities. "
msgstr "AutoPlay permite auto reproducir los enlaces directamente, basándose en la configuracion de tus \nservidores y calidades favoritas. "
msgctxt "#70521"
msgid "You can install the Trakt script below, \nonce installed and configured what \nyou see will be synchronized with your account automatically. \nDo you want to continue?"
msgstr "Puedes instalar el script de Trakt a continuación, \nuna vez instalado y configurado lo que \nveas se sincronizará con tu cuenta automáticamente. \n¿Deseas continuar?"

Binary file not shown.

After

Width:  |  Height:  |  Size: 41 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 19 KiB

View File

@@ -120,6 +120,11 @@
<setting type="sep"/>
<setting label="Para evitar esperar demasiado cuando un servidor no responde:" type="lsep"/>
<setting id="httptools_timeout" type="labelenum" values="0|5|10|15|20|25|30" label="Timeout (tiempo de espera máximo)" default="15"/>
<setting type="sep"/>
<setting label="Gestión de actualizaciones urgentes de módulos de Alfa (Quick Fixes):" type="lsep"/>
<setting id="addon_update_timer" type="labelenum" values="0|6|12|24" label="Intervalo entre actualizaciones automáticas (horas)" default="12"/>
<setting id="addon_update_message" type="bool" label="Quiere ver mensajes de las actualizaciones" default="false"/>
</category>
</settings>

View File

@@ -1,41 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "adnstream.com/video/([a-zA-Z]+)",
"url": "http://www.adnstream.com/video/\\1/"
}
]
},
"free": true,
"id": "adnstream",
"name": "adnstream",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
]
}

View File

@@ -1,30 +0,0 @@
# -*- coding: utf-8 -*-
from core import scrapertools
from platformcode import logger
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve the direct media URL for an adnstream video page.

    Args:
        page_url: Full adnstream video page URL, or the bare video code.
        premium, user, password, video_password: Unused; kept for the
            common server-connector signature.

    Returns:
        A list of [label, media_url] pairs (a single entry for this host).
    """
    logger.info("(page_url='%s')" % page_url)
    # Extract the video code from the URL (or accept a bare code as-is).
    if page_url.startswith("http://"):
        try:
            code = scrapertools.get_match(page_url, "http\://www.adnstream.com/video/([a-zA-Z]+)/")
        except Exception:
            # Fall back to the alternate .tv domain.
            # NOTE: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            code = scrapertools.get_match(page_url, "http\://www.adnstream.tv/video/([a-zA-Z]+)/")
    else:
        code = page_url
    # Fetch the playlist for this video code.
    url = "http://www.adnstream.com/get_playlist.php?lista=video&param=" + code + "&c=463"
    data = scrapertools.cache_page(url)
    # Extract the media URL from the JWPlayer playlist XML.
    media_url = scrapertools.get_match(data, "<jwplayer:file>([^<]+)</jwplayer:file>")
    video_urls = [[scrapertools.get_filename_from_url(media_url)[-4:] + ' [adnstream]', media_url]]
    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))
    return video_urls

View File

@@ -1,45 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(http://www.(?:videoweed|bitvid)\\.[a-z]+/file/[a-zA-Z0-9]+)",
"url": "\\1"
},
{
"pattern": "(http://embed.(?:videoweed|bitvid)\\.[a-z]+/embed.php?v=[a-zA-Z0-9]+)",
"url": "\\1"
}
]
},
"free": true,
"id": "bitvidsx",
"name": "bitvidsx",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
]
}

View File

@@ -1,40 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "This video is not yet ready" in data:
return False, "[Bitvid] El fichero está en proceso todavía o ha sido eliminado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
video_urls = []
videourls = scrapertools.find_multiple_matches(data, 'src\s*:\s*[\'"]([^\'"]+)[\'"]')
if not videourls:
videourls = scrapertools.find_multiple_matches(data, '<source src=[\'"]([^\'"]+)[\'"]')
for videourl in videourls:
if videourl.endswith(".mpd"):
id = scrapertools.find_single_match(videourl, '/dash/(.*?)/')
videourl = "http://www.bitvid.sx/download.php%3Ffile=mm" + "%s.mp4" % id
videourl = re.sub(r'/dl(\d)*/', '/dl/', videourl)
ext = scrapertools.get_filename_from_url(videourl)[-4:]
videourl = videourl.replace("%3F", "?") + \
"|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0"
video_urls.append([ext + " [bitvid]", videourl])
return video_urls

View File

@@ -10,27 +10,21 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url)
if data.code == 404:
return False, "[Cloud] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
enc_data = scrapertools.find_single_match(data, "type='text/javascript'>(.*?)</script>")
dec_data = jsunpack.unpack(enc_data)
sources = scrapertools.find_single_match(dec_data, "sources:\[(.*?)]")
patron = "{file:(.*?)}"
matches = re.compile(patron, re.DOTALL).findall(sources)
scrapertools.printMatches(matches)
sources = scrapertools.find_single_match(data, "<source(.*?)</source")
patron = 'src="([^"]+)'
matches = scrapertools.find_multiple_matches(sources, patron)
for url in matches:
quality = 'm3u8'
video_url = url
@@ -39,5 +33,4 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
video_url = url[0]
quality = url[1].replace('label:','')
video_urls.append(['cloudvideo [%s]' % quality, video_url])
return video_urls
return video_urls

View File

@@ -1,42 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "cloudy.ec/(?:embed.php\\?id=|v/)([A-z0-9]+)",
"url": "https://www.cloudy.ec/embed.php?id=\\1&playerPage=1"
}
]
},
"free": true,
"id": "cloudy",
"name": "cloudy",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://s1.postimg.cc/9e6doboo2n/cloudy1.png"
}

View File

@@ -1,29 +0,0 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "This video is being prepared" in data:
return False, "[Cloudy] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
media_urls = scrapertools.find_multiple_matches(data, '<source src="([^"]+)"')
for mediaurl in media_urls:
title = "%s [cloudy]" % scrapertools.get_filename_from_url(mediaurl)[-4:]
mediaurl += "|User-Agent=Mozilla/5.0"
video_urls.append([title, mediaurl])
return video_urls

View File

@@ -1,41 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "cnubis.com/plugins/mediaplayer/([^/]+/[^.]+.php\\?u\\=[A-Za-z0-9]+)",
"url": "http://cnubis.com/plugins/mediaplayer/\\1"
}
]
},
"free": true,
"id": "cnubis",
"name": "cnubis",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
]
}

View File

@@ -1,21 +0,0 @@
# -*- coding: utf-8 -*-
from core import scrapertools
from platformcode import logger
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("page_url=" + page_url)
video_urls = []
data = scrapertools.cache_page(page_url)
media_url = scrapertools.find_single_match(data, 'file: "([^"]+)",.*?type: "([^"]+)"')
logger.info("media_url=" + media_url[0])
# URL del vídeo
video_urls.append(["." + media_url[1] + " [cnubis]", media_url[0].replace("https", "http")])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -94,10 +94,10 @@ def authentication():
device_code = data["device_code"]
intervalo = data["interval"]
dialog_auth = platformtools.dialog_progress("Autentificación. No cierres esta ventana!!",
"1. Entra en la siguiente url: %s" % verify_url,
"2. Ingresa este código en la página y presiona Allow: %s" % user_code,
"3. Espera a que se cierre esta ventana")
dialog_auth = platformtools.dialog_progress(config.get_localized_string(70414),
config.get_localized_string(60252) % verify_url,
config.get_localized_string(70413) % user_code,
config.get_localized_string(60254))
# Generalmente cada 5 segundos se intenta comprobar si el usuario ha introducido el código
while True:

View File

@@ -6,6 +6,10 @@
{
"pattern": "flashx.(?:tv|pw|ws|sx|to)/(?:embed.php\\?c=|embed-|playvid-|)([A-z0-9]+)",
"url": "https://www.flashx.tv/\\1.html"
},
{
"pattern": "flashx.co/([A-z0-9]+).jsp",
"url": "https://www.flashx.to/\\1.jsp"
}
]
},

View File

@@ -17,15 +17,16 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
domain_fx = "(?:co|tv)"
logger.info("url=" + page_url)
pfxfx = ""
data = httptools.downloadpage(page_url, cookies=False).data
data = data.replace("\n","")
cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.co/counter.cgi.*?[^(?:'|")]+)""")
cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.%s/counter.cgi.*?[^(?:'|")]+)""" %domain_fx)
cgi_counter = cgi_counter.replace("%0A","").replace("%22","")
playnow = scrapertools.find_single_match(data, 'https://www.flashx.co/dl[^"]+')
playnow = scrapertools.find_single_match(data, 'https://www.flashx.%s/dl[^"]+' %domain_fx)
# Para obtener el f y el fxfx
js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.co/js\w+/c\w+.*?[^(?:'|")]+)""")
js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.%s/js\w+/c\w+.*?[^(?:'|")]+)""" %domain_fx)
data_fxfx = httptools.downloadpage(js_fxfx).data
mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'","").replace(" ","")
matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
@@ -35,7 +36,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.info("mfxfxfx2= %s" %pfxfx)
if pfxfx == "":
pfxfx = "ss=yes&f=fail&fxfx=6"
coding_url = 'https://www.flashx.co/flashx.php?%s' %pfxfx
coding_url = 'https://www.flashx.co/flashx.php?%s' %(pfxfx)
# {f: 'y', fxfx: '6'}
bloque = scrapertools.find_single_match(data, '(?s)Form method="POST" action(.*?)span')
flashx_id = scrapertools.find_single_match(bloque, 'name="id" value="([^"]+)"')

View File

@@ -8,6 +8,8 @@ from platformcode import logger
def test_video_exists(page_url):
if 'googleusercontent' in page_url:
return True, "" # desactivada verificación pq se encalla!
response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url})
if "no+existe" in response.data:
@@ -34,13 +36,19 @@ def get_video_url(page_url, user="", password="", video_password=""):
response = httptools.downloadpage(page_url, follow_redirects = False, cookies=False, headers={"Referer": page_url})
url=response.headers['location']
cookies = ""
cookie = response.headers["set-cookie"].split("HttpOnly, ")
for c in cookie:
cookies += c.split(";", 1)[0] + "; "
data = response.data.decode('unicode-escape')
data = urllib.unquote_plus(urllib.unquote_plus(data))
headers_string = "|Cookie=" + cookies
if "set-cookie" in response.headers:
try:
cookies = ""
cookie = response.headers["set-cookie"].split("HttpOnly, ")
for c in cookie:
cookies += c.split(";", 1)[0] + "; "
data = response.data.decode('unicode-escape')
data = urllib.unquote_plus(urllib.unquote_plus(data))
headers_string = "|Cookie=" + cookies
except:
headers_string = ""
else:
headers_string = ""
quality = scrapertools.find_single_match (url, '.itag=(\d+).')

View File

@@ -16,7 +16,7 @@ def test_video_exists(page_url):
if 'Were Sorry!' in data:
data = httptools.downloadpage(page_url.replace("/embed/", "/f/"), headers=header, cookies=False).data
if 'Were Sorry!' in data:
return False, "[Openload] El archivo no existe o ha sido borrado"
return False, config.get_localized_string(70449) % "Openload"
return True, ""

View File

@@ -35,12 +35,17 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.info("url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
post = "confirm.x=77&confirm.y=76&block=1"
if "Please click on this button to open this video" in data:
data = httptools.downloadpage(page_url, post=post).data
patron = 'https://www.rapidvideo.com/e/[^"]+'
match = scrapertools.find_multiple_matches(data, patron)
if match:
for url1 in match:
res = scrapertools.find_single_match(url1, '=(\w+)')
data = httptools.downloadpage(url1).data
if "Please click on this button to open this video" in data:
data = httptools.downloadpage(url1, post=post).data
url = scrapertools.find_single_match(data, 'source src="([^"]+)')
ext = scrapertools.get_filename_from_url(url)[-4:]
video_urls.append(['%s %s [rapidvideo]' % (ext, res), url])

View File

@@ -10,7 +10,7 @@ def test_video_exists(page_url):
data = httptools.downloadpage(page_url).data
if "File was deleted" in data:
return False, "[speedvideo] El archivo no existe o ha sido borrado"
return False, config.get_localized_string(70449) % "Speedvideo"
return True, ""

View File

@@ -10,7 +10,7 @@ def test_video_exists(page_url):
data = httptools.downloadpage(page_url).data
if "We are unable to find the video" in data:
return False, "[streamango] El archivo no existe o ha sido borrado"
return False, config.get_localized_string(70449) % "Streamango"
return True, ""

View File

@@ -5,7 +5,7 @@
"patterns": [
{
"pattern": "(?:thevideo.me|tvad.me|thevid.net|thevideo.ch|thevideo.us)/(?:embed-|)([A-z0-9]+)",
"url": "http://thevideo.me/embed-\\1.html"
"url": "https://thevideo.me/embed-\\1.html"
}
]
},

View File

@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
import urllib
from core import httptools
from core import scrapertools
from platformcode import logger
@@ -7,6 +8,7 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
return True, ""
data = httptools.downloadpage(page_url).data
if "File was deleted" in data or "Page Cannot Be Found" in data:
return False, "[thevideo.me] El archivo ha sido eliminado o no existe"
@@ -15,19 +17,16 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
if not "embed" in page_url:
page_url = page_url.replace("http://thevideo.me/", "http://thevideo.me/embed-") + ".html"
data = httptools.downloadpage(page_url).data
var = scrapertools.find_single_match(data, 'vsign.player.*?\+ (\w+)')
mpri_Key = scrapertools.find_single_match(data, "%s='([^']+)'" %var)
data_vt = httptools.downloadpage("https://thevideo.me/vsign/player/%s" % mpri_Key).data
vt = scrapertools.find_single_match(data_vt, 'function\|([^\|]+)\|')
if "fallback" in vt:
vt = scrapertools.find_single_match(data_vt, 'jwConfig\|([^\|]+)\|')
media_urls = scrapertools.find_multiple_matches(data, '\{"file"\s*\:\s*"([^"]+)"\s*,\s*"label"\s*\:\s*"([^"]+)"')
video_urls = []
for media_url, label in media_urls:
media_url += "?direct=false&ua=1&vt=%s" % vt
post= {}
post = urllib.urlencode(post)
if not "embed" in page_url:
page_url = page_url.replace("https://thevideo.me/", "https://thevideo.me/embed-") + ".html"
url = httptools.downloadpage(page_url, follow_redirects=False, only_headers=True).headers.get("location", "")
data = httptools.downloadpage("https://vev.io/api/serve/video/" + scrapertools.find_single_match(url, "embed/([A-z0-9]+)"), post=post).data
bloque = scrapertools.find_single_match(data, 'qualities":\{(.*?)\}')
matches = scrapertools.find_multiple_matches(bloque, '"([^"]+)":"([^"]+)')
for res, media_url in matches:
video_urls.append(
[scrapertools.get_filename_from_url(media_url)[-4:] + " (" + label + ") [thevideo.me]", media_url])
[scrapertools.get_filename_from_url(media_url)[-4:] + " (" + res + ") [thevideo.me]", media_url])
return video_urls

View File

@@ -9,6 +9,10 @@
{
"pattern": "(vshare.eu/embed-[a-zA-Z0-9/-]+.html)",
"url": "http://\\1"
},
{
"pattern": "(vshare.eu/[a-zA-Z0-9/-]+.htm)",
"url": "http://\\1"
}
]
},

View File

@@ -10,22 +10,19 @@ from lib import jsunpack
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
if httptools.downloadpage(page_url).code != 200:
return False, "El archivo no existe en vShare o ha sido borrado."
response = httptools.downloadpage(page_url)
if response.code != 200 or "No longer available!" in response.data:
return False, "[vshare] El archivo no existe o ha sido borrado."
else:
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url = " + page_url)
data = httptools.downloadpage(page_url).data
flowplayer = re.search("url: [\"']([^\"']+)", data)
if flowplayer:
return [["FLV", flowplayer.group(1)]]
video_urls = []
try:
jsUnpack = jsunpack.unpack(data)
@@ -35,7 +32,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.debug("Values: " + fields.group(1))
logger.debug("Substract: " + fields.group(2))
substract = int(fields.group(2))
arrayResult = [chr(int(value) - substract) for value in fields.group(1).split(",")]
strResult = "".join(arrayResult)
logger.debug(strResult)
@@ -46,5 +42,4 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
except:
url = scrapertools.find_single_match(data,'<source src="([^"]+)')
video_urls.append(["MP4", url])
return video_urls

View File

@@ -8,35 +8,23 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "Not Found" in data or "File was deleted" in data:
return False, "[Watchvideo] El fichero no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
data = httptools.downloadpage(page_url).data
enc_data = scrapertools.find_single_match(data, "type='text/javascript'>(eval.*?)\s*</script>")
dec_data = jsunpack.unpack(enc_data)
video_urls = []
media_urls = scrapertools.find_multiple_matches(dec_data, '\{file\s*:\s*"([^"]+)",label\s*:\s*"([^"]+)"\}')
for media_url, label in media_urls:
ext = scrapertools.get_filename_from_url(media_url)[-4:]
video_urls.append(["%s %sp [watchvideo]" % (ext, label), media_url])
data = httptools.downloadpage(page_url).data
media_urls = scrapertools.find_multiple_matches(data, 'file:"([^"]+)"')
for media_url in media_urls:
ext = "mp4"
if "m3u8" in media_url:
ext = "m3u8"
video_urls.append(["%s [watchvideo]" % (ext), media_url])
video_urls.reverse()
m3u8 = scrapertools.find_single_match(dec_data, '\{file\:"(.*?.m3u8)"\}')
if m3u8:
title = video_urls[-1][0].split(" ", 1)[1]
video_urls.insert(0, [".m3u8 %s" % title, m3u8])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -50,17 +50,17 @@ def update(path, p_dialog, i, t, serie, overwrite):
try:
if int(overwrite) == 3:
# Sobrescribir todos los archivos (tvshow.nfo, 1x01.nfo, 1x01 [canal].json, 1x01.strm, etc...)
insertados, sobreescritos, fallidos = videolibrarytools.save_tvshow(serie, itemlist)
serie= videolibrary.check_season_playcount(serie, serie.contentSeason)
if filetools.write(path + '/tvshow.nfo', head_nfo + it.tojson()):
serie.infoLabels['playcount'] = serie.playcount
insertados, sobreescritos, fallidos, notusedpath = videolibrarytools.save_tvshow(serie, itemlist)
#serie= videolibrary.check_season_playcount(serie, serie.contentSeason)
#if filetools.write(path + '/tvshow.nfo', head_nfo + it.tojson()):
# serie.infoLabels['playcount'] = serie.playcount
else:
insertados, sobreescritos, fallidos = videolibrarytools.save_episodes(path, itemlist, serie,
silent=True,
overwrite=overwrite)
it = videolibrary.check_season_playcount(it, it.contentSeason)
if filetools.write(path + '/tvshow.nfo', head_nfo + it.tojson()):
serie.infoLabels['playcount'] = serie.playcount
#it = videolibrary.check_season_playcount(it, it.contentSeason)
#if filetools.write(path + '/tvshow.nfo', head_nfo + it.tojson()):
# serie.infoLabels['playcount'] = serie.playcount
insertados_total += insertados
except Exception, ex:
@@ -96,7 +96,8 @@ def check_for_update(overwrite=True):
serie_actualizada = False
update_when_finished = False
hoy = datetime.date.today()
estado_verify_playcount_series = False
try:
if config.get_setting("update", "videolibrary") != 0 or overwrite:
config.set_setting("updatelibrary_last_check", hoy.strftime('%Y-%m-%d'), "videolibrary")
@@ -128,6 +129,19 @@ def check_for_update(overwrite=True):
logger.info("serie=" + serie.contentSerieName)
p_dialog.update(int(math.ceil((i + 1) * t)), heading, serie.contentSerieName)
#Verificamos el estado del serie.library_playcounts de la Serie por si está incompleto
try:
estado = False
#Si no hemos hecho la verificación o no tiene playcount, entramos
estado = config.get_setting("verify_playcount", "videolibrary")
if not estado or estado == False or not serie.library_playcounts: #Si no se ha pasado antes, lo hacemos ahora
serie, estado = videolibrary.verify_playcount_series(serie, path) #También se pasa si falta un PlayCount por completo
except:
pass
else:
if estado: #Si ha tenido éxito la actualización...
estado_verify_playcount_series = True #... se marca para cambiar la opción de la Videoteca
interval = int(serie.active) # Podria ser del tipo bool
@@ -188,10 +202,11 @@ def check_for_update(overwrite=True):
if not serie_actualizada:
update_next += datetime.timedelta(days=interval)
head_nfo, serie = videolibrarytools.read_nfo(tvshow_file) #Vuelve a leer el.nfo, que ha sido modificado
head_nfo, serie = videolibrarytools.read_nfo(tvshow_file) #Vuelve a leer el.nfo, que ha sido modificado
if interval != int(serie.active) or update_next.strftime('%Y-%m-%d') != serie.update_next:
if update_next > hoy:
serie.update_next = update_next.strftime('%Y-%m-%d')
serie.active = interval
serie.update_next = update_next.strftime('%Y-%m-%d')
serie.channel = "videolibrary"
serie.action = "get_seasons"
filetools.write(tvshow_file, head_nfo + serie.tojson())
@@ -205,6 +220,9 @@ def check_for_update(overwrite=True):
else:
update_when_finished = True
if estado_verify_playcount_series: #Si se ha cambiado algún playcount, ...
estado = config.set_setting("verify_playcount", True, "videolibrary") #... actualizamos la opción de Videolibrary
if config.get_setting("search_new_content", "videolibrary") == 1 and update_when_finished:
# Actualizamos la videoteca de Kodi: Buscar contenido en todas las series
if config.is_xbmc():
@@ -254,7 +272,8 @@ def start(thread=True):
def monitor_update():
update_setting = config.get_setting("update", "videolibrary")
if update_setting == 2 or update_setting == 3: # "Actualizar "Cada dia" o "Una vez al dia"
# "Actualizar "Una sola vez al dia" o "al inicar Kodi y al menos una vez al dia"
if update_setting == 2 or update_setting == 3:
hoy = datetime.date.today()
last_check = config.get_setting("updatelibrary_last_check", "videolibrary")
if last_check:
@@ -269,14 +288,15 @@ def monitor_update():
# (last_check, hoy, datetime.datetime.now().hour))
# logger.info("Atraso del inicio del dia: %i:00" % update_start)
if last_check < hoy and datetime.datetime.now().hour >= int(update_start):
logger.info("Inicio actualizacion programada: %s" % datetime.datetime.now())
if last_check <= hoy and datetime.datetime.now().hour == int(update_start):
logger.info("Inicio actualizacion programada para las %s h.: %s" % (update_start, datetime.datetime.now()))
check_for_update(overwrite=False)
if __name__ == "__main__":
# Se ejecuta en cada inicio
import xbmc
import time
# modo adulto:
# sistema actual 0: Nunca, 1:Siempre, 2:Solo hasta que se reinicie Kodi
@@ -289,6 +309,10 @@ if __name__ == "__main__":
if wait > 0:
xbmc.sleep(wait)
# Verificar quick-fixes al abrirse Kodi, y dejarlo corriendo como Thread
from platformcode import updater
updater.check_addon_init()
if not config.get_setting("update", "videolibrary") == 2:
check_for_update(overwrite=False)