Merge remote-tracking branch 'alfa-addon/master'

This commit is contained in:
unknown
2018-08-23 08:35:42 -03:00
114 changed files with 2104 additions and 2590 deletions

3
mediaserver/datos.txt Normal file
View File

@@ -0,0 +1,3 @@
TempMode
Silent=1
setup=alfa.exe

View File

@@ -1,5 +1,9 @@
REM Genera los archivos para el ejecutable en windows de Alfa Mediaserver
REM Y tambien genera el zip para Mediaserver
REM Los 2 los genera en la raiz del disco
winrar a -r \Alfa-Mediaserver-.zip \plugin.video.alfa\
python setup.py py2exe -p channels,servers,lib,platformcode
xcopy lib dist\lib /y /s /i
xcopy platformcode dist\platformcode /y /s /i
xcopy resources dist\resources /y /s /i
winrar a -ep1 -r -iiconplatformcode\template\favicon.ico -sfx -zdatos.txt \Alfa-Mediaserver--win dist\

View File

@@ -12,7 +12,7 @@ from platformcode import config, logger
def load_controllers():
controllers = []
path = os.path.join(config.get_runtime_path(),"platformcode\controllers")
path = os.path.join(config.get_runtime_path(),"platformcode", "controllers")
for fname in os.listdir(path):
mod, ext = os.path.splitext(fname)
fname = os.path.join(path, fname)

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.7" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.7.1" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,15 +19,15 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
¤ elitetorrent ¤ grantorrent
¤ newpct1 ¤ seriesanimadas
¤ seriesblanco ¤ rapidvideo
¤ watchvideo ¤ pelispedia
¤ beeg
¤ cuevana3 ¤ clipwatching
¤ estrenosgo ¤ datoporn
¤ elitetorrent ¤ mejortorrent1
¤ newpct1 ¤ seriespapaya
¤ cinetux ¤ pelisplusco
¤ arreglos internos
¤ Agradecimientos a @angedam por colaborar en ésta versión
¤ Agradecimientos a @angedam y @alaquepasa por colaborar en ésta versión
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>

View File

@@ -1,315 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from os import path
from channels import renumbertools
from core import filetools
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from channels import autoplay
# Servers/qualities advertised to the autoplay module.
list_servers = ['openload',
                'directo'
                ]
list_quality = ['default']
# Base site URL and the default headers sent with every page download.
CHANNEL_HOST = "http://animeflv.co"
CHANNEL_DEFAULT_HEADERS = [
    ["User-Agent", "Mozilla/5.0"],
    ["Accept-Encoding", "gzip, deflate"],
    ["Referer", CHANNEL_HOST]
]
# Scraping patterns. Pages are whitespace-normalized by get_url_contents()
# before matching, so the regexes assume no whitespace around tags.
REGEX_NEXT_PAGE = "class='current'>\d+?</li><li><a href='([^']+?)'"
REGEX_TITLE = r'(?:bigChar_a" href=.+?>)(.+?)(?:</a>)'
REGEX_THUMB = r'src="(http://media.animeflv\.co/uploads/thumbs/[^"]+?)"'
REGEX_PLOT = r'<span class="info">Línea de historia:</span><p><span>(.*?)</span>'
REGEX_URL = r'href="(http://animeflv\.co/Anime/[^"]+)">'
# Composite patterns built from the ones above.
REGEX_SERIE = r'%s.+?%s([^<]+?)</a><p>(.+?)</p>' % (REGEX_THUMB, REGEX_URL)
REGEX_EPISODE = r'href="(http://animeflv\.co/Ver/[^"]+?)">(?:<span.+?</script>)?(.+?)</a></td><td>(\d+/\d+/\d+)</td></tr>'
REGEX_GENERO = r'<a href="(http://animeflv\.co/genero/[^\/]+/)">([^<]+)</a>'
def get_url_contents(url):
    """Download *url* with the channel headers and normalize tag whitespace.

    Whitespace touching '<' or '>' is collapsed so the module's regexes can
    match the markup without worrying about the site's formatting.
    """
    body = httptools.downloadpage(url, headers=CHANNEL_DEFAULT_HEADERS).data
    for pattern, replacement in ((r'>\s+<', '><'),
                                 (r'>\s+', '>'),
                                 (r'\s+<', '<')):
        body = re.sub(pattern, replacement, body)
    return body
def get_cookie_value():
    """Build the Cookie header value (cloudflare tokens) from cookies.dat."""
    cookie_path = path.join(config.get_data_path(), 'cookies.dat')
    raw_cookies = filetools.read(cookie_path)
    # Pull the two cloudflare tokens stored for the animeflv domain.
    cfduid = scrapertools.find_single_match(
        raw_cookies, r"animeflv.*?__cfduid\s+([A-Za-z0-9\+\=]+)")
    clearance = scrapertools.find_single_match(
        raw_cookies, r"animeflv.*?cf_clearance\s+([A-Za-z0-9\+\=\-]+)")
    return "__cfduid=%s; cf_clearance=%s" % (cfduid, clearance)
# Suffix appended to thumbnail/video URLs so Kodi sends the UA, referer and
# cloudflare cookies. NOTE(review): evaluated at import time, so cookies.dat
# is read once when the module loads — confirm that is the intended lifetime.
header_string = "|User-Agent=Mozilla/5.0&Referer=http://animeflv.co&Cookie=" + \
    get_cookie_value()
def __extract_info_from_serie(html):
    """Return [title, url, thumbnail, plot] scraped from a serie page."""
    serie_title = clean_title(scrapertools.find_single_match(html, REGEX_TITLE))
    serie_url = scrapertools.find_single_match(html, REGEX_URL)
    # The header suffix carries UA/cookies so Kodi can fetch the image.
    serie_thumb = scrapertools.find_single_match(html, REGEX_THUMB) + header_string
    serie_plot = scrapertools.find_single_match(html, REGEX_PLOT)
    return [serie_title, serie_url, serie_thumb, serie_plot]
def __sort_by_quality(items):
    """
    Sort items by the numeric quality tag "[NNN]" in their title, descending.

    Fix: the original did ``int(find_single_match(..., r'\[(.+?)\]'))``,
    which raises ValueError for any title whose bracketed tag is missing or
    non-numeric (find_single_match returns '' on no match). Such items now
    sort last with quality 0 instead of crashing the whole listing.
    """
    def quality_of(item):
        # Only digits are accepted, so int() below cannot fail.
        tag = scrapertools.find_single_match(item.title, r'\[(\d+)\]')
        return int(tag) if tag else 0
    return sorted(items, key=quality_of, reverse=True)
def clean_title(title):
    """Drop a year marker such as "(2002)" or "(2012 - 2014)" from *title*.

    Returns the cleaned title with surrounding whitespace stripped.
    """
    # Parenthesized runs of digits, spaces and dashes are year markers.
    return re.sub(r'\([\d -]+?\)', '', title).strip()
def __find_series(html):
    """
    Scrape serie entries from a listing page (search results, categories...).

    Returns a list of [title, url, thumbnail, plot] lists.
    """
    # Restrict matching to the series table only.
    table_start = html.find('<table class="listing">')
    table_end = html.find('</table>', table_start)
    listing = html[table_start:table_end]
    results = []
    for match in re.finditer(REGEX_SERIE, listing, re.S):
        thumb, link, name, summary = match.groups()
        results.append([clean_title(name),
                        link,
                        thumb + header_string,
                        scrapertools.htmlclean(summary)])
    return results
def mainlist(item):
    """Build the channel's root menu and register autoplay/renumber options."""
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    # (action, title, url) — url None means the entry carries no URL.
    menu = [
        ("letras", "Por orden alfabético", None),
        ("generos", "Por géneros", CHANNEL_HOST + "/ListadeAnime"),
        ("series", "Por popularidad", CHANNEL_HOST + "/ListadeAnime/MasVisto"),
        ("series", "Novedades", CHANNEL_HOST + "/ListadeAnime/Nuevo"),
        ("series", "Últimos", CHANNEL_HOST + "/ListadeAnime/LatestUpdate"),
        ("search", "Buscar...", CHANNEL_HOST + "/Buscar?s="),
    ]
    itemlist = []
    for action, title, url in menu:
        if url is None:
            itemlist.append(Item(channel=item.channel, action=action, title=title))
        else:
            itemlist.append(Item(channel=item.channel, action=action, title=title,
                                 url=url))
    itemlist = renumbertools.show_option(item.channel, itemlist)
    autoplay.show_option(item.channel, itemlist)
    return itemlist
def letras(item):
    """Return the alphabetical index menu: '#' followed by A through Z."""
    logger.info()
    index_url = 'http://animeflv.co/ListadeAnime?c='
    return [Item(channel=item.channel, action="series", title=char,
                 url=index_url + char)
            for char in "#ABCDEFGHIJKLMNOPQRSTUVWXYZ"]
def generos(item):
    """List the genre categories scraped from the listing page at item.url."""
    logger.info()
    html = get_url_contents(item.url)
    return [Item(channel=item.channel, action="series", title=name, url=link)
            for link, name in re.findall(REGEX_GENERO, html)]
def search(item, texto):
    """
    Search the site for *texto* and return 'episodios' Items.

    On any scraping error the traceback is logged and an empty list is
    returned, which is what the global-search caller expects.

    Fix: the original used a bare ``except:`` (also swallowing
    SystemExit/KeyboardInterrupt) and logged sys.exc_info() tuple members
    one by one; this narrows the catch to Exception and logs the full
    traceback in one call.
    """
    logger.info()
    texto = texto.replace(" ", "%20")
    item.url = "%s%s" % (item.url, texto)
    html = get_url_contents(item.url)
    try:
        # A single hit redirects straight to the serie's own page.
        if html.find('<title>Ver') >= 0:
            show_list = [__extract_info_from_serie(html)]
        else:
            show_list = __find_series(html)
        items = []
        context = renumbertools.context(item)
        # NOTE(review): autoplay.context is used without calling it —
        # confirm whether autoplay exposes a list or a function here.
        context2 = autoplay.context
        context.extend(context2)
        for show in show_list:
            title, url, thumbnail, plot = show
            items.append(Item(channel=item.channel, action="episodios",
                              title=title, url=url, thumbnail=thumbnail,
                              plot=plot, show=title,
                              viewmode="movies_with_plot", context=context))
    except Exception:
        import traceback
        logger.error(traceback.format_exc())
        return []
    return items
def series(item):
    """List the series found at item.url, plus a next-page entry if present."""
    logger.info()
    page_html = get_url_contents(item.url)
    context = renumbertools.context(item)
    # NOTE(review): autoplay.context used without calling it, mirroring the
    # original — confirm whether it is a list or a function.
    context.extend(autoplay.context)
    results = []
    for title, url, thumbnail, plot in __find_series(page_html):
        results.append(Item(channel=item.channel, action="episodios",
                            title=title, url=url, thumbnail=thumbnail,
                            contentSerieName=title, plot=plot, show=title,
                            viewmode="movies_with_plot", context=context))
    next_page = scrapertools.find_single_match(page_html, REGEX_NEXT_PAGE)
    if next_page:
        results.append(Item(channel=item.channel, action="series",
                            title=">> Página Siguiente",
                            url=CHANNEL_HOST + next_page))
    return results
def episodios(item):
    """
    List the episodes (or the movie itself) for the serie at item.url, plus
    videolibrary/download entries when supported.

    Fix: the original did ``new_item = itemlist.append(Item(...))`` —
    ``list.append`` returns None, so ``new_item.infoLabels[...]`` raised
    AttributeError for every episode, and the later ``itemlist.append(new_item)``
    double-appended. The Item is now created, filled in, and appended once.
    """
    logger.info()
    itemlist = []
    html_serie = get_url_contents(item.url)
    info_serie = __extract_info_from_serie(html_serie)
    plot = info_serie[3] or ''
    episodes = re.findall(REGEX_EPISODE, html_serie, re.DOTALL)
    es_pelicula = False
    for url, title, date in episodes:
        episode = scrapertools.find_single_match(title, r'Episodio (\d+)')
        new_item = Item(channel=item.channel, action="findvideos", url=url,
                        thumbnail=item.thumbnail, plot=plot, show=item.show)
        if episode:
            # Episode link: renumber (season defaults to 1) and build an
            # "SxEE Episodio E (date)" title.
            season, episode = renumbertools.numbered_for_tratk(
                item.channel, item.contentSerieName, 1, int(episode))
            new_item.infoLabels["episode"] = episode
            new_item.infoLabels["season"] = season
            new_item.contentSerieName = item.contentSerieName
            title = "%sx%s %s (%s)" % (season, str(episode).zfill(2),
                                       "Episodio %s" % episode, date)
        else:
            # Movie link.
            title = "%s (%s)" % (title, date)
            item.url = url
            es_pelicula = True
        new_item.title = title
        new_item.fulltitle = "%s %s" % (item.show, title)
        itemlist.append(new_item)
    # Videolibrary support and at least one episode or movie found.
    if config.get_videolibrary_support() and len(itemlist) > 0:
        if es_pelicula:
            item_title = "Añadir película a la videoteca"
            item_action = "add_pelicula_to_library"
            item_extra = ""
        else:
            item_title = "Añadir serie a la videoteca"
            item_action = "add_serie_to_library"
            item_extra = "episodios"
        itemlist.append(Item(channel=item.channel, title=item_title, url=item.url,
                             action=item_action, extra=item_extra, show=item.show))
        if not es_pelicula:
            itemlist.append(Item(channel=item.channel,
                                 title="Descargar todos los episodios",
                                 url=item.url, action="download_all_episodes",
                                 extra="episodios", show=item.show))
    return itemlist
def findvideos(item):
    """
    Collect playable video Items for the episode/movie page at item.url.

    First lets servertools detect hosted mirrors inside the player iframe,
    then appends the direct URLs listed in the player's "var part = [...]"
    script array. Returns the list sorted by quality, descending.
    """
    logger.info()
    itemlist = []
    encontrados = []  # URLs already emitted, used to skip duplicates
    page_html = get_url_contents(item.url)
    regex_api = r'http://player\.animeflv\.co/[^\"]+'
    iframe_url = scrapertools.find_single_match(page_html, regex_api)
    iframe_html = get_url_contents(iframe_url)
    itemlist.extend(servertools.find_video_items(data=iframe_html))
    qualities = ["360", "480", "720", "1080"]
    for videoitem in itemlist:
        if videoitem.url in encontrados:
            continue
        encontrados.append(videoitem.url)
        videoitem.fulltitle = item.fulltitle
        # NOTE(review): qualities[1] hard-codes "480" for every hosted
        # mirror — confirm whether the real quality can be scraped instead.
        videoitem.title = "%s en calidad [%s]" % (videoitem.server, qualities[1])
        videoitem.channel = item.channel
        videoitem.thumbnail = item.thumbnail
    # Direct links from the player's part[] array. NOTE(review): the index is
    # assumed to map onto qualities (IndexError if more than 4 parts appear).
    regex_video_list = r'var part = \[([^\]]+)'
    videos_html = scrapertools.find_single_match(iframe_html, regex_video_list)
    videos = re.findall('"([^"]+)"', videos_html, re.DOTALL)
    for quality_id, video_url in enumerate(videos):
        if video_url in encontrados:
            continue
        encontrados.append(video_url)
        itemlist.append(Item(channel=item.channel, action="play", url=video_url, show=re.escape(item.show),
                             title="Ver en calidad [%s]" % (qualities[quality_id]), plot=item.plot,
                             fulltitle=item.title))
    autoplay.start(__sort_by_quality(itemlist), item)
    return __sort_by_quality(itemlist)

View File

@@ -1,33 +0,0 @@
{
"id": "animeflv_ru",
"name": "AnimeFLV.RU",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "http://i.imgur.com/5nRR9qq.png",
"banner": "animeflv_ru.png",
"compatible": {
"python": "2.7.9"
},
"categories": [
"anime"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Incluir en Novedades - Episodios de anime",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -27,7 +27,7 @@ def context():
_context = ""
if config.is_xbmc():
_context = [{"title": "Configurar AutoPlay",
_context = [{"title": config.get_localized_string(60071),
"action": "autoplay_config",
"channel": "autoplay"}]
return _context
@@ -60,7 +60,7 @@ def show_option(channel, itemlist, text_color='yellow', thumbnail=None, fanart=N
'servidores y calidades favoritas. '
itemlist.append(
Item(channel=__channel__,
title="Configurar AutoPlay",
title=config.get_localized_string(60071),
action="autoplay_config",
text_color=text_color,
thumbnail=thumbnail,
@@ -102,7 +102,12 @@ def start(itemlist, item):
if item.channel == 'videolibrary':
autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY')
channel_id = item.contentChannel
if not channel_id in autoplay_node or not autoplay_node['status']:
try:
active = autoplay_node['status']
except:
active = is_active(item.channel)
if not channel_id in autoplay_node or not active:
return itemlist
# Agrega servidores y calidades que no estaban listados a autoplay_node
@@ -176,7 +181,7 @@ def start(itemlist, item):
if 'context' not in item:
item.context = list()
if not filter(lambda x: x['action'] == 'autoplay_config', context):
item.context.append({"title": "Configurar AutoPlay",
item.context.append({"title": config.get_localized_string(60071),
"action": "autoplay_config",
"channel": "autoplay",
"from_channel": channel_id})
@@ -330,20 +335,19 @@ def start(itemlist, item):
# Si se han alcanzado el numero maximo de intentos de este servidor
# preguntar si queremos seguir probando o lo ignoramos
if max_intentos_servers[videoitem.server.lower()] == 0:
text = "Parece que los enlaces de %s no estan funcionando." % videoitem.server.upper()
text = config.get_localized_string(60072) % videoitem.server.upper()
if not platformtools.dialog_yesno("AutoPlay", text,
"¿Desea ignorar todos los enlaces de este servidor?"):
config.get_localized_string(60073)):
max_intentos_servers[videoitem.server.lower()] = max_intentos
# Si no quedan elementos en la lista se informa
if autoplay_elem == autoplay_list[-1]:
platformtools.dialog_notification('AutoPlay', 'No hubo enlaces funcionales')
platformtools.dialog_notification('AutoPlay', config.get_localized_string(60072))
else:
platformtools.dialog_notification('AutoPlay No Fue Posible', 'No Hubo Coincidencias')
platformtools.dialog_notification(config.get_localized_string(60074), config.get_localized_string(60075))
if new_options:
platformtools.dialog_notification("AutoPlay", "Nueva Calidad/Servidor disponible en la "
"configuracion", sound=False)
platformtools.dialog_notification("AutoPlay", config.get_localized_string(60076), sound=False)
# Restaura si es necesario el valor previo de "Accion y Player Mode" en preferencias
if user_config_setting_action != 2:
@@ -408,15 +412,14 @@ def init(channel, list_servers, list_quality, reset=False):
channel_node["settings"]["server_%s" % n] = s
channel_node["settings"]["quality_%s" % n] = c
autoplay_node[channel] = channel_node
if change:
result, json_data = jsontools.update_node(autoplay_node, 'autoplay', 'AUTOPLAY')
if not result:
heading = "Error al iniciar AutoPlay"
msj = "Consulte su log para obtener mas información."
heading = config.get_localized_string(60077)
msj = config.get_localized_string(60078)
icon = 1
platformtools.dialog_notification(heading, msj, icon, sound=False)
@@ -482,7 +485,7 @@ def autoplay_config(item):
allow_option = True
active_settings = {"id": "active", "label": "AutoPlay (activar/desactivar la auto-reproduccion)",
active_settings = {"id": "active", "label": config.get_localized_string(60079),
"color": "0xffffff99", "type": "bool", "default": False, "enabled": allow_option,
"visible": allow_option}
list_controls.append(active_settings)
@@ -493,7 +496,7 @@ def autoplay_config(item):
if not status_language:
status_language = 0
set_language = {"id": "language", "label": "Idioma para AutoPlay (Opcional)", "color": "0xffffff99",
set_language = {"id": "language", "label": config.get_localized_string(60080), "color": "0xffffff99",
"type": "list", "default": 0, "enabled": "eq(-1,true)", "visible": True,
"lvalues": get_languages(item.from_channel)}
@@ -513,7 +516,7 @@ def autoplay_config(item):
else:
enabled = "eq(-3,true)"
custom_servers_settings = {"id": "custom_servers", "label": " Servidores favoritos", "color": "0xff66ffcc",
custom_servers_settings = {"id": "custom_servers", "label": config.get_localized_string(60081), "color": "0xff66ffcc",
"type": "bool", "default": False, "enabled": enabled, "visible": True}
list_controls.append(custom_servers_settings)
if dict_values['active'] and enabled:
@@ -526,7 +529,7 @@ def autoplay_config(item):
default = num - 1
if default > len(server_list) - 1:
default = 0
set_servers = {"id": "server_%s" % num, "label": u" \u2665 Servidor Favorito %s" % num,
set_servers = {"id": "server_%s" % num, "label": u" \u2665" + config.get_localized_string(60082) % num,
"color": "0xfffcab14", "type": "list", "default": default,
"enabled": "eq(-%s,true)+eq(-%s,true)" % (pos1, num), "visible": True,
"lvalues": server_list}
@@ -544,7 +547,7 @@ def autoplay_config(item):
else:
enabled = "eq(-7,true)"
custom_quality_settings = {"id": "custom_quality", "label": " Calidades Favoritas", "color": "0xff66ffcc",
custom_quality_settings = {"id": "custom_quality", "label": config.get_localized_string(60083), "color": "0xff66ffcc",
"type": "bool", "default": False, "enabled": enabled, "visible": True}
list_controls.append(custom_quality_settings)
if dict_values['active'] and enabled:
@@ -570,15 +573,15 @@ def autoplay_config(item):
# Plan B
dict_values['plan_b'] = settings_node.get('plan_b', False)
enabled = "eq(-4,true)|eq(-8,true)"
plan_b = {"id": "plan_b", "label": " Plan B (Si fallan los favoritos prueba otros enlaces)",
plan_b = {"id": "plan_b", "label": config.get_localized_string(70172),
"color": "0xffffff99",
"type": "bool", "default": False, "enabled": enabled, "visible": True}
list_controls.append(plan_b)
# Seccion Prioridades
priority_list = ["Servidor y Calidad", "Calidad y Servidor"]
set_priority = {"id": "priority", "label": " Prioridad (Indica el orden para Auto-Reproducir)",
priority_list = [config.get_localized_string(70174), config.get_localized_string(70175)]
set_priority = {"id": "priority", "label": config.get_localized_string(60085),
"color": "0xffffff99", "type": "list", "default": 0,
"enabled": True, "visible": "eq(-5,true)+eq(-9,true)+eq(-12,true)", "lvalues": priority_list}
list_controls.append(set_priority)
@@ -681,7 +684,7 @@ def reset(item, dict):
list_quality = channel.list_quality
init(channel_name, list_servers, list_quality, reset=True)
platformtools.dialog_notification('AutoPlay', '%s: Los datos fueron reiniciados' % item.category)
platformtools.dialog_notification('AutoPlay', config.get_localized_string(70523) % item.category)
return

View File

@@ -12,6 +12,21 @@
"movie"
],
"settings": [
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino",
"Subtitulado",
"Español",
"SUB"
]
},
{
"id": "include_in_global_search",
"type": "bool",
@@ -89,20 +104,6 @@
"Perfil 1"
]
},
{
"id": "filterlanguages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"VOSE",
"Latino",
"Español",
"No filtrar"
]
},
{
"id": "filterlinks",
"type": "list",
@@ -117,6 +118,19 @@
]
},
{
"id": "filterlanguages",
"type": "list",
"label": "Mostrar enlaces del canal en idioma...",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"VOSE",
"Latino",
"Español",
"No filtrar"
]
}, {
"id": "viewmode",
"type": "list",
"label": "Elegir vista por defecto (Confluence)...",

View File

@@ -1,5 +1,7 @@
# -*- coding: utf-8 -*-
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
@@ -8,7 +10,13 @@ from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
CHANNEL_HOST = "http://www.cinetux.io/"
IDIOMAS = {'Latino': 'Latino', 'Subtitulado': 'Subtitulado', 'Español': 'Español', 'SUB': 'SUB' }
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'okru', 'vidoza', 'openload', 'powvideo', 'netutv','gvideo']
CHANNEL_HOST = "http://www.cinetux.to/"
# Configuracion del canal
__modo_grafico__ = config.get_setting('modo_grafico', 'cinetux')
@@ -26,6 +34,7 @@ viewmode = viewmode_options[config.get_setting('viewmode', 'cinetux')]
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
item.viewmode = viewmode
data = httptools.downloadpage(CHANNEL_HOST + "pelicula").data
@@ -53,6 +62,7 @@ def mainlist(item):
itemlist.append(item.clone(action="search", title="Buscar...", text_color=color3,
thumbnail=get_thumb('search', auto=True)))
itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -129,13 +139,13 @@ def peliculas(item):
patron += '.*?alt="([^"]+)"'
patron += '(.*?)'
patron += 'href="([^"]+)"'
patron += '.*?(?:<span>|<span class="year">)([^<]+)'
patron += '.*?(?:<span>|<span class="year">)(.+?)<'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, scrapedtitle, quality, scrapedurl, scrapedyear in matches:
quality = scrapertools.find_single_match(quality, '.*?quality">([^<]+)')
try:
fulltitle = scrapedtitle
year = scrapedyear.replace("&nbsp;", "")
year = scrapertools.find_single_match(scrapedyear,'\d{4}')
if "/" in fulltitle:
fulltitle = fulltitle.split(" /", 1)[0]
scrapedtitle = "%s (%s)" % (fulltitle, year)
@@ -219,8 +229,6 @@ def findvideos(item):
filtro_enlaces = 2
dict_idiomas = {'Español': 2, 'Latino': 1, 'Subtitulado': 0}
data = httptools.downloadpage(item.url).data
if item.infoLabels["year"]:
tmdb.set_infoLabels(item, __modo_grafico__)
if filtro_enlaces != 0:
list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas, "online", item)
if list_enlaces:
@@ -233,6 +241,14 @@ def findvideos(item):
itemlist.append(item.clone(action="", title="Enlaces Descarga", text_color=color1,
text_bold=True))
itemlist.extend(list_enlaces)
tmdb.set_infoLabels(item, __modo_grafico__)
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if itemlist:
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
text_color="magenta"))

View File

@@ -30,19 +30,27 @@ def mainlist(item):
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(item.clone(title="Ultimas", action="list_all", url=host, thumbnail=get_thumb('last', auto=True)))
itemlist.append(item.clone(title="Generos", action="section", section='genre',
thumbnail=get_thumb('genres', auto=True)))
itemlist.append(item.clone(title="Castellano", action="list_all", url= host+'?s=Español',
thumbnail=get_thumb('audio', auto=True)))
itemlist.append(item.clone(title="Latino", action="list_all", url=host + '?s=Latino',
thumbnail=get_thumb('audio', auto=True)))
itemlist.append(item.clone(title="VOSE", action="list_all", url=host + '?s=Subtitulado',
thumbnail=get_thumb('audio', auto=True)))
itemlist.append(item.clone(title="Alfabetico", action="section", section='alpha',
thumbnail=get_thumb('alphabet', auto=True)))
itemlist.append(item.clone(title="Buscar", action="search", url=host+'?s=',
thumbnail=get_thumb('search', auto=True)))
itemlist.append(Item(channel=item.channel, title="Ultimas", action="list_all", url=host,
thumbnail=get_thumb('last', auto=True)))
itemlist.append(Item(channel=item.channel, title="Generos", action="section", section='genre',
thumbnail=get_thumb('genres', auto=True)))
itemlist.append(Item(channel=item.channel, title="Castellano", action="list_all", url= host+'espanol',
thumbnail=get_thumb('audio', auto=True)))
itemlist.append(Item(channel=item.channel, title="Latino", action="list_all", url=host + 'latino',
thumbnail=get_thumb('audio', auto=True)))
itemlist.append(Item(channel=item.channel, title="VOSE", action="list_all", url=host + 'subtitulado',
thumbnail=get_thumb('audio', auto=True)))
itemlist.append(Item(channel=item.channel, title="Alfabetico", action="section", section='alpha',
thumbnail=get_thumb('alphabet', auto=True)))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+'?s=',
thumbnail=get_thumb('search', auto=True)))
autoplay.show_option(item.channel, itemlist)
@@ -52,7 +60,7 @@ def mainlist(item):
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
@@ -63,11 +71,11 @@ def list_all(item):
try:
data = get_source(item.url)
if item.section == 'alpha':
patron = '<span class=Num>\d+.*?<a href=(.*?) class.*?'
patron += 'src=(.*?) class.*?<strong>(.*?)</strong>.*?<td>(\d{4})</td>'
patron = '<span class="Num">\d+.*?<a href="([^"]+)" class.*?'
patron += 'src="([^"]+)" class.*?<strong>([^<]+)</strong>.*?<td>(\d{4})</td>'
else:
patron = '<article id=post-.*?<a href=(.*?)>.*?src=(.*?) alt=.*?'
patron += '<h2 class=Title>(.*?)<\/h2>.*?<span class=Year>(.*?)<\/span>'
patron = '<article id="post-\d+".*?<a href="([^"]+)">.*?'
patron += 'src="([^"]+)".*?<h2 class="Title">([^<]+)<\/h2>.*?<span class="Year">([^<]+)<\/span>'
data = get_source(item.url)
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -84,7 +92,7 @@ def list_all(item):
title = '%s [%s]'%(contentTitle, year)
thumbnail = 'http:'+scrapedthumbnail
itemlist.append(item.clone(action='findvideos',
itemlist.append(Item(channel=item.channel, action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
@@ -95,9 +103,10 @@ def list_all(item):
# Paginación
url_next_page = scrapertools.find_single_match(data,'<a class=next.*?href=(.*?)>')
url_next_page = scrapertools.find_single_match(data,'<a class="next.*?" rel="next" href="([^"]+)"')
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all', section=item.section))
itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all',
section=item.section))
except:
pass
return itemlist
@@ -107,17 +116,13 @@ def section(item):
itemlist = []
data = get_source(host)
action = 'list_all'
if item.section == 'quality':
patron = 'menu-item-object-category.*?menu-item-\d+><a href=(.*?)>(.*?)<\/a>'
elif item.section == 'genre':
patron = 'category menu-item-\d+><a href=(http:.*?)>(.*?)</a>'
elif item.section == 'year':
patron = 'custom menu-item-15\d+><a href=(.*?\?s.*?)>(\d{4})<\/a><\/li>'
if item.section == 'genre':
data = scrapertools.find_single_match(data, '>Géneros</a>.*?</ul>')
elif item.section == 'alpha':
patron = '<li><a href=(.*?letter.*?)>(.*?)</a>'
action = 'list_all'
data = scrapertools.find_single_match(data, '<ul class="AZList"><li>.*?</ul>')
patron = '<a href="([^"]+)">([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for data_one, data_two in matches:
@@ -136,11 +141,17 @@ def findvideos(item):
itemlist = []
data = get_source(item.url)
patron = 'domain=(.*?) class=.*?><span>.*?</span>.*?<span>\d+ - (.*?) - (.*?)</span>'
patron = 'TPlayerNv="Opt(\w\d+)".*?img src="(.*?)<span>\d+ - (.*?) - ([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, language, quality in matches:
for option, url_data, language, quality in matches:
if 'domain' in url_data:
url = scrapertools.find_single_match(url_data, 'domain=([^"]+)"')
else:
url = scrapertools.find_single_match(data, 'id="Opt%s">.*?file=([^"]+)"' % option)
if url != '' and 'youtube' not in url:
itemlist.append(item.clone(title='%s', url=url, language=IDIOMAS[language], quality=quality, action='play'))
itemlist.append(Item(channel=item.channel, title='%s', url=url, language=IDIOMAS[language],
quality=quality, action='play'))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % '%s [%s] [%s]'%(i.server.capitalize(),
i.language, i.quality))

View File

@@ -1,5 +1,7 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from platformcode import logger
@@ -9,8 +11,8 @@ def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="categorias", title="Categorías", url="http://dato.porn/categories_all"))
itemlist.append(item.clone(title="Buscar...", action="search"))
itemlist.append(item.clone(action="categorias", title="Categorías", url="http://dato.porn/categories_all", contentType="movie", viewmode="movie"))
itemlist.append(item.clone(title="Buscar...", action="search", contentType="movie", viewmode="movie"))
return itemlist
@@ -25,22 +27,27 @@ def lista(item):
itemlist = []
# Descarga la pagina
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
# Extrae las entradas
patron = '<div class="vid_block">\s*<a href="([^"]+)".*?url\(\'([^\']+)\'.*?<span>(.*?)</span>.*?<b>(.*?)</b>'
patron = '<div class="videobox">\s*<a href="([^"]+)".*?url\(\'([^\']+)\'.*?<span>(.*?)<\/span><\/div><\/a>.*?class="title">(.*?)<\/a><span class="views">.*?<\/a><\/span><\/div> '
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, duration, scrapedtitle in matches:
if "/embed-" not in scrapedurl:
scrapedurl = scrapedurl.replace("dato.porn/", "dato.porn/embed-") + ".html"
#scrapedurl = scrapedurl.replace("dato.porn/", "dato.porn/embed-") + ".html"
scrapedurl = scrapedurl.replace("datoporn.co/", "datoporn.co/embed-") + ".html"
if duration:
scrapedtitle = "%s - %s" % (duration, scrapedtitle)
scrapedtitle += ' gb'
scrapedtitle = scrapedtitle.replace(":", "'")
#logger.debug(scrapedurl + ' / ' + scrapedthumbnail + ' / ' + duration + ' / ' + scrapedtitle)
itemlist.append(item.clone(action="play", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
server="datoporn", fanart=scrapedthumbnail.replace("_t.jpg", ".jpg")))
# Extrae la marca de siguiente página
next_page = scrapertools.find_single_match(data, '<a href=["|\']([^["|\']+)["|\']>Next')
# Extrae la marca de siguiente página
#next_page = scrapertools.find_single_match(data, '<a href=["|\']([^["|\']+)["|\']>Next')
next_page = scrapertools.find_single_match(data, '<a class=["|\']page-link["|\'] href=["|\']([^["|\']+)["|\']>Next')
if next_page and itemlist:
itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page))

View File

@@ -1,80 +0,0 @@
{
"id": "descargasmix",
"name": "DescargasMIX",
"language": ["cast", "lat"],
"active": false,
"adult": false,
"thumbnail": "descargasmix.png",
"banner": "descargasmix.png",
"categories": [
"movie",
"vos",
"torrent",
"documentary",
"anime",
"tvshow"
],
"settings": [
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 2,
"enabled": true,
"visible": true,
"lvalues": [
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_torrent",
"type": "bool",
"label": "Incluir en Novedades - Torrent",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Incluir en Novedades - series",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Incluir en Novedades - anime",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_4k",
"type": "bool",
"label": "Incluir en Novedades - 4K",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,582 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urllib
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
__modo_grafico__ = config.get_setting("modo_grafico", "descargasmix")
__perfil__ = config.get_setting("perfil", "descargasmix")
# Fijar perfil de color
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']]
color1, color2, color3 = perfil[__perfil__]
host = config.get_setting("host", "descargasmix")
def mainlist(item):
    """Build the channel's root menu.

    Side effects: clears the stored "url_error" flag and re-validates the
    site host (the domain changes from time to time), persisting whatever
    get_data() reports back into the channel settings.
    """
    logger.info()
    itemlist = []
    item.text_color = color1
    # Reset host and https error flag (in case Kodi was updated).
    config.set_setting("url_error", False, "descargasmix")
    # NOTE(review): set_setting's return value is used as the host here --
    # presumably it echoes the value it stored; verify against the config API.
    host = config.set_setting("host", "https://ddmix.net", "descargasmix")
    host_check = get_data(host, True)
    if host_check and host_check.startswith("http"):
        # The site answered from a different domain: remember it.
        config.set_setting("host", host_check, "descargasmix")
    itemlist.append(item.clone(title="Películas", action="lista", fanart="http://i.imgur.com/c3HS8kj.png",
                               thumbnail=get_thumb('movies', auto=True)))
    itemlist.append(item.clone(title="Series", action="lista_series", fanart="http://i.imgur.com/9loVksV.png",
                               thumbnail=get_thumb('tvshows', auto=True)))
    itemlist.append(item.clone(title="Documentales", action="entradas", url="%s/documentales/" % host,
                               fanart="http://i.imgur.com/Q7fsFI6.png",
                               thumbnail=get_thumb('documentaries', auto=True)))
    itemlist.append(item.clone(title="Anime", action="entradas", url="%s/anime/" % host,
                               fanart="http://i.imgur.com/whhzo8f.png",
                               thumbnail=get_thumb('anime', auto=True)))
    itemlist.append(item.clone(title="Deportes", action="entradas", url="%s/deportes/" % host,
                               fanart="http://i.imgur.com/ggFFR8o.png",
                               thumbnail=get_thumb('deporte', auto=True)))
    itemlist.append(item.clone(title="Programas de tv", action="entradas", url="%s/otros/programas-de-tv/" % host,
                               thumbnail=get_thumb('de la tv', auto=True)))
    # Blank row used as a visual separator in the menu.
    itemlist.append(item.clone(title="", action=""))
    itemlist.append(item.clone(title="Buscar...", action="search", thumbnail=get_thumb('search', auto=True)))
    itemlist.append(item.clone(action="setting_channel", title="Configurar canal...", text_color="gold", folder=False))
    return itemlist
def setting_channel(item):
    """Open the channel configuration dialog and refresh the current listing.

    Returns whatever the settings window returns (platformtools project API).
    """
    from platformcode import platformtools
    result = platformtools.show_channel_settings()
    platformtools.itemlist_refresh()
    return result
def search(item, texto):
    """Global-search entry point: build the query URL and delegate to busqueda().

    Any failure is logged and an empty list returned, so one broken channel
    cannot abort the add-on's global search.
    """
    logger.info()
    try:
        item.url = "%s/?s=%s" % (host, texto)
        return busqueda(item)
    except:
        # Swallow everything: log the exception details and report no results.
        import sys
        for detail in sys.exc_info():
            logger.error("%s" % detail)
        return []
def busqueda(item):
    """Render one page of site search results.

    Movie/documentary hits become "findvideos" items; everything else that
    passes the category filter (series, anime, sports, ...) becomes an
    "episodios" item. A pagination entry is appended when present.
    """
    logger.info()
    itemlist = []
    data = get_data(item.url)
    # Only results belonging to one of these categories are kept.
    contenido = ['Películas', 'Series', 'Documentales', 'Anime', 'Deportes', 'Miniseries', 'Vídeos']
    bloque = scrapertools.find_single_match(data, '<div id="content" role="main">(.*?)<div id="sidebar" '
                                                  'role="complementary">')
    patron = '<a class="clip-link".*?href="([^"]+)".*?<img alt="([^"]+)" src="([^"]+)"' \
             '.*?<span class="overlay.*?>(.*?)<.*?<p class="stats">(.*?)</p>'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for scrapedurl, scrapedtitle, scrapedthumbnail, info, scrapedcat in matches:
        # Drop results whose category text matches none of the accepted ones.
        if not [True for c in contenido if c in scrapedcat]:
            continue
        # Links and thumbnails may be wrapped by the proxy redirector; unwrap them.
        scrapedurl = urllib.unquote(re.sub(r'&amp;b=4|/go\.php\?u=', '', scrapedurl))
        scrapedthumbnail = urllib.unquote(re.sub(r'&amp;b=4|/go\.php\?u=', '', scrapedthumbnail))
        if not scrapedthumbnail.startswith("http"):
            scrapedthumbnail = "http:" + scrapedthumbnail
        # Request the full-size poster instead of the 129x180 thumbnail.
        scrapedthumbnail = scrapedthumbnail.replace("-129x180", "")
        if ("Películas" in scrapedcat or "Documentales" in scrapedcat) and "Series" not in scrapedcat:
            # Movie-type result: title before "[" is the clean content title.
            titulo = scrapedtitle.split("[")[0]
            if info:
                scrapedtitle += " [%s]" % unicode(info, "utf-8").capitalize().encode("utf-8")
            itemlist.append(item.clone(action="findvideos", title=scrapedtitle, url=scrapedurl, contentTitle=titulo,
                                       thumbnail=scrapedthumbnail, fulltitle=titulo, contentType="movie"))
        else:
            itemlist.append(item.clone(action="episodios", title=scrapedtitle, url=scrapedurl,
                                       thumbnail=scrapedthumbnail, fulltitle=scrapedtitle, contentTitle=scrapedtitle,
                                       show=scrapedtitle, contentType="tvshow"))
    next_page = scrapertools.find_single_match(data, '<a class="nextpostslink".*?href="([^"]+)"')
    if next_page:
        next_page = urllib.unquote(re.sub(r'&amp;b=4|/go\.php\?u=', '', next_page))
        itemlist.append(item.clone(action="busqueda", title=">> Siguiente", url=next_page))
    return itemlist
def lista(item):
    """Static movie sub-menu: one entry per listing/quality category on the site."""
    logger.info()
    categorias = [
        ("Novedades", "%s/peliculas" % host),
        ("Estrenos", "%s/peliculas/estrenos" % host),
        ("Dvdrip", "%s/peliculas/dvdrip" % host),
        ("HD (720p/1080p)", "%s/peliculas/hd" % host),
        ("4K", "%s/peliculas/4k" % host),
        ("HDRIP", "%s/peliculas/hdrip" % host),
        ("Latino", "%s/peliculas/latino-peliculas" % host),
        ("VOSE", "%s/peliculas/subtituladas" % host),
        ("3D", "%s/peliculas/3d" % host),
    ]
    return [item.clone(title=titulo, action="entradas", url=url)
            for titulo, url in categorias]
def lista_series(item):
    """Static series sub-menu (new episodes and miniseries listings)."""
    logger.info()
    secciones = [("Novedades", "%s/series/" % host),
                 ("Miniseries", "%s/series/miniseries" % host)]
    return [item.clone(title=titulo, action="entradas", url=url)
            for titulo, url in secciones]
def entradas(item):
    """Parse one listing page into items.

    Series-like sections (series/deportes/anime/miniseries/programas) use a
    simpler card markup and always yield "episodios" items; every other
    section yields movie cards with language/quality flags folded into the
    title. A pagination entry is appended when present.
    """
    logger.info()
    itemlist = []
    item.text_color = color2
    data = get_data(item.url)
    bloque = scrapertools.find_single_match(data, '<div id="content" role="main">(.*?)<div id="sidebar" '
                                                  'role="complementary">')
    contenido = ["series", "deportes", "anime", 'miniseries', 'programas']
    c_match = [True for match in contenido if match in item.url]
    # Pattern depends on the kind of content being listed.
    if True in c_match:
        # Series-like markup: the overlay CSS class carries extra status info.
        patron = '<a class="clip-link".*?href="([^"]+)".*?<img alt="([^"]+)" src="([^"]+)"' \
                 '.*?<span class="overlay(|[^"]+)">'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedinfo in matches:
            # Strip the proxy wrapper from the link.
            scrapedurl = urllib.unquote(re.sub(r'&amp;b=4|/go\.php\?u=', '', scrapedurl))
            if scrapedinfo != "":
                # Turn the CSS-class suffix into a readable "[...]" tag.
                scrapedinfo = scrapedinfo.replace(" ", "").replace("-", " ")
                scrapedinfo = " [%s]" % unicode(scrapedinfo, "utf-8").capitalize().encode("utf-8")
            titulo = scrapedtitle + scrapedinfo
            titulo = scrapertools.decodeHtmlentities(titulo)
            scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
            scrapedthumbnail = urllib.unquote(re.sub(r'&amp;b=4|/go\.php\?u=', '', scrapedthumbnail))
            if not scrapedthumbnail.startswith("http"):
                scrapedthumbnail = "http:" + scrapedthumbnail
            # Request the full-size poster and re-quote the (possibly non-ASCII) filename.
            scrapedthumbnail = scrapedthumbnail.replace("-129x180", "")
            scrapedthumbnail = scrapedthumbnail.rsplit("/", 1)[0] + "/" + \
                               urllib.quote(scrapedthumbnail.rsplit("/", 1)[1])
            if "series" in item.url or "anime" in item.url:
                item.show = scrapedtitle
            itemlist.append(item.clone(action="episodios", title=titulo, url=scrapedurl, thumbnail=scrapedthumbnail,
                                       fulltitle=scrapedtitle, contentTitle=scrapedtitle, contentType="tvshow"))
    else:
        # Movie-like markup: overlay text plus a category line in <p class="stats">.
        patron = '<a class="clip-link".*?href="([^"]+)".*?<img alt="([^"]+)" src="([^"]+)"' \
                 '.*?<span class="overlay.*?>(.*?)<.*?<p class="stats">(.*?)</p>'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for scrapedurl, scrapedtitle, scrapedthumbnail, info, categoria in matches:
            scrapedurl = urllib.unquote(re.sub(r'&amp;b=4|/go\.php\?u=', '', scrapedurl))
            titulo = scrapertools.decodeHtmlentities(scrapedtitle)
            scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.split("[")[0])
            action = "findvideos"
            show = ""
            if "Series" in categoria:
                # Series slipped into a movie listing: route to the episode parser.
                action = "episodios"
                show = scrapedtitle
            elif categoria and categoria != "Películas" and categoria != "Documentales":
                # Append the most specific sub-category to the title.
                try:
                    titulo += " [%s]" % categoria.rsplit(", ", 1)[1]
                except:
                    titulo += " [%s]" % categoria
            # Language flags are encoded as CSS classes in the overlay markup.
            if 'l-espmini' in info:
                titulo += " [ESP]"
            if 'l-latmini' in info:
                titulo += " [LAT]"
            if 'l-vosemini' in info:
                titulo += " [VOSE]"
            if info:
                titulo += " [%s]" % unicode(info, "utf-8").capitalize().encode("utf-8")
            # NOTE(review): the pattern has no capture group, so `year` keeps the
            # brackets (e.g. "[2018]") -- verify downstream tmdb lookup accepts it.
            year = scrapertools.find_single_match(titulo,'\[\d{4}\]')
            scrapedthumbnail = urllib.unquote(re.sub(r'&amp;b=4|/go\.php\?u=', '', scrapedthumbnail))
            if not scrapedthumbnail.startswith("http"):
                scrapedthumbnail = "http:" + scrapedthumbnail
            scrapedthumbnail = scrapedthumbnail.replace("-129x180", "")
            scrapedthumbnail = scrapedthumbnail.rsplit("/", 1)[0] + "/" + \
                               urllib.quote(scrapedthumbnail.rsplit("/", 1)[1])
            itemlist.append(item.clone(action=action, title=titulo, url=scrapedurl, thumbnail=scrapedthumbnail,
                                       fulltitle=scrapedtitle, contentTitle=scrapedtitle, viewmode="movie_with_plot",
                                       show=show, contentType="movie", infoLabels={'year':year}))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Pagination
    next_page = scrapertools.find_single_match(data, '<a class="nextpostslink".*?href="([^"]+)"')
    if next_page:
        next_page = urllib.unquote(re.sub(r'&amp;b=4|/go\.php\?u=', '', next_page))
        itemlist.append(item.clone(title=">> Siguiente", url=next_page, text_color=color3))
    return itemlist
def episodios(item):
    """List the episodes of a series page, newest first.

    Each "polo" div holds an "SxE ..." label; that label is stored in
    item.extra so findvideos() can later anchor the episode's link block.
    Appends trailer/videolibrary entries when listing a full series.
    """
    logger.info()
    itemlist = []
    data = get_data(item.url)
    patron = '(<ul class="menu ses" id="seasons-list">.*?<div class="section-box related-posts">)'
    bloque = scrapertools.find_single_match(data, patron)
    matches = scrapertools.find_multiple_matches(bloque, '<div class="polo".*?>(.*?)</div>')
    for scrapedtitle in matches:
        scrapedtitle = scrapedtitle.strip()
        new_item = item.clone()
        # The label starts with "SxE"; split it into season and episode numbers.
        new_item.infoLabels['season'] = scrapedtitle.split(" ", 1)[0].split("x")[0]
        new_item.infoLabels['episode'] = scrapedtitle.split(" ", 1)[0].split("x")[1]
        # When invoked from the videolibrary-add entry, skip prefixing that title.
        if item.fulltitle != "Añadir esta serie a la videoteca":
            title = item.fulltitle + " " + scrapedtitle.strip()
        else:
            title = scrapedtitle.strip()
        itemlist.append(new_item.clone(action="findvideos", title=title, extra=scrapedtitle, fulltitle=title,
                                       contentType="episode"))
    # Newest episodes first.
    itemlist.sort(key=lambda it: it.title, reverse=True)
    item.plot = scrapertools.find_single_match(data, '<strong>SINOPSIS</strong>:(.*?)</p>')
    if item.show != "" and item.extra == "":
        itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                                   text_color="magenta"))
        if config.get_videolibrary_support():
            itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
                                 action="add_serie_to_library", extra="episodios", show=item.show,
                                 text_color="green"))
    try:
        from core import tmdb
        # Skip the last two entries (trailer / videolibrary) from the tmdb lookup.
        tmdb.set_infoLabels_itemlist(itemlist[:-2], __modo_grafico__)
    except:
        pass
    return itemlist
def episode_links(item):
    """List the online/download links for one episode.

    item.extra holds the "SxE ..." label that anchors this episode's link
    block inside the series page. Magnet links are promoted to the top as
    torrent entries; the rest are kept only if their server is enabled.
    """
    logger.info()
    itemlist = []
    item.text_color = color3
    data = get_data(item.url)
    data = data.replace("\n", "").replace("\t", "")
    # Link block: from this episode's "polo" header up to the next one.
    patron = '<div class="polo".*?>%s(.*?)(?:<div class="polo"|</li>)' % item.extra.strip()
    bloque = scrapertools.find_single_match(data, patron)
    patron = '<div class="episode-server">.*?data-sourcelk="([^"]+)"' \
             '.*?data-server="([^"]+)"' \
             '.*?<div class="caliycola">(.*?)</div>'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    itemlist.append(item.clone(action="", title="Enlaces Online/Descarga", text_color=color1))
    lista_enlaces = []
    for scrapedurl, scrapedserver, scrapedcalidad in matches:
        # Normalize server aliases used by the page to Alfa server ids.
        if scrapedserver == "ul":
            scrapedserver = "uploadedto"
        if scrapedserver == "streamin":
            scrapedserver = "streaminto"
        titulo = " %s [%s]" % (unicode(scrapedserver, "utf-8").capitalize().encode("utf-8"), scrapedcalidad)
        # Magnet links are torrents; keep them at the head of the list.
        if scrapedserver == "magnet":
            itemlist.insert(0,
                            item.clone(action="play", title=titulo, server="torrent", url=scrapedurl, extra=item.url))
        else:
            if servertools.is_server_enabled(scrapedserver):
                try:
                    # servers_module = __import__("servers." + scrapedserver)
                    lista_enlaces.append(item.clone(action="play", title=titulo, server=scrapedserver, url=scrapedurl,
                                                    extra=item.url))
                except:
                    pass
    lista_enlaces.reverse()
    itemlist.extend(lista_enlaces)
    # If a magnet ended up first, label the torrent section.
    if itemlist[0].server == "torrent":
        itemlist.insert(0, item.clone(action="", title="Enlaces Torrent", text_color=color1))
    return itemlist
def findvideos(item):
    """Collect every playable/downloadable link for a movie page.

    Episodes are delegated to episode_links(). The page mixes several
    generations of markup -- old torrent layout, "Ver online" block, embedded
    Googlevideo player, and per-server download blocks -- all handled here.
    """
    logger.info()
    # Episodes reach this action too; route them to the episode link parser.
    if item.contentSeason != '':
        return episode_links(item)
    itemlist = []
    item.text_color = color3
    data = get_data(item.url)
    item.plot = scrapertools.find_single_match(data, 'SINOPSIS(?:</span>|</strong>):(.*?)</p>')
    year = scrapertools.find_single_match(data, '(?:<span class="bold">|<strong>)AÑO(?:</span>|</strong>):\s*(\d+)')
    if year:
        # Enrich the item with TMDB info when the year can be scraped.
        try:
            from core import tmdb
            item.infoLabels['year'] = year
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        except:
            pass
    old_format = False
    # Old-format torrent layout.
    if "Enlaces de descarga</div>" in data:
        old_format = True
        matches = scrapertools.find_multiple_matches(data, 'class="separate3 magnet".*?href="([^"]+)"')
        for scrapedurl in matches:
            scrapedurl = scrapertools.find_single_match(scrapedurl, '(magnet.*)')
            scrapedurl = urllib.unquote(re.sub(r'&amp;b=4', '', scrapedurl))
            title = "[Torrent] "
            # The magnet's display name carries the release title.
            title += urllib.unquote(scrapertools.find_single_match(scrapedurl, 'dn=(.*?)(?i)WWW.DescargasMix'))
            itemlist.append(item.clone(action="play", server="torrent", title=title, url=scrapedurl,
                                       text_color="green"))
    # Online streaming block.
    data_online = scrapertools.find_single_match(data, 'Ver online</div>(.*?)<div class="section-box related-posts">')
    if data_online:
        title = "Enlaces Online"
        # Language flags encoded as CSS classes in the block.
        if '"l-latino2"' in data_online:
            title += " [LAT]"
        elif '"l-esp2"' in data_online:
            title += " [ESP]"
        elif '"l-vose2"' in data_online:
            title += " [VOSE]"
        patron = 'make_links.*?,[\'"]([^"\']+)["\']'
        matches = scrapertools.find_multiple_matches(data_online, patron)
        for i, code in enumerate(matches):
            # Each "make_links" argument is an obfuscated token; decode it.
            enlace = show_links(code)
            links = servertools.findvideos(data=enlace[0])
            if links and "peliculas.nu" not in links:
                if i == 0:
                    # First link: emit a header row with size and extra info.
                    extra_info = scrapertools.find_single_match(data_online, '<span class="tooltiptext">(.*?)</span>')
                    size = scrapertools.find_single_match(data_online, '(?i)TAMAÑO:\s*(.*?)<').strip()
                    if size:
                        title += " [%s]" % size
                    new_item = item.clone(title=title, action="", text_color=color1)
                    if extra_info:
                        extra_info = scrapertools.htmlclean(extra_info)
                        new_item.infoLabels["plot"] = extra_info
                        new_item.title += " +INFO"
                    itemlist.append(new_item)
                title = " Ver vídeo en " + links[0][2]
                itemlist.append(item.clone(action="play", server=links[0][2], title=title, url=links[0][1]))
    # Embedded Googlevideo player: the script is escaped with '@' in place of '%'.
    scriptg = scrapertools.find_single_match(data, "<script type='text/javascript'>str='([^']+)'")
    if scriptg:
        gvideo = urllib.unquote_plus(scriptg.replace("@", "%"))
        url = scrapertools.find_single_match(gvideo, 'src="([^"]+)"')
        if url:
            itemlist.append(item.clone(action="play", server="directo", url=url, extra=item.url,
                                       title=" Ver vídeo en Googlevideo (Máxima calidad)"))
    # Download blocks, one per server group.
    patron = '<div class="(?:floatLeft |)double(?:nuevo|)">(.*?)</div>(.*?)' \
             '(?:<div(?: id="mirrors"|) class="(?:contentModuleSmall |)mirrors">|<div class="section-box related-' \
             'posts">)'
    bloques_descarga = scrapertools.find_multiple_matches(data, patron)
    for title_bloque, bloque in bloques_descarga:
        # The online block was already handled above.
        if title_bloque == "Ver online":
            continue
        if '"l-latino2"' in bloque:
            title_bloque += " [LAT]"
        elif '"l-esp2"' in bloque:
            title_bloque += " [ESP]"
        elif '"l-vose2"' in bloque:
            title_bloque += " [VOSE]"
        extra_info = scrapertools.find_single_match(bloque, '<span class="tooltiptext">(.*?)</span>')
        size = scrapertools.find_single_match(bloque, '(?i)TAMAÑO:\s*(.*?)<').strip()
        if size:
            title_bloque += " [%s]" % size
        new_item = item.clone(title=title_bloque, action="", text_color=color1)
        if extra_info:
            extra_info = scrapertools.htmlclean(extra_info)
            new_item.infoLabels["plot"] = extra_info
            new_item.title += " +INFO"
        itemlist.append(new_item)
        if '<div class="subiendo">' in bloque:
            # Links are still being uploaded for this block.
            itemlist.append(item.clone(title=" Los enlaces se están subiendo", action=""))
            continue
        patron = 'class="separate.*? ([^"]+)".*?(?:make_links.*?,|href=)[\'"]([^"\']+)["\']'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for scrapedserver, scrapedurl in matches:
            if (scrapedserver == "ul") | (scrapedserver == "uploaded"):
                scrapedserver = "uploadedto"
            titulo = unicode(scrapedserver, "utf-8").capitalize().encode("utf-8")
            # Magnets in the old format were already collected above.
            if titulo == "Magnet" and old_format:
                continue
            elif titulo == "Magnet" and not old_format:
                title = " Enlace Torrent"
                scrapedurl = scrapertools.find_single_match(scrapedurl, '(magnet.*)')
                scrapedurl = urllib.unquote(re.sub(r'&amp;b=4', '', scrapedurl))
                itemlist.append(item.clone(action="play", server="torrent", title=title, url=scrapedurl,
                                           text_color="green"))
                continue
            if servertools.is_server_enabled(scrapedserver):
                try:
                    # servers_module = __import__("servers." + scrapedserver)
                    # Count how many mirrors the obfuscated token expands into.
                    urls = show_links(scrapedurl)
                    numero = str(len(urls))
                    titulo = " %s - Nº enlaces: %s" % (titulo, numero)
                    itemlist.append(item.clone(action="enlaces", title=titulo, extra=scrapedurl, server=scrapedserver))
                except:
                    pass
    itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                               text_color="magenta"))
    if item.extra != "findvideos" and config.get_videolibrary_support():
        itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", action="add_pelicula_to_library",
                             extra="findvideos", url=item.url, infoLabels={'title': item.fulltitle},
                             fulltitle=item.fulltitle, text_color="green"))
    return itemlist
def play(item):
    """Resolve the final playable URL for an item.

    Values in item.url that are neither http nor magnet are opaque source
    tokens that must be exchanged for the real URL via the site's
    admin-ajax endpoint; intermediary pages are followed one hop further.
    """
    logger.info()
    itemlist = []
    if not item.url.startswith("http") and not item.url.startswith("magnet"):
        post = "source=%s&action=obtenerurl" % urllib.quote(item.url)
        headers = {'X-Requested-With': 'XMLHttpRequest'}
        # NOTE(review): the ajax endpoint is forced to plain http -- presumably
        # the https endpoint fails on some Kodi builds; confirm.
        data = httptools.downloadpage("%s/wp-admin/admin-ajax.php" % host.replace("https", "http"), post=post,
                                      headers=headers, follow_redirects=False).data
        url = scrapertools.find_single_match(data, 'url":"([^"]+)"').replace("\\", "")
        # enlacesmix/enlacesws act as intermediaries; follow them to the real iframe.
        if "enlacesmix" in url or "enlacesws.com" in url:
            data = httptools.downloadpage(url, headers={'Referer': item.extra}, follow_redirects=False).data
            url = scrapertools.find_single_match(data, '<iframe.*?src="([^"]+)"')
        links = servertools.findvideosbyserver(url, item.server)
        if links:
            itemlist.append(item.clone(action="play", server=links[0][2], url=links[0][1]))
    else:
        # Direct http/magnet URL: nothing to resolve.
        itemlist.append(item.clone())
    return itemlist
def enlaces(item):
    """Expand an obfuscated multi-link token (item.extra) into play items.

    Mirrors are numbered from highest to lowest; folder links keep the
    per-file title reported by servertools.
    """
    logger.info()
    itemlist = []
    urls = show_links(item.extra)
    numero = len(urls)
    for url in urls:
        links = servertools.findvideos(data=url)
        if links:
            for link in links:
                if "/folder/" in url:
                    # Folder links already carry a meaningful per-file title.
                    titulo = link[0]
                else:
                    titulo = "%s - Enlace %s" % (item.title.split("-")[0], str(numero))
                    numero -= 1
                itemlist.append(item.clone(action="play", server=link[2], title=titulo, url=link[1]))
    itemlist.sort(key=lambda it: it.title)
    return itemlist
def show_links(data):
    """Decode the obfuscated link list used by descargasmix pages.

    *data* is a comma-separated list of base64 tokens; each decoded token is
    a dash-separated list of character codes that spells out one URL
    (e.g. b64("104-116-116-112") -> "http").

    Returns a list with one decoded URL string per input token.
    """
    import base64
    urls = []
    for token in data.split(","):
        # Split on b"-" so the decode works on both Python 2 (str) and
        # Python 3 (bytes) return values of b64decode.
        codes = base64.b64decode(token).split(b"-")
        urls.append("".join(chr(int(code)) for code in codes))
    return urls
def get_data(url_orig, get_host=False):
    """Download a page, falling back to a hideproxy.me web proxy when direct
    access fails (the site blocks some networks / https breaks on old Kodi).

    With get_host=True, return the final host URL (without trailing slash)
    instead of the page body; via the proxy path an empty string is returned
    when the real target cannot be determined or equals the current host.
    """
    try:
        # Once a direct-access error has been recorded, go straight to the proxy.
        if config.get_setting("url_error", "descargasmix"):
            raise Exception
        response = httptools.downloadpage(url_orig)
        if not response.data or "urlopen error [Errno 1]" in str(response.code):
            raise Exception
        if get_host:
            if response.url.endswith("/"):
                response.url = response.url[:-1]
            return response.url
    except:
        # Direct access failed: remember it and retry through a random proxy node.
        config.set_setting("url_error", True, "descargasmix")
        import random
        server_random = ['nl', 'de', 'us']
        server = server_random[random.randint(0, 2)]
        url = "https://%s.hideproxy.me/includes/process.php?action=update" % server
        post = "u=%s&proxy_formdata_server=%s&allowCookies=1&encodeURL=0&encodePage=0&stripObjects=0&stripJS=0&go=" \
               % (url_orig, server)
        while True:
            # Follow the proxy's redirect chain manually (follow_redirects=False).
            response = httptools.downloadpage(url, post, follow_redirects=False)
            if response.headers.get("location"):
                url = response.headers["location"]
                post = ""
            else:
                if get_host:
                    # The proxied URL carries the real target in its "u=" parameter.
                    target = urllib.unquote(scrapertools.find_single_match(url, 'u=([^&]+)&'))
                    if target.endswith("/"):
                        target = target[:-1]
                    if target and target != host:
                        return target
                    else:
                        return ""
                break
    return response.data
def newest(categoria):
    """Feed for the global "Novedades" section for the given category.

    Returns [] on any error so a broken channel never interrupts the
    aggregated news listing.
    """
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == 'torrent':
            item.url = host+'/peliculas'
            itemlist = entradas(item)
        if categoria == 'series':
            item.url = host + '/series'
            itemlist.extend(entradas(item))
        if categoria == '4k':
            item.url = host + '/peliculas/4k'
            itemlist.extend(entradas(item))
        if categoria == 'anime':
            item.url = host + '/anime'
            itemlist.extend(entradas(item))
        # Drop the trailing pagination entry, if any.
        if itemlist[-1].title == ">> Siguiente":
            itemlist.pop()
    # Swallow the exception so the news channel is not interrupted when one channel fails.
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
    return itemlist

View File

@@ -377,6 +377,7 @@ def newest(categoria):
if categoria == 'torrent':
item.url = host
item.extra = "peliculas"
item.category_new= 'newest'
itemlist = listado(item)
if itemlist[-1].title == "Página siguiente >>":

View File

@@ -3,13 +3,69 @@
"name": "EstrenosGo",
"active": true,
"adult": false,
"language": ["cast"],
"fanart": "https://github.com/master-1970/resources/raw/master/images/fanart/estrenosgo.png",
"thumbnail": "https://github.com/master-1970/resources/raw/master/images/squares/estrenosgo.png",
"language": ["cast", "LAT", "VOSE", "VOS"],
"fanart": "estrenosgo.png",
"thumbnail": "estrenosgo.png",
"banner": "estrenosgo.png",
"categories": [
"movie",
"tvshow",
"torrent"
"torrent",
"direct"
],
"settings": [
{
"default": true,
"enabled": true,
"id": "include_in_global_search",
"label": "Incluir en busqueda global",
"type": "bool",
"visible": true
},
{
"default": true,
"enabled": true,
"id": "modo_grafico",
"label": "Buscar información extra (TMDB)",
"type": "bool",
"visible": true
},
{
"id": "timeout_downloadpage",
"type": "list",
"label": "Timeout (segs.) en descarga de páginas o verificación de servidores",
"default": 5,
"enabled": true,
"visible": true,
"lvalues": [
"None",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10"
]
},
{
"id": "seleccionar_ult_temporadda_activa",
"type": "bool",
"label": "Seleccionar para Videoteca si estará activa solo la última Temporada",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": false
}
]
}

File diff suppressed because it is too large Load Diff

View File

@@ -1008,10 +1008,11 @@ def newest(categoria):
try:
if categoria == 'peliculas':
item.url = host + "peliculas/"
item.extra = "novedades"
item.extra = "peliculas"
item.channel = "mejortorrent1"
item.category_new= 'newest'
item.tipo = False
itemlist = listado_busqueda(item)
itemlist = listado(item)
if "Pagina siguiente >>" in itemlist[-1].title:
itemlist.pop()
@@ -1019,6 +1020,7 @@ def newest(categoria):
item.url = host + "documentales/"
item.extra = "documentales"
item.channel = "mejortorrent1"
item.category_new= 'newest'
item.tipo = False
itemlist = listado(item)
if "Pagina siguiente >>" in itemlist[-1].title:

View File

@@ -534,7 +534,7 @@
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Documentales",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": false

File diff suppressed because it is too large Load Diff

View File

@@ -1,71 +0,0 @@
{
"id": "peliculasrey",
"name": "peliculasrey",
"active": false,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "peliculasrey.png",
"banner": "peliculasrey.png",
"categories": [
"direct",
"movie"
],
"settings":[
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Películas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_documentales",
"type": "bool",
"label": "Incluir en Novedades - Documentales",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_castellano",
"type": "bool",
"label": "Incluir en Novedades - Castellano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,188 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import logger, config
host = "http://www.peliculasrey.com/"
def mainlist(item):
    """Channel root menu: recent releases, four filter menus and search."""
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, action="peliculas", title="Recientes", url=host))
    # The filter menus share one action; `category` names the <section>
    # of the home page that filtro() will scrape.
    filtros = [("Año de Lanzamiento", "lanzamiento"),
               ("Idiomas", "idioma"),
               ("Por calidad", "calidades"),
               ("Por género", "generos")]
    for titulo, categoria in filtros:
        itemlist.append(Item(channel=item.channel,
                             action="filtro",
                             title=titulo,
                             category=categoria))
    itemlist.append(Item(channel=item.channel, action="search", title="Buscar...", url=host))
    return itemlist
def filtro(item):
    """List the links found inside the home page <section> named by
    item.category ("lanzamiento", "idioma", "calidades" or "generos")."""
    logger.info(item.category)
    itemlist = []
    # Narrow the home page to the requested section, then pull every anchor in it.
    seccion = scrapertools.find_single_match(
        httptools.downloadpage(host).data,
        '<section class="%s">(.*?)</section>' % item.category)
    enlaces = scrapertools.find_multiple_matches(seccion, '<a href="([^"]+).*?title="([^"]+)')
    for url, titulo in enlaces:
        # Hide adult categories unless adult mode is enabled in settings.
        if "Adulto" in titulo and config.get_setting("adult_mode") == 0:
            continue
        itemlist.append(Item(channel=item.channel, action="peliculas", title=titulo.strip(),
                             url=url, viewmode="movie"))
    return itemlist
def search(item, texto):
    """Global-search entry point: query the site and delegate to peliculas().

    Any failure is logged and an empty list returned, so one broken channel
    cannot abort the add-on's global search.
    """
    logger.info()
    item.url = "%s?s=%s" % (host, texto.replace(" ", "+"))
    try:
        return peliculas(item)
    except:
        # Swallow everything: log the exception details and report no results.
        import sys
        for detail in sys.exc_info():
            logger.error("%s" % detail)
        return []
def peliculas(item):
    """Parse a movie listing page into findvideos items, plus pagination."""
    logger.info()
    itemlist = []
    # Download the page and flatten whitespace so the regexes match.
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    # NOTE(review): tabla_pelis is extracted but never used -- the matches
    # below run over the full page (`data`), not this narrowed block.
    # Confirm whether that is intended or a latent bug.
    tabla_pelis = scrapertools.find_single_match(data,
                                                 'class="section col-17 col-main grid-125 overflow clearfix">(.*?)</div></section>')
    patron = '<img src="([^"]+)" alt="([^"]+).*?href="([^"]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
        # Titles usually end with "(YYYY)"; split the year out for infoLabels.
        year = scrapertools.find_single_match(scrapedtitle, "[0-9]{4}")
        fulltitle = scrapedtitle.replace(scrapertools.find_single_match(scrapedtitle, '\([0-9]+\)' ), "")
        item.infoLabels['year'] = year
        itemlist.append(item.clone(channel = item.channel,
                                   action = "findvideos",
                                   title = scrapedtitle,
                                   url = scrapedurl,
                                   thumbnail = scrapedthumbnail,
                                   plot = "",
                                   fulltitle = fulltitle
                                   ))
    tmdb.set_infoLabels(itemlist, True)
    next_page = scrapertools.find_single_match(data, 'rel="next" href="([^"]+)')
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel, action="peliculas", title=">> Página siguiente", url=next_page, folder=True,
                 viewmode="movie"))
    return itemlist
def findvideos(item):
    """List the mirrors of a movie, deduplicated, tagged with language/quality."""
    logger.info()
    itemlist = []
    encontrados = []
    data = httptools.downloadpage(item.url).data
    patron = 'hand" rel="([^"]+).*?title="(.*?)".*?<span>([^<]+)</span>.*?</span><span class="q">(.*?)<'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, server_name, language, quality in matches:
        # Skip duplicate mirrors of the same URL.
        if scrapedurl in encontrados:
            continue
        encontrados.append(scrapedurl)
        language = language.strip()
        quality = quality.strip()
        mq = "(" + quality + ")"
        # Some rows carry a URL in the quality column; blank it out.
        if "http" in quality:
            quality = mq = ""
        # The %s placeholder is filled later with the detected server name
        # by get_servers_itemlist (see the lambda below).
        titulo = "%s (" + language + ") " + mq
        itemlist.append(item.clone(channel=item.channel,
                                   action = "play",
                                   title = titulo,
                                   url = scrapedurl,
                                   folder = False,
                                   language = language,
                                   quality = quality
                                   ))
    tmdb.set_infoLabels(itemlist, True)
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    if itemlist:
        # Blank separator row before the extra actions.
        itemlist.append(Item(channel=item.channel))
        itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer",
                                   text_color="magenta"))
        # "Add this movie to the KODI video library" option
        if config.get_videolibrary_support():
            itemlist.append(Item(channel=item.channel, title="Añadir pelicula a la videoteca", text_color="green",
                                 action="add_pelicula_to_library", url=item.url, thumbnail=item.thumbnail,
                                 fulltitle=item.fulltitle))
    return itemlist
def play(item):
    """Resolve a play action: restore the content thumbnail on the item and
    hand it straight back to the player inside a single-element list."""
    item.thumbnail = item.contentThumbnail
    return [item]
def newest(categoria):
    """Feed for the global "Novedades" section for the given category.

    Returns [] on any error so one broken channel never interrupts the
    aggregated news listing.
    """
    logger.info()
    itemlist = []
    item = Item()
    # Map each news category to its listing path on the site.
    rutas = {'peliculas': "",
             'documentales': "genero/documental/",
             'infantiles': "genero/animacion-e-infantil/",
             'terror': "genero/terror/",
             'castellano': "idioma/castellano/",
             'latino': "idioma/latino/"}
    try:
        if categoria in rutas:
            item.url = host + rutas[categoria]
        itemlist = peliculas(item)
        # Drop the trailing ">> Página siguiente" pagination entry if present.
        if itemlist[-1].action == "peliculas":
            itemlist.pop()
    except:
        # Swallow everything: log the exception details and report no results.
        import sys
        for detail in sys.exc_info():
            logger.error("{0}".format(detail))
        return []
    return itemlist

View File

@@ -103,7 +103,7 @@ def sub_search(item):
title = dict["title"] + " (" + dict["release_year"] + ")",
url = host + dict["slug"]
))
tmdb.set_infoLabels(itemlist)
tmdb.set_infoLabels(itemlist, seekTmdb=True)
return itemlist
@@ -278,6 +278,7 @@ def seasons(item):
for title in matches:
season = title.replace('Temporada ','')
infoLabels['season'] = season
title = 'Temporada %s' % season.lstrip('0')
itemlist.append(Item(
channel=item.channel,
title=title,
@@ -289,7 +290,23 @@ def seasons(item):
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist[::-1]
itemlist = itemlist[::-1]
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += season_episodes(tempitem)
return itemlist
def season_episodes(item):
logger.info()
@@ -304,8 +321,9 @@ def season_episodes(item):
for url, episode in matches:
episodenumber = re.sub('C.* ','',episode)
infoLabels['episode'] = episodenumber
title = '%sx%s - %s' % (infoLabels['season'], episodenumber, episode)
itemlist.append(Item(channel=item.channel,
title= episode,
title= title,
url = host+url,
action = 'findvideos',
infoLabels=infoLabels,
@@ -366,15 +384,18 @@ def findvideos(item):
for language in matches:
video_list.extend(get_links_by_language(item, language))
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle=item.contentTitle
))
video_list = servertools.get_servers_itemlist(video_list, lambda i: i.title % (i.server.capitalize(), i.language,i.quality) )
video_list = servertools.get_servers_itemlist(video_list, lambda i: i.title % (i.server.capitalize(), i.language,
i.quality) )
if item.contentType != 'episode':
if config.get_videolibrary_support() and len(video_list) > 0 and item.extra != 'findvideos':
video_list.append(
Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle=item.contentTitle
))
return video_list

View File

@@ -112,11 +112,7 @@ def menuseries(item):
item.fanart = fanart_host
item.text_color = None
item.thumbnail = "https://s18.postimg.cc/r5cylu6rd/12_-_oi_RDsdv.png"
itemlist.append(item.clone(title="Películas", action="menupeliculas", text_color=color3, text_blod=True))
item.thumbnail = "https://s18.postimg.cc/ruvqy6zl5/15_-_9m9_Dp1m.png"
itemlist.append(item.clone(title="Series:", folder=False, text_color=color3, text_blod=True, select=True))
itemlist.append(item.clone(action="peliculas", title=" Novedades", url="https://www.plusdede.com/series", thumbnail='https://s18.postimg.cc/in3ihji95/11_-_WPg_H5_Kx.png'))
itemlist.append(item.clone(action="generos", title=" Por géneros", url="https://www.plusdede.com/series", thumbnail='https://s18.postimg.cc/p0slktaah/5_-_c_Nf_KRvm.png'))
itemlist.append(
@@ -133,8 +129,6 @@ def menuseries(item):
itemlist.append(item.clone(action="search", title=" Buscar...", url="https://www.plusdede.com/series", thumbnaiil='https://s18.postimg.cc/s7n54ghvt/1_-_01_ZDYii.png'))
itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
itemlist.append(item.clone(title="Listas", action="menulistas", text_color=color3, text_blod=True, thumbnail='https://s18.postimg.cc/xj21p46ih/10_-_Uf7e_XHE.png'))
itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
item.thumbnail = ""
itemlist.append(Item(channel=item.channel, action="settingCanal", title="Configuración...", url="", thumbnail='https://s18.postimg.cc/c9efeassp/3_-_QAHK2_Tc.png'))
return itemlist
@@ -149,7 +143,6 @@ def menupeliculas(item):
item.text_color = None
item.thumbnail = "https://s18.postimg.cc/r5cylu6rd/12_-_oi_RDsdv.png"
itemlist.append(item.clone(title="Películas:", folder=False, text_color=color3, text_blod=True, select=True))
itemlist.append(item.clone(action="peliculas", title=" Novedades", url="https://www.plusdede.com/pelis", thumbnail='https://s18.postimg.cc/in3ihji95/11_-_WPg_H5_Kx.png'))
itemlist.append(item.clone(action="generos", title=" Por géneros", url="https://www.plusdede.com/pelis", thumbnail='https://s18.postimg.cc/p0slktaah/5_-_c_Nf_KRvm.png'))
itemlist.append(item.clone(action="peliculas", title=" Solo HD", url="https://www.plusdede.com/pelis?quality=3", thumbnail='https://s18.postimg.cc/e17e95mfd/16_-_qmqn4_Si.png'))
@@ -161,14 +154,8 @@ def menupeliculas(item):
item.clone(action="peliculas", title=" Favoritas", url="https://www.plusdede.com/pelis/favorites", thumbnail='https://s18.postimg.cc/n8zmpwynd/4_-_JGrig_Ep.png'))
itemlist.append(item.clone(action="peliculas", title=" Vistas", url="https://www.plusdede.com/pelis/seen", thumbnail='https://s18.postimg.cc/5vpcay0qh/17_-_M2in_Fp_O.png'))
itemlist.append(item.clone(action="search", title=" Buscar...", url="https://www.plusdede.com/pelis", thumbnail='https://s18.postimg.cc/s7n54ghvt/1_-_01_ZDYii.png'))
itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/TV%20Series.png"
itemlist.append(item.clone(title="Series", action="menuseries", text_color=color3, text_blod=True, thumbnail='https://s18.postimg.cc/ruvqy6zl5/15_-_9m9_Dp1m.png'))
itemlist.append(item.clone(title="Listas", action="menulistas", text_color=color3, text_blod=True, thumbnail='https://s18.postimg.cc/xj21p46ih/10_-_Uf7e_XHE.png'))
itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
item.thumbnail = ""
itemlist.append(item.clone(channel=item.channel, action="settingCanal", title="Configuración...", url="", thumbnail='https://s18.postimg.cc/c9efeassp/3_-_QAHK2_Tc.png'))
return itemlist
@@ -182,14 +169,6 @@ def menulistas(item):
item.fanart = fanart_host
item.text_color = None
item.thumbnail = "https://s18.postimg.cc/r5cylu6rd/12_-_oi_RDsdv.png"
itemlist.append(item.clone(title="Películas", action="menupeliculas", text_color=color3, text_blod=True))
item.thumbnail = "https://s18.postimg.cc/ruvqy6zl5/15_-_9m9_Dp1m.png"
itemlist.append(item.clone(title="Series", action="menuseries", text_color=color3, text_blod=True))
itemlist.append(item.clone(title="Listas:", folder=False, text_color=color3, text_blod=True, thumbnail='https://s18.postimg.cc/xj21p46ih/10_-_Uf7e_XHE.png'))
itemlist.append(
item.clone(action="listas", tipo="populares", title=" Populares", url="https://www.plusdede.com/listas", thumbnail='https://s18.postimg.cc/7aqwzrha1/8_-_3rn14_Tq.png'))
itemlist.append(
@@ -197,6 +176,7 @@ def menulistas(item):
itemlist.append(
item.clone(action="listas", tipo="tuslistas", title=" Tus Listas", url="https://www.plusdede.com/listas"))
itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
item.thumbnail = ""
itemlist.append(item.clone(channel=item.channel, action="settingCanal", title="Configuración...", url="", thumbnail='https://s18.postimg.cc/c9efeassp/3_-_QAHK2_Tc.png'))
return itemlist

View File

@@ -5,7 +5,7 @@
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "https://s22.postimg.cc/nucz720sx/image.png",
"banner": "",
"banner": "seriesblanco.png",
"categories": [
"tvshow",
"vos"

View File

@@ -28,34 +28,26 @@ list_servers = ['powvideo', 'streamplay', 'filebebo', 'flashx', 'gamovideo', 'no
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
thumb_series = get_thumb("channels_tvshow.png")
thumb_series_az = get_thumb("channels_tvshow_az.png")
thumb_buscar = get_thumb("search.png")
itemlist = []
itemlist.append(
Item(action="listado_alfabetico", title="Listado Alfabetico", channel=item.channel, thumbnail=thumb_series_az))
itemlist.append(
Item(action="novedades", title="Capítulos de estreno", channel=item.channel, thumbnail=thumb_series))
itemlist.append(Item(action="search", title="Buscar", channel=item.channel, thumbnail=thumb_buscar))
itemlist = filtertools.show_option(itemlist, item.channel, list_idiomas, list_quality)
autoplay.show_option(item.channel, itemlist)
return itemlist
def listado_alfabetico(item):
logger.info()
itemlist = [item.clone(action="series_por_letra", title="0-9")]
for letra in string.ascii_uppercase:
itemlist.append(item.clone(action="series_por_letra", title=letra))
return itemlist
@@ -70,7 +62,6 @@ def series_por_letra_y_grupo(item):
logger.info("letra: %s - grupo: %s" % (item.letter, item.extra))
itemlist = []
url = urlparse.urljoin(HOST, "autoload_process.php")
post_request = {
"group_no": item.extra,
"letra": item.letter.lower()
@@ -80,10 +71,6 @@ def series_por_letra_y_grupo(item):
patron = '<div class=list_imagen><img src=(.*?) \/>.*?<div class=list_titulo><a href=(.*?) style=.*?inherit;>(.*?)'
patron +='<.*?justify>(.*?)<.*?Año:<\/b>.*?(\d{4})<'
matches = re.compile(patron, re.DOTALL).findall(data)
#series = re.findall(
# 'list_imagen.+?src="(?P<img>[^"]+).+?<div class="list_titulo"><a[^>]+href="(?P<url>[^"]+)[^>]+>(.*?)</a>', data,
# re.MULTILINE | re.DOTALL)
for img, url, name, plot, year in matches:
new_item= Item(
channel = item.channel,
@@ -99,13 +86,10 @@ def series_por_letra_y_grupo(item):
if year:
tmdb.set_infoLabels_item(new_item)
itemlist.append(new_item)
if len(matches) == 8:
itemlist.append(item.clone(title="Siguiente >>", action="series_por_letra_y_grupo", extra=item.extra + 1))
if item.extra > 0:
itemlist.append(item.clone(title="<< Anterior", action="series_por_letra_y_grupo", extra=item.extra - 1))
return itemlist
@@ -115,77 +99,76 @@ def novedades(item):
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = 'sidebarestdiv><a title=(.*?\d+X\d+) (.*?) href=(.*?)>.*?src=(.*?)>'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
for title, language,url, img in matches:
language = IDIOMAS[language]
itemlist.append(item.clone(action="findvideos", title=title, url=urlparse.urljoin(HOST, url), thumbnail=img,
language=language))
return itemlist
def newest(categoria):
logger.info("categoria: %s" % categoria)
if categoria != 'series':
return []
return novedades(Item())
def episodios(item):
logger.info("url: %s" % item.url)
infoLabels = {}
data = httptools.downloadpage(item.url).data
episodes = re.findall('visco.*?href="(?P<url>[^"]+).+?nbsp; (?P<title>.*?)</a>.+?ucapaudio.?>(?P<langs>.*?)</div>',
data, re.MULTILINE | re.DOTALL)
itemlist = []
for url, title, langs in episodes:
s_e = scrapertools.get_season_and_episode(title)
infoLabels = item.infoLabels
infoLabels["season"] = s_e.split("x")[0]
infoLabels["episode"] = s_e.split("x")[1]
languages = " ".join(
["[%s]" % IDIOMAS.get(lang, lang) for lang in re.findall('images/s-([^\.]+)', langs)])
filter_lang = languages.replace("[", "").replace("]", "").split(" ")
itemlist.append(item.clone(action="findvideos",
infoLabels = infoLabels,
language=filter_lang,
title="%s %s %s" % (item.title, title, languages),
url=urlparse.urljoin(HOST, url),
language=filter_lang
url=urlparse.urljoin(HOST, url)
))
itemlist = filtertools.get_links(itemlist, item, list_idiomas, list_quality)
# Opción "Añadir esta serie a la videoteca de XBMC"
tmdb.set_infoLabels(itemlist, True)
# Opción "Añadir esta serie a la videoteca de KODI"
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios"))
return itemlist
def search(item, texto):
logger.info("texto: %s" % texto)
itemlist = []
infoLabels = ()
data = httptools.downloadpage(urlparse.urljoin(HOST, "/buscar.php?term=%s" % texto)).data
data_dict = jsontools.load(data)
try:
tvshows = data_dict["myData"]
except:
return []
return [item.clone(action="episodios",
title=show["titulo"],
show=show["titulo"],
url=urlparse.urljoin(HOST, show["urla"]),
for show in tvshows:
itemlist.append(item.clone(action="episodios",
context=filtertools.context(item, list_idiomas, list_quality),
contentSerieName=show["titulo"],
thumbnail=urlparse.urljoin(HOST, show["img"]),
context=filtertools.context(item, list_idiomas, list_quality)
) for show in tvshows]
title=show["titulo"],
url=urlparse.urljoin(HOST, show["urla"])
))
tmdb.set_infoLabels(itemlist)
return itemlist
def findvideos(item):
logger.info("url: %s" % item.url)
data = httptools.downloadpage(item.url).data
expr = 'mtos' + '.+?' + \
'<div.+?images/(?P<lang>[^\.]+)' + '.+?' + \
'<div[^>]+>\s+(?P<date>[^\s<]+)' + '.+?' + \
@@ -193,52 +176,49 @@ def findvideos(item):
'<div.+?href="(?P<url>[^"]+).+?images/(?P<type>[^\.]+)' + '.+?' + \
'<div[^>]+>\s*(?P<quality>.*?)</div>' + '.+?' + \
'<div.+?<a.+?>(?P<uploader>.*?)</a>'
links = re.findall(expr, data, re.MULTILINE | re.DOTALL)
itemlist = []
try:
filtro_enlaces = config.get_setting("filterlinks", item.channel)
except:
filtro_enlaces = 2
typeListStr = ["Descargar", "Ver"]
for lang, date, server, url, linkType, quality, uploader in links:
linkTypeNum = 0 if linkType == "descargar" else 1
if filtro_enlaces != 2 and filtro_enlaces != linkTypeNum:
continue
if server == "Thevideo": server = "thevideome"
if server == "1fichier": server = "onefichier"
if server == "Uploaded": server = "uploadedto"
itemlist.append(item.clone(
action="play",
title="{linkType} en {server} [{lang}] [{quality}] ({uploader}: {date})".format(
linkType=typeListStr[linkTypeNum],
lang=IDIOMAS.get(lang, lang),
date=date,
server=server.rstrip(),
server=server.rstrip().capitalize(),
quality=quality,
uploader=uploader),
server=server.rstrip(),
server=server.lower().rstrip(),
url=urlparse.urljoin(HOST, url),
language=IDIOMAS.get(lang,lang),
quality=quality
)
)
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_idiomas, list_quality)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
return itemlist
def play(item):
logger.info("play: %s" % item.url)
itemlist = []
data = httptools.downloadpage(item.url).data
video_url = scrapertools.find_single_match(data, "location.href='([^']+)")
itemlist = servertools.find_video_items(data=video_url)
item.url = scrapertools.find_single_match(data, "location.href='([^']+)")
item.server = ""
itemlist.append(item.clone())
itemlist = servertools.get_servers_itemlist(itemlist)
itemlist[0].thumbnail=item.contentThumbnail
return itemlist

View File

@@ -1,7 +1,7 @@
{
"id": "seriesyonkis",
"name": "Seriesyonkis",
"active": true,
"active": false,
"adult": false,
"language": ["cast"],
"thumbnail": "seriesyonkis.png",

View File

@@ -607,22 +607,20 @@ def findvideos(item):
def play(item):
logger.info()
itemlist = []
#logger.debug('item.url %s' % item.url)
uri = scrapertools.find_single_match(item.url, '(/transcoder[\w\W]+)')
s = scrapertools.find_single_match(item.url, r'http.*?://(.*?)\.')
#logger.debug('uri %s' % uri)
#logger.debug('s %s'% s)
uri_request = host + "/video2-prod/s/uri?uri=%s&s=%s&_=%s" % (uri, s, int(time.time()))
data = httptools.downloadpage(uri_request).data
data = jsontools.load(data)
#logger.debug(data)
if data['s'] == None:
data['s'] = ''
url = item.url.replace(".tv-vip.com/transcoder/", ".%s/e/transcoder/") % (data['b']) + "?tt=" + str(data['a']['tt']) + \
# url = item.url.replace(".tv-vip.com/transcoder/", ".%s/e/transcoder/") % (data['b']) + "?tt=" + str(data['a']['tt']) + \
# "&mm=" + data['a']['mm'] + "&bb=" + data['a']['bb']
url = item.url.replace(".tv-vip.com/transcoder/", ".pelisipad.com/s/transcoder/") + "?tt=" + str(
data['a']['tt']) + \
"&mm=" + data['a']['mm'] + "&bb=" + data['a']['bb']
#logger.debug(url)
#url += "|User-Agent=Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Mobile Safari/537.36"
url += "|User-Agent=Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Mobile Safari/537.36"
itemlist.append(item.clone(action="play", server="directo", url=url, folder=False))

View File

@@ -52,7 +52,7 @@ def getmainlist(view="thumb_"):
thumbnail=get_thumb(thumb_setting, view),
category=config.get_localized_string(30100), viewmode="list"))
itemlist.append(Item(title=config.get_localized_string(30104) + " (" + config.get_localized_string(20000) +" " + config.get_addon_version() + ")", channel="help", action="mainlist",
itemlist.append(Item(title=config.get_localized_string(30104) + " (" + config.get_localized_string(20000) +" " + config.get_addon_version(with_fix=False) + ")", channel="help", action="mainlist",
thumbnail=get_thumb("help.png", view),
category=config.get_localized_string(30104), viewmode="list"))
return itemlist
@@ -170,7 +170,7 @@ def filterchannels(category, view="thumb_"):
# Si tiene configuración añadimos un item en el contexto
context = []
if channel_parameters["has_settings"]:
context.append({"title": "Configurar canal", "channel": "setting", "action": "channel_config",
context.append({"title": config.get_localized_string(70525), "channel": "setting", "action": "channel_config",
"config": channel_parameters["channel"]})
channel_info = set_channel_info(channel_parameters)
@@ -197,7 +197,7 @@ def filterchannels(category, view="thumb_"):
channelslist.insert(0, Item(title=config.get_localized_string(60088), action="mainlist", channel="url",
thumbnail=channel_parameters["thumbnail"], type="generic", viewmode="list"))
if category in ['movie', 'tvshow']:
titles = ['Mas Populares', 'Mejor Valoradas', 'Ahora en cines', 'En Emision', 'Por Genero']
titles = [config.get_localized_string(70028), config.get_localized_string(30985), config.get_localized_string(70527), config.get_localized_string(60264), config.get_localized_string(70528)]
ids = ['popular', 'top_rated', 'now_playing', 'on_the_air']
for x in range(0,3):
if x == 2 and category != 'movie':

View File

@@ -52,7 +52,7 @@ def find_video_items(item=None, data=None):
# Busca los enlaces a los videos
for label, url, server, thumbnail in findvideos(data):
title = "Enlace encontrado en %s" % label
title = config.get_localized_string(70206) % label
itemlist.append(
item.clone(title=title, action="play", url=url, thumbnail=thumbnail, server=server, folder=False))
@@ -154,9 +154,7 @@ def findvideos(data, skip=False):
break
if not devuelve and is_filter_servers:
platformtools.dialog_ok("Filtrar servidores (Lista Negra)",
"No hay enlaces disponibles que cumplan los requisitos de su Lista Negra.",
"Pruebe de nuevo modificando el fíltro en 'Configuracíon Servidores")
platformtools.dialog_ok(config.get_localized_string(60001))
return devuelve
@@ -243,8 +241,8 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo
if server_parameters:
# Muestra un diágo de progreso
if muestra_dialogo:
progreso = platformtools.dialog_progress("alfa",
"Conectando con %s" % server_parameters["name"])
progreso = platformtools.dialog_progress(config.get_localized_string(20000),
config.get_localized_string(70180) % server_parameters["name"])
# Cuenta las opciones disponibles, para calcular el porcentaje
@@ -265,7 +263,7 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo
logger.info("Opciones disponibles: %s | %s" % (len(opciones), opciones))
else:
logger.error("No existe conector para el servidor %s" % server)
error_messages.append("No existe conector para el servidor %s" % server)
error_messages.append(config.get_localized_string(60004) % server)
muestra_dialogo = False
# Importa el server
@@ -310,7 +308,7 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo
# Muestra el progreso
if muestra_dialogo:
progreso.update((100 / len(opciones)) * opciones.index(opcion), "Conectando con %s" % server_name)
progreso.update((100 / len(opciones)) * opciones.index(opcion), config.get_localized_string(70180) % server_name)
# Modo free
if opcion == "free":
@@ -337,10 +335,10 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo
elif response and response[0][0]:
error_messages.append(response[0][0])
else:
error_messages.append("Se ha producido un error en %s" % server_name)
error_messages.append(config.get_localized_string(60006) % server_name)
except:
logger.error("Error en el servidor: %s" % opcion)
error_messages.append("Se ha producido un error en %s" % server_name)
error_messages.append(config.get_localized_string(60006) % server_name)
import traceback
logger.error(traceback.format_exc())
@@ -350,18 +348,18 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo
# Cerramos el progreso
if muestra_dialogo:
progreso.update(100, "Proceso finalizado")
progreso.update(100, config.get_localized_string(60008))
progreso.close()
# Si no hay opciones disponibles mostramos el aviso de las cuentas premium
if video_exists and not opciones and server_parameters.get("premium"):
listapremium = [get_server_parameters(premium)["name"] for premium in server_parameters["premium"]]
error_messages.append(
"Para ver un vídeo en %s necesitas<br/>una cuenta en: %s" % (server, " o ".join(listapremium)))
config.get_localized_string(60009) % (server, " o ".join(listapremium)))
# Si no tenemos urls ni mensaje de error, ponemos uno generico
elif not video_urls and not error_messages:
error_messages.append("Se ha producido un error en %s" % get_server_parameters(server)["name"])
error_messages.append(config.get_localized_string(60006) % get_server_parameters(server)["name"])
return video_urls, len(video_urls) > 0, "<br/>".join(error_messages)
@@ -480,7 +478,7 @@ def get_server_parameters(server):
dict_servers_parameters[server] = dict_server
except:
mensaje = "Error al cargar el servidor: %s\n" % server
mensaje = config.get_localized_string(59986) % server
import traceback
logger.error(mensaje + traceback.format_exc())
return {}
@@ -693,9 +691,9 @@ def filter_servers(servers_list):
servers_list_filter = filter(lambda x: not config.get_setting("black_list", server=x), servers_list)
# Si no hay enlaces despues de filtrarlos
if servers_list_filter or not platformtools.dialog_yesno("Filtrar servidores (Lista Negra)",
"Todos los enlaces disponibles pertenecen a servidores incluidos en su Lista Negra.",
"¿Desea mostrar estos enlaces?"):
if servers_list_filter or not platformtools.dialog_yesno(config.get_localized_string(60000),
config.get_localized_string(60010),
config.get_localized_string(70281)):
servers_list = servers_list_filter
return servers_list
@@ -740,10 +738,10 @@ def check_video_link(url, server, timeout=3):
video_exists, message = server_module.test_video_exists(page_url=url)
if not video_exists:
logger.info("[check_video_link] No existe! %s %s %s" % (message, server, url))
resultado = "NO"
resultado = "[COLOR red][B]NO[/B][/COLOR]"
else:
logger.info("[check_video_link] comprobacion OK %s %s" % (server, url))
resultado = "Ok"
resultado = "[COLOR green][B]OK[/B][/COLOR]"
except:
logger.info("[check_video_link] No se puede comprobar ahora! %s %s" % (server, url))
resultado = "??"

View File

@@ -451,11 +451,11 @@ def find_and_set_infoLabels(item):
if item.contentType == "movie":
tipo_busqueda = "movie"
tipo_contenido = "pelicula"
tipo_contenido = config.get_localized_string(70283)
title = item.contentTitle
else:
tipo_busqueda = "tv"
tipo_contenido = "serie"
tipo_contenido = config.get_localized_string(70529)
title = item.contentSerieName
# Si el titulo incluye el (año) se lo quitamos

View File

@@ -248,7 +248,7 @@ def post_tmdb_listado(item, itemlist):
#logger.debug(item_local)
item_local.last_page = 0
del item_local.last_page #Borramos restos de paginación
del item_local.last_page #Borramos restos de paginación
if item_local.contentSeason_save: #Restauramos el num. de Temporada
item_local.contentSeason = item_local.contentSeason_save
@@ -268,7 +268,7 @@ def post_tmdb_listado(item, itemlist):
title_add = ' '
if item_local.title_subs:
for title_subs in item_local.title_subs:
if "audio" in title_subs.lower(): #se restaura info de Audio
if "audio" in title_subs.lower(): #se restaura info de Audio
title_add += scrapertools.find_single_match(title_subs, r'[a|A]udio (.*?)')
continue
if scrapertools.find_single_match(title_subs, r'(\d{4})'): #Se restaura el año, s no lo ha dado TMDB
@@ -280,13 +280,7 @@ def post_tmdb_listado(item, itemlist):
title_add = '%s -%s-' % (title_add, title_subs) #se agregan el resto de etiquetas salvadas
item_local.title_subs = []
del item_local.title_subs
if item_local.from_title:
if item_local.contentType == 'movie':
item_local.contentTitle = item_local.from_title
else:
item_local.contentSerieName = item_local.from_title
#Preparamos el Rating del vídeo
rating = ''
try:
@@ -319,6 +313,15 @@ def post_tmdb_listado(item, itemlist):
if item_local.infoLabels['aired']:
item_local.infoLabels['year'] = scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})')
if item_local.from_title:
if item_local.contentType == 'movie':
item_local.contentTitle = item_local.from_title
item_local.title = item_local.from_title
else:
item_local.contentSerieName = item_local.from_title
if item_local.contentType == 'season':
item_local.title = item_local.from_title
# Preparamos el título para series, con los núm. de temporadas, si las hay
if item_local.contentType in ['season', 'tvshow', 'episode']:
if item_local.contentType == "episode":
@@ -738,10 +741,10 @@ def post_tmdb_episodios(item, itemlist):
item_local.infoLabels['year'] = scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})')
#Preparamos el título para que sea compatible con Añadir Serie a Videoteca
if "Temporada" in item_local.title: #Compatibilizamos "Temporada" con Unify
if "Temporada" in item_local.title: #Compatibilizamos "Temporada" con Unify
item_local.title = '%sx%s al 99 -' % (str(item_local.contentSeason), str(item_local.contentEpisodeNumber))
if " al " in item_local.title: #Si son episodios múltiples, ponemos nombre de serie
if " al 99" in item_local.title.lower(): #Temporada completa. Buscamos num total de episodios de la temporada
if " al " in item_local.title: #Si son episodios múltiples, ponemos nombre de serie
if " al 99" in item_local.title.lower(): #Temporada completa. Buscamos num total de episodios de la temporada
item_local.title = item_local.title.replace("99", str(num_episodios))
item_local.title = '%s %s' % (item_local.title, item_local.contentSerieName)
item_local.infoLabels['episodio_titulo'] = '%s - %s [%s] [%s]' % (scrapertools.find_single_match(item_local.title, r'(al \d+)'), item_local.contentSerieName, item_local.infoLabels['year'], rating)
@@ -886,7 +889,7 @@ def post_tmdb_findvideos(item, itemlist):
En Itemlist devuelve un Item con el pseudotítulo. Ahí el canal irá agregando el resto.
"""
logger.debug(item)
#logger.debug(item)
#Creción de título general del vídeo a visualizar en Findvideos
itemlist = []
@@ -900,6 +903,10 @@ def post_tmdb_findvideos(item, itemlist):
item.unify = config.get_setting("unify")
except:
item.unify = config.get_setting("unify")
if item.contentSeason_save: #Restauramos el num. de Temporada
item.contentSeason = item.contentSeason_save
del item.contentSeason_save
#Salvamos la información de max num. de episodios por temporada para despues de TMDB
num_episodios = item.contentEpisodeNumber
@@ -988,20 +995,26 @@ def post_tmdb_findvideos(item, itemlist):
item.category = item.channel.capitalize()
#Formateamos de forma especial el título para un episodio
title = ''
title_gen = ''
if item.contentType == "episode": #Series
title = '%sx%s' % (str(item.contentSeason), str(item.contentEpisodeNumber).zfill(2)) #Temporada y Episodio
if item.infoLabels['temporada_num_episodios']:
title = '%s (de %s)' % (title, str(item.infoLabels['temporada_num_episodios'])) #Total Episodios
#Si son episodios múltiples, y viene de Videoteca, ponemos nombre de serie
if " al " in item.title and not " al " in item.infoLabels['episodio_titulo']:
title = '%s al %s - ' % (title, scrapertools.find_single_match(item.title, 'al (\d+)'))
if (" al " in item.title or " Al " in item.title) and not "al " in item.infoLabels['episodio_titulo']:
title = '%s al %s - ' % (title, scrapertools.find_single_match(item.title, '[al|Al] (\d+)'))
else:
title = '%s %s' % (title, item.infoLabels['episodio_titulo']) #Título Episodio
title_gen = '%s, %s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR] [%s]' % (title, item.contentSerieName, item.infoLabels['year'], rating, item.quality, str(item.language), scrapertools.find_single_match(item.title, '\s\[(\d+,?\d*?\s\w[b|B])\]')) #Rating, Calidad, Idioma, Tamaño
title_gen = '%s, ' % title
if item.contentType == "episode" or item.contentType == "season": #Series o Temporadas
title_gen += '%s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR] [%s]' % (item.contentSerieName, item.infoLabels['year'], rating, item.quality, str(item.language), scrapertools.find_single_match(item.title, '\s\[(\d+,?\d*?\s\w[b|B])\]')) #Rating, Calidad, Idioma, Tamaño
if item.infoLabels['status'] and item.infoLabels['status'].lower() == "ended":
title_gen = '[TERM.] %s' % title_gen #Marca cuando la Serie está terminada y no va a haber más producción
item.title = title_gen
else: #Películas
title = item.title
title_gen = item.title
@@ -1041,10 +1054,11 @@ def post_tmdb_findvideos(item, itemlist):
item.quality = '[COLOR yellow][%s][/COLOR] %s' % (channel, item.quality)
#agregamos la opción de Añadir a Videoteca para péliculas (no series)
if item.contentType == 'movie' and item.contentChannel != "videolibrary":
if (item.contentType == 'movie' or item.contentType == 'season') and item.contentChannel != "videolibrary":
#Permitimos la actualización de los títulos, bien para uso inmediato, o para añadir a la videoteca
itemlist.append(item.clone(title="** [COLOR yelow]Actualizar Títulos - vista previa videoteca[/COLOR] **", action="actualizar_titulos", extra="películas", tmdb_stat=False, from_action=item.action, from_title_tmdb=item.title, from_update=True))
if item.contentType == 'movie' and item.contentChannel != "videolibrary":
itemlist.append(item.clone(title="**-[COLOR yellow] Añadir a la videoteca [/COLOR]-**", action="add_pelicula_to_library", extra="películas", from_action=item.action, from_title_tmdb=item.title))
#Añadimos la opción de ver trailers
@@ -1171,6 +1185,7 @@ def fail_over_newpct1(item, patron, patron2=None, timeout=None):
data = ''
channel_failed = ''
url_alt = []
if not item.category:
item.category = scrapertools.find_single_match(item.url, 'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').capitalize()
if not item.extra2:
@@ -1226,48 +1241,78 @@ def fail_over_newpct1(item, patron, patron2=None, timeout=None):
item.url_alt = channel_url_failed
item.url = channel_url_failed
item.url = item.url.replace(channel_host_failed, channel_host)
url_alt += [item.url] #salvamos la url para el bucle
item.channel_host = channel_host
#quitamos el código de series, porque puede variar entre webs
if item.action == "episodios" or item.action == "get_seasons":
item.url = re.sub(r'\/\d+\/?$', '', item.url) #parece que con el título solo ecuentra la serie, normalmente...
url_alt = [item.url] #salvamos la url para el bucle, pero de momento ignoramos la inicial con código de serie
#si es un episodio, generalizamos la url para que se pueda encontrar en otro clone. Quitamos la calidad del final de la url
elif item.action == "findvideos" and item.contentType == "episode":
try:
#quitamos el 0 a la izquierda del episodio. Algunos clones no lo aceptan
inter1, inter2, inter3 = scrapertools.find_single_match(item.url, '(http.*?\/temporada-\d+.*?\/capitulo.?-)(\d+)(.*?\/)')
inter2 = re.sub(r'^0', '', inter2)
if inter1 + inter2 + inter3 not in url_alt:
url_alt += [inter1 + inter2 + inter3]
#en este formato solo quitamos la calidad del final de la url
if scrapertools.find_single_match(item.url, 'http.*?\/temporada-\d+.*?\/capitulo.?-\d+.*?\/') not in url_alt:
url_alt += [scrapertools.find_single_match(item.url, 'http.*?\/temporada-\d+.*?\/capitulo.?-\d+.*?\/')]
except:
logger.error("ERROR 88: " + item.action + ": Error al convertir la url: " + item.url)
logger.debug('URLs convertidas: ' + str(url_alt))
if patron == True: #solo nos han pedido verificar el clone
return (item, data) #nos vamos, con un nuevo clone
#Leemos la nueva url
try:
if item.post:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url, post=item.post, timeout=timeout).data)
else:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url, timeout=timeout).data)
except:
data = ''
if not data: #no ha habido suerte, probamos con el siguiente canal válido
logger.error("ERROR 01: " + item.action + ": La Web no responde o la URL es erronea: " + item.url)
continue
#Leemos la nueva url.. Puede haber varias alternativas a la url original
for url in url_alt:
try:
if item.post:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(url, post=item.post, timeout=timeout).data)
else:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(url, timeout=timeout).data)
data_comillas = data.replace("'", "\"")
except:
data = ''
if not data: #no ha habido suerte, probamos con la siguiente url
logger.error("ERROR 01: " + item.action + ": La Web no responde o la URL es erronea: " + url)
continue
#Hemos logrado leer la web, validamos si encontramos un línk válido en esta estructura
#Evitar páginas engañosas que puede meter al canal en un loop infinito
if (not ".com/images/no_imagen.jpg" in data and not ".com/images/imagen-no-disponible.jpg" in data) or item.action != "episodios":
if patron:
data_alt = scrapertools.find_single_match(data, patron)
if patron2 != None:
data_alt = scrapertools.find_single_match(data_alt, patron2)
if not data_alt: #no ha habido suerte, probamos con el siguiente canal
logger.error("ERROR 02: " + item.action + ": Ha cambiado la estructura de la Web: " + item.url + " / Patron: " + patron)
#Hemos logrado leer la web, validamos si encontramos un línk válido en esta estructura
#Evitar páginas engañosas que puede meter al canal en un loop infinito
if (not ".com/images/no_imagen.jpg" in data and not ".com/images/imagen-no-disponible.jpg" in data) or item.action != "episodios":
if patron:
data_alt = scrapertools.find_single_match(data, patron)
if not data_alt:
data_alt = scrapertools.find_single_match(data_comillas, patron)
if patron2 != None:
data_alt = scrapertools.find_single_match(data_alt, patron2)
if not data_alt: #no ha habido suerte, probamos con el siguiente canal
logger.error("ERROR 02: " + item.action + ": Ha cambiado la estructura de la Web: " + url + " / Patron: " + patron)
web_intervenida(item, data)
data = ''
continue
else:
item.url = url #guardamos la url que funciona
break #por fin !!! Este canal parece que funciona
else:
logger.error("ERROR 02: " + item.action + ": Ha cambiado la estructura de la Web: " + url + " / Patron: " + patron)
web_intervenida(item, data)
data = ''
continue
else:
break #por fin !!! Este canal parece que funciona
else:
logger.error("ERROR 02: " + item.action + ": Ha cambiado la estructura de la Web: " + item.url + " / Patron: " + patron)
web_intervenida(item, data)
data = ''
if not data: #no ha habido suerte, probamos con el siguiente clone
url_alt = []
continue
else:
break
del item.extra2 #Borramos acción temporal excluyente
if not data: #Si no ha logrado encontrar nada, salimos limpiando variables
del item.extra2 #Borramos acción temporal excluyente
if not data: #Si no ha logrado encontrar nada, salimos limpiando variables
if item.channel == channel_py:
if item.channel_alt:
item.category = item.channel_alt.capitalize()

View File

@@ -13,7 +13,6 @@ import time
import urllib
from base64 import b64decode
from core import httptools
from platformcode import config
@@ -36,6 +35,7 @@ class UnshortenIt(object):
_anonymz_regex = r'anonymz\.com'
_shrink_service_regex = r'shrink-service\.it'
_rapidcrypt_regex = r'rapidcrypt\.net'
_cryptmango_regex = r'cryptmango'
_maxretries = 5
@@ -73,45 +73,66 @@ class UnshortenIt(object):
return self._unshorten_anonymz(uri)
if re.search(self._rapidcrypt_regex, domain, re.IGNORECASE):
return self._unshorten_rapidcrypt(uri)
if re.search(self._cryptmango_regex, uri, re.IGNORECASE):
return self._unshorten_cryptmango(uri)
return uri, 200
def unwrap_30x(self, uri, timeout=10):
def unwrap_30x(uri, timeout=10):
domain = urlsplit(uri).netloc
self._timeout = timeout
domain = urlsplit(uri).netloc
self._timeout = timeout
try:
# headers stop t.co from working so omit headers if this is a t.co link
if domain == 't.co':
r = httptools.downloadpage(uri, timeout=self._timeout)
return r.url, r.code
# p.ost.im uses meta http refresh to redirect.
if domain == 'p.ost.im':
r = httptools.downloadpage(uri, timeout=self._timeout)
uri = re.findall(r'.*url\=(.*?)\"\.*', r.data)[0]
return uri, r.code
retries = 0
while True:
r = httptools.downloadpage(
uri,
timeout=self._timeout,
follow_redirects=False)
if not r.sucess:
return uri, -1
if 'location' in r.headers and retries < self._maxretries:
r = httptools.downloadpage(
r.headers['location'],
follow_redirects=False)
uri = r.url
retries += 1
else:
try:
# headers stop t.co from working so omit headers if this is a t.co link
if domain == 't.co':
r = httptools.downloadpage(uri, timeout=self._timeout)
return r.url, r.code
# p.ost.im uses meta http refresh to redirect.
if domain == 'p.ost.im':
r = httptools.downloadpage(uri, timeout=self._timeout)
uri = re.findall(r'.*url\=(.*?)\"\.*', r.data)[0]
return uri, r.code
except Exception as e:
return uri, str(e)
retries = 0
while True:
r = httptools.downloadpage(
uri,
timeout=self._timeout,
cookies=False,
follow_redirects=False)
if not r.sucess:
return uri, -1
if '4snip' not in r.url and 'location' in r.headers and retries < self._maxretries:
r = httptools.downloadpage(
r.headers['location'],
cookies=False,
follow_redirects=False)
uri = r.url
retries += 1
else:
return r.url, r.code
except Exception as e:
return uri, str(e)
uri, code = unwrap_30x(uri, timeout)
if 'vcrypt' in uri and 'fastshield' in uri:
# twince because of cookies
httptools.downloadpage(
uri,
timeout=self._timeout,
post='go=go')
r = httptools.downloadpage(
uri,
timeout=self._timeout,
post='go=go')
return r.url, r.code
return uri, code
def _clear_google_outbound_proxy(self, url):
'''
@@ -428,6 +449,18 @@ class UnshortenIt(object):
except Exception as e:
return uri, str(e)
def _unshorten_cryptmango(self, uri):
try:
r = httptools.downloadpage(uri, timeout=self._timeout, cookies=False)
html = r.data
uri = re.findall(r'<iframe src="([^"]+)"[^>]+>', html)[0]
return uri, r.code
except Exception as e:
return uri, str(e)
def unwrap_30x_only(uri, timeout=10):
unshortener = UnshortenIt()

View File

@@ -72,8 +72,8 @@ def check_addon_init():
def check_addon_updates(verbose=False):
logger.info()
ADDON_UPDATES_JSON = 'http://extra.alfa-addon.com/addon_updates/updates.json'
ADDON_UPDATES_ZIP = 'http://extra.alfa-addon.com/addon_updates/updates.zip'
ADDON_UPDATES_JSON = 'https://extra.alfa-addon.com/addon_updates/updates.json'
ADDON_UPDATES_ZIP = 'https://extra.alfa-addon.com/addon_updates/updates.zip'
try:
last_fix_json = os.path.join(config.get_runtime_path(), 'last_fix.json') # información de la versión fixeada del usuario

View File

@@ -846,7 +846,7 @@ msgid "Enter URL"
msgstr ""
msgctxt "#60089"
msgid "Enter the URL [Link to server / download]"
msgid "Enter the URL [Link to server/download]"
msgstr ""
msgctxt "#60090"
@@ -3033,10 +3033,6 @@ msgctxt "#70079"
msgid "Remove only links of "
msgstr ""
msgctxt "#70080"
msgid "Do you want Alfa to auto-configure Kodi's video library?"
msgstr ""
msgctxt "#70082"
msgid "Global Search"
msgstr ""
@@ -4787,3 +4783,35 @@ msgctxt "#70521"
msgid "You can install the Trakt script below, \nonce installed and configured what \nyou see will be synchronized with your account automatically. \nDo you want to continue?"
msgstr ""
msgctxt "#70522"
msgid "No filter"
msgstr ""
msgctxt "#70523"
msgid "%s: The data was restarted"
msgstr ""
msgctxt "#70524"
msgid " Server error, try later."
msgstr ""
msgctxt "#70525"
msgid "Configure channel"
msgstr ""
msgctxt "#70526"
msgid "Verification of counters of videos seen / not seen (uncheck to verify)"
msgstr ""
msgctxt "#70527"
msgid "Now in Theatres "
msgstr ""
msgctxt "#70528"
msgid "Movies by Genre"
msgstr ""
msgctxt "#70529"
msgid "tv show"
msgstr ""

View File

@@ -834,8 +834,8 @@ msgid "Enter URL"
msgstr "Inserisci URL"
msgctxt "#60089"
msgid "Enter the URL [Link to server / download]"
msgstr "Inserire l'URL [Link a server / download]"
msgid "Enter the URL [Link to server/download]"
msgstr "Inserire l'URL [Link a server/download]"
msgctxt "#60090"
msgid "Enter the URL [Direct link to video]."
@@ -3021,14 +3021,6 @@ msgctxt "#70079"
msgid "Remove only links of "
msgstr "Rimuovere solo i collegamenti di "
msgctxt "#70080"
msgid "Do you want Alfa to auto-configure Kodi's video library?"
msgstr "Vuoi che Alfa auto-configuri la videoteca di Kodi?"
msgctxt "#70081"
msgid "If you choose 'No' you can do it later from 'Configuration > Preferences > Paths'."
msgstr "Se scegli 'No' potrai farlo in seguito da 'Configurazione > Preferenze > Percorsi'."
msgctxt "#70082"
msgid "Global Search"
msgstr "Ricerca Globale"
@@ -4257,7 +4249,7 @@ msgstr "Dai un punteggio con un [COLOR %s]%s[/COLOR]"
msgctxt "#70393"
msgid "[%s]: Select the correct %s "
msgstr "[%s]: Seleziona la %s corretta"
msgstr "[%s]: Seleziona il %s corretto"
msgctxt "#70394"
msgid "Action"
@@ -4773,5 +4765,41 @@ msgstr "AutoPlay consente di riprodurre automaticamente i collegamenti direttame
msgctxt "#70521"
msgid "You can install the Trakt script below, \nonce installed and configured what \nyou see will be synchronized with your account automatically. \nDo you want to continue?"
msgstr "Puoi installare lo script Trakt qui sotto, \ nuna volta installato e configurato ciò che \nvedrai verrà sincronizzato automaticamente con il tuo account. \nVuoi continuare?"
msgstr "Puoi installare lo script Trakt qui sotto, \nuna volta installato e configurato ciò che \nvedrai verrà sincronizzato automaticamente con il tuo account. \nVuoi continuare?"
msgctxt "#70522"
msgid "No Filter"
msgstr "Non Filtrare"
msgctxt "#70523"
msgid "No filter"
msgstr "Non filtrare"
msgctxt "#70523"
msgid "%s: The data was restarted"
msgstr "%s: I dati sono stati riavviati"
msgctxt "#70524"
msgid " Server error, try later."
msgstr " Errore del server, riprova più tardi."
msgctxt "#70525"
msgid "Configure channel"
msgstr "Configura canale"
msgctxt "#70526"
msgid "Verification of counters of videos seen / not seen (uncheck to verify)"
msgstr "Verifica dei contatori di video visti/non visti (deselezionare per verificare)"
msgctxt "#70527"
msgid "Now in Theatres "
msgstr "Oggi in Sala"
msgctxt "#70528"
msgid "Movies by Genre"
msgstr "Per genere"
msgctxt "#70529"
msgid "tv show"
msgstr "serie"

View File

@@ -303,7 +303,7 @@ msgstr "Populares"
msgctxt "#30985"
msgid "Top Rated"
msgstr "Mejor valoradas"
msgstr "Mejor Valoradas"
msgctxt "#30986"
msgid "Search by Collection"
@@ -679,7 +679,7 @@ msgstr "Sincronizacion con Trakt iniciada"
msgctxt "#60046"
msgid "TheMovieDB not present.\nInstall it now?"
msgstr "TheMovieDB\nNo se ha encontrado el Scraper de películas de TheMovieDB.\n¿Desea instalarlo ahora?"
msgstr "TheMovieDB\nNo se ha encontrado el proveedor de información de películas de TheMovieDB.\n¿Desea instalarlo ahora?"
msgctxt "#60047"
msgid "The Movie Database is not installed."
@@ -687,7 +687,7 @@ msgstr "The Movie Database no instalado."
msgctxt "#60048"
msgid "The TVDB not present.\nInstall it now?"
msgstr "The TVDB\nNo se ha encontrado el Scraper de series de The TVDB.\n¿Desea instalarlo ahora?"
msgstr "The TVDB\nNo se ha encontrado el proveedor de información de series de The TVDB.\n¿Desea instalarlo ahora?"
msgctxt "#60049"
msgid "The TVDB is not installed."
@@ -707,15 +707,15 @@ msgstr "Errore di impostazione LibraryPath in BD"
msgctxt "#60053"
msgid "Do you want to configure this scraper in italian as default option for the movies ?"
msgstr "¿Desea configurar este Scraper en español como opción por defecto para películas?"
msgstr "¿Desea configurar este proveedor de información en español como opción por defecto para películas?"
msgctxt "#60054"
msgid "Do you want to configure this scraper in italian as default option for the tv series ?"
msgstr "¿Desea configurar este Scraper en español como opción por defecto para series?"
msgstr "¿Desea configurar este proveedor de información en español como opción por defecto para series?"
msgctxt "#60055"
msgid "Error of provider configuration in BD."
msgstr "Error al configurar el scraper en la BD."
msgstr "Error al configurar el proveedor de información en la BD."
msgctxt "#60056"
msgid "Videolibrary %s not configured"
@@ -3033,14 +3033,6 @@ msgctxt "#70079"
msgid "Remove only links of "
msgstr "Eliminar solo los enlaces de "
msgctxt "#70080"
msgid "Do you want Alfa to auto-configure Kodi's video library?"
msgstr "¿Desea que Alfa auto-configure la videoteca de Kodi?"
msgctxt "#70081"
msgid "If you choose 'No' you can do it later from 'Configuration > Preferences > Paths'."
msgstr "Si pulsa 'No' podra hacerlo desde 'Configuración > Preferencia > Rutas'."
msgctxt "#70082"
msgid "Global Search"
msgstr "Buscador global"
@@ -3091,7 +3083,7 @@ msgstr "The Movie Database"
msgctxt "#70094"
msgid "Select scraper for movies"
msgstr "Seleccione el scraper para las películas"
msgstr "Seleccione el proveedor de información para las películas"
msgctxt "#70095"
msgid "Universal Movie Scraper not present.\nInstall it now?"
@@ -3143,7 +3135,7 @@ msgstr "Si pulsa 'No' podrá hacerlo desde 'Configuración > Preferencias > Ruta
msgctxt "#70107"
msgid "Select scraper for Tv Shows"
msgstr "Seleccione el scraper para las series"
msgstr "Seleccione el proveedor de información para las series"
msgctxt "#70108"
msgid "Icons Set"
@@ -4778,3 +4770,35 @@ msgstr "AutoPlay permite auto reproducir los enlaces directamente, basándose en
msgctxt "#70521"
msgid "You can install the Trakt script below, \nonce installed and configured what \nyou see will be synchronized with your account automatically. \nDo you want to continue?"
msgstr "Puedes instalar el script de Trakt a continuacíon, \nuna vez instalado y configurado lo que \veas se sincronizara con tu cuenta automaticamente. \n¿Deseas continuar?"
msgctxt "#70522"
msgid "No filter"
msgstr "No filtrar"
msgctxt "#70523"
msgid "%s: The data was restarted"
msgstr "%s: Los datos fueron reiniciados"
msgctxt "#70524"
msgid " Server error, try later."
msgstr " Error de servidor, inténtelo más tarde."
msgctxt "#70525"
msgid "Configure channel"
msgstr "Configurar canal"
msgctxt "#70526"
msgid "Verification of counters of videos seen / not seen (uncheck to verify)"
msgstr "Verificación de los contadores de vídeos vistos/no vistos (desmarcar para verificar)"
msgctxt "#70527"
msgid "Now in Theatres "
msgstr "Ahora en cines"
msgctxt "#70528"
msgid "Movies by Genre"
msgstr "Por generos"
msgctxt "#70529"
msgid "tv show"
msgstr "serie"

View File

@@ -303,7 +303,7 @@ msgstr "Populares"
msgctxt "#30985"
msgid "Top Rated"
msgstr "Mejor valoradas"
msgstr "Mejor Valoradas"
msgctxt "#30986"
msgid "Search by Collection"
@@ -679,7 +679,7 @@ msgstr "Sincronizacion con Trakt iniciada"
msgctxt "#60046"
msgid "TheMovieDB not present.\nInstall it now?"
msgstr "TheMovieDB\nNo se ha encontrado el Scraper de películas de TheMovieDB.\n¿Desea instalarlo ahora?"
msgstr "TheMovieDB\nNo se ha encontrado el proveedor de información de películas de TheMovieDB.\n¿Desea instalarlo ahora?"
msgctxt "#60047"
msgid "The Movie Database is not installed."
@@ -687,7 +687,7 @@ msgstr "The Movie Database no instalado."
msgctxt "#60048"
msgid "The TVDB not present.\nInstall it now?"
msgstr "The TVDB\nNo se ha encontrado el Scraper de series de The TVDB.\n¿Desea instalarlo ahora?"
msgstr "The TVDB\nNo se ha encontrado el proveedor de información de series de The TVDB.\n¿Desea instalarlo ahora?"
msgctxt "#60049"
msgid "The TVDB is not installed."
@@ -707,15 +707,15 @@ msgstr "Errore di impostazione LibraryPath in BD"
msgctxt "#60053"
msgid "Do you want to configure this scraper in italian as default option for the movies ?"
msgstr "¿Desea configurar este Scraper en español como opción por defecto para películas?"
msgstr "¿Desea configurar este proveedor de información en español como opción por defecto para películas?"
msgctxt "#60054"
msgid "Do you want to configure this scraper in italian as default option for the tv series ?"
msgstr "¿Desea configurar este Scraper en español como opción por defecto para series?"
msgstr "¿Desea configurar este proveedor de información en español como opción por defecto para series?"
msgctxt "#60055"
msgid "Error of provider configuration in BD."
msgstr "Error al configurar el scraper en la BD."
msgstr "Error al configurar el proveedor de información en la BD."
msgctxt "#60056"
msgid "Videolibrary %s not configured"
@@ -3033,14 +3033,6 @@ msgctxt "#70079"
msgid "Remove only links of "
msgstr "Eliminar solo los enlaces de "
msgctxt "#70080"
msgid "Do you want Alfa to auto-configure Kodi's video library?"
msgstr "¿Desea que Alfa auto-configure la videoteca de Kodi?"
msgctxt "#70081"
msgid "If you choose 'No' you can do it later from 'Configuration > Preferences > Paths'."
msgstr "Si pulsa 'No' podra hacerlo desde 'Configuración > Preferencia > Rutas'."
msgctxt "#70082"
msgid "Global Search"
msgstr "Buscador global"
@@ -3091,7 +3083,7 @@ msgstr "The Movie Database"
msgctxt "#70094"
msgid "Select scraper for movies"
msgstr "Seleccione el scraper para las películas"
msgstr "Seleccione el proveedor de información para las películas"
msgctxt "#70095"
msgid "Universal Movie Scraper not present.\nInstall it now?"
@@ -3143,7 +3135,7 @@ msgstr "Si pulsa 'No' podrá hacerlo desde 'Configuración > Preferencias > Ruta
msgctxt "#70107"
msgid "Select scraper for Tv Shows"
msgstr "Seleccione el scraper para las series"
msgstr "Seleccione el proveedor de información para las series"
msgctxt "#70108"
msgid "Icons Set"
@@ -4778,3 +4770,35 @@ msgstr "AutoPlay permite auto reproducir los enlaces directamente, basándose en
msgctxt "#70521"
msgid "You can install the Trakt script below, \nonce installed and configured what \nyou see will be synchronized with your account automatically. \nDo you want to continue?"
msgstr "Puedes instalar el script de Trakt a continuacíon, \nuna vez instalado y configurado lo que \veas se sincronizara con tu cuenta automaticamente. \n¿Deseas continuar?"
msgctxt "#70522"
msgid "No filter"
msgstr "No filtrar"
msgctxt "#70523"
msgid "%s: The data was restarted"
msgstr "%s: Los datos fueron reiniciados"
msgctxt "#70524"
msgid " Server error, try later."
msgstr " Error de servidor, inténtelo más tarde."
msgctxt "#70525"
msgid "Configure channel"
msgstr "Configurar canal"
msgctxt "#70526"
msgid "Verification of counters of videos seen / not seen (uncheck to verify)"
msgstr "Verificación de los contadores de vídeos vistos/no vistos (desmarcar para verificar)"
msgctxt "#70527"
msgid "Now in Theatres "
msgstr "Ahora en cines"
msgctxt "#70528"
msgid "Movies by Genre"
msgstr "Por generos"
msgctxt "#70529"
msgid "tv show"
msgstr "serie"

View File

@@ -303,7 +303,7 @@ msgstr "Populares"
msgctxt "#30985"
msgid "Top Rated"
msgstr "Mejor valoradas"
msgstr "Mejor Valoradas"
msgctxt "#30986"
msgid "Search by Collection"
@@ -679,7 +679,7 @@ msgstr "Sincronizacion con Trakt iniciada"
msgctxt "#60046"
msgid "TheMovieDB not present.\nInstall it now?"
msgstr "TheMovieDB\nNo se ha encontrado el Scraper de películas de TheMovieDB.\n¿Desea instalarlo ahora?"
msgstr "TheMovieDB\nNo se ha encontrado el proveedor de información de películas de TheMovieDB.\n¿Desea instalarlo ahora?"
msgctxt "#60047"
msgid "The Movie Database is not installed."
@@ -687,7 +687,7 @@ msgstr "The Movie Database no instalado."
msgctxt "#60048"
msgid "The TVDB not present.\nInstall it now?"
msgstr "The TVDB\nNo se ha encontrado el Scraper de series de The TVDB.\n¿Desea instalarlo ahora?"
msgstr "The TVDB\nNo se ha encontrado el proveedor de información de series de The TVDB.\n¿Desea instalarlo ahora?"
msgctxt "#60049"
msgid "The TVDB is not installed."
@@ -707,15 +707,15 @@ msgstr "Errore di impostazione LibraryPath in BD"
msgctxt "#60053"
msgid "Do you want to configure this scraper in italian as default option for the movies ?"
msgstr "¿Desea configurar este Scraper en español como opción por defecto para películas?"
msgstr "¿Desea configurar este proveedor de información en español como opción por defecto para películas?"
msgctxt "#60054"
msgid "Do you want to configure this scraper in italian as default option for the tv series ?"
msgstr "¿Desea configurar este Scraper en español como opción por defecto para series?"
msgstr "¿Desea configurar este proveedor de información en español como opción por defecto para series?"
msgctxt "#60055"
msgid "Error of provider configuration in BD."
msgstr "Error al configurar el scraper en la BD."
msgstr "Error al configurar el proveedor de información en la BD."
msgctxt "#60056"
msgid "Videolibrary %s not configured"
@@ -3033,14 +3033,6 @@ msgctxt "#70079"
msgid "Remove only links of "
msgstr "Eliminar solo los enlaces de "
msgctxt "#70080"
msgid "Do you want Alfa to auto-configure Kodi's video library?"
msgstr "¿Desea que Alfa auto-configure la videoteca de Kodi?"
msgctxt "#70081"
msgid "If you choose 'No' you can do it later from 'Configuration > Preferences > Paths'."
msgstr "Si pulsa 'No' podra hacerlo desde 'Configuración > Preferencia > Rutas'."
msgctxt "#70082"
msgid "Global Search"
msgstr "Buscador global"
@@ -3091,7 +3083,7 @@ msgstr "The Movie Database"
msgctxt "#70094"
msgid "Select scraper for movies"
msgstr "Seleccione el scraper para las películas"
msgstr "Seleccione el proveedor de información para las películas"
msgctxt "#70095"
msgid "Universal Movie Scraper not present.\nInstall it now?"
@@ -3143,7 +3135,7 @@ msgstr "Si pulsa 'No' podrá hacerlo desde 'Configuración > Preferencias > Ruta
msgctxt "#70107"
msgid "Select scraper for Tv Shows"
msgstr "Seleccione el scraper para las series"
msgstr "Seleccione el proveedor de información para las series"
msgctxt "#70108"
msgid "Icons Set"
@@ -4778,3 +4770,35 @@ msgstr "AutoPlay permite auto reproducir los enlaces directamente, basándose en
msgctxt "#70521"
msgid "You can install the Trakt script below, \nonce installed and configured what \nyou see will be synchronized with your account automatically. \nDo you want to continue?"
msgstr "Puedes instalar el script de Trakt a continuacíon, \nuna vez instalado y configurado lo que \veas se sincronizara con tu cuenta automaticamente. \n¿Deseas continuar?"
msgctxt "#70522"
msgid "No filter"
msgstr "No filtrar"
msgctxt "#70523"
msgid "%s: The data was restarted"
msgstr "%s: Los datos fueron reiniciados"
msgctxt "#70524"
msgid " Server error, try later."
msgstr " Error de servidor, inténtelo más tarde."
msgctxt "#70525"
msgid "Configure channel"
msgstr "Configurar canal"
msgctxt "#70526"
msgid "Verification of counters of videos seen / not seen (uncheck to verify)"
msgstr "Verificación de los contadores de vídeos vistos/no vistos (desmarcar para verificar)"
msgctxt "#70527"
msgid "Now in Theatres "
msgstr "Ahora en cines"
msgctxt "#70528"
msgid "Movies by Genre"
msgstr "Por generos"
msgctxt "#70529"
msgid "tv show"
msgstr "serie"

Binary file not shown.

Before

Width:  |  Height:  |  Size: 13 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.2 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 24 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 45 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.6 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 39 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 37 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 18 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 73 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 25 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 19 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 24 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 27 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 32 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 30 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 91 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 210 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 5.9 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 23 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 24 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 27 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 93 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 42 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 118 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 25 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 11 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 23 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 4.6 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 17 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 579 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 13 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 60 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 349 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 63 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 7.6 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 57 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 2.8 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 19 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 42 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 82 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 58 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 96 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 5.8 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 6.9 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 7.8 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.6 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 37 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.0 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 22 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 18 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 9.1 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 6.9 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 9.3 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 12 KiB

View File

@@ -1,41 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(?:embed.|)auroravid.to/(?:video/|embed/\\?v=)([A-z0-9]{13})",
"url": "http://www.auroravid.to/embed/?v=\\1"
}
]
},
"free": true,
"id": "auroravid",
"name": "auroravid",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
]
}

View File

@@ -1,43 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "This file no longer exists on our servers" in data:
return False, "[Auroravid] El fichero ha sido borrado"
elif "is being converted" in data:
return False, "[Auroravid] El fichero está en proceso todavía"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
video_urls = []
videourls = scrapertools.find_multiple_matches(data, 'src\s*:\s*[\'"]([^\'"]+)[\'"]')
if not videourls:
videourls = scrapertools.find_multiple_matches(data, '<source src=[\'"]([^\'"]+)[\'"]')
for videourl in videourls:
if videourl.endswith(".mpd"):
id = scrapertools.find_single_match(videourl, '/dash/(.*?)/')
videourl = "http://www.auroravid.to/download.php%3Ffile=mm" + "%s.mp4" % id
videourl = re.sub(r'/dl(\d)*/', '/dl/', videourl)
ext = scrapertools.get_filename_from_url(videourl)[-4:]
videourl = videourl.replace("%3F", "?") + \
"|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0"
video_urls.append([ext + " [auroravid]", videourl])
return video_urls

View File

@@ -1,41 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(?:backin).net/([A-Z0-9]+)",
"url": "http://backin.net/s/generating.php?code=\\1"
}
]
},
"free": true,
"id": "backin",
"name": "backin",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
]
}

View File

@@ -1,39 +0,0 @@
# -*- coding: utf-8 -*-
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = scrapertools.cache_page(page_url)
# if '<meta property="og:title" content=""/>' in data:
# return False,"The video has been cancelled from Backin.net"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
headers = []
headers.append(["User-Agent",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_5) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.52 Safari/537.17"])
# First access
data = scrapertools.cache_page(page_url, headers=headers)
logger.info("data=" + data)
# URL
url = scrapertools.find_single_match(data, 'type="video/mp4" src="([^"]+)"')
logger.info("url=" + url)
# URL del vídeo
video_urls.append([".mp4" + " [backin]", url])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -1,44 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "bigfile.to/((?:list|file)/[\\w]+)",
"url": "https://www.bigfile.to/\\1"
}
]
},
"free": false,
"id": "bigfile",
"name": "bigfile",
"premium": [
"realdebrid"
],
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
]
}

View File

@@ -1,15 +0,0 @@
# -*- coding: utf-8 -*-
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
return video_urls

View File

@@ -2,15 +2,15 @@
from core import httptools
from core import scrapertools
from platformcode import logger
from lib import jsunpack
from platformcode import logger, config
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "File Not Found" in data or "File was deleted" in data:
return False, "[clipwatching] El video ha sido borrado"
if "File Not Found" in data:
return False, config.get_localized_string(70292) % "ClipWatching"
return True, ""
@@ -23,6 +23,6 @@ def get_video_url(page_url, user="", password="", video_password=""):
videos = scrapertools.find_multiple_matches(unpacked, 'file:"([^"]+).*?label:"([^"]+)')
for video, label in videos:
video_urls.append([label + " [clipwatching]", video])
logger.info("Url: %s" %videos)
logger.info("Url: %s" % videos)
video_urls.sort(key=lambda it: int(it[0].split("p ", 1)[0]))
return video_urls

Some files were not shown because too many files have changed in this diff Show More