Merge remote-tracking branch 'alfa-addon/master'

This commit is contained in:
unknown
2018-09-20 14:31:12 -03:00
63 changed files with 3226 additions and 1957 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.7.4" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.7.5" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,17 +19,19 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
¤ repelis ¤ thevid
¤ vevio ¤ danimados
¤ sipeliculas ¤ cinecalidad
¤ locopelis ¤ pelisipad
¤ divxtotal ¤ elitetorrent
¤ estrenosgo ¤ grantorrent
¤ mejortorrent1 ¤ newpct1
¤ tvvip ¤ zonatorrent
¤ maxipelis24 ¤ wikiseries
¤ pelismagnet ¤ todopeliculas
¤ allpeliculas ¤ puyasubs
¤ yape ¤ dilo
¤ goovie ¤ pelisipad
¤ seriesblanco ¤ pepecine
¤ maxipelis24 ¤ pelisplanet
¤ yts
¤ arreglos internos
¤ Agradecimientos a @angedam y @chivmalev por colaborar en ésta versión
¤ Agradecimientos a @wrlopez y @chivmalev por colaborar en ésta versión
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>

View File

@@ -33,15 +33,13 @@ SERVERS = {"26": "powvideo", "45": "okru", "75": "openload", "12": "netutv", "65
list_servers = ['powvideo', 'okru', 'openload', 'netutv', 'thevideos', 'spruto', 'stormo', 'idowatch', 'nowvideo',
'fastplay', 'raptu', 'tusfiles']
host = "http://allpeliculas.com/"
host = "http://allpeliculas.io/"
def mainlist(item):
logger.info()
itemlist = []
item.text_color = color1
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(item.clone(title="Películas", action="lista", fanart="http://i.imgur.com/c3HS8kj.png",
url= host + "movies/newmovies?page=1", extra1 = 0,
thumbnail=get_thumb('movies', auto=True)))
@@ -51,16 +49,13 @@ def mainlist(item):
url= host, thumbnail=get_thumb('colections', auto=True)))
itemlist.append(item.clone(title="", action=""))
itemlist.append(item.clone(title="Buscar...", action="search", thumbnail=get_thumb('search', auto=True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def colecciones(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'href="(/peliculas[^"]+).*?'
patron += 'title_geo"><span>([^<]+).*?'
@@ -143,11 +138,11 @@ def findvideos(item):
patron += '>([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for url, calidad in matches:
calidad = scrapertools.find_single_match(calidad, "\d+") + scrapertools.find_single_match(calidad, "\..+")
itemlist.append(item.clone(
channel = item.channel,
action = "play",
title = calidad,
fulltitle = item.title,
thumbnail = item.thumbnail,
contentThumbnail = item.thumbnail,
url = url,
@@ -159,7 +154,7 @@ def findvideos(item):
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
fulltitle = item.fulltitle
contentTitle = item.contentTitle
))
# Requerido para FilterTools
@@ -183,31 +178,22 @@ def lista(item):
dict_param = dict()
item.infoLabels = {}
item.text_color = color2
params = '{}'
if item.extra1 != 0:
dict_param["genero"] = [item.extra1]
params = jsontools.dump(dict_param)
data = httptools.downloadpage(item.url, post=params).data
data = data.replace("<mark>","").replace("<\/mark>","")
dict_data = jsontools.load(data)
for it in dict_data["items"]:
title = it["title"]
plot = it["slogan"]
rating = it["imdb"]
year = it["year"]
url = host + "pelicula/" + it["slug"]
title = it["title"] + " (%s)" %year
thumb = host + it["image"]
item.infoLabels['year'] = year
itemlist.append(item.clone(action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumb,
plot=plot, context=["buscar_trailer"], contentTitle=title, contentType="movie"))
try:
tmdb.set_infoLabels(itemlist, __modo_grafico__)
except:
pass
itemlist.append(item.clone(action="findvideos", title=title, url=url, thumbnail=thumb,
context=["buscar_trailer"], contentTitle=it["title"], contentType="movie"))
tmdb.set_infoLabels(itemlist, __modo_grafico__)
pagina = scrapertools.find_single_match(item.url, 'page=([0-9]+)')
item.url = item.url.replace(pagina, "")
if pagina == "":
@@ -219,6 +205,7 @@ def lista(item):
))
return itemlist
def search(item, texto):
logger.info()
if texto != "":
@@ -246,12 +233,10 @@ def newest(categoria):
if itemlist[-1].action == "lista":
itemlist.pop()
# Se captura la excepción, para no interrumpir al canal novedades si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -1,34 +0,0 @@
{
"id": "cuelgame",
"name": "Cuelgame",
"active": false,
"adult": false,
"language": ["cast"],
"thumbnail": "cuelgame.png",
"banner": "cuelgame.png",
"categories": [
"torrent",
"movie",
"tvshow",
"documentary",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_torrent",
"type": "bool",
"label": "Incluir en Novedades - Torrent",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,97 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import scrapertools, httptools
from core.item import Item
from core.scrapertools import decodeHtmlentities as dhe
from platformcode import logger
def mainlist(item):
    """Build the channel root menu: the Videos section and a search entry."""
    logger.info()
    entries = []
    entries.append(Item(
        channel=item.channel,
        title="[COLOR forestgreen]Videos[/COLOR]",
        action="scraper",
        url="http://cuelgame.net/?category=4",
        thumbnail="http://img5a.flixcart.com/image/poster/q/t/d/vintage-camera-collage-sr148-medium-400x400-imadkbnrnbpggqyz.jpeg",
        fanart="http://imgur.com/7frGoPL.jpg"))
    entries.append(Item(
        channel=item.channel,
        title="[COLOR forestgreen]Buscar[/COLOR]",
        action="search",
        url="",
        thumbnail="http://images2.alphacoders.com/846/84682.jpg",
        fanart="http://imgur.com/1sIHN1r.jpg"))
    return entries
def search(item, texto):
    """Build the Cuelgame search URL for *texto* and scrape the results."""
    logger.info()
    query = texto.replace(" ", "+")
    item.url = "http://cuelgame.net/search.php?q=%s" % (query)
    try:
        return scraper(item)
    # Swallow everything so a failing channel cannot break the global search
    except:
        import sys
        for trace_part in sys.exc_info():
            logger.error("%s" % trace_part)
        return []
def scraper(item):
    """Scrape a Cuelgame listing page into playable torrent Items.

    Parses each news entry (url, title, thumbnail block, plot excerpt),
    skips eD2k links, and appends a pagination Item when a "next" link
    is present on the page.
    """
    logger.info()
    itemlist = []
    # Download the page and strip layout whitespace / entities
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|CET", "", data)
    patron = '<h2> <a href="([^"]+)".*?'
    patron += 'class="l:\d+".*?>([^<]+)</a>'
    patron += '(.*?)class="lazy".*?'
    patron += 'news-content">([^<]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedtitle, check_thumb, scrapedplot in matches:
        # Dots in titles become spaces (release-name style titles)
        scrapedtitle = re.sub(r'\.', ' ', scrapedtitle)
        scrapedthumbnail = scrapertools.find_single_match(check_thumb, "</div><img src=\'([^\']+)\'")
        title_year = re.sub(r"(\d+)p", "", scrapedtitle)
        # NOTE(review): `year` (and `scrapedplot`) are computed but never
        # used below — dead code kept for byte-identical behavior.
        if "category=4" in item.url:
            try:
                year = scrapertools.find_single_match(title_year, '.*?(\d\d\d\d)')
            except:
                year = ""
        else:
            year = ""
        # Skip eD2k ("mula") links, which cannot be played as torrents
        if scrapedurl.startswith("ed2k:"):
            continue
        scrapedtitle = "[COLOR greenyellow]" + scrapedtitle + "[/COLOR]"
        itemlist.append(
            Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="play", server="torrent",
                 thumbnail=scrapedthumbnail, folder=False))
    # Extract the paginator
    patronvideos = '<a href="([^"]+)" rel="next">siguiente &#187;</a>'
    matches = scrapertools.find_multiple_matches(data, patronvideos)
    if len(matches) > 0:
        # fix "&" escaping for the pagination link
        next_page = matches[0].replace("amp;", "")
        scrapedurl = urlparse.urljoin(item.url, next_page)
        itemlist.append(Item(channel=item.channel, action="scraper", title="Página siguiente >>", url=scrapedurl,
                             thumbnail="http://imgur.com/ycPgVVO.png", folder=True))
    return itemlist
def newest(categoria):
    """Return the newest torrent items for the global "Novedades" section.

    Only the 'torrent' category is supported; for anything else an empty
    list is returned.  The trailing pagination entry produced by scraper()
    is stripped so only real video items remain.
    """
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == 'torrent':
            item.url = 'http://cuelgame.net/?category=4'
            itemlist = scraper(item)
            # scraper() appends a pagination Item whose *title* is the
            # "next page" label (its action is "scraper").  The original
            # code compared the label against .action, so the pagination
            # entry was never removed.
            if itemlist and itemlist[-1].title == "Página siguiente >>":
                itemlist.pop()
    # Swallow everything so a broken channel never interrupts "Novedades"
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist

View File

@@ -0,0 +1,37 @@
{
"id": "dilo",
"name": "Dilo",
"active": true,
"adult": false,
"language": [],
"thumbnail": "https://s22.postimg.cc/u6efsniqp/dilo.png",
"banner": "",
"categories": [
"tvshow",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"CAST",
"LAT",
"VOSE"
]
}
]
}

View File

@@ -0,0 +1,297 @@
# -*- coding: utf-8 -*-
# -*- Channel Dilo -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import jsontools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = 'https://www.dilo.nu/'
IDIOMAS = {'Español': 'CAST', 'Latino': 'LAT', 'Subtitulado': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['openload', 'streamango', 'powvideo', 'clipwatching', 'streamplay', 'streamcherry', 'gamovideo']
def get_source(url):
    """Download *url* and return its HTML with layout whitespace removed."""
    logger.info()
    response = httptools.downloadpage(url)
    return re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", response.data)
def mainlist(item):
    """Root menu of the Dilo channel."""
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    # (title, action, url, thumbnail key) for each root entry
    menu = [
        ("Nuevos capitulos", "latest_episodes", host, 'new episodes'),
        ("Ultimas", "latest_shows", host, 'last'),
        ("Todas", "list_all", host + 'catalogue', 'all'),
        ("Generos", "section", host + 'catalogue', 'genres'),
        ("Por Años", "section", host + 'catalogue', 'year'),
        ('Buscar', "search", host + 'search?s=', 'search'),
    ]
    itemlist = [Item(channel=item.channel, title=title, action=action, url=url,
                     thumbnail=get_thumb(thumb_key, auto=True))
                for title, action, url, thumb_key in menu]
    autoplay.show_option(item.channel, itemlist)
    return itemlist
def list_all(item):
    """List every show on a catalogue page, appending a pagination entry."""
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = ('<div class="col-lg-2 col-md-3 col-6 mb-3"><a href="([^"]+)".*?'
              '<img src="([^"]+)".*?font-weight-500">([^<]+)<')
    for show_url, show_thumb, show_title in re.compile(patron, re.DOTALL).findall(data):
        show = Item(channel=item.channel, title=show_title, url=show_url,
                    thumbnail=show_thumb)
        show.contentSerieName = show_title
        show.action = 'seasons'
        itemlist.append(show)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Pagination ("Netx" is the site's own typo in the aria-label attribute)
    if itemlist:
        next_page = scrapertools.find_single_match(
            data, '<a href="([^ ]+)" aria-label="Netx">')
        if next_page:
            itemlist.append(Item(channel=item.channel, action="list_all",
                                 title='Siguiente >>>',
                                 url=host + 'catalogue' + next_page,
                                 thumbnail=get_thumb("more.png"),
                                 type=item.type))
    return itemlist
def section(item):
    """List the genre or year filters scraped from the catalogue sidebar."""
    logger.info()
    data = get_source(item.url)
    # Narrow the HTML down to the relevant filter block first
    if item.title == 'Generos':
        data = scrapertools.find_single_match(
            data, '>Todos los generos</button>.*?<button class')
    elif 'Años' in item.title:
        data = scrapertools.find_single_match(
            data, '>Todos los años</button>.*?<button class')
    matches = re.compile('input" id="([^"]+)".*?name="([^"]+)"', re.DOTALL).findall(data)
    # filter_id doubles as the display title and the query-string value
    return [Item(channel=item.channel, title=filter_id.capitalize(),
                 url='%s?%s=%s' % (item.url, name, filter_id), action='list_all')
            for filter_id, name in matches]
def latest_episodes(item):
    """List the newest episodes shown on the home page."""
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = ('<a class="media" href="([^"]+)" title="([^"]+)".*?src="([^"]+)".*?'
              'width: 97%">([^<]+)</div><div>(\d+x\d+)<')
    for ep_url, ep_title, ep_thumb, serie_name, _episode in re.compile(patron, re.DOTALL).findall(data):
        # Replace " Online " with " (" and append ")" to form the label
        label = '%s)' % ep_title.replace(' Online ', ' (')
        itemlist.append(Item(channel=item.channel, action='findvideos', url=ep_url,
                             thumbnail=ep_thumb, title=label,
                             contentSerieName=serie_name, type='episode'))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def latest_shows(item):
    """List the "Nuevas series" block from the home page."""
    logger.info()
    itemlist = []
    data = get_source(item.url)
    data = scrapertools.find_single_match(data, '>Nuevas series</div>.*?text-uppercase"')
    patron = ('<div class="col-lg-3 col-md-4 col-6 mb-3"><a href="([^"]+)".*?'
              'src="([^"]+)".*?weight-500">([^<]+)</div>')
    for show_url, show_thumb, show_name in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(Item(channel=item.channel, action='seasons', url=show_url,
                             thumbnail=show_thumb, title=show_name,
                             contentSerieName=show_name))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def seasons(item):
    """Query the site's season API for the series at item.url.

    The series id is scraped from the show page, then posted to the
    seasons endpoint; season "0" (specials/placeholder) is skipped.
    """
    import urllib  # Python 2: urlencode lives in urllib
    logger.info()
    itemlist = []
    data = get_source(item.url)
    serie_id = scrapertools.find_single_match(data, '{"item_id": (\d+)}')
    post = urllib.urlencode({'item_id': serie_id})
    seasons_url = '%sapi/web/seasons.php' % host
    headers = {'Referer': item.url}
    data = jsontools.load(httptools.downloadpage(seasons_url, post=post, headers=headers).data)
    for season_info in data:  # renamed from "dict" — it shadowed the builtin
        season = season_info['number']
        if season != '0':
            # Fresh copy per entry: the original handed every season Item
            # the same mutable infoLabels mapping, so they could all end
            # up aliasing the last season written — TODO confirm whether
            # Item copies infoLabels internally.
            infoLabels = dict(item.infoLabels)
            infoLabels['season'] = season
            title = 'Temporada %s' % season
            itemlist.append(Item(channel=item.channel, url=item.url, title=title,
                                 action='episodesxseason', contentSeasonNumber=season,
                                 id=serie_id, infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
                 action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
    return itemlist
def episodesxseason(item):
    """Query the site's episode API for one season of a series.

    Expects item.infoLabels['season'] and item.id (series id) to be set
    by seasons().
    """
    import urllib  # Python 2: urlencode lives in urllib
    logger.info()
    itemlist = []
    season = item.infoLabels['season']
    post = urllib.urlencode({'item_id': item.id, 'season_number': season})
    episodes_url = '%sapi/web/episodes.php' % host
    headers = {'Referer': item.url}
    data = jsontools.load(httptools.downloadpage(episodes_url, post=post, headers=headers).data)
    for episode_info in data:  # renamed from "dict" — it shadowed the builtin
        episode = episode_info['number']
        epi_name = episode_info['name']
        title = '%sx%s - %s' % (season, episode, epi_name)
        url = '%s%s/' % (host, episode_info['permalink'])
        # Fresh copy per entry to avoid every episode Item sharing (and
        # last-value-clobbering) a single mutable infoLabels dict.
        infoLabels = dict(item.infoLabels)
        infoLabels['episode'] = episode
        # The original passed the *season* as contentEpisodeNumber; the
        # field names the episode number.
        itemlist.append(Item(channel=item.channel, title=title, action='findvideos', url=url,
                             contentEpisodeNumber=episode, id=item.id,
                             infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def episodios(item):
    """Flatten all seasons of a series into one episode list (videolibrary hook)."""
    logger.info()
    all_episodes = []
    for season_item in seasons(item):
        all_episodes.extend(episodesxseason(season_item))
    return all_episodes
def findvideos(item):
    """Scrape the playable links (server, language) for one episode page."""
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = 'data-link="([^"]+)">.*?500">([^<]+)<.*?>Reproducir en ([^<]+)</span>'
    for enc_url, server, language in re.compile(patron, re.DOTALL).findall(data):
        # The '%s' placeholder is filled with the server name below; the
        # language tag is only shown when "unify" is off.
        suffix = ' [%s]' % language if not config.get_setting('unify') else ''
        itemlist.append(Item(channel=item.channel, title='%s' + suffix, url=enc_url,
                             action='play', language=IDIOMAS[language],
                             server=server, infoLabels=item.infoLabels))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required for AutoPlay
    autoplay.start(itemlist, item)
    return itemlist
def decode_link(enc_url):
    """Resolve the intermediate page at *enc_url* to the final video URL.

    Falls back to returning *enc_url* itself when resolution fails.  The
    original left ``url`` unassigned on failure paths (e.g. when the
    scraped src contained 'jquery', or when the download raised), so the
    final ``return url`` crashed with NameError.
    """
    logger.info()
    url = enc_url  # safe default so every path returns something
    try:
        new_data = get_source(enc_url)
        new_enc_url = scrapertools.find_single_match(new_data, 'src="([^"]+)"')
        try:
            # The real link is usually behind a redirect
            url = httptools.downloadpage(new_enc_url, follow_redirects=False).headers['location']
        except:
            if not 'jquery' in new_enc_url:
                url = new_enc_url
    except:
        pass
    return url
def play(item):
    """Resolve the encoded link just before playback and return the item."""
    logger.info()
    item.url = decode_link(item.url)
    return [item]
def search(item, texto):
    """Channel search hook: append the query to the search URL and list results.

    Always returns a list: an empty list for an empty query (the original
    could fall through returning None), or a "no results" notice item when
    list_all() raises.
    """
    logger.info()
    itemlist = []
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    if texto != '':
        try:
            return list_all(item)
        # Best-effort: never propagate a scraping error to the search UI
        except:
            itemlist.append(item.clone(url='', title='No hay elementos...', action=''))
    return itemlist

View File

@@ -27,6 +27,22 @@
"type": "bool",
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"CAST",
"LAT",
"VO",
"VOS",
"VOSE"
]
},
{
"id": "timeout_downloadpage",
"type": "list",

View File

@@ -14,6 +14,15 @@ from core.item import Item
from platformcode import config, logger
from core import tmdb
from lib import generictools
from channels import filtertools
from channels import autoplay
#IDIOMAS = {'CAST': 'Castellano', 'LAT': 'Latino', 'VO': 'Version Original'}
IDIOMAS = {'Castellano': 'CAST', 'Latino': 'LAT', 'Version Original': 'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['torrent']
host = 'https://www.divxtotal3.net/'
channel = 'divxtotal'
@@ -33,6 +42,9 @@ def mainlist(item):
thumb_series = get_thumb("channels_tvshow.png")
thumb_buscar = get_thumb("search.png")
thumb_separador = get_thumb("next.png")
thumb_settings = get_thumb("setting_0.png")
autoplay.init(item.channel, list_servers, list_quality)
item.url_plus = "peliculas/"
itemlist.append(Item(channel=item.channel, title="Películas", action="categorias", url=host + item.url_plus, url_plus=item.url_plus, thumbnail=thumb_cartelera, extra="Películas"))
@@ -49,7 +61,20 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", url=host + "?s=%s", thumbnail=thumb_buscar, extra="search"))
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]Configuración:[/COLOR]", folder=False, thumbnail=thumb_separador))
itemlist.append(Item(channel=item.channel, action="configuracion", title="Configurar canal", thumbnail=thumb_settings))
autoplay.show_option(item.channel, itemlist) #Activamos Autoplay
return itemlist
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return
def submenu(item):
@@ -174,7 +199,7 @@ def listado(item):
cnt_tot = 40 # Poner el num. máximo de items por página
cnt_title = 0 # Contador de líneas insertadas en Itemlist
inicio = time.time() # Controlaremos que el proceso no exceda de un tiempo razonable
fin = inicio + 10 # Después de este tiempo pintamos (segundos)
fin = inicio + 5 # Después de este tiempo pintamos (segundos)
timeout_search = timeout # Timeout para descargas
if item.extra == 'search':
timeout_search = timeout * 2 # Timeout un poco más largo para las búsquedas
@@ -287,7 +312,7 @@ def listado(item):
else:
url = scrapedurl #No se encuentra la Serie, se trata como Episodio suelto
cnt_title += 1
#cnt_title += 1
item_local = item.clone() #Creamos copia de Item para trabajar
if item_local.tipo: #... y limpiamos
del item_local.tipo
@@ -458,7 +483,13 @@ def listado(item):
item_local.contentSeason_save = item_local.contentSeason
del item_local.infoLabels['season']
itemlist.append(item_local.clone()) #Pintar pantalla
#Ahora se filtra por idioma, si procede, y se pinta lo que vale
if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra
itemlist = filtertools.get_link(itemlist, item_local, list_language)
else:
itemlist.append(item_local.clone()) #Si no, pintar pantalla
cnt_title = len(itemlist) #Contador de líneas añadidas
#logger.debug(item_local)
@@ -483,6 +514,10 @@ def listado(item):
def findvideos(item):
logger.info()
itemlist = []
itemlist_t = [] #Itemlist total de enlaces
itemlist_f = [] #Itemlist de enlaces filtrados
if not item.language:
item.language = ['CAST'] #Castellano por defecto
matches = []
item.category = categoria
@@ -552,11 +587,26 @@ def findvideos(item):
item_local.action = "play" #Visualizar vídeo
item_local.server = "torrent" #Seridor Torrent
itemlist.append(item_local.clone()) #Pintar pantalla
itemlist_t.append(item_local.clone()) #Pintar pantalla, si no se filtran idiomas
# Requerido para FilterTools
if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra
itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío
#logger.debug("TORRENT: " + scrapedurl + " / title gen/torr: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName)
#logger.debug(item_local)
if len(itemlist_f) > 0: #Si hay entradas filtradas...
itemlist.extend(itemlist_f) #Pintamos pantalla filtrada
else:
if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ...
thumb_separador = get_thumb("next.png") #... pintamos todo con aviso
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador))
itemlist.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado
# Requerido para AutoPlay
autoplay.start(itemlist, item) #Lanzamos Autoplay
return itemlist

View File

@@ -18,9 +18,62 @@
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"default": true,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra en TMDB",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"CAST",
"LAT",
"VO",
"VOS",
"VOSE"
]
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "timeout_downloadpage",
"type": "list",
"label": "Timeout (segs.) en descarga de páginas o verificación de servidores",
"default": 5,
"enabled": true,
"visible": true,
"lvalues": [
"None",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10"
]
}
]
}
]
}

View File

@@ -4,6 +4,7 @@ import re
import sys
import urllib
import urlparse
import time
from channelselector import get_thumb
from core import httptools
@@ -13,8 +14,22 @@ from core.item import Item
from platformcode import config, logger
from core import tmdb
from lib import generictools
from channels import filtertools
from channels import autoplay
#IDIOMAS = {'CAST': 'Castellano', 'LAT': 'Latino', 'VO': 'Version Original'}
IDIOMAS = {'Castellano': 'CAST', 'Latino': 'LAT', 'Version Original': 'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['torrent']
host = 'http://www.elitetorrent.biz'
channel = "elitetorrent"
categoria = channel.capitalize()
__modo_grafico__ = config.get_setting('modo_grafico', channel)
timeout = config.get_setting('timeout_downloadpage', channel)
def mainlist(item):
@@ -26,19 +41,37 @@ def mainlist(item):
thumb_series = get_thumb("channels_tvshow.png")
thumb_series_hd = get_thumb("channels_tvshow_hd.png")
thumb_buscar = get_thumb("search.png")
thumb_separador = get_thumb("next.png")
thumb_settings = get_thumb("setting_0.png")
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=host, extra="peliculas", thumbnail=thumb_pelis))
itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=host, extra="series", thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url=host, thumbnail=thumb_buscar))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url=host, thumbnail=thumb_buscar, filter_lang=True))
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]Configuración:[/COLOR]", folder=False, thumbnail=thumb_separador))
itemlist.append(Item(channel=item.channel, action="configuracion", title="Configurar canal", thumbnail=thumb_settings))
autoplay.show_option(item.channel, itemlist) #Activamos Autoplay
return itemlist
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return
def submenu(item):
logger.info()
itemlist = []
item.filter_lang = True
data = ''
try:
@@ -84,10 +117,13 @@ def submenu(item):
if "/serie" in scrapedurl:
continue
if 'subtitulado' in scrapedtitle.lower() or 'latino' in scrapedtitle.lower() or 'original' in scrapedtitle.lower():
item.filter_lang = False
itemlist.append(item.clone(action="listado", title=scrapedtitle, url=scrapedurl))
if item.extra == "series": #Añadimos Series VOSE que está fuera del menú principal
itemlist.append(item.clone(action="listado", title="Series VOSE", url=host + "/series-vose/"))
itemlist.append(item.clone(action="listado", title="Series VOSE", url=host + "/series-vose/", filter_lang=False))
return itemlist
@@ -96,10 +132,18 @@ def listado(item):
logger.info()
itemlist = []
inicio = time.time() # Controlaremos que el proceso no exceda de un tiempo razonable
fin = inicio + 5 # Después de este tiempo pintamos (segundos)
timeout_search = timeout # Timeout para descargas
if item.extra == 'search':
timeout_search = timeout * 2 # Timeout un poco más largo para las búsquedas
if timeout_search < 5:
timeout_search = 5 # Timeout un poco más largo para las búsquedas
# Descarga la página
data = ''
try:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url, timeout=timeout_search).data)
except:
pass
@@ -168,6 +212,9 @@ def listado(item):
item_local.language += ["VO"]
if "dual" in scrapedcategory.lower() or "dual" in title.lower():
item_local.language[0:0] = ["DUAL"]
if item_local.language == []:
item_local.language = ['CAST'] #Por defecto
#Limpiamos el título de la basura innecesaria
title = title.replace("Dual", "").replace("dual", "").replace("Subtitulada", "").replace("subtitulada", "").replace("Subt", "").replace("subt", "").replace("Sub", "").replace("sub", "").replace("(Proper)", "").replace("(proper)", "").replace("Proper", "").replace("proper", "").replace("#", "").replace("(Latino)", "").replace("Latino", "")
@@ -215,10 +262,14 @@ def listado(item):
item_local.infoLabels['year'] = "-"
#Pasamos a TMDB cada Item, para evitar el efecto memoria de tmdb
if item.category: #Si este campo no existe es que viene de la primera pasada de una búsqueda global, pasamos
tmdb.set_infoLabels(item_local, True)
#if item.category: #Si este campo no existe es que viene de la primera pasada de una búsqueda global, pasamos
# tmdb.set_infoLabels(item_local, True)
itemlist.append(item_local.clone()) #Pintar pantalla
#Ahora se filtra por idioma, si procede, y se pinta lo que vale
if config.get_setting('filter_languages', channel) > 0 and item.filter_lang: #Si hay idioma seleccionado, se filtra
itemlist = filtertools.get_link(itemlist, item_local, list_language)
else:
itemlist.append(item_local.clone()) #Si no, pintar pantalla
#if not item.category: #Si este campo no existe es que viene de la primera pasada de una búsqueda global
# return itemlist #Retornamos sin pasar por la fase de maquillaje para ahorra tiempo
@@ -250,7 +301,7 @@ def listado(item):
else:
title = '[COLOR gold]Página siguiente >>[/COLOR] %s' % (int(matches[1]) - 1)
itemlist.append(Item(channel=item.channel, action="listado", title=title, url=scrapedurl, extra=item.extra))
itemlist.append(Item(channel=item.channel, action="listado", title=title, url=scrapedurl, extra=item.extra, filter_lang=item.filter_lang))
return itemlist
@@ -258,11 +309,15 @@ def listado(item):
def findvideos(item):
logger.info()
itemlist = []
itemlist_t = [] #Itemlist total de enlaces
itemlist_f = [] #Itemlist de enlaces filtrados
if not item.language:
item.language = ['CAST'] #Castellano por defecto
#Bajamos los datos de la página
data = ''
try:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url, timeout=timeout).data)
except:
pass
@@ -308,12 +363,12 @@ def findvideos(item):
if size:
item.quality = '%s [%s]' % (item.quality, size) #Agregamos size al final de calidad
item.quality = item.quality.replace("GB", "G B").replace("MB", "M B") #Se evita la palabra reservada en Unify
#Generamos una copia de Item para trabajar sobre ella
item_local = item.clone()
#Ahora pintamos el link del Torrent, si lo hay
if link_torrent: # Hay Torrent ?
if link_torrent: # Hay Torrent ?
#Generamos una copia de Item para trabajar sobre ella
item_local = item.clone()
if item_local.quality:
item_local.quality += " "
item_local.quality += "[Torrent]"
@@ -332,10 +387,27 @@ def findvideos(item):
item_local.action = "play" #Visualizar vídeo
item_local.server = "torrent" #Seridor Torrent
itemlist.append(item_local.clone()) #Pintar pantalla
itemlist_t.append(item_local.clone()) #Pintar pantalla, si no se filtran idiomas
# Requerido para FilterTools
if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra
itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío
if len(itemlist_f) > 0: #Si hay entradas filtradas...
itemlist.extend(itemlist_f) #Pintamos pantalla filtrada
else:
if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ...
thumb_separador = get_thumb("next.png") #... pintamos todo con aviso
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador))
itemlist.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado
#Ahora pintamos el link del Magnet, si lo hay
itemlist_t = [] #Itemlist total de enlaces
itemlist_f = [] #Itemlist de enlaces filtrados
if link_magnet: # Hay Magnet ?
#Generamos una copia de Item para trabajar sobre ella
item_local = item.clone()
if item_local.quality:
item_local.quality += " "
item_local.quality = item_local.quality.replace("[Torrent]", "") + "[Magnet]"
@@ -347,11 +419,26 @@ def findvideos(item):
item_local.action = "play" #Visualizar vídeo
item_local.server = "torrent" #Seridor Torrent
itemlist.append(item_local.clone()) #Pintar pantalla
itemlist_t.append(item_local.clone()) #Pintar pantalla, si no se filtran idiomas
# Requerido para FilterTools
if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra
itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío
if len(itemlist_f) > 0: #Si hay entradas filtradas...
itemlist.extend(itemlist_f) #Pintamos pantalla filtrada
else:
if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ...
thumb_separador = get_thumb("next.png") #... pintamos todo con aviso
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador))
itemlist.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado
#logger.debug("TORRENT: " + link_torrent + "MAGNET: " + link_magnet + " / title gen/torr: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / tamaño: " + size + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName)
#logger.debug(item_local)
# Requerido para AutoPlay
autoplay.start(itemlist, item) #Lanzamos Autoplay
return itemlist
@@ -387,7 +474,7 @@ def newest(categoria):
itemlist = []
item = Item()
try:
if categoria == 'torrent':
if categoria == 'peliculas':
item.url = host
item.extra = "peliculas"
item.category_new= 'newest'

View File

@@ -31,6 +31,22 @@
"type": "bool",
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"CAST",
"LAT",
"VO",
"VOS",
"VOSE"
]
},
{
"id": "timeout_downloadpage",
"type": "list",

View File

@@ -14,12 +14,23 @@ from core.item import Item
from platformcode import config, logger, platformtools
from core import tmdb
from lib import generictools
from channels import filtertools
from channels import autoplay
#IDIOMAS = {'CAST': 'Castellano', 'LAT': 'Latino', 'VO': 'Version Original'}
IDIOMAS = {'Castellano': 'CAST', 'Latino': 'LAT', 'Version Original': 'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['torrent']
host = 'http://estrenosby.net/' # 'http://estrenosli.org/'
channel = "estrenosgo"
color1, color2, color3 = ['0xFF58D3F7', '0xFF2E64FE', '0xFF0404B4']
__modo_grafico__ = config.get_setting('modo_grafico', 'estrenosgo')
modo_ultima_temp = config.get_setting('seleccionar_ult_temporadda_activa', 'estrenosgo') #Actualización sólo últ. Temporada?
timeout = config.get_setting('timeout_downloadpage', 'estrenosgo')
__modo_grafico__ = config.get_setting('modo_grafico', channel)
modo_ultima_temp = config.get_setting('seleccionar_ult_temporadda_activa', channel) #Actualización sólo últ. Temporada?
timeout = config.get_setting('timeout_downloadpage', channel)
def mainlist(item):
@@ -37,28 +48,41 @@ def mainlist(item):
thumb_buscar = get_thumb("search.png")
thumb_separador = get_thumb("next.png")
thumb_cabecera = get_thumb("nofolder.png")
thumb_settings = get_thumb("setting_0.png")
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(Item(channel=item.channel, url=host, title="PELÍCULAS: ", folder=False, thumbnail=thumb_pelis))
itemlist.append(Item(channel=item.channel, title=" - Cartelera", action="categorias", url=item.url + "descarga-0-58126", thumbnail=thumb_cartelera, extra="cartelera"))
itemlist.append(Item(channel=item.channel, title=" - DVD-RIP", action="categorias", url=item.url + "descarga-0-581210", thumbnail=thumb_pelis, extra="DVD-RIP"))
itemlist.append(Item(channel=item.channel, title=" - HD-RIP", action="categorias", url=item.url + "descarga-0-58128", thumbnail=thumb_pelis_hd, extra="HD-RIP"))
itemlist.append(Item(channel=item.channel, title=" - Subtituladas", action="categorias", url=item.url + "descarga-0-58127", thumbnail=thumb_pelis_VO, extra="VOSE"))
itemlist.append(Item(channel=item.channel, title=" - Versión Original", action="categorias", url=item.url + "descarga-0-5812255", thumbnail=thumb_pelis_VO, extra="VO"))
itemlist.append(Item(channel=item.channel, url=host, title="", folder=False, thumbnail=thumb_separador))
itemlist.append(Item(channel=item.channel, title=" - Cartelera", action="categorias", url=item.url + "descarga-0-58126", thumbnail=thumb_cartelera, extra="cartelera", filter_lang=True))
itemlist.append(Item(channel=item.channel, title=" - DVD-RIP", action="categorias", url=item.url + "descarga-0-581210", thumbnail=thumb_pelis, extra="DVD-RIP", filter_lang=True))
itemlist.append(Item(channel=item.channel, title=" - HD-RIP", action="categorias", url=item.url + "descarga-0-58128", thumbnail=thumb_pelis_hd, extra="HD-RIP", filter_lang=True))
itemlist.append(Item(channel=item.channel, title=" - Subtituladas", action="categorias", url=item.url + "descarga-0-58127", thumbnail=thumb_pelis_VO, extra="VOSE", filter_lang=False))
itemlist.append(Item(channel=item.channel, title=" - Versión Original", action="categorias", url=item.url + "descarga-0-5812255", thumbnail=thumb_pelis_VO, extra="VO", filter_lang=False))
itemlist.append(Item(channel=item.channel, url=host, title="Series", action="submenu", thumbnail=thumb_series, extra="series"))
itemlist.append(Item(channel=item.channel, url=host, title="", folder=False, thumbnail=thumb_separador))
itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", url=host + "descarga-0-0-0-0-fx-1-%s-sch-titulo-", thumbnail=thumb_buscar, extra="search"))
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]Configuración:[/COLOR]", folder=False, thumbnail=thumb_separador))
itemlist.append(Item(channel=item.channel, action="configuracion", title="Configurar canal", thumbnail=thumb_settings))
autoplay.show_option(item.channel, itemlist) #Activamos Autoplay
return itemlist
def configuracion(item):
    """Open the channel-settings dialog, then refresh the current listing.

    Invoked from the channel's "Configurar canal" menu entry; the *item*
    argument is required by the dispatcher but not used here. Returns None.
    """
    from platformcode import platformtools
    # show_channel_settings() blocks until the dialog closes; its return
    # value was previously stored in an unused local and is not needed.
    platformtools.show_channel_settings()
    # Redraw the listing so any changed setting takes effect immediately.
    platformtools.itemlist_refresh()
def submenu(item):
logger.info()
itemlist = []
item.filter_lang = True
thumb_cartelera = get_thumb("now_playing.png")
thumb_pelis = get_thumb("channels_movie.png")
@@ -183,7 +207,7 @@ def listado(item):
cnt_tot = 40 # Poner el num. máximo de items por página
cnt_title = 0 # Contador de líneas insertadas en Itemlist
inicio = time.time() # Controlaremos que el proceso no exceda de un tiempo razonable
fin = inicio + 10 # Después de este tiempo pintamos (segundos)
fin = inicio + 5 # Después de este tiempo pintamos (segundos)
timeout_search = timeout # Timeout para descargas
if item.extra == 'search':
timeout_search = timeout * 2 # Timeout un poco más largo para las búsquedas
@@ -306,7 +330,7 @@ def listado(item):
elif "Archivo Torrent" not in scrapedenlace and "Video Online" not in scrapedenlace: #Si no tiene enlaces pasamos
continue
cnt_title += 1
#cnt_title += 1
item_local = item.clone() #Creamos copia de Item para trabajar
if item_local.tipo: #... y limpiamos
del item_local.tipo
@@ -326,6 +350,8 @@ def listado(item):
del item_local.text_bold
item_local.text_color = True
del item_local.text_color
item_local.filter_lang = True
del item_local.filter_lang
title_subs = [] #creamos una lista para guardar info importante
item_local.language = [] #creamos lista para los idiomas
@@ -473,7 +499,13 @@ def listado(item):
item_local.contentSeason_save = item_local.contentSeason
del item_local.infoLabels['season']
itemlist.append(item_local.clone()) #Pintar pantalla
#Ahora se filtra por idioma, si procede, y se pinta lo que vale
if config.get_setting('filter_languages', channel) > 0 and item.filter_lang: #Si hay idioma seleccionado, se filtra
itemlist = filtertools.get_link(itemlist, item_local, list_language)
else:
itemlist.append(item_local.clone()) #Si no, pintar pantalla
cnt_title = len(itemlist) #Contador de líneas añadidas
#logger.debug(item_local)
@@ -490,7 +522,7 @@ def listado(item):
else:
title = '%s' % curr_page-1
itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente " + title, title_lista=title_lista, url=item.url, extra=item.extra, extra2=item.extra2, last_page=str(last_page), curr_page=str(curr_page)))
itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente " + title, title_lista=title_lista, url=item.url, extra=item.extra, extra2=item.extra2, last_page=str(last_page), curr_page=str(curr_page), filter_lang=item.filter_lang))
return itemlist
@@ -572,8 +604,12 @@ def listado_series(item):
item_local.title = title.strip().lower().title()
item_local.from_title = title.strip().lower().title()
itemlist.append(item_local.clone()) #Pintar pantalla
#Ahora se filtra por idioma, si procede, y se pinta lo que vale
if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra
itemlist = filtertools.get_link(itemlist, item_local, list_language)
else:
itemlist.append(item_local.clone()) #Si no, pintar pantalla
#logger.debug(item_local)
#if not item.category: #Si este campo no existe es que viene de la primera pasada de una búsqueda global
@@ -598,6 +634,10 @@ def listado_series(item):
def findvideos(item):
logger.info()
itemlist = []
itemlist_t = [] #Itemlist total de enlaces
itemlist_f = [] #Itemlist de enlaces filtrados
if not item.language:
item.language = ['CAST'] #Castellano por defecto
#logger.debug(item)
@@ -682,7 +722,7 @@ def findvideos(item):
#Ahora tratamos los enlaces .torrent
itemlist_alt = [] #Usamos una lista intermedia para poder ordenar los episodios
if matches_torrent:
for scrapedurl, scrapedquality, scrapedlang in matches_torrent: #leemos los torrents con la diferentes calidades
for scrapedurl, scrapedquality, scrapedlang in matches_torrent: #leemos los torrents con la diferentes calidades
#Generamos una copia de Item para trabajar sobre ella
item_local = item.clone()
@@ -695,7 +735,7 @@ def findvideos(item):
if not item_local.quality:
item_local.quality = item.quality
elif scrapertools.find_single_match(item.quality, '(\[\d+:\d+ h\])'): #Salvamos la duración
item_local.quality += ' [/COLOR][COLOR white]%s' % scrapertools.find_single_match(item.quality, '(\[\d+:\d+ h\])') #Copiamos duración
item_local.quality += ' [/COLOR][COLOR white]%s' % scrapertools.find_single_match(item.quality, '(\[\d+:\d+ h\])') #Copiamos duración
if scrapedlang in IDIOMAS: #Salvamos el idioma, si lo hay
item_local.language = ["%s" % IDIOMAS[scrapedlang]]
@@ -718,6 +758,7 @@ def findvideos(item):
#logger.debug(data)
for scrapedtorrent, scrapedtitle in matches:
item_local = item_local.clone()
quality = item_local.quality
qualityscraped = ''
if not item_local.contentEpisodeNumber and item_local.contentType == 'episode':
@@ -773,11 +814,23 @@ def findvideos(item):
item_local.action = "play" #Visualizar vídeo
item_local.server = "torrent" #Seridor Torrent
itemlist_alt.append(item_local.clone(quality=quality)) #Pintar pantalla
itemlist_t.append(item_local.clone(quality=quality)) #Pintar pantalla, si no se filtran idiomas
# Requerido para FilterTools
if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra
itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío
#logger.debug("TORRENT: " + scrapedtorrent + " / title gen/torr: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / tamaño: " + scrapedsize + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName)
#logger.debug(item_local)
if len(itemlist_f) > 0: #Si hay entradas filtradas...
itemlist_alt.extend(itemlist_f) #Pintamos pantalla filtrada
else:
if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ...
thumb_separador = get_thumb("next.png") #... pintamos todo con aviso
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador))
itemlist_alt.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado
#Si son múltiples episodios, ordenamos
if len(itemlist_alt) > 1 and (item.contentType == 'episode' or item.contentType == 'season'):
itemlist_alt = sorted(itemlist_alt, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber))) #clasificamos
@@ -786,6 +839,8 @@ def findvideos(item):
#Ahora tratamos los servidores directo
itemlist_alt = []
itemlist_t = [] #Itemlist total de enlaces
itemlist_f = [] #Itemlist de enlaces filtrados
if matches_directo:
for scrapedurl, scrapedquality, scrapedlang in matches_directo: #leemos los torrents con la diferentes calidades
#Generamos una copia de Item para trabajar sobre ella
@@ -824,6 +879,8 @@ def findvideos(item):
#logger.debug(data)
for scrapedtitle, scrapedenlace in matches:
item_local = item_local.clone()
enlace = ''
devuelve = ''
mostrar_server = ''
@@ -918,18 +975,33 @@ def findvideos(item):
item_local.action = "play" #Visualizar vídeo
item_local.server = servidor #Servidor Directo
itemlist_alt.append(item_local.clone(quality=quality)) #Pintar pantalla
itemlist_t.append(item_local.clone(quality=quality)) #Pintar pantalla, si no se filtran idiomas
# Requerido para FilterTools
if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra
itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío
except:
logger.error('ERROR al procesar enlaces DIRECTOS: ' + servidor + ' / ' + scrapedenlace)
#logger.debug("DIRECTO: " + scrapedenlace + " / title gen/torr: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / tamaño: " + scrapedsize + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName)
#logger.debug(item_local)
if len(itemlist_f) > 0: #Si hay entradas filtradas...
itemlist_alt.extend(itemlist_f) #Pintamos pantalla filtrada
else:
if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ...
thumb_separador = get_thumb("next.png") #... pintamos todo con aviso
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador))
itemlist_alt.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado
#Si son múltiples episodios, ordenamos
if len(itemlist_alt) > 1 and (item.contentType == 'episode' or item.contentType == 'season'):
itemlist_alt = sorted(itemlist_alt, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber))) #clasificamos
tmdb.set_infoLabels(itemlist_alt, True) #TMDB de la lista de episodios
itemlist.extend(itemlist_alt)
# Requerido para AutoPlay
autoplay.start(itemlist, item) #Lanzamos Autoplay
return itemlist

View File

@@ -17,10 +17,10 @@ from channels import autoplay
from platformcode import config, logger
IDIOMAS = {'1':'Cast', '2':'Lat', '3':'VOSE', '4':'VO'}
IDIOMAS = {'EspaL':'Cast', 'LatinoL':'Lat', 'SubL':'VOSE', 'OriL':'VO'}
list_language = IDIOMAS.values()
CALIDADES = {'1':'1080','2':'720','3':'480','4':'360'}
CALIDADES = {'1080p':'1080','720p':'720','480p':'480','360p':'360'}
list_quality = ['1080', '720', '480', '360']
@@ -89,17 +89,20 @@ def section(item):
logger.info()
itemlist=[]
data = get_source(host+item.type)
if 'Genero' in item.title:
data = scrapertools.find_single_match(data, 'genero.*?</ul>')
data = scrapertools.find_single_match(data, 'Generos.*?</ul>')
elif 'Año' in item.title:
data = scrapertools.find_single_match(data, 'año.*?</ul>')
patron = '<a href=(.*?) >(.*?)</a>'
data = scrapertools.find_single_match(data, 'Años.*?</ul>')
patron = "<li onclick=filter\(this, '([^']+)', \d+\);>"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
for scrapedtitle in matches:
title = scrapedtitle
itemlist.append(Item(channel=item.channel, url=scrapedurl, title=title, action='list_all',
if r'\d+' in scrapedtitle:
url = '%s%s/filtro/,/%s,' % (host, item.type, title)
else:
url = '%s%s/filtro/%s,/,' % (host, item.type, title)
itemlist.append(Item(channel=item.channel, url=url, title=title, action='list_all',
type=item.type))
return itemlist
@@ -109,46 +112,33 @@ def list_all(item):
itemlist = []
data = get_source(item.url)
#logger.debug(data)
#return
if item.type == 'peliculas':
patron = '<article class=Items>.*?<img src=(.*?) />.*?<a href=(.*?)><h2>(.*?)</h2>.*?'
patron += "<p>(.*?)</p><span>(\d{4}) /.*?</span>.*?'(\d+)'"
matches = re.compile(patron, re.DOTALL).findall(data)
patron = '<article class=Item><a href=([^>]+)><div class=Poster>'
patron += '<img src=(.+?)(?:>|alt).*?<h2>([^>]+)</h2>.*?</article>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedurl, scrapedtitle, scrapedplot, year, video_id in matches:
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
title = '%s [%s]' % (scrapedtitle, year)
contentTitle = scrapedtitle
thumbnail = scrapedthumbnail
url = scrapedurl
title = scrapedtitle
thumbnail = scrapedthumbnail.strip()
url = scrapedurl
filter_thumb = thumbnail.replace("https://image.tmdb.org/t/p/w154", "")
filter_list = {"poster_path": filter_thumb}
filter_list = filter_list.items()
new_item = Item(channel=item.channel,
title=title,
url=url,
thumbnail=thumbnail,
plot=thumbnail,
infoLabels={'filtro':filter_list})
itemlist.append(item.clone(action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
contentTitle=contentTitle,
video_id=video_id,
infoLabels={'year':year}))
if item.type == 'peliculas':
new_item.action = 'findvideos'
new_item.contentTitle = scrapedtitle
else:
new_item.action = 'seasons'
new_item.contentSerieName = scrapedtitle
elif item.type == 'series':
patron = '<article class=GoItemEp>.*?<a href=(.*?)>.*?<img src=(.*?) />.*?'
patron +='<h2>(.*?)</h2><p>(.*?)</p><span>(\d{4}) /'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot, year in matches:
title = scrapedtitle
contentSerieName = scrapedtitle
thumbnail = scrapedthumbnail
url = scrapedurl
itemlist.append(item.clone(action='seasons',
title=title,
url=url,
thumbnail=thumbnail,
plot=scrapedplot,
contentSerieName=contentSerieName,
infoLabels={'year':year}))
itemlist.append(new_item)
tmdb.set_infoLabels(itemlist, seekTmdb=True)
# Paginación
@@ -199,21 +189,18 @@ def episodesxseasons(item):
itemlist = []
data=get_source(item.url)
logger.debug(data)
patron= "ViewEpisode\('(\d+)', this\)><div class=num>%s - (\d+)</div>" % item.infoLabels['season']
patron += ".*?src=(.*?) />.*?namep>(.*?)<span>"
patron= "<li><a href=([^>]+)><b>%s - (\d+)</b><h2 class=eTitle>([^>]+)</h2>" % item.infoLabels['season']
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for video_id, scrapedepisode, scrapedthumbnail, scrapedtitle in matches:
for url, scrapedepisode, scrapedtitle in matches:
infoLabels['episode'] = scrapedepisode
title = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle)
itemlist.append(Item(channel=item.channel, title= title, url=item.url, thumbnail=scrapedthumbnail,
action='findvideos', video_id=video_id, infoLabels=infoLabels))
itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos',
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
@@ -224,87 +211,45 @@ def findvideos(item):
logger.info()
from lib import jsunpack
itemlist = []
headers = {'referer':item.url}
if item.video_id == '':
find_id = get_source(item.url)
#logger.debug(find_id)
#return
item.video_id = scrapertools.find_single_match(find_id, 'var centerClick = (\d+);')
url = 'https://goovie.co/api/links/%s' % item.video_id
data = httptools.downloadpage(url, headers=headers).data
video_list = jsontools.load(data)
for video_info in video_list:
logger.debug(video_info)
url = video_info['visor']
plot = 'idioma: %s calidad: %s' % (video_info['idioma'], video_info['calidad'])
data = get_source(item.url)
patron = "onclick=clickLink\(this, '([^']+)', '([^']+)', '([^']+)'\);>"
matches = re.compile(patron, re.DOTALL).findall(data)
headers = {'referer': item.url}
for url, quality, language in matches:
data = httptools.downloadpage(url, headers=headers, follow_redirects=False).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
packed = scrapertools.find_single_match(data, '(eval\(.*?);var')
unpacked = jsunpack.unpack(packed)
logger.debug('unpacked %s' % unpacked)
server = scrapertools.find_single_match(unpacked, "src:.'(http://\D+)/")
id = scrapertools.find_single_match(unpacked, "src:.'http://\D+/.*?description:.'(.*?).'")
if server == '':
if 'powvideo' in unpacked:
id = scrapertools.find_single_match(unpacked ,",description:.'(.*?).'")
server= 'https://powvideo.net'
id = scrapertools.find_single_match(unpacked, ",description:.'(.*?).'")
server = 'https://powvideo.net'
url = '%s/%s' % (server, id)
if server != '' and id != '':
language = IDIOMAS[video_info['idioma']]
quality = CALIDADES[video_info['calidad']]
language = IDIOMAS[language]
quality = CALIDADES[quality]
title = ' [%s] [%s]' % (language, quality)
itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', language=language,
quality=quality))
itmelist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
itemlist.append(Item(channel=item.channel, title='%s' + title, url=url, action='play', language=language,
quality=quality, infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
return sorted(itemlist, key=lambda i: i.language)
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
item.type = 'peliculas'
if texto != '':
return search_results(item)
return list_all(item)
else:
return []
def search_results(item):
    """Scrape the search-results page at item.url and build an Itemlist.

    Each result becomes an Item routed to 'findvideos' (movies) or
    'seasons' (series) depending on the scraped content type. TMDB info
    is filled in before returning.

    NOTE(review): indentation reconstructed from a whitespace-mangled diff;
    token-for-token identical to the original.
    """
    logger.info()
    itemlist=[]
    # Download the raw HTML of the search-results page.
    data=get_source(item.url)
    logger.debug(data)
    # Pattern captures: url, content type ("Serie" or movie), thumbnail,
    # title, plot and a 4-digit year from each <article class=Items> card.
    patron = '<article class=Items>.*?href=(.*?)>.*?typeContent>(.*?)<.*?'
    patron += '<img src=(.*?) />.*?<h2>(.*?)</h2><p>(.*?)</p><span>(\d{4})<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, content_type ,scrapedthumb, scrapedtitle, scrapedplot, year in matches:
        title = scrapedtitle
        url = scrapedurl
        thumbnail = scrapedthumb
        plot = scrapedplot
        # Anything not labelled 'Serie' is treated as a movie.
        if content_type != 'Serie':
            action = 'findvideos'
        else:
            action = 'seasons'
        new_item=Item(channel=item.channel, title=title, url=url, thumbnail=thumbnail, plot=plot,
                      action=action, type=content_type, infoLabels={'year':year})
        # Movies use contentTitle, series use contentSerieName (needed by TMDB lookup).
        if new_item.action == 'findvideos':
            new_item.contentTitle = new_item.title
        else:
            new_item.contentSerieName = new_item.title
        itemlist.append(new_item)
    # Enrich all items with TMDB metadata (year helps disambiguate).
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def newest(categoria):
logger.info()
itemlist = []
@@ -313,9 +258,9 @@ def newest(categoria):
if categoria in ['peliculas']:
item.url = host + 'peliculas'
elif categoria == 'infantiles':
item.url = host + 'peliculas/generos/animación'
item.url = host + 'peliculas/filtro/Animación,/,'
elif categoria == 'terror':
item.url = host + 'peliculas/generos/terror'
item.url = host + 'peliculas/filtro/Terror,/,'
item.type='peliculas'
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>':

View File

@@ -22,6 +22,30 @@
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra (TMDB)",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"CAST",
"LAT",
"VO",
"VOS",
"VOSE"
]
},
{
"id": "seleccionar_serie_temporada",
"type": "list",
@@ -43,12 +67,25 @@
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra (TMDB)",
"default": true,
"id": "timeout_downloadpage",
"type": "list",
"label": "Timeout (segs.) en descarga de páginas o verificación de servidores",
"default": 5,
"enabled": true,
"visible": true
"visible": true,
"lvalues": [
"None",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10"
]
}
]
}

View File

@@ -4,6 +4,7 @@ import re
import sys
import urllib
import urlparse
import time
from channelselector import get_thumb
from core import httptools
@@ -13,13 +14,24 @@ from core.item import Item
from platformcode import config, logger
from core import tmdb
from lib import generictools
from channels import filtertools
from channels import autoplay
#IDIOMAS = {'CAST': 'Castellano', 'LAT': 'Latino', 'VO': 'Version Original'}
IDIOMAS = {'Castellano': 'CAST', 'Latino': 'LAT', 'Version Original': 'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['torrent']
host = "https://grantorrent.net/"
channel = "grantorrent"
dict_url_seasons = dict()
__modo_grafico__ = config.get_setting('modo_grafico', 'grantorrent')
modo_serie_temp = config.get_setting('seleccionar_serie_temporada', 'grantorrent')
modo_ultima_temp = config.get_setting('seleccionar_ult_temporadda_activa', 'grantorrent')
__modo_grafico__ = config.get_setting('modo_grafico', channel)
timeout = config.get_setting('timeout_downloadpage', channel)
modo_serie_temp = config.get_setting('seleccionar_serie_temporada', channel)
modo_ultima_temp = config.get_setting('seleccionar_ult_temporadda_activa', channel)
def mainlist(item):
@@ -32,7 +44,10 @@ def mainlist(item):
thumb_series = get_thumb("channels_tvshow.png")
thumb_series_hd = get_thumb("channels_tvshow_hd.png")
thumb_buscar = get_thumb("search.png")
thumb_separador = get_thumb("next.png")
thumb_settings = get_thumb("setting_0.png")
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=host, extra="peliculas", thumbnail=thumb_pelis))
@@ -43,11 +58,12 @@ def mainlist(item):
#Buscar series
itemlist.append(Item(channel=item.channel, action="search", title="Buscar en Series >>", url=host + "series/", extra="series", thumbnail=thumb_buscar))
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]Configuración:[/COLOR]", folder=False, thumbnail=thumb_separador))
itemlist.append(
Item(channel=item.channel, action="", title="[COLOR yellow]Configuración del Canal:[/COLOR]", url="", thumbnail=thumb_settings))
itemlist.append(
Item(channel=item.channel, action="settingCanal", title="Opciones de Videoteca y TMDB", url="", thumbnail=thumb_settings))
itemlist.append(Item(channel=item.channel, action="settingCanal", title="Configurar canal", thumbnail=thumb_settings))
autoplay.show_option(item.channel, itemlist) #Activamos Autoplay
return itemlist
@@ -120,6 +136,8 @@ def listado(item):
cnt_tot = 40 # Poner el num. máximo de items por página
cnt_title = 0 # Contador de líneas insertadas en Itemlist
result_mode = config.get_setting("result_mode", channel="search") # Búsquedas globales: listado completo o no
if not item.extra2:
item.extra2 = ''
#Sistema de paginado para evitar páginas vacías o semi-vacías en casos de búsquedas con series con muchos episodios
title_lista = [] # Guarda la lista de series que ya están en Itemlist, para no duplicar lineas
@@ -133,18 +151,25 @@ def listado(item):
cnt_top = 10 #max. num de páginas web a leer antes de pintar
total_pag = 1
post_num = 1 #num pagina actual
inicio = time.time() # Controlaremos que el proceso no exceda de un tiempo razonable
fin = inicio + 5 # Después de este tiempo pintamos (segundos)
timeout_search = timeout # Timeout para descargas
if item.extra == 'search':
timeout_search = timeout * 2 # Timeout un poco más largo para las búsquedas
if timeout_search < 5:
timeout_search = 5 # Timeout un poco más largo para las búsquedas
#Máximo num. de líneas permitidas por TMDB (40). Máx de 5 páginas por Itemlist para no degradar el rendimiento.
#Si itemlist sigue vacío después de leer 5 páginas, se pueden llegar a leer hasta 10 páginas para encontrar algo
while cnt_title <= cnt_tot and cnt_next < cnt_top:
while cnt_title <= cnt_tot and cnt_next < cnt_top and fin > time.time():
# Descarga la página
data = ''
try:
if not item.post:
item.post = item.url
video_section = ''
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.post).data)
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.post, timeout=timeout_search).data)
video_section = scrapertools.find_single_match(data, '<div class="contenedor-home">(.*?</div>)</div></div>')
except:
pass
@@ -234,6 +259,10 @@ def listado(item):
if scrapedurl_alt in title_lista_alt or scrapedurl_alt in title_lista_alt_for: # si ya se ha tratado, pasamos al siguiente item
continue
#Verificamos si el idioma está dentro del filtro, si no pasamos
if not lookup_idiomas_paginacion(item, scrapedurl, scrapedtitle, lang, list_language):
continue
title_lista_alt_for += [scrapedurl_alt]
cnt_title += 1 # Sería una línea real más para Itemlist
@@ -278,7 +307,7 @@ def listado(item):
if scrapedurl_alt in title_lista: # si ya se ha tratado, pasamos al siguiente item
continue # solo guardamos la url para series y docus
title_lista += [scrapedurl_alt]
cnt_title += 1 # Sería una línea real más para Itemlist
#cnt_title += 1 # Sería una línea real más para Itemlist
item_local = item.clone() #Creamos copia de Item para trabajar y limpiamos campos innecesarios
if item_local.media: #Viene de Búsquedas
@@ -315,6 +344,9 @@ def listado(item):
if "dual" in lang.lower() or "dual" in title.lower():
item_local.language[0:0] = ["DUAL"]
if item_local.language == []:
item_local.language = ['CAST'] #Por defecto
#Limpiamos el título de la basura innecesaria
title = title.replace("Dual", "").replace("dual", "").replace("Subtitulada", "").replace("subtitulada", "").replace("Subt", "").replace("subt", "").replace("Sub", "").replace("sub", "").replace("(Reparado)", "").replace("(Proper)", "").replace("(proper)", "").replace("Proper", "").replace("proper", "").replace("(Latino)", "").replace("Latino", "")
title = title.replace("- HDRip", "").replace("(HDRip)", "").replace("- Hdrip", "").replace("(microHD)", "").replace("(DVDRip)", "").replace("(HDRip)", "").replace("(BR-LINE)", "").replace("(HDTS-SCREENER)", "").replace("(BDRip)", "").replace("(BR-Screener)", "").replace("(DVDScreener)", "").replace("TS-Screener", "").replace(" TS", "").replace(" Ts", "")
@@ -346,8 +378,13 @@ def listado(item):
item_local.from_title = title.strip() #Guardamos esta etiqueta para posible desambiguación de título
item_local.infoLabels['year'] = "-" #Reseteamos el año para que TMDB nos lo de
#Agrega el item local a la lista itemlist
itemlist.append(item_local.clone())
#Ahora se filtra por idioma, si procede, y se pinta lo que vale
if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra
itemlist = filtertools.get_link(itemlist, item_local, list_language)
else:
itemlist.append(item_local.clone()) #Si no, pintar pantalla
cnt_title = len(itemlist) #Contador de líneas añadidas
#if not item.category and result_mode == 0: #Si este campo no existe, viene de la primera pasada de una búsqueda global
# return itemlist #Retornamos sin pasar por la fase de maquillaje para ahorrar tiempo
@@ -373,7 +410,7 @@ def listado(item):
else:
title = '[COLOR gold]Página siguiente >>[/COLOR] %s' % post_num
itemlist.append(item.clone(action="listado", title=title, url=next_page, thumbnail=get_thumb("next.png"), title_lista=title_lista))
itemlist.append(item.clone(action="listado", title=title, url=next_page, thumbnail=get_thumb("next.png"), title_lista=title_lista, language=''))
return itemlist
@@ -381,13 +418,17 @@ def listado(item):
def findvideos(item):
logger.info()
itemlist = []
itemlist_t = [] #Itemlist total de enlaces
itemlist_f = [] #Itemlist de enlaces filtrados
if not item.language:
item.language = ['CAST'] #Castellano por defecto
#logger.debug(item)
#Bajamos los datos de la página
data = ''
try:
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url, timeout=timeout).data)
except:
pass
@@ -420,6 +461,14 @@ def findvideos(item):
#Ahora recorremos todos los links por calidades
for lang, quality, size, scrapedurl in matches:
temp_epi = ''
if scrapertools.find_single_match(quality, '\s?\(Contrase.+?: <font color="[^>]*>(.*?)<\/font>\)'):
password = scrapertools.find_single_match(quality, '\s?\(Contrase.+?: <font color="[^>]*>(.*?)<\/font>\)')
quality = re.sub(r'\s?\(Contrase.+?: <font color="[^>]*>(.*?)<\/font>\)', '', quality)
quality += ' [Contraseña=%s]' % password
if scrapertools.find_single_match(size, '\s?\(Contrase.+?: <font color="[^>]*>(.*?)<\/font>\)'):
password = scrapertools.find_single_match(size, '\s?\(Contrase.+?: <font color="[^>]*>(.*?)<\/font>\)')
size = re.sub(r'\s?\(Contrase.+?: <font color="[^>]*>(.*?)<\/font>\)', '', size)
size += ' [Contraseña=%s]' % password
if item.contentType == "episode": #En Series los campos están en otro orden. No hay size, en su lugar sxe
temp_epi = quality
quality = size
@@ -485,10 +534,10 @@ def findvideos(item):
else:
item_local.quality = '%s [/COLOR][COLOR white][%s]' % (item_local.quality, size)
if item_local.action == 'show_result': #Viene de una búsqueda global
channel = item_local.channel.capitalize()
channel_alt = item_local.channel.capitalize()
if item_local.from_channel:
channel = item_local.from_channel.capitalize()
item_local.quality = '[COLOR yellow][%s][/COLOR] %s' % (channel, item_local.quality)
channel_alt = item_local.from_channel.capitalize()
item_local.quality = '[COLOR yellow][%s][/COLOR] %s' % (channel_alt, item_local.quality)
#Salvamos la url del .torrent
if scrapedurl:
@@ -507,11 +556,26 @@ def findvideos(item):
item_local.action = "play" #Visualizar vídeo
item_local.server = "torrent" #Seridor Torrent
itemlist.append(item_local.clone()) #Pintar pantalla
itemlist_t.append(item_local.clone()) #Pintar pantalla, si no se filtran idiomas
# Requerido para FilterTools
if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra
itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío
#logger.debug("TORRENT: " + item_local.url + " / title gen/torr: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality)
#logger.debug(item_local)
if len(itemlist_f) > 0: #Si hay entradas filtradas...
itemlist.extend(itemlist_f) #Pintamos pantalla filtrada
else:
if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ...
thumb_separador = get_thumb("next.png") #... pintamos todo con aviso
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador))
itemlist.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado
# Requerido para AutoPlay
autoplay.start(itemlist, item) #Lanzamos Autoplay
return itemlist
@@ -530,7 +594,7 @@ def episodios(item):
data = ''
try:
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data) #Cargamos los datos de la página
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url, timeout=timeout).data) #Cargamos los datos de la página
patron_actual = '<link rel="canonical" href="(.*?)"' #Patrón de url temporada actual
patron_actual_num = 'temporadas?-(\d+)' #Patrón de núm. de temporada actual
@@ -611,7 +675,7 @@ def episodios(item):
while temp_actual != '': #revisamos las temporadas hasta el final
if not data: #si no hay datos, descargamos. Si los hay de loop anterior, los usamos
try:
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(temp_actual).data)
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(temp_actual, timeout=timeout).data)
#Controla que no haya un bucle en la cadena de links entre temporadas
if scrapertools.find_single_match(temp_actual, patron_actual_num) in temp_lista:
@@ -842,6 +906,34 @@ def episodios(item):
item, itemlist = generictools.post_tmdb_episodios(item, itemlist)
return itemlist
def lookup_idiomas_paginacion(item, scrapedurl, title, lang, list_language):
    """Detect the language(s) of a scraped entry and check the language filter.

    Rebuilds item.language (side effect) from the scraped language string,
    title and URLs, then runs it through filtertools when the channel's
    'filter_languages' setting is active (category listings are exempt).

    Parameters:
        item: current Item; its .language list is rebuilt here.
        scrapedurl: scraped entry URL (checked for 'vose').
        title: scraped title (checked for 'latino' / 'sub').
        lang: scraped language label.
        list_language: valid language codes for filtertools.

    Returns:
        True if the entry should be listed (passes the filter, or filtering
        is off / not applicable), False if it is filtered out.
    """
    logger.info()
    estado = True
    item.language = []
    itemlist = []

    if "latino" in lang.lower() or "latino" in item.url or "latino" in title.lower():
        item.language += ["LAT"]          # fixed: was item_local.language -> NameError
    if "ingles" in lang.lower() or "ingles" in item.url or "vose" in scrapedurl or "vose" in item.url:
        # fixed: 'VOSE' was compared against a lower-cased string and never matched
        if "vose" in lang.lower() or "sub" in title.lower() or "vose" in scrapedurl or "vose" in item.url:
            item.language += ["VOS"]      # fixed: was item_local.language -> NameError
        else:
            item.language += ["VO"]       # fixed: was item_local.language -> NameError
    if item.language == []:
        item.language = ['CAST']                                    # Castilian by default

    # Filter by language if enabled; category pages in other languages are excluded
    if config.get_setting('filter_languages', channel) > 0 and item.extra2 != 'categorias':
        itemlist = filtertools.get_link(itemlist, item, list_language)
        if len(itemlist) == 0:
            estado = False

    # Back to the next action in the channel
    return estado
def actualizar_titulos(item):

View File

@@ -275,7 +275,11 @@ class main(xbmcgui.WindowDialog):
skin = xbmc.getSkinDir()
self.fonts = get_fonts(skin)
self.setCoordinateResolution(2)
#### Compatibilidad con Kodi 18 ####
if config.get_platform(True)['num_version'] < 18:
self.setCoordinateResolution(2)
self.actorButton = xbmcgui.ControlButton(995, 475, 55, 55, '', font='Font40', alignment=0x00000006,
noFocusTexture='https://s17.postimg.cc/40acsuihb/thumb_search_star_no.png',
focusTexture='https://s33.postimg.cc/ikk0qyvrj/thumb_search_star.png',
@@ -805,7 +809,10 @@ class related(xbmcgui.WindowDialog):
import traceback
logger.error(traceback.format_exc())
self.setCoordinateResolution(2)
#### Compatibilidad con Kodi 18 ####
if config.get_platform(True)['num_version'] < 18:
self.setCoordinateResolution(2)
self.background = xbmcgui.ControlImage(178, 50, 1053, 634, self.infoLabels.get("fanart",
"http://s6.postimg.cc/fflvear2p/nofanart.png"))
self.addControl(self.background)
@@ -1207,6 +1214,7 @@ def busqueda_global(item, infoLabels, org_title=False):
cat = ["serie"]
else:
cat = ["movie"]
cat += ["infoPlus"]
new_item = Item()
new_item.extra = infoLabels.get("title", "")
@@ -1546,7 +1554,10 @@ class ActorInfo(xbmcgui.WindowDialog):
elif not actor_tmdb.result.get("biography"):
actor_tmdb.result["biography"] = "Sin información"
self.setCoordinateResolution(2)
#### Compatibilidad con Kodi 18 ####
if config.get_platform(True)['num_version'] < 18:
self.setCoordinateResolution(2)
self.background = xbmcgui.ControlImage(30, -5, 1250, 730, 'http://imgur.com/7ccBX3g.png')
self.addControl(self.background)
if set_animation:
@@ -1952,7 +1963,10 @@ class images(xbmcgui.WindowDialog):
for imagen, title in self.mal:
self.imagenes.append(imagen)
self.setCoordinateResolution(2)
#### Compatibilidad con Kodi 18 ####
if config.get_platform(True)['num_version'] < 18:
self.setCoordinateResolution(2)
self.shadow = xbmcgui.ControlImage(245, 10, 1011, 700, 'http://imgur.com/66VSLTo.png')
self.addControl(self.shadow)
if set_animation:
@@ -2175,7 +2189,10 @@ class Trailer(xbmcgui.WindowXMLDialog):
self.doModal()
def onInit(self):
self.setCoordinateResolution(0)
#### Compatibilidad con Kodi 18 ####
if config.get_platform(True)['num_version'] < 18:
self.setCoordinateResolution(0)
if not self.video_url:
platformtools.dialog_notification(config.get_localized_string(60507),
config.get_localized_string(60508), 2)

View File

@@ -4,6 +4,7 @@ import re
import urlparse
import urllib
from core import tmdb
from core import servertools
from core import httptools
from core import scrapertools
@@ -11,7 +12,7 @@ from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host="http://maxipelis24.com"
host = "http://maxipelis24.com"
def mainlist(item):
@@ -19,11 +20,11 @@ def mainlist(item):
itemlist = []
itemlist.append(Item(channel=item.channel, title="peliculas", action="movies", url=host, thumbnail=get_thumb('movies', auto=True)))
itemlist.append(Item(channel=item.channel, action="category", title="Año de Estreno", url=host, cat='year', thumbnail=get_thumb('year', auto=True)))
itemlist.append(Item(channel=item.channel, action="category", title="Géneros", url=host, cat='genre', thumbnail=get_thumb('genres', auto=True)))
itemlist.append(Item(channel=item.channel, action="category", title="Calidad", url=host, cat='quality', thumbnail=get_thumb("quality", auto=True)))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+"?s=", thumbnail=get_thumb("search", auto=True)))
itemlist.append(Item(channel = item.channel, title = "peliculas", action = "movies", url = host, thumbnail = get_thumb('movies', auto = True)))
itemlist.append(Item(channel = item.channel, action = "category", title = "Año de Estreno", url = host, cat = 'year', thumbnail = get_thumb('year', auto = True)))
itemlist.append(Item(channel = item.channel, action = "category", title = "Géneros", url = host, cat = 'genre', thumbnail = get_thumb('genres', auto = True)))
itemlist.append(Item(channel = item.channel, action = "category", title = "Calidad", url = host, cat = 'quality', thumbnail = get_thumb("quality", auto = True)))
itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "?s=", thumbnail = get_thumb("search", auto = True)))
return itemlist
@@ -51,8 +52,8 @@ def category(item):
patron = 'li><a href="([^"]+)">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl , scrapedtitle in matches:
itemlist.append(Item(channel=item.channel, action='movies', title=scrapedtitle, url=scrapedurl, type='cat', first=0))
for scrapedurl, scrapedtitle in matches:
itemlist.append(Item(channel = item.channel, action = 'movies', title =scrapedtitle, url = scrapedurl, type = 'cat', first = 0))
return itemlist
def movies(item):
@@ -70,56 +71,60 @@ def movies(item):
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, img, scrapedtitle, ranking, resto, year, quality in matches:
scrapedtitle = re.sub(r'\d{4}|[()]','', scrapedtitle)
plot = scrapertools.htmlclean(resto).strip()
title = '%s [COLOR yellow](%s)[/COLOR] [COLOR red][%s][/COLOR]'% (scrapedtitle, ranking, quality)
itemlist.append(Item(channel=item.channel,
title=title,
url=scrapedurl,
action="findvideos",
plot=plot,
thumbnail=img,
title = ' %s [COLOR yellow](%s)[/COLOR] [COLOR red][%s][/COLOR]' % (scrapedtitle, ranking, quality)
itemlist.append(Item(channel = item.channel,
title = title,
url = scrapedurl,
action = "findvideos",
plot = plot,
thumbnail = img,
contentTitle = scrapedtitle,
contentType = "movie",
quality=quality))
quality = quality,
infoLabels = {'year': year}))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
#Paginacion
next_page = '<div class="pag_.*?href="([^"]+)">Siguiente<'
matches = re.compile(next_page, re.DOTALL).findall(data)
matches = re.compile('<div class="pag_.*?href="([^"]+)">Siguiente<', re.DOTALL).findall(data)
if matches:
url = urlparse.urljoin(item.url, matches[0])
itemlist.append(Item(channel=item.channel, action = "movies", title = "Página siguiente >>",url = url))
itemlist.append(Item(channel = item.channel, action = "movies", title = "Página siguiente >>", url = url))
return itemlist
def findvideos(item):
logger.info()
itemlist=[]
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data, '<div id="contenedor">(.*?)</div></div></div>')
# Busca los enlaces a los videos
listavideos = servertools.findvideos(data)
for video in listavideos:
videotitle = scrapertools.unescape(video[0])
url = video[1]
server = video[2]
itemlist.append(Item(channel=item.channel, action="play", server=server, title=videotitle, url=url,
thumbnail=item.thumbnail, plot=item.plot, fulltitle=item.title, folder=False))
itemlist.append(Item(channel = item.channel,
action = "play",
server = server,
title = videotitle,
url = url,
thumbnail = item.thumbnail,
plot = item.plot,
contentTitle = item.contentTitle,
infoLabels = item.infoLabels,
folder = False))
# Opción "Añadir esta película a la biblioteca de KODI"
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle=item.contentTitle,
thumbnail=item.thumbnail
))
itemlist.append(Item(channel = item.channel,
title = '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url = item.url,
action = "add_pelicula_to_library",
extra = "findvideos",
contentTitle = item.contentTitle,
thumbnail = item.thumbnail
))
return itemlist

View File

@@ -38,6 +38,22 @@
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"CAST",
"LAT",
"VO",
"VOS",
"VOSE"
]
},
{
"id": "seleccionar_ult_temporadda_activa",
"type": "bool",
@@ -61,6 +77,27 @@
"default": true,
"enabled": true,
"visible": true
},
{
"id": "timeout_downloadpage",
"type": "list",
"label": "Timeout (segs.) en descarga de páginas o verificación de servidores",
"default": 5,
"enabled": true,
"visible": true,
"lvalues": [
"None",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10"
]
}
]
}

View File

@@ -4,6 +4,7 @@ import re
import sys
import urllib
import urlparse
import time
from channelselector import get_thumb
from core import httptools
@@ -13,10 +14,23 @@ from core.item import Item
from platformcode import config, logger
from core import tmdb
from lib import generictools
from channels import filtertools
from channels import autoplay
host = config.get_setting('domain_name', 'mejortorrent1')
__modo_grafico__ = config.get_setting('modo_grafico', 'mejortorrent1')
#IDIOMAS = {'CAST': 'Castellano', 'LAT': 'Latino', 'VO': 'Version Original'}
IDIOMAS = {'Castellano': 'CAST', 'Latino': 'LAT', 'Version Original': 'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['torrent']
channel = "mejortorrent1"
host = config.get_setting('domain_name', channel)
categoria = channel.capitalize()
__modo_grafico__ = config.get_setting('modo_grafico', channel)
timeout = config.get_setting('timeout_downloadpage', channel)
def mainlist(item):
logger.info()
@@ -30,7 +44,10 @@ def mainlist(item):
thumb_series_az = get_thumb("channels_tvshow_az.png")
thumb_docus = get_thumb("channels_documentary.png")
thumb_buscar = get_thumb("search.png")
thumb_separador = get_thumb("next.png")
thumb_settings = get_thumb("setting_0.png")
autoplay.init(item.channel, list_servers, list_quality)
#itemlist.append(Item(channel=item.channel, title="Novedades", action="listado_busqueda", extra="novedades", tipo=False,
# url= host + "ultimos-torrents/", thumbnail=thumb_buscar))
@@ -46,19 +63,20 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", thumbnail=thumb_buscar, tipo=False))
itemlist.append(
Item(channel=item.channel, action="", title="[COLOR yellow]Configuración del Canal:[/COLOR]", url="", thumbnail=thumb_settings))
itemlist.append(
Item(channel=item.channel, action="settingCanal", title="URL del Canal y otros", url="", thumbnail=thumb_settings))
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]Configuración:[/COLOR]", folder=False, thumbnail=thumb_separador))
itemlist.append(Item(channel=item.channel, action="configuracion", title="Configurar canal", thumbnail=thumb_settings))
autoplay.show_option(item.channel, itemlist) #Activamos Autoplay
return itemlist
def settingCanal(item):
def configuracion(item):
from platformcode import platformtools
platformtools.show_channel_settings()
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return
return
def submenu(item):
@@ -126,7 +144,7 @@ def listado(item):
try:
data = ''
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url, timeout=timeout).data)
data = re.sub('\r\n', '', data).decode('utf8').encode('utf8')
data = data.replace("'", '"')
except:
@@ -234,16 +252,15 @@ def listado(item):
url_next_page = urlparse.urljoin(item.url, scrapertools.find_single_match(data, patron_next_page) + str(cnt_pag_num + 2))
#url_last_page = re.sub(r"\d+$", "9999", url_next_page)
#data_last = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(url_last_page).data)
if "/documentales" in item.url:
patron_last_page = '<a href="[^"]+\/(\d+)\/" class="paginar" >\d+<\/a>&nbsp;<\/div>'
else:
patron_last_page = '<a class="paginar" href="[^"]+\/(\d+)\/">&\w+;<\/a>&\w+;<\/div>'
#if "/documentales" in item.url:
#patron_last_page = '<a href="[^"]+\/(\d+)\/" class="paginar" >\d+<\/a>&nbsp;<\/div>'
patron_last_page = '<a class="paginar" href="[^"]+\/(\d+)\/">&\w+;<\/a>&\w+;<\/div>'
#patron_last_page = '<span class="nopaginar">(\d+)<\/span>'
if "/documentales" in item.url:
item.last_page = int(scrapertools.find_single_match(data, patron_last_page))
else:
try:
#item.last_page = int(scrapertools.find_single_match(data, patron_last_page)) * (len(matches) / cnt_tot)
item.last_page = int(scrapertools.find_single_match(data, patron_last_page))
except:
item.last_page = 0
if matches_cnt > cnt_tot and item.extra == "documentales" and pag:
item.next_page = ''
@@ -323,7 +340,7 @@ def listado(item):
real_title, item_local.contentSeason, episodio, item_local.quality = scrapertools.find_single_match(scrapedurl, patron_title_ep)
#Hay que buscar la raiz de la temporada
data_epi = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item_local.url).data)
data_epi = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item_local.url, timeout=timeout).data)
url = scrapertools.find_single_match(data_epi, '<tr><td>.*<a href="([^"]+)" style="text-decoration:none;"><h1 style=')
if not url:
url = scrapertools.find_single_match(data_epi, '<td><a href="(secciones.php\?sec\=descargas&ap=[^"]+)"')
@@ -411,6 +428,9 @@ def listado(item):
if scrapertools.find_single_match(title, r'-\s[m|M].*?serie'):
title = re.sub(r'-\s[m|M].*?serie', '', title)
title_subs += ["Miniserie"]
if item_local.language == []:
item_local.language = ['CAST'] #Por defecto
if title.endswith('.'):
title = title[:-1]
@@ -522,6 +542,13 @@ def listado_busqueda(item):
curr_page_num = 1 # Página actual
category = "" # Guarda la categoria que viene desde una busqueda global
matches = []
inicio = time.time() # Controlaremos que el proceso no exceda de un tiempo razonable
fin = inicio + 5 # Después de este tiempo pintamos (segundos)
timeout_search = timeout # Timeout para descargas
if item.extra == 'search':
timeout_search = timeout * 2 # Timeout un poco más largo para las búsquedas
if timeout_search < 5:
timeout_search = 5 # Timeout un poco más largo para las búsquedas
if item.url_next_page:
url_next_page = item.url_next_page
@@ -529,12 +556,12 @@ def listado_busqueda(item):
url_next_page = item.url
#Máximo num. de líneas permitidas por TMDB. Máx de 5 páginas por Itemlist para no degradar el rendimiento
while cnt_title <= cnt_tot and cnt_next < 5:
while cnt_title <= cnt_tot and fin > time.time():
status = False # Calidad de los datos leídos
data = ''
try:
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(url_next_page, post=item.post).data)
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(url_next_page, post=item.post, timeout=timeout_search).data)
data = re.sub('\r\n', '', data).decode('utf8').encode('utf8')
data = data.replace("'", '"')
except:
@@ -566,8 +593,13 @@ def listado_busqueda(item):
if len(matches_alt) > 0:
status = True
for scrapedurl, scrapedtitle, scrapedquality, scrapedtype in matches_alt:
if scrapedtype not in ['Juegos', 'Capitulos', 'Musica']: #limpiamos de contenidos no deseados
matches.append(matches_alt[i]) #acumulamos los títulos
if scrapedtype in ['Juegos', 'Capitulos', 'Musica']: #limpiamos de contenidos no deseados
i += 1
continue
if not lookup_idiomas_paginacion(item, scrapedurl, scrapedtitle, scrapedquality, list_language):
i += 1
continue
matches.append(matches_alt[i]) #acumulamos los títulos
i += 1
cnt_title = len(matches) #número de títulos a pintar
@@ -646,7 +678,7 @@ def listado_busqueda(item):
title = title.replace(" Latino", "").replace(" latino", "").replace(" Argentina", "").replace(" argentina", "")
title = title.replace("Castellano", "").replace("castellano", "").replace("inglés", "").replace("ingles", "").replace("Inglés", "").replace("Ingles", "")
if "audio" in title.lower(): #Reservamos info de audio para después de TMDB
if "audio" in title.lower(): #Reservamos info de audio para después de TMDB
title_subs += ['[%s]' % scrapertools.find_single_match(title, r'(\[[a|A]udio.*?\])')]
title = re.sub(r'\[[a|A]udio.*?\]', '', title)
if "[dual" in title.lower():
@@ -655,6 +687,9 @@ def listado_busqueda(item):
if scrapertools.find_single_match(title, r'-\s[m|M].*?serie'):
title = re.sub(r'-\s[m|M].*?serie', '', title)
title_subs += ["Miniserie"]
if item_local.language == []:
item_local.language = ['CAST'] #Por defecto
if title.endswith('.'):
title = title[:-1]
@@ -742,7 +777,13 @@ def listado_busqueda(item):
item_local.contentSeason_save = item_local.contentSeason
del item_local.infoLabels['season']
itemlist.append(item_local.clone())
#Ahora se filtra por idioma, si procede, y se pinta lo que vale
if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra
itemlist = filtertools.get_link(itemlist, item_local, list_language)
else:
itemlist.append(item_local.clone()) #Si no, pintar pantalla
cnt_title = len(itemlist) #Contador de líneas añadidas
#logger.debug(item_local)
@@ -768,6 +809,10 @@ def listado_busqueda(item):
def findvideos(item):
logger.info()
itemlist = []
itemlist_t = [] #Itemlist total de enlaces
itemlist_f = [] #Itemlist de enlaces filtrados
if not item.language:
item.language = ['CAST'] #Castellano por defecto
#logger.debug(item)
@@ -781,7 +826,7 @@ def findvideos(item):
#Bajamos los datos de la página de todo menos de Documentales y Varios
if not item.post:
try:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url, timeout=timeout).data)
data = data.replace('"', "'")
patron = "<form (?:.*?)?"
patron += "name='episodios'.+action='([^']+)' method='post'>.*?"
@@ -882,11 +927,27 @@ def findvideos(item):
item_local.action = "play" #Visualizar vídeo
item_local.server = "torrent" #Seridor Torrent
itemlist.append(item_local.clone()) #Pintar pantalla
itemlist_t.append(item_local.clone()) #Pintar pantalla, si no se filtran idiomas
# Requerido para FilterTools
if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra
itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío
#logger.debug("title=[" + item.title + "], torrent=[ " + item_local.url + " ], url=[ " + url + " ], post=[" + item.post + "], thumbnail=[ " + item.thumbnail + " ]" + " size: " + size)
#logger.debug(item_local)
if len(itemlist_f) > 0: #Si hay entradas filtradas...
itemlist.extend(itemlist_f) #Pintamos pantalla filtrada
else:
if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ...
thumb_separador = get_thumb("next.png") #... pintamos todo con aviso
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador))
itemlist.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado
# Requerido para AutoPlay
autoplay.start(itemlist, item) #Lanzamos Autoplay
return itemlist
@@ -901,7 +962,7 @@ def episodios(item):
# Carga la página
data_ini = ''
try:
data_ini = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
data_ini = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url, timeout=timeout).data)
data_ini = data_ini.replace('"', "'")
except: #Algún error de proceso, salimos
pass
@@ -1016,8 +1077,34 @@ def episodios(item):
item, itemlist = generictools.post_tmdb_episodios(item, itemlist)
return itemlist
def lookup_idiomas_paginacion(item, url, title, calidad, list_language):
    """Detect the language(s) of a scraped title and check the language filter.

    Rebuilds item.language (side effect) from markers in the title, then runs
    it through filtertools when the channel's 'filter_languages' setting is on.

    Parameters:
        item: current Item; its .language list is rebuilt here.
        url: scraped entry URL (currently unused, kept for interface parity).
        title: scraped title, scanned for VOS / LAT markers.
        calidad: scraped quality string (currently unused).
        list_language: valid language codes for filtertools.

    Returns:
        True if the entry should be listed (passes the filter, or filtering
        is disabled), False if it is filtered out.
    """
    logger.info()
    estado = True
    item.language = []
    itemlist = []

    # fixed: bare '"vo" in title' matched substrings of words like 'favorito';
    # use a word-boundary regex so only the tokens VO / VOS / VOSE match
    if "[subs" in title.lower() or "[vos" in title.lower() or "v.o.s" in title.lower() \
            or re.search(r'\bvos?e?\b', title.lower()):
        item.language += ["VOS"]
    if "latino" in title.lower() or "argentina" in title.lower():
        item.language += ["LAT"]
    if item.language == []:
        item.language = ['CAST']                                    # Castilian by default

    # Filter by language if enabled; category pages in other languages are excluded
    if config.get_setting('filter_languages', channel) > 0:
        itemlist = filtertools.get_link(itemlist, item, list_language)
        if len(itemlist) == 0:
            estado = False

    # Back to the next action in the channel
    return estado
def actualizar_titulos(item):
logger.info()

View File

@@ -32,6 +32,22 @@
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"CAST",
"LAT",
"VO",
"VOS",
"VOSE"
]
},
{
"id": "clonenewpct1_channel_default",
"type": "list",

View File

@@ -16,6 +16,15 @@ from core.item import Item
from platformcode import config, logger
from core import tmdb
from lib import generictools
from channels import filtertools
from channels import autoplay
#IDIOMAS = {'CAST': 'Castellano', 'LAT': 'Latino', 'VO': 'Version Original'}
IDIOMAS = {'Castellano': 'CAST', 'Latino': 'LAT', 'Version Original': 'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['torrent']
channel_py = 'newpct1'
@@ -84,12 +93,15 @@ def mainlist(item):
thumb_series_az = get_thumb("channels_tvshow_az.png")
thumb_docus = get_thumb("channels_documentary.png")
thumb_buscar = get_thumb("search.png")
thumb_separador = get_thumb("next.png")
thumb_settings = get_thumb("setting_0.png")
if channel_clone_name == "*** DOWN ***": #Ningún clones activo !!!
itemlist.append(item.clone(action='', title="[COLOR yellow]Ningún canal NewPct1 activo[/COLOR]"))
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos y salimos
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(Item(channel=item.channel, action="submenu_novedades", title="Novedades", url=item.channel_host + "ultimas-descargas/", extra="novedades", thumbnail=thumb_pelis, category=item.category, channel_host=item.channel_host))
itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=item.channel_host,
@@ -103,10 +115,11 @@ def mainlist(item):
itemlist.append(
Item(channel=item.channel, action="search", title="Buscar", url=item.channel_host + "buscar", thumbnail=thumb_buscar, category=item.category, channel_host=item.channel_host))
itemlist.append(
Item(channel=item.channel, action="", title="[COLOR yellow]Configuración de Servidores:[/COLOR]", url="", thumbnail=thumb_settings, category=item.category, channel_host=item.channel_host))
itemlist.append(
Item(channel=item.channel, action="settingCanal", title="Servidores para Ver Online y Descargas", url="", thumbnail=thumb_settings, category=item.category, channel_host=item.channel_host))
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]Configuración:[/COLOR]", folder=False, thumbnail=thumb_separador, category=item.category, channel_host=item.channel_host))
itemlist.append(Item(channel=item.channel, action="settingCanal", title="Configurar canal", thumbnail=thumb_settings, category=item.category, channel_host=item.channel_host))
autoplay.show_option(item.channel, itemlist) #Activamos Autoplay
item.category = '%s / %s' % (channel_py.title(), item.category.title()) #Newpct1 / nombre de clone en pantalla de Mainlist
@@ -124,6 +137,7 @@ def submenu(item):
logger.info()
itemlist = []
item.extra2 = ''
data = ''
try:
@@ -180,6 +194,13 @@ def submenu(item):
#Preguntamos por las entradas que no corresponden al "extra"
if item.extra in scrapedtitle.lower() or (item.extra == "peliculas" and ("cine" in scrapedurl or "anime" in scrapedurl)) or (item.extra == "varios" and ("documentales" in scrapedurl or "varios" in scrapedurl)):
#Si tiene filtro de idiomas, marcamos estas páginas como no filtrables
if "castellano" in title.lower() or "latino" in title.lower() or "subtituladas" in title.lower() or "vo" in title.lower() or "v.o" in title.lower() or "- es" in title.lower():
item.extra2 = "categorias"
else:
item.extra2 = ""
itemlist.append(item.clone(action="listado", title=title, url=scrapedurl))
itemlist.append(item.clone(action="alfabeto", title=title + " [A-Z]", url=scrapedurl))
@@ -195,6 +216,7 @@ def submenu_novedades(item):
itemlist = []
itemlist_alt = []
item.extra2 = ''
data = ''
timeout_search=timeout * 2 #Más tiempo para Novedades, que es una búsqueda
@@ -273,6 +295,13 @@ def submenu_novedades(item):
itemlist_alt = sorted(itemlist_alt, key=lambda it: it.title) #clasificamos
for item_local in itemlist_alt:
item_local.title = re.sub(r'^\d{2}', '', item_local.title) #Borramos la secuencia
#Si tiene filtro de idiomas, marcamos estas páginas como no filtrables
if "castellano" in item_local.title.lower() or "latino" in item_local.title.lower() or "subtituladas" in item_local.title.lower() or "vo" in item_local.title.lower() or "v.o" in item_local.title.lower() or "- es" in item_local.title.lower():
item_local.extra2 = "categorias"
else:
item_local.extra2 = ""
itemlist.append(item_local.clone())
itemlist.append(
@@ -550,6 +579,9 @@ def listado(item):
title = re.sub(r'- [m|M].*?serie ?\w+', '', title)
title_subs += ["[Miniserie]"]
if not item_local.language:
item_local.language = ["CAST"]
#Limpiamos restos en título
title = title.replace("Castellano", "").replace("castellano", "").replace("inglés", "").replace("ingles", "").replace("Inglés", "").replace("Ingles", "").replace("Ingl", "").replace("Engl", "").replace("Calidad", "").replace("de la Serie", "").replace("Spanish", "")
title_alt = title_alt.replace("Castellano", "").replace("castellano", "").replace("inglés", "").replace("ingles", "").replace("Inglés", "").replace("Ingles", "").replace("Ingl", "").replace("Engl", "").replace("Calidad", "").replace("de la Serie", "").replace("Spanish", "")
@@ -565,8 +597,9 @@ def listado(item):
title = re.sub(r'\(\d{4}\)$', '', title)
if re.sub(r'\d{4}$', '', title).strip():
title = re.sub(r'\d{4}$', '', title)
title = re.sub(r'\d+x\d+', '', title)
title = re.sub(r'x\d+', '', title).strip()
if item_local.contentType != "movie":
title = re.sub(r'\d+x\d+', '', title)
title = re.sub(r'x\d+', '', title).strip()
if title.endswith("torrent gratis"): title = title[:-15]
if title.endswith("gratis"): title = title[:-7]
@@ -617,8 +650,11 @@ def listado(item):
#Guarda la variable temporal que almacena la info adicional del título a ser restaurada después de TMDB
item_local.title_subs = title_subs
#Agrega el item local a la lista itemlist
itemlist.append(item_local.clone())
#Ahora se filtra por idioma, si procede, y se pinta lo que vale. Excluye categorías en otros idiomas.
#if config.get_setting('filter_languages', channel_py) > 0 and item.extra2 != 'categorias':
# itemlist = filtertools.get_link(itemlist, item_local, list_language)
#else:
itemlist.append(item_local.clone()) #Si no, pintar pantalla
#logger.debug(item_local)
@@ -802,6 +838,10 @@ def listado_busqueda(item):
if "juego/" in scrapedurl: # no mostramos lo que no sean videos
continue
#Verificamos si el idioma está dentro del filtro, si no pasamos
if not lookup_idiomas_paginacion(item, scrapedurl, scrapedtitle, calidad, list_language):
continue
cnt_title += 1 # Sería una línea real más para Itemlist
#Control de página
@@ -850,7 +890,7 @@ def listado_busqueda(item):
if ("juego/" in scrapedurl or "xbox" in scrapedurl.lower()) and not "/serie" in scrapedurl or "xbox" in scrapedtitle.lower() or "windows" in scrapedtitle.lower() or "windows" in calidad.lower() or "nintendo" in scrapedtitle.lower() or "xbox" in calidad.lower() or "epub" in calidad.lower() or "pdf" in calidad.lower() or "pcdvd" in calidad.lower() or "crack" in calidad.lower(): # no mostramos lo que no sean videos
continue
cnt_title += 1 # Sería una línea real más para Itemlist
#cnt_title += 1 # Sería una línea real más para Itemlist
#Creamos una copia de Item para cada contenido
item_local = item.clone()
@@ -1022,6 +1062,9 @@ def listado_busqueda(item):
title = re.sub(r'- [m|M].*?serie ?\w+', '', title)
title_subs += ["[Miniserie]"]
if not item_local.language:
item_local.language = ["CAST"]
#Limpiamos restos en título
title = title.replace("Castellano", "").replace("castellano", "").replace("inglés", "").replace("ingles", "").replace("Inglés", "").replace("Ingles", "").replace("Ingl", "").replace("Engl", "").replace("Calidad", "").replace("de la Serie", "").replace("Spanish", "")
@@ -1036,8 +1079,9 @@ def listado_busqueda(item):
title = re.sub(r'\(\d{4}\)$', '', title)
if re.sub(r'\d{4}$', '', title).strip():
title = re.sub(r'\d{4}$', '', title)
title = re.sub(r'\d+x\d+', '', title)
title = re.sub(r'x\d+', '', title).strip()
if item_local.contentType != "movie":
title = re.sub(r'\d+x\d+', '', title)
title = re.sub(r'x\d+', '', title).strip()
if "pelisyseries.com" in host and item_local.contentType == "tvshow":
titulo = ''
@@ -1152,11 +1196,11 @@ def listado_busqueda(item):
data_serie = unicode(data_serie, "iso-8859-1", errors="replace").encode("utf-8")
data_serie = data_serie.replace("chapters", "buscar-list")
if not scrapertools.find_single_match(data_serie, pattern): #No ha habido suerte ...
item_local.contentType = "movie" #tratarlo el capítulo como película
if not scrapertools.find_single_match(data_serie, pattern): #No ha habido suerte ...
item_local.contentType = "movie" #tratarlo el capítulo como película
item_local.extra = "peliculas"
else:
item_local.url = url_tvshow #Cambiamos url de episodio por el de serie
item_local.url = url_tvshow #Cambiamos url de episodio por el de serie
else:
item_local.url = url_id #Cambiamos url de episodio por el de serie
@@ -1165,8 +1209,13 @@ def listado_busqueda(item):
item_local.title = real_title_mps.replace('-', ' ').title().strip() #Esperemos que el nuevo título esté bien
item_local.contentSerieName = item_local.title
#Agrega el item local a la lista itemlist
itemlist.append(item_local.clone())
#Ahora se filtra por idioma, si procede, y se pinta lo que vale. Excluye categorías en otros idiomas.
if config.get_setting('filter_languages', channel_py) > 0 and item.extra2 != 'categorias':
itemlist = filtertools.get_link(itemlist, item_local, list_language)
else:
itemlist.append(item_local.clone()) #Si no, pintar pantalla
cnt_title = len(itemlist) #Contador de líneas añadidas
#logger.debug(item_local)
@@ -1180,7 +1229,7 @@ def listado_busqueda(item):
item, itemlist = generictools.post_tmdb_listado(item, itemlist)
if post:
itemlist.append(item.clone(channel=item.channel, action="listado_busqueda", title="[COLOR gold][B]Pagina siguiente >> [/B][/COLOR]" + str(post_num) + " de " + str(total_pag), thumbnail=get_thumb("next.png"), title_lista=title_lista, cnt_pag=cnt_pag))
itemlist.append(item.clone(channel=item.channel, action="listado_busqueda", title="[COLOR gold][B]Pagina siguiente >> [/B][/COLOR]" + str(post_num) + " de " + str(total_pag), thumbnail=get_thumb("next.png"), title_lista=title_lista, cnt_pag=cnt_pag, language=''))
#logger.debug("Titulos: " + str(len(itemlist)) + " Matches: " + str(len(matches)) + " Post: " + str(item.post) + " / " + str(post_actual) + " / " + str(total_pag))
@@ -1189,6 +1238,10 @@ def listado_busqueda(item):
def findvideos(item):
logger.info()
itemlist = []
itemlist_t = [] #Itemlist total de enlaces
itemlist_f = [] #Itemlist de enlaces filtrados
if not item.language:
item.language = ['CAST'] #Castellano por defecto
#logger.debug(item)
@@ -1362,13 +1415,12 @@ def findvideos(item):
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
data = data.replace("$!", "#!").replace("'", "\"").replace("ñ", "ñ").replace("//pictures", "/pictures")
#Añadimos el tamaño para todos
size = scrapertools.find_single_match(data, '<div class="entry-left".*?><a href=".*?span class=.*?>Size:<\/strong>?\s(\d+?\.?\d*?\s\w[b|B])<\/span>')
if not size: #Para planetatorrent
size = scrapertools.find_single_match(data, '<div class="fichas-box"><div class="entry-right"><div style="[^"]+"><span class="[^"]+"><strong>Size:<\/strong>?\s(\d+?\.?\d*?\s\w[b|B])<\/span>')
size = size.replace(".", ",") #sustituimos . por , porque Unify lo borra
if not size:
size = scrapertools.find_single_match(item.quality, '\s\[(\d+,?\d*?\s\w\s?[b|B])\]')
size = scrapertools.find_single_match(item.quality, '\s?\[(\d+.?\d*?\s?\w\s?[b|B])\]')
if not size:
size = generictools.get_torrent_size(item.url) #Buscamos el tamaño en el .torrent
if size:
@@ -1400,7 +1452,7 @@ def findvideos(item):
quality = '%s [%s]' % (item_local.quality, size) #Agregamos size al final del título
else:
quality = item_local.quality
item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (quality, str(item_local.language)) #Preparamos título de Torrent
item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (quality, str(item_local.language)) #Preparamos título de Torrent
#Preparamos título y calidad, quitamos etiquetas vacías
item_local.title = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.title)
@@ -1408,18 +1460,32 @@ def findvideos(item):
item_local.title = item_local.title.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
quality = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', quality)
quality = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', quality)
quality = quality.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
item_local.quality = quality.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
item_local.alive = "??" #Calidad del link sin verificar
item_local.action = "play" #Visualizar vídeo
item_local.server = "torrent" #Servidor
itemlist.append(item_local.clone(quality=quality)) #Pintar pantalla
item_local.alive = "??" #Calidad del link sin verificar
item_local.action = "play" #Visualizar vídeo
item_local.server = "torrent" #Servidor
itemlist_t.append(item_local.clone()) #Pintar pantalla, si no se filtran idiomas
# Requerido para FilterTools
if config.get_setting('filter_languages', channel_py) > 0: #Si hay idioma seleccionado, se filtra
itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío
logger.debug("TORRENT: " + item_local.url + " / title gen/torr: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / tamaño: " + size + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName)
#logger.debug(item_local)
if len(itemlist_f) > 0: #Si hay entradas filtradas...
itemlist.extend(itemlist_f) #Pintamos pantalla filtrada
else:
if config.get_setting('filter_languages', channel_py) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ...
thumb_separador = get_thumb("next.png") #... pintamos todo con aviso
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador))
itemlist.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado
itemlist_t = [] #Itemlist total de enlaces
itemlist_f = [] #Itemlist de enlaces filtrados
# VER vídeos, descargar vídeos un link, o múltiples links
data = scrapertools.find_single_match(data, '<div id="tab1" class="tab_content"(.*?<\/ul>(?:<div.*?>)?<\/div><\/div><\/div>)') #Seleccionar el bloque para evitar duplicados
@@ -1442,7 +1508,9 @@ def findvideos(item):
for logo, servidor, idioma, calidad, enlace, title in enlaces_ver:
if ver_enlaces_veronline == 0: #Si no se quiere Ver Online, se sale del bloque
break
if "ver" in title.lower():
item_local = item.clone()
servidor = servidor.replace("streamin", "streaminto")
if servidor.capitalize() in excluir_enlaces_veronline: #Servidor excluido, pasamos al siguiente
@@ -1503,11 +1571,25 @@ def findvideos(item):
item_local.quality = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.quality)
item_local.quality = item_local.quality.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
itemlist.append(item_local.clone())
itemlist_t.append(item_local.clone()) #Pintar pantalla, si no se filtran idiomas
# Requerido para FilterTools
if config.get_setting('filter_languages', channel_py) > 0: #Si hay idioma seleccionado, se filtra
itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío
except:
logger.error('ERROR al procesar enlaces VER DIRECTOS: ' + servidor + ' / ' + enlace)
if len(itemlist_f) > 0: #Si hay entradas filtradas...
itemlist.extend(itemlist_f) #Pintamos pantalla filtrada
else:
if config.get_setting('filter_languages', channel_py) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ...
thumb_separador = get_thumb("next.png") #... pintamos todo con aviso
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador))
itemlist.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado
itemlist_t = [] #Itemlist total de enlaces
itemlist_f = [] #Itemlist de enlaces filtrados
#Ahora vemos los enlaces de DESCARGAR
if len(enlaces_descargar) > 0 and ver_enlaces_descargas != 0:
@@ -1525,6 +1607,7 @@ def findvideos(item):
break
if "Ver" not in title:
item_local = item.clone()
servidor = servidor.replace("uploaded", "uploadedto")
partes = enlace.split(" ") #Partimos el enlace en cada link de las partes
title = "Descarga" #Usamos la palabra reservada de Unify para que no formatee el título
@@ -1607,11 +1690,26 @@ def findvideos(item):
item_local.quality = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.quality)
item_local.quality = item_local.quality.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
itemlist.append(item_local.clone())
itemlist_t.append(item_local.clone()) #Pintar pantalla, si no se filtran idiomas
# Requerido para FilterTools
if config.get_setting('filter_languages', channel_py) > 0: #Si hay idioma seleccionado, se filtra
itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío
except:
logger.error('ERROR al procesar enlaces DESCARGAR DIRECTOS: ' + servidor + ' / ' + enlace)
if len(itemlist_f) > 0: #Si hay entradas filtradas...
itemlist.extend(itemlist_f) #Pintamos pantalla filtrada
else:
if config.get_setting('filter_languages', channel_py) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ...
thumb_separador = get_thumb("next.png") #... pintamos todo con aviso
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador))
itemlist.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado
# Requerido para AutoPlay
autoplay.start(itemlist, item) #Lanzamos Autoplay
return itemlist
@@ -1926,6 +2024,32 @@ def episodios(item):
return itemlist
def lookup_idiomas_paginacion(item, url, title, calidad, list_language):
    """Check a scraped entry against the user's language filter.

    Derives the entry's language list from textual markers found in
    *title*, *url* and *calidad* (the quality string), stores it in
    ``item.language`` (side effect on the caller's item), and returns
    ``False`` when a language filter is configured and the entry does
    not match it, ``True`` otherwise.

    :param item: current Item; its ``language`` attribute is overwritten here
    :param url: scraped URL of the entry
    :param title: scraped title of the entry
    :param calidad: scraped quality text of the entry
    :param list_language: language list understood by filtertools
    :return: ``True`` to keep the entry, ``False`` to skip it
    """
    logger.info()
    estado = True  # keep the entry unless the filter rejects it
    item.language = []  # rebuilt from scratch for every entry
    itemlist = []  # scratch list fed to filtertools.get_link
    # Original-version / subtitled markers in the title, URL or quality text.
    # NOTE(review): the bare '"vo" in title.lower()' test also matches any word
    # that merely contains "vo" (e.g. "volver") -- confirm this is intended.
    if "[vos" in title.lower() or "v.o.s" in title.lower() or "vo" in title.lower() or "subs" in title.lower() or ".com/pelicula/" in url or ".com/series-vo" in url or "-vo/" in url or "vos" in calidad.lower() or "vose" in calidad.lower() or "v.o.s" in calidad.lower() or "sub" in calidad.lower() or ".com/peliculas-vo" in item.url:
        item.language += ["VOS"]
    # Latin-American dub markers.
    if "latino" in title.lower() or "argentina" in title.lower() or "-latino/" in url or "latino" in calidad.lower() or "argentina" in calidad.lower():
        item.language += ["LAT"]
    if item.language == []:
        item.language = ['CAST']  # default: Castilian Spanish
    # Apply the language filter, if one is configured, except for category
    # listings; an empty filtered result means the entry must be discarded.
    if config.get_setting('filter_languages', channel_py) > 0 and item.extra2 != 'categorias':
        itemlist = filtertools.get_link(itemlist, item, list_language)
        if len(itemlist) == 0:
            estado = False
    # Back to the next action in the channel with the keep/skip verdict
    return estado
def actualizar_titulos(item):
logger.info()

View File

@@ -77,10 +77,10 @@ def submenu(item):
url=host % "list/ultimas-peliculas" + ext, text_color=color2,
thumbnail=host % "list/ultimas-peliculas/thumbnail_167x250.jpg",
fanart=host % "list/ultimas-peliculas/background_1080.jpg", viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Destacados", action="entradas",
url=host % "list/000-novedades" + ext, text_color=color2,
thumbnail=host % "list/screener/thumbnail_167x250.jpg",
fanart=host % "list/screener/background_1080.jpg", viewmode="movie_with_plot"))
# itemlist.append(Item(channel=item.channel, title="Destacados", action="entradas",
# url=host % "list/000-novedades" + ext, text_color=color2,
# thumbnail=host % "list/screener/thumbnail_167x250.jpg",
# fanart=host % "list/screener/background_1080.jpg", viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Más vistas", action="entradas",
url=host % "list/peliculas-mas-vistas" + ext, text_color=color2,
thumbnail=host % "list/peliculas-mas-vistas/thumbnail_167x250.jpg",
@@ -167,7 +167,7 @@ def entradas(item):
#if child['year']:
# title += " (" + child['year'] + ")"
#title += quality
thumbnail += "|User-Agent=%s" % httptools.get_user_agent
video_urls = []
for k, v in child.get("video", {}).items():
for vid in v:
@@ -232,6 +232,7 @@ def entradasconlistas(item):
thumbnail = host % "list/%s/thumbnail_167x250.jpg" % child["id"]
fanart = host % "list/%s/background_1080.jpg" % child["id"]
thumbnail += "|User-Agent=%s" % httptools.get_user_agent
itemlist.append(Item(channel=item.channel, action=action, title=title,
url=url, thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, show=show,
infoLabels=infolabels, contentTitle=fulltitle, viewmode="movie_with_plot",
@@ -295,7 +296,7 @@ def entradasconlistas(item):
for vid in v:
video_urls.append(["http://%s.pelisipad.com/s/transcoder/%s" % (vid["server"], vid["url"]) + "?%s",
vid["height"]])
thumbnail += "|User-Agent=%s" % httptools.get_user_agent
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, video_urls=video_urls,
thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, infoLabels=infolabels,
contentTitle=fulltitle, viewmode="movie_with_plot", text_color=color3))
@@ -347,6 +348,7 @@ def series(item):
if child.get("numberOfSeasons") and "- Temporada" not in title:
title += " (Temps:%s)" % child['numberOfSeasons']
thumbnail += "|User-Agent=%s" % httptools.get_user_agent
itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, text_color=color3,
thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, infoLabels=infolabels,
contentTitle=fulltitle, viewmode="movie_with_plot", show=fulltitle))
@@ -414,6 +416,7 @@ def episodios(item):
title = fulltitle = child['name'].rsplit(" ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1]
except:
title = fulltitle = child['id'].replace("-", " ")
thumbnail += "|User-Agent=%s" % httptools.get_user_agent
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail,
fanart=fanart, fulltitle=fulltitle, contentTitle=fulltitle, viewmode="movie",
show=item.show, infoLabels=infoLabels, video_urls=video_urls, extra="episodios",
@@ -491,6 +494,7 @@ def nuevos_cap(item):
else:
title = fulltitle = child['name']
thumbnail += "|User-Agent=%s" % httptools.get_user_agent
itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
fanart=fanart, fulltitle=fulltitle, contentTitle=fulltitle, viewmode="movie",
show=item.fulltitle, infoLabels=infoLabels, video_urls=video_urls, extra="nuevos",
@@ -571,6 +575,7 @@ def listas(item):
infolabels['title'] = title
try:
from core import videolibrarytools
thumbnail += "|User-Agent=%s" % httptools.get_user_agent
new_item = item.clone(title=title, url=url, fulltitle=title, fanart=fanart, extra="findvideos",
thumbnail=thumbnail, infoLabels=infolabels, category="Cine")
videolibrarytools.add_movie(new_item)

View File

@@ -17,7 +17,7 @@
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"default": true,
"enabled": true,
"visible": true
},
@@ -29,6 +29,22 @@
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"CAST",
"LAT",
"VO",
"VOS",
"VOSE"
]
},
{
"id": "include_in_newest_torrent",
"type": "bool",
@@ -36,6 +52,35 @@
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "timeout_downloadpage",
"type": "list",
"label": "Timeout (segs.) en descarga de páginas o verificación de servidores",
"default": 10,
"enabled": true,
"visible": true,
"lvalues": [
"None",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10"
]
}
]
}

File diff suppressed because it is too large Load Diff

View File

@@ -9,7 +9,6 @@
"banner": "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/pelisplanetbaner.png",
"categories": [
"movie",
"tvshow",
"direct",
"vos"
],

View File

@@ -57,15 +57,15 @@ def mainlist(item):
viewcontent='movies', thumbnail=thumbnail % 'generos',
viewmode="movie_with_plot", url=host + 'generos/'))
itemlist.append(Item(channel=item.channel, title="Filtrar por Idiomas",
itemlist.append(Item(channel=item.channel, title="[COLOR yellow][Filtrar por Idiomas][/COLOR]",
fanart=fanart_host, folder=False, text_color=color3,
text_blod=True, thumbnail=thumbnail % 'idiomas'))
itemlist.append(item.clone(title="Castellano", action="peliculas", text_blod=True,
itemlist.append(item.clone(title=" Castellano", action="peliculas", text_blod=True,
viewcontent='movies', thumbnail=thumbnail % 'castellano',
viewmode="movie_with_plot", url=host + 'idioma/castellano/'))
itemlist.append(item.clone(title="Latino", action="peliculas", text_blod=True,
itemlist.append(item.clone(title=" Latino", action="peliculas", text_blod=True,
viewcontent='movies', thumbnail=thumbnail % 'latino',
viewmode="movie_with_plot", url=host + 'idioma/latino/'))
@@ -173,15 +173,16 @@ def peliculas(item):
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
patron_todas = '<div class="home-movies">(.*?)<footer>'
data = scrapertools.find_single_match(data, patron_todas)
patron = 'col-sm-5"><a href="([^"]+)".+?'
patron += 'browse-movie-link-qd.*?>([^>]+)</.+?'
patron += '<p>([^>]+)</p>.+?'
patron += 'title one-line">([^>]+)</h2>.+?'
patron = 'col-sm-5".*?href="([^"]+)".+?'
patron += 'browse-movie-link-qd.*?>([^<]+)</.+?'
patron += '<p>([^<]+)</p>.+?'
patron += 'title one-line">([^<]+)</h2>.+?'
patron += 'title-category">([^<]+)</span>.*?'
patron += 'img-responsive" src="([^"]+)".*?'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, quality, year, scrapedtitle, scrapedthumbnail in matches:
for scrapedurl, quality, year, scrapedtitle, category, scrapedthumbnail in matches:
if '/ ' in scrapedtitle:
scrapedtitle = scrapedtitle.partition('/ ')[2]
title = scrapedtitle
@@ -192,7 +193,8 @@ def peliculas(item):
itemlist.append(Item(channel=item.channel,
action="findvideos",
title=title, url=url,
title="%s [COLOR yellowgreen][%s][/COLOR] [COLOR violet][%s][/COLOR]" % (title, category, year),
url=url,
quality=quality,
thumbnail=thumbnail,
contentTitle=contentTitle,

View File

@@ -15,7 +15,7 @@ from core.item import Item, InfoLabels
from platformcode import config, logger
from channels import filtertools
host = "https://pepecine.io"
host = "https://pepecine.me" # "https://pepecine.io"
IDIOMAS = {'c': 'Castellano', 'i': 'Inglés', 'l': 'Latino', 's': 'VOSE', 'v': 'VO'}
list_idiomas = IDIOMAS.values()
@@ -30,34 +30,34 @@ def mainlist(item):
itemlist.append(item.clone(
title = " Últimas películas",
url = host + '/las-peliculas-online',
url = host + '/mis-peliculas-online',
action = 'list_latest',
type = 'movie'))
itemlist.append(item.clone(title = " Películas por género",
url = host + '/ver-pelicula',
url = host + '/ver-la-pelicula',
action = 'genero',
type = 'movie'))
itemlist.append(item.clone(title = " Todas las películas",
url = host + '/ver-pelicula',
url = host + '/ver-la-pelicula',
action = 'list_all',
type = 'movie'))
itemlist.append(Item(title = "Series"))
itemlist.append(item.clone(title = " Últimos episodios",
url = host + '/las-series-online',
url = host + '/mis-series-online',
action = 'list_latest',
type = 'series'))
itemlist.append(item.clone(title = " Series por género",
url = host + '/ver-serie-tv',
url = host + '/ver-la-serie',
action = 'genero',
type = 'series'))
itemlist.append(item.clone(title = " Todas las series",
url = host + '/ver-serie-tv',
url = host + '/ver-la-serie',
action ='list_all',
type = 'series'))
@@ -251,11 +251,11 @@ def list_all(item):
new_item.contentTitle = element['title']
new_item.fulltitle = element['title']
if new_item.extra != "links_encoded":
new_item.url = host + "/ver-pelicula/" + str(element['id'])
new_item.url = host + "/ver-la-pelicula/" + str(element['id'])
elif item.type == 'series':
new_item.action = 'seasons'
new_item.url = host + "/ver-serie-tv/" + str(element['id'])
new_item.url = host + "/ver-la-serie/" + str(element['id'])
new_item.show = element['title']
new_item.contentType = 'tvshow'
new_item.contentSerieName = element['title']
@@ -334,8 +334,8 @@ def findvideos(item):
if item.extra != "links_encoded":
data = httptools.downloadpage(item.url).data
patron = "renderTab\.bind.*?'([^']+)"
patron += '.*?<img data-bind="[^"]+"><b>([^<]*)'
patron = "renderTab\.bind[^']+'([^']+)"
patron += '.*?<b[^>]*>([^<]*)<img src='
patron += '.*?<td [^>]*>([^<]*)'
patron += '.*?<td [^>]*>([^<]*)'

View File

@@ -1,7 +1,7 @@
{
"id": "plusdede",
"name": "Plusdede",
"active": true,
"active": false,
"adult": false,
"language": ["cast"],
"thumbnail": "https://s18.postimg.cc/e17e98eqh/6_-_4_Isbv_Q3.png",

View File

@@ -5,8 +5,10 @@ import re
from core import httptools
from core import jsontools
from core import scrapertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from megaserver import Client
from platformcode import config, logger, platformtools
__modo_grafico__ = config.get_setting('modo_grafico', 'puyasubs')
__perfil__ = config.get_setting('perfil', "puyasubs")
@@ -20,39 +22,36 @@ if __perfil__ < 3:
else:
color1 = color2 = color3 = color4 = color5 = ""
host = "http://puya.si"
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(Item(channel=item.channel, action="listado", title="Novedades Anime", thumbnail=item.thumbnail,
url="http://puya.si/?cat=4", text_color=color1))
url= host + "/?cat=4", text_color=color1))
itemlist.append(Item(channel=item.channel, action="listado", title="Novedades Doramas", thumbnail=item.thumbnail,
url="http://puya.si/?cat=142", text_color=color1))
url= host + "/?cat=142", text_color=color1))
itemlist.append(Item(channel=item.channel, action="", title="Descargas", text_color=color2))
itemlist.append(Item(channel=item.channel, action="descargas", title=" Descargas Animes y Doramas en proceso",
thumbnail=item.thumbnail, url="http://puya.si/?page_id=25501", text_color=color1))
thumbnail=item.thumbnail, url= host + "/?page_id=25501", text_color=color1))
itemlist.append(Item(channel=item.channel, action="descargas", title=" Descargas Animes Finalizados",
thumbnail=item.thumbnail, url="http://puya.si/?page_id=15388", text_color=color1))
thumbnail=item.thumbnail, url= host + "/?page_id=15388", text_color=color1))
itemlist.append(Item(channel=item.channel, action="letra", title=" Descargas Animes Finalizados por Letra",
thumbnail=item.thumbnail, url="http://puya.si/?page_id=15388", text_color=color1))
thumbnail=item.thumbnail, url= host + "/?page_id=15388", text_color=color1))
itemlist.append(Item(channel=item.channel, action="descargas", title=" Descargas Doramas Finalizados",
thumbnail=item.thumbnail, url="http://puya.si/?page_id=25507", text_color=color1))
thumbnail=item.thumbnail, url= host + "/?page_id=25507", text_color=color1))
itemlist.append(Item(channel=item.channel, action="descargas", title=" Descargas Películas y Ovas",
thumbnail=item.thumbnail, url="http://puya.si/?page_id=25503", text_color=color1))
thumbnail=item.thumbnail, url= host + "/?page_id=25503", text_color=color1))
itemlist.append(Item(channel=item.channel, action="torrents", title="Lista de Torrents", thumbnail=item.thumbnail,
url="https://www.frozen-layer.com/buscar/descargas", text_color=color1))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar anime/dorama/película",
thumbnail=item.thumbnail, url="http://puya.si/?s=", text_color=color3))
thumbnail=item.thumbnail, url= host + "/?s=", text_color=color3))
itemlist.append(item.clone(title="Configurar canal", action="configuracion", text_color=color5, folder=False))
return itemlist
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
@@ -73,9 +72,7 @@ def search(item, texto):
def listado(item):
logger.info()
itemlist = list()
data = httptools.downloadpage(item.url).data
bloques = scrapertools.find_multiple_matches(data, '<h2 class="entry-title">(.*?)</article>')
patron = 'href="([^"]+)".*?>(.*?)</a>.*?(?:<span class="bl_categ">(.*?)|</span>)</footer>'
@@ -96,27 +93,22 @@ def listado(item):
itemlist.append(Item(channel=item.channel, action="findvideos", url=url, title=title, thumbnail=thumb,
contentTitle=contenttitle, show=contenttitle, contentType=tipo,
infoLabels={'filtro': filtro_tmdb}, text_color=color1))
if ("cat=4" in item.url or item.extra == "busqueda") and not item.extra == "novedades":
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
next_page = scrapertools.find_single_match(data, "<span class='current'>.*?<a href='([^']+)'")
if next_page:
next_page = next_page.replace("&#038;", "&")
itemlist.append(Item(channel=item.channel, action="listado", url=next_page, title=">> Página Siguiente",
thumbnail=item.thumbnail, extra=item.extra, text_color=color2))
return itemlist
def descargas(item):
logger.info()
itemlist = list()
if not item.pagina:
item.pagina = 0
data = httptools.downloadpage(item.url).data
patron = '<li><a href="(http://puya.si/\?page_id=\d+|http://safelinking.net/[0-9A-z]+)">(.*?)</a>'
if item.letra:
@@ -130,32 +122,25 @@ def descargas(item):
.replace("[Puya+] ", "")
contenttitle = re.sub(r'(\[[^\]]*\])', '', contenttitle).strip()
filtro_tmdb = {"original_language": "ja"}.items()
tipo = "tvshow"
if "page_id=25503" in item.url:
tipo = "movie"
action = "findvideos"
if "safelinking" in url:
action = "extract_safe"
itemlist.append(Item(channel=item.channel, action=action, url=url, title=title, contentTitle=contenttitle,
show=contenttitle, contentType=tipo, infoLabels={'filtro': filtro_tmdb},
text_color=color1))
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
if len(matches) > item.pagina + 20:
pagina = item.pagina + 20
itemlist.append(Item(channel=item.channel, action="descargas", url=item.url, title=">> Página Siguiente",
thumbnail=item.thumbnail, pagina=pagina, letra=item.letra, text_color=color2))
return itemlist
def letra(item):
logger.info()
itemlist = list()
data = httptools.downloadpage(item.url).data
patron = '<li>(?:<strong>|)([A-z#]{1})(?:</strong>|)</li>'
@@ -163,20 +148,16 @@ def letra(item):
for match in matches:
itemlist.append(Item(channel=item.channel, title=match, action="descargas", letra=match, url=item.url,
thumbnail=item.thumbnail, text_color=color1))
return itemlist
def torrents(item):
logger.info()
itemlist = list()
if not item.pagina:
item.pagina = 0
post = "utf8=%E2%9C%93&busqueda=puyasubs&search=Buscar&tab=anime&con_seeds=con_seeds"
data = httptools.downloadpage(item.url, post).data
patron = "<td>.*?href='([^']+)' title='descargar torrent'>.*?title='informacion de (.*?)'.*?<td class='fecha'>.*?<td>(.*?)</td>" \
".*?<span class=\"stats\d+\">(\d+)</span>.*?<span class=\"stats\d+\">(\d+)</span>"
matches = scrapertools.find_multiple_matches(data, patron)
@@ -184,20 +165,15 @@ def torrents(item):
contentTitle = title
if "(" in contentTitle:
contentTitle = contentTitle.split("(")[0]
size = size.strip()
filtro_tmdb = {"original_language": "ja"}.items()
title += " [COLOR %s][Semillas:%s[/COLOR]|[COLOR %s]Leech:%s[/COLOR]|%s]" % (
color4, seeds, color5, leechers, size)
url = "https://www.frozen-layer.com" + url
itemlist.append(Item(channel=item.channel, action="play", url=url, title=title, contentTitle=contentTitle,
server="torrent", show=contentTitle, contentType="tvshow", text_color=color1,
infoLabels={'filtro': filtro_tmdb}))
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
if len(matches) > item.pagina + 25:
pagina = item.pagina + 25
itemlist.append(Item(channel=item.channel, action="torrents", url=item.url, title=">> Página Siguiente",
@@ -208,43 +184,39 @@ def torrents(item):
next_page = "https://www.frozen-layer.com" + next_page
itemlist.append(Item(channel=item.channel, action="torrents", url=next_page, title=">> Página Siguiente",
thumbnail=item.thumbnail, pagina=0, text_color=color2))
return itemlist
def findvideos(item):
logger.info()
if item.infoLabels["tmdb_id"] and not item.infoLabels["plot"]:
from core import tmdb
tmdb.set_infoLabels_item(item, True, idioma_busqueda="en")
itemlist = list()
itemlist = []
data = httptools.downloadpage(item.url).data
data2 = data.replace("\n","")
idiomas = scrapertools.find_single_match(data, 'Subtitulo:\s*(.*?)<br />')
calidades = ['720p', '1080p']
torrentes = scrapertools.find_multiple_matches(data, '<a href="(https://www.frozen-layer.com/descargas[^"]+)"')
calidades = ['1080p', '720p']
torrentes = scrapertools.find_multiple_matches(data, '<a href="((?:https://www.frozen-layer.com/descargas[^"]+|https://nyaa.si/view/[^"]+))"')
if torrentes:
for i, enlace in enumerate(torrentes):
title = "Ver por Torrent %s" % idiomas
if ">720p" in data and ">1080p" in data:
try:
title = "[%s] %s" % (calidades[i], title)
except:
pass
itemlist.append(item.clone(title=title, action="play", url=enlace, server="torrent"))
if ">720p" in data2 and ">1080p" in data2:
title = "[%s] %s" % (calidades[i], title)
if "nyaa" in enlace:
data1 = httptools.downloadpage(url=enlace).data
enlace = "https://nyaa.si" + scrapertools.find_single_match(data1, 'a href="(/do[^"]+)')
itemlist.append(item.clone(title=title, action="play", url=enlace, server="torrent"))
enlace = scrapertools.find_single_match(data1, '<a href="(magnet[^"]+)')
itemlist.append(item.clone(title=title, action="play", url=enlace, server="torrent"))
#itemlist.append(item.clone(title=title, action="play", url=enlace, server="torrent"))
onefichier = scrapertools.find_multiple_matches(data, '<a href="(https://1fichier.com/[^"]+)"')
if onefichier:
for i, enlace in enumerate(onefichier):
title = "Ver por 1fichier %s" % idiomas
if ">720p" in data and ">1080p" in data:
if ">720p" in data and ">1080p" in data2:
try:
title = "[%s] %s" % (calidades[i], title)
except:
pass
itemlist.append(item.clone(title=title, action="play", url=enlace, server="onefichier"))
safelink = scrapertools.find_multiple_matches(data, '<a href="(http(?:s|)://safelinking.net/[^"]+)"')
if safelink:
for i, safe in enumerate(safelink):
@@ -276,17 +248,14 @@ def findvideos(item):
except:
pass
itemlist.append(item.clone(title=title, action=action, url=enlace, server=server))
return itemlist
def carpeta(item):
logger.info()
itemlist = list()
if item.server == "onefichier":
data = httptools.downloadpage(item.url).data
patron = '<tr>.*?<a href="([^"]+)".*?>(.*?)</a>.*?<td class="normal">(.*?)</td>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, size in matches:
@@ -295,11 +264,7 @@ def carpeta(item):
server="onefichier", text_color=color1, thumbnail=item.thumbnail,
infoLabels=item.infoLabels))
else:
from megaserver import Client
from platformcode import platformtools
c = Client(url=item.url)
files = c.get_files()
c.stop()
for enlace in files:
@@ -308,7 +273,6 @@ def carpeta(item):
Item(channel=item.channel, title=enlace["name"], url=item.url + "|" + file_id, action="play",
server="mega", text_color=color1, thumbnail=item.thumbnail,
infoLabels=item.infoLabels))
itemlist.sort(key=lambda item: item.title)
return itemlist
@@ -316,16 +280,13 @@ def carpeta(item):
def extract_safe(item):
logger.info()
if item.infoLabels["tmdb_id"] and not item.infoLabels["plot"]:
from core import tmdb
tmdb.set_infoLabels_item(item, True, idioma_busqueda="en")
itemlist = list()
hash = item.url.rsplit("/", 1)[1]
headers = [['Content-Type', 'application/json;charset=utf-8']]
post = jsontools.dump({"hash": hash})
data = httptools.downloadpage("http://safelinking.net/v1/protected", post, headers).data
data = jsontools.load(data)
for link in data.get("links"):
enlace = link["url"]
domain = link["domain"]
@@ -335,29 +296,11 @@ def extract_safe(item):
server = "mega"
if "/#F!" in enlace:
action = "carpeta"
elif "1fichier" in domain:
server = "onefichier"
if "/dir/" in enlace:
action = "carpeta"
itemlist.append(item.clone(title=title, action=action, url=enlace, server=server))
return itemlist
def play(item):
logger.info()
itemlist = list()
if item.server == "torrent" and "frozen" in item.url and not item.url.endswith(".torrent"):
data = httptools.downloadpage(item.url).data
enlace = scrapertools.find_single_match(data, "<div id='descargar_torrent'>.*?href='([^']+)'")
if enlace:
itemlist.append(item.clone(url=enlace))
else:
itemlist.append(item)
return itemlist
@@ -365,7 +308,7 @@ def newest(categoria):
logger.info()
item = Item()
try:
item.url = "http://puya.si/?cat=4"
item.url = host + "/?cat=4"
item.extra = "novedades"
itemlist = listado(item)
@@ -373,12 +316,10 @@ def newest(categoria):
itemlist.pop()
for it in itemlist:
it.contentTitle = it.title
# Se captura la excepción, para no interrumpir al canal novedades si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -304,10 +304,12 @@ if xbmcgui:
def __init__(self, *args, **kwargs):
logger.debug()
if xbmcgui.__version__ == "1.2":
self.setCoordinateResolution(1)
else:
self.setCoordinateResolution(5)
#### Compatibilidad con Kodi 18 ####
if config.get_platform(True)['num_version'] < 18:
if xbmcgui.__version__ == "1.2":
self.setCoordinateResolution(1)
else:
self.setCoordinateResolution(5)
self.show = kwargs.get("show")
self.channel = kwargs.get("channel")

View File

@@ -9,11 +9,11 @@ from channelselector import get_thumb
from channels import autoplay
from channels import filtertools
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from lib import jsunpack
from platformcode import config, logger, platformtools

View File

@@ -394,6 +394,10 @@ def show_result(item):
return channel.search(item, tecleado)
else:
# Mostrar resultados: todos juntos
if item.infoPlus: #Si viene de una ventana de InfoPlus, hay que salir de esta forma...
del item.infoPlus #si no, se mete en un bucle mostrando la misma pantalla,
item.title = item.title.strip() #dando error en "handle -1"
return getattr(channel, item.action)(item)
try:
from platformcode import launcher
launcher.run(item)
@@ -489,7 +493,7 @@ def do_search(item, categories=None):
if categories:
# Si no se ha seleccionado torrent no se muestra
if "torrent" not in categories:
if "torrent" not in categories and "infoPlus" not in categories:
if "torrent" in channel_parameters["categories"]:
logger.info("%s -torrent-" % basename_without_extension)
continue
@@ -601,6 +605,8 @@ def do_search(item, categories=None):
for i in element["itemlist"]:
if i.action:
title = " " + i.title
if "infoPlus" in categories: #Se manrca vi viene de una ventana de InfoPlus
i.infoPlus = True
itemlist.append(i.clone(title=title, from_action=i.action, from_channel=i.channel,
channel="search", action="show_result", adult=element["adult"]))

View File

@@ -1,61 +0,0 @@
{
"id": "seriecanal",
"name": "Seriecanal",
"active": false,
"adult": false,
"language": ["cast"],
"thumbnail": "http://i.imgur.com/EwMK8Yd.png",
"banner": "seriecanal.png",
"categories": [
"tvshow",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "user",
"type": "text",
"label": "Usuario",
"color": "0xFFd50b0b",
"enabled": true,
"visible": true
},
{
"id": "password",
"type": "text",
"label": "Contraseña",
"color": "0xFFd50b0b",
"enabled": true,
"visible": true,
"hidden": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 2,
"enabled": true,
"visible": true,
"lvalues": [
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
}
]
}

View File

@@ -1,226 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urllib
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from platformcode import config, logger
__modo_grafico__ = config.get_setting('modo_grafico', "seriecanal")
__perfil__ = config.get_setting('perfil', "seriecanal")
# Fijar perfil de color
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']]
color1, color2, color3 = perfil[__perfil__]
host = "https://www.seriecanal.com/"
def login():
    """Log the user into seriecanal.com.

    Returns a (success, message) tuple; message is empty on success and
    carries a user-facing error text otherwise.
    """
    logger.info()
    # An active session shows a "Cerrar Sesion" link on the home page.
    if "Cerrar Sesion" in httptools.downloadpage(host).data:
        return True, ""
    user = config.get_setting("user", "seriecanal")
    pwd = config.get_setting("password", "seriecanal")
    if user == "" or pwd == "":
        return False, 'Regístrate en www.seriecanal.com e introduce tus datos en "Configurar Canal"'
    payload = urllib.urlencode({'username': user, 'password': pwd})
    response = httptools.downloadpage(host + "index.php?page=member&do=login&tarea=acceder", post=payload).data
    if "Bienvenid@, se ha identificado correctamente en nuestro sistema" in response:
        return True, ""
    return False, "Error en el login. El usuario y/o la contraseña no son correctos"
def mainlist(item):
    """Build the channel's main menu; shows an error entry when login fails."""
    logger.info()
    item.text_color = color1
    logged_in, message = login()
    if logged_in:
        entries = [
            item.clone(action="series", title="Últimos episodios", url=host),
            item.clone(action="genero", title="Series por género"),
            item.clone(action="alfabetico", title="Series por orden alfabético"),
            item.clone(action="search", title="Buscar..."),
        ]
    else:
        entries = [item.clone(action="", title=message, text_color="red")]
    entries.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
    return entries
def configuracion(item):
    """Open the channel settings dialog and refresh the listing afterwards."""
    from platformcode import platformtools
    result = platformtools.show_channel_settings()
    platformtools.itemlist_refresh()
    return result
def search(item, texto):
    """Search series by keyword via the site's category POST endpoint."""
    logger.info()
    item.url = (host + "index.php?page=portada&do=category&method=post"
                "&category_id=0&order=C_Create&view=thumb&pgs=1&p2=1")
    try:
        item.extra = "keyserie=" + texto
        return series(item)
    except:
        # Swallow everything so a broken channel never aborts the global search.
        import sys
        for trace_line in sys.exc_info():
            logger.error("%s" % trace_line)
        return []
def genero(item):
    """List the genre tags taken from the home page's tag cloud."""
    logger.info()
    page = httptools.downloadpage(host).data
    cloud = scrapertools.find_single_match(page, '<ul class="tag-cloud">(.*?)</ul>')
    links = scrapertools.find_multiple_matches(cloud, '<a.*?href="([^"]+)">([^"]+)</a>')
    return [item.clone(action="series",
                       title=label.capitalize(),
                       url=urlparse.urljoin(host, href))
            for href, label in links]
def alfabetico(item):
    """List the alphabetical index links found in the pagination bar."""
    logger.info()
    page = httptools.downloadpage(host).data
    bar = scrapertools.find_single_match(
        page, '<ul class="pagination pagination-sm" style="margin:5px 0;">(.*?)</ul>')
    links = scrapertools.find_multiple_matches(bar, '<a.*?href="([^"]+)">([^"]+)</a>')
    return [item.clone(action="series", title=label, url=urlparse.urljoin(host, href))
            for href, label in links]
def series(item):
    """Parse a listing page into per-show items ("title - season - episode").

    ``item.extra`` carries a POST payload (the search keyword) when coming
    from search(); otherwise the page is fetched with a plain GET.
    """
    logger.info()
    itemlist = []
    item.infoLabels = {}
    item.text_color = color2
    if item.extra != "":
        data = httptools.downloadpage(item.url, post=item.extra).data
    else:
        data = httptools.downloadpage(item.url).data
    # Collapse whitespace so the multi-line patterns below can match.
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron = '<div class="item-inner" style="margin: 0 20px 0px 0\;"><img src="([^"]+)".*?' \
             'href="([^"]+)" title="Click para Acceder a la Ficha(?:\|([^"]+)|)".*?' \
             '<strong>([^"]+)</strong></a>.*?<strong>([^"]+)</strong></p>.*?' \
             '<p class="text-warning".*?\;">(.*?)</p>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedthumbnail, scrapedurl, scrapedplot, scrapedtitle, scrapedtemp, scrapedepi in matches:
        title = scrapedtitle + " - " + scrapedtemp + " - " + scrapedepi
        url = urlparse.urljoin(host, scrapedurl)
        # Pull the numeric parts out of the "Temporada X" / "Episodio Y" texts;
        # infoLabels is mutated in place and then snapshotted by item.clone().
        temporada = scrapertools.find_single_match(scrapedtemp, "\d+")
        episode = scrapertools.find_single_match(scrapedepi, "\d+")
        #item.contentType = "tvshow"
        if temporada != "":
            item.infoLabels['season'] = temporada
            #item.contentType = "season"
        if episode != "":
            item.infoLabels['episode'] = episode
            #item.contentType = "episode"
        itemlist.append(item.clone(action="findvideos", title=title, url=url,
                                   contentSerieName=scrapedtitle,
                                   context=["buscar_trailer"]))
    tmdb.set_infoLabels(itemlist)
    # "Next page" marker; the site emits "/" when there is no further page.
    next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" (?:onclick="return false;" |)title='
                                                     '"Página Siguiente"')
    if next_page != "/":
        url = urlparse.urljoin(host, next_page)
        itemlist.append(item.clone(action="series", title=">> Siguiente", url=url, text_color=color3))
    return itemlist
def findvideos(item):
    """Collect playable links for one show page.

    Gathers torrent download links, online streaming links (skipping
    trailers) and, when present, links to additional seasons; finally adds
    a "search trailer" entry (contextual on Kodi).
    """
    logger.info()
    itemlist = []
    item.text_color = color3
    data = httptools.downloadpage(item.url).data
    data = scrapertools.decodeHtmlentities(data)
    # Torrent / download section
    data_download = scrapertools.find_single_match(data, '<th>Episodio - Enlaces de Descarga</th>(.*?)</table>')
    patron = '<p class="item_name".*?<a href="([^"]+)".*?>([^"]+)</a>'
    matches = scrapertools.find_multiple_matches(data_download, patron)
    for scrapedurl, scrapedepi in matches:
        new_item = item.clone()
        if "Episodio" not in scrapedepi:
            scrapedtitle = "[Torrent] Episodio " + scrapedepi
        else:
            scrapedtitle = "[Torrent] " + scrapedepi
        scrapedtitle = scrapertools.htmlclean(scrapedtitle)
        new_item.infoLabels['episode'] = scrapertools.find_single_match(scrapedtitle, "Episodio (\d+)")
        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "]")
        itemlist.append(new_item.clone(action="play", title=scrapedtitle, url=scrapedurl, server="torrent",
                                       contentType="episode"))
    # Online streaming section
    data_online = scrapertools.find_single_match(data, "<th>Enlaces de Visionado Online</th>(.*?)</table>")
    patron = '<a href="([^"]+)\\n.*?src="([^"]+)".*?' \
             'title="Enlace de Visionado Online">([^"]+)</a>'
    matches = scrapertools.find_multiple_matches(data_online, patron)
    for scrapedurl, scrapedthumb, scrapedtitle in matches:
        scrapedtitle = scrapertools.htmlclean(scrapedtitle)
        # FIX: use logical "and" (was bitwise "&") to discard trailer links.
        if scrapedthumb != "images/series/youtube.png" and scrapedtitle != "Trailer":
            new_item = item.clone()
            server = scrapertools.find_single_match(scrapedthumb, "images/series/(.*?).png")
            title = "[" + server.capitalize() + "]" + " " + scrapedtitle
            new_item.infoLabels['episode'] = scrapertools.find_single_match(scrapedtitle, "Episodio (\d+)")
            itemlist.append(new_item.clone(action="play", title=title, url=scrapedurl, contentType="episode"))
    # Check whether other seasons are available
    if "No hay disponible ninguna Temporada adicional" not in data:
        data_temp = scrapertools.find_single_match(data, '<div class="panel panel-success">(.*?)</table>')
        data_temp = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data_temp)
        patron = '<tr><td><p class="item_name"><a href="([^"]+)".*?' \
                 '<p class="text-success"><strong>([^"]+)</strong>'
        matches = scrapertools.find_multiple_matches(data_temp, patron)
        for scrapedurl, scrapedtitle in matches:
            new_item = item.clone()
            url = urlparse.urljoin(host, scrapedurl)
            scrapedtitle = scrapedtitle.capitalize()
            temporada = scrapertools.find_single_match(scrapedtitle, "Temporada (\d+)")
            if temporada != "":
                new_item.infoLabels['season'] = temporada
                new_item.infoLabels['episode'] = ""
            itemlist.append(new_item.clone(action="findvideos", title=scrapedtitle, url=url, text_color="red",
                                           contentType="season"))
    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    new_item = item.clone()
    if config.is_xbmc():
        new_item.contextual = True
    itemlist.append(new_item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                                   text_color="magenta"))
    return itemlist
def play(item):
    """Resolve the final playable URL for an item.

    NOTE(review): this branch tests ``item.extra == "torrent"``, but
    findvideos() marks torrent links via ``server="torrent"`` without setting
    ``extra`` — confirm ``extra`` is populated somewhere upstream.
    """
    logger.info()
    itemlist = []
    if item.extra == "torrent":
        itemlist.append(item.clone())
    else:
        # bit.ly shortener: follow the redirect to the real page first.
        if item.url.startswith("http://bit.ly/"):
            item.url = scrapertools.getLocationHeaderFromResponse(item.url)
        video_list = servertools.findvideos(item.url)
        if video_list:
            url = video_list[0][1]
            server = video_list[0][2]
            itemlist.append(item.clone(server=server, url=url))
    return itemlist

View File

@@ -212,21 +212,21 @@ def new_episodes(item):
itemlist = []
data = get_source(item.url)
data = scrapertools.find_single_match(data,
'<center>Series Online : Capítulos estrenados recientemente</center>.*?</ul>')
patron = '<li><h6.*?src="([^"]+)".*?href="([^"]+)">.*?src="([^"]+)".*? data-original-title=" (\d+x\d+).*?'
patron = '<li><h6.*?src="([^"]+)".*?alt=" (\d+x\d+).+?".*?href="([^"]+)">.*?src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for lang_data, scrapedurl, scrapedthumbnail, scrapedinfo, in matches:
for lang_data, scrapedinfo, scrapedurl, scrapedthumbnail in matches:
url = host+scrapedurl
url =scrapedurl
thumbnail = scrapedthumbnail
scrapedinfo = scrapedinfo.split('x')
season = scrapedinfo[0]
episode = scrapedinfo[1]
scrapedtitle = scrapertools.find_single_match(url, 'capitulo/([^/]+)/').replace("-", " ")
title = '%s - %sx%s' % (scrapedtitle, season, episode )
scrapedtitle = scrapertools.find_single_match(url, 'capitulo/([^/]+)/')
url = '%scapitulos/%s' % (host, scrapedtitle)
title = '%s - %sx%s' % (scrapedtitle.replace('-', ' '), season, episode )
title, language = add_language(title, lang_data)
itemlist.append(Item(channel=item.channel,
action='seasons',

View File

@@ -1,25 +0,0 @@
{
"id": "seriesyonkis",
"name": "Seriesyonkis",
"active": false,
"adult": false,
"language": ["cast"],
"thumbnail": "seriesyonkis.png",
"banner": "seriesyonkis.png",
"fanart": "seriesyonkis.jpg",
"categories": [
"tvshow",
"anime",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,197 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
host = 'https://yonkis.to'
def mainlist(item):
    """Top-level menu of the seriesyonkis channel."""
    logger.info()
    menu = [
        ("alfabetico", "Listado alfabetico", host),
        ("mas_vistas", "Series más vistas", host + "/series-mas-vistas"),
        ("ultimos", "Últimos episodios añadidos", host),
        ("search", "Buscar", host + "/buscar/serie"),
    ]
    return [Item(channel=item.channel, action=action, title=title, url=url)
            for action, title, url in menu]
def alfabetico(item):
    """A-Z (plus 0-9) index of the series catalogue."""
    logger.info()
    labels = ["0-9"] + list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
    return [Item(channel=item.channel, action="series", title=label,
                 url=host + "/lista-de-series/" + label)
            for label in labels]
def mas_vistas(item):
    """List the most-watched series."""
    logger.info()
    page = httptools.downloadpage(item.url).data
    cards = re.compile('<a title="([^"]+)" href="([^"]+)".*?src="([^"]+)".*?</a>', re.S).findall(page)
    results = []
    for title, href, thumb in cards:
        full_url = urlparse.urljoin(item.url, href)
        # Swap the 90px thumbnail path for the larger 150px variant.
        poster = urlparse.urljoin(item.url, thumb.replace("/90/", "/150/"))
        results.append(Item(channel=item.channel, action="episodios", title=title, fulltitle=title,
                            url=full_url, thumbnail=poster, show=title, fanart=item.fanart))
    return results
def search(item, texto):
    """POST the keyword to the site search and list matching series.

    Any exception is logged and swallowed so the global search keeps going.
    """
    logger.info()
    payload = "keyword=%s&search_type=serie" % texto
    data = httptools.downloadpage(item.url, post=payload).data
    try:
        pattern = '<a href="([^"]+)" title="([^"]+)"><img.*?src="([^"]+)".*?class="content">([^<]+)</div>'
        results = []
        for href, raw_title, thumb_path, raw_plot in re.compile(pattern, re.DOTALL).findall(data):
            title = raw_title.strip()
            url = host + href
            thumb = host + thumb_path.replace("/90/", "/150/")
            plot = re.sub(r"\n|\r|\t|\s{2,}", "", raw_plot.strip())
            logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumb + "]")
            results.append(Item(channel=item.channel, action="episodios", title=title, fulltitle=title,
                                url=url, thumbnail=thumb, plot=plot, show=title))
        return results
    except:
        # Never let a channel failure break the global search.
        import sys
        for trace_line in sys.exc_info():
            logger.error("%s" % trace_line)
        return []
def ultimos(item):
    """List the latest episodes added to the site."""
    logger.info()
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    logger.debug("data %s" % data)
    pattern = 'data-href="([^"]+)" data-src="([^"]+)" data-alt="([^"]+)".*?<a[^>]+>(.*?)</a>'
    entries = []
    for href, thumb, show, title in re.compile(pattern, re.S).findall(data):
        entries.append(Item(channel=item.channel, title=title, url=host + href, thumbnail=thumb,
                            show=show.strip(), action="findvideos", fulltitle=title))
    return entries
def series(item):
    """List the series on one catalogue page, with pagination links.

    The same "next page" item object is both inserted at the top and
    appended at the bottom of the listing for convenience.
    """
    logger.info()
    itemlist = []
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    matches = scrapertools.find_single_match(data, '<ul id="list-container" class="dictionary-list">(.*?)</ul>')
    matches = re.compile('title="([^"]+)" href="([^"]+)"', re.S).findall(matches)
    for title, url in matches:
        itemlist.append(Item(channel=item.channel, action="episodios", title=title, fulltitle=title,
                             url=urlparse.urljoin(item.url, url), thumbnail=item.thumbnail, show=title))
    # Pagination: the ">" anchor points at the next catalogue page.
    matches = re.compile('<a href="([^"]+)">></a>', re.S).findall(data)
    paginador = None
    if len(matches) > 0:
        paginador = Item(channel=item.channel, action="series", title="!Página siguiente",
                         url=urlparse.urljoin(item.url, matches[0]), thumbnail=item.thumbnail, show=item.show)
    # Only add pagination when the page actually yielded results.
    if paginador and len(itemlist) > 0:
        itemlist.insert(0, paginador)
        itemlist.append(paginador)
    return itemlist
def episodios(item):
    """Episode list for one series, plus videolibrary helper entries on Kodi."""
    logger.info()
    page = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    meta_pattern = '<meta property="og:description" content="([^/]+)" /><meta property="og:image" content="([^"]+)"'
    plot, thumb = scrapertools.find_single_match(page, meta_pattern)
    ep_pattern = '<a class="episodeLink p1" href="([^"]+)"><strong>(.*?)</strong>(.*?)</a>'
    episodes = []
    for href, season_episode, name in re.compile(ep_pattern, re.S).findall(page):
        label = season_episode.strip() + name
        episodes.append(Item(channel=item.channel, title=label, url=host + href, thumbnail=thumb,
                             show=item.show, plot=plot, action="findvideos", fulltitle=label))
    if config.get_videolibrary_support():
        episodes.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
                             action="add_serie_to_library", extra="episodios", show=item.show))
        episodes.append(Item(channel=item.channel, title="Descargar todos los episodios de la serie", url=item.url,
                             action="download_all_episodes", extra="episodios", show=item.show))
    return episodes
def findvideos(item):
    """List the hosting servers / languages available for one episode."""
    logger.info()
    page = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    pattern = '<a href="([^"]+)"[^>]+><img[^>]+alt="([^"]+)" /></a></td><td class="episode-lang"><span ' \
              'class="flags[^"]+" title="([^"]+)"'
    links = []
    for href, server_name, language in re.compile(pattern, re.S).findall(page):
        label = "[%s] - [%s]" % (language, server_name)
        full_url = host + href
        # Strip any domain suffix from the server name (alt text may carry one).
        server_id = re.sub('(\..*)', '', server_name)
        logger.debug("url %s" % full_url)
        links.append(Item(channel=item.channel, action="play", title=label, fulltitle=item.fulltitle,
                          url=full_url, thumbnail=item.thumbnail, language=language, server=server_id))
    return links
def play(item):
    """Resolve the direct video URLs embedded in an episode page.

    Returns the items produced by servertools, re-titled with the episode
    name and language.
    """
    logger.info()
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    itemlist = servertools.find_video_items(data=data)
    for video_item in itemlist:
        # FIX: findvideos() stores the language in item.language, not
        # item.lang, so read the attribute that is actually populated.
        video_item.title = "%s [%s]" % (item.fulltitle, item.language)
        video_item.thumbnail = item.thumbnail
        video_item.language = item.language
    return itemlist

View File

@@ -91,7 +91,10 @@ class Main(xbmcgui.WindowXMLDialog):
self.items = []
def onInit(self):
self.setCoordinateResolution(2)
#### Compatibilidad con Kodi 18 ####
if config.get_platform(True)['num_version'] < 18:
self.setCoordinateResolution(2)
self.focus = -1
self.buttons = []
posx= 0

View File

@@ -3,22 +3,31 @@
"name": "TodoPeliculas",
"active": true,
"adult": false,
"language": ["cast"],
"language": ["cast", "lat"],
"thumbnail": "http://www.todo-peliculas.com/images/logo.png",
"banner": "",
"version": 1,
"categories": [
"movie",
"torrent"
"torrent",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
"default": true,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra en TMDB",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
@@ -29,7 +38,11 @@
"visible": true,
"lvalues": [
"No filtrar",
"Castellano"
"CAST",
"LAT",
"VO",
"VOS",
"VOSE"
]
},
{
@@ -55,6 +68,27 @@
"default": true,
"enabled": true,
"visible": true
},
{
"id": "timeout_downloadpage",
"type": "list",
"label": "Timeout (segs.) en descarga de páginas o verificación de servidores",
"default": 5,
"enabled": true,
"visible": true,
"lvalues": [
"None",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10"
]
}
]
}

View File

@@ -1,176 +1,489 @@
# -*- coding: utf-8 -*-
# -*- Channel TodoPeliculas -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import sys
import urllib
import urlparse
import time
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import tmdb
from lib import generictools
from channels import filtertools
from channels import autoplay
from platformcode import config, logger
IDIOMAS = {'cast': 'Castellano'}
#IDIOMAS = {'CAST': 'Castellano', 'LAT': 'Latino', 'VO': 'Version Original'}
IDIOMAS = {'Castellano': 'CAST', 'Latino': 'LAT', 'Version Original': 'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['torrent']
host = 'http://www.todo-peliculas.com/'
channel = "todopeliculas"
categoria = channel.capitalize()
__modo_grafico__ = config.get_setting('modo_grafico', channel)
timeout = config.get_setting('timeout_downloadpage', channel)
def mainlist(item):
logger.info()
itemlist = []
thumb_cartelera = get_thumb("now_playing.png")
thumb_pelis = get_thumb("channels_movie.png")
thumb_pelis_hd = get_thumb("channels_movie_hd.png")
thumb_buscar = get_thumb("search.png")
thumb_separador = get_thumb("next.png")
thumb_settings = get_thumb("setting_0.png")
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(item.clone(title="Ultimas", action="list_all", url=host+'torrents'))
itemlist.append(item.clone(title="Por Calidad", action="section", url=host))
itemlist.append(item.clone(title="Buscar", action="search", url=host+'buscar?searchword='))
autoplay.show_option(item.channel, itemlist)
itemlist.append(Item(channel=item.channel, title="Novedades", action="listado", url=host+'torrents', thumbnail=thumb_cartelera, extra="peliculas"))
itemlist.append(Item(channel=item.channel, title="Por Calidades", action="categorias", url=host, thumbnail=thumb_pelis_hd, extra="peliculas", extra2="categorias"))
itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", url=host+'buscar?searchword=', thumbnail=thumb_buscar, extra="search"))
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]Configuración:[/COLOR]", folder=False, thumbnail=thumb_separador))
itemlist.append(Item(channel=item.channel, action="configuracion", title="Configurar canal", thumbnail=thumb_settings))
autoplay.show_option(item.channel, itemlist) #Activamos Autoplay
return itemlist
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def list_all(item):
def categorias(item):
logger.info()
itemlist = []
data = ''
try:
data = re.sub(r"\n|\r|\t|&nbsp;|<br>|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url, timeout=timeout).data)
data = unicode(data, "utf-8", errors="replace").encode("utf-8")
except:
pass
patron = '<li><a href="([^"]+)" rel="tag" class="[^>]+>(.*?)<\/a><\/li>'
#Verificamos si se ha cargado una página, y si además tiene la estructura correcta
if not data or not scrapertools.find_single_match(data, patron):
item = generictools.web_intervenida(item, data) #Verificamos que no haya sido clausurada
if item.intervencion: #Sí ha sido clausurada judicialmente
for clone_inter, autoridad in item.intervencion:
thumb_intervenido = get_thumb(autoridad)
itemlist.append(item.clone(action='', title="[COLOR yellow]" + clone_inter.capitalize() + ': [/COLOR]' + intervenido_judicial + '. Reportar el problema en el foro', thumbnail=thumb_intervenido))
return itemlist #Salimos
logger.error("ERROR 01: SUBMENU: La Web no responde o ha cambiado de URL: " + item.url)
if not data: #Si no ha logrado encontrar nada, salimos
itemlist.append(item.clone(action='', title=item.category + ': ERROR 01: SUBMENU: La Web no responde o ha cambiado de URL. Si la Web está activa, reportar el error con el log'))
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
data = get_source(item.url)
if item.type == 'buscar':
patron = '<div class=moditemfdb><a title=(.*?)\s+href=(.*?)><img.*?class=thumbnailresult src=(.*?)/>'
elif item.type == 'section':
patron = '<div class=blogitem >.*?href=(.*?)>.*?src=(.*?) alt.*?title=(.*?)>'
else:
patron = '<div class=blogitem ><a title=(.*?)\s+href=(.*?)>.*?src=(.*?) onload'
matches = re.compile(patron, re.DOTALL).findall(data)
for info_1, info_2, info_3 in matches:
if not matches:
logger.error("ERROR 02: SUBMENU: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.category + ': ERROR 02: SUBMENU: Ha cambiado la estructura de la Web. Reportar el error con el log'))
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
if item.type != 'section':
url = host+info_2
quality = scrapertools.find_single_match(info_1, '\[(.*?)\]')
contentTitle = re.sub(r'\[.*?\]', '', info_1)
title = '%s [%s]'%(contentTitle, quality)
thumbnail = info_3
else:
url = host + info_1
quality = scrapertools.find_single_match(info_3, '\[(.*?)\]')
contentTitle = re.sub(r'\[.*?\]', '', info_3)
title = '%s [%s]' % (contentTitle, quality)
thumbnail = info_2
quality = ''
if quality == '':
title = title.replace('[]', '')
itemlist.append(item.clone(action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
contentTitle=contentTitle,
quality = quality
))
# Paginación
url_next_page = scrapertools.find_single_match(data,'Anterior.*?<a href=/(.*?) title=Siguiente>Siguiente</a>')
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=host+url_next_page, action='list_all'))
return itemlist
def section(item):
logger.info()
itemlist = []
data = get_source(host)
patron = '<li><a href=(.*?) rel=tag class=>(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(data)
#logger.debug(matches)
for scrapedurl, scrapedtitle in matches:
url = scrapedurl
title = scrapedtitle
new_item = Item(channel=item.channel, title= title, url=url, action='list_all', type='section')
itemlist.append(new_item)
itemlist.append(item.clone(action="listado", title=scrapedtitle.capitalize().strip(), url=scrapedurl))
return itemlist
def listado(item):
logger.info()
itemlist = []
item.category = categoria
#logger.debug(item)
curr_page = 1 # Página inicial
cnt_title = 0 # Contador de líneas insertadas en Itemlist
if item.curr_page:
curr_page = int(item.curr_page) # Si viene de una pasada anterior, lo usamos
del item.curr_page # ... y lo borramos
if item.last_page:
last_page = int(item.last_page) # Si viene de una pasada anterior, lo usamos
del item.last_page # ... y lo borramos
cnt_tot = 40 # Poner el num. máximo de items por página
cnt_pct = 0.725 #% de la página a llenar
inicio = time.time() # Controlaremos que el proceso no exceda de un tiempo razonable
fin = inicio + 5 # Después de este tiempo pintamos (segundos)
timeout_search = timeout # Timeout para descargas
if item.extra == 'search':
timeout_search = timeout * 2 # Timeout un poco más largo para las búsquedas
if timeout_search < 5:
timeout_search = 5 # Timeout un poco más largo para las búsquedas
if not item.extra2: # Si viene de Catálogo o de Alfabeto
item.extra2 = ''
next_page_url = item.url
#Máximo num. de líneas permitidas por TMDB. Máx de 10 segundos por Itemlist para no degradar el rendimiento
while cnt_title < cnt_tot * cnt_pct and fin > time.time():
# Descarga la página
data = ''
try:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)|&nbsp;", "", httptools.downloadpage(next_page_url, timeout=timeout_search).data)
data = unicode(data, "utf-8", errors="replace").encode("utf-8")
except:
pass
if not data: #Si la web está caída salimos sin dar error
logger.error("ERROR 01: LISTADO: La Web no responde o ha cambiado de URL: " + item.url + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: LISTADO:. La Web no responde o ha cambiado de URL. Si la Web está activa, reportar el error con el log'))
break #si no hay más datos, algo no funciona, pintamos lo que tenemos
#Patrón para todo, menos para Alfabeto
if item.extra == 'search':
patron = '<div class="moditemfdb"><a title="([^"]+)"\s+href="([^"]+)"><img.*?class="thumbnailresult" src="([^"]+)"\/><\/a>'
elif item.extra2 == 'categorias':
patron = '<div class="blogitem "><a href="([^"]+)".*?src="([^"]+)" alt.*?title="([^"]+)">'
else:
patron = '<div class="blogitem "><a title="([^"]+)"\s+href="([^"]+)">.*?src="([^"]+)" onload'
matches = re.compile(patron, re.DOTALL).findall(data)
if not matches and not 'Total: 0 resultados encontrados' in data: #error
item = generictools.web_intervenida(item, data) #Verificamos que no haya sido clausurada
if item.intervencion: #Sí ha sido clausurada judicialmente
item, itemlist = generictools.post_tmdb_episodios(item, itemlist) #Llamamos al método para el pintado del error
return itemlist #Salimos
logger.error("ERROR 02: LISTADO: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: LISTADO: Ha cambiado la estructura de la Web. Reportar el error con el log'))
break #si no hay más datos, algo no funciona, pintamos lo que tenemos
#logger.debug("PATRON: " + patron)
#logger.debug(matches)
#logger.debug(data)
#Buscamos la url de paginado y la última página
patron = '<a href="([^"]+=(\d+))" title="Siguiente">Siguiente<\/a>'
try:
next_page_url, curr_page = scrapertools.find_single_match(data, patron)
curr_page = int(curr_page) / len(matches)
except: #Si no lo encuentra, lo ponemos a 1
#logger.error('ERROR 03: LISTADO: Al obtener la paginación: ' + patron + ' / ' + data)
fin = 0 #Forzamos a salir del WHILE al final del FOR
cnt_title = 0 #Evitamos pié de página
curr_page = 1
next_page_url = item.url
next_page_url = urlparse.urljoin(host, next_page_url)
#logger.debug('curr_page: ' + str(curr_page) + ' / url: ' + next_page_url)
#Empezamos el procesado de matches
for scrapedtitle, scrapedurl, scrapedthumb in matches:
if item.extra2 == 'categorias': #Cambia el orden de tres parámetros (Categorías)
title = scrapedthumb
url = urlparse.urljoin(host, scrapedtitle)
thumb = scrapedurl
else: #lo estándar
title = scrapedtitle
url = urlparse.urljoin(host, scrapedurl)
thumb = scrapedthumb
quality = scrapertools.find_single_match(title, '\[(.*?)\]') #capturamos quality
title = re.sub(r'\[.*?\]', '', title) #y lo borramos de title
title = title.replace("á", "a").replace("é", "e").replace("í", "i").replace("ó", "o").replace("ú", "u").replace("ü", "u").replace("�", "ñ").replace("ñ", "ñ").replace("&atilde;", "a").replace("&etilde;", "e").replace("&itilde;", "i").replace("&otilde;", "o").replace("&utilde;", "u").replace("&ntilde;", "ñ").replace("&#8217;", "'")
item_local = item.clone() #Creamos copia de Item para trabajar
if item_local.tipo: #... y limpiamos
del item_local.tipo
if item_local.totalItems:
del item_local.totalItems
if item_local.post_num:
del item_local.post_num
if item_local.intervencion:
del item_local.intervencion
if item_local.viewmode:
del item_local.viewmode
item_local.text_bold = True
del item_local.text_bold
item_local.text_color = True
del item_local.text_color
title_subs = [] #creamos una lista para guardar info importante
item_local.language = [] #iniciamos Lenguaje
item_local.quality = quality #guardamos la calidad, si la hay
item_local.url = url #guardamos el thumb
item_local.thumbnail = thumb #guardamos el thumb
item_local.context = "['buscar_trailer']"
item_local.contentType = "movie" #por defecto, son películas
item_local.action = "findvideos"
#Ajustamos los idiomas
if ("-latino-" in url.lower() or "(latino)" in title.lower()) and "LAT" not in item_local.language:
item_local.language += ['LAT']
elif ('-vos-' in url.lower() or '-vose-' in url.lower() or '(vos)' in title.lower() or '(vose)' in title.lower()) and "VOSE" not in item_local.language:
item_local.language += ['VOSE']
elif ('-vo-' in url.lower() or '(vo)' in title.lower()) and "VO" not in item_local.language:
item_local.language += ['VO']
if item_local.language == []:
item_local.language = ['CAST'] #Por defecto
title = re.sub(r'\(.*?\)', '', title) #Limpiamos del idioma de title
#Detectamos info interesante a guardar para después de TMDB
if scrapertools.find_single_match(title, '[m|M].*?serie'):
title = re.sub(r'[m|M]iniserie', '', title)
title_subs += ["Miniserie"]
if scrapertools.find_single_match(title, '[s|S]aga'):
title = re.sub(r'[s|S]aga', '', title)
title_subs += ["Saga"]
if scrapertools.find_single_match(title, '[c|C]olecc'):
title = re.sub(r'[c|C]olecc...', '', title)
title_subs += ["Colección"]
if "duolog" in title.lower():
title_subs += ["[Saga]"]
title = title.replace(" Duologia", "").replace(" duologia", "").replace(" Duolog", "").replace(" duolog", "")
if "trilog" in title.lower():
title_subs += ["[Saga]"]
title = title.replace(" Trilogia", "").replace(" trilogia", "").replace(" Trilog", "").replace(" trilog", "")
if "extendida" in title.lower() or "v.e." in title.lower()or "v e " in title.lower():
title_subs += ["[V. Extendida]"]
title = title.replace("Version Extendida", "").replace("(Version Extendida)", "").replace("V. Extendida", "").replace("VExtendida", "").replace("V Extendida", "").replace("V.Extendida", "").replace("V Extendida", "").replace("V.E.", "").replace("V E ", "").replace("V:Extendida", "")
item_local.infoLabels["year"] = '-' #Reseteamos el año para TMDB
#Limpiamos el título de la basura innecesaria
title = re.sub(r'- $', '', title)
title = re.sub(r'TV|Online|Spanish|Torrent|en Espa\xc3\xb1ol|Español|Latino|Subtitulado|Blurayrip|Bluray rip|\[.*?\]|R2 Pal|\xe3\x80\x90 Descargar Torrent \xe3\x80\x91|Completa|Temporada|Descargar|Torren', '', title, flags=re.IGNORECASE)
#Terminamos de limpiar el título
title = re.sub(r'\??\s?\d*?\&.*', '', title)
title = re.sub(r'[\(|\[]\s+[\)|\]]', '', title)
title = title.replace('()', '').replace('[]', '').strip().lower().title()
item_local.from_title = title.strip().lower().title() #Guardamos esta etiqueta para posible desambiguación de título
#Salvamos el título según el tipo de contenido
if item_local.contentType == "movie":
item_local.contentTitle = title.strip().lower().title()
else:
item_local.contentSerieName = title.strip().lower().title()
item_local.title = title.strip().lower().title() #Guardamos el título
#Guarda la variable temporal que almacena la info adicional del título a ser restaurada después de TMDB
item_local.title_subs = title_subs
#Ahora se filtra por idioma, si procede, y se pinta lo que vale
if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra
itemlist = filtertools.get_link(itemlist, item_local, list_language)
else:
itemlist.append(item_local.clone()) #Si no, pintar pantalla
cnt_title = len(itemlist) #Contador de líneas añadidas
#logger.debug(item_local)
#Pasamos a TMDB la lista completa Itemlist
tmdb.set_infoLabels(itemlist, __modo_grafico__)
#Llamamos al método para el maquillaje de los títulos obtenidos desde TMDB
item, itemlist = generictools.post_tmdb_listado(item, itemlist)
# Si es necesario añadir paginacion
if cnt_title >= cnt_tot * cnt_pct:
title = '%s' % curr_page
itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente " + title, url=next_page_url, extra=item.extra, extra2=item.extra2))
return itemlist
def findvideos(item):
    """Build the list of playable links (torrent + hosters) for a movie item.

    Scrapes the detail page twice (helper + raw download), resolves the
    torrent landing pages to their final URLs, decorates each entry with
    quality/size/language and applies language filtering and AutoPlay.

    NOTE(review): this body shows signs of a bad merge — see the inline
    notes about `categoria`, the in-loop `filtertools.get_links` call and
    the duplicated `autoplay.start`.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    itemlist_t = []  # full (unfiltered) list of links
    itemlist_f = []  # language-filtered list of links
    if not item.language:
        item.language = ['CAST']  # Castilian Spanish by default
    second_url = scrapertools.find_single_match(data, '<p><a href=(.*?) rel')
    # NOTE(review): `categoria` is not defined in this function — presumably a
    # module-level global; confirm, otherwise this raises NameError at runtime.
    item.category = categoria
    # Create-then-delete so the attribute is guaranteed absent on later clones
    item.extra2 = 'xyz'
    del item.extra2
    #logger.debug(item)
    data = get_source(host+second_url)
    url = scrapertools.find_single_match(data, "open\('(.*?)'")
    # Re-download the page data raw for the torrent-link pattern below
    data = ''
    patron = '<p><a href="([^"]+)" rel'
    try:
        data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)|&nbsp;", "", httptools.downloadpage(item.url, timeout=timeout).data)
        data = unicode(data, "utf-8", errors="replace").encode("utf-8")
    except:
        pass
    if not data:
        logger.error("ERROR 01: FINDVIDEOS: La Web no responde o la URL es erronea: " + item.url)
        itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: FINDVIDEOS:. La Web no responde o la URL es erronea. Si la Web está activa, reportar el error con el log'))
        return itemlist  # no more data: something failed, show what we have
    # Direct torrent link found on the second page, if any
    if url != '':
        quality = item.quality
        title = 'Torrent [%s]' % quality
        itemlist.append(item.clone(title=title, url=url, quality=quality, action='play', server='torrent',
                                   language='cast'))
    matches = re.compile(patron, re.DOTALL).findall(data)
    if not matches:  # error
        logger.error("ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web " + " / PATRON: " + patron + data)
        itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log'))
        return itemlist  # no more data: something failed, show what we have
    #logger.debug("PATRON: " + patron)
    #logger.debug(matches)
    #logger.debug(data)
    # Build the generic video title with everything obtained from TMDB
    item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)
    # Required by FilterTools
    # Now process the .torrent links
    for scrapedurl in matches:  # iterate the torrents in their different qualities
        if 'javascript' in scrapedurl:  # skip junk entries
            continue
        url = urlparse.urljoin(host, scrapedurl)
        # Fetch the next page, which is where the magnet/torrent really lives
        try:
            data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)|&nbsp;", "", httptools.downloadpage(url, timeout=timeout).data)
            data = unicode(data, "utf-8", errors="replace").encode("utf-8")
        except:
            pass
        patron = "window.open\('([^']+)'"
        url = scrapertools.find_single_match(data, patron)
        if not url:  # error
            logger.error("ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web " + " / PATRON: " + patron + data)
            itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log'))
            continue  # nothing usable here, move on to the next match
        # Work on a private copy of the Item
        item_local = item.clone()
        item_local.url = urlparse.urljoin(host, url)
        # Reuse the size if already present in quality; otherwise read the .torrent
        size = scrapertools.find_single_match(item_local.quality, '\s\[(\d+,?\d*?\s\w\s?[b|B])\]')
        if not size:
            size = generictools.get_torrent_size(item_local.url)  # look up the size inside the .torrent
        if size:
            item_local.title = re.sub(r'\s\[\d+,?\d*?\s\w[b|B]\]', '', item_local.title)  # strip size from title, if present
            item_local.title = '%s [%s]' % (item_local.title, size)  # append size to the title
            size = size.replace('GB', 'G B').replace('Gb', 'G b').replace('MB', 'M B').replace('Mb', 'M b')
            item_local.quality = re.sub(r'\s\[\d+,?\d*?\s\w\s?[b|B]\]', '', item_local.quality)  # strip size from quality, if present
            item_local.quality = '%s [%s]' % (item_local.quality, size)  # append size to the quality
        # Render the Torrent link label
        item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language))
        # Tidy up title and quality: drop empty color tags
        item_local.title = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.title)
        item_local.title = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.title)
        item_local.title = item_local.title.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
        item_local.quality = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.quality)
        item_local.quality = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.quality).strip()
        item_local.quality = item_local.quality.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
        # NOTE(review): get_links on the whole itemlist inside the per-torrent
        # loop looks like a merge leftover — confirm it belongs here.
        itemlist = filtertools.get_links(itemlist, item, list_language)
        item_local.alive = "??"  # link quality not verified
        item_local.action = "play"  # play the video
        item_local.server = "torrent"  # Torrent server
        itemlist_t.append(item_local.clone())  # shown as-is when no language filter is active
        # Required by FilterTools
        if config.get_setting('filter_languages', channel) > 0:  # filter if a language is selected
            itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language)  # shown if non-empty
        #logger.debug("TORRENT: " + scrapedurl + " / title gen/torr: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName)
        #logger.debug(item_local)
    if len(itemlist_f) > 0:  # if there are filtered entries...
        itemlist.extend(itemlist_f)  # ...show the filtered list
    else:
        if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0:  # no filtered entries...
            thumb_separador = get_thumb("next.png")  # ...show everything, with a warning
            itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador))
        itemlist.extend(itemlist_t)  # show everything when no filtering applies
    # Required by AutoPlay
    # NOTE(review): autoplay.start is invoked again a few lines below — one of
    # the two calls is almost certainly a merge duplicate.
    autoplay.start(itemlist, item)
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                 action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
    autoplay.start(itemlist, item)  # launch AutoPlay
    return itemlist
def actualizar_titulos(item):
    """Re-resolve the item's title via TMDB and hand it back to the channel."""
    logger.info()
    # generictools.update_title wraps tmdb.find_and_set_infoLabels
    return generictools.update_title(item)
def search(item, texto):
    """Channel search entry point.

    Appends the (space-escaped) search term to item.url exactly once and
    delegates to listado(); returns [] on an empty query or any scraping
    failure so the global search aggregation is never interrupted.

    Fixes vs. the merged original: the term was appended to item.url twice,
    the undefined name `list_all` was called (always raising NameError into
    the bare except), and an empty query fell through returning None.
    """
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = item.url + texto          # append the term once
    item.type = 'buscar'
    try:
        if texto != '':
            return listado(item)
        return []
    # Broad catch on purpose: one broken channel must not break global search
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
def newest(categoria):
    """Return the newest entries for the global 'novedades' section.

    Maps the requested category to its listing URL, runs the regular
    listado() scraper and strips the trailing pagination pseudo-item.
    Returns [] on failure so the 'novedades' aggregation keeps working.

    Fixes vs. the merged original: duplicated url assignment, a dead call
    to the undefined `list_all` with an external 'todo-peliculas' URL,
    an unguarded `itemlist[-1]` on a still-empty list, and a duplicated
    unreachable `return`.
    """
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria in ['torrent', 'peliculas']:
            item.url = host + 'torrents'
        elif categoria == '4k':
            item.url = host + 'tags/4k'
            item.extra2 = 'categorias'
        item.extra = "peliculas"
        item.channel = channel
        item.category_new = 'newest'
        itemlist = listado(item)
        if itemlist and ">> Página siguiente" in itemlist[-1].title:
            itemlist.pop()  # drop the pagination pseudo-entry
    # Broad catch on purpose: a failing channel must not break 'novedades'
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist

View File

@@ -1,22 +0,0 @@
{
"id": "tupornotv",
"name": "tuporno.tv",
"active": true,
"adult": true,
"language": ["*"],
"banner": "tupornotv.png",
"thumbnail": "tupornotv.png",
"categories": [
"adult"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,264 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import scrapertools
from core.item import Item
from platformcode import logger
def mainlist(item):
    """Top-level menu of the tuporno.tv channel."""
    logger.info()
    itemlist = []
    # (title, action, url, folder-or-None) — None means "use Item's default"
    entradas = [
        ("Pendientes de Votación", "novedades", "http://tuporno.tv/pendientes", None),
        ("Populares", "masVistos", "http://tuporno.tv/", True),
        ("Categorias", "categorias", "http://tuporno.tv/categorias/", True),
        ("Videos Recientes", "novedades", "http://tuporno.tv/videosRecientes/", True),
        ("Top Videos (mas votados)", "masVotados", "http://tuporno.tv/topVideos/", True),
        ("Nube de Tags", "categorias", "http://tuporno.tv/tags/", True),
    ]
    for titulo, accion, direccion, carpeta in entradas:
        if carpeta is None:
            itemlist.append(Item(channel=item.channel, title=titulo, action=accion, url=direccion))
        else:
            itemlist.append(Item(channel=item.channel, title=titulo, action=accion,
                                 url=direccion, folder=carpeta))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
def novedades(item):
    """List the videos found on a tuporno.tv listing page.

    Extracts title / url / thumbnail / duration from each result block and
    appends a '!Next page' entry when the paginator link is present.
    """
    logger.info()
    url = item.url
    # ------------------------------------------------------
    # Download the page
    # ------------------------------------------------------
    data = scrapertools.cachePage(url)
    # logger.info(data)
    # ------------------------------------------------------
    # Extract the entries
    # ------------------------------------------------------
    # "new videos" section; sample of the markup being matched:
    '''
    <table border="0" cellpadding="0" cellspacing="0" ><tr><td align="center" width="100%" valign="top" height="160px">
    <a href="/videos/cogiendo-en-el-bosque"><img src="imagenes/videos//c/o/cogiendo-en-el-bosque_imagen2.jpg" alt="Cogiendo en el bosque" border="0" align="top" /></a>
    <h2><a href="/videos/cogiendo-en-el-bosque">Cogiendo en el bosque</a></h2>
    '''
    patronvideos = '<div class="relative">(.*?)</div><div class="video'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    # if DEBUG: scrapertools.printMatches(matches)
    itemlist = []
    for match in matches:
        # Title (optional: fall back to empty string)
        try:
            scrapedtitle = re.compile('title="(.+?)"').findall(match)[0]
        except:
            scrapedtitle = ''
        # URL is mandatory — skip the block entirely when missing
        try:
            scrapedurl = re.compile('href="(.+?)"').findall(match)[0]
            scrapedurl = urlparse.urljoin(url, scrapedurl)
        except:
            continue
        try:
            scrapedthumbnail = re.compile('src="(.+?)"').findall(match)[0]
            scrapedthumbnail = urlparse.urljoin(url, scrapedthumbnail)
        except:
            scrapedthumbnail = ''
        scrapedplot = ""
        # Duration: primary pattern, then a legacy fallback layout
        try:
            duracion = re.compile('<div class="duracion">(.+?)<').findall(match)[0]
        except:
            try:
                duracion = re.compile('\((.+?)\)<br').findall(match[3])[0]
            except:
                duracion = ""
        # logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"], duracion=["+duracion+"]")
        # Add to the XBMC listing
        # trozos = scrapedurl.split("/")
        # id = trozos[len(trozos)-1]
        # videos = "http://149.12.64.129/videoscodiH264/"+id[0:1]+"/"+id[1:2]+"/"+id+".flv"
        itemlist.append(
            Item(channel=item.channel, action="play", title=scrapedtitle + " [" + duracion + "]", url=scrapedurl,
                 thumbnail=scrapedthumbnail, plot=scrapedplot, server="Directo", folder=False))
    # ------------------------------------------------------
    # Extract the paginator
    # ------------------------------------------------------
    # <a href="/topVideos/todas/mes/2/" class="enlace_si">Siguiente </a>
    patronsiguiente = '<a href="(.+?)" class="enlace_si">Siguiente </a>'
    siguiente = re.compile(patronsiguiente, re.DOTALL).findall(data)
    if len(siguiente) > 0:
        scrapedurl = urlparse.urljoin(url, siguiente[0])
        itemlist.append(Item(channel=item.channel, action="novedades", title="!Next page", url=scrapedurl, folder=True))
    return itemlist
def masVistos(item):
    """Sub-menu of 'most watched' time ranges."""
    logger.info()
    rangos = [("Hoy", "hoy"), ("Recientes", "recientes"), ("Semana", "semana"),
              ("Mes", "mes"), ("Año", "ano")]
    return [Item(channel=item.channel, title=etiqueta, action="novedades",
                 url="http://tuporno.tv/" + sufijo, folder=True)
            for etiqueta, sufijo in rangos]
def categorias(item):
    """List the site's category (or tag-cloud) links.

    The markup differs between the /categorias/ page and the tag pages, so
    two different patterns are used depending on item.url.
    """
    logger.info()
    url = item.url
    # ------------------------------------------------------
    # Download the page
    # ------------------------------------------------------
    data = scrapertools.cachePage(url)
    # logger.info(data)
    # ------------------------------------------------------
    # Extract the entries
    # ------------------------------------------------------
    # categories section
    # Entry patterns
    if url == "http://tuporno.tv/categorias/":
        patronvideos = '<li><a href="([^"]+)"'  # URL
        patronvideos += '>([^<]+)</a></li>'  # title
    else:
        patronvideos = '<a href="(.tags[^"]+)"'  # URL
        patronvideos += ' class="[^"]+">([^<]+)</a>'  # title
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    # if DEBUG: scrapertools.printMatches(matches)
    itemlist = []
    for match in matches:
        # Skip the site's non-category navigation entries
        if match[1] in ["SexShop", "Videochat", "Videoclub"]:
            continue
        # Title
        scrapedtitle = match[1]
        scrapedurl = urlparse.urljoin(url, match[0])
        scrapedthumbnail = ""
        scrapedplot = ""
        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        # Add to the XBMC listing
        itemlist.append(Item(channel=item.channel, action="novedades", title=scrapedtitle.capitalize(), url=scrapedurl,
                             thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
    return itemlist
def masVotados(item):
    """Sub-menu of 'top voted' time ranges."""
    logger.info()
    base = "http://tuporno.tv/topVideos/todas/"
    rangos = [("Hoy", "hoy"), ("Recientes", "recientes"), ("Semana", "semana"),
              ("Mes", "mes"), ("Año", "ano")]
    return [Item(channel=item.channel, title=etiqueta, action="novedades",
                 url=base + sufijo, folder=True)
            for etiqueta, sufijo in rangos]
def search(item, texto):
    """Search entry point; falls back to item.extra when texto is empty."""
    logger.info()
    consulta = (texto or item.extra).replace(" ", "+")
    item.url = "http://tuporno.tv/buscador/?str=" + consulta
    # Swallow any failure so the global search is not interrupted by this channel
    try:
        return getsearch(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def getsearch(item):
    """Scrape one page of search results.

    Returns the Items found plus a '!Next page' entry when pagination is
    detected.  Fix vs. original: `itemlist` was only created inside the
    `if len(matches) > 0` branch, so an empty result page fell through
    and the function returned None instead of [].
    """
    logger.info()
    data = scrapertools.cachePage(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
    patronvideos = '<div class="relative"><a href="(.videos[^"]+)"[^>]+><img.+?src="([^"]+)" alt="(.+?)" .*?<div class="duracion">(.+?)</div></div></div>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    itemlist = []  # always defined: empty result pages now return []
    for match in matches:
        # Title (strip the highlight markup the site injects)
        scrapedtitle = match[2].replace("<b>", "").replace("</b>", "")
        scrapedurl = urlparse.urljoin("http://tuporno.tv/", match[0])
        scrapedthumbnail = urlparse.urljoin("http://tuporno.tv/", match[1])
        scrapedplot = ""
        duracion = match[3]
        itemlist.append(
            Item(channel=item.channel, action="play", title=scrapedtitle + " [" + duracion + "]", url=scrapedurl,
                 thumbnail=scrapedthumbnail, plot=scrapedplot, server="Directo", folder=False))
    if itemlist:
        '''<a href="/buscador/?str=busqueda&desde=HV_PAGINA_SIGUIENTE" class="enlace_si">Siguiente </a>'''
        patronsiguiente = '<a href="([^"]+)" class="enlace_si">Siguiente </a>'
        siguiente = re.compile(patronsiguiente, re.DOTALL).findall(data)
        if len(siguiente) > 0:
            # The HV marker comment means we are already on the last page
            patronultima = '<!--HV_SIGUIENTE_ENLACE'
            ultpagina = re.compile(patronultima, re.DOTALL).findall(data)
            scrapertools.printMatches(siguiente)
            if len(ultpagina) == 0:
                scrapedurl = urlparse.urljoin(item.url, siguiente[0])
                itemlist.append(
                    Item(channel=item.channel, action="getsearch", title="!Next page", url=scrapedurl, folder=True))
    return itemlist
def play(item):
    """Resolve the final flv URL for a tuporno.tv video page.

    The page's body id is the video code; the flvurl.php endpoint answers
    with a 'kpt' parameter whose value is the base64-encoded video URL.
    """
    logger.info()
    itemlist = []
    # Read the video page
    data = scrapertools.cachePage(item.url)
    codVideo = scrapertools.get_match(data, 'body id="([^"]+)"')
    logger.info("codVideo=" + codVideo)
    # Read the page with the code
    # http://tuporno.tv/flvurl.php?codVideo=188098&v=MAC%2011,5,502,146
    url = "http://tuporno.tv/flvurl.php?codVideo=" + codVideo + "&v=MAC%2011,5,502,146"
    data = scrapertools.cachePage(url)
    logger.info("data=" + data)
    kpt = scrapertools.get_match(data, "kpt\=(.+?)\&")
    logger.info("kpt=" + kpt)
    # Decode: kpt is the base64-encoded direct URL
    import base64
    url = base64.decodestring(kpt)
    logger.info("url=" + url)
    itemlist.append(
        Item(channel=item.channel, action="play", title=item.title, url=url, thumbnail=item.thumbnail, plot=item.plot,
             server="Directo", folder=False))
    return itemlist

View File

@@ -0,0 +1,77 @@
{
"id": "yape",
"name": "Yape",
"active": true,
"adult": false,
"language": ["lat","cast"],
"thumbnail": "https://s8.postimg.cc/71ed4op5d/yape1.png",
"banner": "https://s8.postimg.cc/4wu03lfsx/yape2.png",
"categories": [
"movie",
"vos"
],
"settings": [
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"ESP",
"VOSE"
]
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,193 @@
# -*- coding: utf-8 -*-
# -*- Channel Yape -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
from channelselector import get_thumb
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger, platformtools
# Language-flag image URL (as scraped from the page markup) -> language tag
idio = {'https://cdn.yape.nu//languajes/la.png': 'LAT','https://cdn.yape.nu//languajes/es.png': 'ESP','https://cdn.yape.nu//languajes/en_es.png': 'VOSE'}
# Identity map of known quality labels (doubles as the whitelist used by findvideos)
cali = {'HD 1080p': 'HD 1080p','TS Screener HQ':'TS Screener HQ', 'BR Screnner':'BR Screnner','HD Rip':'HD Rip','DVD Screnner':'DVD Screnner'}
list_language = idio.values()
list_quality = cali.values()
list_servers = ['streamango', 'powvideo', 'openload', 'streamplay', 'vidoza', 'clipwaching']
__channel__='yape'
host = "https://yape.nu"
# Read the 'modo_grafico' setting; default to True if the settings store fails
try:
    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
except:
    __modo_grafico__ = True
def mainlist(item):
    """Root menu of the Yape channel."""
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = []
    # (title, catalogue sort key, thumbnail id)
    listados = [
        ("Actualizadas", "sort=time_update", "updated"),
        ("Mas vistas", "sort=mosts-today", "more watched"),
        ("Ultimas agregadas", "sort=latest", "last"),
    ]
    for titulo, orden, icono in listados:
        itemlist.append(Item(channel=item.channel, title=titulo, action="peliculas",
                             url=host + "/catalogue?" + orden + "&page=", page=1,
                             thumbnail=get_thumb(icono, auto=True)))
    itemlist.append(Item(channel=item.channel, title="Por género", action="generos", url=host,
                         extra="Genero", thumbnail=get_thumb("genres", auto=True)))
    itemlist.append(Item(channel=item.channel, title=""))  # visual separator
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search",
                         url=host + "/search?term=", thumbnail=get_thumb("search", auto=True)))
    itemlist.append(item.clone(title="Configurar canal...", text_color="gold",
                               action="configuracion", folder=False))
    autoplay.show_option(item.channel, itemlist)
    return itemlist
def configuracion(item):
    """Open the channel settings dialog, then refresh the current listing."""
    resultado = platformtools.show_channel_settings()
    platformtools.itemlist_refresh()
    return resultado
def search(item, texto):
    """Channel search: build the query URL and reuse the peliculas() scraper."""
    logger.info()
    item.url = host + "/search?s=%s&page=" % texto
    item.extra = "busca"
    item.page = 1
    # Empty queries produce no listing
    return peliculas(item) if texto != '' else []
def peliculas(item):
    """List one page of movies; appends a pagination item when results exist."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url + str(item.page)).data
    patron = ('class="col-lg-2 col-md-3 col-6 mb-3">.*?href="([^"]+).*?'
              'title="([^"]+).*?'
              'src="([^"]+).*?'
              'txt-size-13">([^<]+)')
    for enlace, titulo, miniatura, anio in scrapertools.find_multiple_matches(data, patron):
        # The site prefixes/suffixes titles with SEO boilerplate — strip it
        titulo = titulo.replace("Ver ", "").replace(" Completa Online Gratis", "")
        itemlist.append(Item(channel=item.channel,
                             action="findvideos",
                             contentTitle=titulo,
                             infoLabels={'year': anio},
                             thumbnail=miniatura,
                             title=titulo + " (%s)" % anio,
                             url=enlace))
    tmdb.set_infoLabels(itemlist)
    # pagination
    if itemlist:
        itemlist.append(Item(channel=item.channel,
                             action="peliculas",
                             page=item.page + 1,
                             title="Página siguiente >>",
                             url=item.url))
    return itemlist
def newest(categoria):
    """Feed for the global 'novedades' menu; returns [] on any failure.

    Fixes vs. original: the 'peliculas/latino' URL had a second '?'
    ("?sort=latest?page="), the terror URL was missing its leading '/',
    the infantiles URL was missing the '=' after 'page', and the
    pagination pop compared "Pagina" (no accent) against the real title
    "Página siguiente >>" so the pseudo-entry was never removed.
    """
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria in ['peliculas', 'latino']:
            # '&page=' extends the existing query string (not a second '?')
            item.url = host + "/catalogue?sort=latest&page="
        elif categoria == 'infantiles':
            item.url = host + '/genre/animacion?page='
        elif categoria == 'terror':
            item.url = host + '/genre/terror?page='
        else:
            return []
        item.page = 1
        itemlist = peliculas(item)
        if itemlist and "Página" in itemlist[-1].title:
            itemlist.pop()  # drop the pagination pseudo-entry
    # Broad catch on purpose: a failing channel must not break 'novedades'
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist
def generos(item):
    """List the genre entries found in the site's dropdown menu."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    patron = 'dropdown-item py-1 px-2" href="([^"]+)"' + '>([^<]+)'
    encontrados = scrapertools.find_multiple_matches(data, patron)
    return [Item(channel=item.channel,
                 action="peliculas",
                 title=nombre,
                 url=enlace + "?page=",
                 page=1)
            for enlace, nombre in encontrados]
def findvideos(item):
    """Collect the hoster links for a movie, sorted by language and server.

    Parses the block between the 'Descargar' header and the 'Te recomendamos'
    footer, mapping the scraped flag image to a language tag and the scraped
    quality label through the `cali` whitelist.
    """
    itemlist = []
    data = httptools.downloadpage(item.url).data
    bloque = scrapertools.find_single_match(data, 'Descargar</span>(.*?)Te recomendamos')
    if bloque == "[]":
        return []
    patron = 'sv_([^_]+).*?'          # server name
    patron += 'link="([^"]+).*?'      # hoster link
    patron += 'juM9Fbab.*?src="([^"]+).*?'  # language flag image
    patron += 'rounded c.">([^<]+)'   # quality label
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for scrapedserver, scrapedurl, scrapedlanguage, scrapedquality in matches:
        # NOTE(review): cali[...] / idio[...] raise KeyError on any quality or
        # flag URL not present in the module-level maps — confirm the site
        # never emits other values.
        titulo = "Ver en: " + scrapedserver.capitalize() + " (%s)(%s)" %(cali[scrapedquality], idio[scrapedlanguage])
        itemlist.append(
            item.clone(action = "play",
                       language = idio[scrapedlanguage],
                       quality = cali[scrapedquality],
                       title = titulo,
                       url = scrapedurl
                       ))
    itemlist.sort(key=lambda it: (it.language, it.server))
    tmdb.set_infoLabels(itemlist, __modo_grafico__)
    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay
    autoplay.start(itemlist, item)
    if itemlist:
        itemlist.append(Item(channel = item.channel))
        itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                                   text_color="magenta"))
    # "Add this movie to the KODI library" option
    if item.extra != "library":
        if config.get_videolibrary_support():
            itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
                                 action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
                                 contentTitle = item.contentTitle
                                 ))
    return itemlist
def play(item):
    """Resolve the embedded iframe to the final hoster URL and tag its server."""
    data = httptools.downloadpage(item.url).data
    embed = scrapertools.find_single_match(data, 'iframe class="" src="([^"]+)')
    # The embed URL answers with a redirect; the Location header is the real link
    respuesta = httptools.downloadpage(embed, follow_redirects=False, only_headers=True)
    item.url = respuesta.headers.get("location", "")
    itemlist = servertools.get_servers_itemlist([item.clone()])
    itemlist[0].thumbnail = item.contentThumbnail
    return itemlist

View File

@@ -0,0 +1,23 @@
{
"id": "yts",
"name": "Yts",
"active": true,
"adult": false,
"language": ["*"],
"thumbnail": "yts.jpg",
"categories": [
"movie",
"torrent",
"vos"
],
"settings":[
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,123 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from lib import generictools
from platformcode import logger
def mainlist(item):
    """Construye el menú principal del canal yts: navegación, populares y búsqueda."""
    logger.info()
    # (título, acción, opt, url) de cada entrada del menú
    entries = [
        ("Browse", "movies", 0, "https://yts.am/browse-movies"),
        ("Popular", "movies", 1, "https://yts.am"),
        ("Search", "search", 0, "https://yts.am/browse-movies"),
    ]
    itemlist = [
        Item(channel=item.channel, title=title, action=action, opt=opt, url=url)
        for title, action, opt, url in entries
    ]
    return itemlist
def movies(item):
    """Lista las películas de la página indicada en item.url.

    En modo "Popular" (item.opt == 1) las miniaturas llegan con ruta
    relativa (se les antepone el dominio) y solo se muestran las 4
    primeras entradas. Si la web ofrece página siguiente se añade un
    item "Next >>>" que vuelve a llamar a esta misma acción.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '(?s)class="browse-movie-wrap.*?a href="([^"]+).*?'  # Enlace a la película
    patron += 'img class.*?src="([^"]+).*?'                       # Imagen
    patron += 'movie-title">.*?([^<]+)'                           # Título
    patron += '.*?year">(.*?)<'                                   # Año
    matches = scrapertools.find_multiple_matches(data, patron)
    for idx, (scrapedurl, scrapedthumbnail, scrapedtitle, year) in enumerate(matches, 1):
        if item.opt == 1:
            scrapedthumbnail = 'https://yts.am' + scrapedthumbnail
        # Diccionario NUEVO en cada vuelta: si se reutilizara el mismo dict,
        # todos los items compartirían la última sinopsis (aliasing mutable)
        infoLabels = {'plot': findplot(scrapedurl)}
        itemlist.append(Item(action="findvideo",
                             channel=item.channel,
                             infoLabels=infoLabels,
                             title=scrapedtitle + ' (' + year + ')',
                             thumbnail=scrapedthumbnail,
                             url=scrapedurl
                             ))
        if item.opt == 1 and idx == 4:   # "Popular" muestra solo 4 títulos
            break
    if itemlist:
        pattern = '(?s)href="([^"]+)">Next.*?'
        next_page = scrapertools.find_single_match(data, pattern)
        if next_page:
            itemlist.append(Item(channel=item.channel,
                                 action="movies",
                                 title='Next >>>',
                                 url='https://yts.am' + next_page))
    return itemlist
def findplot(url):
    """Descarga la ficha de la película y devuelve su sinopsis (o '' si no hay)."""
    page = httptools.downloadpage(url).data
    return scrapertools.find_single_match(page, '(?s)<p class="hidden-xs">(.*?)</p>')
def findvideo(item):
    """Extrae de la ficha los enlaces torrent con su calidad y tamaño."""
    data = httptools.downloadpage(item.url).data
    patron = ('(?s)modal-quality.*?<span>(.*?)</span>'   # Calidad
              '.*?size">(.*?)</p>'                       # Tamaño / tipo
              '.*?href="([^"]+)" rel')                   # Enlace torrent
    results = []
    for quality, videoType, link in scrapertools.find_multiple_matches(data, patron):
        results.append(Item(channel=item.channel,
                            title=item.title + ' ' + quality + ' ' + videoType,
                            url=link,
                            thumbnail=item.thumbnail,
                            action='play',
                            server='torrent'))
    return results
def search(item, text):
    """Busca *text* en yts.am y devuelve la lista de resultados.

    Los espacios del texto se codifican como %20: la URL de yts no
    admite espacios literales, por lo que una búsqueda multipalabra
    rompería la petición. Cualquier fallo del scraping se registra y se
    devuelve lista vacía (comportamiento que espera la búsqueda global
    del addon, que no debe abortar por un canal).
    """
    logger.info('search: ' + text)
    try:
        item.url = 'https://yts.am/browse-movies/' + text.replace(" ", "%20") + '/all/all/0/latest'
        return movies(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []

View File

@@ -30,6 +30,22 @@
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"CAST",
"LAT",
"VO",
"VOS",
"VOSE"
]
},
{
"id": "timeout_downloadpage",
"type": "list",
@@ -66,30 +82,6 @@
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_torrent",
"type": "bool",
"label": "Incluir en Novedades - Torrent",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -14,6 +14,15 @@ from core.item import Item
from platformcode import config, logger
from core import tmdb
from lib import generictools
from channels import filtertools
from channels import autoplay
#IDIOMAS = {'CAST': 'Castellano', 'LAT': 'Latino', 'VO': 'Version Original'}
IDIOMAS = {'Castellano': 'CAST', 'Latino': 'LAT', 'Version Original': 'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['torrent']
host = 'https://zonatorrent.tv/'
@@ -33,20 +42,35 @@ def mainlist(item):
thumb_series = get_thumb("channels_tvshow.png")
thumb_buscar = get_thumb("search.png")
thumb_separador = get_thumb("next.png")
thumb_settings = get_thumb("setting_0.png")
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(Item(channel=item.channel, title="Películas", action="submenu", url=host, thumbnail=thumb_pelis, extra="peliculas"))
itemlist.append(Item(channel=item.channel, url=host, title="Series", action="submenu", thumbnail=thumb_series, extra="series"))
itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", url=host + "?s=", thumbnail=thumb_buscar, extra="search"))
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]Configuración:[/COLOR]", folder=False, thumbnail=thumb_separador))
itemlist.append(Item(channel=item.channel, action="configuracion", title="Configurar canal", thumbnail=thumb_settings))
autoplay.show_option(item.channel, itemlist) #Activamos Autoplay
return itemlist
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return
def submenu(item):
logger.info()
itemlist = []
item.extra2 = ''
thumb_cartelera = get_thumb("now_playing.png")
thumb_pelis_az = get_thumb("channels_movie_az.png")
@@ -71,7 +95,7 @@ def submenu(item):
itemlist.append(item.clone(title="Más vistas", action="listado", url=host + "/peliculas-mas-vistas-2/", url_plus=item.url_plus, thumbnail=thumb_popular, extra2="popular"))
itemlist.append(item.clone(title="Más votadas", action="listado", url=host + "/peliculas-mas-votadas/", url_plus=item.url_plus, thumbnail=thumb_popular, extra2="popular"))
itemlist.append(item.clone(title="Castellano", action="listado", url=host + "?s=spanish", url_plus=item.url_plus, thumbnail=thumb_spanish, extra2="CAST"))
itemlist.append(item.clone(title="Latino", action="listado", url=host + "?s=latino", url_plus=item.url_plus, thumbnail=thumb_latino, lextra2="LAT"))
itemlist.append(item.clone(title="Latino", action="listado", url=host + "?s=latino", url_plus=item.url_plus, thumbnail=thumb_latino, extra2="LAT"))
itemlist.append(item.clone(title="Subtitulado", action="listado", url=host + "?s=Subtitulado", url_plus=item.url_plus, thumbnail=thumb_pelis_vos, extra2="VOSE"))
else:
@@ -288,8 +312,6 @@ def listado(item):
title = scrapedtitle
title = title.replace("á", "a").replace("é", "e").replace("í", "i").replace("ó", "o").replace("ú", "u").replace("ü", "u").replace("�", "ñ").replace("ñ", "ñ").replace("&atilde;", "a").replace("&etilde;", "e").replace("&itilde;", "i").replace("&otilde;", "o").replace("&utilde;", "u").replace("&ntilde;", "ñ").replace("&#8217;", "'")
cnt_title += 1
item_local = item.clone() #Creamos copia de Item para trabajar
if item_local.tipo: #... y limpiamos
@@ -433,7 +455,14 @@ def listado(item):
#Guarda la variable temporal que almacena la info adicional del título a ser restaurada después de TMDB
item_local.title_subs = title_subs
itemlist.append(item_local.clone()) #Pintar pantalla
logger.debug(item.extra2)
#Ahora se filtra por idioma, si procede, y se pinta lo que vale
if config.get_setting('filter_languages', channel) > 0 and item.extra2 not in ['CAST', 'LAT', 'VO', 'VOS', 'VOSE']: #Si hay idioma seleccionado, se filtra
itemlist = filtertools.get_link(itemlist, item_local, list_language)
else:
itemlist.append(item_local.clone()) #Si no, pintar pantalla
cnt_title = len(itemlist) #Contador de líneas añadidas
#logger.debug(item_local)
@@ -458,6 +487,10 @@ def listado(item):
def findvideos(item):
logger.info()
itemlist = []
itemlist_t = [] #Itemlist total de enlaces
itemlist_f = [] #Itemlist de enlaces filtrados
if not item.language:
item.language = ['CAST'] #Castellano por defecto
matches = []
item.category = categoria
@@ -549,12 +582,26 @@ def findvideos(item):
item_local.action = "play" #Visualizar vídeo
item_local.server = "torrent" #Servidor Torrent
itemlist.append(item_local.clone()) #Pintar pantalla
itemlist_t.append(item_local.clone()) #Pintar pantalla, si no se filtran idiomas
# Requerido para FilterTools
if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra
itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío
#logger.debug("TORRENT: " + scrapedurl + " / title gen/torr: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName)
#logger.debug(item_local)
if len(itemlist_f) > 0: #Si hay entradas filtradas...
itemlist.extend(itemlist_f) #Pintamos pantalla filtrada
else:
if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ...
thumb_separador = get_thumb("next.png") #... pintamos todo con aviso
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador))
itemlist.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado
#Ahora tratamos los Servidores Directos
itemlist_t = [] #Itemlist total de enlaces
itemlist_f = [] #Itemlist de enlaces filtrados
titles = re.compile('data-TPlayerNv="Opt\d+">.*? <span>(.*?)</span></li>', re.DOTALL).findall(data)
urls = re.compile('id="Opt\d+"><iframe[^>]+src="([^"]+)"', re.DOTALL).findall(data)
@@ -625,14 +672,30 @@ def findvideos(item):
item_local.quality = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.quality)
item_local.quality = item_local.quality.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
item_local.action = "play" #Visualizar vídeo
item_local.server = server #Servidor Directo
item_local.action = "play" #Visualizar vídeo
item_local.server = server #Servidor Directo
itemlist.append(item_local.clone()) #Pintar pantalla
itemlist_t.append(item_local.clone()) #Pintar pantalla, si no se filtran idiomas
# Requerido para FilterTools
if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra
itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío
#logger.debug("DIRECTO: " server + ' / ' + enlace + " / title: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName)
#logger.debug(item_local)
if len(itemlist_f) > 0: #Si hay entradas filtradas...
itemlist.extend(itemlist_f) #Pintamos pantalla filtrada
else:
if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ...
thumb_separador = get_thumb("next.png") #... pintamos todo con aviso
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador))
itemlist.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado
# Requerido para AutoPlay
autoplay.start(itemlist, item) #Lanzamos Autoplay
return itemlist
@@ -856,11 +919,6 @@ def search(item, texto):
logger.info()
#texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return listado(item)
try:
item.url = item.url + texto

File diff suppressed because one or more lines are too long

View File

@@ -13,6 +13,7 @@ import sys
import urllib
import urlparse
import datetime
import time
from channelselector import get_thumb
from core import httptools
@@ -188,11 +189,11 @@ def update_title(item):
if new_item.contentSeason: #Restauramos el núm. de Temporada después de TMDB
item.contentSeason = new_item.contentSeason
if item.from_update:
if item.from_update: #Si la llamda es desde el menú del canal...
item.from_update = True
del item.from_update
platformtools.itemlist_update(item)
xlistitem = refresh_screen(item) #Refrescamos la pantallas con el nuevo Item
#Para evitar el "efecto memoria" de TMDB, se le llama con un título ficticio para que resetee los buffers
if item.contentSerieName:
new_item.infoLabels['tmdb_id'] = '289' #una serie no ambigua
@@ -208,6 +209,40 @@ def update_title(item):
return item
def refresh_screen(item):
logger.info()
"""
#### Compatibilidad con Kodi 18 ####
Refresca la pantalla con el nuevo Item después que haber establecido un dialogo que ha causado el cambio de Item
Crea un xlistitem para engañar a Kodi con la función xbmcplugin.setResolvedUrl FALSE
Entrada: item: El Item actualizado
Salida: xlistitem El xlistitem creado, por si resulta de alguna utilidad posterior
"""
try:
import xbmcplugin
import xbmcgui
xlistitem = xbmcgui.ListItem(path=item.url) #Creamos xlistitem por compatibilidad con Kodi 18
if config.get_platform(True)['num_version'] >= 16.0:
xlistitem.setArt({"thumb": item.contentThumbnail}) #Cargamos el thumb
else:
xlistitem.setThumbnailImage(item.contentThumbnail)
xlistitem.setInfo("video", item.infoLabels) #Copiamos infoLabel
xbmcplugin.setResolvedUrl(int(sys.argv[1]), False, xlistitem) #Preparamos el entorno para evitar error Kod1 18
time.sleep(1) #Dejamos tiempo para que se ejecute
except:
pass
platformtools.itemlist_update(item) #refrescamos la pantalla con el nuevo Item
return xlistitem
def post_tmdb_listado(item, itemlist):
logger.info()
itemlist_fo = []
@@ -297,6 +332,17 @@ def post_tmdb_listado(item, itemlist):
item_local.infoLabels['year'] = ''
item_local.infoLabels['aired'] = ''
#Si traía el TMDB-ID, pero no ha funcionado, lo reseteamos e intentamos de nuevo
if item_local.infoLabels['tmdb_id'] and not item_local.infoLabels['originaltitle']:
logger.error("*** TMDB-ID erroneo, reseteamos y reintentamos ***")
logger.error(item_local)
del item_local.infoLabels['tmdb_id'] #puede traer un TMDB-ID erroneo
try:
tmdb.set_infoLabels(item_local, True) #pasamos otra vez por TMDB
except:
pass
logger.error(item_local)
# Si TMDB no ha encontrado nada y hemos usado el año de la web, lo intentamos sin año
if not item_local.infoLabels['tmdb_id']:
if item_local.infoLabels['year']: #lo intentamos de nuevo solo si había año, puede que erroneo
@@ -383,7 +429,7 @@ def post_tmdb_listado(item, itemlist):
title = '%s [COLOR yellow][%s][/COLOR] [%s] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (title, str(item_local.infoLabels['year']), rating, item_local.quality, str(item_local.language))
else: #Si Titulos Inteligentes SÍ seleccionados:
title = title.replace("[", "-").replace("]", "-").replace(".", ",")
title = title.replace("[", "-").replace("]", "-").replace(".", ",").replace("GB", "G B").replace("Gb", "G b").replace("gb", "g b").replace("MB", "M B").replace("Mb", "M b").replace("mb", "m b")
#Limpiamos las etiquetas vacías
if item_local.infoLabels['episodio_titulo']:
@@ -393,9 +439,15 @@ def post_tmdb_listado(item, itemlist):
title = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', title).strip()
if item.category_new == "newest": #Viene de Novedades. Marcamos el título con el nombre del canal
title += ' -%s-' % scrapertools.find_single_match(item_local.url, 'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').capitalize()
if item_local.contentType == "movie":
item_local.contentTitle += ' -%s-' % scrapertools.find_single_match(item_local.url, 'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').capitalize()
if scrapertools.find_single_match(item_local.url, 'http.?\:\/\/(?:www.)?(\w+)\.\w+\/'):
title += ' -%s-' % scrapertools.find_single_match(item_local.url, 'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').capitalize()
else:
title += ' -%s-' % item_local.channel.capitalize()
if item_local.contentType == "movie":
if scrapertools.find_single_match(item_local.url, 'http.?\:\/\/(?:www.)?(\w+)\.\w+\/'):
item_local.contentTitle += ' -%s-' % scrapertools.find_single_match(item_local.url, 'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').capitalize()
else:
item_local.contentTitle += ' -%s-' % item_local.channel.capitalize()
elif "Episodio " in title:
if not item_local.contentSeason or not item_local.contentEpisodeNumber:
item_local.contentSeason, item_local.contentEpisodeNumber = scrapertools.find_single_match(title_add, 'Episodio (\d+)x(\d+)')
@@ -483,7 +535,7 @@ def post_tmdb_seasons(item, itemlist):
del item_season.season_colapse
title = '** Todas las Temporadas' #Agregamos título de TODAS las Temporadas (modo tradicional)
if item_season.infoLabels['number_of_episodes']: #Ponemos el núm de episodios de la Serie
title += ' [%s epi]' % str(item_season.infoLabels['number_of_episodes'])
title += ' [%sx%s epi]' % (str(item_season.infoLabels['number_of_seasons']), str(item_season.infoLabels['number_of_episodes']))
rating = '' #Ponemos el rating, si es diferente del de la Serie
if item_season.infoLabels['rating'] and item_season.infoLabels['rating'] != 0.0:
@@ -543,7 +595,7 @@ def post_tmdb_seasons(item, itemlist):
if not config.get_setting("unify"): #Si Titulos Inteligentes NO seleccionados:
item_local.title = '%s [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.title, item_local.quality, str(item_local.language))
else: #Lo arreglamos un poco para Unify
item_local.title = item_local.title.replace('[', '-').replace(']', '-').replace('.', ',').strip()
item_local.title = item_local.title.replace("[", "-").replace("]", "-").replace(".", ",").replace("GB", "G B").replace("Gb", "G b").replace("gb", "g b").replace("MB", "M B").replace("Mb", "M b").replace("mb", "m b")
item_local.title = item_local.title.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
#logger.debug(item_local)
@@ -795,6 +847,7 @@ def post_tmdb_episodios(item, itemlist):
item_local.title = item_local.title.replace(" []", "").strip()
item_local.title = re.sub(r'\s?\[COLOR \w+\]\[\[?-?\s?\]?\]\[\/COLOR\]', '', item_local.title).strip()
item_local.title = re.sub(r'\s?\[COLOR \w+\]-?\s?\[\/COLOR\]', '', item_local.title).strip()
item_local.title = item_local.title.replace("[", "-").replace("]", "-").replace(".", ",").replace("GB", "G B").replace("Gb", "G b").replace("gb", "g b").replace("MB", "M B").replace("Mb", "M b").replace("mb", "m b")
#Si la información de num. total de episodios de TMDB no es correcta, tratamos de calcularla
if num_episodios < item_local.contentEpisodeNumber:
@@ -996,7 +1049,15 @@ def post_tmdb_findvideos(item, itemlist):
#busco "duration" en infoLabels
tiempo = 0
if item.infoLabels['duration']:
tiempo = item.infoLabels['duration']
try:
if config.get_platform(True)['num_version'] < 18:
tiempo = item.infoLabels['duration']
elif xbmc.getCondVisibility('Window.IsMedia') == 1:
item.quality = re.sub(r'\s?\[\d+:\d+\ h]', '', item.quality)
else:
tiempo = item.infoLabels['duration']
except:
tiempo = item.infoLabels['duration']
elif item.contentChannel == 'videolibrary': #No hay, viene de la Videoteca? buscamos en la DB
#Leo de la BD de Kodi la duración de la película o episodio. En "from_fields" se pueden poner las columnas que se quiera
@@ -1212,7 +1273,7 @@ def get_torrent_size(url):
#si tiene múltiples archivos sumamos la longitud de todos
if not size:
check_video = scrapertools.find_multiple_matches(str(torrent["info"]["files"]), "'length': (\d+)}")
check_video = scrapertools.find_multiple_matches(str(torrent["info"]["files"]), "'length': (\d+).*?}")
sizet = sum([int(i) for i in check_video])
size = convert_size(sizet)
@@ -1651,6 +1712,8 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F
channel_alt = item.channel
if item.url:
channel_alt = scrapertools.find_single_match(item.url, 'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').lower() #Salvamos en nombre del canal o clone
if not channel_alt:
channel_alt = item.channel
channel = "'%s'" % channel_alt
category = ''
if channel_alt != 'videolibrary':

View File

@@ -134,13 +134,13 @@ def open_settings():
if settings_post['adult_aux_new_password1'] == settings_post['adult_aux_new_password2']:
set_setting('adult_password', settings_post['adult_aux_new_password1'])
else:
platformtools.dialog_ok(config.get_localized_string(60305),
config.get_localized_string(60306),
config.get_localized_string(60307))
platformtools.dialog_ok(get_localized_string(60305),
get_localized_string(60306),
get_localized_string(60307))
else:
platformtools.dialog_ok(config.get_localized_string(60305), config.get_localized_string(60309),
config.get_localized_string(60310))
platformtools.dialog_ok(get_localized_string(60305), get_localized_string(60309),
get_localized_string(60310))
# Deshacer cambios
set_setting("adult_mode", settings_pre.get("adult_mode", 0))
@@ -195,23 +195,23 @@ def get_setting(name, channel="", server="", default=None):
# Specific channel setting
if channel:
# logger.info("config.get_setting reading channel setting '"+name+"' from channel json")
# logger.info("get_setting reading channel setting '"+name+"' from channel json")
from core import channeltools
value = channeltools.get_channel_setting(name, channel, default)
# logger.info("config.get_setting -> '"+repr(value)+"'")
# logger.info("get_setting -> '"+repr(value)+"'")
return value
# Specific server setting
elif server:
# logger.info("config.get_setting reading server setting '"+name+"' from server json")
# logger.info("get_setting reading server setting '"+name+"' from server json")
from core import servertools
value = servertools.get_server_setting(name, server, default)
# logger.info("config.get_setting -> '"+repr(value)+"'")
# logger.info("get_setting -> '"+repr(value)+"'")
return value
# Global setting
else:
# logger.info("config.get_setting reading main setting '"+name+"'")
# logger.info("get_setting reading main setting '"+name+"'")
value = __settings__.getSetting(name)
if not value:
return default

View File

@@ -97,7 +97,9 @@ class Main(xbmcgui.WindowXMLDialog):
self.items = []
def onInit(self):
self.setCoordinateResolution(2)
#### Compatibilidad con Kodi 18 ####
if config.get_platform(True)['num_version'] < 18:
self.setCoordinateResolution(2)
for menuentry in MAIN_MENU.keys():
item = xbmcgui.ListItem(MAIN_MENU[menuentry]["label"])

View File

@@ -35,7 +35,10 @@ def encode_log(message=""):
def get_caller(message=None):
module = inspect.getmodule(inspect.currentframe().f_back.f_back)
module = module.__name__
if module == None:
module = "None"
else:
module = module.__name__
function = inspect.currentframe().f_back.f_back.f_code.co_name

View File

@@ -154,6 +154,13 @@ def render_items(itemlist, parent_item):
valid_genre = True
elif anime:
valid_genre = True
elif 'siguiente' in item.title.lower() and '>' in item.title:
item.thumbnail = get_thumb("next.png")
elif 'add' in item.action:
if 'pelicula' in item.action:
item.thumbnail = get_thumb("videolibrary_movie.png")
elif 'serie' in item.action:
item.thumbnail = get_thumb("videolibrary_tvshow.png")
if unify_enabled and parent_item.channel != 'alfavorites':
@@ -1071,8 +1078,8 @@ def play_torrent(item, xlistitem, mediaurl):
#### Compatibilidad con Kodi 18: evita cuelgues/cancelaciones cuando el .torrent se lanza desde pantalla convencional
if xbmc.getCondVisibility('Window.IsMedia'):
xbmcplugin.setResolvedUrl(int(sys.argv[1]), False, xlistitem) #Preparamos el entorno para evutar error Kod1 18
time.sleep(1) #Dejamos que se ejecute
xbmcplugin.setResolvedUrl(int(sys.argv[1]), False, xlistitem) #Preparamos el entorno para evitar error Kod1 18
time.sleep(1) #Dejamos tiempo para que se ejecute
mediaurl = urllib.quote_plus(item.url)
if ("quasar" in torrent_options[seleccion][1] or "elementum" in torrent_options[seleccion][1]) and item.infoLabels['tmdb_id']: #Llamada con más parámetros para completar el título
@@ -1083,17 +1090,17 @@ def play_torrent(item, xlistitem, mediaurl):
xbmc.executebuiltin("PlayMedia(" + torrent_options[seleccion][1] % mediaurl + ")")
#Seleccionamos que clientes torrent soportamos para el marcado de vídeos vistos
if "quasar" in torrent_options[seleccion][1] or "elementum" in torrent_options[seleccion][1]:
time_limit = time.time() + 150 #Marcamos el timepo máx. de buffering
while not is_playing() and time.time() < time_limit: #Esperamos mientra buffera
time.sleep(5) #Repetimos cada intervalo
#logger.debug(str(time_limit))
if item.strm_path and is_playing(): #Sólo si es de Videoteca
from platformcode import xbmc_videolibrary
xbmc_videolibrary.mark_auto_as_watched(item) #Marcamos como visto al terminar
#logger.debug("Llamado el marcado")
#Seleccionamos que clientes torrent soportamos para el marcado de vídeos vistos: asumimos que todos funcionan
#if "quasar" in torrent_options[seleccion][1] or "elementum" in torrent_options[seleccion][1]:
time_limit = time.time() + 150 #Marcamos el timepo máx. de buffering
while not is_playing() and time.time() < time_limit: #Esperamos mientra buffera
time.sleep(5) #Repetimos cada intervalo
#logger.debug(str(time_limit))
if item.strm_path and is_playing(): #Sólo si es de Videoteca
from platformcode import xbmc_videolibrary
xbmc_videolibrary.mark_auto_as_watched(item) #Marcamos como visto al terminar
#logger.debug("Llamado el marcado")
if seleccion == 1:
from platformcode import mct

View File

@@ -40,7 +40,9 @@ class Recaptcha(xbmcgui.WindowXMLDialog):
self.imagen = kwargs.get("imagen")
def onInit(self):
self.setCoordinateResolution(2)
#### Compatibilidad con Kodi 18 ####
if config.get_platform(True)['num_version'] < 18:
self.setCoordinateResolution(2)
self.update_window()
def onClick(self, control):

View File

@@ -469,6 +469,7 @@ class SettingsWindow(xbmcgui.WindowXMLDialog):
self.ok_enabled = False
self.default_enabled = False
#### Compatibilidad con Kodi 18 ####
if config.get_platform(True)['num_version'] < 18:
if xbmcgui.__version__ == "1.2":
self.setCoordinateResolution(1)

View File

@@ -184,10 +184,12 @@ class InfoWindow(xbmcgui.WindowXMLDialog):
self.scraper = Tmdb
def onInit(self):
if xbmcgui.__version__ == "1.2":
self.setCoordinateResolution(1)
else:
self.setCoordinateResolution(5)
#### Compatibilidad con Kodi 18 ####
if config.get_platform(True)['num_version'] < 18:
if xbmcgui.__version__ == "1.2":
self.setCoordinateResolution(1)
else:
self.setCoordinateResolution(5)
# Ponemos el título y las imagenes
self.getControl(10002).setLabel(self.caption)

Binary file not shown.

After

Width:  |  Height:  |  Size: 18 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 10 KiB

After

Width:  |  Height:  |  Size: 27 KiB

View File

@@ -4,7 +4,7 @@
<setting id="player_mode" type="enum" values="Direct|SetResolvedUrl|Built-In|Download and Play" label="30044" default="0"/>
<setting id="default_action" type="enum" lvalues="30006|30007|30008" label="30005" default="0"/>
<setting id="thumbnail_type" type="enum" lvalues="30011|30012|30200" label="30010" default="2"/>
<setting id="channel_language" type="labelenum" values="all|cast|lat|ita" label="30019" default="all"/>
<setting id="channel_language" type="labelenum" values="all|cast|lat" label="30019" default="all"/>
<setting id="trakt_sync" type="bool" label="70109" default="false"/>
<setting id="forceview" type="bool" label="30043" default="false"/>
<setting id="faster_item_serialization" type="bool" label="30300" default="false"/>
@@ -53,52 +53,52 @@
<setting id="unify" type="bool" label="70134" default="false"/>
<setting id="title_color" type="bool" label="70135" default="false" visible="eq(-1,true)"/>
<setting id="movie_color" type="labelenum" label="70137"
lvalues="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
default="white" visible="eq(-1,true)+eq(-2,true)"/>
<setting id="tvshow_color" type="labelenum" label="30123"
lvalues="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
default="white" visible="eq(-2,true)+eq(-3,true)"/>
<setting id="year_color" type="labelenum" label="60232"
lvalues="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
default="white" visible="eq(-3,true)+eq(-4,true)"/>
<setting id="rating_1_color" type="labelenum" label="70138"
lvalues="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
default="white" visible="eq(-4,true)+eq(-5,true)"/>
<setting id="rating_2_color" type="labelenum" label="70139"
lvalues="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
default="white" visible="eq(-5,true)+eq(-6,true)"/>
<setting id="rating_3_color" type="labelenum" label="70140"
lvalues="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
default="white" visible="eq(-6,true)+eq(-7,true)"/>
<setting id="quality_color" type="labelenum" label="70141"
lvalues="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
default="white" visible="eq(-7,true)+eq(-8,true)"/>
<setting id="cast_color" type="labelenum" label="59980"
lvalues="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
default="white" visible="eq(-8,true)+eq(-9,true)"/>
<setting id="lat_color" type="labelenum" label="59981"
lvalues="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
default="white" visible="eq(-9,true)+eq(-10,true)"/>
<setting id="vose_color" type="labelenum" label="70142"
lvalues="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
default="white" visible="eq(-10,true)+eq(-11,true)"/>
<setting id="vos_color" type="labelenum" label="70143"
lvalues="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
default="white" visible="eq(-11,true)+eq(-12,true)"/>
<setting id="vo_color" type="labelenum" label="70144"
lvalues="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
default="white" visible="eq(-12,true)+eq(-13,true)"/>
<setting id="server_color" type="labelenum" label="70145"
lvalues="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
default="white" visible="eq(-13,true)+eq(-14,true)"/>
<setting id="library_color" type="labelenum" label="70146"
lvalues="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
default="white" visible="eq(-14,true)+eq(-15,true)"/>
<setting id="update_color" type="labelenum" label="70147"
lvalues="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
default="white" visible="eq(-15,true)+eq(-16,true)"/>
<setting id="no_update_color" type="labelenum" label="70148"
lvalues="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
default="white" visible="eq(-16,true)+eq(-17,true)"/>
</category>
<category label="70149">

View File

@@ -33,12 +33,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
url = scrapertools.find_single_match(unpacked, "(?:src):\\\\'([^\\\\]+.mp4)\\\\'")
itemlist.append([".mp4" + " [powvideo]", decode_video_url(url)])
from lib import alfaresolver
itemlist.append([".mp4" + " [powvideo]", alfaresolver.decode_video_url(url, data)])
itemlist.sort(key=lambda x: x[0], reverse=True)
return itemlist
def decode_video_url(url):
    """De-obfuscate a powvideo media URL.

    The URL embeds one long (40+ chars) alphanumeric token.  The real
    token is the obfuscated one reversed with its first character
    dropped; the URL is returned with that substitution applied.
    Raises IndexError if no such token is present.
    """
    # Grab the first long alphanumeric run embedded in the URL.
    token = re.findall('[0-9a-z]{40,}', url, re.IGNORECASE)[0]
    # Reverse it and drop the leading character of the reversed form.
    fixed = token[::-1][1:]
    # token is purely alphanumeric, so using it as a regex pattern is safe.
    return re.sub(token, fixed, url)

View File

@@ -4,7 +4,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "rapidvideo.(?:org|com)/(?:\\?v=|e/|embed/|v/)([A-z0-9]+)",
"pattern": "rapidvideo.(?:org|com)/(?:\\?v=|e/|embed/|v/|)([A-z0-9]+)",
"url": "https://www.rapidvideo.com/e/\\1"
}
]

View File

@@ -38,13 +38,10 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
unpacked = jsunpack.unpack(packed)
url = scrapertools.find_single_match(unpacked, '(http[^,]+\.mp4)')
itemlist.append([".mp4" + " [streamplay]", decode_video_url(url)])
from lib import alfaresolver
itemlist.append([".mp4" + " [streamplay]", alfaresolver.decode_video_url(url, data)])
itemlist.sort(key=lambda x: x[0], reverse=True)
return itemlist
def decode_video_url(url):
    """De-obfuscate a streamplay media URL.

    The URL embeds one long (40+ chars) alphanumeric token.  The real
    token is obtained by reversing the obfuscated one and removing the
    characters at positions 4 and 5 of the reversed string; the URL is
    returned with that substitution applied.  Raises IndexError if no
    such token is present.
    """
    # First 40+ character alphanumeric run is the obfuscated token.
    scrambled = re.findall('[0-9a-z]{40,}', url, re.IGNORECASE)[0]
    mirrored = scrambled[::-1]
    # Splice out characters 4-5 of the reversed token to recover the real one.
    # scrambled is purely alphanumeric, so using it as a regex pattern is safe.
    return re.sub(scrambled, mirrored[:4] + mirrored[6:], url)