Merge remote-tracking branch 'alfa-addon/master' into fixes

Author: Unknown
Date: 2018-10-15 14:41:56 -03:00
24 changed files with 1332 additions and 2930 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<addon id="plugin.video.alfa" name="Alfa" version="2.7.7" provider-name="Alfa Addon">
+<addon id="plugin.video.alfa" name="Alfa" version="2.7.8" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,14 +19,11 @@
</assets>
<news>[B]These are the changes in this version:[/B]
[COLOR green][B]Added channels and fixes[/B][/COLOR]
¤ cinecalidad ¤ verpelis
¤ pepecine ¤ pelispedia
¤ pelisplusco ¤ seriesblancoxyz
¤ seriesdanko ¤ pedropolis
¤ pelisplanet ¤ danimados
¤ fembed ¤ upvid
¤ megadede ¤ crunchyroll
¤ pelismagnet
¤ cinehindi ¤ repelis
¤ rexpelis ¤ yape
¤ bloghorror ¤ pelkex
¤ documaniatv ¤ mejortorrent
¤ internal fixes
</news>
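The only functional change in this file is the version bump 2.7.7 → 2.7.8. As an aside, a bump like this can be sanity-checked with a short standalone script (an editor's sketch, not part of the addon):

    import xml.etree.ElementTree as ET

    # Parse the addon manifest and read the version attribute
    root = ET.parse("addon.xml").getroot()
    assert root.get("id") == "plugin.video.alfa"
    print(root.get("version"))  # expect "2.7.8" after this commit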

View File

@@ -3,25 +3,34 @@
import re
import urlparse
from channelselector import get_thumb
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
IDIOMAS = {'Hindi': 'Hindi'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['openload', 'netutv']
host = "http://www.cinehindi.com/"
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="genero", title="Generos", url=host))
itemlist.append(Item(channel=item.channel, action="lista", title="Novedades", url=host))
itemlist.append(Item(channel=item.channel, action="genero", title="Generos", url=host, thumbnail = get_thumb("genres", auto = True)))
itemlist.append(Item(channel=item.channel, action="lista", title="Novedades", url=host, thumbnail = get_thumb("newest", auto = True)))
itemlist.append(Item(channel=item.channel, action="proximas", title="Próximas Películas",
url=urlparse.urljoin(host, "proximamente")))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=urlparse.urljoin(host, "?s=")))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=urlparse.urljoin(host, "?s="), thumbnail = get_thumb("search", auto = True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -50,9 +59,7 @@ def search(item, texto):
def proximas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data) # Strip tabs, double spaces, line breaks, etc.
patron = 'class="item">.*?' # Every movie item on this site starts with this
@@ -77,40 +84,36 @@ def proximas(item):
item.url = next_page_url + 'proximamente/page/' + str(i) + '/'
itemlist.append(Item(channel=item.channel, action="proximas", title=">> Página siguiente", url=item.url,
thumbnail='https://s32.postimg.cc/4zppxf5j9/siguiente.png'))
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data) # Strip tabs, double spaces, line breaks, etc.
patron = 'class="item">.*?' # Every movie item on this site starts with this
patron += '<a href="([^"]+).*?' # scrapedurl
patron += '<img src="([^"]+).*?' # scrapedthumbnail
patron += 'alt="([^"]+).*?' # scrapedtitle
-patron += '<span class="ttx">([^<]+).*?' # scrapedplot
-patron += '<div class="fixyear">(.*?)</span></div></div>' # scrapedfixyear
+patron += '<div class="fixyear">(.*?)</span></div><' # scrapedfixyear
matches = scrapertools.find_multiple_matches(data, patron)
-for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot, scrapedfixyear in matches:
+for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedfixyear in matches:
patron = '<span class="year">([^<]+)' # scrapedyear
scrapedyear = scrapertools.find_single_match(scrapedfixyear, patron)
scrapedtitle = scrapedtitle.replace(scrapertools.find_single_match(scrapedtitle,'\(\d{4}\)'),'').strip()
title = scrapedtitle
if scrapedyear:
-scrapedtitle += ' (%s)' % (scrapedyear)
+title += ' (%s)' % (scrapedyear)
item.infoLabels['year'] = int(scrapedyear)
patron = '<span class="calidad2">([^<]+).*?' # scrapedquality
scrapedquality = scrapertools.find_single_match(scrapedfixyear, patron)
if scrapedquality:
-scrapedtitle += ' [%s]' % (scrapedquality)
+title += ' [%s]' % (scrapedquality)
itemlist.append(
-item.clone(title=scrapedtitle, url=scrapedurl, plot=scrapedplot, action="findvideos", extra=scrapedtitle,
-show=scrapedtitle, thumbnail=scrapedthumbnail, contentType="movie", context=["buscar_trailer"]))
+item.clone(title=title, url=scrapedurl, action="findvideos", extra=scrapedtitle,
+contentTitle=scrapedtitle, thumbnail=scrapedthumbnail, contentType="movie", context=["buscar_trailer"]))
tmdb.set_infoLabels(itemlist)
scrapertools.printMatches(itemlist)
# Pagination
patron_genero = '<h1>([^"]+)<\/h1>'
genero = scrapertools.find_single_match(data, patron_genero)
@@ -118,9 +121,7 @@ def lista(item):
patron = "<a rel='nofollow' class=previouspostslink' href='([^']+)'>Siguiente "
else:
patron = "<span class='current'>.+?href='(.+?)'>"
next_page_url = scrapertools.find_single_match(data, patron)
if next_page_url != "":
item.url = next_page_url
itemlist.append(Item(channel=item.channel, action="lista", title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=next_page_url,
@@ -130,22 +131,34 @@ def lista(item):
def findvideos(item):
logger.info()
itemlist = []
itemlist1 = []
data = httptools.downloadpage(item.url).data
itemlist1.extend(servertools.find_video_items(data=data))
patron_show = '<div class="data"><h1 itemprop="name">([^<]+)<\/h1>'
show = scrapertools.find_single_match(data, patron_show)
for videoitem in itemlist1:
videoitem.channel = item.channel
videoitem.infoLabels = item.infoLabels
for i in range(len(itemlist1)):
if not 'youtube' in itemlist1[i].title:
itemlist.append(itemlist1[i])
tmdb.set_infoLabels(itemlist, True)
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required by AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentChannel!='videolibrary':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=show))
return itemlist
def play(item):
logger.info()
item.thumbnail = item.contentThumbnail
return [item]
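The substantive change in lista() above is that the decorated display title and the clean search title are now kept apart: contentTitle gets the bare name so the tmdb.set_infoLabels() lookup can match it, while year and quality tags only decorate the display title. A minimal illustration (the values are invented):

    scrapedtitle = 'Dangal'                     # clean name: goes in contentTitle for the TMDB query
    title = scrapedtitle + ' (2016)' + ' [HD]'  # decorated name: display only
    # The old code appended the tags to scrapedtitle itself, so TMDB was
    # queried with "Dangal (2016) [HD]" and the lookup could fail.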

View File

@@ -0,0 +1,79 @@
{
"id": "documaniatv",
"name": "DocumaniaTV",
"active": true,
"adult": false,
"language": ["cast"],
"banner": "",
"thumbnail": "https://www.documaniatv.com/uploads/xcustom-logo.png.pagespeed.ic.lxJKR_lQE9.webp",
"version": 1,
"categories": [
"documentary",
"vos",
"direct",
"torrent"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"CAST",
"LAT",
"VO",
"VOS",
"VOSE"
]
},
{
"id": "timeout_downloadpage",
"type": "list",
"label": "Timeout (segs.) en descarga de páginas o verificación de servidores",
"default": 5,
"enabled": true,
"visible": true,
"lvalues": [
"None",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10"
]
},
{
"id": "include_in_newest_documentales",
"type": "bool",
"label": "Incluir en Novedades - Documentales",
"default": true,
"enabled": true,
"visible": true
}
]
}
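These settings are read back by the channel module through config.get_setting (see documaniatv.py below); note that for a "list" control the stored value is the index into lvalues, not the label. A sketch of the consumer side, assuming the standard Alfa config API shown in this diff:

    from platformcode import config

    channel = "documaniatv"
    modo_grafico = config.get_setting('modo_grafico', channel)     # bool setting
    timeout = config.get_setting('timeout_downloadpage', channel)  # list setting: index into lvalues, default 5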

View File

@@ -0,0 +1,792 @@
# -*- coding: utf-8 -*-
import re
import sys
import urllib
import urlparse
import time
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import tmdb
from lib import generictools
from channels import filtertools
from channels import autoplay
#IDIOMAS = {'CAST': 'Castellano', 'LAT': 'Latino', 'VO': 'Version Original'}
IDIOMAS = {'Castellano': 'CAST', 'Latino': 'LAT', 'Version Original': 'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['directo']
host = 'https://www.documaniatv.com/'
channel = "documaniatv"
categoria = channel.capitalize()
__modo_grafico__ = config.get_setting('modo_grafico', channel)
timeout = config.get_setting('timeout_downloadpage', channel)
def mainlist(item):
logger.info()
itemlist = []
thumb_docus = get_thumb("channels_documentary.png")
thumb_series = get_thumb("channels_tvshow.png")
thumb_buscar = get_thumb("search.png")
thumb_separador = get_thumb("next.png")
thumb_settings = get_thumb("setting_0.png")
thumb_cartelera = get_thumb("now_playing.png")
thumb_pelis_vos = get_thumb("channels_vos.png")
thumb_popular = get_thumb("popular.png")
thumb_generos = get_thumb("genres.png")
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(Item(channel=item.channel, title="Novedades", action="listado", url=host + "newvideos.html", thumbnail=thumb_docus, extra="novedades"))
itemlist.append(Item(channel=item.channel, title="Los Más Vistos", action="listado", url=host + "topvideos.html", thumbnail=thumb_popular, extra="populares"))
itemlist.append(Item(channel=item.channel, title="Por Géneros", action="categorias", url=host + "categorias-y-canales.html", thumbnail=thumb_generos, extra="categorias"))
itemlist.append(Item(channel=item.channel, title="Series", action="listado", url=host + "top-series-documentales.html", thumbnail=thumb_series, extra="series"))
itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", url=host + "search.php?keywords=", thumbnail=thumb_buscar, extra="search"))
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]Configuración:[/COLOR]", folder=False, thumbnail=thumb_separador))
itemlist.append(Item(channel=item.channel, action="configuracion", title="Configurar canal", thumbnail=thumb_settings))
autoplay.show_option(item.channel, itemlist) # Enable AutoPlay
return itemlist
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return
def categorias(item):
logger.info()
itemlist = []
data = ''
try:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url, timeout=timeout).data)
data = unicode(data, "utf-8", errors="replace").encode("utf-8")
except:
pass
patron = '<a href="([^"]+)" title="([^"]+)">'
# Check that a page was downloaded and that it has the expected structure
if not data or not scrapertools.find_single_match(data, patron):
item = generictools.web_intervenida(item, data) # Check whether the site has been taken down
if item.intervencion: # It has been shut down by court order
for clone_inter, autoridad in item.intervencion:
thumb_intervenido = get_thumb(autoridad)
itemlist.append(item.clone(action='', title="[COLOR yellow]" + clone_inter.capitalize() + ': [/COLOR]' + intervenido_judicial + '. Reportar el problema en el foro', thumbnail=thumb_intervenido))
return itemlist # Bail out
logger.error("ERROR 01: SUBMENU: La Web no responde o ha cambiado de URL: " + item.url + data)
if not data: # Nothing was found; bail out
itemlist.append(item.clone(action='', title=item.category + ': ERROR 01: SUBMENU: La Web no responde o ha cambiado de URL. Si la Web está activa, reportar el error con el log'))
return itemlist # no more data; something is wrong, render what we have
matches = re.compile(patron, re.DOTALL).findall(data)
if not matches:
logger.error("ERROR 02: SUBMENU: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.category + ': ERROR 02: SUBMENU: Ha cambiado la estructura de la Web. Reportar el error con el log'))
return itemlist # no more data; something is wrong, render what we have
#logger.debug(matches)
for scrapedurl, scrapedtitle in matches:
if 'series documentales' in scrapedtitle.lower():
continue
itemlist.append(item.clone(action="listado", title=scrapedtitle.capitalize().strip(), url=scrapedurl))
return itemlist
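categorias(), listado(), findvideos() and episodios() all repeat the same download-and-normalize idiom. A sketch of it factored into a standalone helper (the name clean_page is hypothetical; Python 2, as in the rest of the channel):

    import re
    from core import httptools

    def clean_page(url, timeout=5):
        # Download, strip newlines/tabs/double spaces/HTML comments,
        # and normalize to UTF-8. Returns '' when the site is down,
        # mirroring the try/except pattern used throughout this file.
        try:
            data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(url, timeout=timeout).data)
            return unicode(data, "utf-8", errors="replace").encode("utf-8")
        except:
            return ''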
def listado(item):
logger.info()
itemlist = []
item.category = categoria
#logger.debug(item)
curr_page = 1 # Initial page on the website
curr_page_foot = 1 # Initial page in Alfa
last_page = 99999 # Initial "last page" sentinel
last_page_foot = 1 # Initial last page (footer)
cnt_tot = 40 # Maximum number of items per page
cnt_title = 0 # Counter of lines inserted into itemlist
cnt_title_tot = 0 # Counter of lines inserted into itemlist, running total
if item.curr_page:
curr_page = int(item.curr_page) # If carried over from a previous pass, use it
del item.curr_page # ... then drop it
if item.curr_page_foot:
curr_page_foot = int(item.curr_page_foot) # If carried over from a previous pass, use it
del item.curr_page_foot # ... then drop it
if item.last_page:
last_page = int(item.last_page) # If carried over from a previous pass, use it
del item.last_page # ... then drop it
if item.last_page_foot:
last_page_foot = int(item.last_page_foot) # If carried over from a previous pass, use it
del item.last_page_foot # ... then drop it
if item.cnt_tot:
cnt_tot = int(item.cnt_tot) # If carried over from a previous pass, use it
del item.cnt_tot # ... then drop it
if item.cnt_title_tot:
cnt_title_tot = int(item.cnt_title_tot) # If carried over from a previous pass, use it
del item.cnt_title_tot # ... then drop it
inicio = time.time() # Keep the whole process within a reasonable time
fin = inicio + 10 # After this many seconds, render what we have
timeout_search = timeout # Timeout for downloads
if item.extra == 'search':
timeout_search = timeout * 2 # Slightly longer timeout for searches
if timeout_search < 5:
timeout_search = 5 # ... but never below 5 seconds
if not item.extra2: # If coming from Catalogue or Alphabet
item.extra2 = ''
next_page_url = item.url
# Max number of lines TMDB allows. Max 10 seconds per itemlist so performance does not degrade
while cnt_title < cnt_tot and curr_page <= last_page and fin > time.time():
# Download the page
data = ''
try:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)|&nbsp;", "", httptools.downloadpage(next_page_url, timeout=timeout_search).data)
data = unicode(data, "utf-8", errors="replace").encode("utf-8")
except:
pass
if not data: # If the site is down, exit without raising an error
logger.error("ERROR 01: LISTADO: La Web no responde o ha cambiado de URL: " + item.url + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: LISTADO: La Web no responde o ha cambiado de URL. Si la Web está activa, reportar el error con el log'))
break # no more data; something is wrong, render what we have
# Pattern for everything except Series
patron = '<span class="pm-label-duration">(.*?)<\/span>.*?<a href="([^"]+)" title="([^"]+)">.*?data-echo="([^"]+)"'
# If coming from Series, use a specialized pattern
if item.extra == 'series':
patron = '(?:<span class="pm-label-duration">(.*?)<\/span>.*?)?<a href="([^"]+)" title="([^"]+)">.*?<img src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
if not matches and not 'Lo siento, tu búsqueda no arrojó ningún resultado, intenta con otras palabras.' in data: # error
item = generictools.web_intervenida(item, data) # Check whether the site has been taken down
if item.intervencion: # It has been shut down by court order
item, itemlist = generictools.post_tmdb_episodios(item, itemlist) # Call the helper that renders the error
return itemlist # Bail out
logger.error("ERROR 02: LISTADO: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: LISTADO: Ha cambiado la estructura de la Web. Reportar el error con el log'))
break # no more data; something is wrong, render what we have
matches_len = len(matches)
if matches_len > cnt_title_tot and cnt_title_tot > 0:
matches = matches[cnt_title_tot:]
#logger.debug("PATRON: " + patron)
#logger.debug(matches)
#logger.debug(data)
# Find the pagination URL and the last page
data_page = scrapertools.find_single_match(data, '<ul class="pagination pagination-sm pagination-arrows">.*?<\/li><\/ul><\/div><\/div> <\/div>')
if item.extra == 'series':
patron = '>(\d+)?<\/a><\/li><li class=""><a href="([^"]+-(\d+).html)"><i class="fa fa-arrow-right"><\/i><\/a><\/li><\/ul><\/div><\/div>\s?<\/div>'
elif item.extra == 'categorias':
patron = '>(\d+)?<\/a><\/li><li class=""><a href="([^"]+-(\d+)-date.html)">&raquo;<\/a><\/li><\/ul><\/div><\/div>\s?<\/div>'
if not scrapertools.find_single_match(data, patron):
patron = '>(\d+)?<\/a><\/li><li class=""><a href="([^"]+page-(\d+)\/)">&raquo;<\/a><\/li><\/ul><\/div><\/div>\s?<\/div>'
else:
patron = '>(\d+)?<\/a><\/li><li class=""><a href="([^"]+&page=(\d+))">&raquo;<\/a><\/li><\/ul><\/div><\/div>\s?<\/div>'
# If the website's page is bigger than the channel's page, set the limits
if item.extra == 'novedades': cnt_tot = matches_len
elif item.extra == 'populares': cnt_tot = 25
elif item.extra == 'categorias': cnt_tot = matches_len
elif item.extra == 'series': cnt_tot = 25
elif item.extra == 'search': cnt_tot = matches_len
else: cnt_tot = 40
if last_page == 99999: # Still the initial sentinel: look up the real value
# Look for the page footer
try:
last_page, next_page_url, next_page = scrapertools.find_single_match(data, patron)
last_page = int(last_page)
curr_page = int(next_page)-1
next_page_url = urlparse.urljoin(host, next_page_url)
except: # If not found, fall back to 1
logger.error('ERROR 03: LISTADO: Al obtener la paginación: ' + patron)
curr_page = 1
last_page = 0
if item.extra == 'series':
next_page_url = item.url
else:
next_page_url = item.url + '?&page=1'
# Compute the total page count when the site's page is larger than the channel's
last_page_foot = last_page
if matches_len > cnt_tot:
if last_page == 0:
last_page = 1
if last_page_foot == 0:
last_page_foot = 1
if item.extra == 'series':
last_page_foot = last_page_foot * (100 / cnt_tot)
else:
last_page_foot = last_page_foot * (matches_len / cnt_tot)
# Build the URL of the next page
if last_page > 1 or last_page_foot > 1:
curr_page_foot += 1 # Point at the next page already
if item.extra == 'series':
if cnt_title_tot + cnt_tot >= matches_len:
curr_page += 1 # Point at the next page already
cnt_title_tot = 0 - len(matches)
if len(matches) < cnt_tot: # If another page is about to load, do not count this one
curr_page_foot -= 1 # Step back to the current page
next_page_url = re.sub(r'(?:-\d+)?.html', '-%s.html' % curr_page, next_page_url)
item.url = next_page_url
else:
next_page_url = item.url
elif item.extra == 'categorias':
curr_page += 1 # Point at the next page already
if scrapertools.find_single_match(next_page_url, '(?:-\d+)-date.html'):
next_page_url = re.sub(r'(?:-\d+)-date.html', '-%s-date.html' % curr_page, next_page_url)
else:
next_page_url = re.sub(r'\/page-\d+', '/page-%s' % curr_page, next_page_url)
elif item.extra == 'populares':
next_page_url = item.url
else:
curr_page += 1 # Point at the next page already
next_page_url = re.sub(r'page=\d+', 'page=%s' % curr_page, next_page_url)
#logger.debug('curr_page: ' + str(curr_page) + ' / last_page: ' + str(last_page) + ' / url: ' + next_page_url + ' / cnt_title: ' + str(cnt_title) + ' / cnt_title_tot: ' + str(cnt_title_tot) + ' / cnt_tot: ' + str(cnt_tot) + ' / matches_len: ' + str(matches_len))
# Start processing the matches
for scrapedduration, scrapedurl, scrapedtitle, scrapedthumb in matches:
title = scrapedtitle
title = title.replace("á", "a").replace("é", "e").replace("í", "i").replace("ó", "o").replace("ú", "u").replace("ü", "u").replace("�", "ñ").replace("ñ", "ñ").replace("&atilde;", "a").replace("&etilde;", "e").replace("&itilde;", "i").replace("&otilde;", "o").replace("&utilde;", "u").replace("&ntilde;", "ñ").replace("&#8217;", "'")
item_local = item.clone() # Work on a copy of the Item
if item_local.tipo: # ... and clean it up
del item_local.tipo
if item_local.totalItems:
del item_local.totalItems
if item_local.post_num:
del item_local.post_num
if item_local.intervencion:
del item_local.intervencion
if item_local.viewmode:
del item_local.viewmode
item_local.text_bold = True
del item_local.text_bold
item_local.text_color = True
del item_local.text_color
if item_local.url_plus:
del item_local.url_plus
title_subs = [] # list to stash important info
item_local.language = [] # initialize language
item_local.quality = "" # initialize quality
item_local.url = scrapedurl # store the url
item_local.thumbnail = scrapedthumb # store the thumbnail
if channel not in item_local.thumbnail: # if the thumb is obfuscated, skip it
item_local.thumbnail = get_thumb("channels_tvshow.png") # ... and fall back to the Series default
item_local.context = "['buscar_trailer']"
item_local.contentType = "movie" # movies by default
item_local.action = "findvideos"
# Detect the series formats
if '/top-series' in scrapedurl or item_local.extra == 'series':
item_local.contentType = "tvshow"
item_local.action = "episodios"
# Look for extra quality tags
if "3d" in title.lower() and not "3d" in item_local.quality.lower():
if item_local.quality:
item_local.quality += " 3D"
else:
item_local.quality = "3D"
title = re.sub('3D', '', title, flags=re.IGNORECASE)
title = title.replace('[]', '')
if item_local.quality:
item_local.quality += ' %s' % scrapertools.find_single_match(title, '\[(.*?)\]')
else:
item_local.quality = '%s' % scrapertools.find_single_match(title, '\[(.*?)\]')
# Detect languages
if ("latino" in scrapedurl.lower() or "latino" in title.lower()) and "LAT" not in item_local.language:
item_local.language += ['LAT']
elif ('subtitulado' in scrapedurl.lower() or 'subtitulado' in title.lower() or 'vose' in title.lower()) and "VOSE" not in item_local.language:
item_local.language += ['VOSE']
elif ('version-original' in scrapedurl.lower() or 'version original' in title.lower()) and "VO" not in item_local.language:
item_local.language += ['VO']
if item_local.language == []:
item_local.language = ['CAST']
# Capture interesting info to restore after the TMDB pass
if scrapertools.find_single_match(title, '[m|M].*?serie'):
title = re.sub(r'[m|M]iniserie', '', title)
title_subs += ["Miniserie"]
if scrapertools.find_single_match(title, '[s|S]aga'):
title = re.sub(r'[s|S]aga', '', title)
title_subs += ["Saga"]
if scrapertools.find_single_match(title, '[c|C]olecc'):
title = re.sub(r'[c|C]olecc...', '', title)
title_subs += ["Colección"]
if "duolog" in title.lower():
title_subs += ["[Saga]"]
title = title.replace(" Duologia", "").replace(" duologia", "").replace(" Duolog", "").replace(" duolog", "")
if "trilog" in title.lower():
title_subs += ["[Saga]"]
title = title.replace(" Trilogia", "").replace(" trilogia", "").replace(" Trilog", "").replace(" trilog", "")
if "extendida" in title.lower() or "v.e." in title.lower()or "v e " in title.lower():
title_subs += ["[V. Extendida]"]
title = title.replace("Version Extendida", "").replace("(Version Extendida)", "").replace("V. Extendida", "").replace("VExtendida", "").replace("V Extendida", "").replace("V.Extendida", "").replace("V Extendida", "").replace("V.E.", "").replace("V E ", "").replace("V:Extendida", "")
# Set the year to '-'
item_local.infoLabels["year"] = '-'
# Strip unnecessary junk from the title
title = re.sub(r'TV|Online|Spanish|Torrent|en Espa\xc3\xb1ol|Español|Latino|Subtitulado|Blurayrip|Bluray rip|\[.*?\]|R2 Pal|\xe3\x80\x90 Descargar Torrent \xe3\x80\x91|Completa|Temporada|Descargar|Torren', '', title, flags=re.IGNORECASE)
title = title.replace("Dual", "").replace("dual", "").replace("Subtitulada", "").replace("subtitulada", "").replace("Subt", "").replace("subt", "").replace("(Proper)", "").replace("(proper)", "").replace("Proper", "").replace("proper", "").replace("#", "").replace("(Latino)", "").replace("Latino", "").replace("LATINO", "").replace("Spanish", "").replace("Trailer", "").replace("Audio", "")
# Finish cleaning the title
title = re.sub(r'\??\s?\d*?\&.*', '', title)
title = re.sub(r'[\(|\[]\s+[\)|\]]', '', title)
title = title.replace('()', '').replace('[]', '').strip().lower().title()
item_local.from_title = title.strip().lower().title() # Keep this tag for possible title disambiguation
# Save the title according to the content type
if item_local.contentType == "movie":
item_local.contentTitle = title.strip().lower().title()
else:
item_local.contentSerieName = title.strip().lower().title()
item_local.title = title.strip().lower().title()
# Append the duration to the quality
if scrapedduration:
if item_local.quality:
item_local.quality += ' [%s]' % scrapedduration
else:
item_local.quality = '[%s]' % scrapedduration
# Save the temp variable holding extra title info, to be restored after the TMDB pass
item_local.title_subs = title_subs
# Filter by language if applicable, then render what qualifies
if config.get_setting('filter_languages', channel) > 0: # If a language is selected, filter
itemlist = filtertools.get_link(itemlist, item_local, list_language)
else:
itemlist.append(item_local.clone()) # Otherwise, render as-is
cnt_title = len(itemlist) # Counter of added lines
if cnt_title >= cnt_tot: # Page limit reached: render
cnt_title_tot += cnt_title
break
#logger.debug(item_local)
# Send the complete itemlist to TMDB
tmdb.set_infoLabels(itemlist, __modo_grafico__)
# Call the helper that polishes the titles returned by TMDB
item, itemlist = generictools.post_tmdb_listado(item, itemlist)
# Add pagination if needed
if (curr_page <= last_page and item.extra not in ['populares']) or (cnt_title_tot < matches_len and 'populares' in item.extra):
if last_page_foot > 1:
title = '%s de %s' % (curr_page_foot-1, last_page_foot)
else:
title = '%s' % (curr_page_foot - 1) # parentheses required: '%s' % x binds before the subtraction
if item.extra not in ['populares', 'series']:
cnt_title_tot = 0
itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente " + title, url=next_page_url, extra=item.extra, extra2=item.extra2, last_page=str(last_page), last_page_foot=str(last_page_foot), curr_page=str(curr_page), curr_page_foot=str(curr_page_foot), cnt_tot=str(cnt_tot), cnt_title_tot=str(cnt_title_tot)))
#logger.debug(str(cnt_tot) + ' / ' + str(cnt_title) + ' / ' + str(cnt_title_tot))
return itemlist
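Worth noting in listado() above: paging state survives between calls only as string attributes on the "next page" Item, which the next invocation converts back to int and deletes. A stripped-down illustration of that round-trip (the field values are made up):

    from core.item import Item

    # Pass 1: serialize the counters onto the "next page" entry
    next_item = Item(channel='documaniatv', action='listado',
                     curr_page=str(2), last_page=str(14), cnt_tot=str(40))
    # Pass 2 (top of listado): restore the counters and clear them
    curr_page = int(next_item.curr_page)
    del next_item.curr_page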
def findvideos(item):
logger.info()
itemlist = []
itemlist_t = [] # Full itemlist of links
itemlist_f = [] # Itemlist of filtered links
if not item.language:
item.language = ['CAST'] # Castilian by default
matches = []
item.category = categoria
item.extra2 = 'xyz'
del item.extra2
#logger.debug(item)
# Download the page data
data = ''
patron = '<link itemprop="embedUrl"\s*href="([^"]+)"\s*\/>(?:<iframe src="([^"]*)")?'
try:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url, timeout=timeout).data)
data = unicode(data, "utf-8", errors="replace").encode("utf-8")
except:
pass
if not data:
logger.error("ERROR 01: FINDVIDEOS: La Web no responde o la URL es erronea: " + item.url)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: FINDVIDEOS: La Web no responde o la URL es erronea. Si la Web está activa, reportar el error con el log'))
return itemlist # no more data; something is wrong, render what we have
matches = re.compile(patron, re.DOTALL).findall(data)
if not matches: # error
logger.error("ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web " + " / PATRON: " + patron + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log'))
return itemlist # no more data; something is wrong, render what we have
#logger.debug("PATRON: " + patron)
#logger.debug(matches)
#logger.debug(data)
# Call the helper that builds the video's overall title from all the TMDB info
item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)
# For a standalone episode, offer the full episode list as well
if scrapertools.find_single_match(item.contentTitle, ':\s*(\d+)-\s*') and item.contentType != 'episode':
serie = item.contentTitle
url_serie = scrapertools.find_single_match(data, '<dt><span>Serie Documental<\/span><\/dt>\s*<dd><a href="([^"]+)"\s*>')
serie = scrapertools.find_single_match(data, '<dt><span>Serie Documental<\/span><\/dt><dd><a href="[^"]+"\s*>(.*?)<')
if url_serie:
itemlist.append(item.clone(title="**-[COLOR yellow] Ver TODOS los episodios de la Serie [/COLOR]-**", action="episodios", contentType='tvshow', url=url_serie, extra="series", from_title=serie, wanted=serie, contentSerieName=serie, contentTitle=serie, quality="", language=[]))
# Iterate over the list of Direct servers, excluding YouTube trailers
for scrapedurl, scrapedplayer in matches:
# Work on a copy of the Item
item_local = item.clone()
# Find the video URL
if 'cnubis.com' in scrapedplayer:
videourl = conector_cnubis(scrapedurl, scrapedplayer)
else:
videourl = servertools.findvideos(scrapedurl)
# We have a link; render it
if len(videourl) > 0:
server = videourl[0][0]
enlace = videourl[0][1]
mostrar_server = True
if config.get_setting("hidepremium"): #Si no se aceptan servidore premium, se ignoran
mostrar_server = servertools.is_server_enabled(server)
# Check whether the video exists
if mostrar_server:
item_local.alive = "??" # Assume by default that the link is doubtful
item_local.alive = servertools.check_video_link(enlace, server, timeout=timeout)
if '?' in item_local.alive:
alive = '?' # The video could not be verified
elif 'no' in item_local.alive.lower():
continue # Dead link
else:
alive = '' # Link verified
# Now render the Direct server link
item_local.url = enlace
item_local.title = '[COLOR yellow][%s][/COLOR] [COLOR yellow][%s][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (alive, server.capitalize(), item_local.quality, str(item_local.language))
# Prepare title and quality; strip empty tags
item_local.title = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.title)
item_local.title = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.title)
item_local.title = item_local.title.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
item_local.quality = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.quality)
item_local.quality = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.quality)
item_local.quality = item_local.quality.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
item_local.action = "play" #Visualizar vídeo
item_local.server = server.lower() #Servidor Directo
itemlist_t.append(item_local.clone()) #Pintar pantalla, si no se filtran idiomas
# Requerido para FilterTools
if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra
itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío
#logger.debug("DIRECTO: " server + ' / ' + enlace + " / title: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName)
#logger.debug(item_local)
if len(itemlist_f) > 0: # If there are filtered entries...
itemlist.extend(itemlist_f) # ... render the filtered list
else:
if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0: # If there are none ...
thumb_separador = get_thumb("next.png") # ... render everything with a warning
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador))
itemlist.extend(itemlist_t) # Render everything when nothing passed the filter
# Required by AutoPlay
autoplay.start(itemlist, item) # Launch AutoPlay
return itemlist
def episodios(item):
logger.info()
itemlist = []
item.category = categoria
#logger.debug(item)
if item.from_title:
item.title = item.from_title
item.extra2 = 'xyz'
del item.extra2
next_page_url = item.url
inicio = time.time() # Keep the whole process within a reasonable time
fin = inicio + 10 # After this many seconds, render what we have
timeout_search = timeout # Timeout for downloads
item.quality = re.sub(r'\s?\[\d+:\d+\]', '', item.quality) # strip the series duration
# Fetch up-to-date Series info. TMDB is essential for the video library
if not item.infoLabels['tmdb_id']:
tmdb.set_infoLabels(item, True)
# Loop over all the pages
epis = 1
while next_page_url and fin > time.time():
# Download the page
data = '' # the page number is inserted into the url
try:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)|&nbsp;", "", httptools.downloadpage(next_page_url, timeout=timeout).data)
data = unicode(data, "utf-8", errors="replace").encode("utf-8")
except: # Processing error; bail out
pass
if not data:
logger.error("ERROR 01: EPISODIOS: La Web no responde o la URL es erronea" + item.url)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: EPISODIOS: La Web no responde o la URL es erronea. Si la Web está activa, reportar el error con el log'))
return itemlist
# Find the episodes
patron = '<span class="pm-label-duration">(.*?)<\/span>.*?<a href="([^"]+)" title="([^"]+)">.*?data-echo="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
if not matches: # error
item = generictools.web_intervenida(item, data) # Check whether the site has been taken down
if item.intervencion: # It has been shut down by court order
item, itemlist = generictools.post_tmdb_episodios(item, itemlist) # Call the helper that renders the error
return itemlist # Bail out
logger.error("ERROR 02: EPISODIOS: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: EPISODIOS: Ha cambiado la estructura de la Web. Reportar el error con el log'))
return itemlist # no more data; something is wrong, render what we have
#logger.debug("PATRON: " + patron)
#logger.debug(matches)
#logger.debug(data)
patron = '<li class=""><a href="([^"]+)">&raquo;<\/a><\/li><\/ul><\/div><\/div>\s*<\/div>'
next_page_url = ''
next_page_url = scrapertools.find_single_match(data, patron)
if next_page_url:
next_page_url = urlparse.urljoin(host, next_page_url)
#logger.debug(next_page_url)
# Iterate over all the episodes, creating a local Item for each one in itemlist
for scrapedduration, scrapedurl, scrapedtitle, scrapedthumbnail in matches:
item_local = item.clone()
item_local.action = "findvideos"
item_local.contentType = "episode"
item_local.extra = "episodios"
if item_local.library_playcounts:
del item_local.library_playcounts
if item_local.library_urls:
del item_local.library_urls
if item_local.path:
del item_local.path
if item_local.update_last:
del item_local.update_last
if item_local.update_next:
del item_local.update_next
if item_local.channel_host:
del item_local.channel_host
if item_local.active:
del item_local.active
if item_local.contentTitle:
del item_local.infoLabels['title']
if item_local.season_colapse:
del item_local.season_colapse
if item_local.unify:
del item_local.unify
if item_local.tmdb_stat:
del item_local.tmdb_stat
item_local.wanted = 'xyz'
del item_local.wanted
item_local.title = ''
item_local.context = "['buscar_trailer']"
item_local.url = scrapedurl
title = scrapedtitle
item_local.language = []
# Look for the episode's quality tags
if 'hdtv' in scrapedtitle.lower() or 'hdtv' in scrapedurl:
item_local.quality = 'HDTV'
elif 'hd7' in scrapedtitle.lower() or 'hd7' in scrapedurl:
item_local.quality = 'HD720p'
elif 'hd1' in scrapedtitle.lower() or 'hd1' in scrapedurl:
item_local.quality = 'HD1080p'
if not item_local.quality:
item_local.quality = '[%s]' % scrapedduration
# Detect the episode's languages
lang = scrapedtitle.strip()
if ('v.o' in lang.lower() or 'v.o' in scrapedurl.lower()) and not 'VO' in item_local.language:
item_local.language += ['VO']
elif ('vose' in lang.lower() or 'v.o.s.e' in lang.lower() or 'vose' in scrapedurl.lower() or 'v.o.s.e' in scrapedurl.lower()) and not 'VOSE' in item_local.language:
item_local.language += ['VOSE']
elif ('latino' in lang.lower() or 'latino' in scrapedurl.lower()) and not 'LAT' in item_local.language:
item_local.language += ['LAT']
if not item_local.language:
item_local.language += ['CAST']
# Extract the Season and Episode
item_local.contentSeason = 0
item_local.contentEpisodeNumber = 0
try:
# Extract the episode number
patron = ':\s*(\d+)-\s*'
if scrapertools.find_single_match(title, patron):
item_local.contentEpisodeNumber = int(scrapertools.find_single_match(title, patron))
# Extract the season
patron = '\s*\(t|T(\d+)\):'
if scrapertools.find_single_match(title, patron):
item_local.contentSeason = int(scrapertools.find_single_match(title, patron))
except:
logger.error('ERROR al extraer Temporada/Episodio: ' + title)
if item_local.contentSeason == 0:
if 'ii:' in title.lower(): item_local.contentSeason = 2
elif 'iii:' in title.lower(): item_local.contentSeason = 3
elif 'iv:' in title.lower(): item_local.contentSeason = 4
else: item_local.contentSeason = 1
if item_local.contentEpisodeNumber == 0:
item_local.contentEpisodeNumber = epis
# Format the title to be compatible with the video library
item_local.title = '%sx%s -' % (str(item_local.contentSeason), str(item_local.contentEpisodeNumber).zfill(2))
patron = ':(?:\s*\d+-)?\s*(.*?)$'
item_local.infoLabels['episodio_titulo'] = scrapertools.find_single_match(title, patron)
itemlist.append(item_local.clone())
epis += 1
#logger.debug(item_local)
if len(itemlist) > 1:
itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber))) # sort
# TMDB pass; the list is sorted by season and episode
tmdb.set_infoLabels(itemlist, True)
# Call the helper that polishes the titles returned by TMDB
item, itemlist = generictools.post_tmdb_episodios(item, itemlist)
#logger.debug(item)
return itemlist
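The season/episode extraction in episodios() relies on the site's title convention "Name (Tn): e- Episode title". A simplified sketch of the intended matching (the sample title is invented; the channel's own season pattern differs slightly):

    import re

    title = 'Serie documental (T2): 5- Nombre del episodio'  # invented sample
    episode = re.search(r':\s*(\d+)-\s*', title)   # episode number after the colon
    season = re.search(r'\(T(\d+)\):', title)      # season number inside "(Tn):"
    print(season.group(1), episode.group(1))       # -> 2 5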
def actualizar_titulos(item):
logger.info()
item = generictools.update_title(item) # Call the helper that refreshes the title via tmdb.find_and_set_infoLabels
# Hand control back to the channel's next action
return item
def conector_cnubis(scrapedurl, scrapedplayer):
logger.info("url=%s, player=https:%s" % (scrapedurl, scrapedplayer))
videourl = []
headers = { 'Referer': scrapedurl } # Referer set to the initial url
data = httptools.downloadpage('https:' + scrapedplayer, headers=headers).data # fetch the video from the player + initial url
#url_file, url_type = scrapertools.find_single_match(data, 'file\s*:\s*"([^"]*)"\s*,\s*type\s*:\s*"([^"]*)')
url_file = scrapertools.find_single_match(data, '<meta itemprop="contentURL" content="([^"]+)" />') # extract the video url
url_type = 'directo'
#videourl.append([url_type, 'https:' + url_file])
videourl.append([url_type, url_file]) # same shape as a servertools.findvideos() result
logger.info(videourl)
return videourl
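Both branches in findvideos() above therefore return the same shape, a list of [server, url] pairs; the invented values below just illustrate the contract:

    videourl = [['directo', 'https://example.com/video.mp4']]  # as returned by conector_cnubis()
    server, enlace = videourl[0][0], videourl[0][1]            # same unpacking used in findvideos()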
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
try:
item.url = item.url + texto
if texto != '':
return listado(item)
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'documentales':
item.url = host + "newvideos.html"
item.extra = "novedades"
item.channel = channel
item.category_new = 'newest'
itemlist = listado(item)
if ">> Página siguiente" in itemlist[-1].title:
itemlist.pop()
# Catch the exception so a single failing channel does not break the global "Novedades" section
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
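newest(categoria) is the hook that the global "Novedades" section calls on each channel. A hypothetical caller-side sketch (the actual dispatcher lives outside this diff):

    from channels import documaniatv

    # Returns a list of Item; the ">> Página siguiente" entry is already popped
    items = documaniatv.newest('documentales')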

View File

@@ -1,81 +0,0 @@
{
"id": "kbagi",
"name": "Kbagi/Diskokosmiko",
"language": ["cast", "lat"],
"active": false,
"adult": false,
"version": 1,
"thumbnail": "http://i.imgur.com/EjbfM7p.png?1",
"banner": "copiapop.png",
"categories": [
"movie",
"tvshow"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "kbagiuser",
"type": "text",
"color": "0xFF25AA48",
"label": "Usuario Kbagi",
"enabled": true,
"visible": true
},
{
"id": "kbagipassword",
"type": "text",
"color": "0xFF25AA48",
"hidden": true,
"label": "Password Kbagi",
"enabled": "!eq(-1,'')",
"visible": true
},
{
"id": "diskokosmikouser",
"type": "text",
"color": "0xFFC52020",
"label": "Usuario Diskokosmiko",
"enabled": true,
"visible": true
},
{
"id": "diskokosmikopassword",
"type": "text",
"color": "0xFFC52020",
"hidden": true,
"label": "Password Diskokosmiko",
"enabled": "!eq(-1,'')",
"visible": true
},
{
"id": "adult_content",
"type": "bool",
"color": "0xFFd50b0b",
"label": "Mostrar contenido adulto en las búsquedas",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"Sin color",
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
}
]
}

View File

@@ -1,384 +0,0 @@
# -*- coding: utf-8 -*-
import re
import threading
import urllib
import xbmc
from core import downloadtools
from core import filetools
from core import httptools
from core import jsontools
from core import scrapertools
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
__perfil__ = config.get_setting('perfil', "kbagi")
# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFF088A08'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFF088A08'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFF088A08']]
if __perfil__ - 1 >= 0:
color1, color2, color3, color4, color5 = perfil[__perfil__ - 1]
else:
color1 = color2 = color3 = color4 = color5 = ""
adult_content = config.get_setting("adult_content", "kbagi")
def login(pagina):
logger.info()
try:
dom = pagina.split(".")[0]
user = config.get_setting("%suser" %dom, "kbagi")
password = config.get_setting("%spassword" %dom, "kbagi")
if "kbagi" in pagina:
pagina = "k-bagi.com"
if not user:
return False, "Para ver los enlaces de %s es necesario registrarse en %s" %(dom, pagina)
data = httptools.downloadpage("http://%s" % pagina).data
if re.search(r'(?i)%s' % user, data):
return True, ""
token = scrapertools.find_single_match(data, 'name="__RequestVerificationToken".*?value="([^"]+)"')
post = "__RequestVerificationToken=%s&UserName=%s&Password=%s" % (token, user, password)
headers = {'X-Requested-With': 'XMLHttpRequest'}
url_log = "http://%s/action/Account/Login" % pagina
data = httptools.downloadpage(url_log, post, headers).data
if "redirectUrl" in data:
logger.info("Login correcto")
return True, ""
else:
logger.error("Error en el login")
return False, "Nombre de usuario no válido. Comprueba tus credenciales"
except:
import traceback
logger.error(traceback.format_exc())
return False, "Error durante el login. Comprueba tus credenciales"
def mainlist(item):
logger.info()
itemlist = []
item.text_color = color1
logueado, error_message = login("kbagi.com")
if not logueado:
itemlist.append(item.clone(title=error_message, action="configuracion", folder=False))
else:
item.extra = "http://k-bagi.com"
itemlist.append(item.clone(title="kbagi", action="", text_color=color2))
itemlist.append(
item.clone(title=" Búsqueda", action="search", url="http://k-bagi.com/action/SearchFiles"))
itemlist.append(item.clone(title=" Colecciones", action="colecciones",
url="http://k-bagi.com/action/home/MoreNewestCollections?pageNumber=1"))
itemlist.append(item.clone(title=" Búsqueda personalizada", action="filtro",
url="http://k-bagi.com/action/SearchFiles"))
itemlist.append(item.clone(title=" Mi cuenta", action="cuenta"))
logueado, error_message = login("diskokosmiko.mx")
if not logueado:
itemlist.append(item.clone(title=error_message, action="configuracion", folder=False))
else:
item.extra = "http://diskokosmiko.mx/"
itemlist.append(item.clone(title="DiskoKosmiko", action="", text_color=color2))
itemlist.append(item.clone(title=" Búsqueda", action="search", url="http://diskokosmiko.mx/action/SearchFiles"))
itemlist.append(item.clone(title=" Colecciones", action="colecciones",
url="http://diskokosmiko.mx/action/home/MoreNewestCollections?pageNumber=1"))
itemlist.append(item.clone(title=" Búsqueda personalizada", action="filtro",
url="http://diskokosmiko.mx/action/SearchFiles"))
itemlist.append(item.clone(title=" Mi cuenta", action="cuenta"))
itemlist.append(item.clone(action="", title=""))
folder_thumb = filetools.join(config.get_data_path(), 'thumbs_kbagi')
files = filetools.listdir(folder_thumb)
if files:
itemlist.append(
item.clone(title="Eliminar caché de imágenes (%s)" % len(files), action="delete_cache", text_color="red"))
itemlist.append(item.clone(title="Configuración del canal", action="configuracion", text_color="gold"))
return itemlist
def search(item, texto):
logger.info()
item.post = "Mode=List&Type=Video&Phrase=%s&SizeFrom=0&SizeTo=0&Extension=&ref=pager&pageNumber=1" % texto.replace(
" ", "+")
try:
return listado(item)
except:
import sys, traceback
for line in sys.exc_info():
logger.error("%s" % line)
logger.error(traceback.format_exc())
return []
def configuracion(item):
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def listado(item):
logger.info()
itemlist = []
data_thumb = httptools.downloadpage(item.url, item.post.replace("Mode=List", "Mode=Gallery")).data
if not item.post:
data_thumb = ""
item.url = item.url.replace("/gallery,", "/list,")
data = httptools.downloadpage(item.url, item.post).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
folder = filetools.join(config.get_data_path(), 'thumbs_kbagi')
patron = 'data-file-id(.*?</p>)</div></div>'
bloques = scrapertools.find_multiple_matches(data, patron)
for block in bloques:
if "adult_info" in block and not adult_content:
continue
size = scrapertools.find_single_match(block, '<p.*?>([^<]+)</p>')
patron = 'class="name"><a href="([^"]+)".*?>([^<]+)<'
scrapedurl, scrapedtitle = scrapertools.find_single_match(block, patron)
scrapedthumbnail = scrapertools.find_single_match(block, "background-image:url\('([^']+)'")
if scrapedthumbnail:
try:
thumb = scrapedthumbnail.split("-", 1)[0].replace("?", "\?")
if data_thumb:
url_thumb = scrapertools.find_single_match(data_thumb, "(%s[^']+)'" % thumb)
else:
url_thumb = scrapedthumbnail
scrapedthumbnail = filetools.join(folder, "%s.jpg" % url_thumb.split("e=", 1)[1][-20:])
except:
scrapedthumbnail = ""
if scrapedthumbnail:
t = threading.Thread(target=download_thumb, args=[scrapedthumbnail, url_thumb])
t.setDaemon(True)
t.start()
else:
scrapedthumbnail = item.extra + "/img/file_types/gallery/movie.png"
scrapedurl = item.extra + scrapedurl
title = "%s (%s)" % (scrapedtitle, size)
if "adult_info" in block:
title += " [COLOR %s][+18][/COLOR]" % color4
plot = scrapertools.find_single_match(block, '<div class="desc">(.*?)</div>')
if plot:
plot = scrapertools.decodeHtmlentities(plot)
new_item = Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, text_color=color2,
extra=item.extra, infoLabels={'plot': plot}, post=item.post)
if item.post:
try:
new_item.folderurl, new_item.foldername = scrapertools.find_single_match(block,
'<p class="folder"><a href="([^"]+)".*?>([^<]+)<')
except:
pass
else:
new_item.folderurl = item.url.rsplit("/", 1)[0]
new_item.foldername = item.foldername
new_item.fanart = item.thumbnail
itemlist.append(new_item)
next_page = scrapertools.find_single_match(data, 'class="pageSplitter.*?" data-nextpage-number="([^"]+)"')
if next_page:
if item.post:
post = re.sub(r'pageNumber=(\d+)', "pageNumber=" + next_page, item.post)
url = item.url
else:
url = re.sub(r',\d+\?ref=pager', ",%s?ref=pager" % next_page, item.url)
post = ""
itemlist.append(Item(channel=item.channel, action="listado", title=">> Página Siguiente (%s)" % next_page,
url=url, post=post, extra=item.extra))
return itemlist
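The thumbnail cache key in listado() is derived from the last 20 characters after the "e=" query parameter of the thumbnail URL; with an invented URL:

    url_thumb = 'http://k-bagi.com/thumb.php?e=abcdefghijklmnopqrstuvwxyz123456'  # invented
    cache_name = "%s.jpg" % url_thumb.split("e=", 1)[1][-20:]
    print(cache_name)  # -> 'mnopqrstuvwxyz123456.jpg' (last 20 chars after 'e=')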
def findvideos(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="play", title="Reproducir/Descargar", server="kbagi"))
usuario = scrapertools.find_single_match(item.url, '%s/([^/]+)/' % item.extra)
url_usuario = item.extra + "/" + usuario
if item.folderurl and not item.folderurl.startswith(item.extra):
item.folderurl = item.extra + item.folderurl
if item.post:
itemlist.append(item.clone(action="listado", title="Ver colección: %s" % item.foldername,
url=item.folderurl + "/gallery,1,1?ref=pager", post=""))
data = httptools.downloadpage(item.folderurl).data
token = scrapertools.find_single_match(data,
'data-action="followChanged.*?name="__RequestVerificationToken".*?value="([^"]+)"')
collection_id = item.folderurl.rsplit("-", 1)[1]
post = "__RequestVerificationToken=%s&collectionId=%s" % (token, collection_id)
url = "%s/action/Follow/Follow" % item.extra
title = "Seguir Colección: %s" % item.foldername
if "dejar de seguir" in data:
title = "Dejar de seguir la colección: %s" % item.foldername
url = "%s/action/Follow/UnFollow" % item.extra
itemlist.append(item.clone(action="seguir", title=title, url=url, post=post, text_color=color5, folder=False))
itemlist.append(
item.clone(action="colecciones", title="Ver colecciones del usuario: %s" % usuario, url=url_usuario))
return itemlist
def colecciones(item):
logger.info()
itemlist = []
usuario = False
data = httptools.downloadpage(item.url).data
if "Ver colecciones del usuario" not in item.title and not item.index:
data = jsontools.load(data)["Data"]
content = data["Content"]
content = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", content)
else:
usuario = True
if item.follow:
content = scrapertools.find_single_match(data,
'id="followed_collections"(.*?)<div id="recommended_collections"')
else:
content = scrapertools.find_single_match(data,
'<div id="collections".*?<div class="collections_list(.*?)<div class="collections_list')
content = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", content)
patron = '<a class="name" href="([^"]+)".*?>([^<]+)<.*?src="([^"]+)".*?<p class="info">(.*?)</p>'
matches = scrapertools.find_multiple_matches(content, patron)
index = ""
if item.index and item.index != "0":
matches = matches[item.index:item.index + 20]
if len(matches) > item.index + 20:
index = item.index + 20
elif len(matches) > 20:
matches = matches[:20]
index = 20
folder = filetools.join(config.get_data_path(), 'thumbs_kbagi')
for url, scrapedtitle, thumb, info in matches:
url = item.extra + url + "/gallery,1,1?ref=pager"
title = "%s (%s)" % (scrapedtitle, scrapertools.htmlclean(info))
try:
scrapedthumbnail = filetools.join(folder, "%s.jpg" % thumb.split("e=", 1)[1][-20:])
except:
try:
scrapedthumbnail = filetools.join(folder, "%s.jpg" % thumb.split("/thumbnail/", 1)[1][-20:])
thumb = thumb.replace("/thumbnail/", "/")
except:
scrapedthumbnail = ""
if scrapedthumbnail:
t = threading.Thread(target=download_thumb, args=[scrapedthumbnail, thumb])
t.setDaemon(True)
t.start()
else:
scrapedthumbnail = thumb
itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url,
thumbnail=scrapedthumbnail, text_color=color2, extra=item.extra,
foldername=scrapedtitle))
if not usuario and data.get("NextPageUrl"):
url = item.extra + data["NextPageUrl"]
itemlist.append(item.clone(title=">> Página Siguiente", url=url, text_color=""))
elif index:
itemlist.append(item.clone(title=">> Página Siguiente", url=item.url, index=index, text_color=""))
return itemlist
def seguir(item):
logger.info()
data = httptools.downloadpage(item.url, item.post)
message = "Colección seguida"
if "Dejar" in item.title:
message = "La colección ya no se sigue"
if data.sucess and config.get_platform() != "plex":
platformtools.dialog_notification("Acción correcta", message)
def cuenta(item):
logger.info()
itemlist = []
web = "kbagi"
if "diskokosmiko" in item.extra:
web = "diskokosmiko"
logueado, error_message = login("diskokosmiko.mx")
if not logueado:
itemlist.append(item.clone(title=error_message, action="configuracion", folder=False))
return itemlist
user = config.get_setting("%suser" % web, "kbagi")
user = unicode(user, "utf8").lower().encode("utf8")
url = item.extra + "/" + urllib.quote(user)
data = httptools.downloadpage(url).data
num_col = scrapertools.find_single_match(data, 'name="Has_collections" value="([^"]+)"')
if num_col != "0":
itemlist.append(item.clone(action="colecciones", url=url, index="0", title="Ver mis colecciones",
text_color=color5))
else:
itemlist.append(item.clone(action="", title="No tienes ninguna colección", text_color=color4))
num_follow = scrapertools.find_single_match(data, 'name="Follows_collections" value="([^"]+)"')
if num_follow != "0":
itemlist.append(item.clone(action="colecciones", url=url, index="0", title="Colecciones que sigo",
text_color=color5, follow=True))
else:
itemlist.append(item.clone(action="", title="No sigues ninguna colección", text_color=color4))
return itemlist
def filtro(item):
logger.info()
list_controls = []
valores = {}
dict_values = None
list_controls.append({'id': 'search', 'label': 'Texto a buscar', 'enabled': True, 'color': '0xFFC52020',
'type': 'text', 'default': '', 'visible': True})
list_controls.append({'id': 'tipo', 'label': 'Tipo de búsqueda', 'enabled': True, 'color': '0xFFFF8000',
'type': 'list', 'default': -1, 'visible': True})
list_controls[1]['lvalues'] = ['Aplicación', 'Archivo', 'Documento', 'Imagen', 'Música', 'Vídeo', 'Todos']
valores['tipo'] = ['Application', 'Archive', 'Document', 'Image', 'Music', 'Video', '']
list_controls.append({'id': 'ext', 'label': 'Extensión', 'enabled': True, 'color': '0xFFF4FA58',
'type': 'text', 'default': '', 'visible': True})
list_controls.append({'id': 'tmin', 'label': 'Tamaño mínimo (MB)', 'enabled': True, 'color': '0xFFCC2EFA',
'type': 'text', 'default': '0', 'visible': True})
list_controls.append({'id': 'tmax', 'label': 'Tamaño máximo (MB)', 'enabled': True, 'color': '0xFF2ECCFA',
'type': 'text', 'default': '0', 'visible': True})
# Use the default/saved values
web = "kbagi"
if "diskokosmiko" in item.extra:
web = "diskokosmiko"
valores_guardados = config.get_setting("filtro_defecto_" + web, item.channel)
if valores_guardados:
dict_values = valores_guardados
item.valores = valores
return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values,
caption="Filtra la búsqueda", item=item, callback='filtrado')
def filtrado(item, values):
values_copy = values.copy()
web = "kbagi"
if "diskokosmiko" in item.extra:
web = "diskokosmiko"
# Save the filter so it is the one loaded by default next time
config.set_setting("filtro_defecto_" + web, values_copy, item.channel)
tipo = item.valores["tipo"][values["tipo"]]
search = values["search"]
ext = values["ext"]
tmin = values["tmin"]
tmax = values["tmax"]
if not tmin.isdigit():
tmin = "0"
if not tmax.isdigit():
tmax = "0"
item.valores = ""
item.post = "Mode=List&Type=%s&Phrase=%s&SizeFrom=%s&SizeTo=%s&Extension=%s&ref=pager&pageNumber=1" \
% (tipo, search, tmin, tmax, ext)
item.action = "listado"
return listado(item)
# Module-level lock: the original created a new Lock() inside each call,
# which gives every thread its own lock and provides no mutual exclusion.
thumb_lock = threading.Lock()
def download_thumb(filename, url):
thumb_lock.acquire()
folder = filetools.join(config.get_data_path(), 'thumbs_kbagi')
if not filetools.exists(folder):
filetools.mkdir(folder)
thumb_lock.release()
if not filetools.exists(filename):
downloadtools.downloadfile(url, filename, silent=True)
return filename
def delete_cache(url):
folder = filetools.join(config.get_data_path(), 'thumbs_kbagi')
filetools.rmdirtree(folder)
if config.is_xbmc():
xbmc.executebuiltin("Container.Refresh")

View File

@@ -735,8 +735,8 @@ def listado_busqueda(item):
#logger.debug(item_local)
-if not category: # If this field is missing, we are on the first pass of a global search
-return itemlist # Return without the polishing phase, to save time
+#if not category: # If this field is missing, we are on the first pass of a global search
+# return itemlist # Return without the polishing phase, to save time
# Ask TMDB to fill in InfoLabels from itemlist; better from itemlist because the queries are sent in parallel
tmdb.set_infoLabels(itemlist, __modo_grafico__)

View File

@@ -1,32 +0,0 @@
{
"id": "miltorrents",
"name": "Miltorrents",
"active": false,
"adult": false,
"language": ["cast"],
"thumbnail": "http://imgur.com/KZoska0.png",
"banner": "miltorrents.png",
"categories": [
"torrent",
"movie",
"tvshow"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_torrent",
"type": "bool",
"label": "Incluir en Novedades - Torrent",
"default": true,
"enabled": true,
"visible": true
}
]
}
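# The "settings" entries in a channel manifest like the one above surface in
# channel code through config.get_setting (a sketch using the call pattern
# already present in this repo):
#   from platformcode import config
#   if config.get_setting("include_in_global_search", "miltorrents"):
#       ...  # the channel takes part in the global search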

View File

@@ -1,447 +0,0 @@
# -*- coding: utf-8 -*-
import math
import os
import re
import unicodedata
import urllib
from core import httptools
from core import scrapertools
from core import tmdb
from core.item import Item
from core.scrapertools import decodeHtmlentities as dhe
from platformcode import config, logger
from channelselector import get_thumb
def mainlist(item):
logger.info()
check_bg = item.action
if str(check_bg) == "":
check_bg = "bglobal"
itemlist = []
itemlist.append(Item(channel=item.channel, title="[COLOR yellow][B]Películas[/B][/COLOR]", action="peliculas",
url="http://www.miltorrents.com", thumbnail=get_thumb('movies', auto=True),
fanart="http://imgur.com/y4nJyZh.jpg"))
title = "[COLOR firebrick][B]Buscar[/B][/COLOR]" + " " + "[COLOR yellow][B]Peliculas[/B][/COLOR]"
itemlist.append(Item(channel=item.channel, title=" " + title, action="search", url="",
thumbnail=get_thumb('search', auto=True), fanart="http://imgur.com/gwjawWV.jpg",
extra="peliculas" + "|" + check_bg))
itemlist.append(Item(channel=item.channel, title="[COLOR slategray][B]Series[/B][/COLOR]", action="peliculas",
url="http://www.miltorrents.com/series", thumbnail=get_thumb('tvshows', auto=True),
fanart="http://imgur.com/LwS32zX.jpg"))
title = "[COLOR firebrick][B]Buscar[/B][/COLOR]" + " " + "[COLOR slategray][B]Series[/B][/COLOR]"
itemlist.append(Item(channel=item.channel, title=" " + title, action="search", url="",
thumbnail=get_thumb('search', auto=True), fanart="http://imgur.com/ecPmzDj.jpg",
extra="series" + "|" + check_bg))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
if item.extra:
if item.extra.split("|")[0] == "series":
item.url = "http://www.miltorrents.com/series/?pTit=%s&pOrd=FE" % (texto)
else:
item.url = "http://www.miltorrents.com/?pTit=%s&pOrd=FE" % (texto)
item.extra = "search" + "|" + item.extra.split("|")[1] + "|" + texto
try:
return peliculas(item)
    # Catch the exception so a failing channel does not interrupt the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
else:
if item.contentType != "movie":
item.url = "http://www.miltorrents.com/series/?pTit=%s&pOrd=FE" % (texto)
check_sp = "tvshow"
else:
item.url = "http://www.miltorrents.com/?pTit=%s&pOrd=FE" % (texto)
check_sp = "peliculas"
item.extra = "search" + "|""bglobal" + "|" + texto + "|" + check_sp
try:
return peliculas(item)
        # Catch the exception so a failing channel does not interrupt the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"Independance", "Independence", data)
if "serie" in item.url:
patron = '<div class="corner-episode">(.*?)<\/div>.*?<a href="([^"]+)".*?image:url\(\'([^"]+)\'.*?"tooltipbox">(.*?)<br'
matches = re.compile(patron, re.DOTALL).findall(data)
if item.extra.split("|")[0] == "search":
check_bg = item.action
for episodio, url, thumbnail, title in matches:
title = title.decode('latin1').encode('utf8')
title_fan = title.strip()
trailer = title_fan + " " + "series" + "trailer"
title_f = "[COLOR slategray][B]" + title.strip() + "[/B][/COLOR]" + " " + "[COLOR floralwhite][B]" + episodio + "[/B][/COLOR]"
trailer = urllib.quote(trailer)
extra = trailer + "|" + title_fan + "|" + " " + "|" + "pelicula"
itemlist.append(Item(channel=item.channel, title=title_f, url=url, action="findvideos", thumbnail=thumbnail,
fanart="http://imgur.com/NrZNOTN.jpg", extra=extra, folder=True, contentSerieName= title))
else:
patron = '<div class="moviesbox">(.*?)<a href="([^"]+)".*?image:url\(\'([^"]+)\'.*?<span class="tooltipbox">([^<]+)<i>\((\d\d\d\d)\)'
matches = re.compile(patron, re.DOTALL).findall(data)
for p_rating, url, thumbnail, title, year in matches:
try:
rating = scrapertools.get_match(p_rating, '<div class="moviesbox_rating">(.*?)<img')
except:
rating = "(Sin puntuacion)"
title = title.decode('latin1').encode('utf8')
title_fan = re.sub(r"\[.*?\]|\(.*?\)|\d&#.*?;\d+|-|Temporada.*?Completa| ;|(Sin puntuacion)", "", title)
try:
check_rating = scrapertools.get_match(rating, '(\d+).')
if int(check_rating) >= 5 and int(check_rating) < 8:
rating = "[COLOR springgreen][B]" + rating + "[/B][/COLOR]"
elif int(check_rating) >= 8 and int(check_rating) < 10:
rating = "[COLOR yellow][B]" + rating + "[/B][/COLOR]"
elif int(check_rating) == 10:
rating = "[COLOR orangered][B]" + rating + "[/B][/COLOR]"
else:
rating = "[COLOR crimson][B]" + rating + "[/B][/COLOR]"
except:
rating = "[COLOR crimson][B]" + rating + "[/B][/COLOR]"
if "10." in rating:
rating = re.sub(r'10\.\d+', '10', rating)
title_f = "[COLOR gold][B]" + title + "[/B][/COLOR]" + " " + rating
trailer = title_fan + " " + "trailer"
trailer = urllib.quote(trailer)
extra = trailer + "|" + title_fan + "|" + year + "|" + "pelicula"
itemlist.append(Item(channel=item.channel, title=title_f, url=url, action="findvideos", thumbnail=thumbnail,
fanart="http://imgur.com/Oi1mlFn.jpg", extra=extra, folder=True, contentTitle= title, infoLabels={'year':year}))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    ## Pagination
patronvideos = '<div class="pagination">.*?<a href="#">.*?<\/a><\/span><a href="([^"]+)"'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
if len(matches) > 0:
url = matches[0]
itemlist.append(Item(channel=item.channel, action="peliculas", title="[COLOR khaki]siguiente[/COLOR]", url=url,
thumbnail="http://imgur.com/fJzoytz.png", fanart="http://imgur.com/3AqH1Zu.jpg",
folder=True))
return itemlist
def capitulos(item):
logger.info()
itemlist = []
data = item.extra
thumbnail = scrapertools.get_match(data, 'background-image:url\(\'([^"]+)\'')
thumbnail = re.sub(r"w185", "original", thumbnail)
patron = '<a href="([^"]+)".*?<i>(.*?)<\/i>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, capitulo in matches:
capitulo = re.sub(r"Cap.*?tulo", "", capitulo)
capitulo = "[COLOR floralwhite][B]" + capitulo + "[/B][/COLOR]"
if capitulo == item.extra.split("|")[4]:
continue
if not ".jpg" in item.extra.split("|")[2]:
fanart = item.show.split("|")[0]
else:
fanart = item.extra.split("|")[2]
itemlist.append(Item(channel=item.channel, title=capitulo, action="findvideos", url=url, thumbnail=thumbnail,
extra="fv2" + "|" + item.extra.split("|")[3], show=item.show, category=item.category,
fanart=fanart, folder=True))
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
if not "serie" in item.url:
thumbnail = item.category
else:
thumbnail = ''
patronbloque_enlaces = '<div class="detail_content_subtitle">(.*?)<\/div>(.*?)<div class="torrent_sep">'
matchesenlaces = re.compile(patronbloque_enlaces, re.DOTALL).findall(data)
if len(matchesenlaces) == 0:
thumb = ""
check = ""
itemlist.append(
Item(channel=item.channel, title="[COLOR crimson][B]No hay Torrent[/B][/COLOR]", action="mainlist", url="",
fanart=item.show.split("|")[0], thumbnail=thumbnail, folder=False))
for calidad_bloque, bloque_enlaces in matchesenlaces:
calidad_bloque = dhe(calidad_bloque)
calidad_bloque = ''.join((c for c in unicodedata.normalize('NFD', unicode(calidad_bloque.decode('utf-8'))) if
unicodedata.category(c) != 'Mn'))
if "Alta" in calidad_bloque:
title = 'Alta Definicion'
title = " [COLOR yellow][B]" + title + "[/B][/COLOR]"
elif "estandar" in calidad_bloque:
title = 'Definicion Estandar'
title = " [COLOR mediumaquamarine][B]" + title + "[/B][/COLOR]"
else:
title = 'Screener'
title = " [COLOR slategray][B]" + title + "[/B][/COLOR]"
itemlist.append(
Item(channel=item.channel, title=title, action="mainlist", url="", fanart=item.show.split("|")[0],
thumbnail=thumbnail, folder=False))
if "serie" in item.url:
thumb = scrapertools.get_match(data, '<div class="detail_background2".*?url\(([^"]+)\)')
patron = '\:showDownload.*?(http.*?)\''
matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces)
for url in matches:
calidad = ""
try:
if not url.endswith(".torrent") and not "elitetorrent" in url:
if url.endswith("fx"):
url = httptools.downloadpage(url, follow_redirects=False)
url = url.headers.get("location")
if url.endswith(".fx"):
url = httptools.downloadpage(url, follow_redirects=False)
url = url.headers.get("location")
url = " http://estrenosli.org" + url
else:
if not url.endswith(".mkv"):
url = httptools.downloadpage(url, follow_redirects=False)
url = url.headers.get("location")
torrents_path = config.get_videolibrary_path() + '/torrents'
if not os.path.exists(torrents_path):
os.mkdir(torrents_path)
try:
urllib.URLopener.version = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36 SE 2.X MetaSr 1.0'
urllib.urlretrieve(url, torrents_path + "/temp.torrent")
pepe = open(torrents_path + "/temp.torrent", "rb").read()
except:
pepe = ""
if "used CloudFlare" in pepe:
try:
urllib.urlretrieve("http://anonymouse.org/cgi-bin/anon-www.cgi/" + url.strip(),
torrents_path + "/temp.torrent")
pepe = open(torrents_path + "/temp.torrent", "rb").read()
except:
pepe = ""
torrent = decode(pepe)
                    logger.debug('torrent: %s' % torrent)
try:
name = torrent["info"]["name"]
sizet = torrent["info"]['length']
sizet = convert_size(sizet)
except:
name = "no disponible"
try:
check_video = scrapertools.find_multiple_matches(str(torrent["info"]["files"]),
"'length': (\d+)}")
size = max([int(i) for i in check_video])
for file in torrent["info"]["files"]:
manolo = "%r - %d bytes" % ("/".join(file["path"]), file["length"])
if str(size) in manolo:
video = manolo
size = convert_size(size)
ext_v = re.sub(r"-.*? bytes|.*?\[.*?\].|'|.*?COM.|.*?\[.*?\]|\(.*?\)|.*?\.", "", video)
try:
os.remove(torrents_path + "/temp.torrent")
except:
pass
except:
size = sizet
ext_v = re.sub(r"-.*? bytes|.*?\[.*?\].|'|.*?COM.|.*?\.es.|.*?\[.*?\]|.*?\(.*?\)\.|.*?\.", "",
name)
try:
os.remove(torrents_path + "/temp.torrent")
except:
pass
except:
size = "en estos momentos..."
ext_v = "no disponible"
if "Alta" in calidad_bloque:
title = "[COLOR navajowhite][B]" + calidad + "[/B][/COLOR]" + " " + "[COLOR peachpuff]( Video [/COLOR]" + "[COLOR peachpuff]" + ext_v + " -- " + size + " )[/COLOR]"
elif "estandar" in calidad_bloque:
title = "[COLOR lavender][B]" + calidad + "[/B][/COLOR]" + " " + "[COLOR azure]( Video [/COLOR]" + "[COLOR azure]" + ext_v + " -- " + size + " )[/COLOR]"
else:
title = "[COLOR gainsboro][B]" + calidad + "[/B][/COLOR]" + " " + "[COLOR silver]( Video [/COLOR]" + "[COLOR silver]" + ext_v + " -- " + size + " )[/COLOR]"
if "rar" in ext_v:
ext_v = ext_v + " -- No reproducible"
size = ""
item.title = re.sub(r"\[.*?\]", "", item.title)
temp_epi = scrapertools.find_multiple_matches(item.title, '(\d+)x(\d+)')
for temp, epi in temp_epi:
check = temp + "x" + epi
if item.extra.split("|")[0] == "fv2":
extra = item.extra.split("|")[1] + "|" + " " + "|" + temp + "|" + epi
else:
extra = item.extra + "|" + temp + "|" + epi
itemlist.append(Item(channel=item.channel, title=title, action="play", url=url, server="torrent",
thumbnail=thumbnail, extra=item.extra, show=item.show,
fanart=item.show.split("|")[0], folder=False))
else:
patron = '<a href=.*?(http.*?)\'\).*?<i>(.*?)<i>(.*?)<\/i>'
matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces)
for url, calidad, peso in matches:
try:
if not url.endswith(".torrent") and not "elitetorrent" in url:
if url.endswith("fx"):
url = httptools.downloadpage(url, follow_redirects=False)
url = url.headers.get("location")
if url.endswith(".fx"):
url = httptools.downloadpage(url, follow_redirects=False)
url = url.headers.get("location")
url = " http://estrenosli.org" + url
else:
if not url.endswith(".mkv"):
url = httptools.downloadpage(url, follow_redirects=False)
url = url.headers.get("location")
torrents_path = config.get_videolibrary_path() + '/torrents'
if not os.path.exists(torrents_path):
os.mkdir(torrents_path)
urllib.URLopener.version = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36 SE 2.X MetaSr 1.0'
urllib.urlretrieve(url, torrents_path + "/temp.torrent")
pepe = open(torrents_path + "/temp.torrent", "rb").read()
if "used CloudFlare" in pepe:
try:
urllib.urlretrieve("http://anonymouse.org/cgi-bin/anon-www.cgi/" + url.strip(),
torrents_path + "/temp.torrent")
pepe = open(torrents_path + "/temp.torrent", "rb").read()
except:
pepe = ""
torrent = decode(pepe)
try:
name = torrent["info"]["name"]
except:
name = "no disponible"
try:
check_video = scrapertools.find_multiple_matches(str(torrent["info"]["files"]),
"'length': (\d+)}")
size = max([int(i) for i in check_video])
for file in torrent["info"]["files"]:
manolo = "%r - %d bytes" % ("/".join(file["path"]), file["length"])
if str(size) in manolo:
video = manolo
ext_v = re.sub(r"-.*? bytes|\.*?\[.*?\]\.|'|\.*?COM.|.*?\[.*?\]|\(.*?\)|.*?\.", "", video)
try:
os.remove(torrents_path + "/temp.torrent")
except:
pass
except:
ext_v = re.sub(r"-.*? bytes|.*?\[.*?\].|'|.*?COM.|.*?\.es\.|.*?\[.*?\]|.*?\(.*?\)\.|.*?\.", "",
name)
try:
os.remove(torrents_path + "/temp.torrent")
except:
pass
except:
size = "en estos momentos..."
ext_v = "no disponible"
if "rar" in ext_v:
ext_v = ext_v + " -- No reproducible"
calidad = re.sub(r"</i>", "", calidad)
if "Alta" in calidad_bloque:
title = "[COLOR khaki][B]" + calidad + "[/B][/COLOR]" + "[COLOR darkkhaki][B]" + " - " + peso + "[/B][/COLOR]" + " " + "[COLOR lemonchiffon]( Video [/COLOR]" + "[COLOR lemonchiffon]" + ext_v + " )[/COLOR]"
elif "estandar" in calidad_bloque:
title = "[COLOR darkcyan][B]" + calidad + "[/B][/COLOR]" + "[COLOR cadetblue][B]" + " - " + peso + "[/B][/COLOR]" + " " + "[COLOR paleturquoise]( Video [/COLOR]" + "[COLOR paleturquoise]" + ext_v + " )[/COLOR]"
else:
title = "[COLOR dimgray][B]" + calidad + "[/B][/COLOR]" + "[COLOR gray][B]" + " - " + peso + "[/B][/COLOR]" + " " + "[COLOR lightslategray]( Video [/COLOR]" + "[COLOR lightslategray]" + ext_v + " )[/COLOR]"
itemlist.append(Item(channel=item.channel, title=title, action="play", url=url, server="torrent",
thumbnail=thumbnail, extra=item.extra, show=item.show,
fanart=item.show.split("|")[0], folder=False))
if "serie" in item.url and item.extra.split("|")[0] != "fv2":
title_info = 'Temporadas'
title_info = "[COLOR springgreen][B]" + title_info + "[/B][/COLOR]"
itemlist.append(Item(channel=item.channel, title=" " + title_info,
action="mainlist", url="", fanart=item.show.split("|")[0], thumbnail=thumbnail,
folder=False))
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = 'class="contactlinkh">(.*?)<\/a><\/div>(.*?)</div></div></div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for temporadas, bloque_capitulos in matches:
thumbnail = scrapertools.get_match(bloque_capitulos, 'background-image:url\(\'([^"]+)\'')
thumbnail = re.sub(r"w185", "original", thumbnail)
itemlist.append(Item(channel=item.channel, title="[COLOR chartreuse][B]" + temporadas + "[/B][/COLOR]",
action="capitulos", url=item.url, thumbnail=thumbnail,
extra="fv2" + "|" + bloque_capitulos + "|" + thumb + "|" + item.extra + "|" + check,
show=item.show, fanart=item.show.split("|")[0], category=item.category, folder=True))
return itemlist
def decode(text):
try:
src = tokenize(text)
data = decode_item(src.next, src.next())
for token in src: # look for more tokens
raise SyntaxError("trailing junk")
    except (AttributeError, ValueError, StopIteration):
        # If decoding failed before `data` was bound, fall back to the raw
        # token generator so the caller still gets something to inspect.
        try:
            data = data
        except NameError:
            data = src
return data
def tokenize(text, match=re.compile("([idel])|(\d+):|(-?\d+)").match):
i = 0
while i < len(text):
m = match(text, i)
s = m.group(m.lastindex)
i = m.end()
if m.lastindex == 2:
yield "s"
yield text[i:i + int(s)]
i = i + int(s)
else:
yield s
def decode_item(next, token):
if token == "i":
# integer: "i" value "e"
data = int(next())
if next() != "e":
raise ValueError
elif token == "s":
# string: "s" value (virtual tokens)
data = next()
elif token == "l" or token == "d":
# container: "l" (or "d") values "e"
data = []
tok = next()
while tok != "e":
data.append(decode_item(next, tok))
tok = next()
if token == "d":
data = dict(zip(data[0::2], data[1::2]))
else:
raise ValueError
return data
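# Sanity check for the bencode decoder above (a minimal sketch; the sample
# strings are hypothetical, not data taken from the site):
#   decode("i42e")              -> 42
#   decode("d4:spaml1:a1:bee")  -> {'spam': ['a', 'b']}
# Elsewhere in this channel, decode() is fed the raw bytes of the downloaded
# .torrent file to read its name, length and file list.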
def convert_size(size):
    if size == 0:
return '0B'
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size, 1024)))
p = math.pow(1024, i)
s = round(size / p, 2)
return '%s %s' % (s, size_name[i])
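# Worked example: convert_size(1536) computes i = floor(log1024(1536)) = 1,
# so the unit is size_name[1] = "KB", and returns '1.5 KB'.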

View File

@@ -1,30 +0,0 @@
{
"id": "peliscon",
"name": "Peliscon",
"active": false,
"adult": false,
"language": ["cast"],
"thumbnail": "http://imgur.com/yTQRPUJ.png",
"categories": [
"movie",
"tvshow"
],
"settings": [
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,378 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
__modo_grafico__ = config.get_setting('modo_grafico', "peliscon")
host = "http://peliscon.com"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(
item.clone(title="[COLOR aqua][B]Películas[/B][/COLOR]", action="scraper", url= host + "/peliculas/",
thumbnail="http://imgur.com/FrcWTS8.png", fanart="http://imgur.com/MGQyetQ.jpg",
contentType="movie"))
itemlist.append(itemlist[-1].clone(title="[COLOR aqua][B]Series[/B][/COLOR]", action="scraper",
url= host + "/series/", thumbnail="http://imgur.com/FrcWTS8.png",
fanart="http://imgur.com/i41eduI.jpg", contentType="tvshow"))
itemlist.append(item.clone(title="[COLOR aqua][B] Últimos capitulos[/B][/COLOR]", action="ul_cap",
url= host + "/episodios/", thumbnail="http://imgur.com/FrcWTS8.png",
fanart="http://imgur.com/i41eduI.jpg", contentType="tvshow"))
itemlist.append(itemlist[-1].clone(title="[COLOR crimson][B]Buscar[/B][/COLOR]", action="search",
thumbnail="http://imgur.com/FrcWTS8.png", fanart="http://imgur.com/h1b7tfN.jpg"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/?s=" + texto
item.extra = "search"
try:
return buscador(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def buscador(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = scrapertools.find_multiple_matches(data,
'<div class="result-item">.*?href="([^"]+)".*?alt="([^"]+)".*?<span class=".*?">([^"]+)</span>.*?<span class="year">([^"]+)</span>')
for url, title, genere, year in patron:
if "Serie" in genere:
checkmt = "tvshow"
genere = "[COLOR aqua][B]" + genere + "[/B][/COLOR]"
else:
checkmt = "movie"
genere = "[COLOR cadetblue][B]" + genere + "[/B][/COLOR]"
titulo = "[COLOR crimson]" + title + "[/COLOR]" + " [ " + genere + " ] "
if checkmt == "movie":
new_item = item.clone(action="findvideos", title=titulo, url=url, fulltitle=title, contentTitle=title,
contentType="movie", library=True)
else:
new_item = item.clone(action="findtemporadas", title=titulo, url=url, fulltitle=title, contentTitle=title,
show=title, contentType="tvshow", library=True)
new_item.infoLabels['year'] = year
itemlist.append(new_item)
try:
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
for item in itemlist:
if not "Siguiente >>" in item.title:
if "0." in str(item.infoLabels['rating']):
item.infoLabels['rating'] = "[COLOR indianred]Sin puntuacíon[/COLOR]"
else:
item.infoLabels['rating'] = "[COLOR springgreen]" + str(item.infoLabels['rating']) + "[/COLOR]"
item.title = item.title + " " + str(item.infoLabels['rating'])
except:
pass
    ## Pagination
next = scrapertools.find_single_match(data, '<div class=\'resppages\'><a href="([^"]+)"')
if len(next) > 0:
url = next
itemlist.append(item.clone(title="[COLOR springgreen][B]Siguiente >>[/B][/COLOR]", action="buscador", url=url))
return itemlist
def scraper(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
if item.contentType == "movie":
patron = scrapertools.find_multiple_matches(data,
'<div class="poster">.*?src="(.*?)" alt=.*?href="(.*?)">.*?'
'<h4>(.*?)<\/h4>.*?img\/flags\/(.*?)\.png.*?imdb.*?<span>(.*?)>')
for thumb, url, title, language, year in patron:
titulo = title
title = re.sub(r"!|¡", "", title)
title = title.replace("Autosia", "Autopsia")
title = re.sub(r"&#8217;|PRE-Estreno", "'", title)
new_item = item.clone(action="findvideos", title="[COLOR aqua]" + titulo + "[/COLOR]", url=url,
fulltitle=title, contentTitle=title, contentType="movie", extra=year, library=True,
language= language, infoLabels={'year':year})
itemlist.append(new_item)
else:
patron = scrapertools.find_multiple_matches(data,
'<div class="poster">.*?src="(.*?)" alt=.*?href="(.*?)">.*?'
'<h4>(.*?)<\/h4>.*?<span>(.*?)<')
for thumb, url, title, year in patron:
titulo = title.strip()
title = re.sub(r"\d+x.*", "", title)
new_item = item.clone(action="findtemporadas", title="[COLOR aqua]" + titulo + "[/COLOR]", url=url,
thumbnail=thumb, fulltitle=title, contentTitle=title, show=title,
contentType="tvshow", library=True, infoLabels={'year':year})
itemlist.append(new_item)
    ## Pagination
next = scrapertools.find_single_match(data, '<div class=\'resppages\'><a href="([^"]+)"')
if len(next) > 0:
url = next
itemlist.append(
item.clone(title="[COLOR springgreen][B]Siguiente >>[/B][/COLOR]", thumbnail="http://imgur.com/a7lQAld.png",
url=url))
try:
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
for item in itemlist:
if not "Siguiente >>" in item.title:
if "0." in str(item.infoLabels['rating']):
item.infoLabels['rating'] = "[COLOR indianred]Sin puntuacíon[/COLOR]"
else:
item.infoLabels['rating'] = "[COLOR springgreen]" + str(item.infoLabels['rating']) + "[/COLOR]"
item.title = item.title + " " + str(item.infoLabels['rating'])
except:
pass
return itemlist
def ul_cap(item):
itemlist = []
logger.info()
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = scrapertools.find_multiple_matches(data,
'<div class="poster">.*?<img src="([^"]+)" alt="([^"]+):.*?href="([^"]+)"><span class="b">(\d+x\d+)<\/span>')
for thumb, title, url, cap in patron:
temp = re.sub(r"x\d+", "", cap)
epi = re.sub(r"\d+x", "", cap)
titulo = title.strip() + "--" + "[COLOR red][B]" + cap + "[/B][/COLOR]"
title = re.sub(r"\d+x.*", "", title)
new_item = item.clone(action="findvideos", title="[COLOR aqua]" + titulo + "[/COLOR]", url=url, thumbnail=thumb,
fulltitle=title, contentTitle=title, show=title, contentType="tvshow", temp=temp, epi=epi,
library=True)
itemlist.append(new_item)
    ## Pagination
next = scrapertools.find_single_match(data, '<div class=\'resppages\'><a href="([^"]+)"')
if len(next) > 0:
url = next
itemlist.append(
item.clone(title="[COLOR springgreen][B]Siguiente >>[/B][/COLOR]", thumbnail="http://imgur.com/a7lQAld.png",
url=url))
try:
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
for item in itemlist:
if not "Siguiente >>" in item.title:
if "0." in str(item.infoLabels['rating']):
item.infoLabels['rating'] = "[COLOR indianred]Sin puntuacíon[/COLOR]"
else:
item.infoLabels['rating'] = "[COLOR springgreen] (" + str(item.infoLabels['rating']) + ")[/COLOR]"
item.title = item.title + " " + str(item.infoLabels['rating'])
except:
pass
return itemlist
def findtemporadas(item):
logger.info()
itemlist = []
if not item.temp:
check_temp = None
else:
check_temp = "yes"
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
if len(item.extra.split("|")):
if len(item.extra.split("|")) >= 4:
fanart = item.extra.split("|")[2]
extra = item.extra.split("|")[3]
try:
fanart_extra = item.extra.split("|")[4]
except:
fanart_extra = item.extra.split("|")[3]
            try:
                fanart_info = item.extra.split("|")[5]
            except:
                fanart_info = item.extra.split("|")[3]  # fall back so fanart_info is always bound
elif len(item.extra.split("|")) == 3:
fanart = item.extra.split("|")[2]
extra = item.extra.split("|")[0]
fanart_extra = item.extra.split("|")[0]
fanart_info = item.extra.split("|")[1]
elif len(item.extra.split("|")) == 2:
fanart = item.extra.split("|")[1]
extra = item.extra.split("|")[0]
fanart_extra = item.extra.split("|")[0]
fanart_info = item.extra.split("|")[1]
else:
extra = item.extra
fanart_extra = item.extra
fanart_info = item.extra
try:
logger.info(fanart_extra)
logger.info(fanart_info)
except:
fanart_extra = item.fanart
fanart_info = item.fanart
bloque_episodios = scrapertools.find_multiple_matches(data, 'Temporada (\d+) <i>(.*?)</div></li></ul></div></div>')
for temporada, bloque_epis in bloque_episodios:
item.infoLabels = item.InfoLabels
item.infoLabels['season'] = temporada
itemlist.append(item.clone(action="epis",
title="[COLOR cornflowerblue][B]Temporada [/B][/COLOR]" + "[COLOR darkturquoise][B]" + temporada + "[/B][/COLOR]",
url=bloque_epis, contentType=item.contentType, contentTitle=item.contentTitle,
show=item.show, extra=item.extra, fanart_extra=fanart_extra, fanart_info=fanart_info,
datalibrary=data, check_temp=check_temp, folder=True))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
if config.get_videolibrary_support() and itemlist:
if len(bloque_episodios) == 1:
extra = "epis"
else:
extra = "epis###serie_add"
infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'], 'tvdb_id': item.infoLabels['tvdb_id'],
'imdb_id': item.infoLabels['imdb_id']}
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", text_color="0xFFe5ffcc",
action="add_serie_to_library", extra="", url=item.url,
contentSerieName=item.fulltitle, infoLabels=infoLabels,
thumbnail='http://imgur.com/3ik73p8.png', datalibrary=data))
return itemlist
def epis(item):
logger.info()
itemlist = []
if item.extra == "serie_add":
item.url = item.datalibrary
patron = scrapertools.find_multiple_matches(item.url, '<div class="imagen"><a href="([^"]+)".*?"numerando">(.*?)<')
for url, epi in patron:
episodio = scrapertools.find_single_match(epi, '\d+ - (\d+)')
item.infoLabels['episode'] = episodio
epi = re.sub(r" - ", "X", epi)
itemlist.append(
item.clone(title="[COLOR deepskyblue]Episodio " + "[COLOR red]" + epi, url=url, action="findvideos",
show=item.show, fanart=item.extra, extra=item.extra, fanart_extra=item.fanart_extra,
fanart_info=item.fanart_info, check_temp=item.check_temp, folder=True))
if item.extra != "serie_add":
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
for item in itemlist:
item.fanart = item.extra
            title = ""
            if item.infoLabels['title']:
                title = "[COLOR royalblue]" + item.infoLabels['title'] + "[/COLOR]"
            item.title = item.title + " -- \"" + title + "\""
return itemlist
def findvideos(item):
logger.info()
itemlist = []
if item.temp:
url_epis = item.url
data = httptools.downloadpage(item.url).data
if item.contentType != "movie":
if not item.infoLabels['episode']:
capitulo = scrapertools.find_single_match(item.title, '(\d+x\d+)')
url_capitulo = scrapertools.find_single_match(data,
'<a href="(http://www.divxtotal.com/wp-content/uploads/.*?' + capitulo + '.*?.torrent)')
if len(item.extra.split("|")) >= 2:
extra = item.extra
else:
extra = item.fanart
else:
capitulo = item.title
url_capitulo = item.url
try:
fanart = item.fanart_extra
except:
fanart = item.extra.split("|")[0]
url_data = scrapertools.find_multiple_matches(data, '<div id="option-(.*?)".*?src="([^"]+)"')
for option, url in url_data:
server, idioma = scrapertools.find_single_match(data,
'href="#option-' + option + '">.*?</b>(.*?)<span class="dt_flag">.*?flags/(.*?).png')
if not item.temp:
item.infoLabels['year'] = None
if item.temp:
capitulo = re.sub(r".*--.*", "", capitulo)
title = "[COLOR darkcyan][B]Ver capítulo [/B][/COLOR]" + "[COLOR red][B]" + capitulo + "[/B][/COLOR]"
new_item = item.clone(title=title, url=url, action="play", fanart=fanart, thumbnail=item.thumbnail,
server_v=server, idioma=idioma, extra=item.extra, fulltitle=item.fulltitle,
folder=False)
new_item.infoLabels['episode'] = item.epi
new_item.infoLabels['season'] = item.temp
itemlist.append(new_item)
itemlist = servertools.get_servers_itemlist(itemlist)
else:
title = "[COLOR darkcyan][B]Ver capítulo [/B][/COLOR]" + "[COLOR red][B]" + capitulo + "[/B][/COLOR]" + " " + "[COLOR darkred]" + server + " ( " + idioma + " )" + "[/COLOR]"
itemlist.append(Item(channel=item.channel, title=title, url=url, action="play", fanart=fanart,
thumbnail=item.thumbnail, extra=item.extra, fulltitle=item.fulltitle,
folder=False))
if item.temp:
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
for item in itemlist:
            title_inf = ""
            if item.infoLabels['title']:
                title_inf = "[COLOR royalblue]" + item.infoLabels['title'] + "[/COLOR]"
            item.title = item.title + " -- \"" + title_inf + "\"" + " " + "[COLOR darkred]" + item.server_v + " ( " + item.idioma + " )" + "[/COLOR]"
    if (item.infoLabels['episode'] and item.library) or (item.temp and item.library):
thumbnail = scrapertools.find_single_match(item.extra, 'http://assets.fanart.tv/.*jpg')
if thumbnail == "":
thumbnail = item.thumbnail
if not "assets.fanart" in item.fanart_info:
fanart = item.fanart_info
else:
fanart = item.fanart
if item.temp:
item.infoLabels['tvdb_id'] = item.tvdb
if item.temp and not item.check_temp:
url_epis = re.sub(r"-\dx.*", "", url_epis)
url_epis = url_epis.replace("episodios", "series")
itemlist.append(
Item(channel=item.channel, title="[COLOR salmon][B]Todos los episodios[/B][/COLOR]", url=url_epis,
action="findtemporadas", server="torrent",
thumbnail=item.infoLabels['thumbnail'],
contentType=item.contentType, contentTitle=item.contentTitle, InfoLabels=item.infoLabels,
thumb_art=item.thumb_art, thumb_info=item.thumbnail, fulltitle=item.fulltitle,
library=item.library, temp=item.temp, folder=True))
else:
url_data = scrapertools.find_multiple_matches(data, '<div id="option-(.*?)".*?src="([^"]+)"')
for option, url in url_data:
server, idioma = scrapertools.find_single_match(data,
'href="#option-' + option + '">.*?</b>(.*?)<span class="dt_flag">.*?flags/(.*?).png')
title = server + " ( " + idioma + " )"
item.infoLabels['year'] = None
itemlist.append(Item(channel=item.channel, title="[COLOR dodgerblue][B]" + title + " [/B][/COLOR]", url=url,
action="play", fanart=item.fanart, thumbnail=item.thumbnail, extra=item.extra,
InfoLabels=item.infoLabels, folder=True))
if item.library and config.get_videolibrary_support() and len(itemlist) > 0:
infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'],
'title': item.infoLabels['title']}
itemlist.append(Item(channel=item.channel, title="Añadir esta película a la videoteca",
action="add_pelicula_to_library", url=item.url, fanart=item.extra.split("|")[0],
infoLabels=infoLabels, text_color="0xFFe5ffcc",
thumbnail='http://imgur.com/3ik73p8.png'))
return itemlist
def play(item):
itemlist = []
videolist = servertools.find_video_items(data=item.url)
for video in videolist:
itemlist.append(
Item(channel=item.channel, title="[COLOR saddlebrown][B]" + video.server + "[/B][/COLOR]", url=video.url,
server=video.server, action="play", fanart=item.fanart, thumbnail=item.thumbnail, extra=item.extra,
InfoLabels=item.infoLabels, folder=False))
return itemlist
def get_year(url):
data = httptools.downloadpage(url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
year = scrapertools.find_single_match(data, 'Fecha de lanzamiento.*?, (\d\d\d\d)')
if year == "":
year = "1111"
return year

View File

@@ -1,85 +0,0 @@
{
"id": "plusdede",
"name": "Plusdede",
"active": false,
"adult": false,
"language": ["cast"],
"thumbnail": "https://s18.postimg.cc/e17e98eqh/6_-_4_Isbv_Q3.png",
"banner": "plusdede.png",
"categories": [
"movie",
"tvshow"
],
"settings": [
{
"id": "plusdedeuser",
"type": "text",
"label": "@30014",
"enabled": true,
"visible": true
},
{
"id": "plusdedepassword",
"type": "text",
"hidden": true,
"label": "@30015",
"enabled": "!eq(-1,'')",
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": "!eq(-1,'') + !eq(-2,'')",
"visible": true
},
{
"id": "plusdedesortlinks",
"type": "list",
"label": "Ordenar enlaces",
"default": 0,
"enabled": true,
"visible": "!eq(-2,'') + !eq(-3,'')",
"lvalues": [
"No",
"Por no Reportes",
"Por Idioma",
"Por Calidad",
"Por Idioma y Calidad",
"Por Idioma y no Reportes",
"Por Idioma, Calidad y no Reportes"
]
},
{
"id": "plusdedeshowlinks",
"type": "list",
"label": "Mostrar enlaces",
"default": 0,
"enabled": true,
"visible": "!eq(-3,'') + !eq(-4,'')",
"lvalues": [
"Todos",
"Ver online",
"Descargar"
]
},
{
"id": "plusdedenumberlinks",
"type": "list",
"label": "Limitar número de enlaces",
"default": 0,
"enabled": true,
"visible": "!eq(-4,'') + !eq(-5,'')",
"lvalues": [
"No",
"5",
"10",
"15",
"20",
"25",
"30"
]
}
]
}
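# The credential and list settings declared above are read back in the channel
# code via config.get_setting (pattern taken from plusdede.py below):
#   user = config.get_setting("plusdedeuser", "plusdede")            # "text" setting
#   sortlinks = config.get_setting("plusdedesortlinks", "plusdede")  # index into "lvalues"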

View File

@@ -1,961 +0,0 @@
# -*- coding: utf-8 -*-
import os
import re
import sys
import urlparse
from time import sleep
from core import channeltools
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
HOST = 'https://www.plusdede.com'
__channel__ = 'plusdede'
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
color1, color2, color3 = ['0xFFB10021', '0xFFB10021', '0xFFB10004']
def login():
url_origen = HOST+"/login?popup=1"
try:
data = httptools.downloadpage(url_origen).data
except:
data = httptools.downloadpage(url_origen, follow_redirects=False).data
if '<span class="username">' in data:
return True
token = scrapertools.find_single_match(data, '<input name="_token" type="hidden" value="([^"]+)"')
    if re.search('Escribe los números de la imagen', data):
        import xbmcgui  # Kodi GUI module for the captcha dialog
        captcha_url = scrapertools.find_single_match(data, '<img src="([^"]+)" alt="captcha">')
        imagen_data = httptools.downloadpage(captcha_url).data
        ficheropng = os.path.join(config.get_data_path(), "captcha_plusdede.png")
        outfile = open(ficheropng, 'wb')
        outfile.write(imagen_data)
        outfile.close()
        img = xbmcgui.ControlImage(450, 15, 400, 130, ficheropng)
        wdlg = xbmcgui.WindowDialog()
        wdlg.addControl(img)
        wdlg.show()
sleep(1)
kb = platformtools.dialog_numeric(0, "Escribe los números de la imagen")
postcaptcha = ""
if kb !='':
solution = kb
postcaptcha = "&captcha=" + str(solution)
else:
return False
wdlg.close()
else:
postcaptcha=""
post = "_token=" + str(token) + "&email=" + str(config.get_setting("plusdedeuser", "plusdede")) + \
"&password=" + str(config.get_setting("plusdedepassword", "plusdede")) + postcaptcha\
#+ "&app=2131296469"
url = HOST
headers = {"User-Agent": "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/66.0.3163.100 Safari/537.36", "Referer": url, "X-Requested-With": "XMLHttpRequest","X-CSRF-TOKEN":
token}
data = httptools.downloadpage(HOST+"/login", post=post, headers=headers,
replace_headers=False).data
if "redirect" in data:
return True
else:
return False
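# Typical usage (a sketch): mainlist() below calls login() once; every later
# request rides on the session cookies httptools keeps, plus the XHR header
# the site expects:
#   if login():
#       data = httptools.downloadpage(HOST + "/pelis",
#                                     headers={"X-Requested-With": "XMLHttpRequest"}).data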
def mainlist(item):
logger.info()
itemlist = []
if not config.get_setting("plusdedeuser", "plusdede"):
itemlist.append(
Item(channel=item.channel, title="Habilita tu cuenta en la configuración e ingresar de nuevo al canal", action="settingCanal",
url=""))
else:
result = login()
if not result:
itemlist.append(Item(channel=item.channel, action="mainlist", title="Login fallido. Volver a intentar..."))
return itemlist
item.url = HOST
item.fanart = fanart_host
item.thumbnail = "https://s18.postimg.cc/r5cylu6rd/12_-_oi_RDsdv.png"
itemlist.append(item.clone(title="Películas", action="menupeliculas", text_color=color3, text_blod=True))
item.thumbnail = "https://s18.postimg.cc/ruvqy6zl5/15_-_9m9_Dp1m.png"
itemlist.append(item.clone(title="Series", action="menuseries", text_color=color3, text_blod=True))
itemlist.append(item.clone(title="Listas", action="menulistas", text_color=color3, text_blod=True, thumbnail = 'https://s18.postimg.cc/xj21p46ih/10_-_Uf7e_XHE.png'))
itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
item.thumbnail = ""
itemlist.append(item.clone(channel=item.channel, action="settingCanal", title="Configuración...", url="", thumbnail='https://s18.postimg.cc/c9efeassp/3_-_QAHK2_Tc.png'))
return itemlist
def settingCanal(item):
return platformtools.show_channel_settings()
def menuseries(item):
logger.info()
itemlist = []
item.url = HOST
item.fanart = fanart_host
item.text_color = None
item.thumbnail = "https://s18.postimg.cc/ruvqy6zl5/15_-_9m9_Dp1m.png"
itemlist.append(item.clone(action="peliculas", title=" Novedades", url="https://www.plusdede.com/series", thumbnail='https://s18.postimg.cc/in3ihji95/11_-_WPg_H5_Kx.png'))
itemlist.append(item.clone(action="generos", title=" Por géneros", url="https://www.plusdede.com/series", thumbnail='https://s18.postimg.cc/p0slktaah/5_-_c_Nf_KRvm.png'))
itemlist.append(
item.clone(action="peliculas", title=" Siguiendo", url="https://www.plusdede.com/series/following", thumbnail='https://s18.postimg.cc/68gqh7j15/7_-_tqw_AHa5.png'))
itemlist.append(item.clone(action="peliculas", title=" Capítulos Pendientes",
url="https://www.plusdede.com/series/mypending/0?popup=1", viewmode="movie", thumbnail='https://s18.postimg.cc/9s2o71w1l/2_-_3dbbx7_K.png'))
itemlist.append(
item.clone(action="peliculas", title=" Favoritas", url="https://www.plusdede.com/series/favorites", thumbnail='https://s18.postimg.cc/n8zmpwynd/4_-_JGrig_Ep.png'))
itemlist.append(
item.clone(action="peliculas", title=" Pendientes", url="https://www.plusdede.com/series/pending", thumbnail='https://s18.postimg.cc/4gnrmacix/13_-_cwl_TDog.png'))
itemlist.append(item.clone(action="peliculas", title=" Terminadas", url="https://www.plusdede.com/series/seen", thumbnail='https://s18.postimg.cc/5vpcay0qh/17_-_M2in_Fp_O.png'))
itemlist.append(
item.clone(action="peliculas", title=" Recomendadas", url="https://www.plusdede.com/series/recommended", thumbnail='https://s18.postimg.cc/bwn182sih/14_-_fin32_Kp.png'))
itemlist.append(item.clone(action="search", title=" Buscar...", url="https://www.plusdede.com/series", thumbnaiil='https://s18.postimg.cc/s7n54ghvt/1_-_01_ZDYii.png'))
itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
item.thumbnail = ""
itemlist.append(Item(channel=item.channel, action="settingCanal", title="Configuración...", url="", thumbnail='https://s18.postimg.cc/c9efeassp/3_-_QAHK2_Tc.png'))
return itemlist
def menupeliculas(item):
logger.info()
itemlist = []
item.url = HOST
item.fanart = fanart_host
item.text_color = None
item.thumbnail = "https://s18.postimg.cc/r5cylu6rd/12_-_oi_RDsdv.png"
itemlist.append(item.clone(action="peliculas", title=" Novedades", url="https://www.plusdede.com/pelis", thumbnail='https://s18.postimg.cc/in3ihji95/11_-_WPg_H5_Kx.png'))
itemlist.append(item.clone(action="generos", title=" Por géneros", url="https://www.plusdede.com/pelis", thumbnail='https://s18.postimg.cc/p0slktaah/5_-_c_Nf_KRvm.png'))
itemlist.append(item.clone(action="peliculas", title=" Solo HD", url="https://www.plusdede.com/pelis?quality=3", thumbnail='https://s18.postimg.cc/e17e95mfd/16_-_qmqn4_Si.png'))
itemlist.append(
item.clone(action="peliculas", title=" Pendientes", url="https://www.plusdede.com/pelis/pending", thumbnail='https://s18.postimg.cc/4gnrmacix/13_-_cwl_TDog.png'))
itemlist.append(
item.clone(action="peliculas", title=" Recomendadas", url="https://www.plusdede.com/pelis/recommended", thumbnail='https://s18.postimg.cc/bwn182sih/14_-_fin32_Kp.png'))
itemlist.append(
item.clone(action="peliculas", title=" Favoritas", url="https://www.plusdede.com/pelis/favorites", thumbnail='https://s18.postimg.cc/n8zmpwynd/4_-_JGrig_Ep.png'))
itemlist.append(item.clone(action="peliculas", title=" Vistas", url="https://www.plusdede.com/pelis/seen", thumbnail='https://s18.postimg.cc/5vpcay0qh/17_-_M2in_Fp_O.png'))
itemlist.append(item.clone(action="search", title=" Buscar...", url="https://www.plusdede.com/pelis", thumbnail='https://s18.postimg.cc/s7n54ghvt/1_-_01_ZDYii.png'))
itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
item.thumbnail = ""
itemlist.append(item.clone(channel=item.channel, action="settingCanal", title="Configuración...", url="", thumbnail='https://s18.postimg.cc/c9efeassp/3_-_QAHK2_Tc.png'))
return itemlist
def menulistas(item):
logger.info()
itemlist = []
item.url = HOST
item.fanart = fanart_host
item.text_color = None
itemlist.append(
item.clone(action="listas", tipo="populares", title=" Populares", url="https://www.plusdede.com/listas", thumbnail='https://s18.postimg.cc/7aqwzrha1/8_-_3rn14_Tq.png'))
itemlist.append(
item.clone(action="listas", tipo="siguiendo", title=" Siguiendo", url="https://www.plusdede.com/listas", thumbnail='https://s18.postimg.cc/4tf5sha89/9_-_z_F8c_UBT.png'))
itemlist.append(
item.clone(action="listas", tipo="tuslistas", title=" Tus Listas", url="https://www.plusdede.com/listas"))
itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
item.thumbnail = ""
itemlist.append(item.clone(channel=item.channel, action="settingCanal", title="Configuración...", url="", thumbnail='https://s18.postimg.cc/c9efeassp/3_-_QAHK2_Tc.png'))
return itemlist
def generos(item):
logger.info()
tipo = item.url.replace("https://www.plusdede.com/", "")
    # Download the page
data = httptools.downloadpage(item.url).data
    # Extract the entries (folders)
data = scrapertools.find_single_match(data,
'<select name="genre_id" class="selectpicker" title="Selecciona...">(.*?)</select>')
patron = '<option value="([^"]+)">([^<]+)</option>'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
for id_genere, title in matches:
title = title.strip()
thumbnail = ""
plot = ""
# https://www.plusdede.com/pelis?genre_id=1
url = "https://www.plusdede.com/" + tipo + "?genre_id=" + id_genere
itemlist.append(
Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot,
fulltitle=title))
return itemlist
def search(item, texto):
logger.info()
item.tipo = item.url.replace("https://www.plusdede.com/", "")
item.url = "https://www.plusdede.com/search/"
texto = texto.replace(" ", "-")
item.url = item.url + texto
try:
return buscar(item)
    # Catch the exception so a failing channel does not interrupt the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def buscar(item):
logger.info()
    # Download the page
headers = {"X-Requested-With": "XMLHttpRequest"}
data = httptools.downloadpage(item.url, headers=headers).data
    # Extract the entries (folders)
json_object = jsontools.load(data)
data = json_object["content"]
return parse_mixed_results(item, data)
def parse_mixed_results(item, data):
itemlist = []
patron = '<div class="media-dropdown mini dropdown model" data-value="([^"]+)"+'
patron += '.*?<a href="([^"]+)"[^<]data-toggle="tooltip" data-container="body"+'
patron += ' data-delay="500" title="([^"]+)"[^<]+'
patron += '.*?src="([^"]+)"+'
patron += '.*?<div class="year">([^<]+)</div>+'
patron += '.*?<div class="value"><i class="fa fa-star"></i> ([^<]+)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
if item.tipo == "lista":
following = scrapertools.find_single_match(data, '<div class="follow-lista-buttons ([^"]+)">')
data_id = scrapertools.find_single_match(data, 'data-model="10" data-id="([^"]+)">')
if following.strip() == "following":
itemlist.append(
Item(channel='plusdede', title="Dejar de seguir", idtemp=data_id, token=item.token, valor="unfollow",
action="plusdede_check", url=item.url, tipo=item.tipo))
else:
itemlist.append(
Item(channel='plusdede', title="Seguir esta lista", idtemp=data_id, token=item.token, valor="follow",
action="plusdede_check", url=item.url, tipo=item.tipo))
for visto, scrapedurl, scrapedtitle, scrapedthumbnail, scrapedyear, scrapedvalue in matches:
title = ""
if visto.strip() == "seen":
title += "[visto] "
title += scrapertools.htmlclean(scrapedtitle)
if scrapedyear != '':
title += " (" + scrapedyear + ")"
fulltitle = title
if scrapedvalue != '':
title += " (" + scrapedvalue + ")"
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
fanart = thumbnail.replace("mediathumb", "mediabigcover")
plot = ""
# https://www.plusdede.com/peli/the-lego-movie
# https://www.plusdede.com/links/view/slug/the-lego-movie/what/peli?popup=1
if "/peli/" in scrapedurl or "/docu/" in scrapedurl:
# sectionStr = "peli" if "/peli/" in scrapedurl else "docu"
if "/peli/" in scrapedurl:
sectionStr = "peli"
else:
sectionStr = "docu"
referer = urlparse.urljoin(item.url, scrapedurl)
url = urlparse.urljoin(item.url, scrapedurl)
if item.tipo != "series":
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, extra=referer, url=url,
thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, fanart=fanart,
contentTitle=scrapedtitle, contentType="movie", context=["buscar_trailer"]))
else:
referer = item.url
url = urlparse.urljoin(item.url, scrapedurl)
if item.tipo != "pelis":
itemlist.append(Item(channel=item.channel, action="episodios", title=title, extra=referer, url=url,
thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, show=title, fanart=fanart,
contentTitle=scrapedtitle, contentType="tvshow", context=["buscar_trailer"]))
next_page = scrapertools.find_single_match(data,
'<div class="onclick load-more-icon no-json" data-action="replace" data-url="([^"]+)">')
if next_page != "":
url = urlparse.urljoin("https://www.plusdede.com", next_page).replace("amp;", "")
itemlist.append(
Item(channel=item.channel, action="pag_sig", token=item.token, title=">> Página siguiente",
extra=item.extra, url=url))
try:
import xbmcplugin
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_UNSORTED)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_TITLE)
except:
pass
return itemlist
def siguientes(item):  # Not used
logger.info()
    # Download the page
data = httptools.downloadpage(item.url).data
    # Extract the entries (folders)
bloque = scrapertools.find_single_match(data, '<h2>Siguiendo</h2>(.*?)<div class="box">')
patron = '<div class="coverMini shadow tiptip" title="([^"]+)">[^<]+'
patron += '<img class="centeredPic centeredPicFalse" onerror="[^"]+" src="([^"]+)"[^<]+'
patron += '<img src="/images/loading-mini.gif" class="loader"/>[^<]+'
patron += '<div class="extra-info"><span class="year">[^<]+'
patron += '</span><span class="value"><i class="icon-star"></i>[^<]+'
patron += '</span></div>[^<]+'
patron += '</div>[^<]+'
patron += '</a>[^<]+'
patron += '<a class="userepiinfo defaultLink" href="([^"]+)">(\d+)x(\d+)'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
# for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
for scrapedtitle, scrapedthumbnail, scrapedurl, scrapedsession, scrapedepisode in matches:
title = scrapertools.htmlclean(scrapedtitle)
session = scrapertools.htmlclean(scrapedsession)
episode = scrapertools.htmlclean(scrapedepisode)
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
fanart = thumbnail.replace("mediathumb", "mediabigcover")
plot = ""
title = session + "x" + episode + " - " + title
# https://www.plusdede.com/peli/the-lego-movie
# https://www.plusdede.com/links/view/slug/the-lego-movie/what/peli?popup=1
referer = urlparse.urljoin(item.url, scrapedurl)
url = referer
# itemlist.append( Item(channel=item.channel, action="episodios" , title=title , url=url, thumbnail=thumbnail, plot=plot, fulltitle=title, show=title))
itemlist.append(
Item(channel=item.channel, action="episodio", title=title, url=url, thumbnail=thumbnail, plot=plot,
fulltitle=title, show=title, fanart=fanart, extra=session + "|" + episode))
return itemlist
def episodio(item):
logger.info()
itemlist = []
    # Download the page
data = httptools.downloadpage(item.url).data
session = str(int(item.extra.split("|")[0]))
episode = str(int(item.extra.split("|")[1]))
patrontemporada = '<div class="checkSeason"[^>]+>Temporada ' + session + '<div class="right" onclick="controller.checkSeason(.*?)\s+</div></div>'
matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data)
for bloque_episodios in matchestemporadas:
        # Extract the episodes
patron = '<span class="title defaultPopup" href="([^"]+)"><span class="number">' + episode + ' </span>([^<]+)</span>(\s*</div>\s*<span[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></span><div[^>]*><button[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></button><div class="action([^"]*)" data-action="seen">)?'
matches = re.compile(patron, re.DOTALL).findall(bloque_episodios)
for scrapedurl, scrapedtitle, info, visto in matches:
# visto_string = "[visto] " if visto.strip()=="active" else ""
if visto.strip() == "active":
visto_string = "[visto] "
else:
visto_string = ""
numero = episode
title = visto_string + session + "x" + numero + " " + scrapertools.htmlclean(scrapedtitle)
thumbnail = ""
plot = ""
# https://www.plusdede.com/peli/the-lego-movie
# https://www.plusdede.com/links/view/slug/the-lego-movie/what/peli?popup=1
# https://www.plusdede.com/links/viewepisode/id/475011?popup=1
epid = scrapertools.find_single_match(scrapedurl, "id/(\d+)")
url = "https://www.plusdede.com/links/viewepisode/id/" + epid
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
fulltitle=title, fanart=item.fanart, show=item.show))
itemlist2 = []
for capitulo in itemlist:
itemlist2 = findvideos(capitulo)
return itemlist2
def peliculas(item):
logger.info()
    # Download the page
headers = {"X-Requested-With": "XMLHttpRequest"}
data = httptools.downloadpage(item.url, headers=headers).data
    # Extract the entries (folders)
json_object = jsontools.load(data)
data = json_object["content"]
return parse_mixed_results(item, data)
def episodios(item):
logger.info()
itemlist = []
    # Download the page
idserie = ''
data = httptools.downloadpage(item.url).data
patrontemporada = '<ul.*?<li class="season-header" >([^<]+)<(.*?)\s+</ul>'
matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data)
idserie = scrapertools.find_single_match(data, 'data-model="5" data-id="(\d+)"')
token = scrapertools.find_single_match(data, '_token" content="([^"]+)"')
if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("kodi")):
itemlist.append(Item(channel=item.channel, action="infosinopsis", title="INFO / SINOPSIS", url=item.url,
thumbnail=item.thumbnail, fanart=item.fanart, folder=False))
for nombre_temporada, bloque_episodios in matchestemporadas:
        # Extract the episodes
patron_episodio = '<li><a href="#"(.*?)</a></li>'
# patron = '<li><a href="#" data-id="([^"]*)".*?data-href="([^"]+)">\s*<div class="name">\s*<span class="num">([^<]+)</span>\s*([^<]+)\s*</div>.*?"show-close-footer episode model([^"]+)"'
matches = re.compile(patron_episodio, re.DOTALL).findall(bloque_episodios)
for data_episodio in matches:
scrapeid = scrapertools.find_single_match(data_episodio, '<li><a href="#" data-id="([^"]*)"')
scrapedurl = scrapertools.find_single_match(data_episodio, 'data-href="([^"]+)">\s*<div class="name">')
numero = scrapertools.find_single_match(data_episodio, '<span class="num">([^<]+)</span>')
scrapedtitle = scrapertools.find_single_match(data_episodio,
'<span class="num">.*?</span>\s*([^<]+)\s*</div>')
visto = scrapertools.find_single_match(data_episodio, '"show-close-footer episode model([^"]+)"')
title = nombre_temporada.replace("Temporada ", "").replace("Extras de la serie", "Extras 0").replace(" ",
"") + "x" + numero + " " + scrapertools.htmlclean(
scrapedtitle)
if visto.strip() == "seen":
title = "[visto] " + title
thumbnail = item.thumbnail
fanart = item.fanart
plot = ""
# https://www.plusdede.com/peli/the-lego-movie
# https://www.plusdede.com/links/view/slug/the-lego-movie/what/peli?popup=1
# https://www.plusdede.com/links/viewepisode/id/475011?popup=1
# epid = scrapertools.find_single_match(scrapedurl,"id/(\d+)")
url = "https://www.plusdede.com" + scrapedurl
itemlist.append(
Item(channel=item.channel, action="findvideos", nom_serie=item.title, tipo="5", title=title, url=url,
thumbnail=thumbnail, plot=plot, fulltitle=title, fanart=fanart, show=item.show))
if config.get_videolibrary_support():
        # With year and rating in the name the show cannot be updated correctly;
        # if the rating also changes, another folder would be created.
        # Without year and without rating:
        show = re.sub(r"\s\(\d+\)\s\(\d+\.\d+\)", "", item.show)
        # Without year:
        # show = re.sub(r"\s\(\d+\)", "", item.show)
        # Without rating:
        # show = re.sub(r"\s\(\d+\.\d+\)", "", item.show)
itemlist.append(
Item(channel='plusdede', title="Añadir esta serie a la videoteca", url=item.url, token=token,
action="add_serie_to_library", extra="episodios###", show=show))
itemlist.append(
Item(channel='plusdede', title="Descargar todos los episodios de la serie", url=item.url, token=token,
action="download_all_episodes", extra="episodios", show=show))
itemlist.append(Item(channel='plusdede', title="Marcar como Pendiente", tipo="5", idtemp=idserie, token=token,
valor="pending", action="plusdede_check", show=show))
itemlist.append(Item(channel='plusdede', title="Marcar como Siguiendo", tipo="5", idtemp=idserie, token=token,
valor="following", action="plusdede_check", show=show))
itemlist.append(Item(channel='plusdede', title="Marcar como Finalizada", tipo="5", idtemp=idserie, token=token,
valor="seen", action="plusdede_check", show=show))
itemlist.append(Item(channel='plusdede', title="Marcar como Favorita", tipo="5", idtemp=idserie, token=token,
valor="favorite", action="plusdede_check", show=show))
itemlist.append(
Item(channel='plusdede', title="Quitar marca", tipo="5", idtemp=idserie, token=token, valor="nothing",
action="plusdede_check", show=show))
itemlist.append(
Item(channel='plusdede', title="Añadir a lista", tipo="5", tipo_esp="lista", idtemp=idserie, token=token,
action="plusdede_check", show=show))
return itemlist
def parse_listas(item, bloque_lista):
logger.info()
if item.tipo == "populares":
patron = '<div class="lista(.*?)</div>\s*</h4>'
else:
patron = '<div class="lista(.*?)</h4>\s*</div>'
matches = re.compile(patron, re.DOTALL).findall(bloque_lista)
itemlist = []
for lista in matches:
scrapedurl = scrapertools.htmlclean(scrapertools.find_single_match(lista, '<a href="([^"]+)">[^<]+</a>'))
scrapedtitle = scrapertools.find_single_match(lista, '<a href="[^"]+">([^<]+)</a>')
scrapedfollowers = scrapertools.find_single_match(lista, 'Follow: <span class="number">([^<]+)')
scrapedseries = scrapertools.find_single_match(lista, '<div class="lista-stat badge">Series: ([^<]+)')
scrapedpelis = scrapertools.find_single_match(lista, '<div class="lista-stat badge">Pelis: ([^<]+)')
title = scrapertools.htmlclean(scrapedtitle) + ' ('
if scrapedpelis != '':
title += scrapedpelis + ' pelis, '
if scrapedseries != '':
title += scrapedseries + ' series, '
if scrapedfollowers != '':
title += scrapedfollowers + ' seguidores'
title += ')'
url = urlparse.urljoin("https://www.plusdede.com", scrapedurl)
thumbnail = ""
itemlist.append(
Item(channel=item.channel, action="peliculas", token=item.token, tipo="lista", title=title, url=url))
nextpage = scrapertools.find_single_match(bloque_lista,
'<div class="onclick load-more-icon no-json" data-action="replace" data-url="([^"]+)"')
if nextpage != '':
url = urlparse.urljoin("https://www.plusdede.com", nextpage)
itemlist.append(Item(channel=item.channel, action="lista_sig", token=item.token, tipo=item.tipo,
title=">> Página siguiente", extra=item.extra, url=url))
try:
import xbmcplugin
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_UNSORTED)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_TITLE)
except:
pass
return itemlist
def listas(item):
logger.info()
if item.tipo == "tuslistas":
patron = 'Tus listas(.*?)>Listas que sigues<'
elif item.tipo == "siguiendo":
patron = '<h3>Listas que sigues</h3>(.*?)<h2>Listas populares</h2>'
else:
patron = '<div class="content">\s*<h2>Listas populares(.*?)</div>\s*</div>\s*</div>\s*</div>\s*</div>'
data = httptools.downloadpage(item.url).data
item.token = scrapertools.find_single_match(data, '_token" content="([^"]+)"').strip()
bloque_lista = scrapertools.find_single_match(data, patron)
return parse_listas(item, bloque_lista)
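# Load-more pagination for lists; the X-Requested-With header marks the request
# as AJAX, which the site requires for these endpoints.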
def lista_sig(item):
logger.info()
headers = {"X-Requested-With": "XMLHttpRequest"}
data = httptools.downloadpage(item.url, headers=headers).data
return parse_listas(item, data)
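# Same AJAX load-more call, but for mixed film/series result pages.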
def pag_sig(item):
logger.info()
headers = {"X-Requested-With": "XMLHttpRequest"}
data = httptools.downloadpage(item.url, headers=headers).data
return parse_mixed_results(item, data)
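# Builds the playable links of a film or episode: reads data-model/data-id from
# the media page, fetches the "aportes" popup, scrapes each link with its language
# flags, video/audio quality and report count, and finally sorts/trims the list
# according to the channel settings read below.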
def findvideos(item, verTodos=False):
logger.info()
# Descarga la pagina
data = httptools.downloadpage(item.url).data
data_model = scrapertools.find_single_match(data, 'data-model="([^"]+)"')
data_id = scrapertools.find_single_match(data, 'data-id="([^"]+)"')
trailer = "https://www.youtube.com/watch?v=" + scrapertools.find_single_match(data,
'data-youtube="([^"]+)" class="youtube-link')
url = "https://www.plusdede.com/aportes/" + data_model + "/" + data_id + "?popup=1"
data = httptools.downloadpage(url).data
token = scrapertools.find_single_match(data, '_token" content="([^"]+)"')
patron = 'target="_blank" (.*?)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
idpeli = data_id
if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("kodi")) and data_model == "4":
itemlist.append(Item(channel=item.channel, action="infosinopsis", title="INFO / SINOPSIS", url=item.url,
thumbnail=item.thumbnail, fanart=item.fanart, folder=False))
itemlist.append(Item(channel=item.channel, action="play", title="TRAILER", url=item.url, trailer=trailer,
thumbnail=item.thumbnail, fanart=item.fanart, folder=False))
itemsort = []
sortlinks = config.get_setting("plusdedesortlinks",
item.channel) # 0:no, 1:valoracion, 2:idioma, 3:calidad, 4:idioma+calidad, 5:idioma+valoracion, 6:idioma+calidad+valoracion
showlinks = config.get_setting("plusdedeshowlinks", item.channel) # 0:todos, 1:ver online, 2:descargar
if sortlinks != '' and sortlinks != "No":
sortlinks = int(sortlinks)
else:
sortlinks = 0
if showlinks != '' and showlinks != "No":
showlinks = int(showlinks)
else:
showlinks = 0
for match in matches:
jdown = scrapertools.find_single_match(match, '<span class="fa fa-download"></span>([^<]+)')
if (showlinks == 1 and jdown != '') or (
showlinks == 2 and jdown == ''): # Descartar enlaces veronline/descargar
continue
idioma_1 = ""
idiomas = re.compile('<img src="https://cd.*?plusdede.com/images/flags/([^"]+).png', re.DOTALL).findall(match)
idioma_0 = idiomas[0]
if len(idiomas) > 1:
idioma_1 = idiomas[1]
idioma = idioma_0 + ", SUB " + idioma_1
else:
idioma_1 = ''
idioma = idioma_0
calidad_video = scrapertools.find_single_match(match,
'<span class="fa fa-video-camera"></span>(.*?)</div>').replace(
" ", "").replace("\n", "")
calidad_audio = scrapertools.find_single_match(match,
'<span class="fa fa-headphones"></span>(.*?)</div>').replace(
" ", "").replace("\n", "")
thumb_servidor = scrapertools.find_single_match(match, '<img src="([^"]+)">')
nombre_servidor = scrapertools.find_single_match(thumb_servidor, "hosts/([^\.]+).png")
if jdown != '':
title = "Download " + nombre_servidor + " (" + idioma + ") (Calidad " + calidad_video.strip() + ", audio " + calidad_audio.strip() + ")"
else:
title = "Ver en " + nombre_servidor + " (" + idioma + ") (Calidad " + calidad_video.strip() + ", audio " + calidad_audio.strip() + ")"
valoracion = 0
reports = scrapertools.find_single_match(match,
'<i class="fa fa-exclamation-triangle"></i><br/>\s*<span class="number" data-num="([^"]*)">')
if reports:
valoracion -= int(reports)
title += " (" + reports + " reps)"
url = urlparse.urljoin(item.url, scrapertools.find_single_match(match, 'href="([^"]+)"'))
thumbnail = thumb_servidor
plot = ""
if sortlinks > 0:
# orden1 para dejar los "downloads" detras de los "ver" al ordenar
# orden2 segun configuración
if sortlinks == 1:
orden = valoracion
elif sortlinks == 2:
orden = valora_idioma(idioma_0, idioma_1)
elif sortlinks == 3:
orden = valora_calidad(calidad_video, calidad_audio)
elif sortlinks == 4:
orden = (valora_idioma(idioma_0, idioma_1) * 100) + valora_calidad(calidad_video, calidad_audio)
elif sortlinks == 5:
orden = (valora_idioma(idioma_0, idioma_1) * 1000) + valoracion
elif sortlinks == 6:
orden = (valora_idioma(idioma_0, idioma_1) * 100000) + (
valora_calidad(calidad_video, calidad_audio) * 1000) + valoracion
itemsort.append(
{'action': "play", 'title': title, 'data_id': data_id, 'token': token, 'tipo': data_model, 'url': url,
'thumbnail': thumbnail, 'fanart': item.fanart, 'plot': plot, 'extra': item.url,
'fulltitle': item.fulltitle, 'orden1': (jdown == ''), 'orden2': orden})
else:
itemlist.append(
Item(channel=item.channel, action="play", data_id=data_id, token=token, tipo=data_model, title=title,
url=url, thumbnail=thumbnail, fanart=item.fanart, plot=plot, extra=item.url,
fulltitle=item.fulltitle))
if sortlinks > 0:
numberlinks = config.get_setting("plusdedenumberlinks", item.channel) # 0:todos, > 0:n*5 (5,10,15,20,...)
# numberlinks = int(numberlinks) if numberlinks != '' and numberlinks !="No" else 0
if numberlinks != '' and numberlinks != "No":
numberlinks = int(numberlinks)
else:
numberlinks = 0
if numberlinks == 0:
verTodos = True
itemsort = sorted(itemsort, key=lambda k: (k['orden1'], k['orden2']), reverse=True)
for i, subitem in enumerate(itemsort):
if not verTodos and i >= numberlinks:
itemlist.append(
Item(channel=item.channel, action='findallvideos', title='Ver todos los enlaces', url=item.url,
extra=item.extra))
break
itemlist.append(
Item(channel=item.channel, action=subitem['action'], title=subitem['title'], data_id=subitem['data_id'],
token=subitem['token'], tipo=subitem['tipo'], url=subitem['url'], thumbnail=subitem['thumbnail'],
fanart=subitem['fanart'], plot=subitem['plot'], extra=subitem['extra'],
fulltitle=subitem['fulltitle']))
if data_model == "4":
itemlist.append(
Item(channel=item.channel, action="plusdede_check", tipo="4", token=token, title="Marcar como Pendiente",
valor="pending", idtemp=idpeli))
itemlist.append(
Item(channel=item.channel, action="plusdede_check", tipo="4", token=token, title="Marcar como Vista",
valor="seen", idtemp=idpeli))
itemlist.append(
Item(channel=item.channel, action="plusdede_check", tipo="4", token=token, title="Marcar como Favorita",
valor="favorite", idtemp=idpeli))
itemlist.append(Item(channel=item.channel, action="plusdede_check", tipo="4", token=token, title="Quitar Marca",
valor="nothing", idtemp=idpeli))
itemlist.append(
Item(channel='plusdede', title="Añadir a lista", tipo="4", tipo_esp="lista", idtemp=idpeli, token=token,
action="plusdede_check"))
return itemlist
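# "Ver todos los enlaces": re-runs findvideos without trimming the sorted list.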
def findallvideos(item):
return findvideos(item, True)
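# Plays a trailer directly, or resolves the real hoster URL: plusdede hides it
# behind a "visitar enlace" redirect, so the Location header of the unfollowed
# redirect is what gets handed to servertools.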
def play(item):
itemlist = []
if "trailer" in item:
url = item.trailer
itemlist = servertools.find_video_items(data=url)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist
else:
logger.info("url=" + item.url)
# Hace la llamada
headers = {'Referer': item.extra}
data = httptools.downloadpage(item.url, headers=headers).data
url = scrapertools.find_single_match(data,
'<a href="([^"]+)" target="_blank"><button class="btn btn-primary">visitar enlace</button>')
url = urlparse.urljoin("https://www.plusdede.com", url)
headers = {'Referer': item.url}
media_url = httptools.downloadpage(url, headers=headers, follow_redirects=False).headers.get("location")
# logger.info("media_url="+media_url)
itemlist = servertools.find_video_items(data=media_url)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
# Marcar como visto
try:
checkseen(item)
except:
pass
return itemlist
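# Tells plusdede that the media was watched, keeping the web account in sync.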
def checkseen(item):
logger.info(item)
if item.tipo == "8":
url_temp = "https://www.plusdede.com/set/episode/" + item.data_id + "/seen"
else:
url_temp = "https://www.plusdede.com/set/usermedia/" + item.tipo + "/" + item.data_id + "/seen"
headers = {"User-Agent": "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/61.0.3163.100 Safari/537.36", "Referer": "https://www.plusdede.com/serie/",
"X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": item.token}
httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers, replace_headers=True)
return True
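# Scrapes title, year, duration, user rating, genres, plot and casting from the
# media page and shows them in the skinned TextBox window defined below.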
def infosinopsis(item):
logger.info()
data = httptools.downloadpage(item.url).data
scrapedtitle = scrapertools.find_single_match(data, '<div class="media-title">([^<]+)</div>')
scrapedvalue = scrapertools.find_single_match(data, '<span class="value">([^<]+)</span>')
scrapedyear = scrapertools.find_single_match(data,
'<strong>Fecha</strong>\s*<div class="mini-content">([^<]+)</div>').strip()
scrapedduration = scrapertools.htmlclean(scrapertools.find_single_match(data,
'<strong>Duración</strong>\s*<div class="mini-content">([^<]+)</div>').strip().replace(
" ", "").replace("\n", ""))
scrapedplot = scrapertools.find_single_match(data, '<div class="plot expandable">([^<]+)<div').strip()
generos = scrapertools.find_single_match(data, '<strong>Género</strong>\s*<ul>(.*?)</ul>')
scrapedgenres = re.compile('<li>([^<]+)</li>', re.DOTALL).findall(generos)
scrapedcasting = re.compile(
'<a href="https://www.plusdede.com/star/[^"]+"><div class="text-main">([^<]+)</div></a>\s*<div class="text-sub">\s*([^<]+)</div>',
re.DOTALL).findall(data)
title = scrapertools.htmlclean(scrapedtitle)
plot = "[B]Año: [/B]" + scrapedyear
plot += " [B]Duración: [/B]" + scrapedduration
plot += " [B]Puntuación usuarios: [/B]" + scrapedvalue
plot += "\n[B]Géneros: [/B]" + ", ".join(scrapedgenres)
plot += "\n\n[B]Sinopsis:[/B]\n" + scrapertools.htmlclean(scrapedplot)
plot += "\n\n[B]Casting:[/B]\n"
for actor, papel in scrapedcasting:
plot += actor + " (" + papel.strip() + ")\n"
tbd = TextBox("DialogTextViewer.xml", os.getcwd(), "Default")
tbd.ask(title, plot)
del tbd
return
try:
import xbmcgui
class TextBox(xbmcgui.WindowXML):
""" Create a skinned textbox window """
def __init__(self, *args, **kwargs):
pass
def onInit(self):
try:
self.getControl(5).setText(self.text)
self.getControl(1).setLabel(self.title)
except:
pass
def onClick(self, controlId):
pass
def onFocus(self, controlId):
pass
def onAction(self, action):
if action == 7:
self.close()
def ask(self, title, text):
self.title = title
self.text = text
self.doModal()
except:
pass
# Valoraciones de enlaces, los valores más altos se mostrarán primero :
def valora_calidad(video, audio):
prefs_video = ['hdmicro', 'hd1080', 'hd720', 'hdrip', 'dvdrip', 'rip', 'tc-screener', 'ts-screener']
prefs_audio = ['dts', '5.1', 'rip', 'line', 'screener']
video = ''.join(video.split()).lower()
# pts = (9 - prefs_video.index(video) if video in prefs_video else 1) * 10
if video in prefs_video:
pts = (9 - prefs_video.index(video)) * 10
else:
pts = (9 - 1) * 10
audio = ''.join(audio.split()).lower()
# audio adds to the video score instead of replacing it
if audio in prefs_audio:
pts += 9 - prefs_audio.index(audio)
else:
pts += 9 - 1
return pts
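# Example: video 'hd1080' gives (9-1)*10 = 80 and audio 'dts' adds 9-0 = 9,
# total 89; unknown values score as index 1, so they still rank above screeners.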
def valora_idioma(idioma_0, idioma_1):
prefs = ['spanish', 'latino', 'catalan', 'english', 'french']
# pts = (9 - prefs.index(idioma_0) if idioma_0 in prefs else 1) * 10
if idioma_0 in prefs:
pts = (9 - prefs.index(idioma_0)) * 10
else:
pts = (9 - 1) * 10
if idioma_1 != '': # si hay subtítulos
idioma_1 = idioma_1.replace(' SUB', '')
# pts += 8 - prefs.index(idioma_1) if idioma_1 in prefs else 1
if idioma_1 in prefs:
pts += 8 - prefs.index(idioma_1)
else:
pts += 8 - 1
else:
pts += 9 # sin subtítulos por delante
return pts
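# Handles every account action (pending/seen/favorite/lists). For list items it
# first shows the user's own lists; otherwise it POSTs the action to the matching
# /set/ endpoint with the CSRF token.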
def plusdede_check(item):
if item.tipo_esp == "lista":
url_temp = "https://www.plusdede.com/listas/addmediapopup/" + item.tipo + "/" + item.idtemp + "?popup=1"
data = httptools.downloadpage(url_temp).data
patron = '<div class="lista model" data-model="10" data-id="([^"]+)">+'
patron += '.*?<a href="/lista/[^"]+">([^<]+)</a>+'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
for id_lista, nombre_lista in matches:
itemlist.append(Item(channel=item.channel, action="plusdede_check", tipo=item.tipo, tipo_esp="add_list",
token=item.token, title=nombre_lista, idlista=id_lista, idtemp=item.idtemp))
if len(itemlist) < 1:
itemlist.append(Item(channel=item.channel, action="", title="No tienes ninguna lista creada por ti!"))
return itemlist
else:
if item.tipo == "10" or item.tipo == "lista":
url_temp = "https://www.plusdede.com/set/lista/" + item.idtemp + "/" + item.valor
else:
if (item.tipo_esp == "add_list"):
url_temp = "https://www.plusdede.com/set/listamedia/" + item.idlista + "/add/" + item.tipo + "/" + item.idtemp
else:
url_temp = "https://www.plusdede.com/set/usermedia/" + item.tipo + "/" + item.idtemp + "/" + item.valor
# httptools.downloadpage(url_temp, post="id="+item.idtemp)
if item.tipo == "5":
tipo_str = "series"
elif item.tipo == "lista":
tipo_str = "listas"
else:
tipo_str = "pelis"
headers = {"User-Agent":"Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/61.0.3163.100 Safari/537.36","Referer": "https://www.plusdede.com/" + tipo_str, "X-Requested-With": "XMLHttpRequest",
"X-CSRF-TOKEN": item.token}
data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers,
replace_headers=True).data.strip()
if data == "1":
if item.valor == "nothing":
platformtools.dialog_ok('SUCCESS', 'Marca eliminada con éxito!')
elif item.valor == "unfollow":
platformtools.dialog_ok('SUCCESS', 'Has dejado de seguir esta lista!')
elif item.valor == "follow":
platformtools.dialog_ok('SUCCESS', 'Has comenzado a seguir esta lista!')
elif item.tipo_esp == "add_list":
platformtools.dialog_ok('SUCCESS', 'Añadido a la lista!')
else:
platformtools.dialog_ok('SUCCESS', 'Marca realizada con éxito!')
else:
platformtools.dialog_ok('ERROR', 'No se pudo realizar la acción!')

View File

@@ -3,7 +3,7 @@
"name": "Repelis",
"active": true,
"adult": false,
"language": ["lat","cast","vo"],
"language": ["lat","cast"],
"thumbnail": "https://s8.postimg.cc/yem7wyfw1/repelis1.png",
"banner": "https://s8.postimg.cc/p6tzg9gjl/repelis2.png",
"categories": [

View File

@@ -0,0 +1,76 @@
{
"id": "rexpelis",
"name": "Rexpelis",
"active": true,
"adult": false,
"language": ["lat","cast"],
"thumbnail": "https://i.postimg.cc/MMJ5g9Y1/rexpelis1.png",
"banner": "https://i.postimg.cc/XrXs5GJB/rexpelis2.png",
"categories": [
"movie"
],
"settings": [
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"ESP",
"VO"
]
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,299 @@
# -*- coding: utf-8 -*-
# -*- Channel Rexpelis -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
from channelselector import get_thumb
from channels import autoplay
from channels import filtertools
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger, platformtools
idio = {'es-mx': 'LAT','es-es': 'ESP','en': 'VO'}
cali = {'poor': 'SD','low': 'SD','medium': 'HD','high': 'HD'}
list_language = idio.values()
list_quality = ["SD","HD"]
list_servers = ['rapidvideo', 'streamango', 'fastplay', 'openload', 'netu', 'vidoza', 'uptobox']
__channel__='rexpelis'
host = "https://www.rexpelis.com"
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
except:
__modo_grafico__ = True
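# Main menu; the per-genre counters scraped from the home page are added up to
# show the total number of films next to the "Por género" entry.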
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
data = httptools.downloadpage(host).data
matches = scrapertools.find_multiple_matches(data, 'cant-genre">([^<]+)')
cantidad = 0
for cantidad1 in matches:
cantidad += int(cantidad1)
itemlist.append(Item(channel = item.channel, title = "Actualizadas", action = "peliculas", url = host, page=1, type ="movie", thumbnail = get_thumb("updated", auto = True)))
itemlist.append(Item(channel = item.channel, title = "Estrenos", action = "estrenos", url = host + "/estrenos", page=1, thumbnail = get_thumb("premieres", auto = True)))
itemlist.append(Item(channel = item.channel, title = "Por género (Total películas: %s)" %cantidad, action = "generos", url = host, extra = "Genero", thumbnail = get_thumb("genres", auto = True) ))
itemlist.append(Item(channel = item.channel, title = "Por año", action = "annos", url = host, extra = "Genero", thumbnail = get_thumb("year", auto = True) ))
itemlist.append(Item(channel = item.channel, title = ""))
itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "/search?term=", thumbnail = get_thumb("search", auto = True)))
itemlist.append(item.clone(title="Configurar canal...", text_color="gold", action="configuracion", folder=False))
autoplay.show_option(item.channel, itemlist)
return itemlist
def configuracion(item):
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
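# "Estrenos" is served as plain HTML, so it is scraped directly instead of going
# through the POST /pagination endpoint used by peliculas().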
def estrenos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'item-pelicula.*?href="([^"]+).*?'
patron += 'src="([^"]+).*?'
patron += '<p>([^<]+).*?'
patron += '<span>([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
scrapedtitle = scrapedtitle.replace("Película ","")
itemlist.append(Item(channel = item.channel,
action = "findvideos",
contentTitle = scrapedtitle,
infoLabels = {'year':scrapedyear},
thumbnail = scrapedthumbnail,
title = scrapedtitle + " (%s)" %scrapedyear,
url = scrapedurl
))
tmdb.set_infoLabels(itemlist)
return itemlist
def search(item, texto):
logger.info()
item.url = host + "/suggest?que=" + texto
item.extra = "busca"
item.page = 1
item.texto = texto
if texto != '':
return sub_search(item)
else:
return []
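# The suggest endpoint returns JSON (data.m) and requires the CSRF token plus an
# XMLHttpRequest header.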
def sub_search(item):
logger.info()
itemlist = []
headers = [
['X-Requested-With', 'XMLHttpRequest']
]
data = httptools.downloadpage(item.url).data
token = scrapertools.find_single_match(data, 'csrf-token" content="([^"]+)')
data = httptools.downloadpage(item.url + "&_token=" + token, headers=headers).data
logger.info("Intel33 %s" %data)
data_js = jsontools.load(data)["data"]["m"]
logger.info("Intel44 %s" %data_js)
for js in data_js:
itemlist.append(Item(channel = item.channel,
action = "findvideos",
contentTitle = js["title"],
infoLabels = {'year': js["release_year"]},
thumbnail = js["cover"],
title = js["title"] + " (%s)" %js["release_year"],
url = js["slug"]
))
tmdb.set_infoLabels(itemlist)
#pagination
if len(itemlist)>0:
itemlist.append(Item(channel = item.channel,
action = "peliculas",
page = item.page + 1,
title = "Página siguiente >>",
url = item.url
))
return itemlist
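# Generic listing through the POST /pagination endpoint: page number, list type,
# an optional slug (e.g. the year) and the CSRF token scraped from the landing page.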
def peliculas(item):
logger.info()
itemlist = []
headers = [
['X-Requested-With', 'XMLHttpRequest']
]
data = httptools.downloadpage(item.url).data
token = scrapertools.find_single_match(data, 'csrf-token" content="([^"]+)')
post = "page=%s&type=%s&_token=%s" %(item.page, item.type, token)
if item.slug:
post += "&slug=%s" %item.slug
logger.info("Intel11 %s" %post)
data = httptools.downloadpage(host + "/pagination", post=post, headers=headers).data
patron = 'href="([^"]+)".*?'
patron += 'src="([^"]+)".*?'
patron += '<p>([^<]+).*?'
patron += '<span>([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle , scrapedyear in matches:
itemlist.append(Item(channel = item.channel,
action = "findvideos",
contentTitle = scrapedtitle,
infoLabels = {'year':scrapedyear},
thumbnail = scrapedthumbnail,
title = scrapedtitle + " (%s)" %scrapedyear,
url = scrapedurl
))
tmdb.set_infoLabels(itemlist)
#pagination
if len(itemlist)>0:
itemlist.append(Item(channel = item.channel,
action = "peliculas",
page = item.page + 1,
title = "Página siguiente >>",
url = item.url
))
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas','latino']:
item.url = host
item.page=1
elif categoria == 'infantiles':
item.url = host + '/genero/animacion'
item.page = 1
elif categoria == 'terror':
item.url = host + '/genero/terror'
item.page = 1
itemlist = peliculas(item)
if "Pagina" in itemlist[-1].title:
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
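# Genre menu scraped from the sidebar block; each entry keeps its film count in
# the title and links to the genre's own listing page.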
def generos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, "genressomb.*?</ul>")
patron = 'href="([^"]+)".*?'
patron += '</i>([^<]+).*?'
patron += 'cant-genre">([^<]+)'
matches = scrapertools.find_multiple_matches(bloque, patron)
for url, titulo, cantidad in matches:
itemlist.append(Item(channel = item.channel,
action = "peliculas_gen",
page = 1,
slug = titulo,
title = titulo + "(%s)" %cantidad,
type = "genres",
url = url
))
return itemlist
def peliculas_gen(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'item-pelicula.*?href="([^"]+)".*?'
patron += 'src="([^"]+)".*?'
patron += '<p>([^<]+).*?'
patron += '<span>([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle , scrapedyear in matches:
itemlist.append(Item(channel = item.channel,
action = "findvideos",
contentTitle = scrapedtitle,
infoLabels = {'year':scrapedyear},
thumbnail = scrapedthumbnail,
title = scrapedtitle + " (%s)" %scrapedyear,
url = scrapedurl
))
tmdb.set_infoLabels(itemlist)
return itemlist
def annos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, 'div class="years.*?</ul>')
patron = 'href="([^"]+)"'
patron += '>([^<]+).*?'
matches = scrapertools.find_multiple_matches(bloque, patron)
for url, titulo in matches:
itemlist.append(Item(channel = item.channel,
action = "peliculas",
page = 1,
slug = titulo,
title = titulo,
type = "year",
url = url
))
return itemlist
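# Collects every embedded iframe, lets servertools identify the hoster, and then
# applies the filtertools/autoplay machinery required by the channel settings.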
def findvideos(item):
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '(?i)<iframe.*?src="([^"]+).*?'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl in matches:
titulo = "Ver en: %s"
itemlist.append(
item.clone(channel = item.channel,
action = "play",
title = titulo,
url = scrapedurl
))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
tmdb.set_infoLabels(itemlist, __modo_grafico__)
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if itemlist:
itemlist.append(Item(channel = item.channel))
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
text_color="magenta"))
# Opción "Añadir esta película a la biblioteca de KODI"
if item.extra != "library":
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
contentTitle = item.contentTitle
))
return itemlist
def play(item):
logger.info()
item.thumbnail = item.contentThumbnail
return [item]

View File

@@ -154,7 +154,7 @@ def setting_channel_new(item):
# Cargar lista de opciones (canales activos del usuario y que permitan búsqueda global)
# ------------------------
lista = []; ids = []; lista_lang = []
lista = []; ids = []; lista_lang = []; lista_ctgs = []
channels_list = channelselector.filterchannels('all')
for channel in channels_list:
channel_parameters = channeltools.get_channel_parameters(channel.channel)
@@ -171,36 +171,52 @@ def setting_channel_new(item):
lista.append(it)
ids.append(channel.channel)
lista_lang.append(channel_parameters['language'])
lista_ctgs.append(channel_parameters['categories'])
# Diálogo para pre-seleccionar
# ----------------------------
preselecciones_std = ['Modificar selección actual', 'Modificar partiendo de Todos', 'Modificar partiendo de Ninguno', 'Modificar partiendo de Castellano', 'Modificar partiendo de Latino']
if item.action == 'setting_channel':
# Configuración de los canales incluídos en la búsqueda
preselecciones = preselecciones_std
presel_values = [1, 2, 3, 4, 5]
else:
# Llamada desde "buscar en otros canales" (se puede saltar la selección e ir directo a la búsqueda)
preselecciones = ['Buscar con la selección actual'] + preselecciones_std
presel_values = [0, 1, 2, 3, 4, 5]
preselecciones = [
'Buscar con la selección actual',
'Modificar selección actual',
'Modificar partiendo de Todos',
'Modificar partiendo de Ninguno',
'Modificar partiendo de Castellano',
'Modificar partiendo de Latino'
]
presel_values = ['skip', 'actual', 'all', 'none', 'cast', 'lat']
categs = ['movie', 'tvshow', 'documentary', 'anime', 'vos', 'direct', 'torrent']
if config.get_setting('adult_mode') > 0: categs.append('adult')
for c in categs:
preselecciones.append('Modificar partiendo de %s' % config.get_localized_category(c))
presel_values.append(c)
if item.action == 'setting_channel': # Configuración de los canales incluídos en la búsqueda
del preselecciones[0]
del presel_values[0]
#else: # Llamada desde "buscar en otros canales" (se puede saltar la selección e ir directo a la búsqueda)
ret = platformtools.dialog_select(config.get_localized_string(59994), preselecciones)
if ret == -1: return False # pedido cancel
if presel_values[ret] == 0: return True # continuar sin modificar
elif presel_values[ret] == 3: preselect = []
elif presel_values[ret] == 2: preselect = range(len(ids))
elif presel_values[ret] in [4, 5]:
busca = 'cast' if presel_values[ret] == 4 else 'lat'
if presel_values[ret] == 'skip': return True # continuar sin modificar
elif presel_values[ret] == 'none': preselect = []
elif presel_values[ret] == 'all': preselect = range(len(ids))
elif presel_values[ret] in ['cast', 'lat']:
preselect = []
for i, lg in enumerate(lista_lang):
if busca in lg or '*' in lg:
if presel_values[ret] in lg or '*' in lg:
preselect.append(i)
else:
elif presel_values[ret] == 'actual':
preselect = []
for i, canal in enumerate(ids):
channel_status = config.get_setting('include_in_global_search', canal)
if channel_status:
preselect.append(i)
else:
preselect = []
for i, ctgs in enumerate(lista_ctgs):
if presel_values[ret] in ctgs:
preselect.append(i)
# Diálogo para seleccionar
# ------------------------

View File

@@ -1,33 +0,0 @@
{
"id": "tiotorrent",
"name": "TioTorrent",
"active": false,
"adult": false,
"language": ["cast","lat"],
"thumbnail": "https://s1.postimg.cc/29eths1fi7/tiotorrent.png",
"banner": "https://s1.postimg.cc/9gkc73lxb3/tiotorrent-banner.png",
"version": 1,
"categories": [
"movie",
"tvshow",
"torrent"
],
"settings": [
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_torrent",
"type": "bool",
"label": "Incluir en Novedades - Torrent",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,343 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel TioTorrent -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channelselector import get_thumb
from platformcode import logger
from platformcode import config
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
host = 'http://www.tiotorrent.com/'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Peliculas",
action="movie_list",
thumbnail=get_thumb("channels_movie.png")
))
itemlist.append(item.clone(title="Series",
action="series_list",
thumbnail=get_thumb("channels_tvshow.png")
))
return itemlist
def movie_list(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Estrenos",
action="lista",
url=host + 'estrenos-de-cine',
extra='movie'
))
itemlist.append(item.clone(title="Todas",
action="lista",
url=host + 'peliculas',
extra='movie'
))
itemlist.append(item.clone(title="Buscar",
action="search",
url=host + '/peliculas/?pTit=',
thumbnail=get_thumb("search.png"),
extra='movie'
))
return itemlist
def series_list(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Todas",
action="lista",
url=host + 'series',
extra='serie'
))
itemlist.append(item.clone(title="Buscar",
action="search",
url=host + '/series/?pTit=',
thumbnail=get_thumb("search.png"),
extra='serie'
))
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def lista(item):
logger.info()
itemlist = []
data = get_source(item.url)
if item.extra == 'movie':
patron = "<div class=moviesbox.*?><a href=(.*?)>.*?image:url\('(.*?)'\)>.*?<b>.*?>(.*?)<"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = scrapedurl
thumbnail = scrapedthumbnail
contentTitle = scrapedtitle.decode('latin1').encode('utf8')
title = contentTitle
filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w396", "")
filtro_list = {"poster_path": filtro_thumb}
filtro_list = filtro_list.items()
itemlist.append(item.clone(action='findvideos',
title=title, url=url,
thumbnail=thumbnail,
contentTitle=contentTitle,
infoLabels={'filtro': filtro_list},
extra=item.extra
))
else:
patron = "<div class=moviesbox.*?>.*?episode>(.*?)x(.*?)<.*?href=(.*?)>.*?image:url\('(.*?)'.*?href.*?>(.*?)<"
matches = re.compile(patron, re.DOTALL).findall(data)
for season, episode, scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = scrapedurl
thumbnail = scrapedthumbnail
contentSerieName = scrapedtitle
title = '%s' % contentSerieName
filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w396", "")
filtro_list = {"poster_path": filtro_thumb}
filtro_list = filtro_list.items()
contentSeason=season
contentEpisode=episode
itemlist.append(item.clone(action='seasons',
title=title,
url=url,
thumbnail=thumbnail,
contentSerieName=contentSerieName,
contentSeason=contentSeason,
contentEpisode=contentEpisode,
infoLabels={'filtro': filtro_list},
extra=item.extra
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb =True)
#Paginacion
if itemlist !=[]:
next_page = scrapertools.find_single_match(data, '<span class=pagination_next><a href=(.*?)>')
if next_page != '':
itemlist.append(item.clone(action = "lista",
title = 'Siguiente >>>',
url = next_page,
thumbnail='https://s32.postimg.cc/4zppxf5j9/siguiente.png'
))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return lista(item)
else:
return []
def seasons(item):
logger.info()
itemlist=[]
infoLabels = item.infoLabels
data=get_source(item.url)
patron ='href=javascript:showSeasson\(.*?\); id=.*?>Temporada (.*?)<\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for season in matches:
title='Temporada %s' % season
infoLabels['season'] = season
itemlist.append(Item(channel=item.channel,
title= title,
url=item.url,
action='episodesxseasons',
contentSeasonNumber=season,
contentSerieName=item.contentSerieName,
infoLabels=infoLabels
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
itemlist = itemlist[::-1]
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="all_episodes", contentSerieName=item.contentSerieName))
return itemlist
def all_episodes(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseasons(tempitem)
return itemlist
def episodesxseasons(item):
logger.info()
itemlist=[]
data=get_source(item.url)
patron = "<div class=corner-episode>%sx(.\d+)<\/div><a href=(.*?)>.*?" % item.contentSeasonNumber
patron += "image:url\('(.*?)'.*?href.*?>(%s)<" % item.contentSerieName
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels=item.infoLabels
for episode, scrapedurl, scrapedthumbnail, scrapedtitle in matches:
contentEpisodeNumber=episode
season = item.contentSeasonNumber
url=scrapedurl
thumbnail=scrapedthumbnail
infoLabels['episode']=episode
title = '%sx%s - %s' % (season, episode, item.contentSerieName)
itemlist.append(Item(channel=item.channel,
action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
contentSerieName=item.contentSerieName,
contentEpisodeNumber=contentEpisodeNumber,
infoLabels=infoLabels
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist[::-1]
# def findvideos(item):
# logger.info()
# itemlist=[]
# data = get_source(item.url)
# patron = "<a class=dload.*? target=_blank>.*?<\/a><i>(.*?)<\/i>.*?<a href=.*?showDownload\((.*?)\);"
# matches = re.compile(patron, re.DOTALL).findall(data)
#
# for quality, extra_info in matches:
# extra_info= extra_info.replace("'",'')
# extra_info= extra_info.split(',')
# title = '%s [%s]' % (item.contentTitle, quality)
# url = extra_info[1]
# if item.extra == 'movie':
# url = extra_info[1]
# else:
# url = extra_info[2]
# server = 'torrent'
# itemlist.append(Item(channel=item.channel,
# title=title,
# contentTitle= item.title,
# url=url,
# action='play',
# quality=quality,
# server=server,
# thumbnail = item.infoLabels['thumbnail'],
# infoLabels=item.infoLabels
# ))
#
# if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
# itemlist.append(Item(channel=item.channel,
# title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
# url=item.url,
# action="add_pelicula_to_library",
# extra="findvideos",
# contentTitle=item.contentTitle
# ))
#
# return itemlist
def findvideos(item):
logger.info()
itemlist=[]
data = get_source(item.url)
patron = "showDownload\(([^\)]+)\);.*?alt=.*?torrent (.*?) "
matches = re.compile(patron, re.DOTALL).findall(data)
for extra_info, quality in matches:
extra_info= extra_info.replace(",'",'|')
extra_info= extra_info.split('|')
title = '%s [%s]' % ('Torrent', quality.strip())
if item.extra == 'movie':
url = extra_info[2].strip("'")
else:
url = extra_info[3].strip("'")
server = 'torrent'
if not '.torrent' in url:
if 'tvsinpagar' in url:
url = url.replace('http://','http://www.')
try:
from_web = httptools.downloadpage(url, follow_redirects=False)
url = from_web.headers['location']
except:
pass
if '.torrent' in url:
itemlist.append(Item(channel=item.channel,
title=title,
contentTitle= item.title,
url=url,
action='play',
quality=quality,
server=server,
thumbnail = item.infoLabels['thumbnail'],
infoLabels=item.infoLabels
))
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle=item.contentTitle
))
return itemlist
def newest(category):
logger.info()
item = Item()
try:
if category in ['peliculas', 'torrent']:
item.url = host + 'estrenos-de-cine'
item.extra='movie'
itemlist = lista(item)
if itemlist[-1].title == 'Siguiente >>>':
itemlist.pop()
if category == 'torrent':
item.url = host+'series'
item.extra = 'serie'
itemlist.extend(lista(item))
if itemlist[-1].title == 'Siguiente >>>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
return itemlist

View File

@@ -15,7 +15,7 @@ from platformcode import config, logger, platformtools
idio = {'https://cdn.yape.nu//languajes/la.png': 'LAT','https://cdn.yape.nu//languajes/es.png': 'ESP','https://cdn.yape.nu//languajes/en_es.png': 'VOSE'}
cali = {'HD 1080p': 'HD 1080p','TS Screener HQ':'TS Screener HQ', 'BR Screnner':'BR Screnner','HD Rip':'HD Rip','DVD Screnner':'DVD Screnner'}
cali = {'TS Screnner': 'TS Screnner', 'HD 1080p': 'HD 1080p','TS Screener HQ':'TS Screener HQ', 'BR Screnner':'BR Screnner','HD Rip':'HD Rip','DVD Screnner':'DVD Screnner'}
list_language = idio.values()
list_quality = cali.values()
@@ -34,11 +34,13 @@ except:
def mainlist(item):
logger.info()
data = httptools.downloadpage(host + "/catalogue?sort=latest").data
total = scrapertools.find_single_match(data, 'class="font-weight-bold mr-2">([^<]+)')
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel = item.channel, title = "Actualizadas", action = "peliculas", url = host + "/catalogue?sort=time_update&page=", page=1, thumbnail = get_thumb("updated", auto = True)))
itemlist.append(Item(channel = item.channel, title = "Mas vistas", action = "peliculas", url = host + "/catalogue?sort=mosts-today&page=", page=1, thumbnail = get_thumb("more watched", auto = True)))
itemlist.append(Item(channel = item.channel, title = "Ultimas agregadas", action = "peliculas", url = host + "/catalogue?sort=latest&page=", page=1, thumbnail = get_thumb("last", auto = True)))
itemlist.append(Item(channel = item.channel, title = "Ultimas agregadas - (Total películas: %s)" %total, action = "peliculas", url = host + "/catalogue?sort=latest&page=", page=1, thumbnail = get_thumb("last", auto = True)))
itemlist.append(Item(channel = item.channel, title = "Por género", action = "generos", url = host, extra = "Genero", thumbnail = get_thumb("genres", auto = True) ))
itemlist.append(Item(channel = item.channel, title = ""))
itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "/search?term=", thumbnail = get_thumb("search", auto = True)))

Binary file not shown (modified: 2.7 MiB → 978 KiB).

Binary file not shown (added: 2.7 MiB).

View File

@@ -1,43 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "((?:k-bagi.com|diskokosmiko.mx)/[^\\s'\"]+)",
"url": "http://\\1"
}
]
},
"free": true,
"id": "kbagi",
"name": "kbagi",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "http://i.imgur.com/EjbfM7p.png?1",
"version": 1
}

View File

@@ -1,55 +0,0 @@
# -*- coding: utf-8 -*-
from channels import kbagi
from core import httptools
from core import jsontools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
domain = "diskokosmiko.mx"
if "k-bagi.com" in page_url:
domain = "kbagi.com"
logueado, error_message = kbagi.login(domain)
if not logueado:
return False, error_message
data = httptools.downloadpage(page_url).data
if ("File was deleted" or "Not Found" or "File was locked by administrator") in data:
return False, "[%s] El archivo no existe o ha sido borrado" %domain
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
host = "http://k-bagi.com"
host_string = "k-bagi"
if "diskokosmiko.mx" in page_url:
host = "http://diskokosmiko.mx"
host_string = "diskokosmiko"
url = scrapertools.find_single_match(data, '<form action="([^"]+)" class="download_form"')
if url:
url = host + url
fileid = url.rsplit("f=", 1)[1]
token = scrapertools.find_single_match(data,
'<div class="download_container">.*?name="__RequestVerificationToken".*?value="([^"]+)"')
post = "fileId=%s&__RequestVerificationToken=%s" % (fileid, token)
headers = {'X-Requested-With': 'XMLHttpRequest'}
data = httptools.downloadpage(url, post, headers).data
data = jsontools.load(data)
mediaurl = data.get("DownloadUrl")
extension = data.get("Extension")
video_urls.append([".%s [%s]" % (extension, host_string), mediaurl])
for video_url in video_urls:
logger.info(" %s - %s" % (video_url[0], video_url[1]))
return video_urls