Updated channels

cinehindi: structure change
kbagi: removed, the site no longer exists
peliscon: removed, the site no longer exists
plusdede: removed, the site no longer exists
repelis: cosmetic change
rexpelis: new channel
yape: cosmetic change
New fanart for Halloween
@@ -3,25 +3,34 @@
import re
import urlparse

from channelselector import get_thumb
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger

IDIOMAS = {'Hindi': 'Hindi'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['openload', 'netutv']

host = "http://www.cinehindi.com/"


def mainlist(item):
    logger.info()

    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = list()

    itemlist.append(Item(channel=item.channel, action="genero", title="Generos", url=host))
    itemlist.append(Item(channel=item.channel, action="lista", title="Novedades", url=host))
    itemlist.append(Item(channel=item.channel, action="genero", title="Generos", url=host, thumbnail = get_thumb("genres", auto = True)))
    itemlist.append(Item(channel=item.channel, action="lista", title="Novedades", url=host, thumbnail = get_thumb("newest", auto = True)))
    itemlist.append(Item(channel=item.channel, action="proximas", title="Próximas Películas",
                         url=urlparse.urljoin(host, "proximamente")))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=urlparse.urljoin(host, "?s=")))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=urlparse.urljoin(host, "?s="), thumbnail = get_thumb("search", auto = True)))
    autoplay.show_option(item.channel, itemlist)
    return itemlist

@@ -50,9 +59,7 @@ def search(item, texto):

def proximas(item):
    logger.info()

    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)  # Remove tabs, double spaces, line breaks, etc.
    patron = 'class="item">.*?'  # Every movie item on this site starts with this
@@ -77,40 +84,36 @@ def proximas(item):
        item.url = next_page_url + 'proximamente/page/' + str(i) + '/'
        itemlist.append(Item(channel=item.channel, action="proximas", title=">> Página siguiente", url=item.url,
                             thumbnail='https://s32.postimg.cc/4zppxf5j9/siguiente.png'))

    return itemlist


def lista(item):
    logger.info()

    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)  # Remove tabs, double spaces, line breaks, etc.
    patron = 'class="item">.*?'  # Every movie item on this site starts with this
    patron += '<a href="([^"]+).*?'  # scrapedurl
    patron += '<img src="([^"]+).*?'  # scrapedthumbnail
    patron += 'alt="([^"]+).*?'  # scrapedtitle
    patron += '<span class="ttx">([^<]+).*?'  # scrapedplot
    patron += '<div class="fixyear">(.*?)</span></div></div>'  # scrapedfixyear
    patron += '<div class="fixyear">(.*?)</span></div><'  # scrapedfixyear
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot, scrapedfixyear in matches:
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedfixyear in matches:
        patron = '<span class="year">([^<]+)'  # scrapedyear
        scrapedyear = scrapertools.find_single_match(scrapedfixyear, patron)
        scrapedtitle = scrapedtitle.replace(scrapertools.find_single_match(scrapedtitle,'\(\d{4}\)'),'').strip()
        title = scrapedtitle
        if scrapedyear:
            scrapedtitle += ' (%s)' % (scrapedyear)
            title += ' (%s)' % (scrapedyear)
            item.infoLabels['year'] = int(scrapedyear)
        patron = '<span class="calidad2">([^<]+).*?'  # scrapedquality
        scrapedquality = scrapertools.find_single_match(scrapedfixyear, patron)
        if scrapedquality:
            scrapedtitle += ' [%s]' % (scrapedquality)
            title += ' [%s]' % (scrapedquality)
        itemlist.append(
            item.clone(title=scrapedtitle, url=scrapedurl, plot=scrapedplot, action="findvideos", extra=scrapedtitle,
                       show=scrapedtitle, thumbnail=scrapedthumbnail, contentType="movie", context=["buscar_trailer"]))
            item.clone(title=title, url=scrapedurl, action="findvideos", extra=scrapedtitle,
                       contentTitle=scrapedtitle, thumbnail=scrapedthumbnail, contentType="movie", context=["buscar_trailer"]))
    tmdb.set_infoLabels(itemlist)
    scrapertools.printMatches(itemlist)
    # Pagination
    patron_genero = '<h1>([^"]+)<\/h1>'
    genero = scrapertools.find_single_match(data, patron_genero)
@@ -118,9 +121,7 @@ def lista(item):
        patron = "<a rel='nofollow' class=previouspostslink' href='([^']+)'>Siguiente "
    else:
        patron = "<span class='current'>.+?href='(.+?)'>"

    next_page_url = scrapertools.find_single_match(data, patron)

    if next_page_url != "":
        item.url = next_page_url
        itemlist.append(Item(channel=item.channel, action="lista", title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=next_page_url,
@@ -130,22 +131,34 @@ def lista(item):

def findvideos(item):
    logger.info()

    itemlist = []
    itemlist1 = []

    data = httptools.downloadpage(item.url).data
    itemlist1.extend(servertools.find_video_items(data=data))
    patron_show = '<div class="data"><h1 itemprop="name">([^<]+)<\/h1>'
    show = scrapertools.find_single_match(data, patron_show)
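    # Tag every extracted link with this channel and the movie's infoLabels; the loop
    # after it then drops YouTube entries (presumably trailers on this site).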
    for videoitem in itemlist1:
        videoitem.channel = item.channel
        videoitem.infoLabels = item.infoLabels
    for i in range(len(itemlist1)):
        if not 'youtube' in itemlist1[i].title:
            itemlist.append(itemlist1[i])
    tmdb.set_infoLabels(itemlist, True)
    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay

    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentChannel!='videolibrary':
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                 action="add_pelicula_to_library", extra="findvideos", contentTitle=show))

    return itemlist


def play(item):
    logger.info()
    item.thumbnail = item.contentThumbnail
    return [item]

@@ -1,81 +0,0 @@
{
    "id": "kbagi",
    "name": "Kbagi/Diskokosmiko",
    "language": ["cast", "lat"],
    "active": false,
    "adult": false,
    "version": 1,
    "thumbnail": "http://i.imgur.com/EjbfM7p.png?1",
    "banner": "copiapop.png",
    "categories": [
        "movie",
        "tvshow"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": true,
            "visible": true
        },
        {
            "id": "kbagiuser",
            "type": "text",
            "color": "0xFF25AA48",
            "label": "Usuario Kbagi",
            "enabled": true,
            "visible": true
        },
        {
            "id": "kbagipassword",
            "type": "text",
            "color": "0xFF25AA48",
            "hidden": true,
            "label": "Password Kbagi",
            "enabled": "!eq(-1,'')",
            "visible": true
        },
        {
            "id": "diskokosmikouser",
            "type": "text",
            "color": "0xFFC52020",
            "label": "Usuario Diskokosmiko",
            "enabled": true,
            "visible": true
        },
        {
            "id": "diskokosmikopassword",
            "type": "text",
            "color": "0xFFC52020",
            "hidden": true,
            "label": "Password Diskokosmiko",
            "enabled": "!eq(-1,'')",
            "visible": true
        },
        {
            "id": "adult_content",
            "type": "bool",
            "color": "0xFFd50b0b",
            "label": "Mostrar contenido adulto en las búsquedas",
            "default": false,
            "enabled": true,
            "visible": true
        },
        {
            "id": "perfil",
            "type": "list",
            "label": "Perfil de color",
            "default": 3,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "Sin color",
                "Perfil 3",
                "Perfil 2",
                "Perfil 1"
            ]
        }
    ]
}
@@ -1,384 +0,0 @@
# -*- coding: utf-8 -*-

import re
import threading
import urllib
import xbmc

from core import downloadtools
from core import filetools
from core import httptools
from core import jsontools
from core import scrapertools
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools

__perfil__ = config.get_setting('perfil', "kbagi")

# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFF088A08'],
          ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFF088A08'],
          ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFF088A08']]

if __perfil__ - 1 >= 0:
    color1, color2, color3, color4, color5 = perfil[__perfil__ - 1]
else:
    color1 = color2 = color3 = color4 = color5 = ""

adult_content = config.get_setting("adult_content", "kbagi")


def login(pagina):
    logger.info()
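    # Login flow: fetch the site's home page and reuse the session if the username
    # already appears in it; otherwise POST the __RequestVerificationToken plus the
    # stored credentials to /action/Account/Login as an AJAX request.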
    try:
        dom = pagina.split(".")[0]
        user = config.get_setting("%suser" %dom, "kbagi")
        password = config.get_setting("%spassword" %dom, "kbagi")
        if "kbagi" in pagina:
            pagina = "k-bagi.com"
        if not user:
            return False, "Para ver los enlaces de %s es necesario registrarse en %s" %(dom, pagina)
        data = httptools.downloadpage("http://%s" % pagina).data
        if re.search(r'(?i)%s' % user, data):
            return True, ""
        token = scrapertools.find_single_match(data, 'name="__RequestVerificationToken".*?value="([^"]+)"')
        post = "__RequestVerificationToken=%s&UserName=%s&Password=%s" % (token, user, password)
        headers = {'X-Requested-With': 'XMLHttpRequest'}
        url_log = "http://%s/action/Account/Login" % pagina
        data = httptools.downloadpage(url_log, post, headers).data
        if "redirectUrl" in data:
            logger.info("Login correcto")
            return True, ""
        else:
            logger.error("Error en el login")
            return False, "Nombre de usuario no válido. Comprueba tus credenciales"
    except:
        import traceback
        logger.error(traceback.format_exc())
        return False, "Error durante el login. Comprueba tus credenciales"


def mainlist(item):
    logger.info()
    itemlist = []
    item.text_color = color1
    logueado, error_message = login("kbagi.com")
    if not logueado:
        itemlist.append(item.clone(title=error_message, action="configuracion", folder=False))
    else:
        item.extra = "http://k-bagi.com"
        itemlist.append(item.clone(title="kbagi", action="", text_color=color2))
        itemlist.append(
            item.clone(title=" Búsqueda", action="search", url="http://k-bagi.com/action/SearchFiles"))
        itemlist.append(item.clone(title=" Colecciones", action="colecciones",
                                   url="http://k-bagi.com/action/home/MoreNewestCollections?pageNumber=1"))
        itemlist.append(item.clone(title=" Búsqueda personalizada", action="filtro",
                                   url="http://k-bagi.com/action/SearchFiles"))
        itemlist.append(item.clone(title=" Mi cuenta", action="cuenta"))
    logueado, error_message = login("diskokosmiko.mx")
    if not logueado:
        itemlist.append(item.clone(title=error_message, action="configuracion", folder=False))
    else:
        item.extra = "http://diskokosmiko.mx/"
        itemlist.append(item.clone(title="DiskoKosmiko", action="", text_color=color2))
        itemlist.append(item.clone(title=" Búsqueda", action="search", url="http://diskokosmiko.mx/action/SearchFiles"))
        itemlist.append(item.clone(title=" Colecciones", action="colecciones",
                                   url="http://diskokosmiko.mx/action/home/MoreNewestCollections?pageNumber=1"))
        itemlist.append(item.clone(title=" Búsqueda personalizada", action="filtro",
                                   url="http://diskokosmiko.mx/action/SearchFiles"))
        itemlist.append(item.clone(title=" Mi cuenta", action="cuenta"))
    itemlist.append(item.clone(action="", title=""))
    folder_thumb = filetools.join(config.get_data_path(), 'thumbs_kbagi')
    files = filetools.listdir(folder_thumb)
    if files:
        itemlist.append(
            item.clone(title="Eliminar caché de imágenes (%s)" % len(files), action="delete_cache", text_color="red"))
    itemlist.append(item.clone(title="Configuración del canal", action="configuracion", text_color="gold"))
    return itemlist


def search(item, texto):
    logger.info()
    item.post = "Mode=List&Type=Video&Phrase=%s&SizeFrom=0&SizeTo=0&Extension=&ref=pager&pageNumber=1" % texto.replace(
        " ", "+")
    try:
        return listado(item)
    except:
        import sys, traceback
        for line in sys.exc_info():
            logger.error("%s" % line)
        logger.error(traceback.format_exc())
        return []


def configuracion(item):
    ret = platformtools.show_channel_settings()
    platformtools.itemlist_refresh()
    return ret


def listado(item):
    logger.info()
    itemlist = []
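    # The Gallery view of the same listing exposes fuller thumbnail URLs than the List
    # view, so fetch it first; it is used below to resolve each truncated thumb.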
    data_thumb = httptools.downloadpage(item.url, item.post.replace("Mode=List", "Mode=Gallery")).data
    if not item.post:
        data_thumb = ""
        item.url = item.url.replace("/gallery,", "/list,")
    data = httptools.downloadpage(item.url, item.post).data
    data = re.sub(r"\n|\r|\t|\s{2}| |<br>", "", data)

    folder = filetools.join(config.get_data_path(), 'thumbs_kbagi')
    patron = 'data-file-id(.*?</p>)</div></div>'
    bloques = scrapertools.find_multiple_matches(data, patron)
    for block in bloques:
        if "adult_info" in block and not adult_content:
            continue
        size = scrapertools.find_single_match(block, '<p.*?>([^<]+)</p>')
        patron = 'class="name"><a href="([^"]+)".*?>([^<]+)<'
        scrapedurl, scrapedtitle = scrapertools.find_single_match(block, patron)
        scrapedthumbnail = scrapertools.find_single_match(block, "background-image:url\('([^']+)'")
        if scrapedthumbnail:
            try:
                thumb = scrapedthumbnail.split("-", 1)[0].replace("?", "\?")
                if data_thumb:
                    url_thumb = scrapertools.find_single_match(data_thumb, "(%s[^']+)'" % thumb)
                else:
                    url_thumb = scrapedthumbnail
                scrapedthumbnail = filetools.join(folder, "%s.jpg" % url_thumb.split("e=", 1)[1][-20:])
            except:
                scrapedthumbnail = ""
            if scrapedthumbnail:
                t = threading.Thread(target=download_thumb, args=[scrapedthumbnail, url_thumb])
                t.setDaemon(True)
                t.start()
        else:
            scrapedthumbnail = item.extra + "/img/file_types/gallery/movie.png"
        scrapedurl = item.extra + scrapedurl
        title = "%s (%s)" % (scrapedtitle, size)
        if "adult_info" in block:
            title += " [COLOR %s][+18][/COLOR]" % color4
        plot = scrapertools.find_single_match(block, '<div class="desc">(.*?)</div>')
        if plot:
            plot = scrapertools.decodeHtmlentities(plot)
        new_item = Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
                        thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, text_color=color2,
                        extra=item.extra, infoLabels={'plot': plot}, post=item.post)
        if item.post:
            try:
                new_item.folderurl, new_item.foldername = scrapertools.find_single_match(block,
                                                                                         '<p class="folder"><a href="([^"]+)".*?>([^<]+)<')
            except:
                pass
        else:
            new_item.folderurl = item.url.rsplit("/", 1)[0]
            new_item.foldername = item.foldername
        new_item.fanart = item.thumbnail
        itemlist.append(new_item)
    next_page = scrapertools.find_single_match(data, 'class="pageSplitter.*?" data-nextpage-number="([^"]+)"')
    if next_page:
        if item.post:
            post = re.sub(r'pageNumber=(\d+)', "pageNumber=" + next_page, item.post)
            url = item.url
        else:
            url = re.sub(r',\d+\?ref=pager', ",%s?ref=pager" % next_page, item.url)
            post = ""
        itemlist.append(Item(channel=item.channel, action="listado", title=">> Página Siguiente (%s)" % next_page,
                             url=url, post=post, extra=item.extra))
    return itemlist


def findvideos(item):
    logger.info()
    itemlist = []
    itemlist.append(item.clone(action="play", title="Reproducir/Descargar", server="kbagi"))
    usuario = scrapertools.find_single_match(item.url, '%s/([^/]+)/' % item.extra)
    url_usuario = item.extra + "/" + usuario
    if item.folderurl and not item.folderurl.startswith(item.extra):
        item.folderurl = item.extra + item.folderurl
    if item.post:
        itemlist.append(item.clone(action="listado", title="Ver colección: %s" % item.foldername,
                                   url=item.folderurl + "/gallery,1,1?ref=pager", post=""))
    data = httptools.downloadpage(item.folderurl).data
    token = scrapertools.find_single_match(data,
                                           'data-action="followChanged.*?name="__RequestVerificationToken".*?value="([^"]+)"')
    collection_id = item.folderurl.rsplit("-", 1)[1]
    post = "__RequestVerificationToken=%s&collectionId=%s" % (token, collection_id)
    url = "%s/action/Follow/Follow" % item.extra
    title = "Seguir Colección: %s" % item.foldername
    if "dejar de seguir" in data:
        title = "Dejar de seguir la colección: %s" % item.foldername
        url = "%s/action/Follow/UnFollow" % item.extra
    itemlist.append(item.clone(action="seguir", title=title, url=url, post=post, text_color=color5, folder=False))
    itemlist.append(
        item.clone(action="colecciones", title="Ver colecciones del usuario: %s" % usuario, url=url_usuario))
    return itemlist


def colecciones(item):
    logger.info()
    itemlist = []
    usuario = False
    data = httptools.downloadpage(item.url).data
    if "Ver colecciones del usuario" not in item.title and not item.index:
        data = jsontools.load(data)["Data"]
        content = data["Content"]
        content = re.sub(r"\n|\r|\t|\s{2}| |<br>", "", content)
    else:
        usuario = True
        if item.follow:
            content = scrapertools.find_single_match(data,
                                                     'id="followed_collections"(.*?)<div id="recommended_collections"')
        else:
            content = scrapertools.find_single_match(data,
                                                     '<div id="collections".*?<div class="collections_list(.*?)<div class="collections_list')
        content = re.sub(r"\n|\r|\t|\s{2}| |<br>", "", content)
    patron = '<a class="name" href="([^"]+)".*?>([^<]+)<.*?src="([^"]+)".*?<p class="info">(.*?)</p>'
    matches = scrapertools.find_multiple_matches(content, patron)
    index = ""
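    # User collections are paged client-side in slices of 20; the current offset travels
    # in item.index so "Página Siguiente" can re-enter this function on the same URL.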
    if item.index and item.index != "0":
        matches = matches[item.index:item.index + 20]
        if len(matches) > item.index + 20:
            index = item.index + 20
    elif len(matches) > 20:
        matches = matches[:20]
        index = 20
    folder = filetools.join(config.get_data_path(), 'thumbs_kbagi')
    for url, scrapedtitle, thumb, info in matches:
        url = item.extra + url + "/gallery,1,1?ref=pager"
        title = "%s (%s)" % (scrapedtitle, scrapertools.htmlclean(info))
        try:
            scrapedthumbnail = filetools.join(folder, "%s.jpg" % thumb.split("e=", 1)[1][-20:])
        except:
            try:
                scrapedthumbnail = filetools.join(folder, "%s.jpg" % thumb.split("/thumbnail/", 1)[1][-20:])
                thumb = thumb.replace("/thumbnail/", "/")
            except:
                scrapedthumbnail = ""
        if scrapedthumbnail:
            t = threading.Thread(target=download_thumb, args=[scrapedthumbnail, thumb])
            t.setDaemon(True)
            t.start()
        else:
            scrapedthumbnail = thumb

        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url,
                             thumbnail=scrapedthumbnail, text_color=color2, extra=item.extra,
                             foldername=scrapedtitle))
    if not usuario and data.get("NextPageUrl"):
        url = item.extra + data["NextPageUrl"]
        itemlist.append(item.clone(title=">> Página Siguiente", url=url, text_color=""))
    elif index:
        itemlist.append(item.clone(title=">> Página Siguiente", url=item.url, index=index, text_color=""))
    return itemlist


def seguir(item):
    logger.info()
    data = httptools.downloadpage(item.url, item.post)
    message = "Colección seguida"
    if "Dejar" in item.title:
        message = "La colección ya no se sigue"
    if data.sucess and config.get_platform() != "plex":
        platformtools.dialog_notification("Acción correcta", message)


def cuenta(item):
    logger.info()
    itemlist = []
    web = "kbagi"
    if "diskokosmiko" in item.extra:
        web = "diskokosmiko"
        logueado, error_message = login("diskokosmiko.mx")
        if not logueado:
            itemlist.append(item.clone(title=error_message, action="configuracion", folder=False))
            return itemlist
    user = config.get_setting("%suser" % web, "kbagi")
    user = unicode(user, "utf8").lower().encode("utf8")
    url = item.extra + "/" + urllib.quote(user)
    data = httptools.downloadpage(url).data
    num_col = scrapertools.find_single_match(data, 'name="Has_collections" value="([^"]+)"')
    if num_col != "0":
        itemlist.append(item.clone(action="colecciones", url=url, index="0", title="Ver mis colecciones",
                                   text_color=color5))
    else:
        itemlist.append(item.clone(action="", title="No tienes ninguna colección", text_color=color4))

    num_follow = scrapertools.find_single_match(data, 'name="Follows_collections" value="([^"]+)"')
    if num_follow != "0":
        itemlist.append(item.clone(action="colecciones", url=url, index="0", title="Colecciones que sigo",
                                   text_color=color5, follow=True))
    else:
        itemlist.append(item.clone(action="", title="No sigues ninguna colección", text_color=color4))
    return itemlist


def filtro(item):
    logger.info()
    list_controls = []
    valores = {}
    dict_values = None
    list_controls.append({'id': 'search', 'label': 'Texto a buscar', 'enabled': True, 'color': '0xFFC52020',
                          'type': 'text', 'default': '', 'visible': True})
    list_controls.append({'id': 'tipo', 'label': 'Tipo de búsqueda', 'enabled': True, 'color': '0xFFFF8000',
                          'type': 'list', 'default': -1, 'visible': True})
    list_controls[1]['lvalues'] = ['Aplicación', 'Archivo', 'Documento', 'Imagen', 'Música', 'Vídeo', 'Todos']
    valores['tipo'] = ['Application', 'Archive', 'Document', 'Image', 'Music', 'Video', '']
    list_controls.append({'id': 'ext', 'label': 'Extensión', 'enabled': True, 'color': '0xFFF4FA58',
                          'type': 'text', 'default': '', 'visible': True})
    list_controls.append({'id': 'tmin', 'label': 'Tamaño mínimo (MB)', 'enabled': True, 'color': '0xFFCC2EFA',
                          'type': 'text', 'default': '0', 'visible': True})
    list_controls.append({'id': 'tmax', 'label': 'Tamaño máximo (MB)', 'enabled': True, 'color': '0xFF2ECCFA',
                          'type': 'text', 'default': '0', 'visible': True})
    # Use the default/saved values
    web = "kbagi"
    if "diskokosmiko" in item.extra:
        web = "diskokosmiko"
    valores_guardados = config.get_setting("filtro_defecto_" + web, item.channel)
    if valores_guardados:
        dict_values = valores_guardados
    item.valores = valores
    return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values,
                                               caption="Filtra la búsqueda", item=item, callback='filtrado')


def filtrado(item, values):
    values_copy = values.copy()
    web = "kbagi"
    if "diskokosmiko" in item.extra:
        web = "diskokosmiko"
    # Save this filter so it is loaded by default next time
    config.set_setting("filtro_defecto_" + web, values_copy, item.channel)
    tipo = item.valores["tipo"][values["tipo"]]
    search = values["search"]
    ext = values["ext"]
    tmin = values["tmin"]
    tmax = values["tmax"]
    if not tmin.isdigit():
        tmin = "0"
    if not tmax.isdigit():
        tmax = "0"
    item.valores = ""
    item.post = "Mode=List&Type=%s&Phrase=%s&SizeFrom=%s&SizeTo=%s&Extension=%s&ref=pager&pageNumber=1" \
                % (tipo, search, tmin, tmax, ext)
    item.action = "listado"
    return listado(item)


def download_thumb(filename, url):
    lock = threading.Lock()
    lock.acquire()
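    # NOTE: this Lock is created on every call, so it does not actually serialize
    # directory creation across the downloader threads spawned in listado()/colecciones().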
    folder = filetools.join(config.get_data_path(), 'thumbs_kbagi')
    if not filetools.exists(folder):
        filetools.mkdir(folder)
    lock.release()
    if not filetools.exists(filename):
        downloadtools.downloadfile(url, filename, silent=True)
    return filename


def delete_cache(url):
    folder = filetools.join(config.get_data_path(), 'thumbs_kbagi')
    filetools.rmdirtree(folder)
    if config.is_xbmc():
        xbmc.executebuiltin("Container.Refresh")
@@ -1,30 +0,0 @@
{
    "id": "peliscon",
    "name": "Peliscon",
    "active": false,
    "adult": false,
    "language": ["cast"],
    "thumbnail": "http://imgur.com/yTQRPUJ.png",
    "categories": [
        "movie",
        "tvshow"
    ],
    "settings": [
        {
            "id": "modo_grafico",
            "type": "bool",
            "label": "Buscar información extra",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
@@ -1,378 +0,0 @@
# -*- coding: utf-8 -*-

import re

from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger

__modo_grafico__ = config.get_setting('modo_grafico', "peliscon")

host = "http://peliscon.com"

def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(
        item.clone(title="[COLOR aqua][B]Películas[/B][/COLOR]", action="scraper", url= host + "/peliculas/",
                   thumbnail="http://imgur.com/FrcWTS8.png", fanart="http://imgur.com/MGQyetQ.jpg",
                   contentType="movie"))
    itemlist.append(itemlist[-1].clone(title="[COLOR aqua][B]Series[/B][/COLOR]", action="scraper",
                                       url= host + "/series/", thumbnail="http://imgur.com/FrcWTS8.png",
                                       fanart="http://imgur.com/i41eduI.jpg", contentType="tvshow"))
    itemlist.append(item.clone(title="[COLOR aqua][B] Últimos capitulos[/B][/COLOR]", action="ul_cap",
                               url= host + "/episodios/", thumbnail="http://imgur.com/FrcWTS8.png",
                               fanart="http://imgur.com/i41eduI.jpg", contentType="tvshow"))
    itemlist.append(itemlist[-1].clone(title="[COLOR crimson][B]Buscar[/B][/COLOR]", action="search",
                                       thumbnail="http://imgur.com/FrcWTS8.png", fanart="http://imgur.com/h1b7tfN.jpg"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/?s=" + texto
    item.extra = "search"
    try:
        return buscador(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def buscador(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)

    patron = scrapertools.find_multiple_matches(data,
                                                '<div class="result-item">.*?href="([^"]+)".*?alt="([^"]+)".*?<span class=".*?">([^"]+)</span>.*?<span class="year">([^"]+)</span>')
    for url, title, genere, year in patron:
        if "Serie" in genere:
            checkmt = "tvshow"
            genere = "[COLOR aqua][B]" + genere + "[/B][/COLOR]"
        else:
            checkmt = "movie"
            genere = "[COLOR cadetblue][B]" + genere + "[/B][/COLOR]"
        titulo = "[COLOR crimson]" + title + "[/COLOR]" + " [ " + genere + " ] "
        if checkmt == "movie":
            new_item = item.clone(action="findvideos", title=titulo, url=url, fulltitle=title, contentTitle=title,
                                  contentType="movie", library=True)
        else:
            new_item = item.clone(action="findtemporadas", title=titulo, url=url, fulltitle=title, contentTitle=title,
                                  show=title, contentType="tvshow", library=True)
        new_item.infoLabels['year'] = year
        itemlist.append(new_item)
    try:
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
        for item in itemlist:
            if not "Siguiente >>" in item.title:
                if "0." in str(item.infoLabels['rating']):
                    item.infoLabels['rating'] = "[COLOR indianred]Sin puntuación[/COLOR]"
                else:
                    item.infoLabels['rating'] = "[COLOR springgreen]" + str(item.infoLabels['rating']) + "[/COLOR]"
                item.title = item.title + " " + str(item.infoLabels['rating'])
    except:
        pass
    ## Pagination
    next = scrapertools.find_single_match(data, '<div class=\'resppages\'><a href="([^"]+)"')
    if len(next) > 0:
        url = next
        itemlist.append(item.clone(title="[COLOR springgreen][B]Siguiente >>[/B][/COLOR]", action="buscador", url=url))
    return itemlist


def scraper(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    if item.contentType == "movie":
        patron = scrapertools.find_multiple_matches(data,
                                                    '<div class="poster">.*?src="(.*?)" alt=.*?href="(.*?)">.*?'
                                                    '<h4>(.*?)<\/h4>.*?img\/flags\/(.*?)\.png.*?imdb.*?<span>(.*?)>')
        for thumb, url, title, language, year in patron:
            titulo = title
            title = re.sub(r"!|¡", "", title)
            title = title.replace("Autosia", "Autopsia")
            title = re.sub(r"’|PRE-Estreno", "'", title)
            new_item = item.clone(action="findvideos", title="[COLOR aqua]" + titulo + "[/COLOR]", url=url,
                                  fulltitle=title, contentTitle=title, contentType="movie", extra=year, library=True,
                                  language= language, infoLabels={'year':year})
            itemlist.append(new_item)
    else:
        patron = scrapertools.find_multiple_matches(data,
                                                    '<div class="poster">.*?src="(.*?)" alt=.*?href="(.*?)">.*?'
                                                    '<h4>(.*?)<\/h4>.*?<span>(.*?)<')
        for thumb, url, title, year in patron:
            titulo = title.strip()
            title = re.sub(r"\d+x.*", "", title)
            new_item = item.clone(action="findtemporadas", title="[COLOR aqua]" + titulo + "[/COLOR]", url=url,
                                  thumbnail=thumb, fulltitle=title, contentTitle=title, show=title,
                                  contentType="tvshow", library=True, infoLabels={'year':year})
            itemlist.append(new_item)
    ## Pagination
    next = scrapertools.find_single_match(data, '<div class=\'resppages\'><a href="([^"]+)"')
    if len(next) > 0:
        url = next
        itemlist.append(
            item.clone(title="[COLOR springgreen][B]Siguiente >>[/B][/COLOR]", thumbnail="http://imgur.com/a7lQAld.png",
                       url=url))
    try:
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
        for item in itemlist:
            if not "Siguiente >>" in item.title:
                if "0." in str(item.infoLabels['rating']):
                    item.infoLabels['rating'] = "[COLOR indianred]Sin puntuación[/COLOR]"
                else:
                    item.infoLabels['rating'] = "[COLOR springgreen]" + str(item.infoLabels['rating']) + "[/COLOR]"
                item.title = item.title + " " + str(item.infoLabels['rating'])
    except:
        pass
    return itemlist


def ul_cap(item):
    itemlist = []
    logger.info()
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    patron = scrapertools.find_multiple_matches(data,
                                                '<div class="poster">.*?<img src="([^"]+)" alt="([^"]+):.*?href="([^"]+)"><span class="b">(\d+x\d+)<\/span>')
    for thumb, title, url, cap in patron:
        temp = re.sub(r"x\d+", "", cap)
        epi = re.sub(r"\d+x", "", cap)
        titulo = title.strip() + "--" + "[COLOR red][B]" + cap + "[/B][/COLOR]"
        title = re.sub(r"\d+x.*", "", title)
        new_item = item.clone(action="findvideos", title="[COLOR aqua]" + titulo + "[/COLOR]", url=url, thumbnail=thumb,
                              fulltitle=title, contentTitle=title, show=title, contentType="tvshow", temp=temp, epi=epi,
                              library=True)
        itemlist.append(new_item)
    ## Pagination
    next = scrapertools.find_single_match(data, '<div class=\'resppages\'><a href="([^"]+)"')
    if len(next) > 0:
        url = next
        itemlist.append(
            item.clone(title="[COLOR springgreen][B]Siguiente >>[/B][/COLOR]", thumbnail="http://imgur.com/a7lQAld.png",
                       url=url))
    try:
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
        for item in itemlist:
            if not "Siguiente >>" in item.title:
                if "0." in str(item.infoLabels['rating']):
                    item.infoLabels['rating'] = "[COLOR indianred]Sin puntuación[/COLOR]"
                else:
                    item.infoLabels['rating'] = "[COLOR springgreen] (" + str(item.infoLabels['rating']) + ")[/COLOR]"
                item.title = item.title + " " + str(item.infoLabels['rating'])
    except:
        pass
    return itemlist


def findtemporadas(item):
    logger.info()
    itemlist = []
    if not item.temp:
        check_temp = None
    else:
        check_temp = "yes"
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
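    # item.extra packs fanart/extra art URLs separated by "|"; the branches below
    # unpack between one and six fields depending on what was passed along.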
    if len(item.extra.split("|")):
        if len(item.extra.split("|")) >= 4:
            fanart = item.extra.split("|")[2]
            extra = item.extra.split("|")[3]
            try:
                fanart_extra = item.extra.split("|")[4]
            except:
                fanart_extra = item.extra.split("|")[3]
            try:
                fanart_info = item.extra.split("|")[5]
            except:
                fanart_info = item.extra.split("|")[3]
        elif len(item.extra.split("|")) == 3:
            fanart = item.extra.split("|")[2]
            extra = item.extra.split("|")[0]
            fanart_extra = item.extra.split("|")[0]
            fanart_info = item.extra.split("|")[1]
        elif len(item.extra.split("|")) == 2:
            fanart = item.extra.split("|")[1]
            extra = item.extra.split("|")[0]
            fanart_extra = item.extra.split("|")[0]
            fanart_info = item.extra.split("|")[1]
        else:
            extra = item.extra
            fanart_extra = item.extra
            fanart_info = item.extra
    try:
        logger.info(fanart_extra)
        logger.info(fanart_info)
    except:
        fanart_extra = item.fanart
        fanart_info = item.fanart

    bloque_episodios = scrapertools.find_multiple_matches(data, 'Temporada (\d+) <i>(.*?)</div></li></ul></div></div>')
    for temporada, bloque_epis in bloque_episodios:
        item.infoLabels = item.InfoLabels
        item.infoLabels['season'] = temporada
        itemlist.append(item.clone(action="epis",
                                   title="[COLOR cornflowerblue][B]Temporada [/B][/COLOR]" + "[COLOR darkturquoise][B]" + temporada + "[/B][/COLOR]",
                                   url=bloque_epis, contentType=item.contentType, contentTitle=item.contentTitle,
                                   show=item.show, extra=item.extra, fanart_extra=fanart_extra, fanart_info=fanart_info,
                                   datalibrary=data, check_temp=check_temp, folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    if config.get_videolibrary_support() and itemlist:
        if len(bloque_episodios) == 1:
            extra = "epis"
        else:
            extra = "epis###serie_add"

        infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'], 'tvdb_id': item.infoLabels['tvdb_id'],
                      'imdb_id': item.infoLabels['imdb_id']}
        itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", text_color="0xFFe5ffcc",
                             action="add_serie_to_library", extra="", url=item.url,
                             contentSerieName=item.fulltitle, infoLabels=infoLabels,
                             thumbnail='http://imgur.com/3ik73p8.png', datalibrary=data))
    return itemlist


def epis(item):
    logger.info()
    itemlist = []
    if item.extra == "serie_add":
        item.url = item.datalibrary
    patron = scrapertools.find_multiple_matches(item.url, '<div class="imagen"><a href="([^"]+)".*?"numerando">(.*?)<')
    for url, epi in patron:
        episodio = scrapertools.find_single_match(epi, '\d+ - (\d+)')
        item.infoLabels['episode'] = episodio
        epi = re.sub(r" - ", "X", epi)
        itemlist.append(
            item.clone(title="[COLOR deepskyblue]Episodio " + "[COLOR red]" + epi, url=url, action="findvideos",
                       show=item.show, fanart=item.extra, extra=item.extra, fanart_extra=item.fanart_extra,
                       fanart_info=item.fanart_info, check_temp=item.check_temp, folder=True))
    if item.extra != "serie_add":
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
        for item in itemlist:
            item.fanart = item.extra
            if item.infoLabels['title']: title = "[COLOR royalblue]" + item.infoLabels['title'] + "[/COLOR]"
            item.title = item.title + " -- \"" + title + "\""
    return itemlist


def findvideos(item):
    logger.info()
    itemlist = []
    if item.temp:
        url_epis = item.url
    data = httptools.downloadpage(item.url).data
    if item.contentType != "movie":
        if not item.infoLabels['episode']:
            capitulo = scrapertools.find_single_match(item.title, '(\d+x\d+)')
            url_capitulo = scrapertools.find_single_match(data,
                                                          '<a href="(http://www.divxtotal.com/wp-content/uploads/.*?' + capitulo + '.*?.torrent)')
            if len(item.extra.split("|")) >= 2:
                extra = item.extra
            else:
                extra = item.fanart
        else:
            capitulo = item.title
            url_capitulo = item.url
        try:
            fanart = item.fanart_extra
        except:
            fanart = item.extra.split("|")[0]
        url_data = scrapertools.find_multiple_matches(data, '<div id="option-(.*?)".*?src="([^"]+)"')
        for option, url in url_data:
            server, idioma = scrapertools.find_single_match(data,
                                                            'href="#option-' + option + '">.*?</b>(.*?)<span class="dt_flag">.*?flags/(.*?).png')
            if not item.temp:
                item.infoLabels['year'] = None
            if item.temp:
                capitulo = re.sub(r".*--.*", "", capitulo)
                title = "[COLOR darkcyan][B]Ver capítulo [/B][/COLOR]" + "[COLOR red][B]" + capitulo + "[/B][/COLOR]"
                new_item = item.clone(title=title, url=url, action="play", fanart=fanart, thumbnail=item.thumbnail,
                                      server_v=server, idioma=idioma, extra=item.extra, fulltitle=item.fulltitle,
                                      folder=False)
                new_item.infoLabels['episode'] = item.epi
                new_item.infoLabels['season'] = item.temp
                itemlist.append(new_item)
                itemlist = servertools.get_servers_itemlist(itemlist)
            else:
                title = "[COLOR darkcyan][B]Ver capítulo [/B][/COLOR]" + "[COLOR red][B]" + capitulo + "[/B][/COLOR]" + " " + "[COLOR darkred]" + server + " ( " + idioma + " )" + "[/COLOR]"
                itemlist.append(Item(channel=item.channel, title=title, url=url, action="play", fanart=fanart,
                                     thumbnail=item.thumbnail, extra=item.extra, fulltitle=item.fulltitle,
                                     folder=False))
        if item.temp:
            tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
            for item in itemlist:
                if item.infoLabels['title']: title_inf = "[COLOR royalblue]" + item.infoLabels['title'] + "[/COLOR]"
                item.title = item.title + " -- \"" + title_inf + "\"" + " " + "[COLOR darkred]" + item.server_v + " ( " + item.idioma + " )" + "[/COLOR]"
        if item.infoLabels['episode'] and item.library or item.temp and item.library:
            thumbnail = scrapertools.find_single_match(item.extra, 'http://assets.fanart.tv/.*jpg')
            if thumbnail == "":
                thumbnail = item.thumbnail
            if not "assets.fanart" in item.fanart_info:
                fanart = item.fanart_info
            else:
                fanart = item.fanart
            if item.temp:
                item.infoLabels['tvdb_id'] = item.tvdb
        if item.temp and not item.check_temp:
            url_epis = re.sub(r"-\dx.*", "", url_epis)
            url_epis = url_epis.replace("episodios", "series")
            itemlist.append(
                Item(channel=item.channel, title="[COLOR salmon][B]Todos los episodios[/B][/COLOR]", url=url_epis,
                     action="findtemporadas", server="torrent",
                     thumbnail=item.infoLabels['thumbnail'],
                     contentType=item.contentType, contentTitle=item.contentTitle, InfoLabels=item.infoLabels,
                     thumb_art=item.thumb_art, thumb_info=item.thumbnail, fulltitle=item.fulltitle,
                     library=item.library, temp=item.temp, folder=True))
    else:
        url_data = scrapertools.find_multiple_matches(data, '<div id="option-(.*?)".*?src="([^"]+)"')
        for option, url in url_data:
            server, idioma = scrapertools.find_single_match(data,
                                                            'href="#option-' + option + '">.*?</b>(.*?)<span class="dt_flag">.*?flags/(.*?).png')
            title = server + " ( " + idioma + " )"
            item.infoLabels['year'] = None

            itemlist.append(Item(channel=item.channel, title="[COLOR dodgerblue][B]" + title + " [/B][/COLOR]", url=url,
                                 action="play", fanart=item.fanart, thumbnail=item.thumbnail, extra=item.extra,
                                 InfoLabels=item.infoLabels, folder=True))

        if item.library and config.get_videolibrary_support() and len(itemlist) > 0:
            infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'],
                          'title': item.infoLabels['title']}
            itemlist.append(Item(channel=item.channel, title="Añadir esta película a la videoteca",
                                 action="add_pelicula_to_library", url=item.url, fanart=item.extra.split("|")[0],
                                 infoLabels=infoLabels, text_color="0xFFe5ffcc",
                                 thumbnail='http://imgur.com/3ik73p8.png'))
    return itemlist


def play(item):
    itemlist = []
    videolist = servertools.find_video_items(data=item.url)
    for video in videolist:
        itemlist.append(
            Item(channel=item.channel, title="[COLOR saddlebrown][B]" + video.server + "[/B][/COLOR]", url=video.url,
                 server=video.server, action="play", fanart=item.fanart, thumbnail=item.thumbnail, extra=item.extra,
                 InfoLabels=item.infoLabels, folder=False))
    return itemlist


def get_year(url):
    data = httptools.downloadpage(url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    year = scrapertools.find_single_match(data, 'Fecha de lanzamiento.*?, (\d\d\d\d)')
    if year == "":
        year = "1111"
    return year
@@ -1,85 +0,0 @@
{
    "id": "plusdede",
    "name": "Plusdede",
    "active": false,
    "adult": false,
    "language": ["cast"],
    "thumbnail": "https://s18.postimg.cc/e17e98eqh/6_-_4_Isbv_Q3.png",
    "banner": "plusdede.png",
    "categories": [
        "movie",
        "tvshow"
    ],
    "settings": [
        {
            "id": "plusdedeuser",
            "type": "text",
            "label": "@30014",
            "enabled": true,
            "visible": true
        },
        {
            "id": "plusdedepassword",
            "type": "text",
            "hidden": true,
            "label": "@30015",
            "enabled": "!eq(-1,'')",
            "visible": true
        },
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": "!eq(-1,'') + !eq(-2,'')",
            "visible": true
        },
        {
            "id": "plusdedesortlinks",
            "type": "list",
            "label": "Ordenar enlaces",
            "default": 0,
            "enabled": true,
            "visible": "!eq(-2,'') + !eq(-3,'')",
            "lvalues": [
                "No",
                "Por no Reportes",
                "Por Idioma",
                "Por Calidad",
                "Por Idioma y Calidad",
                "Por Idioma y no Reportes",
                "Por Idioma, Calidad y no Reportes"
            ]
        },
        {
            "id": "plusdedeshowlinks",
            "type": "list",
            "label": "Mostrar enlaces",
            "default": 0,
            "enabled": true,
            "visible": "!eq(-3,'') + !eq(-4,'')",
            "lvalues": [
                "Todos",
                "Ver online",
                "Descargar"
            ]
        },
        {
            "id": "plusdedenumberlinks",
            "type": "list",
            "label": "Limitar número de enlaces",
            "default": 0,
            "enabled": true,
            "visible": "!eq(-4,'') + !eq(-5,'')",
            "lvalues": [
                "No",
                "5",
                "10",
                "15",
                "20",
                "25",
                "30"
            ]
        }
    ]
}
@@ -1,961 +0,0 @@
# -*- coding: utf-8 -*-

import os
import re
import sys
import urlparse
import xbmcgui  # assumed import: the captcha dialog in login() uses xbmcgui, but no import was shown
from time import sleep

from core import channeltools
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools

HOST = 'https://www.plusdede.com'
__channel__ = 'plusdede'
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
color1, color2, color3 = ['0xFFB10021', '0xFFB10021', '0xFFB10004']


def login():
    url_origen = HOST+"/login?popup=1"
    try:
        data = httptools.downloadpage(url_origen).data
    except:
        data = httptools.downloadpage(url_origen, follow_redirects=False).data
    if '<span class="username">' in data:
        return True
    token = scrapertools.find_single_match(data, '<input name="_token" type="hidden" value="([^"]+)"')
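    # If the login form carries a numeric captcha, save the captcha image, display it
    # in a Kodi dialog and ask the user to type the digits; they are appended to the POST.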
|
||||
if re.search('Escribe los números de la imagen', data):
|
||||
captcha_url = scrapertools.find_single_match(data, '<img src="([^"]+)" alt="captcha">')
|
||||
imagen_data = httptools.downloadpage(captcha_url).data
|
||||
ficheropng = os.path.join(config.get_data_path(), "captcha_plusdede.png")
|
||||
outfile=open(ficheropng,'wb')
|
||||
outfile.write(imagen_data)
|
||||
outfile.close()
|
||||
img = xbmcgui.ControlImage(450,15,400,130,ficheropng)
|
||||
wdlg = xbmcgui.WindowDialog()
|
||||
wdlg.addControl(img)
|
||||
wdlg.show()
|
||||
sleep(1)
|
||||
kb = platformtools.dialog_numeric(0, "Escribe los números de la imagen")
|
||||
|
||||
postcaptcha = ""
|
||||
if kb !='':
|
||||
solution = kb
|
||||
postcaptcha = "&captcha=" + str(solution)
|
||||
else:
|
||||
return False
|
||||
wdlg.close()
|
||||
else:
|
||||
postcaptcha=""
|
||||
|
||||
post = "_token=" + str(token) + "&email=" + str(config.get_setting("plusdedeuser", "plusdede")) + \
|
||||
"&password=" + str(config.get_setting("plusdedepassword", "plusdede")) + postcaptcha\
|
||||
#+ "&app=2131296469"
|
||||
url = HOST
|
||||
headers = {"User-Agent": "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) "
|
||||
"Chrome/66.0.3163.100 Safari/537.36", "Referer": url, "X-Requested-With": "XMLHttpRequest","X-CSRF-TOKEN":
|
||||
token}
|
||||
data = httptools.downloadpage(HOST+"/login", post=post, headers=headers,
|
||||
replace_headers=False).data
|
||||
if "redirect" in data:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
if not config.get_setting("plusdedeuser", "plusdede"):
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, title="Habilita tu cuenta en la configuración e ingresar de nuevo al canal", action="settingCanal",
|
||||
url=""))
|
||||
else:
|
||||
result = login()
|
||||
if not result:
|
||||
itemlist.append(Item(channel=item.channel, action="mainlist", title="Login fallido. Volver a intentar..."))
|
||||
return itemlist
|
||||
|
||||
item.url = HOST
|
||||
item.fanart = fanart_host
|
||||
|
||||
item.thumbnail = "https://s18.postimg.cc/r5cylu6rd/12_-_oi_RDsdv.png"
|
||||
itemlist.append(item.clone(title="Películas", action="menupeliculas", text_color=color3, text_blod=True))
|
||||
|
||||
item.thumbnail = "https://s18.postimg.cc/ruvqy6zl5/15_-_9m9_Dp1m.png"
|
||||
itemlist.append(item.clone(title="Series", action="menuseries", text_color=color3, text_blod=True))
|
||||
|
||||
itemlist.append(item.clone(title="Listas", action="menulistas", text_color=color3, text_blod=True, thumbnail = 'https://s18.postimg.cc/xj21p46ih/10_-_Uf7e_XHE.png'))
|
||||
|
||||
itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
|
||||
item.thumbnail = ""
|
||||
itemlist.append(item.clone(channel=item.channel, action="settingCanal", title="Configuración...", url="", thumbnail='https://s18.postimg.cc/c9efeassp/3_-_QAHK2_Tc.png'))
|
||||
return itemlist
|
||||
|
||||
|
||||
def settingCanal(item):
|
||||
return platformtools.show_channel_settings()
|
||||
|
||||
|
||||
def menuseries(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
item.url = HOST
|
||||
item.fanart = fanart_host
|
||||
item.text_color = None
|
||||
|
||||
item.thumbnail = "https://s18.postimg.cc/ruvqy6zl5/15_-_9m9_Dp1m.png"
|
||||
itemlist.append(item.clone(action="peliculas", title=" Novedades", url="https://www.plusdede.com/series", thumbnail='https://s18.postimg.cc/in3ihji95/11_-_WPg_H5_Kx.png'))
|
||||
itemlist.append(item.clone(action="generos", title=" Por géneros", url="https://www.plusdede.com/series", thumbnail='https://s18.postimg.cc/p0slktaah/5_-_c_Nf_KRvm.png'))
|
||||
itemlist.append(
|
||||
item.clone(action="peliculas", title=" Siguiendo", url="https://www.plusdede.com/series/following", thumbnail='https://s18.postimg.cc/68gqh7j15/7_-_tqw_AHa5.png'))
|
||||
itemlist.append(item.clone(action="peliculas", title=" Capítulos Pendientes",
|
||||
url="https://www.plusdede.com/series/mypending/0?popup=1", viewmode="movie", thumbnail='https://s18.postimg.cc/9s2o71w1l/2_-_3dbbx7_K.png'))
|
||||
itemlist.append(
|
||||
item.clone(action="peliculas", title=" Favoritas", url="https://www.plusdede.com/series/favorites", thumbnail='https://s18.postimg.cc/n8zmpwynd/4_-_JGrig_Ep.png'))
|
||||
itemlist.append(
|
||||
item.clone(action="peliculas", title=" Pendientes", url="https://www.plusdede.com/series/pending", thumbnail='https://s18.postimg.cc/4gnrmacix/13_-_cwl_TDog.png'))
|
||||
itemlist.append(item.clone(action="peliculas", title=" Terminadas", url="https://www.plusdede.com/series/seen", thumbnail='https://s18.postimg.cc/5vpcay0qh/17_-_M2in_Fp_O.png'))
|
||||
itemlist.append(
|
||||
item.clone(action="peliculas", title=" Recomendadas", url="https://www.plusdede.com/series/recommended", thumbnail='https://s18.postimg.cc/bwn182sih/14_-_fin32_Kp.png'))
|
||||
itemlist.append(item.clone(action="search", title=" Buscar...", url="https://www.plusdede.com/series", thumbnaiil='https://s18.postimg.cc/s7n54ghvt/1_-_01_ZDYii.png'))
|
||||
itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
|
||||
|
||||
item.thumbnail = ""
|
||||
itemlist.append(Item(channel=item.channel, action="settingCanal", title="Configuración...", url="", thumbnail='https://s18.postimg.cc/c9efeassp/3_-_QAHK2_Tc.png'))
|
||||
return itemlist
|
||||
|
||||
|
||||
def menupeliculas(item):
|
||||
logger.info()
|
||||
|
||||
itemlist = []
|
||||
item.url = HOST
|
||||
item.fanart = fanart_host
|
||||
item.text_color = None
|
||||
|
||||
item.thumbnail = "https://s18.postimg.cc/r5cylu6rd/12_-_oi_RDsdv.png"
|
||||
itemlist.append(item.clone(action="peliculas", title=" Novedades", url="https://www.plusdede.com/pelis", thumbnail='https://s18.postimg.cc/in3ihji95/11_-_WPg_H5_Kx.png'))
|
||||
itemlist.append(item.clone(action="generos", title=" Por géneros", url="https://www.plusdede.com/pelis", thumbnail='https://s18.postimg.cc/p0slktaah/5_-_c_Nf_KRvm.png'))
|
||||
itemlist.append(item.clone(action="peliculas", title=" Solo HD", url="https://www.plusdede.com/pelis?quality=3", thumbnail='https://s18.postimg.cc/e17e95mfd/16_-_qmqn4_Si.png'))
|
||||
itemlist.append(
|
||||
item.clone(action="peliculas", title=" Pendientes", url="https://www.plusdede.com/pelis/pending", thumbnail='https://s18.postimg.cc/4gnrmacix/13_-_cwl_TDog.png'))
|
||||
itemlist.append(
|
||||
item.clone(action="peliculas", title=" Recomendadas", url="https://www.plusdede.com/pelis/recommended", thumbnail='https://s18.postimg.cc/bwn182sih/14_-_fin32_Kp.png'))
|
||||
itemlist.append(
|
||||
item.clone(action="peliculas", title=" Favoritas", url="https://www.plusdede.com/pelis/favorites", thumbnail='https://s18.postimg.cc/n8zmpwynd/4_-_JGrig_Ep.png'))
|
||||
itemlist.append(item.clone(action="peliculas", title=" Vistas", url="https://www.plusdede.com/pelis/seen", thumbnail='https://s18.postimg.cc/5vpcay0qh/17_-_M2in_Fp_O.png'))
|
||||
itemlist.append(item.clone(action="search", title=" Buscar...", url="https://www.plusdede.com/pelis", thumbnail='https://s18.postimg.cc/s7n54ghvt/1_-_01_ZDYii.png'))
|
||||
itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
|
||||
|
||||
item.thumbnail = ""
|
||||
itemlist.append(item.clone(channel=item.channel, action="settingCanal", title="Configuración...", url="", thumbnail='https://s18.postimg.cc/c9efeassp/3_-_QAHK2_Tc.png'))
|
||||
return itemlist
|
||||
|
||||
|
||||
def menulistas(item):
|
||||
logger.info()
|
||||
|
||||
itemlist = []
|
||||
item.url = HOST
|
||||
item.fanart = fanart_host
|
||||
item.text_color = None
|
||||
|
||||
itemlist.append(
|
||||
item.clone(action="listas", tipo="populares", title=" Populares", url="https://www.plusdede.com/listas", thumbnail='https://s18.postimg.cc/7aqwzrha1/8_-_3rn14_Tq.png'))
|
||||
itemlist.append(
|
||||
item.clone(action="listas", tipo="siguiendo", title=" Siguiendo", url="https://www.plusdede.com/listas", thumbnail='https://s18.postimg.cc/4tf5sha89/9_-_z_F8c_UBT.png'))
|
||||
itemlist.append(
|
||||
item.clone(action="listas", tipo="tuslistas", title=" Tus Listas", url="https://www.plusdede.com/listas"))
|
||||
itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
|
||||
|
||||
item.thumbnail = ""
|
||||
itemlist.append(item.clone(channel=item.channel, action="settingCanal", title="Configuración...", url="", thumbnail='https://s18.postimg.cc/c9efeassp/3_-_QAHK2_Tc.png'))
|
||||
return itemlist
|
||||
|
||||
|
||||
def generos(item):
|
||||
logger.info()
|
||||
tipo = item.url.replace("https://www.plusdede.com/", "")
|
||||
# Descarga la pagina
|
||||
data = httptools.downloadpage(item.url).data
|
||||
|
||||
# Extrae las entradas (carpetas)
|
||||
data = scrapertools.find_single_match(data,
|
||||
'<select name="genre_id" class="selectpicker" title="Selecciona...">(.*?)</select>')
|
||||
patron = '<option value="([^"]+)">([^<]+)</option>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
itemlist = []
|
||||
|
||||
for id_genere, title in matches:
|
||||
title = title.strip()
|
||||
thumbnail = ""
|
||||
plot = ""
|
||||
# https://www.plusdede.com/pelis?genre_id=1
|
||||
url = "https://www.plusdede.com/" + tipo + "?genre_id=" + id_genere
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot,
|
||||
fulltitle=title))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||


def search(item, texto):
    logger.info()
    item.tipo = item.url.replace("https://www.plusdede.com/", "")
    item.url = "https://www.plusdede.com/search/"
    texto = texto.replace(" ", "-")

    item.url = item.url + texto
    try:
        return buscar(item)
    # Catch the exception so one failing channel does not break the global search
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def buscar(item):
    logger.info()

    # Download the page
    headers = {"X-Requested-With": "XMLHttpRequest"}
    data = httptools.downloadpage(item.url, headers=headers).data

    # Extract the entries (folders)
    json_object = jsontools.load(data)
    data = json_object["content"]

    return parse_mixed_results(item, data)
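
# The AJAX search endpoint appears to answer with a JSON envelope whose "content"
# field carries a rendered HTML fragment, roughly (illustrative shape only):
#   {"content": "<div class=\"media-dropdown mini dropdown model\" ...>"}
# which is why buscar() unwraps json_object["content"] before regex-scraping it.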


def parse_mixed_results(item, data):
    itemlist = []
    patron = '<div class="media-dropdown mini dropdown model" data-value="([^"]+)"+'
    patron += '.*?<a href="([^"]+)"[^<]data-toggle="tooltip" data-container="body"+'
    patron += ' data-delay="500" title="([^"]+)"[^<]+'
    patron += '.*?src="([^"]+)"+'
    patron += '.*?<div class="year">([^<]+)</div>+'
    patron += '.*?<div class="value"><i class="fa fa-star"></i> ([^<]+)</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if item.tipo == "lista":
        following = scrapertools.find_single_match(data, '<div class="follow-lista-buttons ([^"]+)">')
        data_id = scrapertools.find_single_match(data, 'data-model="10" data-id="([^"]+)">')
        if following.strip() == "following":
            itemlist.append(
                Item(channel='plusdede', title="Dejar de seguir", idtemp=data_id, token=item.token, valor="unfollow",
                     action="plusdede_check", url=item.url, tipo=item.tipo))
        else:
            itemlist.append(
                Item(channel='plusdede', title="Seguir esta lista", idtemp=data_id, token=item.token, valor="follow",
                     action="plusdede_check", url=item.url, tipo=item.tipo))

    for visto, scrapedurl, scrapedtitle, scrapedthumbnail, scrapedyear, scrapedvalue in matches:
        title = ""
        if visto.strip() == "seen":
            title += "[visto] "
        title += scrapertools.htmlclean(scrapedtitle)
        if scrapedyear != '':
            title += " (" + scrapedyear + ")"
        fulltitle = title
        if scrapedvalue != '':
            title += " (" + scrapedvalue + ")"
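        # At this point a title looks like "[visto] The Lego Movie (2014) (7.8)":
        # the seen flag, year and user rating are appended only when present.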
        thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
        fanart = thumbnail.replace("mediathumb", "mediabigcover")
        plot = ""
        # https://www.plusdede.com/peli/the-lego-movie
        # https://www.plusdede.com/links/view/slug/the-lego-movie/what/peli?popup=1

        if "/peli/" in scrapedurl or "/docu/" in scrapedurl:
            # sectionStr = "peli" if "/peli/" in scrapedurl else "docu"
            if "/peli/" in scrapedurl:
                sectionStr = "peli"
            else:
                sectionStr = "docu"
            referer = urlparse.urljoin(item.url, scrapedurl)
            url = urlparse.urljoin(item.url, scrapedurl)
            if item.tipo != "series":
                itemlist.append(Item(channel=item.channel, action="findvideos", title=title, extra=referer, url=url,
                                     thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, fanart=fanart,
                                     contentTitle=scrapedtitle, contentType="movie", context=["buscar_trailer"]))
        else:
            referer = item.url
            url = urlparse.urljoin(item.url, scrapedurl)
            if item.tipo != "pelis":
                itemlist.append(Item(channel=item.channel, action="episodios", title=title, extra=referer, url=url,
                                     thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, show=title, fanart=fanart,
                                     contentTitle=scrapedtitle, contentType="tvshow", context=["buscar_trailer"]))

    next_page = scrapertools.find_single_match(data,
                                               '<div class="onclick load-more-icon no-json" data-action="replace" data-url="([^"]+)">')
    if next_page != "":
        url = urlparse.urljoin("https://www.plusdede.com", next_page).replace("amp;", "")
        itemlist.append(
            Item(channel=item.channel, action="pag_sig", token=item.token, title=">> Página siguiente",
                 extra=item.extra, url=url))

    try:
        import xbmcplugin
        xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_UNSORTED)
        xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_TITLE)
    except:
        pass
    return itemlist


def siguientes(item):  # Not used
    logger.info()

    # Download the page
    data = httptools.downloadpage(item.url).data

    # Extract the entries (folders)
    bloque = scrapertools.find_single_match(data, '<h2>Siguiendo</h2>(.*?)<div class="box">')
    patron = '<div class="coverMini shadow tiptip" title="([^"]+)">[^<]+'
    patron += '<img class="centeredPic centeredPicFalse" onerror="[^"]+" src="([^"]+)"[^<]+'
    patron += '<img src="/images/loading-mini.gif" class="loader"/>[^<]+'
    patron += '<div class="extra-info"><span class="year">[^<]+'
    patron += '</span><span class="value"><i class="icon-star"></i>[^<]+'
    patron += '</span></div>[^<]+'
    patron += '</div>[^<]+'
    patron += '</a>[^<]+'
    patron += '<a class="userepiinfo defaultLink" href="([^"]+)">(\d+)x(\d+)'
    matches = re.compile(patron, re.DOTALL).findall(bloque)  # search within the "Siguiendo" block extracted above
    itemlist = []

    # for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
    for scrapedtitle, scrapedthumbnail, scrapedurl, scrapedsession, scrapedepisode in matches:
        title = scrapertools.htmlclean(scrapedtitle)
        session = scrapertools.htmlclean(scrapedsession)
        episode = scrapertools.htmlclean(scrapedepisode)
        thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
        fanart = thumbnail.replace("mediathumb", "mediabigcover")
        plot = ""
        title = session + "x" + episode + " - " + title
        # https://www.plusdede.com/peli/the-lego-movie
        # https://www.plusdede.com/links/view/slug/the-lego-movie/what/peli?popup=1

        referer = urlparse.urljoin(item.url, scrapedurl)
        url = referer
        # itemlist.append( Item(channel=item.channel, action="episodios" , title=title , url=url, thumbnail=thumbnail, plot=plot, fulltitle=title, show=title))
        itemlist.append(
            Item(channel=item.channel, action="episodio", title=title, url=url, thumbnail=thumbnail, plot=plot,
                 fulltitle=title, show=title, fanart=fanart, extra=session + "|" + episode))

    return itemlist


def episodio(item):
    logger.info()
    itemlist = []

    # Download the page
    data = httptools.downloadpage(item.url).data

    session = str(int(item.extra.split("|")[0]))
    episode = str(int(item.extra.split("|")[1]))
    patrontemporada = '<div class="checkSeason"[^>]+>Temporada ' + session + '<div class="right" onclick="controller.checkSeason(.*?)\s+</div></div>'
    matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data)

    for bloque_episodios in matchestemporadas:

        # Extract the episodes
        patron = '<span class="title defaultPopup" href="([^"]+)"><span class="number">' + episode + ' </span>([^<]+)</span>(\s*</div>\s*<span[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></span><div[^>]*><button[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></button><div class="action([^"]*)" data-action="seen">)?'
        matches = re.compile(patron, re.DOTALL).findall(bloque_episodios)

        for scrapedurl, scrapedtitle, info, visto in matches:
            # visto_string = "[visto] " if visto.strip()=="active" else ""
            if visto.strip() == "active":
                visto_string = "[visto] "
            else:
                visto_string = ""
            numero = episode
            title = visto_string + session + "x" + numero + " " + scrapertools.htmlclean(scrapedtitle)
            thumbnail = ""
            plot = ""
            # https://www.plusdede.com/peli/the-lego-movie
            # https://www.plusdede.com/links/view/slug/the-lego-movie/what/peli?popup=1
            # https://www.plusdede.com/links/viewepisode/id/475011?popup=1
            epid = scrapertools.find_single_match(scrapedurl, "id/(\d+)")
            url = "https://www.plusdede.com/links/viewepisode/id/" + epid
            itemlist.append(
                Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
                     fulltitle=title, fanart=item.fanart, show=item.show))

    itemlist2 = []
    for capitulo in itemlist:
        # accumulate the links of every matching episode instead of keeping
        # only the last findvideos() result
        itemlist2.extend(findvideos(capitulo))
    return itemlist2


def peliculas(item):
    logger.info()

    # Download the page
    headers = {"X-Requested-With": "XMLHttpRequest"}
    data = httptools.downloadpage(item.url, headers=headers).data

    # Extract the entries (folders)
    json_object = jsontools.load(data)
    data = json_object["content"]

    return parse_mixed_results(item, data)


def episodios(item):
    logger.info()
    itemlist = []

    # Download the page
    idserie = ''
    data = httptools.downloadpage(item.url).data
    patrontemporada = '<ul.*?<li class="season-header" >([^<]+)<(.*?)\s+</ul>'
    matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data)
    idserie = scrapertools.find_single_match(data, 'data-model="5" data-id="(\d+)"')
    token = scrapertools.find_single_match(data, '_token" content="([^"]+)"')
    if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("kodi")):
        itemlist.append(Item(channel=item.channel, action="infosinopsis", title="INFO / SINOPSIS", url=item.url,
                             thumbnail=item.thumbnail, fanart=item.fanart, folder=False))
    for nombre_temporada, bloque_episodios in matchestemporadas:
        # Extract the episodes
        patron_episodio = '<li><a href="#"(.*?)</a></li>'
        # patron = '<li><a href="#" data-id="([^"]*)".*?data-href="([^"]+)">\s*<div class="name">\s*<span class="num">([^<]+)</span>\s*([^<]+)\s*</div>.*?"show-close-footer episode model([^"]+)"'
        matches = re.compile(patron_episodio, re.DOTALL).findall(bloque_episodios)
        for data_episodio in matches:

            scrapeid = scrapertools.find_single_match(data_episodio, '<li><a href="#" data-id="([^"]*)"')
            scrapedurl = scrapertools.find_single_match(data_episodio, 'data-href="([^"]+)">\s*<div class="name">')
            numero = scrapertools.find_single_match(data_episodio, '<span class="num">([^<]+)</span>')
            scrapedtitle = scrapertools.find_single_match(data_episodio,
                                                          '<span class="num">.*?</span>\s*([^<]+)\s*</div>')
            visto = scrapertools.find_single_match(data_episodio, '"show-close-footer episode model([^"]+)"')

            title = nombre_temporada.replace("Temporada ", "").replace("Extras de la serie", "Extras 0").replace(" ",
                                                                                                                 "") + "x" + numero + " " + scrapertools.htmlclean(
                scrapedtitle)
            if visto.strip() == "seen":
                title = "[visto] " + title

            thumbnail = item.thumbnail
            fanart = item.fanart
            plot = ""
            # https://www.plusdede.com/peli/the-lego-movie
            # https://www.plusdede.com/links/view/slug/the-lego-movie/what/peli?popup=1
            # https://www.plusdede.com/links/viewepisode/id/475011?popup=1
            # epid = scrapertools.find_single_match(scrapedurl,"id/(\d+)")
            url = "https://www.plusdede.com" + scrapedurl
            itemlist.append(
                Item(channel=item.channel, action="findvideos", nom_serie=item.title, tipo="5", title=title, url=url,
                     thumbnail=thumbnail, plot=plot, fulltitle=title, fanart=fanart, show=item.show))

    if config.get_videolibrary_support():
        # With year and rating the show cannot be refreshed correctly; if the
        # rating also changes, another folder would be created.
        # Without year and without rating:
        show = re.sub(r"\s\(\d+\)\s\(\d+\.\d+\)", "", item.show)
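        # Example: re.sub(r"\s\(\d+\)\s\(\d+\.\d+\)", "", "Lost (2004) (8.3)") -> "Lost",
        # so the videolibrary folder name stays stable across rating changes.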
        # Without year:
        # show = re.sub(r"\s\(\d+\)", "", item.show)
        # Without rating:
        # show = re.sub(r"\s\(\d+\.\d+\)", "", item.show)
        itemlist.append(
            Item(channel='plusdede', title="Añadir esta serie a la videoteca", url=item.url, token=token,
                 action="add_serie_to_library", extra="episodios###", show=show))
        itemlist.append(
            Item(channel='plusdede', title="Descargar todos los episodios de la serie", url=item.url, token=token,
                 action="download_all_episodes", extra="episodios", show=show))
        itemlist.append(Item(channel='plusdede', title="Marcar como Pendiente", tipo="5", idtemp=idserie, token=token,
                             valor="pending", action="plusdede_check", show=show))
        itemlist.append(Item(channel='plusdede', title="Marcar como Siguiendo", tipo="5", idtemp=idserie, token=token,
                             valor="following", action="plusdede_check", show=show))
        itemlist.append(Item(channel='plusdede', title="Marcar como Finalizada", tipo="5", idtemp=idserie, token=token,
                             valor="seen", action="plusdede_check", show=show))
        itemlist.append(Item(channel='plusdede', title="Marcar como Favorita", tipo="5", idtemp=idserie, token=token,
                             valor="favorite", action="plusdede_check", show=show))
        itemlist.append(
            Item(channel='plusdede', title="Quitar marca", tipo="5", idtemp=idserie, token=token, valor="nothing",
                 action="plusdede_check", show=show))
        itemlist.append(
            Item(channel='plusdede', title="Añadir a lista", tipo="5", tipo_esp="lista", idtemp=idserie, token=token,
                 action="plusdede_check", show=show))
    return itemlist


def parse_listas(item, bloque_lista):
    logger.info()

    if item.tipo == "populares":
        patron = '<div class="lista(.*?)</div>\s*</h4>'
    else:
        patron = '<div class="lista(.*?)</h4>\s*</div>'
    matches = re.compile(patron, re.DOTALL).findall(bloque_lista)
    itemlist = []

    for lista in matches:
        scrapedurl = scrapertools.htmlclean(scrapertools.find_single_match(lista, '<a href="([^"]+)">[^<]+</a>'))
        scrapedtitle = scrapertools.find_single_match(lista, '<a href="[^"]+">([^<]+)</a>')
        scrapedfollowers = scrapertools.find_single_match(lista, 'Follow: <span class="number">([^<]+)')
        scrapedseries = scrapertools.find_single_match(lista, '<div class="lista-stat badge">Series: ([^<]+)')
        scrapedpelis = scrapertools.find_single_match(lista, '<div class="lista-stat badge">Pelis: ([^<]+)')

        title = scrapertools.htmlclean(scrapedtitle) + ' ('
        if scrapedpelis != '':
            title += scrapedpelis + ' pelis, '
        if scrapedseries != '':
            title += scrapedseries + ' series, '
        if scrapedfollowers != '':
            title += scrapedfollowers + ' seguidores'
        title += ')'
        url = urlparse.urljoin("https://www.plusdede.com", scrapedurl)
        thumbnail = ""
        itemlist.append(
            Item(channel=item.channel, action="peliculas", token=item.token, tipo="lista", title=title, url=url))

    nextpage = scrapertools.find_single_match(bloque_lista,
                                              '<div class="onclick load-more-icon no-json" data-action="replace" data-url="([^"]+)"')
    if nextpage != '':
        url = urlparse.urljoin("https://www.plusdede.com", nextpage)
        itemlist.append(Item(channel=item.channel, action="lista_sig", token=item.token, tipo=item.tipo,
                             title=">> Página siguiente", extra=item.extra, url=url))

    try:
        import xbmcplugin
        xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_UNSORTED)
        xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_TITLE)
    except:
        pass

    return itemlist


def listas(item):
    logger.info()
    if item.tipo == "tuslistas":
        patron = 'Tus listas(.*?)>Listas que sigues<'
    elif item.tipo == "siguiendo":
        patron = '<h3>Listas que sigues</h3>(.*?)<h2>Listas populares</h2>'
    else:
        patron = '<div class="content">\s*<h2>Listas populares(.*?)</div>\s*</div>\s*</div>\s*</div>\s*</div>'

    data = httptools.downloadpage(item.url).data

    item.token = scrapertools.find_single_match(data, '_token" content="([^"]+)"').strip()

    bloque_lista = scrapertools.find_single_match(data, patron)

    return parse_listas(item, bloque_lista)


def lista_sig(item):
    logger.info()

    headers = {"X-Requested-With": "XMLHttpRequest"}
    data = httptools.downloadpage(item.url, headers=headers).data

    return parse_listas(item, data)


def pag_sig(item):
    logger.info()

    headers = {"X-Requested-With": "XMLHttpRequest"}
    data = httptools.downloadpage(item.url, headers=headers).data

    return parse_mixed_results(item, data)


def findvideos(item, verTodos=False):
    logger.info()

    # Download the page
    data = httptools.downloadpage(item.url).data

    data_model = scrapertools.find_single_match(data, 'data-model="([^"]+)"')
    data_id = scrapertools.find_single_match(data, 'data-id="([^"]+)"')
    trailer = "https://www.youtube.com/watch?v=" + scrapertools.find_single_match(data,
                                                                                  'data-youtube="([^"]+)" class="youtube-link')

    url = "https://www.plusdede.com/aportes/" + data_model + "/" + data_id + "?popup=1"

    data = httptools.downloadpage(url).data
    token = scrapertools.find_single_match(data, '_token" content="([^"]+)"')

    patron = 'target="_blank" (.*?)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    itemlist = []

    idpeli = data_id
    if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("kodi")) and data_model == "4":
        itemlist.append(Item(channel=item.channel, action="infosinopsis", title="INFO / SINOPSIS", url=item.url,
                             thumbnail=item.thumbnail, fanart=item.fanart, folder=False))

        itemlist.append(Item(channel=item.channel, action="play", title="TRAILER", url=item.url, trailer=trailer,
                             thumbnail=item.thumbnail, fanart=item.fanart, folder=False))

    itemsort = []
    sortlinks = config.get_setting("plusdedesortlinks",
                                   item.channel)  # 0:none, 1:rating, 2:language, 3:quality, 4:language+quality, 5:language+rating, 6:language+quality+rating
    showlinks = config.get_setting("plusdedeshowlinks", item.channel)  # 0:all, 1:streaming only, 2:downloads only

    if sortlinks != '' and sortlinks != "No":
        sortlinks = int(sortlinks)
    else:
        sortlinks = 0

    if showlinks != '' and showlinks != "No":
        showlinks = int(showlinks)
    else:
        showlinks = 0
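    # e.g. sortlinks == 4 ranks links by language first and quality second, while
    # showlinks == 1 keeps only "Ver" (streaming) links and == 2 only "Download" links.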
    for match in matches:

        jdown = scrapertools.find_single_match(match, '<span class="fa fa-download"></span>([^<]+)')
        if (showlinks == 1 and jdown != '') or (
                showlinks == 2 and jdown == ''):  # Skip streaming/download links as configured
            continue
        idioma_1 = ""
        idiomas = re.compile('<img src="https://cd.*?plusdede.com/images/flags/([^"]+).png', re.DOTALL).findall(match)
        idioma_0 = idiomas[0]
        if len(idiomas) > 1:
            idioma_1 = idiomas[1]
            idioma = idioma_0 + ", SUB " + idioma_1
        else:
            idioma_1 = ''
            idioma = idioma_0

        calidad_video = scrapertools.find_single_match(match,
                                                       '<span class="fa fa-video-camera"></span>(.*?)</div>').replace(
            " ", "").replace("\n", "")
        calidad_audio = scrapertools.find_single_match(match,
                                                       '<span class="fa fa-headphones"></span>(.*?)</div>').replace(
            " ", "").replace("\n", "")

        thumb_servidor = scrapertools.find_single_match(match, '<img src="([^"]+)">')
        nombre_servidor = scrapertools.find_single_match(thumb_servidor, "hosts/([^\.]+).png")

        if jdown != '':
            title = "Download " + nombre_servidor + " (" + idioma + ") (Calidad " + calidad_video.strip() + ", audio " + calidad_audio.strip() + ")"
        else:
            title = "Ver en " + nombre_servidor + " (" + idioma + ") (Calidad " + calidad_video.strip() + ", audio " + calidad_audio.strip() + ")"

        valoracion = 0

        reports = scrapertools.find_single_match(match,
                                                 '<i class="fa fa-exclamation-triangle"></i><br/>\s*<span class="number" data-num="([^"]*)">')
        valoracion -= int(reports)
        title += " (" + reports + " reps)"

        url = urlparse.urljoin(item.url, scrapertools.find_single_match(match, 'href="([^"]+)"'))
        thumbnail = thumb_servidor
        plot = ""
        if sortlinks > 0:
            # orden1 keeps the "Download" links behind the "Ver" links when sorting
            # orden2 follows the configured criterion
            if sortlinks == 1:
                orden = valoracion
            elif sortlinks == 2:
                orden = valora_idioma(idioma_0, idioma_1)
            elif sortlinks == 3:
                orden = valora_calidad(calidad_video, calidad_audio)
            elif sortlinks == 4:
                orden = (valora_idioma(idioma_0, idioma_1) * 100) + valora_calidad(calidad_video, calidad_audio)
            elif sortlinks == 5:
                orden = (valora_idioma(idioma_0, idioma_1) * 1000) + valoracion
            elif sortlinks == 6:
                orden = (valora_idioma(idioma_0, idioma_1) * 100000) + (
                        valora_calidad(calidad_video, calidad_audio) * 1000) + valoracion
            itemsort.append(
                {'action': "play", 'title': title, 'data_id': data_id, 'token': token, 'tipo': data_model, 'url': url,
                 'thumbnail': thumbnail, 'fanart': item.fanart, 'plot': plot, 'extra': item.url,
                 'fulltitle': item.fulltitle, 'orden1': (jdown == ''), 'orden2': orden})
        else:
            itemlist.append(
                Item(channel=item.channel, action="play", data_id=data_id, token=token, tipo=data_model, title=title,
                     url=url, thumbnail=thumbnail, fanart=item.fanart, plot=plot, extra=item.url,
                     fulltitle=item.fulltitle))

    if sortlinks > 0:
        numberlinks = config.get_setting("plusdedenumberlinks", item.channel)  # 0:all, > 0:n*5 (5,10,15,20,...)
        # numberlinks = int(numberlinks) if numberlinks != '' and numberlinks !="No" else 0
        if numberlinks != '' and numberlinks != "No":
            numberlinks = int(numberlinks)
        else:
            numberlinks = 0

        if numberlinks == 0:
            verTodos = True
        itemsort = sorted(itemsort, key=lambda k: (k['orden1'], k['orden2']), reverse=True)
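        # Sort-key sketch: orden1 (True for "Ver", False for "Download") keeps
        # streaming links first; orden2 is the composite above. With sortlinks == 6,
        # a link scoring idioma=99 and calidad=89 with 2 reports gets
        # 99*100000 + 89*1000 + (-2) = 9988998 (higher sorts first).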
        for i, subitem in enumerate(itemsort):
            if verTodos == False and i >= numberlinks:
                itemlist.append(
                    Item(channel=item.channel, action='findallvideos', title='Ver todos los enlaces', url=item.url,
                         extra=item.extra))
                break
            itemlist.append(
                Item(channel=item.channel, action=subitem['action'], title=subitem['title'], data_id=subitem['data_id'],
                     token=subitem['token'], tipo=subitem['tipo'], url=subitem['url'], thumbnail=subitem['thumbnail'],
                     fanart=subitem['fanart'], plot=subitem['plot'], extra=subitem['extra'],
                     fulltitle=subitem['fulltitle']))

    if data_model == "4":
        itemlist.append(
            Item(channel=item.channel, action="plusdede_check", tipo="4", token=token, title="Marcar como Pendiente",
                 valor="pending", idtemp=idpeli))
        itemlist.append(
            Item(channel=item.channel, action="plusdede_check", tipo="4", token=token, title="Marcar como Vista",
                 valor="seen", idtemp=idpeli))
        itemlist.append(
            Item(channel=item.channel, action="plusdede_check", tipo="4", token=token, title="Marcar como Favorita",
                 valor="favorite", idtemp=idpeli))
        itemlist.append(Item(channel=item.channel, action="plusdede_check", tipo="4", token=token, title="Quitar Marca",
                             valor="nothing", idtemp=idpeli))
        itemlist.append(
            Item(channel='plusdede', title="Añadir a lista", tipo="4", tipo_esp="lista", idtemp=idpeli, token=token,
                 action="plusdede_check"))
    return itemlist


def findallvideos(item):
    return findvideos(item, True)


def play(item):
    itemlist = []
    if "trailer" in item:
        url = item.trailer
        itemlist = servertools.find_video_items(data=url)

        for videoitem in itemlist:
            videoitem.title = item.title
            videoitem.fulltitle = item.fulltitle
            videoitem.thumbnail = item.thumbnail
            videoitem.channel = item.channel

        return itemlist
    else:
        logger.info("url=" + item.url)

        # Make the request
        headers = {'Referer': item.extra}

        data = httptools.downloadpage(item.url, headers=headers).data
        url = scrapertools.find_single_match(data,
                                             '<a href="([^"]+)" target="_blank"><button class="btn btn-primary">visitar enlace</button>')
        url = urlparse.urljoin("https://www.plusdede.com", url)

        headers = {'Referer': item.url}
        media_url = httptools.downloadpage(url, headers=headers, follow_redirects=False).headers.get("location")
        # logger.info("media_url="+media_url)

        itemlist = servertools.find_video_items(data=media_url)

        for videoitem in itemlist:
            videoitem.title = item.title
            videoitem.fulltitle = item.fulltitle
            videoitem.thumbnail = item.thumbnail
            videoitem.channel = item.channel

        # Mark as seen
        try:
            checkseen(item)
        except:
            pass

        return itemlist


def checkseen(item):
    logger.info(item)
    url_temp = ""
    if item.tipo == "8":
        url_temp = "https://www.plusdede.com/set/episode/" + item.data_id + "/seen"
        tipo_str = "series"
        headers = {"User-Agent": "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) "
                                 "Chrome/61.0.3163.100 Safari/537.36", "Referer": "https://www.plusdede.com/serie/",
                   "X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": item.token}
    else:
        url_temp = "https://www.plusdede.com/set/usermedia/" + item.tipo + "/" + item.data_id + "/seen"
        tipo_str = "pelis"
        headers = {"User-Agent": "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) "
                                 "Chrome/61.0.3163.100 Safari/537.36", "Referer": "https://www.plusdede.com/serie/",
                   "X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": item.token}
    data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers, replace_headers=True).data
    return True


def infosinopsis(item):
    logger.info()

    data = httptools.downloadpage(item.url).data

    scrapedtitle = scrapertools.find_single_match(data, '<div class="media-title">([^<]+)</div>')
    scrapedvalue = scrapertools.find_single_match(data, '<span class="value">([^<]+)</span>')
    scrapedyear = scrapertools.find_single_match(data,
                                                 '<strong>Fecha</strong>\s*<div class="mini-content">([^<]+)</div>').strip()
    scrapedduration = scrapertools.htmlclean(scrapertools.find_single_match(data,
                                                                            '<strong>Duración</strong>\s*<div class="mini-content">([^<]+)</div>').strip().replace(
        " ", "").replace("\n", ""))
    scrapedplot = scrapertools.find_single_match(data, '<div class="plot expandable">([^<]+)<div').strip()
    generos = scrapertools.find_single_match(data, '<strong>Género</strong>\s*<ul>(.*?)</ul>')
    scrapedgenres = re.compile('<li>([^<]+)</li>', re.DOTALL).findall(generos)
    scrapedcasting = re.compile(
        '<a href="https://www.plusdede.com/star/[^"]+"><div class="text-main">([^<]+)</div></a>\s*<div class="text-sub">\s*([^<]+)</div>',
        re.DOTALL).findall(data)
    title = scrapertools.htmlclean(scrapedtitle)
    plot = "[B]Año: [/B]" + scrapedyear
    plot += " [B]Duración: [/B]" + scrapedduration
    plot += " [B]Puntuación usuarios: [/B]" + scrapedvalue
    plot += "\n[B]Géneros: [/B]" + ", ".join(scrapedgenres)
    plot += "\n\n[B]Sinopsis:[/B]\n" + scrapertools.htmlclean(scrapedplot)
    plot += "\n\n[B]Casting:[/B]\n"
    for actor, papel in scrapedcasting:
        plot += actor + " (" + papel.strip() + ")\n"

    tbd = TextBox("DialogTextViewer.xml", os.getcwd(), "Default")
    tbd.ask(title, plot)
    del tbd
    return


try:
    import xbmcgui


    class TextBox(xbmcgui.WindowXML):
        """ Create a skinned textbox window """

        def __init__(self, *args, **kwargs):
            pass

        def onInit(self):
            try:
                self.getControl(5).setText(self.text)
                self.getControl(1).setLabel(self.title)
            except:
                pass

        def onClick(self, controlId):
            pass

        def onFocus(self, controlId):
            pass

        def onAction(self, action):
            if action == 7:
                self.close()

        def ask(self, title, text):
            self.title = title
            self.text = text
            self.doModal()
except:
    pass


# Link scoring: higher values are shown first.

def valora_calidad(video, audio):
    prefs_video = ['hdmicro', 'hd1080', 'hd720', 'hdrip', 'dvdrip', 'rip', 'tc-screener', 'ts-screener']
    prefs_audio = ['dts', '5.1', 'rip', 'line', 'screener']

    video = ''.join(video.split()).lower()
    # pts = (9 - prefs_video.index(video) if video in prefs_video else 1) * 10
    if video in prefs_video:
        pts = (9 - prefs_video.index(video)) * 10
    else:
        pts = (9 - 1) * 10

    audio = ''.join(audio.split()).lower()
    # pts += 9 - prefs_audio.index(audio) if audio in prefs_audio else 1
    # The audio score adds to the video score (per the one-liner above);
    # it must not overwrite it.
    if audio in prefs_audio:
        pts += 9 - prefs_audio.index(audio)
    else:
        pts += 9 - 1

    return pts
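
# Example (with the additive audio score above): valora_calidad('HD 1080', 'DTS')
# normalizes to 'hd1080' (index 1) and 'dts' (index 0), giving (9-1)*10 + 9 = 89.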


def valora_idioma(idioma_0, idioma_1):
    prefs = ['spanish', 'latino', 'catalan', 'english', 'french']
    # pts = (9 - prefs.index(idioma_0) if idioma_0 in prefs else 1) * 10
    if idioma_0 in prefs:
        pts = (9 - prefs.index(idioma_0)) * 10
    else:
        pts = (9 - 1) * 10

    if idioma_1 != '':  # there are subtitles
        idioma_1 = idioma_1.replace(' SUB', '')

        # pts += 8 - prefs.index(idioma_1) if idioma_1 in prefs else 1
        if idioma_1 in prefs:
            pts += 8 - prefs.index(idioma_1)
        else:
            pts += 8 - 1

    else:
        pts += 9  # links without subtitles rank first
    return pts
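
# Example: valora_idioma('spanish', '') = (9-0)*10 + 9 = 99, while
# valora_idioma('latino', 'english') = (9-1)*10 + (8-3) = 85.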


def plusdede_check(item):
    if item.tipo_esp == "lista":
        url_temp = "https://www.plusdede.com/listas/addmediapopup/" + item.tipo + "/" + item.idtemp + "?popup=1"
        data = httptools.downloadpage(url_temp).data

        patron = '<div class="lista model" data-model="10" data-id="([^"]+)">+'
        patron += '.*?<a href="/lista/[^"]+">([^<]+)</a>+'
        matches = re.compile(patron, re.DOTALL).findall(data)
        itemlist = []
        for id_lista, nombre_lista in matches:
            itemlist.append(Item(channel=item.channel, action="plusdede_check", tipo=item.tipo, tipo_esp="add_list",
                                 token=item.token, title=nombre_lista, idlista=id_lista, idtemp=item.idtemp))
        if len(itemlist) < 1:
            itemlist.append(Item(channel=item.channel, action="", title="No tienes ninguna lista creada por ti!"))
        return itemlist
    else:

        if item.tipo == "10" or item.tipo == "lista":
            url_temp = "https://www.plusdede.com/set/lista/" + item.idtemp + "/" + item.valor
        else:
            if (item.tipo_esp == "add_list"):
                url_temp = "https://www.plusdede.com/set/listamedia/" + item.idlista + "/add/" + item.tipo + "/" + item.idtemp
            else:
                url_temp = "https://www.plusdede.com/set/usermedia/" + item.tipo + "/" + item.idtemp + "/" + item.valor
        # httptools.downloadpage(url_temp, post="id="+item.idtemp)
        if item.tipo == "5":
            tipo_str = "series"
        elif item.tipo == "lista":
            tipo_str = "listas"
        else:
            tipo_str = "pelis"
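        # The X-CSRF-TOKEN header echoes the "_token" meta value scraped from the
        # page (Laravel-style CSRF); the success check below expects the literal
        # response "1".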
headers = {"User-Agent":"Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) "
|
||||
"Chrome/61.0.3163.100 Safari/537.36","Referer": "https://www.plusdede.com/" + tipo_str, "X-Requested-With": "XMLHttpRequest",
|
||||
"X-CSRF-TOKEN": item.token}
|
||||
data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers,
|
||||
replace_headers=True).data.strip()
|
||||
dialog = platformtools
|
||||
dialog.ok = platformtools.dialog_ok
|
||||
if data == "1":
|
||||
if item.valor != "nothing":
|
||||
dialog.ok('SUCCESS', 'Marca realizada con éxito!')
|
||||
elif item.valor == "nothing":
|
||||
dialog.ok('SUCCESS', 'Marca eliminada con éxito!')
|
||||
elif item.valor == "unfollow":
|
||||
dialog.ok('SUCCESS', 'Has dejado de seguir esta lista!')
|
||||
elif item.valor == "follow":
|
||||
dialog.ok('SUCCESS', 'Has comenzado a seguir esta lista!')
|
||||
elif item.tipo_esp == "add_list":
|
||||
dialog.ok('SUCCESS', 'Añadido a la lista!')
|
||||
else:
|
||||
dialog.ok('ERROR', 'No se pudo realizar la acción!')
|
||||
plugin.video.alfa/channels/repelis.json
@@ -3,7 +3,7 @@
    "name": "Repelis",
    "active": true,
    "adult": false,
    "language": ["lat","cast","vo"],
    "language": ["lat","cast"],
    "thumbnail": "https://s8.postimg.cc/yem7wyfw1/repelis1.png",
    "banner": "https://s8.postimg.cc/p6tzg9gjl/repelis2.png",
    "categories": [
76  plugin.video.alfa/channels/rexpelis.json  Normal file
@@ -0,0 +1,76 @@
{
    "id": "rexpelis",
    "name": "Rexpelis",
    "active": true,
    "adult": false,
    "language": ["lat","cast"],
    "thumbnail": "https://i.postimg.cc/MMJ5g9Y1/rexpelis1.png",
    "banner": "https://i.postimg.cc/XrXs5GJB/rexpelis2.png",
    "categories": [
        "movie"
    ],
    "settings": [
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostrar enlaces en idioma...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No filtrar",
                "LAT",
                "ESP",
                "VO"
            ]
        },
        {
            "id": "modo_grafico",
            "type": "bool",
            "label": "Buscar información extra",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_latino",
            "type": "bool",
            "label": "Incluir en Novedades - Latino",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Incluir en Novedades - Peliculas",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_infantiles",
            "type": "bool",
            "label": "Incluir en Novedades - Infantiles",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_terror",
            "type": "bool",
            "label": "Incluir en Novedades - terror",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
299  plugin.video.alfa/channels/rexpelis.py  Normal file
@@ -0,0 +1,299 @@
# -*- coding: utf-8 -*-
# -*- Channel Rexpelis -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

from channelselector import get_thumb
from channels import autoplay
from channels import filtertools
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger, platformtools


idio = {'es-mx': 'LAT','es-es': 'ESP','en': 'VO'}
cali = {'poor': 'SD','low': 'SD','medium': 'HD','high': 'HD'}

list_language = idio.values()
list_quality = ["SD","HD"]
list_servers = ['rapidvideo', 'streamango', 'fastplay', 'openload', 'netu', 'vidoza', 'uptobox']


__channel__ = 'rexpelis'

host = "https://www.rexpelis.com"

try:
    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
except:
    __modo_grafico__ = True


def mainlist(item):
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = []
    data = httptools.downloadpage(host).data
    matches = scrapertools.find_multiple_matches(data, 'cant-genre">([^<]+)')
    cantidad = 0
    for cantidad1 in matches:
        cantidad += int(cantidad1)
    itemlist.append(Item(channel = item.channel, title = "Actualizadas", action = "peliculas", url = host, page=1, type ="movie", thumbnail = get_thumb("updated", auto = True)))
    itemlist.append(Item(channel = item.channel, title = "Estrenos", action = "estrenos", url = host + "/estrenos", page=1, thumbnail = get_thumb("premieres", auto = True)))
    itemlist.append(Item(channel = item.channel, title = "Por género (Total películas: %s)" %cantidad, action = "generos", url = host, extra = "Genero", thumbnail = get_thumb("genres", auto = True) ))
    itemlist.append(Item(channel = item.channel, title = "Por año", action = "annos", url = host, extra = "Genero", thumbnail = get_thumb("year", auto = True) ))
    itemlist.append(Item(channel = item.channel, title = ""))
    itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "/search?term=", thumbnail = get_thumb("search", auto = True)))
    itemlist.append(item.clone(title="Configurar canal...", text_color="gold", action="configuracion", folder=False))
    autoplay.show_option(item.channel, itemlist)
    return itemlist


def configuracion(item):
    ret = platformtools.show_channel_settings()
    platformtools.itemlist_refresh()
    return ret


def estrenos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = 'item-pelicula.*?href="([^"]+).*?'
    patron += 'src="([^"]+).*?'
    patron += '<p>([^<]+).*?'
    patron += '<span>([^<]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
        scrapedtitle = scrapedtitle.replace("Película ","")
        itemlist.append(Item(channel = item.channel,
                             action = "findvideos",
                             contentTitle = scrapedtitle,
                             infoLabels = {'year':scrapedyear},
                             thumbnail = scrapedthumbnail,
                             title = scrapedtitle + " (%s)" %scrapedyear,
                             url = scrapedurl
                             ))
    tmdb.set_infoLabels(itemlist)
    return itemlist


def search(item, texto):
    logger.info()
    item.url = host + "/suggest?que=" + texto
    item.extra = "busca"
    item.page = 1
    item.texto = texto
    if texto != '':
        return sub_search(item)
    else:
        return []


def sub_search(item):
    logger.info()
    itemlist = []
    url = item.url
    headers = [
        ['X-Requested-With', 'XMLHttpRequest']
    ]
    data = httptools.downloadpage(item.url).data
    token = scrapertools.find_single_match(data, 'csrf-token" content="([^"]+)')
    data = httptools.downloadpage(item.url + "&_token=" + token, headers=headers).data
    logger.info("Intel33 %s" %data)
    data_js = jsontools.load(data)["data"]["m"]
    logger.info("Intel44 %s" %data_js)
    for js in data_js:
        itemlist.append(Item(channel = item.channel,
                             action = "findvideos",
                             contentTitle = js["title"],
                             infoLabels = {'year': js["release_year"]},
                             thumbnail = js["cover"],
                             title = js["title"] + " (%s)" %js["release_year"],
                             url = js["slug"]
                             ))
    tmdb.set_infoLabels(itemlist)
    # pagination
    if len(itemlist)>0:
        itemlist.append(Item(channel = item.channel,
                             action = "peliculas",
                             page = item.page + 1,
                             title = "Página siguiente >>",
                             url = item.url
                             ))
    return itemlist
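
# The /suggest endpoint answers with JSON shaped roughly like (illustrative, trimmed):
#   {"data": {"m": [{"title": ..., "release_year": ..., "cover": ..., "slug": ...}]}}
# which is why sub_search() reads jsontools.load(data)["data"]["m"].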


def peliculas(item):
    logger.info()
    itemlist = []
    headers = [
        ['X-Requested-With', 'XMLHttpRequest']
    ]
    data = httptools.downloadpage(item.url).data
    token = scrapertools.find_single_match(data, 'csrf-token" content="([^"]+)')
    post = "page=%s&type=%s&_token=%s" %(item.page, item.type, token)
    if item.slug:
        post += "&slug=%s" %item.slug
    logger.info("Intel11 %s" %post)
    data = httptools.downloadpage(host + "/pagination", post=post, headers=headers).data
    patron = 'href="([^"]+)".*?'
    patron += 'src="([^"]+)".*?'
    patron += '<p>([^<]+).*?'
    patron += '<span>([^<]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
        itemlist.append(Item(channel = item.channel,
                             action = "findvideos",
                             contentTitle = scrapedtitle,
                             infoLabels = {'year':scrapedyear},
                             thumbnail = scrapedthumbnail,
                             title = scrapedtitle + " (%s)" %scrapedyear,
                             url = scrapedurl
                             ))
    tmdb.set_infoLabels(itemlist)
    # pagination
    if len(itemlist)>0:
        itemlist.append(Item(channel = item.channel,
                             action = "peliculas",
                             page = item.page + 1,
                             title = "Página siguiente >>",
                             url = item.url
                             ))
    return itemlist
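
# Listing pages are fetched with a POST to /pagination; an illustrative body
# (placeholder token) would be:
#   "page=2&type=movie&_token=<csrf>"    plus "&slug=..." on genre/year views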


def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria in ['peliculas','latino']:
            item.url = host
            item.page = 1
        elif categoria == 'infantiles':
            item.url = host + '/genero/animacion'
            item.page = 1
        elif categoria == 'terror':
            item.url = host + '/genero/terror'
            item.page = 1
        itemlist = peliculas(item)
        if "Página" in itemlist[-1].title:  # the pagination item is titled "Página siguiente >>"
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist


def generos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    bloque = scrapertools.find_single_match(data, "genressomb.*?</ul>")
    patron = 'href="([^"]+)".*?'
    patron += '</i>([^<]+).*?'
    patron += 'cant-genre">([^<]+)'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for url, titulo, cantidad in matches:
        itemlist.append(Item(channel = item.channel,
                             action = "peliculas_gen",
                             page = 1,
                             slug = titulo,
                             title = titulo + "(%s)" %cantidad,
                             type = "genres",
                             url = url
                             ))
    return itemlist


def peliculas_gen(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = 'item-pelicula.*?href="([^"]+)".*?'
    patron += 'src="([^"]+)".*?'
    patron += '<p>([^<]+).*?'
    patron += '<span>([^<]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
        itemlist.append(Item(channel = item.channel,
                             action = "findvideos",
                             contentTitle = scrapedtitle,
                             infoLabels = {'year':scrapedyear},
                             thumbnail = scrapedthumbnail,
                             title = scrapedtitle + " (%s)" %scrapedyear,
                             url = scrapedurl
                             ))
    tmdb.set_infoLabels(itemlist)
    return itemlist


def annos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    bloque = scrapertools.find_single_match(data, 'div class="years.*?</ul>')
    patron = 'href="([^"]+)"'
    patron += '>([^<]+).*?'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for url, titulo in matches:
        itemlist.append(Item(channel = item.channel,
                             action = "peliculas",
                             page = 1,
                             slug = titulo,
                             title = titulo,
                             type = "year",
                             url = url
                             ))
    return itemlist


def findvideos(item):
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '(?i)<iframe.*?src="([^"]+).*?'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl in matches:
        titulo = "Ver en: %s"
        itemlist.append(
            item.clone(channel = item.channel,
                       action = "play",
                       title = titulo,
                       url = scrapedurl
                       ))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    tmdb.set_infoLabels(itemlist, __modo_grafico__)
    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay
    autoplay.start(itemlist, item)

    if itemlist:
        itemlist.append(Item(channel = item.channel))
        itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                                   text_color="magenta"))
        # "Add this movie to the KODI videolibrary" option
        if item.extra != "library":
            if config.get_videolibrary_support():
                itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
                                     action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
                                     contentTitle = item.contentTitle
                                     ))
    return itemlist
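
# servertools.get_servers_itemlist() matches each iframe URL to a known server and
# fills the "Ver en: %s" placeholder via the title lambda, e.g. "Ver en: Openload".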


def play(item):
    logger.info()
    item.thumbnail = item.contentThumbnail
    return [item]
plugin.video.alfa/channels/yape.py
@@ -15,7 +15,7 @@ from platformcode import config, logger, platformtools


idio = {'https://cdn.yape.nu//languajes/la.png': 'LAT','https://cdn.yape.nu//languajes/es.png': 'ESP','https://cdn.yape.nu//languajes/en_es.png': 'VOSE'}
cali = {'HD 1080p': 'HD 1080p','TS Screener HQ':'TS Screener HQ', 'BR Screnner':'BR Screnner','HD Rip':'HD Rip','DVD Screnner':'DVD Screnner'}
cali = {'TS Screnner': 'TS Screnner', 'HD 1080p': 'HD 1080p','TS Screener HQ':'TS Screener HQ', 'BR Screnner':'BR Screnner','HD Rip':'HD Rip','DVD Screnner':'DVD Screnner'}

list_language = idio.values()
list_quality = cali.values()
@@ -34,11 +34,13 @@ except:

def mainlist(item):
    logger.info()
    data = httptools.downloadpage(host + "/catalogue?sort=latest").data
    total = scrapertools.find_single_match(data, 'class="font-weight-bold mr-2">([^<]+)')
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = []
    itemlist.append(Item(channel = item.channel, title = "Actualizadas", action = "peliculas", url = host + "/catalogue?sort=time_update&page=", page=1, thumbnail = get_thumb("updated", auto = True)))
    itemlist.append(Item(channel = item.channel, title = "Mas vistas", action = "peliculas", url = host + "/catalogue?sort=mosts-today&page=", page=1, thumbnail = get_thumb("more watched", auto = True)))
    itemlist.append(Item(channel = item.channel, title = "Ultimas agregadas", action = "peliculas", url = host + "/catalogue?sort=latest&page=", page=1, thumbnail = get_thumb("last", auto = True)))
    itemlist.append(Item(channel = item.channel, title = "Ultimas agregadas - (Total películas: %s)" %total, action = "peliculas", url = host + "/catalogue?sort=latest&page=", page=1, thumbnail = get_thumb("last", auto = True)))
    itemlist.append(Item(channel = item.channel, title = "Por género", action = "generos", url = host, extra = "Genero", thumbnail = get_thumb("genres", auto = True) ))
    itemlist.append(Item(channel = item.channel, title = ""))
    itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "/search?term=", thumbnail = get_thumb("search", auto = True)))
Binary file not shown. (Before: 2.7 MiB | After: 978 KiB)
BIN  plugin.video.alfa/fanart1.jpg  Normal file
Binary file not shown. (After: 2.7 MiB)
@@ -1,43 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "((?:k-bagi.com|diskokosmiko.mx)/[^\\s'\"]+)",
                "url": "http://\\1"
            }
        ]
    },
    "free": true,
    "id": "kbagi",
    "name": "kbagi",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "http://i.imgur.com/EjbfM7p.png?1",
    "version": 1
}
@@ -1,55 +0,0 @@
# -*- coding: utf-8 -*-

from channels import kbagi
from core import httptools
from core import jsontools
from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    domain = "diskokosmiko.mx"
    if "k-bagi.com" in page_url:
        domain = "kbagi.com"
    logueado, error_message = kbagi.login(domain)
    if not logueado:
        return False, error_message

    data = httptools.downloadpage(page_url).data
    if ("File was deleted" or "Not Found" or "File was locked by administrator") in data:
        return False, "[%s] El archivo no existe o ha sido borrado" %domain

    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)

    video_urls = []
    data = httptools.downloadpage(page_url).data
    host = "http://k-bagi.com"
    host_string = "k-bagi"
    if "diskokosmiko.mx" in page_url:
        host = "http://diskokosmiko.mx"
        host_string = "diskokosmiko"

    url = scrapertools.find_single_match(data, '<form action="([^"]+)" class="download_form"')
    if url:
        url = host + url
        fileid = url.rsplit("f=", 1)[1]
        token = scrapertools.find_single_match(data,
                                               '<div class="download_container">.*?name="__RequestVerificationToken".*?value="([^"]+)"')
        post = "fileId=%s&__RequestVerificationToken=%s" % (fileid, token)
        headers = {'X-Requested-With': 'XMLHttpRequest'}
        data = httptools.downloadpage(url, post, headers).data
        data = jsontools.load(data)
        mediaurl = data.get("DownloadUrl")
        extension = data.get("Extension")

        video_urls.append([".%s [%s]" % (extension, host_string), mediaurl])

    for video_url in video_urls:
        logger.info(" %s - %s" % (video_url[0], video_url[1]))

    return video_urls