Merge pull request #128 from Alfa-beto/Fixes

Adjustments to various channels
This commit is contained in:
Alfa
2017-10-14 20:42:41 -04:00
committed by GitHub
20 changed files with 947 additions and 488 deletions

View File

@@ -0,0 +1,81 @@
{
"id": "kbagi",
"name": "Kbagi/Diskokosmiko",
"language": ["cast", "lat"],
"active": true,
"adult": false,
"version": 1,
"thumbnail": "http://i.imgur.com/EjbfM7p.png?1",
"banner": "copiapop.png",
"categories": [
"movie",
"tvshow"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "kbagiuser",
"type": "text",
"color": "0xFF25AA48",
"label": "Usuario Kbagi",
"enabled": true,
"visible": true
},
{
"id": "kbagipassword",
"type": "text",
"color": "0xFF25AA48",
"hidden": true,
"label": "Password Kbagi",
"enabled": "!eq(-1,'')",
"visible": true
},
{
"id": "diskokosmikouser",
"type": "text",
"color": "0xFFC52020",
"label": "Usuario Diskokosmiko",
"enabled": true,
"visible": true
},
{
"id": "diskokosmikopassword",
"type": "text",
"color": "0xFFC52020",
"hidden": true,
"label": "Password Diskokosmiko",
"enabled": "!eq(-1,'')",
"visible": true
},
{
"id": "adult_content",
"type": "bool",
"color": "0xFFd50b0b",
"label": "Mostrar contenido adulto en las búsquedas",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"Sin color",
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
}
]
}
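
Each setting id above is read back by the channel through config.get_setting, exactly as the channel code below does. A minimal sketch, assuming the Alfa runtime is importable (the returned values depend on what the user saved):

from platformcode import config

# "text" controls come back as strings, "bool" as booleans,
# and "list" controls as the selected index into "lvalues"
user = config.get_setting("kbagiuser", "kbagi")
adult = config.get_setting("adult_content", "kbagi")
perfil = config.get_setting("perfil", "kbagi")  # e.g. 3 -> "Perfil 1"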

View File

@@ -0,0 +1,426 @@
# -*- coding: utf-8 -*-
import re
import threading
from core import filetools
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import config, logger
__perfil__ = config.get_setting('perfil', "kbagi")
# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFF088A08'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFF088A08'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFF088A08']]
if __perfil__ - 1 >= 0:
color1, color2, color3, color4, color5 = perfil[__perfil__ - 1]
else:
color1 = color2 = color3 = color4 = color5 = ""
adult_content = config.get_setting("adult_content", "kbagi")
def login(pagina):
logger.info()
try:
user = config.get_setting("%suser" % pagina.split(".")[0], "kbagi")
password = config.get_setting("%spassword" % pagina.split(".")[0], "kbagi")
if pagina == "kbagi.com":
if user == "" and password == "":
return False, "Para ver los enlaces de kbagi es necesario registrarse en kbagi.com"
elif user == "" or password == "":
return False, "kbagi: Usuario o contraseña en blanco. Revisa tus credenciales"
else:
if user == "" or password == "":
return False, "DiskoKosmiko: Usuario o contraseña en blanco. Revisa tus credenciales"
data = httptools.downloadpage("http://%s" % pagina).data
if re.search(r'(?i)%s' % re.escape(user), data):  # escape the username so regex metacharacters can't break the check
return True, ""
token = scrapertools.find_single_match(data, 'name="__RequestVerificationToken".*?value="([^"]+)"')
post = "__RequestVerificationToken=%s&UserName=%s&Password=%s" % (token, user, password)
headers = {'X-Requested-With': 'XMLHttpRequest'}
url_log = "http://%s/action/Account/Login" % pagina
data = httptools.downloadpage(url_log, post, headers).data
if "redirectUrl" in data:
logger.info("Login correcto")
return True, ""
else:
logger.error("Error en el login")
return False, "Nombre de usuario no válido. Comprueba tus credenciales"
except:
import traceback
logger.error(traceback.format_exc())
return False, "Error durante el login. Comprueba tus credenciales"
def mainlist(item):
logger.info()
itemlist = []
item.text_color = color1
logueado, error_message = login("kbagi.com")
if not logueado:
itemlist.append(item.clone(title=error_message, action="configuracion", folder=False))
else:
item.extra = "http://kbagi.com"
itemlist.append(item.clone(title="kbagi", action="", text_color=color2))
itemlist.append(
item.clone(title=" Búsqueda", action="search", url="http://kbagi.com/action/SearchFiles"))
itemlist.append(item.clone(title=" Colecciones", action="colecciones",
url="http://kbagi.com/action/home/MoreNewestCollections?pageNumber=1"))
itemlist.append(item.clone(title=" Búsqueda personalizada", action="filtro",
url="http://kbagi.com/action/SearchFiles"))
itemlist.append(item.clone(title=" Mi cuenta", action="cuenta"))
item.extra = "http://diskokosmiko.mx/"
itemlist.append(item.clone(title="DiskoKosmiko", action="", text_color=color2))
itemlist.append(item.clone(title=" Búsqueda", action="search", url="http://diskokosmiko.mx/action/SearchFiles"))
itemlist.append(item.clone(title=" Colecciones", action="colecciones",
url="http://diskokosmiko.mx/action/home/MoreNewestCollections?pageNumber=1"))
itemlist.append(item.clone(title=" Búsqueda personalizada", action="filtro",
url="http://diskokosmiko.mx/action/SearchFiles"))
itemlist.append(item.clone(title=" Mi cuenta", action="cuenta"))
itemlist.append(item.clone(action="", title=""))
folder_thumb = filetools.join(config.get_data_path(), 'thumbs_kbagi')
files = filetools.listdir(folder_thumb)
if files:
itemlist.append(
item.clone(title="Eliminar caché de imágenes (%s)" % len(files), action="delete_cache", text_color="red"))
itemlist.append(item.clone(title="Configuración del canal", action="configuracion", text_color="gold"))
return itemlist
def search(item, texto):
logger.info()
item.post = "Mode=List&Type=Video&Phrase=%s&SizeFrom=0&SizeTo=0&Extension=&ref=pager&pageNumber=1" % texto.replace(
" ", "+")
try:
return listado(item)
except:
import sys, traceback
for line in sys.exc_info():
logger.error("%s" % line)
logger.error(traceback.format_exc())
return []
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def listado(item):
logger.info()
itemlist = []
if item.post:
    # Gallery mode exposes the thumbnail URLs that List mode omits
    data_thumb = httptools.downloadpage(item.url, item.post.replace("Mode=List", "Mode=Gallery")).data
else:
    data_thumb = ""
    item.url = item.url.replace("/gallery,", "/list,")
data = httptools.downloadpage(item.url, item.post).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
folder = filetools.join(config.get_data_path(), 'thumbs_kbagi')
patron = '<div class="size">(.*?)</div></div></div>'
bloques = scrapertools.find_multiple_matches(data, patron)
for block in bloques:
if "adult_info" in block and not adult_content:
continue
size = scrapertools.find_single_match(block, '<p>([^<]+)</p>')
scrapedurl, scrapedtitle = scrapertools.find_single_match(block,
'<div class="name"><a href="([^"]+)".*?>([^<]+)<')
scrapedthumbnail = scrapertools.find_single_match(block, "background-image:url\('([^']+)'")
if scrapedthumbnail:
try:
thumb = scrapedthumbnail.split("-", 1)[0].replace("?", "\?")
if data_thumb:
url_thumb = scrapertools.find_single_match(data_thumb, "(%s[^']+)'" % thumb)
else:
url_thumb = scrapedthumbnail
scrapedthumbnail = filetools.join(folder, "%s.jpg" % url_thumb.split("e=", 1)[1][-20:])
except:
scrapedthumbnail = ""
if scrapedthumbnail:
t = threading.Thread(target=download_thumb, args=[scrapedthumbnail, url_thumb])
t.setDaemon(True)
t.start()
else:
scrapedthumbnail = item.extra + "/img/file_types/gallery/movie.png"
scrapedurl = item.extra + scrapedurl
title = "%s (%s)" % (scrapedtitle, size)
if "adult_info" in block:
title += " [COLOR %s][+18][/COLOR]" % color4
plot = scrapertools.find_single_match(block, '<div class="desc">(.*?)</div>')
if plot:
plot = scrapertools.decodeHtmlentities(plot)
new_item = Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, text_color=color2,
extra=item.extra, infoLabels={'plot': plot}, post=item.post)
if item.post:
try:
new_item.folderurl, new_item.foldername = scrapertools.find_single_match(block,
'<p class="folder"><a href="([^"]+)".*?>([^<]+)<')
except:
pass
else:
new_item.folderurl = item.url.rsplit("/", 1)[0]
new_item.foldername = item.foldername
new_item.fanart = item.thumbnail
itemlist.append(new_item)
next_page = scrapertools.find_single_match(data, '<div class="pageSplitterBorder" data-nextpage-number="([^"]+)"')
if next_page:
if item.post:
post = re.sub(r'pageNumber=(\d+)', "pageNumber=" + next_page, item.post)
url = item.url
else:
url = re.sub(r',\d+\?ref=pager', ",%s?ref=pager" % next_page, item.url)
post = ""
itemlist.append(Item(channel=item.channel, action="listado", title=">> Página Siguiente (%s)" % next_page,
url=url, post=post, extra=item.extra))
return itemlist
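
Pagination reuses the same POST body and only bumps pageNumber, so each "Página Siguiente" entry repeats the original query. An illustrative before/after (the search phrase is hypothetical):

import re

post = "Mode=List&Type=Video&Phrase=matrix&SizeFrom=0&SizeTo=0&Extension=&ref=pager&pageNumber=1"
post = re.sub(r'pageNumber=(\d+)', "pageNumber=2", post)
# post now ends in "...&ref=pager&pageNumber=2"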
def findvideos(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="play", title="Reproducir/Descargar", server="kbagi"))
usuario = scrapertools.find_single_match(item.url, '%s/([^/]+)/' % item.extra)
url_usuario = item.extra + "/" + usuario
if item.folderurl and not item.folderurl.startswith(item.extra):
item.folderurl = item.extra + item.folderurl
if item.post:
itemlist.append(item.clone(action="listado", title="Ver colección: %s" % item.foldername,
url=item.folderurl + "/gallery,1,1?ref=pager", post=""))
data = httptools.downloadpage(item.folderurl).data
token = scrapertools.find_single_match(data,
'data-action="followChanged.*?name="__RequestVerificationToken".*?value="([^"]+)"')
collection_id = item.folderurl.rsplit("-", 1)[1]
post = "__RequestVerificationToken=%s&collectionId=%s" % (token, collection_id)
url = "%s/action/Follow/Follow" % item.extra
title = "Seguir Colección: %s" % item.foldername
if "dejar de seguir" in data:
title = "Dejar de seguir la colección: %s" % item.foldername
url = "%s/action/Follow/UnFollow" % item.extra
itemlist.append(item.clone(action="seguir", title=title, url=url, post=post, text_color=color5, folder=False))
itemlist.append(
item.clone(action="colecciones", title="Ver colecciones del usuario: %s" % usuario, url=url_usuario))
return itemlist
def colecciones(item):
logger.info()
from core import jsontools
itemlist = []
usuario = False
data = httptools.downloadpage(item.url).data
if "Ver colecciones del usuario" not in item.title and not item.index:
data = jsontools.load(data)["Data"]
content = data["Content"]
content = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", content)
else:
usuario = True
if item.follow:
content = scrapertools.find_single_match(data,
'id="followed_collections"(.*?)<div id="recommended_collections"')
else:
content = scrapertools.find_single_match(data,
'<div id="collections".*?<div class="collections_list(.*?)<div class="collections_list')
content = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", content)
patron = '<a class="name" href="([^"]+)".*?>([^<]+)<.*?src="([^"]+)".*?<p class="info">(.*?)</p>'
matches = scrapertools.find_multiple_matches(content, patron)
index = ""
if item.index and item.index != "0":
matches = matches[item.index:item.index + 20]
if len(matches) > item.index + 20:
index = item.index + 20
elif len(matches) > 20:
matches = matches[:20]
index = 20
folder = filetools.join(config.get_data_path(), 'thumbs_kbagi')
for url, scrapedtitle, thumb, info in matches:
url = item.extra + url + "/gallery,1,1?ref=pager"
title = "%s (%s)" % (scrapedtitle, scrapertools.htmlclean(info))
try:
scrapedthumbnail = filetools.join(folder, "%s.jpg" % thumb.split("e=", 1)[1][-20:])
except:
try:
scrapedthumbnail = filetools.join(folder, "%s.jpg" % thumb.split("/thumbnail/", 1)[1][-20:])
thumb = thumb.replace("/thumbnail/", "/")
except:
scrapedthumbnail = ""
if scrapedthumbnail:
t = threading.Thread(target=download_thumb, args=[scrapedthumbnail, thumb])
t.setDaemon(True)
t.start()
else:
scrapedthumbnail = thumb
itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url,
thumbnail=scrapedthumbnail, text_color=color2, extra=item.extra,
foldername=scrapedtitle))
if not usuario and data.get("NextPageUrl"):
url = item.extra + data["NextPageUrl"]
itemlist.append(item.clone(title=">> Página Siguiente", url=url, text_color=""))
elif index:
itemlist.append(item.clone(title=">> Página Siguiente", url=item.url, index=index, text_color=""))
return itemlist
def seguir(item):
logger.info()
data = httptools.downloadpage(item.url, item.post)
message = "Colección seguida"
if "Dejar" in item.title:
message = "La colección ya no se sigue"
if data.sucess and config.get_platform() != "plex":  # 'sucess' (sic) is the attribute name exposed by core.httptools
from platformcode import platformtools
platformtools.dialog_notification("Acción correcta", message)
def cuenta(item):
logger.info()
import urllib
itemlist = []
web = "kbagi"
if "diskokosmiko" in item.extra:
web = "diskokosmiko"
logueado, error_message = login("diskokosmiko.mx")
if not logueado:
itemlist.append(item.clone(title=error_message, action="configuracion", folder=False))
return itemlist
user = config.get_setting("%suser" % web, "kbagi")
user = unicode(user, "utf8").lower().encode("utf8")
url = item.extra + "/" + urllib.quote(user)
data = httptools.downloadpage(url).data
num_col = scrapertools.find_single_match(data, 'name="Has_collections" value="([^"]+)"')
if num_col != "0":
itemlist.append(item.clone(action="colecciones", url=url, index="0", title="Ver mis colecciones",
text_color=color5))
else:
itemlist.append(item.clone(action="", title="No tienes ninguna colección", text_color=color4))
num_follow = scrapertools.find_single_match(data, 'name="Follows_collections" value="([^"]+)"')
if num_follow != "0":
itemlist.append(item.clone(action="colecciones", url=url, index="0", title="Colecciones que sigo",
text_color=color5, follow=True))
else:
itemlist.append(item.clone(action="", title="No sigues ninguna colección", text_color=color4))
return itemlist
def filtro(item):
logger.info()
list_controls = []
valores = {}
dict_values = None
list_controls.append({'id': 'search', 'label': 'Texto a buscar', 'enabled': True, 'color': '0xFFC52020',
'type': 'text', 'default': '', 'visible': True})
list_controls.append({'id': 'tipo', 'label': 'Tipo de búsqueda', 'enabled': True, 'color': '0xFFFF8000',
'type': 'list', 'default': -1, 'visible': True})
list_controls[1]['lvalues'] = ['Aplicación', 'Archivo', 'Documento', 'Imagen', 'Música', 'Vídeo', 'Todos']
valores['tipo'] = ['Application', 'Archive', 'Document', 'Image', 'Music', 'Video', '']
list_controls.append({'id': 'ext', 'label': 'Extensión', 'enabled': True, 'color': '0xFFF4FA58',
'type': 'text', 'default': '', 'visible': True})
list_controls.append({'id': 'tmin', 'label': 'Tamaño mínimo (MB)', 'enabled': True, 'color': '0xFFCC2EFA',
'type': 'text', 'default': '0', 'visible': True})
list_controls.append({'id': 'tmax', 'label': 'Tamaño máximo (MB)', 'enabled': True, 'color': '0xFF2ECCFA',
'type': 'text', 'default': '0', 'visible': True})
# The default/saved values are used
web = "kbagi"
if "diskokosmiko" in item.extra:
web = "diskokosmiko"
valores_guardados = config.get_setting("filtro_defecto_" + web, item.channel)
if valores_guardados:
dict_values = valores_guardados
item.valores = valores
from platformcode import platformtools
return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values,
caption="Filtra la búsqueda", item=item, callback='filtrado')
def filtrado(item, values):
values_copy = values.copy()
web = "kbagi"
if "diskokosmiko" in item.extra:
web = "diskokosmiko"
# Save the filter so it is the one loaded by default
config.set_setting("filtro_defecto_" + web, values_copy, item.channel)
tipo = item.valores["tipo"][values["tipo"]]
search = values["search"]
ext = values["ext"]
tmin = values["tmin"]
tmax = values["tmax"]
if not tmin.isdigit():
tmin = "0"
if not tmax.isdigit():
tmax = "0"
item.valores = ""
item.post = "Mode=List&Type=%s&Phrase=%s&SizeFrom=%s&SizeTo=%s&Extension=%s&ref=pager&pageNumber=1" \
% (tipo, search, tmin, tmax, ext)
item.action = "listado"
return listado(item)
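
The dialog built in filtro() comes back through the callback: platformtools calls filtrado(item, values) with one entry per control id. A sketch of the values dict it receives (contents illustrative):

values = {'search': 'matrix', 'tipo': 5, 'ext': '', 'tmin': '0', 'tmax': '0'}
tipo = item.valores["tipo"][values["tipo"]]  # list controls return an index: 5 -> 'Video'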
# A single module-level lock: the original created a fresh Lock per call,
# which cannot serialize the threads spawned in listado()/colecciones()
thumbs_lock = threading.Lock()
def download_thumb(filename, url):
    from core import downloadtools
    folder = filetools.join(config.get_data_path(), 'thumbs_kbagi')
    with thumbs_lock:
        # only one thread creates the cache folder
        if not filetools.exists(folder):
            filetools.mkdir(folder)
    if not filetools.exists(filename):
        downloadtools.downloadfile(url, filename, silent=True)
    return filename
def delete_cache(url):
folder = filetools.join(config.get_data_path(), 'thumbs_kbagi')
filetools.rmdirtree(folder)
if config.is_xbmc():
import xbmc
xbmc.executebuiltin("Container.Refresh")

View File

@@ -104,9 +104,10 @@ def peliculas(item):
new_item = Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
contentTitle = contentTitle , infoLabels={'year':year} )
if year:
tmdb.set_infoLabels_item(new_item)
#if year:
# tmdb.set_infoLabels_item(new_item)
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
try:
patron = '<a href="([^"]+)" ><span class="icon-chevron-right"></span></a></div>'
next_page = re.compile(patron,re.DOTALL).findall(data)

View File

@@ -155,6 +155,8 @@ def findvideos(item):
url = scrapedurl
server = servertools.get_server_name(servidor)
title = "Enlace encontrado en %s" % (server)
if idioma == 'Ingles Subtitulado':
idioma = 'vose'
itemlist.append(Item(channel=item.channel, action="play", title=title, fulltitle=item.fulltitle, url=url,
thumbnail=scrapedthumbnail, language=idioma, quality=calidad, server=server))
if itemlist:

View File

@@ -76,14 +76,11 @@ def peliculas(item):
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Extract the next-page marker
paginador = scrapertools.find_single_match(data, "<div class='paginado'>.*?lateral")
next_page = scrapertools.find_single_match(data, 'class="nextpostslink" rel="next" href="(.*?)">')
patron = "<li.*?<a class='current'>.*?href='([^']+)"
scrapedurl = scrapertools.find_single_match(paginador, patron)
if scrapedurl:
if next_page:
scrapedtitle = "!Pagina Siguiente ->"
itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl, folder=True))
itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=next_page, folder=True))
return itemlist

View File

@@ -43,7 +43,7 @@ def porIdioma(item):
itemlist.append(Item(channel=item.channel, title="Castellano", action="agregadas",
url= host + "/idioma/espanol-castellano/", viewmode="movie_with_plot"))
itemlist.append(
Item(channel=item.channel, title="VOS", action="agregadas", url= host + "/idioma/subtitulada/",
Item(channel=item.channel, title="VOSE", action="agregadas", url= host + "/idioma/subtitulada/",
viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Latino", action="agregadas",
url= host + "/idioma/espanol-latino/", viewmode="movie_with_plot"))

View File

@@ -1,63 +0,0 @@
{
"id": "pymovie",
"name": "pymovie",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "https://s27.postimg.org/hvmvz7vab/pymovie.png",
"banner": "https://s28.postimg.org/3k0wjnwul/pymovie_banner.png",
"version": 1,
"changes": [
{
"date": "25/05/2017",
"description": "cambios esteticos"
},
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "04/01/2017",
"description": "Release."
}
],
"categories": [
"movie",
"tvshow",
"documentary"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_documentales",
"type": "bool",
"label": "Incluir en Novedades - Documentales",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,399 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
host = "http://www.pymovie.com.mx"
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
tgenero = {"comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png",
"drama": "https://s16.postimg.org/94sia332d/drama.png",
"accion": "https://s3.postimg.org/y6o9puflv/accion.png",
"aventura": "https://s10.postimg.org/6su40czih/aventura.png",
"romance": "https://s15.postimg.org/fb5j8cl63/romance.png",
"animacion": "https://s13.postimg.org/5on877l87/animacion.png",
"ciencia ficcion": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png",
"terror": "https://s7.postimg.org/yi0gij3gb/terror.png",
"musical": "https://s29.postimg.org/bbxmdh9c7/musical.png",
"deporte": "https://s13.postimg.org/xuxf5h06v/deporte.png",
"artes Marciales": "https://s24.postimg.org/w1aw45j5h/artesmarciales.png",
"intriga": "https://s27.postimg.org/v9og43u2b/intriga.png",
"infantil": "https://s23.postimg.org/g5rmazozv/infantil.png",
"mexicanas": "https://s3.postimg.org/p36ntnxfn/mexicana.png",
"espionaje": "https://s2.postimg.org/5hv64b989/espionaje.png",
"biografia": "https://s15.postimg.org/5lrpbx323/biografia.png"}
tcalidad = {'hd-1080': '[COLOR limegreen]HD-1080[/COLOR]', 'hd-720': '[COLOR limegreen]HD-720[/COLOR]',
'blueray': '[COLOR limegreen]BLUERAY[/COLOR]', 'dvd': '[COLOR limegreen]DVD[/COLOR]',
'cam': '[COLOR red]CAM[/COLOR]'}
tcalidad2 = {'hd-1080': 'https://s21.postimg.org/4h1s0t1wn/hd1080.png',
'hd-720': 'https://s12.postimg.org/lthu7v4q5/hd720.png', 'blueray': '',
'dvd': 'https://s1.postimg.org/m89hus1tb/dvd.png', 'cam': 'https://s11.postimg.org/ad4o5wpz7/cam.png'}
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Peliculas", action="menupeliculas",
thumbnail='https://s8.postimg.org/6wqwy2c2t/peliculas.png',
fanart='https://s8.postimg.org/6wqwy2c2t/peliculas.png', extra='peliculas/'))
itemlist.append(itemlist[-1].clone(title="Series", action="menuseries",
thumbnail='https://s27.postimg.org/iahczwgrn/series.png',
fanart='https://s27.postimg.org/iahczwgrn/series.png', extra='peliculas/'))
itemlist.append(itemlist[-1].clone(title="Documentales", action="menudocumental",
thumbnail='https://s16.postimg.org/7xjj4bmol/documental.png',
fanart='https://s16.postimg.org/7xjj4bmol/documental.png', extra='documental'))
return itemlist
def menupeliculas(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Ultimas", action="lista", url=host + '/Ordenar/Estreno/?page=1',
thumbnail='https://s22.postimg.org/cb7nmhwv5/ultimas.png',
fanart='https://s22.postimg.org/cb7nmhwv5/ultimas.png', extra='Estreno'))
itemlist.append(Item(channel=item.channel, title="Todas", action="lista", url=host + '?page=1',
thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png',
fanart='https://s18.postimg.org/fwvaeo6qh/todas.png', extra='todas'))
itemlist.append(Item(channel=item.channel, title="Generos", action="seccion", url=host,
thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png',
fanart='https://s3.postimg.org/5s9jg2wtf/generos.png', extra='generos'))
itemlist.append(
Item(channel=item.channel, title="Alfabetico", action="lista", url=host + '/Ordenar/Alfabetico/?page=1',
thumbnail='https://s17.postimg.org/fwi1y99en/a-z.png', fanart='https://s17.postimg.org/fwi1y99en/a-z.png',
extra='Alfabetico'))
itemlist.append(Item(channel=item.channel, title="Calidad", action="seccion", url=host,
thumbnail='https://s13.postimg.org/6nzv8nlkn/calidad.png',
fanart='https://s13.postimg.org/6nzv8nlkn/calidad.png', extra='calidad'))
itemlist.append(
Item(channel=item.channel, title="Mas Vistas", action="lista", url=host + '/Ordenar/MasVistas/?page=1',
thumbnail='https://s9.postimg.org/wmhzu9d7z/vistas.png',
fanart='https://s9.postimg.org/wmhzu9d7z/vistas.png', extra='Estreno'))
itemlist.append(
Item(channel=item.channel, title="Mas Votadas", action="lista", url=host + '/Ordenar/MasVotos/?page=1',
thumbnail='https://s7.postimg.org/9kg1nthzf/votadas.png',
fanart='https://s7.postimg.org/9kg1nthzf/votadas.png', extra='Estreno'))
itemlist.append(
Item(channel=item.channel, title="Calificacion", action="lista", url=host + '/Ordenar/Calificacion/?page=1',
thumbnail='https://s18.postimg.org/mjqrl49h5/calificacion.png',
fanart='https://s18.postimg.org/mjqrl49h5/calificacion.png', extra='Estreno'))
return itemlist
def menuseries(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Ultimas", action="lista", url=host + "/Series-estreno/?page=1",
thumbnail='https://s22.postimg.org/cb7nmhwv5/ultimas.png',
fanart='https://s22.postimg.org/cb7nmhwv5/ultimas.png', extra='series'))
itemlist.append(Item(channel=item.channel, title="Generos", action="seccion", url=host,
thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png',
fanart='https://s3.postimg.org/5s9jg2wtf/generos.png', extra='series-generos'))
itemlist.append(
Item(channel=item.channel, title="Alfabetico", action="lista", url=host + '/Ordernar-Serie/Alfabetico/?page=1',
thumbnail='https://s17.postimg.org/fwi1y99en/a-z.png', fanart='https://s17.postimg.org/fwi1y99en/a-z.png',
extra='series-alpha'))
itemlist.append(
Item(channel=item.channel, title="Mas Vistas", action="lista", url=host + '/Ordernar-Serie/MasVistas/?page=1',
thumbnail='https://s9.postimg.org/wmhzu9d7z/vistas.png',
fanart='https://s9.postimg.org/wmhzu9d7z/vistas.png', extra='series-masvistas'))
itemlist.append(
Item(channel=item.channel, title="Mas Votadas", action="lista", url=host + '/Ordernar-Serie/Masvotos/?page=1',
thumbnail='https://s7.postimg.org/9kg1nthzf/votadas.png',
fanart='https://s7.postimg.org/9kg1nthzf/votadas.png', extra='series-masvotadas'))
itemlist.append(Item(channel=item.channel, title="Recomendadas", action="lista",
url=host + '/Ordernar-Serie/Recomendadas/?page=1',
thumbnail='https://s12.postimg.org/s881laywd/recomendadas.png',
fanart='https://s12.postimg.org/s881laywd/recomendadas.png', extra='series-recomendadas'))
return itemlist
def menudocumental(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Todas", action="lista", url=host + "/Documentales/?page=1",
thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png',
fanart='https://s18.postimg.org/fwvaeo6qh/todas.png', extra='documental'))
itemlist.append(Item(channel=item.channel, title="Alfabetico", action="lista",
url=host + "/OrdenarDocumental/Alfabetico/?page=1",
thumbnail='https://s17.postimg.org/fwi1y99en/a-z.png',
fanart='https://s17.postimg.org/fwi1y99en/a-z.png', extra='documental'))
itemlist.append(Item(channel=item.channel, title="Mas Vistas", action="lista",
url=host + "/OrdenarDocumental/MasVistas/?page=1",
thumbnail='https://s9.postimg.org/wmhzu9d7z/vistas.png',
fanart='https://s9.postimg.org/wmhzu9d7z/vistas.png', extra='documental'))
return itemlist
def lista(item):
logger.info()
if item.extra == 'series':
accion = 'episodiosxtemp'
elif 'series-' in item.extra:
accion = 'temporadas'
else:
accion = 'findvideos'
itemlist = []
data = httptools.downloadpage(item.url).data
if 'series' in item.extra or item.extra == 'documental':
patron = '<h2 itemprop="name" >([^<]+)<\/h2><a href="([^.]+)" title=".*?" ><img.*?src="([^"]+)".*?class=".*?boren2"\/([^<]+)'
else:
patron = '<h2 itemprop="name" >([^<]+)<\/h2><a href="([^.]+)" title=".*?" ><img.*?src="([^"]+)".*?class=".*?boren2".*?>([^<]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle, scrapedurl, scrapedthumbnail, scrapedcalidad in matches:
url = scrapertools.decodeHtmlentities(host + scrapedurl)
url = url.strip(' ')
scrapedcalidad = scrapedcalidad.strip(' ')
scrapedcalidad = scrapedcalidad.strip('p')
scrapedcalidad = scrapedcalidad.lower()
if 'series' in item.extra or item.extra == 'documental':
title = scrapertools.decodeHtmlentities(scrapedtitle)
else:
calidad = tcalidad[scrapedcalidad]
title = scrapertools.decodeHtmlentities(scrapedtitle) + ' (' + calidad + ') '
thumbnail = scrapedthumbnail
fanart = ''
plot = ''
itemlist.append(Item(channel=item.channel, action=accion, title=title, url=url, thumbnail=thumbnail, plot=plot,
fanart=fanart, contentSerieName=scrapedtitle, contentTitle=scrapedtitle, extra=item.extra))
# Paginacion
if itemlist != []:
actual_page_url = item.url
next_page = scrapertools.find_single_match(data, '<a href="\?page=([^"]+)" class="next">next &')
while item.url[-1] != '=':
item.url = item.url[:-1]
next_page_url = item.url + next_page
if next_page != '':
itemlist.append(Item(channel=item.channel, action="lista", title='Siguiente >>>', url=next_page_url,
thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png', extra=item.extra))
return itemlist
def temporadas(item):
logger.info()
itemlist = []
templist = []
data = httptools.downloadpage(item.url).data
patron = 'class="listatemporadas" ><a href="([^"]+)" title=".*?" ><img src="([^"]+)" width="80" height="100" title=".*?alt=".*?<h3>([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = host + scrapedurl
title = scrapedtitle
thumbnail = scrapedthumbnail
plot = ''
fanart = ''
contentSeasonNumber = scrapedtitle.replace('Temporada ', '')
itemlist.append(Item(channel=item.channel, action="episodiosxtemp", title=title, fulltitle=item.title, url=url,
thumbnail=thumbnail, plot=plot, fanart=fanart, contentSerieName=item.contentSerieName,
contentSeasonNumber=contentSeasonNumber))
if item.extra == 'temporadas':
for tempitem in itemlist:
templist += episodiosxtemp(tempitem)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = temporadas(item)
for tempitem in templist:
itemlist += episodiosxtemp(tempitem)
return itemlist
def episodiosxtemp(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<a href="\/VerCapitulo\/([^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(data)
ep = 1
for scrapedtitle in matches:
scrapedtitle = scrapedtitle.replace(item.contentSeasonNumber + 'x' + '0' + str(ep), '')
url = host + '/VerCapitulo/' + scrapedtitle.replace(' ', '-')
title = item.contentSeasonNumber + 'x' + str(ep) + ' ' + scrapedtitle.strip('/')
thumbnail = item.thumbnail
plot = ''
fanart = ''
plot = ''
contentEpisodeNumber = ep
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=item.title, url=url,
thumbnail=thumbnail, plot=plot, fanart=fanart, extra='series',
contentSerieName=item.contentSerieName, contentSeasonNumber=item.contentSeasonNumber,
contentEpisodeNumber=contentEpisodeNumber))
ep = ep + 1
return itemlist
def seccion(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<option class="opselect" value="([^"]+)".*?>([^<]+)<\/option>'
matches = re.compile(patron, re.DOTALL).findall(data)
if item.extra == 'generos':
oplista = tgenero
opdir = '/Categoria/'
elif item.extra == 'calidad':
oplista = tcalidad
opdir = '/Calidad/'
elif item.extra == 'series-generos':
oplista = tgenero
opdir = '/Categoria-Series/'
for scrapeddir, scrapedtitle in matches:
url = item.url + opdir + scrapeddir + '/?page=1'
title = scrapedtitle.upper()
if 'generos' in item.extra and scrapedtitle.lower() in oplista:
thumbnail = oplista[scrapedtitle.lower()]
fanart = oplista[scrapedtitle.lower()]
elif 'calidad' in item.extra and scrapedtitle.lower() in oplista:
thumbnail = tcalidad2[scrapedtitle.lower()]
fanart = tcalidad[scrapedtitle.lower()]
else:
thumbnail = ''
fanart = ''
if scrapedtitle.lower() in oplista:
itemlist.append(Item(channel=item.channel, action="lista", title=title, fulltitle=item.title, url=url,
thumbnail=thumbnail, fanart=fanart, extra=item.extra))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
audio = {'Latino': '[COLOR limegreen]LATINO[/COLOR]', 'Español': '[COLOR yellow]ESPAÑOL[/COLOR]',
'Ingles': '[COLOR red]ORIGINAL SUBTITULADO[/COLOR]', 'Latino-Ingles': 'DUAL'}
data = httptools.downloadpage(item.url).data
if item.extra != 'series':
patron = 'data-video="([^"]+)" class="reproductorVideo"><ul><li>([^<]+)<\/li><li>([^<]+)<\/li>'
tipotitle = item.contentTitle
elif item.extra == 'series':
tipotitle = str(item.contentSeasonNumber) + 'x' + str(item.contentEpisodeNumber) + ' ' + item.contentSerieName
patron = '<li class="enlaces-l"><a href="([^"]+)" target="_blank"><ul><li>([^<]+)<.*?>([^<]+)<.*?>Reproducir<'
matches = re.compile(patron, re.DOTALL).findall(data)
if item.extra != 'documental':
n = 0
for scrapedurl, scrapedcalidad, scrapedaudio in matches:
if 'series' in item.extra:
datab = httptools.downloadpage(host + scrapedurl).data
url = scrapertools.find_single_match(datab, 'class="reproductor"><iframe src="([^"]+)"')
print url + 'esta es la direccion'
else:
url = scrapedurl
title = tipotitle
idioma = audio[scrapedaudio]
itemlist.extend(servertools.find_video_items(data=url))
if n < len(itemlist):
itemlist[n].title = tipotitle + ' (' + idioma + ' ) ' + '(' + itemlist[n].server + ' )'
n = n + 1
else:
url = scrapertools.find_single_match(data, 'class="reproductor"><iframe src="([^"]+)"')
itemlist.extend(servertools.find_video_items(data=url))
for videoitem in itemlist:
if item.extra == 'documental':
videoitem.title = item.title + ' (' + videoitem.server + ')'
videoitem.channel = item.channel
videoitem.action = "play"
videoitem.folder = False
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'series':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
item.extra = 'Estrenos'
try:
if categoria == 'peliculas':
item.url = host + '/Ordenar/Estreno/?page=1'
elif categoria == 'infantiles':
item.url = host + '/Categoria/Animacion/?page=1'
elif categoria == 'documentales':
item.url = host + '/Documentales/?page=1'
item.extra = 'documental'
itemlist = lista(item)
if itemlist[-1].title == 'Siguiente >>>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -352,4 +352,4 @@ def decode(string):
output = output.decode('utf8')
return output
return output

View File

@@ -119,14 +119,15 @@ def episodios(item):
scrapertools.printMatches(matches)
for scrapedurl, scrapedtitle, bloqueidiomas in matches:
title = scrapedtitle.strip() + " (" + extrae_idiomas(bloqueidiomas) + ")"
idiomas, language = extrae_idiomas(bloqueidiomas)
title = scrapedtitle.strip() + " (" + idiomas + ")"
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = ""
plot = ""
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumbnail,
plot=plot, show=item.show, folder=True))
plot=plot, show=item.show, folder=True, language=language))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
@@ -142,18 +143,19 @@ def extrae_idiomas(bloqueidiomas):
patronidiomas = '([a-z0-9]+).png"'
idiomas = re.compile(patronidiomas, re.DOTALL).findall(bloqueidiomas)
textoidiomas = ""
language=[]
for idioma in idiomas:
if idioma == "1":
textoidiomas = textoidiomas + "Español" + "/"
if idioma == "2":
textoidiomas = textoidiomas + "Latino" + "/"
if idioma == "3":
textoidiomas = textoidiomas + "VOS" + "/"
textoidiomas = textoidiomas + "VOSE" + "/"
if idioma == "4":
textoidiomas = textoidiomas + "VO" + "/"
language.append(codigo_a_idioma(idioma))
textoidiomas = textoidiomas[:-1]
return textoidiomas
return textoidiomas, language
def codigo_a_idioma(codigo):
@@ -163,7 +165,7 @@ def codigo_a_idioma(codigo):
if codigo == "2":
idioma = "Latino"
if codigo == "3":
idioma = "VOS"
idioma = "VOSE"
if codigo == "4":
idioma = "VO"
@@ -195,14 +197,15 @@ def findvideos(item):
for idioma, servername, scrapedurl in matches:
title = "Mirror en " + servername + " (" + codigo_a_idioma(idioma) + ")"
language = codigo_a_idioma(idioma)
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = ""
plot = ""
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="play", title=title, fulltitle=title, url=url, thumbnail=thumbnail,
plot=plot, folder=False))
plot=plot, folder=False, language=language))
itemlist = servertools.get_servers_itemlist(itemlist)
return itemlist
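
After this change extrae_idiomas() returns a pair, so episodios() unpacks the display string and the language list separately. A worked example, assuming a block whose flag images are 1.png and 3.png:

idiomas, language = extrae_idiomas('<img src="1.png"><img src="3.png">')
# idiomas  == "Español/VOSE"        (display string, entries joined by "/")
# language == ["Español", "VOSE"]   (built via codigo_a_idioma)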

View File

@@ -108,7 +108,7 @@ def lista(item):
actual_page_url = item.url
next_page = scrapertools.find_single_match(data, '<div class=pag_b><a href=(.*?) >Siguiente<\/a><\/div>')
if next_page != '':
itemlist.append(Item(channel=item.channel, action="lista", title='Siguiente >>>', url=item.url + next_page,
itemlist.append(Item(channel=item.channel, action="lista", title='Siguiente >>>', url=host + next_page,
thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png'))
return itemlist

View File

@@ -144,7 +144,7 @@ def lista_gen(item):
context1=[renumbertools.context(item), autoplay.context]
itemlist.append(
Item(channel=item.channel, title=title, url=scrapedurl, thumbnail=scrapedthumbnail, action="episodios",
show=scrapedtitle, context=context1))
show=scrapedtitle, context=context1, language=scrapedlang))
tmdb.set_infoLabels(itemlist)
# Pagination

View File

@@ -171,12 +171,13 @@ def findvideos(item):
matches = re.compile(pattern, re.S).findall(data)
for url, server, lang in matches:
title = "[%s] - [%s]" % (lang, server)
for url, server, language in matches:
title = "[%s] - [%s]" % (language, server)
url = host + url
server = re.sub('(\..*)', '', server)
logger.debug("url %s" % url)
itemlist.append(Item(channel=item.channel, action="play", title=title, fulltitle=item.fulltitle, url=url,
thumbnail=item.thumbnail, lang=lang))
thumbnail=item.thumbnail, language=language, server=server))
return itemlist
@@ -191,5 +192,6 @@ def play(item):
for video_item in itemlist:
video_item.title = "%s [%s]" % (item.fulltitle, item.lang)
video_item.thumbnail = item.thumbnail
video_item.language = item.language
return itemlist

View File

@@ -0,0 +1,25 @@
{
"id": "tiotorrent",
"name": "TioTorrent",
"active": true,
"adult": false,
"language": ["cast","lat"],
"thumbnail": "https://s1.postimg.org/29eths1fi7/tiotorrent.png",
"banner": "https://s1.postimg.org/9gkc73lxb3/tiotorrent-banner.png",
"version": 1,
"categories": [
"movie",
"tvshow",
"torrent"
],
"settings": [
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,285 @@
# -*- coding: utf-8 -*-
# -*- Channel TioTorrent -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channelselector import get_thumb
from platformcode import logger
from platformcode import config
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
host = 'http://www.tiotorrent.com/'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Peliculas",
action="movie_list",
thumbnail=get_thumb("channels_movie.png")
))
itemlist.append(item.clone(title="Series",
action="series_list",
thumbnail=get_thumb("channels_tvshow.png")
))
return itemlist
def movie_list(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Estrenos",
action="lista",
url=host + 'estrenos-de-cine',
extra='movie'
))
itemlist.append(item.clone(title="Todas",
action="lista",
url=host + 'peliculas',
extra='movie'
))
itemlist.append(item.clone(title="Buscar",
action="search",
url=host + '/peliculas/?pTit=',
thumbnail=get_thumb("search.png"),
extra='movie'
))
return itemlist
def series_list(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Todas",
action="lista",
url=host + 'series',
extra='serie'
))
itemlist.append(item.clone(title="Buscar",
action="search",
url=host + '/series/?pTit=',
thumbnail=get_thumb("search.png"),
extra='serie'
))
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
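
Because get_source() strips the quotes (and collapses whitespace) before parsing, every pattern below matches unquoted attributes such as class=moviesbox. An illustrative transformation:

import re

data = '<a href="/peliculas" class="next">'
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
# data == '<a href=/peliculas class=next>'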
def lista(item):
logger.info()
itemlist = []
data = get_source(item.url)
if item.extra == 'movie':
patron = "<div class=moviesbox.*?><a href=(.*?)>.*?image:url\('(.*?)'\)>.*?<b>.*?>(.*?)<"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = scrapedurl
thumbnail = scrapedthumbnail
contentTitle = scrapedtitle.decode('latin1').encode('utf8')
title = contentTitle
filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w396", "")
filtro_list = {"poster_path": filtro_thumb}
filtro_list = filtro_list.items()
itemlist.append(item.clone(action='findvideos',
title=title, url=url,
thumbnail=thumbnail,
contentTitle=contentTitle,
infoLabels={'filtro': filtro_list},
extra=item.extra
))
else:
patron = "<div class=moviesbox.*?>.*?episode>(.*?)x(.*?)<.*?href=(.*?)>.*?image:url\('(.*?)'.*?href.*?>(.*?)<"
matches = re.compile(patron, re.DOTALL).findall(data)
for season, episode, scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = scrapedurl
thumbnail = scrapedthumbnail
contentSerieName = scrapedtitle
title = '%s' % contentSerieName
filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w396", "")
filtro_list = {"poster_path": filtro_thumb}
filtro_list = filtro_list.items()
contentSeason=season
contentEpisode=episode
itemlist.append(item.clone(action='seasons',
title=title,
url=url,
thumbnail=thumbnail,
contentSerieName=contentSerieName,
contentSeason=contentSeason,
contentEpisode=contentEpisode,
infoLabels={'filtro': filtro_list},
extra=item.extra
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pagination
if itemlist:
    next_page = scrapertools.find_single_match(data, '<span class=pagination_next><a href=(.*?)>')
    if next_page != '':
        itemlist.append(item.clone(action="lista",
                                   title='Siguiente >>>',
                                   url=next_page,
                                   thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png'))
return itemlist
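
The infoLabels['filtro'] entry built above lets tmdb.set_infoLabels_itemlist pin the TMDb match to the exact poster instead of relying on the title alone. An illustrative value (the image id is hypothetical):

filtro_thumb = "https://image.tmdb.org/t/p/w396/abc123.jpg".replace("https://image.tmdb.org/t/p/w396", "")
filtro_list = {"poster_path": filtro_thumb}.items()
# filtro_list == [("poster_path", "/abc123.jpg")]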
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return lista(item)
else:
return []
def seasons(item):
logger.info()
itemlist = []
infoLabels = item.infoLabels
data = get_source(item.url)
patron = 'href=javascript:showSeasson\(.*?\); id=.*?>Temporada (.*?)<\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for season in matches:
title='Temporada %s' % season
infoLabels['season'] = season
itemlist.append(Item(channel=item.channel,
title= title,
url=item.url,
action='episodesxseasons',
contentSeasonNumber=season,
contentSerieName=item.contentSerieName,
infoLabels=infoLabels
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
itemlist = itemlist[::-1]
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="all_episodes", contentSerieName=item.contentSerieName))
return itemlist
def all_episodes(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseasons(tempitem)
return itemlist
def episodesxseasons(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = "<div class=corner-episode>%sx(.\d+)<\/div><a href=(.*?)>.*?" % item.contentSeasonNumber
patron += "image:url\('(.*?)'.*?href.*?>(%s)<" % item.contentSerieName
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels=item.infoLabels
for episode, scrapedurl, scrapedthumbnail, scrapedtitle in matches:
contentEpisodeNumber=episode
season = item.contentSeasonNumber
url=scrapedurl
thumbnail=scrapedthumbnail
infoLabels['episode']=episode
title = '%sx%s - %s' % (season, episode, item.contentSerieName)
itemlist.append(Item(channel=item.channel,
action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
contentSerieName=item.contentSerieName,
contentEpisodeNumber=contentEpisodeNumber,
infoLabels=infoLabels
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist[::-1]
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = "<a class=dload.*? target=_blank>.*?<\/a><i>(.*?)<\/i>.*?<a href=.*?showDownload\((.*?)\);"
matches = re.compile(patron, re.DOTALL).findall(data)
for quality, extra_info in matches:
    extra_info = extra_info.replace("'", '').split(',')
    title = '%s [%s]' % (item.contentTitle, quality)
    # movies carry the torrent link in field 1, episodes in field 2
    if item.extra == 'movie':
        url = extra_info[1]
    else:
        url = extra_info[2]
    server = 'torrent'
itemlist.append(Item(channel=item.channel,
title=title,
contentTitle= item.title,
url=url,
action='play',
quality=quality,
server=server,
thumbnail = item.infoLabels['thumbnail'],
infoLabels=item.infoLabels
))
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle=item.contentTitle
))
return itemlist
def newest(category):
logger.info()
item = Item()
try:
if category == 'peliculas':
item.url = host + 'estrenos-de-cine'
item.extra='movie'
itemlist = lista(item)
if itemlist[-1].title == 'Siguiente >>>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
return itemlist

View File

@@ -13,7 +13,7 @@ from platformcode import config, logger
host = 'http://verpeliculasnuevas.com'
IDIOMAS = {'latino': 'Latino', 'castellano': 'Español', 'sub': 'VOS'}
IDIOMAS = {'latino': 'Latino', 'castellano': 'Español', 'sub': 'VOSE'}
list_language = IDIOMAS.values()
taudio = {'latino': '[COLOR limegreen]LATINO[/COLOR]',

View File

@@ -1,4 +1,7 @@
# -*- coding: utf-8 -*-
# -*- Channel TioTorrent -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re

View File

@@ -223,7 +223,6 @@ def save_tvshow(item, episodelist):
return 0, 0, -1  # Exit without saving
scraper_return = scraper.find_and_set_infoLabels(item)
# At this point we can have:
# scraper_return = True: an item whose infoLabels hold the show's updated information
# scraper_return = False: an item with no movie info (the user cancelled the window)
@@ -238,6 +237,8 @@ def save_tvshow(item, episodelist):
if config.get_setting("original_title_folder", "videolibrary") == 1 and item.infoLabels['originaltitle']:
base_name = item.infoLabels['originaltitle']
elif item.infoLabels['tvshowtitle']:
base_name = item.infoLabels['tvshowtitle']
elif item.infoLabels['title']:
base_name = item.infoLabels['title']
else:
@@ -566,7 +567,6 @@ def add_tvshow(item, channel=None):
# Fetch the episode list
itemlist = getattr(channel, item.action)(item)
insertados, sobreescritos, fallidos, path = save_tvshow(item, itemlist)
if not insertados and not sobreescritos and not fallidos:

View File

@@ -0,0 +1,43 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "((?:kbagi.com|diskokosmiko.mx)/[^\\s'\"]+)",
"url": "http://\\1"
}
]
},
"free": true,
"id": "kbagi",
"name": "kbagi",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "http://i.imgur.com/EjbfM7p.png?1",
"version": 1
}
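
The find_videos pattern is how server lookups pick kbagi/diskokosmiko links out of raw page data; the captured group is rebuilt with the "http://\1" template. A quick check (the URL is illustrative):

import re

pattern = r"""((?:kbagi.com|diskokosmiko.mx)/[^\s'"]+)"""
m = re.search(pattern, "enlace: http://kbagi.com/usuario/coleccion-123")
# m.group(1) == "kbagi.com/usuario/coleccion-123"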

View File

@@ -0,0 +1,53 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import jsontools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
if "kbagi.com" in page_url:
from channels import kbagi
logueado, error_message = kbagi.login("kbagi.com")
if not logueado:
return False, error_message
data = httptools.downloadpage(page_url).data
if ("File was deleted" or "Not Found" or "File was locked by administrator") in data:
return False, "[kbagi] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
host = "http://kbagi.com"
host_string = "kbagi"
if "diskokosmiko.mx" in page_url:
host = "http://diskokosmiko.mx"
host_string = "diskokosmiko"
url = scrapertools.find_single_match(data, '<form action="([^"]+)" class="download_form"')
if url:
url = host + url
fileid = url.rsplit("f=", 1)[1]
token = scrapertools.find_single_match(data,
'<div class="download_container">.*?name="__RequestVerificationToken".*?value="([^"]+)"')
post = "fileId=%s&__RequestVerificationToken=%s" % (fileid, token)
headers = {'X-Requested-With': 'XMLHttpRequest'}
data = httptools.downloadpage(url, post, headers).data
data = jsontools.load(data)
mediaurl = data.get("DownloadUrl")
extension = data.get("Extension")
video_urls.append([".%s [%s]" % (extension, host_string), mediaurl])
for video_url in video_urls:
logger.info(" %s - %s" % (video_url[0], video_url[1]))
return video_urls
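
End to end, get_video_url() scrapes the download form, extracts fileId plus the verification token, POSTs them as an AJAX request, and reads DownloadUrl from the JSON reply. A hedged usage sketch (the page URL is hypothetical):

video_urls = get_video_url("http://kbagi.com/usuario/coleccion/archivo,123456")
for label, mediaurl in video_urls:
    print label, mediaurl  # e.g. ".mp4 [kbagi]" plus the direct link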