Fixes and a new channel

Unknown
2018-09-05 14:50:44 -03:00
parent 03ca863051
commit 1e2d7fbca7
8 changed files with 873 additions and 56 deletions

View File

@@ -143,52 +143,27 @@ def episodios(item):
def findvideos(item):
logger.info()
import base64
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
data1 = scrapertools.find_single_match(data,
'<div id="playex" .+?>(.+?)<\/nav><\/div><\/div>')
patron='src="(.+?)"'
itemla = scrapertools.find_multiple_matches(data1,patron)
if "favicons?domain" in itemla[0]:
method = 1
data2=scrapertools.find_single_match(data, "var \$user_hashs = {(.+?)}")
patron='".+?":"(.+?)"'
itemla = scrapertools.find_multiple_matches(data2,patron)
else:
method = 0
for i in range(len(itemla)):
if method==0:
url=itemla[i]
else:
import base64
b=base64.b64decode(itemla[i])
url=b.decode('utf8')
            # check that the video exists (testing)
codigo=verificar_video(itemla[i])
if codigo==200:
if "ok.ru" in url:
server='okru'
else:
server=''
if "youtube" in url:
server='youtube'
if "openload" in url:
server='openload'
if "google" in url:
server='gvideo'
if "rapidvideo" in url:
server='rapidvideo'
if "streamango" in url:
server='streamango'
if server!='':
title="Enlace encontrado en %s " % (server.capitalize())
else:
title="NO DISPONIBLE"
if title!="NO DISPONIBLE":
itemlist.append(item.clone(title=title,url=url, action="play", server=server))
'<div id="playex" .+?>(.+?)<\/nav>?\s<\/div><\/div>')
patron = "changeLink\('([^']+)'\)"
matches = re.compile(patron, re.DOTALL).findall(data1)
for url64 in matches:
        url = base64.b64decode(url64)
if 'danimados' in url:
new_data = httptools.downloadpage('https:'+url.replace('stream', 'stream_iframe')).data
url = scrapertools.find_single_match(new_data, '<source src="([^"]+)"')
itemlist.append(item.clone(title='%s',url=url, action="play"))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentType=="movie" and item.contentChannel!='videolibrary':
itemlist.append(
item.clone(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
@@ -196,18 +171,3 @@ def findvideos(item):
autoplay.start(itemlist, item)
return itemlist
def verificar_video(url):
codigo=httptools.downloadpage(url).code
if codigo==200:
        # double-check by another means
data=httptools.downloadpage(url).data
removed = scrapertools.find_single_match(data,'removed(.+)')
if len(removed) != 0:
codigo1=404
else:
codigo1=200
else:
codigo1=200
return codigo1
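
Note: the rewritten findvideos() above assumes every player link is embedded in the page as a changeLink('<base64>') call. A minimal standalone sketch of that extraction, using the same regex and decode as the diff (the function name is illustrative):

    import re, base64

    def extract_player_urls(html):
        # each match is a base64-encoded URL, e.g. changeLink('aHR0cDovLy4uLg==')
        return [base64.b64decode(b64) for b64 in re.findall(r"changeLink\('([^']+)'\)", html)]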

View File

@@ -0,0 +1,64 @@
{
"id": "diskokosmiko",
"name": "Diskokosmiko",
"language": ["cast", "lat"],
"active": true,
"adult": false,
"version": 1,
"thumbnail": "http://i.imgur.com/EjbfM7p.png?1",
"banner": "copiapop.png",
"categories": [
"movie",
"tvshow"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "diskokosmikouser",
"type": "text",
"color": "0xFFC52020",
"label": "Usuario Diskokosmiko",
"enabled": true,
"visible": true
},
{
"id": "diskokosmikopassword",
"type": "text",
"color": "0xFFC52020",
"hidden": true,
"label": "Password Diskokosmiko",
"enabled": "!eq(-1,'')",
"visible": true
},
{
"id": "adult_content",
"type": "bool",
"color": "0xFFd50b0b",
"label": "Mostrar contenido adulto en las búsquedas",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"Sin color",
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
}
]
}
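
Note: each setting's "id" above is what the channel reads back with config.get_setting(); a sketch of how the credential fields are consumed (mirrors login() in diskokosmiko.py below):

    user = config.get_setting("diskokosmikouser", "diskokosmiko")
    password = config.get_setting("diskokosmikopassword", "diskokosmiko")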

View File

@@ -0,0 +1,366 @@
# -*- coding: utf-8 -*-
import re
import threading
import urllib
import xbmc
from core import downloadtools
from core import filetools
from core import httptools
from core import jsontools
from core import scrapertools
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
__perfil__ = config.get_setting('perfil', "diskokosmiko")
# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFF088A08'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFF088A08'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFF088A08']]
if __perfil__ - 1 >= 0:
color1, color2, color3, color4, color5 = perfil[__perfil__ - 1]
else:
color1 = color2 = color3 = color4 = color5 = ""
adult_content = config.get_setting("adult_content", "diskokosmiko")
def login(pagina):
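    # Builds the per-site setting ids ("<dom>user" / "<dom>password", matching the ids
    # declared in diskokosmiko.json) and returns a (logged_in, error_message) tuple.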
logger.info()
try:
dom = pagina.split(".")[0]
user = config.get_setting("%suser" %dom, "diskokosmiko")
password = config.get_setting("%spassword" %dom, "diskokosmiko")
if not user:
return False, "Para ver los enlaces de %s es necesario registrarse en %s" %(dom, pagina)
data = httptools.downloadpage("http://%s" % pagina).data
if re.search(r'(?i)%s' % user, data):
return True, ""
token = scrapertools.find_single_match(data, 'name="__RequestVerificationToken".*?value="([^"]+)"')
post = "__RequestVerificationToken=%s&UserName=%s&Password=%s" % (token, user, password)
headers = {'X-Requested-With': 'XMLHttpRequest'}
url_log = "http://%s/action/Account/Login" % pagina
data = httptools.downloadpage(url_log, post, headers).data
if "redirectUrl" in data:
logger.info("Login correcto")
return True, ""
else:
logger.error("Error en el login")
return False, "Nombre de usuario no válido. Comprueba tus credenciales"
except:
import traceback
logger.error(traceback.format_exc())
return False, "Error durante el login. Comprueba tus credenciales"
def mainlist(item):
logger.info()
itemlist = []
item.text_color = color1
logueado, error_message = login("diskokosmiko.mx")
if not logueado:
itemlist.append(item.clone(title=error_message, action="configuracion", folder=False))
else:
item.extra = "http://diskokosmiko.mx/"
itemlist.append(item.clone(title="DiskoKosmiko", action="", text_color=color2))
itemlist.append(item.clone(title=" Búsqueda", action="search", url="http://diskokosmiko.mx/action/SearchFiles"))
itemlist.append(item.clone(title=" Colecciones", action="colecciones",
url="http://diskokosmiko.mx/action/home/MoreNewestCollections?pageNumber=1"))
itemlist.append(item.clone(title=" Búsqueda personalizada", action="filtro",
url="http://diskokosmiko.mx/action/SearchFiles"))
itemlist.append(item.clone(title=" Mi cuenta", action="cuenta"))
itemlist.append(item.clone(action="", title=""))
folder_thumb = filetools.join(config.get_data_path(), 'thumbs_disko')
files = filetools.listdir(folder_thumb)
if files:
itemlist.append(
item.clone(title="Eliminar caché de imágenes (%s)" % len(files), action="delete_cache", text_color="red"))
itemlist.append(item.clone(title="Configuración del canal", action="configuracion", text_color="gold"))
return itemlist
def search(item, texto):
logger.info()
item.post = "Mode=List&Type=Video&Phrase=%s&SizeFrom=0&SizeTo=0&Extension=&ref=pager&pageNumber=1" % texto.replace(
" ", "+")
try:
return listado(item)
except:
import sys, traceback
for line in sys.exc_info():
logger.error("%s" % line)
logger.error(traceback.format_exc())
return []
def configuracion(item):
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def listado(item):
logger.info()
itemlist = []
data_thumb = httptools.downloadpage(item.url, item.post.replace("Mode=List", "Mode=Gallery")).data
if not item.post:
data_thumb = ""
item.url = item.url.replace("/gallery,", "/list,")
data = httptools.downloadpage(item.url, item.post).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
folder = filetools.join(config.get_data_path(), 'thumbs_disko')
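    # Thumbnails are cached on disk under thumbs_disko; download_thumb() fills the
    # cache from background threads started further down in this loop.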
patron = 'data-file-id(.*?</p>)</div></div>'
bloques = scrapertools.find_multiple_matches(data, patron)
for block in bloques:
if "adult_info" in block and not adult_content:
continue
size = scrapertools.find_single_match(block, '<p.*?>([^<]+)</p>')
patron = 'class="name"><a href="([^"]+)".*?>([^<]+)<'
scrapedurl, scrapedtitle = scrapertools.find_single_match(block, patron)
scrapedthumbnail = scrapertools.find_single_match(block, "background-image:url\('([^']+)'")
if scrapedthumbnail:
try:
thumb = scrapedthumbnail.split("-", 1)[0].replace("?", "\?")
if data_thumb:
url_thumb = scrapertools.find_single_match(data_thumb, "(%s[^']+)'" % thumb)
else:
url_thumb = scrapedthumbnail
scrapedthumbnail = filetools.join(folder, "%s.jpg" % url_thumb.split("e=", 1)[1][-20:])
except:
scrapedthumbnail = ""
if scrapedthumbnail:
t = threading.Thread(target=download_thumb, args=[scrapedthumbnail, url_thumb])
t.setDaemon(True)
t.start()
else:
scrapedthumbnail = item.extra + "/img/file_types/gallery/movie.png"
scrapedurl = item.extra + scrapedurl
title = "%s (%s)" % (scrapedtitle, size)
if "adult_info" in block:
title += " [COLOR %s][+18][/COLOR]" % color4
plot = scrapertools.find_single_match(block, '<div class="desc">(.*?)</div>')
if plot:
plot = scrapertools.decodeHtmlentities(plot)
new_item = Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, text_color=color2,
extra=item.extra, infoLabels={'plot': plot}, post=item.post)
if item.post:
try:
new_item.folderurl, new_item.foldername = scrapertools.find_single_match(block,
'<p class="folder"><a href="([^"]+)".*?>([^<]+)<')
except:
pass
else:
new_item.folderurl = item.url.rsplit("/", 1)[0]
new_item.foldername = item.foldername
new_item.fanart = item.thumbnail
itemlist.append(new_item)
next_page = scrapertools.find_single_match(data, 'class="pageSplitter.*?" data-nextpage-number="([^"]+)"')
if next_page:
if item.post:
post = re.sub(r'pageNumber=(\d+)', "pageNumber=" + next_page, item.post)
url = item.url
else:
url = re.sub(r',\d+\?ref=pager', ",%s?ref=pager" % next_page, item.url)
post = ""
itemlist.append(Item(channel=item.channel, action="listado", title=">> Página Siguiente (%s)" % next_page,
url=url, post=post, extra=item.extra))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="play", title="Reproducir/Descargar", server="diskokosmiko"))
usuario = scrapertools.find_single_match(item.url, '%s/([^/]+)/' % item.extra)
url_usuario = item.extra + "/" + usuario
if item.folderurl and not item.folderurl.startswith(item.extra):
item.folderurl = item.extra + item.folderurl
if item.post:
itemlist.append(item.clone(action="listado", title="Ver colección: %s" % item.foldername,
url=item.folderurl + "/gallery,1,1?ref=pager", post=""))
data = httptools.downloadpage(item.folderurl).data
token = scrapertools.find_single_match(data,
'data-action="followChanged.*?name="__RequestVerificationToken".*?value="([^"]+)"')
collection_id = item.folderurl.rsplit("-", 1)[1]
post = "__RequestVerificationToken=%s&collectionId=%s" % (token, collection_id)
url = "%s/action/Follow/Follow" % item.extra
title = "Seguir Colección: %s" % item.foldername
if "dejar de seguir" in data:
title = "Dejar de seguir la colección: %s" % item.foldername
url = "%s/action/Follow/UnFollow" % item.extra
itemlist.append(item.clone(action="seguir", title=title, url=url, post=post, text_color=color5, folder=False))
itemlist.append(
item.clone(action="colecciones", title="Ver colecciones del usuario: %s" % usuario, url=url_usuario))
return itemlist
def colecciones(item):
logger.info()
itemlist = []
usuario = False
data = httptools.downloadpage(item.url).data
if "Ver colecciones del usuario" not in item.title and not item.index:
data = jsontools.load(data)["Data"]
content = data["Content"]
content = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", content)
else:
usuario = True
if item.follow:
content = scrapertools.find_single_match(data,
'id="followed_collections"(.*?)<div id="recommended_collections"')
else:
content = scrapertools.find_single_match(data,
'<div id="collections".*?<div class="collections_list(.*?)<div class="collections_list')
content = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", content)
patron = '<a class="name" href="([^"]+)".*?>([^<]+)<.*?src="([^"]+)".*?<p class="info">(.*?)</p>'
matches = scrapertools.find_multiple_matches(content, patron)
index = ""
if item.index and item.index != "0":
matches = matches[item.index:item.index + 20]
if len(matches) > item.index + 20:
index = item.index + 20
elif len(matches) > 20:
matches = matches[:20]
index = 20
folder = filetools.join(config.get_data_path(), 'thumbs_disko')
for url, scrapedtitle, thumb, info in matches:
url = item.extra + url + "/gallery,1,1?ref=pager"
title = "%s (%s)" % (scrapedtitle, scrapertools.htmlclean(info))
try:
scrapedthumbnail = filetools.join(folder, "%s.jpg" % thumb.split("e=", 1)[1][-20:])
except:
try:
scrapedthumbnail = filetools.join(folder, "%s.jpg" % thumb.split("/thumbnail/", 1)[1][-20:])
thumb = thumb.replace("/thumbnail/", "/")
except:
scrapedthumbnail = ""
if scrapedthumbnail:
t = threading.Thread(target=download_thumb, args=[scrapedthumbnail, thumb])
t.setDaemon(True)
t.start()
else:
scrapedthumbnail = thumb
itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url,
thumbnail=scrapedthumbnail, text_color=color2, extra=item.extra,
foldername=scrapedtitle))
if not usuario and data.get("NextPageUrl"):
url = item.extra + data["NextPageUrl"]
itemlist.append(item.clone(title=">> Página Siguiente", url=url, text_color=""))
elif index:
itemlist.append(item.clone(title=">> Página Siguiente", url=item.url, index=index, text_color=""))
return itemlist
def seguir(item):
logger.info()
data = httptools.downloadpage(item.url, item.post)
message = "Colección seguida"
if "Dejar" in item.title:
message = "La colección ya no se sigue"
if data.sucess and config.get_platform() != "plex":
platformtools.dialog_notification("Acción correcta", message)
def cuenta(item):
logger.info()
itemlist = []
if "diskokosmiko" in item.extra:
web = "diskokosmiko"
logueado, error_message = login("diskokosmiko.mx")
if not logueado:
itemlist.append(item.clone(title=error_message, action="configuracion", folder=False))
return itemlist
user = config.get_setting("%suser" % web, "diskokosmiko")
user = unicode(user, "utf8").lower().encode("utf8")
url = item.extra + "/" + urllib.quote(user)
data = httptools.downloadpage(url).data
num_col = scrapertools.find_single_match(data, 'name="Has_collections" value="([^"]+)"')
if num_col != "0":
itemlist.append(item.clone(action="colecciones", url=url, index="0", title="Ver mis colecciones",
text_color=color5))
else:
itemlist.append(item.clone(action="", title="No tienes ninguna colección", text_color=color4))
num_follow = scrapertools.find_single_match(data, 'name="Follows_collections" value="([^"]+)"')
if num_follow != "0":
itemlist.append(item.clone(action="colecciones", url=url, index="0", title="Colecciones que sigo",
text_color=color5, follow=True))
else:
itemlist.append(item.clone(action="", title="No sigues ninguna colección", text_color=color4))
return itemlist
def filtro(item):
logger.info()
list_controls = []
valores = {}
dict_values = None
list_controls.append({'id': 'search', 'label': 'Texto a buscar', 'enabled': True, 'color': '0xFFC52020',
'type': 'text', 'default': '', 'visible': True})
list_controls.append({'id': 'tipo', 'label': 'Tipo de búsqueda', 'enabled': True, 'color': '0xFFFF8000',
'type': 'list', 'default': -1, 'visible': True})
list_controls[1]['lvalues'] = ['Aplicación', 'Archivo', 'Documento', 'Imagen', 'Música', 'Vídeo', 'Todos']
valores['tipo'] = ['Application', 'Archive', 'Document', 'Image', 'Music', 'Video', '']
list_controls.append({'id': 'ext', 'label': 'Extensión', 'enabled': True, 'color': '0xFFF4FA58',
'type': 'text', 'default': '', 'visible': True})
list_controls.append({'id': 'tmin', 'label': 'Tamaño mínimo (MB)', 'enabled': True, 'color': '0xFFCC2EFA',
'type': 'text', 'default': '0', 'visible': True})
list_controls.append({'id': 'tmax', 'label': 'Tamaño máximo (MB)', 'enabled': True, 'color': '0xFF2ECCFA',
'type': 'text', 'default': '0', 'visible': True})
# Use the saved values, falling back to the defaults
if "diskokosmiko" in item.extra:
web = "diskokosmiko"
valores_guardados = config.get_setting("filtro_defecto_" + web, item.channel)
if valores_guardados:
dict_values = valores_guardados
item.valores = valores
return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values,
caption="Filtra la búsqueda", item=item, callback='filtrado')
def filtrado(item, values):
values_copy = values.copy()
if "diskokosmiko" in item.extra:
web = "diskokosmiko"
        # Save the filter so it is the one loaded by default next time
config.set_setting("filtro_defecto_" + web, values_copy, item.channel)
tipo = item.valores["tipo"][values["tipo"]]
search = values["search"]
ext = values["ext"]
tmin = values["tmin"]
tmax = values["tmax"]
if not tmin.isdigit():
tmin = "0"
if not tmax.isdigit():
tmax = "0"
item.valores = ""
item.post = "Mode=List&Type=%s&Phrase=%s&SizeFrom=%s&SizeTo=%s&Extension=%s&ref=pager&pageNumber=1" \
% (tipo, search, tmin, tmax, ext)
item.action = "listado"
return listado(item)
thumbs_lock = threading.Lock()

def download_thumb(filename, url):
    # A shared, module-level lock so only one thread creates the cache folder
    # (a Lock() created per call would not protect anything)
    with thumbs_lock:
        folder = filetools.join(config.get_data_path(), 'thumbs_disko')
        if not filetools.exists(folder):
            filetools.mkdir(folder)
if not filetools.exists(filename):
downloadtools.downloadfile(url, filename, silent=True)
return filename
def delete_cache(url):
folder = filetools.join(config.get_data_path(), 'thumbs_disko')
filetools.rmdirtree(folder)
if config.is_xbmc():
xbmc.executebuiltin("Container.Refresh")
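
Note: the login() flow above reduces to scraping the site's anti-forgery token and posting it back. A standalone sketch under the same form layout (requests is used here only for illustration; the channel itself goes through httptools):

    import re, requests

    def login_sketch(user, password):
        s = requests.Session()
        page = s.get("http://diskokosmiko.mx").text
        token = re.search(r'name="__RequestVerificationToken".*?value="([^"]+)"', page, re.S).group(1)
        post = {"__RequestVerificationToken": token, "UserName": user, "Password": password}
        r = s.post("http://diskokosmiko.mx/action/Account/Login", data=post,
                   headers={"X-Requested-With": "XMLHttpRequest"})
        return "redirectUrl" in r.text  # same success check as login()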

View File

@@ -0,0 +1,41 @@
{
"id": "fanpelis",
"name":"Fanpelis",
"thumbnail":"http://fanpelis.com/wp-content/uploads/2018/05/111.png",
"banner":"",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"version": 1,
"categories": [
"movie",
"tvshow"
],
"settings": [
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
}
]
}
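
Note: the include_in_newest_* flags above gate this channel in the global "Novedades" listings; roughly how the aggregator side uses them together with newest() (a sketch, not actual Alfa code):

    if config.get_setting('include_in_newest_peliculas', 'fanpelis'):
        novedades.extend(fanpelis.newest('peliculas'))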

View File

@@ -0,0 +1,293 @@
# -*- coding: utf-8 -*-
# -*- Channel Fanpelis -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
import urlparse
from channelselector import get_thumb
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import tmdb
host = "http://fanpelis.com/"
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(
Item(channel=item.channel,
title="Peliculas",
action="sub_menu",
url=host + "movies/",
thumbnail=get_thumb('movies', auto=True)))
itemlist.append(
Item(channel=item.channel,
title="Series",
action="sub_menu",
url=host + "series/",
thumbnail=get_thumb('tvshows', auto=True)))
itemlist.append(
Item(channel=item.channel,
title="Buscar",
action="search",
url=host,
thumbnail=get_thumb("search", auto=True)))
return itemlist
def sub_menu(item):
logger.info()
itemlist = list()
itemlist.append(
Item(channel=item.channel,
title="Ultimas",
action="list_all",
url=item.url,
thumbnail=get_thumb("last", auto=True)))
itemlist.append(
Item(channel=item.channel,
title="Generos",
action="categories",
url=host,
thumbnail=get_thumb('genres', auto=True)
))
itemlist.append(
Item(channel=item.channel,
title="Por Año",
action="categories",
url=host,
thumbnail=get_thumb('year', auto=True)
))
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def categories(item):
logger.info()
itemlist = []
data = get_source(item.url)
if item.title == 'Generos':
patron = 'menu-item-object-category menu-item-\d+"><a href="([^"]+)">([^<]+)<'
else:
patron = 'menu-item-object-release-year menu-item-\d+"><a href="([^"]+)">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, title in matches:
itemlist.append(Item(channel=item.channel,
action="list_all",
title=title,
url=url
))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
try:
if texto != '':
item.texto = texto
return list_all(item)
else:
return []
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def list_all(item):
logger.info()
itemlist = []
if item.texto != '':
url = item.url + "?s=%s" % item.texto
else:
url = item.url
try:
data = get_source(url)
except:
return itemlist
data = data.replace("'", '"')
pattern = 'class="ml-item.*?"><a href="([^"]+)".*?oldtitle="([^"]+)".*?'
pattern += '<img data-original="([^"]+)".*?<div id(.*?)/a>'
matches = scrapertools.find_multiple_matches(data, pattern)
for url, title, thumb, info in matches:
year = scrapertools.find_single_match(info, 'rel="tag">(\d{4})<')
new_item = Item(channel=item.channel,
title=title,
url=url,
thumbnail=thumb,
infoLabels = {'year': year}
)
if 'series' in url:
new_item.action = 'seasons'
new_item.contentSerieName = title
else:
new_item.action = 'findvideos'
new_item.contentTitle = title
itemlist.append(new_item)
tmdb.set_infoLabels(itemlist, seekTmdb=True)
    active_page = scrapertools.find_single_match(data, '<li class="active"><a class="">(\d+)</a>')
    next_page = ''
    if active_page:
        # build the next-page URL from the currently active page number
        if item.texto != '':
            next_page = host + 'page/%s/' % (int(active_page) + 1)
        else:
            next_page = item.url + 'page/%s/' % (int(active_page) + 1)
    if next_page:
url = urlparse.urljoin(host, next_page)
itemlist.append(Item(channel=item.channel,
action="list_all",
title=">> Página siguiente",
url=url,
texto=item.texto,
thumbnail=get_thumb("next.png")))
return itemlist
def seasons(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<strong>Temporada(\d+)</strong>'
matches = re.compile(patron, re.DOTALL).findall(data)
for temporada in matches:
title = 'Temporada %s' % temporada
contentSeasonNumber = temporada
item.infoLabels['season'] = contentSeasonNumber
itemlist.append(item.clone(action='episodesxseason',
title=title,
contentSeasonNumber=contentSeasonNumber
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
url=item.url,
action="add_serie_to_library",
extra="episodios",
contentSerieName=item.contentSerieName,
contentSeasonNumber=contentSeasonNumber
))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseason(tempitem)
return itemlist
def episodesxseason(item):
logger.info()
itemlist = []
season = item.contentSeasonNumber
data = get_source(item.url)
data = scrapertools.find_single_match(data, '<strong>Temporada%s</strong>.*?</ul>' % season)
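    # only this season's block is kept, so episode links from other seasons are not picked up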
patron = '<a href="([^"]+)"><i class="fa fa-play"></i>([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
ep = 1
for scrapedurl, scrapedtitle in matches:
epi = str(ep)
title = season + 'x%s - Episodio %s' % (epi, epi)
url = scrapedurl
contentEpisodeNumber = epi
item.infoLabels['episode'] = contentEpisodeNumber
itemlist.append(item.clone(action='findvideos',
title=title,
url=url,
contentEpisodeNumber=contentEpisodeNumber,
))
ep += 1
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<div class="movieplay"><iframe src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for url in matches:
itemlist.append(Item(channel=item.channel, title='%s', url=url, action='play', infoLabels=item.infoLabels))
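    # get_servers_itemlist() detects the server for each URL and fills the '%s' placeholder in each title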
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle=item.contentTitle
))
return itemlist
def newest(category):
logger.info()
item = Item()
try:
if category == 'peliculas':
item.url = host + "movies/"
elif category == 'infantiles':
item.url = host + 'genre/animacion'
elif category == 'terror':
item.url = host + 'genre/terror'
itemlist = list_all(item)
if itemlist[-1].title == '>> Página siguiente':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
return itemlist

View File

@@ -2,7 +2,7 @@
"id": "kbagi",
"name": "Kbagi/Diskokosmiko",
"language": ["cast", "lat"],
"active": true,
"active": false,
"adult": false,
"version": 1,
"thumbnail": "http://i.imgur.com/EjbfM7p.png?1",

View File

@@ -0,0 +1,43 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "((?:diskokosmiko.mx)/[^\\s'\"]+)",
"url": "http://\\1"
}
]
},
"free": true,
"id": "diskokosmoko",
"name": "diskokosmiko",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "http://i.imgur.com/EjbfM7p.png?1",
"version": 1
}
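
Note: the find_videos pattern above is how pages from other channels get scanned for diskokosmiko links; a quick sanity check of the rewrite rule (sketch):

    import re
    html = "iframe src='diskokosmiko.mx/file/123'"
    m = re.search(r"((?:diskokosmiko.mx)/[^\s'\"]+)", html)
    url = "http://" + m.group(1)  # -> http://diskokosmiko.mx/file/123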

View File

@@ -0,0 +1,50 @@
# -*- coding: utf-8 -*-
from channels import diskokosmiko
from core import httptools
from core import jsontools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
domain = "diskokosmiko.mx"
logueado, error_message = diskokosmiko.login(domain)
if not logueado:
return False, error_message
data = httptools.downloadpage(page_url).data
if ("File was deleted" or "Not Found" or "File was locked by administrator") in data:
return False, "[%s] El archivo no existe o ha sido borrado" %domain
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
host = "http://diskokosmiko.mx"
host_string = "diskokosmiko"
url = scrapertools.find_single_match(data, '<form action="([^"]+)" class="download_form"')
if url:
url = host + url
fileid = url.rsplit("f=", 1)[1]
token = scrapertools.find_single_match(data,
'<div class="download_container">.*?name="__RequestVerificationToken".*?value="([^"]+)"')
post = "fileId=%s&__RequestVerificationToken=%s" % (fileid, token)
headers = {'X-Requested-With': 'XMLHttpRequest'}
data = httptools.downloadpage(url, post, headers).data
data = jsontools.load(data)
mediaurl = data.get("DownloadUrl")
extension = data.get("Extension")
video_urls.append([".%s [%s]" % (extension, host_string), mediaurl])
for video_url in video_urls:
logger.info(" %s - %s" % (video_url[0], video_url[1]))
return video_urls
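
Note: get_video_url() returns the [label, url] pairs the player menu expects; a successful call on a valid page yields something like (sketch):

    video_urls = get_video_url("http://diskokosmiko.mx/...")
    # -> [[".mp4 [diskokosmiko]", "<direct DownloadUrl>"]]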