# -*- coding: utf-8 -*-
import re
import threading
import urllib

import xbmc

from core import downloadtools
from core import filetools
from core import httptools
from core import jsontools
from core import scrapertools
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools

__perfil__ = config.get_setting('perfil', "kbagi")

# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFF088A08'],
          ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFF088A08'],
          ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFF088A08']]
if __perfil__ - 1 >= 0:
    color1, color2, color3, color4, color5 = perfil[__perfil__ - 1]
else:
    color1 = color2 = color3 = color4 = color5 = ""

adult_content = config.get_setting("adult_content", "kbagi")


def login(pagina):
    logger.info()
    try:
        dom = pagina.split(".")[0]
        user = config.get_setting("%suser" % dom, "kbagi")
        password = config.get_setting("%spassword" % dom, "kbagi")
        if "kbagi" in pagina:
            pagina = "k-bagi.com"
        if not user:
            return False, "Para ver los enlaces de %s es necesario registrarse en %s" % (dom, pagina)

        data = httptools.downloadpage("http://%s" % pagina).data
        # Already logged in: the username shows up somewhere in the page
        if re.search(r'(?i)%s' % user, data):
            return True, ""

        # The login endpoint requires the anti-forgery token from the page
        token = scrapertools.find_single_match(data, 'name="__RequestVerificationToken".*?value="([^"]+)"')
        post = "__RequestVerificationToken=%s&UserName=%s&Password=%s" % (token, user, password)
        headers = {'X-Requested-With': 'XMLHttpRequest'}
        url_log = "http://%s/action/Account/Login" % pagina
        data = httptools.downloadpage(url_log, post, headers).data
        if "redirectUrl" in data:
            logger.info("Login correcto")
            return True, ""
        else:
            logger.error("Error en el login")
            return False, "Nombre de usuario no válido. Comprueba tus credenciales"
    except:
        import traceback
        logger.error(traceback.format_exc())
        return False, "Error durante el login. Comprueba tus credenciales"
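
# login() returns a (logged_in, error_message) tuple so callers can surface the
# message as a menu entry. A minimal sketch of the intended call pattern (the
# same one mainlist() uses below):
#
#   logueado, error_message = login("kbagi.com")
#   if not logueado:
#       itemlist.append(item.clone(title=error_message, action="configuracion", folder=False))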


def mainlist(item):
    logger.info()
    itemlist = []
    item.text_color = color1

    logueado, error_message = login("kbagi.com")
    if not logueado:
        itemlist.append(item.clone(title=error_message, action="configuracion", folder=False))
    else:
        item.extra = "http://k-bagi.com"
        itemlist.append(item.clone(title="kbagi", action="", text_color=color2))
        itemlist.append(item.clone(title=" Búsqueda", action="search",
                                   url="http://k-bagi.com/action/SearchFiles"))
        itemlist.append(item.clone(title=" Colecciones", action="colecciones",
                                   url="http://k-bagi.com/action/home/MoreNewestCollections?pageNumber=1"))
        itemlist.append(item.clone(title=" Búsqueda personalizada", action="filtro",
                                   url="http://k-bagi.com/action/SearchFiles"))
        itemlist.append(item.clone(title=" Mi cuenta", action="cuenta"))

    logueado, error_message = login("diskokosmiko.mx")
    if not logueado:
        itemlist.append(item.clone(title=error_message, action="configuracion", folder=False))
    else:
        item.extra = "http://diskokosmiko.mx/"
        itemlist.append(item.clone(title="DiskoKosmiko", action="", text_color=color2))
        itemlist.append(item.clone(title=" Búsqueda", action="search",
                                   url="http://diskokosmiko.mx/action/SearchFiles"))
        itemlist.append(item.clone(title=" Colecciones", action="colecciones",
                                   url="http://diskokosmiko.mx/action/home/MoreNewestCollections?pageNumber=1"))
        itemlist.append(item.clone(title=" Búsqueda personalizada", action="filtro",
                                   url="http://diskokosmiko.mx/action/SearchFiles"))
        itemlist.append(item.clone(title=" Mi cuenta", action="cuenta"))

    itemlist.append(item.clone(action="", title=""))
    folder_thumb = filetools.join(config.get_data_path(), 'thumbs_kbagi')
    files = filetools.listdir(folder_thumb)
    if files:
        itemlist.append(item.clone(title="Eliminar caché de imágenes (%s)" % len(files),
                                   action="delete_cache", text_color="red"))
    itemlist.append(item.clone(title="Configuración del canal", action="configuracion", text_color="gold"))
    return itemlist


def search(item, texto):
    logger.info()
    item.post = "Mode=List&Type=Video&Phrase=%s&SizeFrom=0&SizeTo=0&Extension=&ref=pager&pageNumber=1" \
                % texto.replace(" ", "+")
    try:
        return listado(item)
    except:
        import sys, traceback
        for line in sys.exc_info():
            logger.error("%s" % line)
        logger.error(traceback.format_exc())
        return []


def configuracion(item):
    ret = platformtools.show_channel_settings()
    platformtools.itemlist_refresh()
    return ret


def listado(item):
    logger.info()
    itemlist = []

    # The Gallery view of the same results carries the thumbnail URLs; fetch it
    # alongside the List view so thumbnails can be cached locally by download_thumb()
    data_thumb = httptools.downloadpage(item.url, item.post.replace("Mode=List", "Mode=Gallery")).data
    if not item.post:
        data_thumb = ""
        item.url = item.url.replace("/gallery,", "/list,")
    data = httptools.downloadpage(item.url, item.post).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
    folder = filetools.join(config.get_data_path(), 'thumbs_kbagi')
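
    # One result block per file. The HTML tag and class names in the patterns
    # below (list item, size, name, description, folder link) are assumptions
    # inferred from the fields they feed, not verified against the live markup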
    patron = 'data-file-id(.*?</li>)'
    bloques = scrapertools.find_multiple_matches(data, patron)
    for block in bloques:
        if "adult_info" in block and not adult_content:
            continue
        size = scrapertools.find_single_match(block, '<span class="size">([^<]+)</span>')
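        # The name link must yield two groups: the file's relative URL and its
        # display title; the URL is made absolute with item.extra (the site root)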
        patron = '<a href="([^"]+)" class="name">([^<]+)<'
        scrapedurl, scrapedtitle = scrapertools.find_single_match(block, patron)
        scrapedthumbnail = scrapertools.find_single_match(block, "background-image:url\('([^']+)'")
        if scrapedthumbnail:
            try:
                # Locate the full thumbnail URL in the Gallery data and derive a
                # stable cache filename from the tail of its signature
                thumb = scrapedthumbnail.split("-", 1)[0].replace("?", "\?")
                if data_thumb:
                    url_thumb = scrapertools.find_single_match(data_thumb, "(%s[^']+)'" % thumb)
                else:
                    url_thumb = scrapedthumbnail
                scrapedthumbnail = filetools.join(folder, "%s.jpg" % url_thumb.split("e=", 1)[1][-20:])
            except:
                scrapedthumbnail = ""
        if scrapedthumbnail:
            # Fetch the thumbnail into the local cache without blocking the listing
            t = threading.Thread(target=download_thumb, args=[scrapedthumbnail, url_thumb])
            t.setDaemon(True)
            t.start()
        else:
            scrapedthumbnail = item.extra + "/img/file_types/gallery/movie.png"
        scrapedurl = item.extra + scrapedurl
        title = "%s (%s)" % (scrapedtitle, size)
        if "adult_info" in block:
            title += " [COLOR %s][+18][/COLOR]" % color4
        plot = scrapertools.find_single_match(block, '<div class="description">(.*?)</div>')
        if plot:
            plot = scrapertools.decodeHtmlentities(plot)
        new_item = Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
                        thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, text_color=color2,
                        extra=item.extra, infoLabels={'plot': plot}, post=item.post)
        if item.post:
            try:
                new_item.folderurl, new_item.foldername = scrapertools.find_single_match(
                    block, '<a href="([^"]+)" class="folderName">([^<]+)<')
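                # folderurl/foldername identify the collection holding this file;
                # findvideos() uses them for "Ver colección" and follow/unfollow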
            except:
                pass
        else:
            new_item.folderurl = item.url.rsplit("/", 1)[0]
            new_item.foldername = item.foldername
            new_item.fanart = item.thumbnail
        itemlist.append(new_item)

    # Pagination: POST searches bump pageNumber in the form data, while plain
    # collection listings bump the page index embedded in the URL
    next_page = scrapertools.find_single_match(data, 'class="pageSplitter.*?" data-nextpage-number="([^"]+)"')
    if next_page:
        if item.post:
            post = re.sub(r'pageNumber=(\d+)', "pageNumber=" + next_page, item.post)
            url = item.url
        else:
            url = re.sub(r',\d+\?ref=pager', ",%s?ref=pager" % next_page, item.url)
            post = ""
        itemlist.append(Item(channel=item.channel, action="listado",
                             title=">> Página Siguiente (%s)" % next_page, url=url, post=post,
                             extra=item.extra))
    return itemlist


def findvideos(item):
    logger.info()
    itemlist = []
    itemlist.append(item.clone(action="play", title="Reproducir/Descargar", server="kbagi"))
    usuario = scrapertools.find_single_match(item.url, '%s/([^/]+)/' % item.extra)
    url_usuario = item.extra + "/" + usuario
    if item.folderurl and not item.folderurl.startswith(item.extra):
        item.folderurl = item.extra + item.folderurl
    if item.post:
        itemlist.append(item.clone(action="listado", title="Ver colección: %s" % item.foldername,
                                   url=item.folderurl + "/gallery,1,1?ref=pager", post=""))
        # Following a collection needs the anti-forgery token tied to the follow button
        data = httptools.downloadpage(item.folderurl).data
        token = scrapertools.find_single_match(
            data, 'data-action="followChanged.*?name="__RequestVerificationToken".*?value="([^"]+)"')
        collection_id = item.folderurl.rsplit("-", 1)[1]
        post = "__RequestVerificationToken=%s&collectionId=%s" % (token, collection_id)
        url = "%s/action/Follow/Follow" % item.extra
        title = "Seguir Colección: %s" % item.foldername
        if "dejar de seguir" in data:
            title = "Dejar de seguir la colección: %s" % item.foldername
            url = "%s/action/Follow/UnFollow" % item.extra
        itemlist.append(item.clone(action="seguir", title=title, url=url, post=post,
                                   text_color=color5, folder=False))
    itemlist.append(item.clone(action="colecciones", title="Ver colecciones del usuario: %s" % usuario,
                               url=url_usuario))
    return itemlist


def colecciones(item):
    logger.info()
    itemlist = []
    usuario = False

    data = httptools.downloadpage(item.url).data
    if "Ver colecciones del usuario" not in item.title and not item.index:
        # Paged collection results arrive as JSON with the HTML in the "Content" field
        data = jsontools.load(data)["Data"]
        content = data["Content"]
        content = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", content)
    else:
        usuario = True
        if item.follow:
            content = scrapertools.find_single_match(data, 'id="followed_collections"(.*?)