# -*- coding: utf-8 -*- import re import urllib import urlparse from core import scrapertools from core import servertools from platformcode import config, logger __modo_grafico__ = config.get_setting('modo_grafico', "seriecanal") __perfil__ = config.get_setting('perfil', "descargasmix") # Fijar perfil de color perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'], ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'], ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']] color1, color2, color3 = perfil[__perfil__] URL_BASE = "http://www.seriecanal.com/" def login(): logger.info() data = scrapertools.downloadpage(URL_BASE) if "Cerrar Sesion" in data: return True, "" usuario = config.get_setting("user", "seriecanal") password = config.get_setting("password", "seriecanal") if usuario == "" or password == "": return False, 'Regístrate en www.seriecanal.com e introduce tus datos en "Configurar Canal"' else: post = urllib.urlencode({'username': usuario, 'password': password}) data = scrapertools.downloadpage("http://www.seriecanal.com/index.php?page=member&do=login&tarea=acceder", post=post) if "Bienvenid@, se ha identificado correctamente en nuestro sistema" in data: return True, "" else: return False, "Error en el login. 
El usuario y/o la contraseña no son correctos" def mainlist(item): logger.info() itemlist = [] item.text_color = color1 result, message = login() if result: itemlist.append(item.clone(action="series", title="Últimos episodios", url=URL_BASE)) itemlist.append(item.clone(action="genero", title="Series por género")) itemlist.append(item.clone(action="alfabetico", title="Series por orden alfabético")) itemlist.append(item.clone(action="search", title="Buscar...")) else: itemlist.append(item.clone(action="", title=message, text_color="red")) itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False)) return itemlist def configuracion(item): from platformcode import platformtools ret = platformtools.show_channel_settings() platformtools.itemlist_refresh() return ret def search(item, texto): logger.info() item.url = "http://www.seriecanal.com/index.php?page=portada&do=category&method=post&category_id=0&order=" \ "C_Create&view=thumb&pgs=1&p2=1" try: post = "keyserie=" + texto item.extra = post return series(item) # Se captura la excepción, para no interrumpir al buscador global si un canal falla except: import sys for line in sys.exc_info(): logger.error("%s" % line) return [] def genero(item): logger.info() itemlist = [] data = scrapertools.downloadpage(URL_BASE) data = scrapertools.find_single_match(data, '
# NOTE(review): everything below is extraction-mangled residue. The HTML regex
# literals and several function headers (apparently the end of genero(), all of
# alfabetico(), and the start of series()) were stripped, leaving orphaned
# fragments collapsed onto single lines. The visible code appears to be:
#   1) the series() listing loop — builds tvshow/season items from
#      (thumbnail, url, plot, title, season, episode) regex matches, enriches
#      them via core.tmdb, and extracts a "next page" link;
#   2) a findvideos()-style section — builds "[Torrent]" play items per
#      episode, then starts scanning an online-links section ("Busca en la
#      seccion online") before being cut off.
# Bytes are kept identical; recover the full functions from the upstream
# seriecanal channel source before attempting any behavioural change.
(.*?)
' matches = scrapertools.find_multiple_matches(data, patron) for scrapedthumbnail, scrapedurl, scrapedplot, scrapedtitle, scrapedtemp, scrapedepi in matches: title = scrapedtitle + " - " + scrapedtemp + " - " + scrapedepi url = urlparse.urljoin(URL_BASE, scrapedurl) temporada = scrapertools.find_single_match(scrapedtemp, "(\d+)") new_item = item.clone() new_item.contentType = "tvshow" if temporada != "": new_item.infoLabels['season'] = temporada new_item.contentType = "season" logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + scrapedthumbnail + "]") itemlist.append(new_item.clone(action="findvideos", title=title, fulltitle=scrapedtitle, url=url, thumbnail=scrapedthumbnail, plot=scrapedplot, contentTitle=scrapedtitle, context=["buscar_trailer"], show=scrapedtitle)) try: from core import tmdb tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) except: pass # Extra marca siguiente página next_page = scrapertools.find_single_match(data, 'Episodio - Enlaces de Descarga(.*?)') patron = '([^"]+)
' matches = scrapertools.find_multiple_matches(data_download, patron) for scrapedurl, scrapedepi in matches: new_item = item.clone() if "Episodio" not in scrapedepi: scrapedtitle = "[Torrent] Episodio " + scrapedepi else: scrapedtitle = "[Torrent] " + scrapedepi scrapedtitle = scrapertools.htmlclean(scrapedtitle) new_item.infoLabels['episode'] = scrapertools.find_single_match(scrapedtitle, "Episodio (\d+)") logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "]") itemlist.append(new_item.clone(action="play", title=scrapedtitle, url=scrapedurl, server="torrent", contentType="episode")) # Busca en la seccion online data_online = scrapertools.find_single_match(data, "