diff --git a/plugin.video.alfa/channels/seriecanal.json b/plugin.video.alfa/channels/seriecanal.json
index b3166f5f..e53459ae 100644
--- a/plugin.video.alfa/channels/seriecanal.json
+++ b/plugin.video.alfa/channels/seriecanal.json
@@ -1,7 +1,7 @@
 {
     "id": "seriecanal",
     "name": "Seriecanal",
-    "active": true,
+    "active": false,
     "adult": false,
     "language": ["cast"],
     "thumbnail": "http://i.imgur.com/EwMK8Yd.png",
diff --git a/plugin.video.alfa/channels/seriecanal.py b/plugin.video.alfa/channels/seriecanal.py
index 0ac2bfb4..843966c8 100644
--- a/plugin.video.alfa/channels/seriecanal.py
+++ b/plugin.video.alfa/channels/seriecanal.py
@@ -4,12 +4,14 @@
 import re
 import urllib
 import urlparse
+from core import httptools
 from core import scrapertools
 from core import servertools
+from core import tmdb
 from platformcode import config, logger
 
 __modo_grafico__ = config.get_setting('modo_grafico', "seriecanal")
-__perfil__ = config.get_setting('perfil', "descargasmix")
+__perfil__ = config.get_setting('perfil', "seriecanal")
 
 # Fijar perfil de color
 perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'],
@@ -17,23 +19,21 @@ perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'],
           ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']]
 color1, color2, color3 = perfil[__perfil__]
 
-URL_BASE = "http://www.seriecanal.com/"
+host = "https://www.seriecanal.com/"
 
 
 def login():
     logger.info()
-    data = scrapertools.downloadpage(URL_BASE)
+    data = httptools.downloadpage(host).data
     if "Cerrar Sesion" in data:
         return True, ""
-
     usuario = config.get_setting("user", "seriecanal")
     password = config.get_setting("password", "seriecanal")
     if usuario == "" or password == "":
         return False, 'Regístrate en www.seriecanal.com e introduce tus datos en "Configurar Canal"'
     else:
         post = urllib.urlencode({'username': usuario, 'password': password})
-        data = scrapertools.downloadpage("http://www.seriecanal.com/index.php?page=member&do=login&tarea=acceder",
-                                         post=post)
+        data = httptools.downloadpage(host + "index.php?page=member&do=login&tarea=acceder", post=post).data
         if "Bienvenid@, se ha identificado correctamente en nuestro sistema" in data:
             return True, ""
         else:
@@ -44,18 +44,15 @@ def mainlist(item):
     logger.info()
     itemlist = []
     item.text_color = color1
-
     result, message = login()
     if result:
-        itemlist.append(item.clone(action="series", title="Últimos episodios", url=URL_BASE))
+        itemlist.append(item.clone(action="series", title="Últimos episodios", url=host))
         itemlist.append(item.clone(action="genero", title="Series por género"))
         itemlist.append(item.clone(action="alfabetico", title="Series por orden alfabético"))
         itemlist.append(item.clone(action="search", title="Buscar..."))
     else:
         itemlist.append(item.clone(action="", title=message, text_color="red"))
-
     itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
-
     return itemlist
 
 
@@ -68,7 +65,7 @@ def configuracion(item):
 
 def search(item, texto):
     logger.info()
-    item.url = "http://www.seriecanal.com/index.php?page=portada&do=category&method=post&category_id=0&order=" \
+    item.url = host + "index.php?page=portada&do=category&method=post&category_id=0&order=" \
                "C_Create&view=thumb&pgs=1&p2=1"
     try:
         post = "keyserie=" + texto
@@ -85,27 +82,24 @@ def genero(item):
     logger.info()
     itemlist = []
-    data = scrapertools.downloadpage(URL_BASE)
+    data = httptools.downloadpage(host).data
     data = scrapertools.find_single_match(data, '')
-
     matches = scrapertools.find_multiple_matches(data, '([^"]+)')
     for scrapedurl, scrapedtitle in matches:
         scrapedtitle = scrapedtitle.capitalize()
-        url = urlparse.urljoin(URL_BASE, scrapedurl)
+        url = urlparse.urljoin(host, scrapedurl)
         itemlist.append(item.clone(action="series", title=scrapedtitle, url=url))
-
     return itemlist
 
 
 def alfabetico(item):
     logger.info()
     itemlist = []
-    data = scrapertools.downloadpage(URL_BASE)
+    data = httptools.downloadpage(host).data
     data = scrapertools.find_single_match(data, '')
-
     matches = scrapertools.find_multiple_matches(data, '([^"]+)')
     for scrapedurl, scrapedtitle in matches:
-        url = urlparse.urljoin(URL_BASE, scrapedurl)
+        url = urlparse.urljoin(host, scrapedurl)
         itemlist.append(item.clone(action="series", title=scrapedtitle, url=url))
     return itemlist
@@ -115,45 +109,38 @@ def series(item):
     logger.info()
     itemlist = []
     item.infoLabels = {}
     item.text_color = color2
-
     if item.extra != "":
-        data = scrapertools.downloadpage(item.url, post=item.extra)
+        data = httptools.downloadpage(item.url, post=item.extra).data
     else:
-        data = scrapertools.downloadpage(item.url)
+        data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
     patron = '([^"]+).*?([^"]+).*?' \
              '(.*?)'
-
     matches = scrapertools.find_multiple_matches(data, patron)
-
     for scrapedthumbnail, scrapedurl, scrapedplot, scrapedtitle, scrapedtemp, scrapedepi in matches:
         title = scrapedtitle + " - " + scrapedtemp + " - " + scrapedepi
-        url = urlparse.urljoin(URL_BASE, scrapedurl)
-        temporada = scrapertools.find_single_match(scrapedtemp, "(\d+)")
-        new_item = item.clone()
-        new_item.contentType = "tvshow"
+        url = urlparse.urljoin(host, scrapedurl)
+        temporada = scrapertools.find_single_match(scrapedtemp, "\d+")
+        episode = scrapertools.find_single_match(scrapedepi, "\d+")
+        #item.contentType = "tvshow"
         if temporada != "":
-            new_item.infoLabels['season'] = temporada
-            new_item.contentType = "season"
-
-        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + scrapedthumbnail + "]")
-        itemlist.append(new_item.clone(action="findvideos", title=title, fulltitle=scrapedtitle, url=url,
-                                       thumbnail=scrapedthumbnail, plot=scrapedplot, contentTitle=scrapedtitle,
-                                       context=["buscar_trailer"], show=scrapedtitle))
-
-    try:
-        from core import tmdb
-        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
-    except:
-        pass
+            item.infoLabels['season'] = temporada
+            #item.contentType = "season"
+        if episode != "":
+            item.infoLabels['episode'] = episode
+            #item.contentType = "episode"
+        itemlist.append(item.clone(action="findvideos", title=title, url=url,
+                                   contentSerieName=scrapedtitle,
+                                   context=["buscar_trailer"]))
+    tmdb.set_infoLabels(itemlist)
 
     # Extra marca siguiente página
     next_page = scrapertools.find_single_match(data, 'Episodio - Enlaces de Descarga(.*?)')
     patron = '([^"]+)'
@@ -178,18 +163,15 @@
         else:
             scrapedtitle = "[Torrent] " + scrapedepi
         scrapedtitle = scrapertools.htmlclean(scrapedtitle)
-        new_item.infoLabels['episode'] = scrapertools.find_single_match(scrapedtitle, "Episodio (\d+)")
         logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "]")
         itemlist.append(new_item.clone(action="play", title=scrapedtitle, url=scrapedurl, server="torrent",
                                        contentType="episode"))
-
     # Busca en la seccion online
     data_online = scrapertools.find_single_match(data, "Enlaces de Visionado Online(.*?)")
     patron = '([^"]+)'
     matches = scrapertools.find_multiple_matches(data_online, patron)
-
     for scrapedurl, scrapedthumb, scrapedtitle in matches:
         # Deshecha enlaces de trailers
         scrapedtitle = scrapertools.htmlclean(scrapedtitle)
@@ -200,7 +182,6 @@ def findvideos(item):
         new_item.infoLabels['episode'] = scrapertools.find_single_match(scrapedtitle, "Episodio (\d+)")
         itemlist.append(new_item.clone(action="play", title=title, url=scrapedurl, contentType="episode"))
-
     # Comprueba si hay otras temporadas
     if not "No hay disponible ninguna Temporada adicional" in data:
         data_temp = scrapertools.find_single_match(data, '(.*?)')
@@ -210,7 +191,7 @@
         matches = scrapertools.find_multiple_matches(data_temp, patron)
         for scrapedurl, scrapedtitle in matches:
             new_item = item.clone()
-            url = urlparse.urljoin(URL_BASE, scrapedurl)
+            url = urlparse.urljoin(host, scrapedurl)
             scrapedtitle = scrapedtitle.capitalize()
             temporada = scrapertools.find_single_match(scrapedtitle, "Temporada (\d+)")
             if temporada != "":
@@ -218,13 +199,7 @@
                 new_item.infoLabels['season'] = temporada
                 new_item.infoLabels['episode'] = ""
             itemlist.append(new_item.clone(action="findvideos", title=scrapedtitle, url=url, text_color="red",
                                            contentType="season"))
-
-    try:
-        from core import tmdb
-        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
-    except:
-        pass
-
+    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
     new_item = item.clone()
     if config.is_xbmc():
         new_item.contextual = True
@@ -236,7 +211,6 @@ def play(item):
     logger.info()
     itemlist = []
-
     if item.extra == "torrent":
         itemlist.append(item.clone())
     else:
diff --git a/plugin.video.alfa/servers/tusfiles.py b/plugin.video.alfa/servers/tusfiles.py
deleted file mode 100755
index 9b389558..00000000
--- a/plugin.video.alfa/servers/tusfiles.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from core import httptools
-from core import scrapertools
-from platformcode import logger
-
-
-def test_video_exists(page_url):
-    logger.info("(page_url='%s')" % page_url)
-
-    if "tusfiles.net" in page_url:
-        data = httptools.downloadpage(page_url).data
-
-        if "File Not Found" in data:
-            return False, "[Tusfiles] El archivo no existe o ha sido borrado"
-        if "download is no longer available" in data:
-            return False, "[Tusfiles] El archivo ya no está disponible"
-
-    return True, ""
-
-
-def get_video_url(page_url, premium=False, user="", password="", video_password=""):
-    logger.info("page_url='%s'" % page_url)
-
-    # Saca el código del vídeo
-    data = httptools.downloadpage(page_url).data.replace("\\", "")
-    video_urls = []
-
-    if "tusfiles.org" in page_url:
-        matches = scrapertools.find_multiple_matches(data,
-                                                     '"label"\s*:\s*(.*?),"type"\s*:\s*"([^"]+)","file"\s*:\s*"([^"]+)"')
-        for calidad, tipo, video_url in matches:
-            tipo = tipo.replace("video/", "")
-            video_urls.append([".%s %sp [tusfiles]" % (tipo, calidad), video_url])
-
-        video_urls.sort(key=lambda it: int(it[0].split("p ", 1)[0].rsplit(" ")[1]))
-    else:
-        matches = scrapertools.find_multiple_matches(data, '')
diff --git a/plugin.video.alfa/servers/vidspot.py b/plugin.video.alfa/servers/vidspot.py
--- a/plugin.video.alfa/servers/vidspot.py
+++ /dev/null
-    if 'Deleted' in data \
-            or 'Removed' in data or 'No such' in data:
-        return False, "No existe o ha sido borrado de vidspot"
-
-    return True, ""
-
-
-def get_video_url(page_url, premium=False, user="", password="", video_password=""):
-    logger.info("url=%s" % page_url)
-
-    # Normaliza la URL
-    videoid = scrapertools.get_match(page_url, "http://vidspot.net/([a-z0-9A-Z]+)")
-    page_url = "http://vidspot.net/embed-%s-728x400.html" % videoid
-    data = scrapertools.cachePage(page_url)
-    if "Access denied" in data:
-        geobloqueo = True
-    else:
-        geobloqueo = False
-
-    if geobloqueo:
-        url = "http://www.videoproxy.co/hide.php"
-        post = "go=%s" % page_url
-        location = scrapertools.get_header_from_response(url, post=post, header_to_get="location")
-        url = "http://www.videoproxy.co/%s" % location
-        data = scrapertools.cachePage(url)
-
-    # Extrae la URL
-    media_url = scrapertools.find_single_match(data, '"file" : "([^"]+)",')
-
-    video_urls = []
-
-    if media_url != "":
-        if geobloqueo:
-            url = "http://www.videoproxy.co/hide.php"
-            post = "go=%s" % media_url
-            location = scrapertools.get_header_from_response(url, post=post,
-                                                             header_to_get="location")
-            media_url = "http://www.videoproxy.co/%s&direct=false" % location
-        else:
-            media_url += "&direct=false"
-
-        video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [vidspot]", media_url])
-
-    for video_url in video_urls:
-        logger.info("%s - %s" % (video_url[0], video_url[1]))
-
-    return video_urls
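
A minimal sketch of the two conventions this patch migrates the channel to, assuming only the behavior visible in the diff itself: httptools.downloadpage() returns a response object whose .data attribute holds the page body (the old scrapertools.downloadpage() returned the body directly), and tmdb.set_infoLabels() enriches a list of items in place from the season/episode infoLabels the channel sets. The URL query and item values below are illustrative, not taken from the site.

# -*- coding: utf-8 -*-
# Sketch only: mirrors the call pattern adopted throughout this patch.
from core import httptools
from core import tmdb
from core.item import Item

host = "https://www.seriecanal.com/"

# Old style: data = scrapertools.downloadpage(host)
# New style: the page body now lives on the response object's .data
data = httptools.downloadpage(host).data

# POST requests keep the same shape; only the trailing .data is new
# (the form body here is a placeholder, not a real site parameter)
data = httptools.downloadpage(host + "index.php?page=member", post="key=value").data

# Items carrying season/episode infoLabels can be enriched in one call,
# replacing the per-channel try/except import blocks this patch removes
itemlist = [Item(contentSerieName="Example", action="findvideos")]
itemlist[0].infoLabels = {'season': 1, 'episode': 2}
tmdb.set_infoLabels(itemlist)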