diff --git a/plugin.video.alfa/channels/allpeliculas.py b/plugin.video.alfa/channels/allpeliculas.py
index 875c87db..6cd93449 100644
--- a/plugin.video.alfa/channels/allpeliculas.py
+++ b/plugin.video.alfa/channels/allpeliculas.py
@@ -33,15 +33,13 @@ SERVERS = {"26": "powvideo", "45": "okru", "75": "openload", "12": "netutv", "65
 list_servers = ['powvideo', 'okru', 'openload', 'netutv', 'thevideos', 'spruto',
                 'stormo', 'idowatch', 'nowvideo', 'fastplay', 'raptu', 'tusfiles']
-host = "http://allpeliculas.com/"
+host = "http://allpeliculas.io/"
 
 
 def mainlist(item):
     logger.info()
     itemlist = []
     item.text_color = color1
-    autoplay.init(item.channel, list_servers, list_quality)
-
     itemlist.append(item.clone(title="Películas", action="lista", fanart="http://i.imgur.com/c3HS8kj.png",
                                url= host + "movies/newmovies?page=1", extra1 = 0,
                                thumbnail=get_thumb('movies', auto=True)))
@@ -51,16 +49,13 @@ def mainlist(item):
                                url= host,
                                thumbnail=get_thumb('colections', auto=True)))
     itemlist.append(item.clone(title="", action=""))
     itemlist.append(item.clone(title="Buscar...", action="search", thumbnail=get_thumb('search', auto=True)))
-    autoplay.show_option(item.channel, itemlist)
-
     return itemlist
 
 
 def colecciones(item):
     logger.info()
     itemlist = []
-
     data = httptools.downloadpage(item.url).data
     patron = 'href="(/peliculas[^"]+).*?'
     patron += 'title_geo">([^<]+).*?'
@@ -143,11 +138,11 @@ def findvideos(item):
     patron += '>([^<]+)'
     matches = scrapertools.find_multiple_matches(data, patron)
     for url, calidad in matches:
+        calidad = scrapertools.find_single_match(calidad, "\d+") + scrapertools.find_single_match(calidad, "\..+")
         itemlist.append(item.clone(
                         channel = item.channel,
                         action = "play",
                         title = calidad,
-                        fulltitle = item.title,
                         thumbnail = item.thumbnail,
                         contentThumbnail = item.thumbnail,
                         url = url,
@@ -159,7 +154,7 @@
     if config.get_videolibrary_support():
         itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
                              action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
-                             fulltitle = item.fulltitle
+                             contentTitle = item.contentTitle
                              ))
 
     # Requerido para FilterTools
@@ -183,31 +178,22 @@ def lista(item):
     dict_param = dict()
     item.infoLabels = {}
     item.text_color = color2
-
     params = '{}'
     if item.extra1 != 0:
         dict_param["genero"] = [item.extra1]
         params = jsontools.dump(dict_param)
-
     data = httptools.downloadpage(item.url, post=params).data
     data = data.replace("<mark>","").replace("<\/mark>","")
     dict_data = jsontools.load(data)
-
     for it in dict_data["items"]:
-        title = it["title"]
-        plot = it["slogan"]
-        rating = it["imdb"]
         year = it["year"]
         url = host + "pelicula/" + it["slug"]
+        title = it["title"] + " (%s)" %year
         thumb = host + it["image"]
         item.infoLabels['year'] = year
-        itemlist.append(item.clone(action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumb,
-                                   plot=plot, context=["buscar_trailer"], contentTitle=title, contentType="movie"))
-
-    try:
-        tmdb.set_infoLabels(itemlist, __modo_grafico__)
-    except:
-        pass
+        itemlist.append(item.clone(action="findvideos", title=title, url=url, thumbnail=thumb,
+                                   context=["buscar_trailer"], contentTitle=it["title"], contentType="movie"))
+    tmdb.set_infoLabels(itemlist, __modo_grafico__)
     pagina = scrapertools.find_single_match(item.url, 'page=([0-9]+)')
     item.url = item.url.replace(pagina, "")
     if pagina == "":
@@ -219,6 +205,7 @@ def lista(item):
                                 ))
     return itemlist
 
+
 def search(item, texto):
     logger.info()
     if texto != "":
@@ -246,12 +233,10 @@ def newest(categoria):
         if itemlist[-1].action == "lista":
             itemlist.pop()
-    # Se captura la excepción, para no interrumpir al canal novedades si un canal falla
     except:
         import sys
         for line in sys.exc_info():
             logger.error("{0}".format(line))
         return []
-
     return itemlist
diff --git a/plugin.video.alfa/channels/cuelgame.json b/plugin.video.alfa/channels/cuelgame.json
deleted file mode 100755
index b85a0b81..00000000
--- a/plugin.video.alfa/channels/cuelgame.json
+++ /dev/null
@@ -1,34 +0,0 @@
-{
-    "id": "cuelgame",
-    "name": "Cuelgame",
-    "active": false,
-    "adult": false,
-    "language": ["cast"],
-    "thumbnail": "cuelgame.png",
-    "banner": "cuelgame.png",
-    "categories": [
-        "torrent",
-        "movie",
-        "tvshow",
-        "documentary",
-        "vos"
-    ],
-    "settings": [
-        {
-            "id": "include_in_global_search",
-            "type": "bool",
-            "label": "Incluir en busqueda global",
-            "default": true,
-            "enabled": true,
-            "visible": true
-        },
-        {
-            "id": "include_in_newest_torrent",
-            "type": "bool",
-            "label": "Incluir en Novedades - Torrent",
-            "default": true,
-            "enabled": true,
-            "visible": true
-        }
-    ]
-}
diff --git a/plugin.video.alfa/channels/cuelgame.py b/plugin.video.alfa/channels/cuelgame.py
deleted file mode 100755
index a6b4b641..00000000
--- a/plugin.video.alfa/channels/cuelgame.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import re
-import urlparse
-
-from core import scrapertools, httptools
-from core.item import Item
-from core.scrapertools import decodeHtmlentities as dhe
-from platformcode import logger
-
-
-def mainlist(item):
-    logger.info()
-    itemlist = []
-    itemlist.append(Item(channel=item.channel, title="[COLOR forestgreen]Videos[/COLOR]", action="scraper",
-                         url="http://cuelgame.net/?category=4",
-                         thumbnail="http://img5a.flixcart.com/image/poster/q/t/d/vintage-camera-collage-sr148-medium-400x400-imadkbnrnbpggqyz.jpeg",
-                         fanart="http://imgur.com/7frGoPL.jpg"))
-    itemlist.append(Item(channel=item.channel, title="[COLOR forestgreen]Buscar[/COLOR]", action="search", url="",
-                         thumbnail="http://images2.alphacoders.com/846/84682.jpg",
-                         fanart="http://imgur.com/1sIHN1r.jpg"))
-    return itemlist
-
-
-def search(item, texto):
-    logger.info()
-    texto = texto.replace(" ", "+")
-    item.url = "http://cuelgame.net/search.php?q=%s" % (texto)
-
-    try:
-        return scraper(item)
-    # Se captura la excepción, para no interrumpir al buscador global si un canal falla
-    except:
-        import sys
-        for line in sys.exc_info():
-            logger.error("%s" % line)
-        return []
-
-
-def scraper(item):
-    logger.info()
-    itemlist = []
-    # Descarga la página
-    data = httptools.downloadpage(item.url).data
-    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|CET", "", data)
-    patron = '
-    if len(matches) > 0:
-        # corrige "&" para la paginación
-        next_page = matches[0].replace("amp;", "")
-        scrapedurl = urlparse.urljoin(item.url, next_page)
-        itemlist.append(Item(channel=item.channel, action="scraper", title="Página siguiente >>", url=scrapedurl,
-                             thumbnail="http://imgur.com/ycPgVVO.png", folder=True))
-    return itemlist
-
-
-def newest(categoria):
-    logger.info()
-    itemlist = []
-    item = Item()
-    try:
-        if categoria == 'torrent':
-            item.url = 'http://cuelgame.net/?category=4'
-            itemlist = scraper(item)
-            if itemlist[-1].action == "Página siguiente >>":
-                itemlist.pop()
-    # Se captura la excepción, para no interrumpir al canal novedades si un canal falla
-    except:
-        import sys
-        for line in sys.exc_info():
-            logger.error("{0}".format(line))
-        return []
-    return itemlist
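One functional addition in the allpeliculas.py hunks above is the rebuilt `calidad` label in `findvideos()`. A minimal, self-contained sketch of what that single line computes; the `find_single_match` stand-in and the sample input string are assumptions for illustration, not channel code:

```python
import re

def find_single_match(data, patron):
    # Rough stand-in for core.scrapertools.find_single_match: first capture
    # group when the pattern defines one, whole match otherwise, "" on a miss.
    m = re.search(patron, data, re.DOTALL)
    if not m:
        return ""
    return m.group(1) if m.groups() else m.group(0)

calidad = "HD-1080p.LAT"  # invented example of a scraped quality string
calidad = find_single_match(calidad, "\\d+") + find_single_match(calidad, "\\..+")
print(calidad)  # -> 1080.LAT
```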
diff --git a/plugin.video.alfa/channels/plusdede.json b/plugin.video.alfa/channels/plusdede.json
index 9a305d15..88418235 100755
--- a/plugin.video.alfa/channels/plusdede.json
+++ b/plugin.video.alfa/channels/plusdede.json
@@ -1,7 +1,7 @@
 {
     "id": "plusdede",
     "name": "Plusdede",
-    "active": true,
+    "active": false,
     "adult": false,
     "language": ["cast"],
     "thumbnail": "https://s18.postimg.cc/e17e98eqh/6_-_4_Isbv_Q3.png",
diff --git a/plugin.video.alfa/channels/puyasubs.py b/plugin.video.alfa/channels/puyasubs.py
index 55485f77..a171741e 100755
--- a/plugin.video.alfa/channels/puyasubs.py
+++ b/plugin.video.alfa/channels/puyasubs.py
@@ -5,8 +5,10 @@ import re
 from core import httptools
 from core import jsontools
 from core import scrapertools
+from core import tmdb
 from core.item import Item
-from platformcode import config, logger
+from megaserver import Client
+from platformcode import config, logger, platformtools
 
 __modo_grafico__ = config.get_setting('modo_grafico', 'puyasubs')
 __perfil__ = config.get_setting('perfil', "puyasubs")
@@ -20,39 +22,36 @@ if __perfil__ < 3:
 else:
     color1 = color2 = color3 = color4 = color5 = ""
 
+host = "http://puya.si"
+
 
 def mainlist(item):
     logger.info()
-
     itemlist = list()
-
     itemlist.append(Item(channel=item.channel, action="listado", title="Novedades Anime", thumbnail=item.thumbnail,
-                         url="http://puya.si/?cat=4", text_color=color1))
+                         url= host + "/?cat=4", text_color=color1))
     itemlist.append(Item(channel=item.channel, action="listado", title="Novedades Doramas", thumbnail=item.thumbnail,
-                         url="http://puya.si/?cat=142", text_color=color1))
+                         url= host + "/?cat=142", text_color=color1))
     itemlist.append(Item(channel=item.channel, action="", title="Descargas", text_color=color2))
     itemlist.append(Item(channel=item.channel, action="descargas", title=" Descargas Animes y Doramas en proceso",
-                         thumbnail=item.thumbnail, url="http://puya.si/?page_id=25501", text_color=color1))
+                         thumbnail=item.thumbnail, url= host + "/?page_id=25501", text_color=color1))
     itemlist.append(Item(channel=item.channel, action="descargas", title=" Descargas Animes Finalizados",
-                         thumbnail=item.thumbnail, url="http://puya.si/?page_id=15388", text_color=color1))
+                         thumbnail=item.thumbnail, url= host + "/?page_id=15388", text_color=color1))
     itemlist.append(Item(channel=item.channel, action="letra", title=" Descargas Animes Finalizados por Letra",
-                         thumbnail=item.thumbnail, url="http://puya.si/?page_id=15388", text_color=color1))
+                         thumbnail=item.thumbnail, url= host + "/?page_id=15388", text_color=color1))
     itemlist.append(Item(channel=item.channel, action="descargas", title=" Descargas Doramas Finalizados",
-                         thumbnail=item.thumbnail, url="http://puya.si/?page_id=25507", text_color=color1))
+                         thumbnail=item.thumbnail, url= host + "/?page_id=25507", text_color=color1))
     itemlist.append(Item(channel=item.channel, action="descargas", title=" Descargas Películas y Ovas",
-                         thumbnail=item.thumbnail, url="http://puya.si/?page_id=25503", text_color=color1))
+                         thumbnail=item.thumbnail, url= host + "/?page_id=25503", text_color=color1))
     itemlist.append(Item(channel=item.channel, action="torrents", title="Lista de Torrents", thumbnail=item.thumbnail,
                          url="https://www.frozen-layer.com/buscar/descargas", text_color=color1))
-
     itemlist.append(Item(channel=item.channel, action="search", title="Buscar anime/dorama/película",
-                         thumbnail=item.thumbnail, url="http://puya.si/?s=", text_color=color3))
-
+                         thumbnail=item.thumbnail, url= host + "/?s=", text_color=color3))
     itemlist.append(item.clone(title="Configurar canal", action="configuracion", text_color=color5, folder=False))
     return itemlist
 
 
 def configuracion(item):
-    from platformcode import platformtools
     ret = platformtools.show_channel_settings()
     platformtools.itemlist_refresh()
     return ret
@@ -73,9 +72,7 @@ def search(item, texto):
 
 def listado(item):
     logger.info()
-
     itemlist = list()
-
     data = httptools.downloadpage(item.url).data
     bloques = scrapertools.find_multiple_matches(data, '(.*?)')
     patron = 'href="([^"]+)".*?>(.*?).*?(?:(.*?)|)'
@@ -96,27 +93,22 @@ def listado(item):
         itemlist.append(Item(channel=item.channel, action="findvideos", url=url, title=title, thumbnail=thumb,
                              contentTitle=contenttitle, show=contenttitle, contentType=tipo,
                              infoLabels={'filtro': filtro_tmdb}, text_color=color1))
-
     if ("cat=4" in item.url or item.extra == "busqueda") and not item.extra == "novedades":
         from core import tmdb
         tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
-
     next_page = scrapertools.find_single_match(data, ".*?
     if len(matches) > item.pagina + 20:
         pagina = item.pagina + 20
         itemlist.append(Item(channel=item.channel, action="descargas", url=item.url, title=">> Página Siguiente",
                              thumbnail=item.thumbnail, pagina=pagina, letra=item.letra, text_color=color2))
-
     return itemlist
 
 
 def letra(item):
     logger.info()
-
     itemlist = list()
     data = httptools.downloadpage(item.url).data
     patron = '  • (?:|)([A-z#]{1})(?:|)  • '
@@ -163,20 +148,16 @@ def letra(item):
     for match in matches:
         itemlist.append(Item(channel=item.channel, title=match, action="descargas", letra=match, url=item.url,
                              thumbnail=item.thumbnail, text_color=color1))
-
     return itemlist
 
 
 def torrents(item):
     logger.info()
-
     itemlist = list()
     if not item.pagina:
         item.pagina = 0
-
     post = "utf8=%E2%9C%93&busqueda=puyasubs&search=Buscar&tab=anime&con_seeds=con_seeds"
     data = httptools.downloadpage(item.url, post).data
-
     patron = ".*?href='([^']+)' title='descargar torrent'>.*?title='informacion de (.*?)'.*?.*?(.*?)" \
              ".*?(\d+).*?(\d+)"
     matches = scrapertools.find_multiple_matches(data, patron)
@@ -184,20 +165,15 @@ def torrents(item):
         contentTitle = title
         if "(" in contentTitle:
             contentTitle = contentTitle.split("(")[0]
-
         size = size.strip()
         filtro_tmdb = {"original_language": "ja"}.items()
         title += " [COLOR %s][Semillas:%s[/COLOR]|[COLOR %s]Leech:%s[/COLOR]|%s]" % (
             color4, seeds, color5, leechers, size)
         url = "https://www.frozen-layer.com" + url
-
         itemlist.append(Item(channel=item.channel, action="play", url=url, title=title, contentTitle=contentTitle,
                              server="torrent", show=contentTitle, contentType="tvshow", text_color=color1,
                              infoLabels={'filtro': filtro_tmdb}))
-
-    from core import tmdb
     tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
-
     if len(matches) > item.pagina + 25:
         pagina = item.pagina + 25
         itemlist.append(Item(channel=item.channel, action="torrents", url=item.url, title=">> Página Siguiente",
@@ -208,43 +184,39 @@ def torrents(item):
         next_page = "https://www.frozen-layer.com" + next_page
         itemlist.append(Item(channel=item.channel, action="torrents", url=next_page, title=">> Página Siguiente",
                              thumbnail=item.thumbnail, pagina=0, text_color=color2))
-
     return itemlist
 
 
 def findvideos(item):
     logger.info()
-    if item.infoLabels["tmdb_id"] and not item.infoLabels["plot"]:
-        from core import tmdb
-        tmdb.set_infoLabels_item(item, True, idioma_busqueda="en")
-
-    itemlist = list()
-
+    itemlist = []
     data = httptools.downloadpage(item.url).data
+    data2 = data.replace("\n","")
     idiomas = scrapertools.find_single_match(data, 'Subtitulo:\s*(.*?)')
-
     calidades = ['720p', '1080p']
-
     torrentes = scrapertools.find_multiple_matches(data, '
-        if ">720p" in data and ">1080p" in data:
-            try:
-                title = "[%s] %s" % (calidades[i], title)
-            except:
-                pass
-        itemlist.append(item.clone(title=title, action="play", url=enlace, server="torrent"))
-
+        if ">720p" in data2 and ">1080p" in data2:
+            title = "[%s] %s" % (calidades[i], title)
+        if "nyaa" in enlace:
+            data1 = httptools.downloadpage(url=enlace).data
+            enlace = "https://nyaa.si" + scrapertools.find_single_match(data1, 'a href="(/do[^"]+)')
+        itemlist.append(item.clone(title=title, action="play", url=enlace, server="torrent"))
+        enlace = scrapertools.find_single_match(data1, '
-        if ">720p" in data and ">1080p" in data:
+        if ">720p" in data and ">1080p" in data2:
             try:
                 title = "[%s] %s" % (calidades[i], title)
             except:
                 pass
             itemlist.append(item.clone(title=title, action="play", url=enlace, server="onefichier"))
-    safelink = scrapertools.find_multiple_matches(data, '.*?href='([^']+)'")
-    if enlace:
-        itemlist.append(item.clone(url=enlace))
-    else:
-        itemlist.append(item)
-    return itemlist
@@ -365,7 +308,7 @@ def newest(categoria):
     logger.info()
     item = Item()
     try:
-        item.url = "http://puya.si/?cat=4"
+        item.url = host + "/?cat=4"
         item.extra = "novedades"
         itemlist = listado(item)
 
@@ -373,12 +316,10 @@
             itemlist.pop()
         for it in itemlist:
             it.contentTitle = it.title
-    # Se captura la excepción, para no interrumpir al canal novedades si un canal falla
     except:
         import sys
         for line in sys.exc_info():
             logger.error("{0}".format(line))
         return []
-
     return itemlist
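The nyaa branch added to `findvideos()` above resolves a nyaa.si listing page to its direct download link before queueing the torrent. A hedged sketch of that flow, under the assumption that plain `urllib2` (the add-on is Python 2) can stand in for `httptools.downloadpage`; the regex mirrors the one in the diff:

```python
import re
import urllib2  # assumption: stands in for core.httptools in this sketch

def resolve_nyaa(enlace):
    # Direct .torrent links pass through; nyaa listing pages need a second
    # request to pull the "/download/..."-style href out of the HTML.
    if "nyaa" not in enlace:
        return enlace
    data1 = urllib2.urlopen(enlace).read()
    m = re.search(r'a href="(/do[^"]+)', data1)  # same pattern as the diff
    return "https://nyaa.si" + m.group(1) if m else enlace
```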
diff --git a/plugin.video.alfa/channels/repelis.py b/plugin.video.alfa/channels/repelis.py
index dff9c978..508ed981 100644
--- a/plugin.video.alfa/channels/repelis.py
+++ b/plugin.video.alfa/channels/repelis.py
@@ -9,11 +9,11 @@ from channelselector import get_thumb
 from channels import autoplay
 from channels import filtertools
 from core import httptools
+from core import jsontools
 from core import scrapertools
 from core import servertools
 from core import tmdb
 from core.item import Item
-from lib import jsunpack
 from platformcode import config, logger, platformtools
diff --git a/plugin.video.alfa/channels/seriecanal.json b/plugin.video.alfa/channels/seriecanal.json
deleted file mode 100644
index e53459ae..00000000
--- a/plugin.video.alfa/channels/seriecanal.json
+++ /dev/null
@@ -1,61 +0,0 @@
-{
-    "id": "seriecanal",
-    "name": "Seriecanal",
-    "active": false,
-    "adult": false,
-    "language": ["cast"],
-    "thumbnail": "http://i.imgur.com/EwMK8Yd.png",
-    "banner": "seriecanal.png",
-    "categories": [
-        "tvshow",
-        "vos"
-    ],
-    "settings": [
-        {
-            "id": "include_in_global_search",
-            "type": "bool",
-            "label": "Incluir en busqueda global",
-            "default": false,
-            "enabled": true,
-            "visible": true
-        },
-        {
-            "id": "modo_grafico",
-            "type": "bool",
-            "label": "Buscar información extra",
-            "default": true,
-            "enabled": true,
-            "visible": true
-        },
-        {
-            "id": "user",
-            "type": "text",
-            "label": "Usuario",
-            "color": "0xFFd50b0b",
-            "enabled": true,
-            "visible": true
-        },
-        {
-            "id": "password",
-            "type": "text",
-            "label": "Contraseña",
-            "color": "0xFFd50b0b",
-            "enabled": true,
-            "visible": true,
-            "hidden": true
-        },
-        {
-            "id": "perfil",
-            "type": "list",
-            "label": "Perfil de color",
-            "default": 2,
-            "enabled": true,
-            "visible": true,
-            "lvalues": [
-                "Perfil 3",
-                "Perfil 2",
-                "Perfil 1"
-            ]
-        }
-    ]
-}
\ No newline at end of file
diff --git a/plugin.video.alfa/channels/seriecanal.py b/plugin.video.alfa/channels/seriecanal.py
deleted file mode 100644
index 843966c8..00000000
--- a/plugin.video.alfa/channels/seriecanal.py
+++ /dev/null
@@ -1,226 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import re
-import urllib
-import urlparse
-
-from core import httptools
-from core import scrapertools
-from core import servertools
-from core import tmdb
-from platformcode import config, logger
-
-__modo_grafico__ = config.get_setting('modo_grafico', "seriecanal")
-__perfil__ = config.get_setting('perfil', "seriecanal")
-
-# Fijar perfil de color
-perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'],
-          ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'],
-          ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']]
-color1, color2, color3 = perfil[__perfil__]
-
-host = "https://www.seriecanal.com/"
-
-
-def login():
-    logger.info()
-    data = httptools.downloadpage(host).data
-    if "Cerrar Sesion" in data:
-        return True, ""
-    usuario = config.get_setting("user", "seriecanal")
-    password = config.get_setting("password", "seriecanal")
-    if usuario == "" or password == "":
-        return False, 'Regístrate en www.seriecanal.com e introduce tus datos en "Configurar Canal"'
-    else:
-        post = urllib.urlencode({'username': usuario, 'password': password})
-        data = httptools.downloadpage(host + "index.php?page=member&do=login&tarea=acceder", post=post).data
-        if "Bienvenid@, se ha identificado correctamente en nuestro sistema" in data:
-            return True, ""
-        else:
-            return False, "Error en el login. El usuario y/o la contraseña no son correctos"
-
-
-def mainlist(item):
-    logger.info()
-    itemlist = []
-    item.text_color = color1
-    result, message = login()
-    if result:
-        itemlist.append(item.clone(action="series", title="Últimos episodios", url=host))
-        itemlist.append(item.clone(action="genero", title="Series por género"))
-        itemlist.append(item.clone(action="alfabetico", title="Series por orden alfabético"))
-        itemlist.append(item.clone(action="search", title="Buscar..."))
-    else:
-        itemlist.append(item.clone(action="", title=message, text_color="red"))
-    itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
-    return itemlist
-
-
-def configuracion(item):
-    from platformcode import platformtools
-    ret = platformtools.show_channel_settings()
-    platformtools.itemlist_refresh()
-    return ret
-
-
-def search(item, texto):
-    logger.info()
-    item.url = host + "index.php?page=portada&do=category&method=post&category_id=0&order=" \
-               "C_Create&view=thumb&pgs=1&p2=1"
-    try:
-        post = "keyserie=" + texto
-        item.extra = post
-        return series(item)
-    # Se captura la excepción, para no interrumpir al buscador global si un canal falla
-    except:
-        import sys
-        for line in sys.exc_info():
-            logger.error("%s" % line)
-        return []
-
-
-def genero(item):
-    logger.info()
-    itemlist = []
-    data = httptools.downloadpage(host).data
-    data = scrapertools.find_single_match(data, '(.*?)')
-    matches = scrapertools.find_multiple_matches(data, '([^"]+)')
-    for scrapedurl, scrapedtitle in matches:
-        scrapedtitle = scrapedtitle.capitalize()
-        url = urlparse.urljoin(host, scrapedurl)
-        itemlist.append(item.clone(action="series", title=scrapedtitle, url=url))
-    return itemlist
-
-
-def alfabetico(item):
-    logger.info()
-    itemlist = []
-    data = httptools.downloadpage(host).data
-    data = scrapertools.find_single_match(data, '(.*?)')
-    matches = scrapertools.find_multiple_matches(data, '([^"]+)')
-    for scrapedurl, scrapedtitle in matches:
-        url = urlparse.urljoin(host, scrapedurl)
-        itemlist.append(item.clone(action="series", title=scrapedtitle, url=url))
-    return itemlist
-
-
-def series(item):
-    logger.info()
-    itemlist = []
-    item.infoLabels = {}
-    item.text_color = color2
-    if item.extra != "":
-        data = httptools.downloadpage(item.url, post=item.extra).data
-    else:
-        data = httptools.downloadpage(item.url).data
-    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
-
-    patron = '([^"]+).*?([^"]+).*?' \
-             '(.*?)'
-    matches = scrapertools.find_multiple_matches(data, patron)
-    for scrapedthumbnail, scrapedurl, scrapedplot, scrapedtitle, scrapedtemp, scrapedepi in matches:
-        title = scrapedtitle + " - " + scrapedtemp + " - " + scrapedepi
-        url = urlparse.urljoin(host, scrapedurl)
-        temporada = scrapertools.find_single_match(scrapedtemp, "\d+")
-        episode = scrapertools.find_single_match(scrapedepi, "\d+")
-        #item.contentType = "tvshow"
-        if temporada != "":
-            item.infoLabels['season'] = temporada
-            #item.contentType = "season"
-        if episode != "":
-            item.infoLabels['episode'] = episode
-            #item.contentType = "episode"
-        itemlist.append(item.clone(action="findvideos", title=title, url=url,
-                                   contentSerieName=scrapedtitle,
-                                   context=["buscar_trailer"]))
-    tmdb.set_infoLabels(itemlist)
-    # Extra marca siguiente página
-    next_page = scrapertools.find_single_match(data, '
-    data_download = scrapertools.find_single_match(data, 'Episodio - Enlaces de Descarga(.*?)')
-    patron = '([^"]+)'
-    matches = scrapertools.find_multiple_matches(data_download, patron)
-    for scrapedurl, scrapedepi in matches:
-        new_item = item.clone()
-        if "Episodio" not in scrapedepi:
-            scrapedtitle = "[Torrent] Episodio " + scrapedepi
-        else:
-            scrapedtitle = "[Torrent] " + scrapedepi
-        scrapedtitle = scrapertools.htmlclean(scrapedtitle)
-        new_item.infoLabels['episode'] = scrapertools.find_single_match(scrapedtitle, "Episodio (\d+)")
-        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "]")
-        itemlist.append(new_item.clone(action="play", title=scrapedtitle, url=scrapedurl, server="torrent",
-                                       contentType="episode"))
-    # Busca en la seccion online
-    data_online = scrapertools.find_single_match(data, "Enlaces de Visionado Online(.*?)")
-    patron = '([^"]+)'
-    matches = scrapertools.find_multiple_matches(data_online, patron)
-    for scrapedurl, scrapedthumb, scrapedtitle in matches:
-        # Deshecha enlaces de trailers
-        scrapedtitle = scrapertools.htmlclean(scrapedtitle)
-        if (scrapedthumb != "images/series/youtube.png") & (scrapedtitle != "Trailer"):
-            new_item = item.clone()
-            server = scrapertools.find_single_match(scrapedthumb, "images/series/(.*?).png")
-            title = "[" + server.capitalize() + "]" + " " + scrapedtitle
-
-            new_item.infoLabels['episode'] = scrapertools.find_single_match(scrapedtitle, "Episodio (\d+)")
-            itemlist.append(new_item.clone(action="play", title=title, url=scrapedurl, contentType="episode"))
-    # Comprueba si hay otras temporadas
-    if not "No hay disponible ninguna Temporada adicional" in data:
-        data_temp = scrapertools.find_single_match(data, '(.*?)')
-        data_temp = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data_temp)
-        patron = '([^"]+)'
-        matches = scrapertools.find_multiple_matches(data_temp, patron)
-        for scrapedurl, scrapedtitle in matches:
-            new_item = item.clone()
-            url = urlparse.urljoin(host, scrapedurl)
-            scrapedtitle = scrapedtitle.capitalize()
-            temporada = scrapertools.find_single_match(scrapedtitle, "Temporada (\d+)")
-            if temporada != "":
-                new_item.infoLabels['season'] = temporada
-                new_item.infoLabels['episode'] = ""
-            itemlist.append(new_item.clone(action="findvideos", title=scrapedtitle, url=url, text_color="red",
-                                           contentType="season"))
-    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
-    new_item = item.clone()
-    if config.is_xbmc():
-        new_item.contextual = True
-    itemlist.append(new_item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
-                                   text_color="magenta"))
-    return itemlist
-
-
-def play(item):
-    logger.info()
-    itemlist = []
-    if item.extra == "torrent":
-        itemlist.append(item.clone())
-    else:
-        # Extrae url de enlace bit.ly
-        if item.url.startswith("http://bit.ly/"):
-            item.url = scrapertools.getLocationHeaderFromResponse(item.url)
-        video_list = servertools.findvideos(item.url)
-        if video_list:
-            url = video_list[0][1]
-            server = video_list[0][2]
-            itemlist.append(item.clone(server=server, url=url))
-
-    return itemlist
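Since the deleted seriecanal.py above gated every listing behind `login()`, here is a condensed sketch of that flow for the record: GET the portal, short-circuit on the "Cerrar Sesion" marker, otherwise POST the stored credentials and scan for the welcome string. `urllib2` is an assumption standing in for `httptools` (which also persists the session cookie; this sketch does not):

```python
import urllib
import urllib2  # assumption: stand-in for core.httptools (Python 2)

def login(host, usuario, password):
    # Already-authenticated sessions expose a "Cerrar Sesion" link.
    if "Cerrar Sesion" in urllib2.urlopen(host).read():
        return True, ""
    if not usuario or not password:
        return False, "Introduce tus datos en 'Configurar Canal'"
    post = urllib.urlencode({'username': usuario, 'password': password})
    data = urllib2.urlopen(host + "index.php?page=member&do=login&tarea=acceder", post).read()
    if "se ha identificado correctamente" in data:
        return True, ""
    return False, "El usuario y/o la contraseña no son correctos"
```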
diff --git a/plugin.video.alfa/channels/seriesyonkis.json b/plugin.video.alfa/channels/seriesyonkis.json
deleted file mode 100755
index c1f15fd5..00000000
--- a/plugin.video.alfa/channels/seriesyonkis.json
+++ /dev/null
@@ -1,25 +0,0 @@
-{
-    "id": "seriesyonkis",
-    "name": "Seriesyonkis",
-    "active": false,
-    "adult": false,
-    "language": ["cast"],
-    "thumbnail": "seriesyonkis.png",
-    "banner": "seriesyonkis.png",
-    "fanart": "seriesyonkis.jpg",
-    "categories": [
-        "tvshow",
-        "anime",
-        "vos"
-    ],
-    "settings": [
-        {
-            "id": "include_in_global_search",
-            "type": "bool",
-            "label": "Incluir en busqueda global",
-            "default": false,
-            "enabled": true,
-            "visible": true
-        }
-    ]
-}
\ No newline at end of file
diff --git a/plugin.video.alfa/channels/seriesyonkis.py b/plugin.video.alfa/channels/seriesyonkis.py
deleted file mode 100755
index c9b7c9e4..00000000
--- a/plugin.video.alfa/channels/seriesyonkis.py
+++ /dev/null
@@ -1,197 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import re
-import urlparse
-
-from core import httptools
-from core import scrapertools
-from core import servertools
-from core.item import Item
-from platformcode import config, logger
-
-host = 'https://yonkis.to'
-
-
-def mainlist(item):
-    logger.info()
-
-    itemlist = list()
-    itemlist.append(Item(channel=item.channel, action="alfabetico", title="Listado alfabetico", url=host))
-    itemlist.append(Item(channel=item.channel, action="mas_vistas", title="Series más vistas",
-                         url=host + "/series-mas-vistas"))
-    itemlist.append(Item(channel=item.channel, action="ultimos", title="Últimos episodios añadidos",
-                         url=host))
-    itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url=host + "/buscar/serie"))
-
-    return itemlist
-
-
-def alfabetico(item):
-    logger.info()
-
-    itemlist = list()
-
-    itemlist.append(Item(channel=item.channel, action="series", title="0-9", url=host + "/lista-de-series/0-9"))
-    for letra in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
-        itemlist.append(Item(channel=item.channel, action="series", title=letra, url=host+"/lista-de-series/"+letra))
-
-    return itemlist
-
-
-def mas_vistas(item):
-    logger.info()
-
-    data = httptools.downloadpage(item.url).data
-    matches = re.compile('', re.S).findall(data)
-
-    itemlist = []
-    for scrapedtitle, scrapedurl, scrapedthumbnail in matches:
-        scrapedurl = urlparse.urljoin(item.url, scrapedurl)
-        scrapedthumbnail = urlparse.urljoin(item.url, scrapedthumbnail.replace("/90/", "/150/"))
-
-        itemlist.append(
-            Item(channel=item.channel, action="episodios", title=scrapedtitle, fulltitle=scrapedtitle, url=scrapedurl,
-                 thumbnail=scrapedthumbnail, show=scrapedtitle, fanart=item.fanart))
-
-    return itemlist
-
-
-def search(item, texto):
-    logger.info()
-
-    itemlist = []
-    post = "keyword=%s&search_type=serie" % texto
-    data = httptools.downloadpage(item.url, post=post).data
-
-    try:
-        patron = '([^<]+)'
-        matches = re.compile(patron, re.DOTALL).findall(data)
-        for scrapedurl, scrapedtitle, scrapedthumb, scrapedplot in matches:
-            title = scrapedtitle.strip()
-            url = host + scrapedurl
-            thumb = host + scrapedthumb.replace("/90/", "/150/")
-            plot = re.sub(r"\n|\r|\t|\s{2,}", "", scrapedplot.strip())
-            logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumb + "]")
-
-            itemlist.append(Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=url,
-                                 thumbnail=thumb, plot=plot, show=title))
-
-        return itemlist
-    # Se captura la excepción, para no interrumpir al buscador global si un canal falla
-    except:
-        import sys
-        for line in sys.exc_info():
-            logger.error("%s" % line)
-        return []
-
-
-def ultimos(item):
-    logger.info()
-
-    itemlist = []
-
-    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
-    logger.debug("data %s" % data)
-    matches = re.compile('data-href="([^"]+)" data-src="([^"]+)" data-alt="([^"]+)".*?]+>(.*?)', re.S).findall(data)
-
-    for url, thumb, show, title in matches:
-
-        url = host + url
-
-        itemlist.append(Item(channel=item.channel, title=title, url=url, thumbnail=thumb, show=show.strip(),
-                             action="findvideos", fulltitle=title))
-
-    return itemlist
-
-
-def series(item):
-    logger.info()
-    itemlist = []
-
-    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
-
-    matches = scrapertools.find_single_match(data, '(.*?)')
-    matches = re.compile('title="([^"]+)" href="([^"]+)"', re.S).findall(matches)
-    for title, url in matches:
-        itemlist.append(Item(channel=item.channel, action="episodios", title=title, fulltitle=title,
-                             url=urlparse.urljoin(item.url, url), thumbnail=item.thumbnail, show=title))
-
-    # Paginador
-    matches = re.compile('>', re.S).findall(data)
-
-    paginador = None
-    if len(matches) > 0:
-        paginador = Item(channel=item.channel, action="series", title="!Página siguiente",
-                         url=urlparse.urljoin(item.url, matches[0]), thumbnail=item.thumbnail, show=item.show)
-
-    if paginador and len(itemlist) > 0:
-        itemlist.insert(0, paginador)
-        itemlist.append(paginador)
-
-    return itemlist
-
-
-def episodios(item):
-    logger.info()
-
-    itemlist = []
-
-    # Descarga la pagina
-    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
-
-    pattern = '(.*?)(.*?)'
-    matches = re.compile(pattern, re.S).findall(data)
-
-    for url, s_e, title in matches:
-        url = host + url
-        title = s_e.strip() + title
-        itemlist.append(Item(channel=item.channel, title=title, url=url, thumbnail=thumb, show=item.show, plot=plot,
-                             action="findvideos", fulltitle=title))
-
-    if config.get_videolibrary_support():
-        itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
-                             action="add_serie_to_library", extra="episodios", show=item.show))
-        itemlist.append(Item(channel=item.channel, title="Descargar todos los episodios de la serie", url=item.url,
-                             action="download_all_episodes", extra="episodios", show=item.show))
-
-    return itemlist
-
-
-def findvideos(item):
-    logger.info()
-    itemlist = []
-
-    # Descarga la pagina
-    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
-
-    pattern = ']+>]+alt="([^"]+)" />
-    Cogiendo en el bosque
-
-    Cogiendo en el bosque
-
-    '''
-    patronvideos = '(.*?)
-            duracion = re.compile('(.+?)<').findall(match)[0]
-        except:
-            try:
-                duracion = re.compile('\((.+?)\)
-    '''Siguiente '''
-    patronsiguiente = 'Siguiente '
-    siguiente = re.compile(patronsiguiente, re.DOTALL).findall(data)
-    if len(siguiente) > 0:
-        scrapedurl = urlparse.urljoin(url, siguiente[0])
-        itemlist.append(Item(channel=item.channel, action="novedades", title="!Next page", url=scrapedurl, folder=True))
-
-    return itemlist
-
-
-def masVistos(item):
-    logger.info()
-
-    itemlist = []
-    itemlist.append(
-        Item(channel=item.channel, title="Hoy", action="novedades", url="http://tuporno.tv/hoy", folder=True))
-    itemlist.append(Item(channel=item.channel, title="Recientes", action="novedades", url="http://tuporno.tv/recientes",
-                         folder=True))
-    itemlist.append(
-        Item(channel=item.channel, title="Semana", action="novedades", url="http://tuporno.tv/semana", folder=True))
-    itemlist.append(
-        Item(channel=item.channel, title="Mes", action="novedades", url="http://tuporno.tv/mes", folder=True))
-    itemlist.append(
-        Item(channel=item.channel, title="Año", action="novedades", url="http://tuporno.tv/ano", folder=True))
-    return itemlist
-
-
-def categorias(item):
-    logger.info()
-
-    url = item.url
-    # ------------------------------------------------------
-    # Descarga la página
-    # ------------------------------------------------------
-    data = scrapertools.cachePage(url)
-    # logger.info(data)
-    # ------------------------------------------------------
-    # Extrae las entradas
-    # ------------------------------------------------------
-    # seccion categorias
-    # Patron de las entradas
-    if url == "http://tuporno.tv/categorias/":
-        patronvideos = '  • |  |  |  |
-    |-\s", "", data)
-        patronvideos = '  • '
-    matches = re.compile(patronvideos, re.DOTALL).findall(data)
-
-    if len(matches) > 0:
-        itemlist = []
-        for match in matches:
-            # Titulo
-            scrapedtitle = match[2].replace("", "")
-            scrapedtitle = scrapedtitle.replace("", "")
-            scrapedurl = urlparse.urljoin("http://tuporno.tv/", match[0])
-            scrapedthumbnail = urlparse.urljoin("http://tuporno.tv/", match[1])
-            scrapedplot = ""
-            duracion = match[3]
-
-            itemlist.append(
-                Item(channel=item.channel, action="play", title=scrapedtitle + " [" + duracion + "]", url=scrapedurl,
-                     thumbnail=scrapedthumbnail, plot=scrapedplot, server="Directo", folder=False))
-
-    '''Siguiente '''
-    patronsiguiente = 'Siguiente '
-    siguiente = re.compile(patronsiguiente, re.DOTALL).findall(data)
-    if len(siguiente) > 0:
-        patronultima = '