From 6be05f070941be7071e39638a1b35a32dea281e9 Mon Sep 17 00:00:00 2001
From: Intel1
Date: Wed, 19 Sep 2018 08:13:40 -0500
Subject: [PATCH 1/2] Remove and deactivate channels. cuelgame, seriecanal,
 seriesyonkis, tupornotv: removed; plusdede: deactivated

---
 plugin.video.alfa/channels/cuelgame.json     |  34 ---
 plugin.video.alfa/channels/cuelgame.py       |  97 ------
 plugin.video.alfa/channels/plusdede.json     |   2 +-
 plugin.video.alfa/channels/seriecanal.py     | 226 ----------------
 plugin.video.alfa/channels/seriesyonkis.json |  25 --
 plugin.video.alfa/channels/seriesyonkis.py   | 197 --------------
 plugin.video.alfa/channels/tupornotv.json    |  22 --
 plugin.video.alfa/channels/tupornotv.py      | 264 -------------------
 8 files changed, 1 insertion(+), 866 deletions(-)
 delete mode 100755 plugin.video.alfa/channels/cuelgame.json
 delete mode 100755 plugin.video.alfa/channels/cuelgame.py
 delete mode 100644 plugin.video.alfa/channels/seriecanal.py
 delete mode 100755 plugin.video.alfa/channels/seriesyonkis.json
 delete mode 100755 plugin.video.alfa/channels/seriesyonkis.py
 delete mode 100755 plugin.video.alfa/channels/tupornotv.json
 delete mode 100755 plugin.video.alfa/channels/tupornotv.py

diff --git a/plugin.video.alfa/channels/cuelgame.json b/plugin.video.alfa/channels/cuelgame.json
deleted file mode 100755
index b85a0b81..00000000
--- a/plugin.video.alfa/channels/cuelgame.json
+++ /dev/null
@@ -1,34 +0,0 @@
-{
-    "id": "cuelgame",
-    "name": "Cuelgame",
-    "active": false,
-    "adult": false,
-    "language": ["cast"],
-    "thumbnail": "cuelgame.png",
-    "banner": "cuelgame.png",
-    "categories": [
-        "torrent",
-        "movie",
-        "tvshow",
-        "documentary",
-        "vos"
-    ],
-    "settings": [
-        {
-            "id": "include_in_global_search",
-            "type": "bool",
-            "label": "Incluir en busqueda global",
-            "default": true,
-            "enabled": true,
-            "visible": true
-        },
-        {
-            "id": "include_in_newest_torrent",
-            "type": "bool",
-            "label": "Incluir en Novedades - Torrent",
-            "default": true,
-            "enabled": true,
-            "visible": true
-        }
-    ]
-}
diff --git a/plugin.video.alfa/channels/cuelgame.py b/plugin.video.alfa/channels/cuelgame.py
deleted file mode 100755
index a6b4b641..00000000
--- a/plugin.video.alfa/channels/cuelgame.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import re
-import urlparse
-
-from core import scrapertools, httptools
-from core.item import Item
-from core.scrapertools import decodeHtmlentities as dhe
-from platformcode import logger
-
-
-def mainlist(item):
-    logger.info()
-    itemlist = []
-    itemlist.append(Item(channel=item.channel, title="[COLOR forestgreen]Videos[/COLOR]", action="scraper",
-                         url="http://cuelgame.net/?category=4",
-                         thumbnail="http://img5a.flixcart.com/image/poster/q/t/d/vintage-camera-collage-sr148-medium-400x400-imadkbnrnbpggqyz.jpeg",
-                         fanart="http://imgur.com/7frGoPL.jpg"))
-    itemlist.append(Item(channel=item.channel, title="[COLOR forestgreen]Buscar[/COLOR]", action="search", url="",
-                         thumbnail="http://images2.alphacoders.com/846/84682.jpg",
-                         fanart="http://imgur.com/1sIHN1r.jpg"))
-    return itemlist
-
-
-def search(item, texto):
-    logger.info()
-    texto = texto.replace(" ", "+")
-    item.url = "http://cuelgame.net/search.php?q=%s" % (texto)
-
-    try:
-        return scraper(item)
-    # Catch the exception so the global search is not interrupted if a channel fails
-    except:
-        import sys
-        for line in sys.exc_info():
-            logger.error("%s" % line)
-        return []
-
-
-def scraper(item):
-    logger.info()
-    itemlist = []
-    # Download the page
-    data = httptools.downloadpage(item.url).data
-    data = re.sub(r"\n|\r|\t|\s{2}| |CET", "", data)
"", data) - patron = '

0: - # corrige "&" para la paginación - next_page = matches[0].replace("amp;", "") - scrapedurl = urlparse.urljoin(item.url, next_page) - itemlist.append(Item(channel=item.channel, action="scraper", title="Página siguiente >>", url=scrapedurl, - thumbnail="http://imgur.com/ycPgVVO.png", folder=True)) - return itemlist - - -def newest(categoria): - logger.info() - itemlist = [] - item = Item() - try: - if categoria == 'torrent': - item.url = 'http://cuelgame.net/?category=4' - itemlist = scraper(item) - if itemlist[-1].action == "Página siguiente >>": - itemlist.pop() - # Se captura la excepción, para no interrumpir al canal novedades si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("{0}".format(line)) - return [] - return itemlist diff --git a/plugin.video.alfa/channels/plusdede.json b/plugin.video.alfa/channels/plusdede.json index 9a305d15..88418235 100755 --- a/plugin.video.alfa/channels/plusdede.json +++ b/plugin.video.alfa/channels/plusdede.json @@ -1,7 +1,7 @@ { "id": "plusdede", "name": "Plusdede", - "active": true, + "active": false, "adult": false, "language": ["cast"], "thumbnail": "https://s18.postimg.cc/e17e98eqh/6_-_4_Isbv_Q3.png", diff --git a/plugin.video.alfa/channels/seriecanal.py b/plugin.video.alfa/channels/seriecanal.py deleted file mode 100644 index 843966c8..00000000 --- a/plugin.video.alfa/channels/seriecanal.py +++ /dev/null @@ -1,226 +0,0 @@ -# -*- coding: utf-8 -*- - -import re -import urllib -import urlparse - -from core import httptools -from core import scrapertools -from core import servertools -from core import tmdb -from platformcode import config, logger - -__modo_grafico__ = config.get_setting('modo_grafico', "seriecanal") -__perfil__ = config.get_setting('perfil', "seriecanal") - -# Fijar perfil de color -perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'], - ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'], - ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']] -color1, color2, color3 = perfil[__perfil__] - -host = "https://www.seriecanal.com/" - - -def login(): - logger.info() - data = httptools.downloadpage(host).data - if "Cerrar Sesion" in data: - return True, "" - usuario = config.get_setting("user", "seriecanal") - password = config.get_setting("password", "seriecanal") - if usuario == "" or password == "": - return False, 'Regístrate en www.seriecanal.com e introduce tus datos en "Configurar Canal"' - else: - post = urllib.urlencode({'username': usuario, 'password': password}) - data = httptools.downloadpage(host + "index.php?page=member&do=login&tarea=acceder", post=post).data - if "Bienvenid@, se ha identificado correctamente en nuestro sistema" in data: - return True, "" - else: - return False, "Error en el login. 
El usuario y/o la contraseña no son correctos" - - -def mainlist(item): - logger.info() - itemlist = [] - item.text_color = color1 - result, message = login() - if result: - itemlist.append(item.clone(action="series", title="Últimos episodios", url=host)) - itemlist.append(item.clone(action="genero", title="Series por género")) - itemlist.append(item.clone(action="alfabetico", title="Series por orden alfabético")) - itemlist.append(item.clone(action="search", title="Buscar...")) - else: - itemlist.append(item.clone(action="", title=message, text_color="red")) - itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False)) - return itemlist - - -def configuracion(item): - from platformcode import platformtools - ret = platformtools.show_channel_settings() - platformtools.itemlist_refresh() - return ret - - -def search(item, texto): - logger.info() - item.url = host + "index.php?page=portada&do=category&method=post&category_id=0&order=" \ - "C_Create&view=thumb&pgs=1&p2=1" - try: - post = "keyserie=" + texto - item.extra = post - return series(item) - # Se captura la excepción, para no interrumpir al buscador global si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def genero(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(host).data - data = scrapertools.find_single_match(data, '
    (.*?)
') - matches = scrapertools.find_multiple_matches(data, '([^"]+)
') - for scrapedurl, scrapedtitle in matches: - scrapedtitle = scrapedtitle.capitalize() - url = urlparse.urljoin(host, scrapedurl) - itemlist.append(item.clone(action="series", title=scrapedtitle, url=url)) - return itemlist - - -def alfabetico(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(host).data - data = scrapertools.find_single_match(data, '') - matches = scrapertools.find_multiple_matches(data, '([^"]+)') - for scrapedurl, scrapedtitle in matches: - url = urlparse.urljoin(host, scrapedurl) - itemlist.append(item.clone(action="series", title=scrapedtitle, url=url)) - return itemlist - - -def series(item): - logger.info() - itemlist = [] - item.infoLabels = {} - item.text_color = color2 - if item.extra != "": - data = httptools.downloadpage(item.url, post=item.extra).data - else: - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - - patron = '
([^"]+).*?([^"]+)

.*?' \ - '

(.*?)

' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedthumbnail, scrapedurl, scrapedplot, scrapedtitle, scrapedtemp, scrapedepi in matches: - title = scrapedtitle + " - " + scrapedtemp + " - " + scrapedepi - url = urlparse.urljoin(host, scrapedurl) - temporada = scrapertools.find_single_match(scrapedtemp, "\d+") - episode = scrapertools.find_single_match(scrapedepi, "\d+") - #item.contentType = "tvshow" - if temporada != "": - item.infoLabels['season'] = temporada - #item.contentType = "season" - if episode != "": - item.infoLabels['episode'] = episode - #item.contentType = "episode" - itemlist.append(item.clone(action="findvideos", title=title, url=url, - contentSerieName=scrapedtitle, - context=["buscar_trailer"])) - tmdb.set_infoLabels(itemlist) - # Extra marca siguiente página - next_page = scrapertools.find_single_match(data, 'Episodio - Enlaces de Descarga(.*?)') - patron = '

([^"]+)' - matches = scrapertools.find_multiple_matches(data_download, patron) - for scrapedurl, scrapedepi in matches: - new_item = item.clone() - if "Episodio" not in scrapedepi: - scrapedtitle = "[Torrent] Episodio " + scrapedepi - else: - scrapedtitle = "[Torrent] " + scrapedepi - scrapedtitle = scrapertools.htmlclean(scrapedtitle) - new_item.infoLabels['episode'] = scrapertools.find_single_match(scrapedtitle, "Episodio (\d+)") - logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "]") - itemlist.append(new_item.clone(action="play", title=scrapedtitle, url=scrapedurl, server="torrent", - contentType="episode")) - # Busca en la seccion online - data_online = scrapertools.find_single_match(data, "Enlaces de Visionado Online(.*?)") - patron = '([^"]+)' - matches = scrapertools.find_multiple_matches(data_online, patron) - for scrapedurl, scrapedthumb, scrapedtitle in matches: - # Deshecha enlaces de trailers - scrapedtitle = scrapertools.htmlclean(scrapedtitle) - if (scrapedthumb != "images/series/youtube.png") & (scrapedtitle != "Trailer"): - new_item = item.clone() - server = scrapertools.find_single_match(scrapedthumb, "images/series/(.*?).png") - title = "[" + server.capitalize() + "]" + " " + scrapedtitle - - new_item.infoLabels['episode'] = scrapertools.find_single_match(scrapedtitle, "Episodio (\d+)") - itemlist.append(new_item.clone(action="play", title=title, url=scrapedurl, contentType="episode")) - # Comprueba si hay otras temporadas - if not "No hay disponible ninguna Temporada adicional" in data: - data_temp = scrapertools.find_single_match(data, '

(.*?)') - data_temp = re.sub(r"\n|\r|\t|\s{2}| ", "", data_temp) - patron = '

([^"]+)' - matches = scrapertools.find_multiple_matches(data_temp, patron) - for scrapedurl, scrapedtitle in matches: - new_item = item.clone() - url = urlparse.urljoin(host, scrapedurl) - scrapedtitle = scrapedtitle.capitalize() - temporada = scrapertools.find_single_match(scrapedtitle, "Temporada (\d+)") - if temporada != "": - new_item.infoLabels['season'] = temporada - new_item.infoLabels['episode'] = "" - itemlist.append(new_item.clone(action="findvideos", title=scrapedtitle, url=url, text_color="red", - contentType="season")) - tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) - new_item = item.clone() - if config.is_xbmc(): - new_item.contextual = True - itemlist.append(new_item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="", - text_color="magenta")) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - if item.extra == "torrent": - itemlist.append(item.clone()) - else: - # Extrae url de enlace bit.ly - if item.url.startswith("http://bit.ly/"): - item.url = scrapertools.getLocationHeaderFromResponse(item.url) - video_list = servertools.findvideos(item.url) - if video_list: - url = video_list[0][1] - server = video_list[0][2] - itemlist.append(item.clone(server=server, url=url)) - - return itemlist diff --git a/plugin.video.alfa/channels/seriesyonkis.json b/plugin.video.alfa/channels/seriesyonkis.json deleted file mode 100755 index c1f15fd5..00000000 --- a/plugin.video.alfa/channels/seriesyonkis.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "id": "seriesyonkis", - "name": "Seriesyonkis", - "active": false, - "adult": false, - "language": ["cast"], - "thumbnail": "seriesyonkis.png", - "banner": "seriesyonkis.png", - "fanart": "seriesyonkis.jpg", - "categories": [ - "tvshow", - "anime", - "vos" - ], - "settings": [ - { - "id": "include_in_global_search", - "type": "bool", - "label": "Incluir en busqueda global", - "default": false, - "enabled": true, - "visible": true - } - ] -} \ No newline at end of file diff --git a/plugin.video.alfa/channels/seriesyonkis.py b/plugin.video.alfa/channels/seriesyonkis.py deleted file mode 100755 index c9b7c9e4..00000000 --- a/plugin.video.alfa/channels/seriesyonkis.py +++ /dev/null @@ -1,197 +0,0 @@ -# -*- coding: utf-8 -*- - -import re -import urlparse - -from core import httptools -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import config, logger - -host = 'https://yonkis.to' - - -def mainlist(item): - logger.info() - - itemlist = list() - itemlist.append(Item(channel=item.channel, action="alfabetico", title="Listado alfabetico", url=host)) - itemlist.append(Item(channel=item.channel, action="mas_vistas", title="Series más vistas", - url=host + "/series-mas-vistas")) - itemlist.append(Item(channel=item.channel, action="ultimos", title="Últimos episodios añadidos", - url=host)) - itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url=host + "/buscar/serie")) - - return itemlist - - -def alfabetico(item): - logger.info() - - itemlist = list() - - itemlist.append(Item(channel=item.channel, action="series", title="0-9", url=host + "/lista-de-series/0-9")) - for letra in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ': - itemlist.append(Item(channel=item.channel, action="series", title=letra, url=host+"/lista-de-series/"+letra)) - - return itemlist - - -def mas_vistas(item): - logger.info() - - data = httptools.downloadpage(item.url).data - matches = re.compile('', re.S).findall(data) - - itemlist = [] - for scrapedtitle, 
-        scrapedurl = urlparse.urljoin(item.url, scrapedurl)
-        scrapedthumbnail = urlparse.urljoin(item.url, scrapedthumbnail.replace("/90/", "/150/"))
-
-        itemlist.append(
-            Item(channel=item.channel, action="episodios", title=scrapedtitle, fulltitle=scrapedtitle, url=scrapedurl,
-                 thumbnail=scrapedthumbnail, show=scrapedtitle, fanart=item.fanart))
-
-    return itemlist
-
-
-def search(item, texto):
-    logger.info()
-
-    itemlist = []
-    post = "keyword=%s&search_type=serie" % texto
-    data = httptools.downloadpage(item.url, post=post).data
-
-    try:
-        patron = '([^<]+)'
-        matches = re.compile(patron, re.DOTALL).findall(data)
-        for scrapedurl, scrapedtitle, scrapedthumb, scrapedplot in matches:
-            title = scrapedtitle.strip()
-            url = host + scrapedurl
-            thumb = host + scrapedthumb.replace("/90/", "/150/")
-            plot = re.sub(r"\n|\r|\t|\s{2,}", "", scrapedplot.strip())
-            logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumb + "]")
-
-            itemlist.append(Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=url,
-                                 thumbnail=thumb, plot=plot, show=title))
-
-        return itemlist
-    # Catch the exception so the global search is not interrupted if a channel fails
-    except:
-        import sys
-        for line in sys.exc_info():
-            logger.error("%s" % line)
-        return []
-
-
-def ultimos(item):
-    logger.info()
-
-    itemlist = []
-
-    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
-    logger.debug("data %s" % data)
-    matches = re.compile('data-href="([^"]+)" data-src="([^"]+)" data-alt="([^"]+)".*?]+>(.*?)', re.S).findall(data)
-
-    for url, thumb, show, title in matches:
-
-        url = host + url
-
-        itemlist.append(Item(channel=item.channel, title=title, url=url, thumbnail=thumb, show=show.strip(),
-                             action="findvideos", fulltitle=title))
-
-    return itemlist
-
-
-def series(item):
-    logger.info()
-    itemlist = []
-
-    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
-
-    matches = scrapertools.find_single_match(data, '(.*?)')
-    matches = re.compile('title="([^"]+)" href="([^"]+)"', re.S).findall(matches)
-    for title, url in matches:
-        itemlist.append(Item(channel=item.channel, action="episodios", title=title, fulltitle=title,
-                             url=urlparse.urljoin(item.url, url), thumbnail=item.thumbnail, show=title))
-
-    # Pager
-    matches = re.compile('>', re.S).findall(data)
-
-    paginador = None
-    if len(matches) > 0:
-        paginador = Item(channel=item.channel, action="series", title="!Página siguiente",
-                         url=urlparse.urljoin(item.url, matches[0]), thumbnail=item.thumbnail, show=item.show)
-
-    if paginador and len(itemlist) > 0:
-        itemlist.insert(0, paginador)
-        itemlist.append(paginador)
-
-    return itemlist
-
-
-def episodios(item):
-    logger.info()
-
-    itemlist = []
-
-    # Download the page
-    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
-
-    pattern = '(.*?)(.*?)'
-    matches = re.compile(pattern, re.S).findall(data)
-
-    for url, s_e, title in matches:
-        url = host + url
-        title = s_e.strip() + title
-        itemlist.append(Item(channel=item.channel, title=title, url=url, thumbnail=thumb, show=item.show, plot=plot,
-                             action="findvideos", fulltitle=title))
-
-    if config.get_videolibrary_support():
-        itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
-                             action="add_serie_to_library", extra="episodios", show=item.show))
-        itemlist.append(Item(channel=item.channel, title="Descargar todos los episodios de la serie", url=item.url,
-                             action="download_all_episodes", extra="episodios", show=item.show))
-
-    return itemlist
-
-
-def findvideos(item):
-    logger.info()
-    itemlist = []
-
-    # Download the page
-    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
-
-    pattern = ']+>]+alt="([^"]+)" />'
diff --git a/plugin.video.alfa/channels/tupornotv.json b/plugin.video.alfa/channels/tupornotv.json
deleted file mode 100755
--- a/plugin.video.alfa/channels/tupornotv.json
+++ /dev/null
diff --git a/plugin.video.alfa/channels/tupornotv.py b/plugin.video.alfa/channels/tupornotv.py
deleted file mode 100755
--- a/plugin.video.alfa/channels/tupornotv.py
+++ /dev/null
@@ -1,264 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import re
-import urlparse
-
-from core import scrapertools
-from core.item import Item
-from platformcode import logger
-
-
-def novedades(item):
-    logger.info()
-
-    url = item.url
-    data = scrapertools.cachePage(url)
-    '''
-    Cogiendo en el bosque
-    Cogiendo en el bosque
-    '''
-    patronvideos = '(.*?)'
-    matches = re.compile(patronvideos, re.DOTALL).findall(data)
-
-    itemlist = []
-    for match in matches:
-        try:
-            duracion = re.compile('(.+?)<').findall(match)[0]
-        except:
-            try:
-                duracion = re.compile('\((.+?)\)').findall(match)[0]
-            except:
-                duracion = ""
-
-    '''Siguiente'''
-    patronsiguiente = 'Siguiente '
-    siguiente = re.compile(patronsiguiente, re.DOTALL).findall(data)
-    if len(siguiente) > 0:
-        scrapedurl = urlparse.urljoin(url, siguiente[0])
-        itemlist.append(Item(channel=item.channel, action="novedades", title="!Next page", url=scrapedurl, folder=True))
-
-    return itemlist
-
-
-def masVistos(item):
-    logger.info()
-
-    itemlist = []
-    itemlist.append(
-        Item(channel=item.channel, title="Hoy", action="novedades", url="http://tuporno.tv/hoy", folder=True))
-    itemlist.append(Item(channel=item.channel, title="Recientes", action="novedades", url="http://tuporno.tv/recientes",
-                         folder=True))
-    itemlist.append(
-        Item(channel=item.channel, title="Semana", action="novedades", url="http://tuporno.tv/semana", folder=True))
-    itemlist.append(
-        Item(channel=item.channel, title="Mes", action="novedades", url="http://tuporno.tv/mes", folder=True))
-    itemlist.append(
-        Item(channel=item.channel, title="Año", action="novedades", url="http://tuporno.tv/ano", folder=True))
-    return itemlist
-
-
-def categorias(item):
-    logger.info()
-
-    url = item.url
-    # ------------------------------------------------------
-    # Download the page
-    # ------------------------------------------------------
-    data = scrapertools.cachePage(url)
-    # logger.info(data)
-    # ------------------------------------------------------
-    # Extract the entries
-    # ------------------------------------------------------
-    # categories section
-    # Entry pattern
-    if url == "http://tuporno.tv/categorias/":
-        patronvideos = '• | | | |'
-    else:
-        data = re.sub(r"|-\s", "", data)
-        patronvideos = '• '
-    matches = re.compile(patronvideos, re.DOTALL).findall(data)
-
-    if len(matches) > 0:
-        itemlist = []
-        for match in matches:
-            # Title
-            scrapedtitle = match[2].replace("", "")
-            scrapedtitle = scrapedtitle.replace("", "")
-            scrapedurl = urlparse.urljoin("http://tuporno.tv/", match[0])
-            scrapedthumbnail = urlparse.urljoin("http://tuporno.tv/", match[1])
-            scrapedplot = ""
-            duracion = match[3]
-
-            itemlist.append(
-                Item(channel=item.channel, action="play", title=scrapedtitle + " [" + duracion + "]", url=scrapedurl,
-                     thumbnail=scrapedthumbnail, plot=scrapedplot, server="Directo", folder=False))
-
-        '''Siguiente'''
-        patronsiguiente = 'Siguiente '
-        siguiente = re.compile(patronsiguiente, re.DOTALL).findall(data)
-        if len(siguiente) > 0:
-            patronultima = '