diff --git a/plugin.video.alfa/addon.xml b/plugin.video.alfa/addon.xml index 82140bc8..ebc44974 100755 --- a/plugin.video.alfa/addon.xml +++ b/plugin.video.alfa/addon.xml @@ -1,5 +1,5 @@ - + @@ -19,17 +19,19 @@ [B]Estos son los cambios para esta versión:[/B] [COLOR green][B]Canales agregados y arreglos[/B][/COLOR] - ¤ repelis ¤ thevid - ¤ vevio ¤ danimados - ¤ sipeliculas ¤ cinecalidad - ¤ locopelis ¤ pelisipad ¤ divxtotal ¤ elitetorrent ¤ estrenosgo ¤ grantorrent ¤ mejortorrent1 ¤ newpct1 - ¤ tvvip ¤ zonatorrent - ¤ maxipelis24 ¤ wikiseries + ¤ pelismagnet ¤ todopeliculas + ¤ allpeliculas ¤ puyasubs + ¤ yape ¤ dilo + ¤ goovie ¤ pelisipad + ¤ seriesblanco ¤ pepecine + ¤ maxipelis24 ¤ pelisplanet + ¤ yts ¤ arreglos internos - ¤ Agradecimientos a @angedam y @chivmalev por colaborar en ésta versión + + ¤ Agradecimientos a @wrlopez y @chivmalev por colaborar en ésta versión Navega con Kodi por páginas web para ver sus videos de manera fácil. diff --git a/plugin.video.alfa/channels/allpeliculas.py b/plugin.video.alfa/channels/allpeliculas.py index 875c87db..6cd93449 100644 --- a/plugin.video.alfa/channels/allpeliculas.py +++ b/plugin.video.alfa/channels/allpeliculas.py @@ -33,15 +33,13 @@ SERVERS = {"26": "powvideo", "45": "okru", "75": "openload", "12": "netutv", "65 list_servers = ['powvideo', 'okru', 'openload', 'netutv', 'thevideos', 'spruto', 'stormo', 'idowatch', 'nowvideo', 'fastplay', 'raptu', 'tusfiles'] -host = "http://allpeliculas.com/" +host = "http://allpeliculas.io/" def mainlist(item): logger.info() itemlist = [] item.text_color = color1 - autoplay.init(item.channel, list_servers, list_quality) - itemlist.append(item.clone(title="Películas", action="lista", fanart="http://i.imgur.com/c3HS8kj.png", url= host + "movies/newmovies?page=1", extra1 = 0, thumbnail=get_thumb('movies', auto=True))) @@ -51,16 +49,13 @@ def mainlist(item): url= host, thumbnail=get_thumb('colections', auto=True))) itemlist.append(item.clone(title="", action="")) itemlist.append(item.clone(title="Buscar...", action="search", thumbnail=get_thumb('search', auto=True))) - autoplay.show_option(item.channel, itemlist) - return itemlist def colecciones(item): logger.info() itemlist = [] - data = httptools.downloadpage(item.url).data patron = 'href="(/peliculas[^"]+).*?' patron += 'title_geo">([^<]+).*?' 
@@ -143,11 +138,11 @@ def findvideos(item): patron += '>([^<]+)' matches = scrapertools.find_multiple_matches(data, patron) for url, calidad in matches: + calidad = scrapertools.find_single_match(calidad, "\d+") + scrapertools.find_single_match(calidad, "\..+") itemlist.append(item.clone( channel = item.channel, action = "play", title = calidad, - fulltitle = item.title, thumbnail = item.thumbnail, contentThumbnail = item.thumbnail, url = url, @@ -159,7 +154,7 @@ def findvideos(item): if config.get_videolibrary_support(): itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green", action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail, - fulltitle = item.fulltitle + contentTitle = item.contentTitle )) # Requerido para FilterTools @@ -183,31 +178,22 @@ def lista(item): dict_param = dict() item.infoLabels = {} item.text_color = color2 - params = '{}' if item.extra1 != 0: dict_param["genero"] = [item.extra1] params = jsontools.dump(dict_param) - data = httptools.downloadpage(item.url, post=params).data data = data.replace("","").replace("<\/mark>","") dict_data = jsontools.load(data) - for it in dict_data["items"]: - title = it["title"] - plot = it["slogan"] - rating = it["imdb"] year = it["year"] url = host + "pelicula/" + it["slug"] + title = it["title"] + " (%s)" %year thumb = host + it["image"] item.infoLabels['year'] = year - itemlist.append(item.clone(action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumb, - plot=plot, context=["buscar_trailer"], contentTitle=title, contentType="movie")) - - try: - tmdb.set_infoLabels(itemlist, __modo_grafico__) - except: - pass + itemlist.append(item.clone(action="findvideos", title=title, url=url, thumbnail=thumb, + context=["buscar_trailer"], contentTitle=it["title"], contentType="movie")) + tmdb.set_infoLabels(itemlist, __modo_grafico__) pagina = scrapertools.find_single_match(item.url, 'page=([0-9]+)') item.url = item.url.replace(pagina, "") if pagina == "": @@ -219,6 +205,7 @@ def lista(item): )) return itemlist + def search(item, texto): logger.info() if texto != "": @@ -246,12 +233,10 @@ def newest(categoria): if itemlist[-1].action == "lista": itemlist.pop() - # Se captura la excepción, para no interrumpir al canal novedades si un canal falla except: import sys for line in sys.exc_info(): logger.error("{0}".format(line)) return [] - return itemlist diff --git a/plugin.video.alfa/channels/cuelgame.json b/plugin.video.alfa/channels/cuelgame.json deleted file mode 100755 index b85a0b81..00000000 --- a/plugin.video.alfa/channels/cuelgame.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "id": "cuelgame", - "name": "Cuelgame", - "active": false, - "adult": false, - "language": ["cast"], - "thumbnail": "cuelgame.png", - "banner": "cuelgame.png", - "categories": [ - "torrent", - "movie", - "tvshow", - "documentary", - "vos" - ], - "settings": [ - { - "id": "include_in_global_search", - "type": "bool", - "label": "Incluir en busqueda global", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "include_in_newest_torrent", - "type": "bool", - "label": "Incluir en Novedades - Torrent", - "default": true, - "enabled": true, - "visible": true - } - ] -} diff --git a/plugin.video.alfa/channels/cuelgame.py b/plugin.video.alfa/channels/cuelgame.py deleted file mode 100755 index a6b4b641..00000000 --- a/plugin.video.alfa/channels/cuelgame.py +++ /dev/null @@ -1,97 +0,0 @@ -# -*- coding: utf-8 -*- - -import re -import urlparse - -from core import scrapertools, httptools -from 
core.item import Item -from core.scrapertools import decodeHtmlentities as dhe -from platformcode import logger - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append(Item(channel=item.channel, title="[COLOR forestgreen]Videos[/COLOR]", action="scraper", - url="http://cuelgame.net/?category=4", - thumbnail="http://img5a.flixcart.com/image/poster/q/t/d/vintage-camera-collage-sr148-medium-400x400-imadkbnrnbpggqyz.jpeg", - fanart="http://imgur.com/7frGoPL.jpg")) - itemlist.append(Item(channel=item.channel, title="[COLOR forestgreen]Buscar[/COLOR]", action="search", url="", - thumbnail="http://images2.alphacoders.com/846/84682.jpg", - fanart="http://imgur.com/1sIHN1r.jpg")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = "http://cuelgame.net/search.php?q=%s" % (texto) - - try: - return scraper(item) - # Se captura la excepciÛn, para no interrumpir al buscador global si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def scraper(item): - logger.info() - itemlist = [] - # Descarga la página - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|\s{2}| |CET", "", data) - patron = '

0: - # corrige "&" para la paginación - next_page = matches[0].replace("amp;", "") - scrapedurl = urlparse.urljoin(item.url, next_page) - itemlist.append(Item(channel=item.channel, action="scraper", title="Página siguiente >>", url=scrapedurl, - thumbnail="http://imgur.com/ycPgVVO.png", folder=True)) - return itemlist - - -def newest(categoria): - logger.info() - itemlist = [] - item = Item() - try: - if categoria == 'torrent': - item.url = 'http://cuelgame.net/?category=4' - itemlist = scraper(item) - if itemlist[-1].action == "Página siguiente >>": - itemlist.pop() - # Se captura la excepción, para no interrumpir al canal novedades si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("{0}".format(line)) - return [] - return itemlist diff --git a/plugin.video.alfa/channels/dilo.json b/plugin.video.alfa/channels/dilo.json new file mode 100644 index 00000000..1d094d6f --- /dev/null +++ b/plugin.video.alfa/channels/dilo.json @@ -0,0 +1,37 @@ +{ + "id": "dilo", + "name": "Dilo", + "active": true, + "adult": false, + "language": [], + "thumbnail": "https://s22.postimg.cc/u6efsniqp/dilo.png", + "banner": "", + "categories": [ + "tvshow", + "vos" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": false, + "visible": false + }, + { + "id": "filter_languages", + "type": "list", + "label": "Mostrar enlaces en idioma...", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "No filtrar", + "CAST", + "LAT", + "VOSE" + ] + } + ] +} diff --git a/plugin.video.alfa/channels/dilo.py b/plugin.video.alfa/channels/dilo.py new file mode 100644 index 00000000..70a52b20 --- /dev/null +++ b/plugin.video.alfa/channels/dilo.py @@ -0,0 +1,297 @@ +# -*- coding: utf-8 -*- +# -*- Channel Dilo -*- +# -*- Created for Alfa-addon -*- +# -*- By the Alfa Develop Group -*- + +import re + +from channels import autoplay +from channels import filtertools +from core import httptools +from core import scrapertools +from core import servertools +from core import jsontools +from core import tmdb +from core.item import Item +from platformcode import config, logger +from channelselector import get_thumb + +host = 'https://www.dilo.nu/' + +IDIOMAS = {'Español': 'CAST', 'Latino': 'LAT', 'Subtitulado': 'VOSE'} +list_language = IDIOMAS.values() +list_quality = [] +list_servers = ['openload', 'streamango', 'powvideo', 'clipwatching', 'streamplay', 'streamcherry', 'gamovideo'] + +def get_source(url): + logger.info() + data = httptools.downloadpage(url).data + data = re.sub(r'\n|\r|\t| |
|\s{2,}', "", data) + return data + +def mainlist(item): + logger.info() + + autoplay.init(item.channel, list_servers, list_quality) + itemlist = [] + + itemlist.append(Item(channel=item.channel, title="Nuevos capitulos", action="latest_episodes", url=host, + thumbnail=get_thumb('new episodes', auto=True))) + + itemlist.append(Item(channel=item.channel, title="Ultimas", action="latest_shows", url=host, + thumbnail=get_thumb('last', auto=True))) + + itemlist.append(Item(channel=item.channel, title="Todas", action="list_all", url=host + 'catalogue', + thumbnail=get_thumb('all', auto=True))) + + itemlist.append(Item(channel=item.channel, title="Generos", action="section", + url=host + 'catalogue', thumbnail=get_thumb('genres', auto=True))) + + itemlist.append(Item(channel=item.channel, title="Por Años", action="section", url=host + 'catalogue', + thumbnail=get_thumb('year', auto=True))) + + itemlist.append(Item(channel=item.channel, title = 'Buscar', action="search", url= host+'search?s=', + thumbnail=get_thumb('search', auto=True))) + + autoplay.show_option(item.channel, itemlist) + + return itemlist + + +def list_all(item): + logger.info() + + itemlist = [] + data = get_source(item.url) + patron = '
.*?text-uppercase"') + patron = '' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedthumbnail, scrapedtitle in matches: + title = scrapedtitle + contentSerieName = scrapedtitle + itemlist.append(Item(channel=item.channel, action='seasons', url=scrapedurl, thumbnail=scrapedthumbnail, + title=title, contentSerieName=contentSerieName)) + + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + + return itemlist + + +def seasons(item): + from core import jsontools + import urllib + logger.info() + + itemlist=[] + + data=get_source(item.url) + serie_id = scrapertools.find_single_match(data, '{"item_id": (\d+)}') + post = {'item_id': serie_id} + post = urllib.urlencode(post) + seasons_url = '%sapi/web/seasons.php' % host + headers = {'Referer':item.url} + data = jsontools.load(httptools.downloadpage(seasons_url, post=post, headers=headers).data) + infoLabels = item.infoLabels + for dict in data: + season = dict['number'] + + if season != '0': + infoLabels['season'] = season + title = 'Temporada %s' % season + itemlist.append(Item(channel=item.channel, url=item.url, title=title, action='episodesxseason', + contentSeasonNumber=season, id=serie_id, infoLabels=infoLabels)) + + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append( + Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url, + action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName)) + + return itemlist + + +def episodesxseason(item): + logger.info() + from core import jsontools + import urllib + logger.info() + + itemlist = [] + season = item.infoLabels['season'] + post = {'item_id': item.id, 'season_number': season} + post = urllib.urlencode(post) + + seasons_url = '%sapi/web/episodes.php' % host + headers = {'Referer': item.url} + data = jsontools.load(httptools.downloadpage(seasons_url, post=post, headers=headers).data) + infoLabels = item.infoLabels + for dict in data: + + episode = dict['number'] + epi_name = dict['name'] + title = '%sx%s - %s' % (season, episode, epi_name) + url = '%s%s/' % (host, dict['permalink']) + infoLabels['episode'] = episode + itemlist.append(Item(channel=item.channel, title=title, action='findvideos', url=url, + contentEpisodeNumber=season, id=item.id, infoLabels=infoLabels)) + + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + + return itemlist + +def episodios(item): + logger.info() + itemlist = [] + templist = seasons(item) + for tempitem in templist: + itemlist += episodesxseason(tempitem) + return itemlist + + +def findvideos(item): + logger.info() + + itemlist = [] + data = get_source(item.url) + patron = 'data-link="([^"]+)">.*?500">([^<]+)<.*?>Reproducir en ([^<]+)' + matches = re.compile(patron, re.DOTALL).findall(data) + for enc_url, server, language in matches: + if not config.get_setting('unify'): + title = ' [%s]' % language + else: + title = '' + + itemlist.append(Item(channel=item.channel, title='%s'+title, url=enc_url, action='play', + language=IDIOMAS[language], server=server, infoLabels=item.infoLabels)) + + itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize()) + + # Requerido para FilterTools + + itemlist = filtertools.get_links(itemlist, item, list_language) + + # Requerido para AutoPlay + + autoplay.start(itemlist, item) + + return itemlist + + + +def decode_link(enc_url): + logger.info() + + try: + new_data = get_source(enc_url) + 
new_enc_url = scrapertools.find_single_match(new_data, 'src="([^"]+)"') + try: + url = httptools.downloadpage(new_enc_url, follow_redirects=False).headers['location'] + except: + if not 'jquery' in new_enc_url: + url = new_enc_url + except: + pass + + return url + + +def play(item): + logger.info() + + item.url = decode_link(item.url) + + itemlist = [item] + + return itemlist + + +def search(item, texto): + logger.info() + import urllib + itemlist = [] + texto = texto.replace(" ", "+") + item.url = item.url + texto + if texto != '': + try: + return list_all(item) + except: + itemlist.append(item.clone(url='', title='No hay elementos...', action='')) + return itemlist diff --git a/plugin.video.alfa/channels/divxtotal.json b/plugin.video.alfa/channels/divxtotal.json index bf1c4ac6..811b5e44 100644 --- a/plugin.video.alfa/channels/divxtotal.json +++ b/plugin.video.alfa/channels/divxtotal.json @@ -27,6 +27,22 @@ "type": "bool", "visible": true }, + { + "id": "filter_languages", + "type": "list", + "label": "Mostrar enlaces en idioma...", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "No filtrar", + "CAST", + "LAT", + "VO", + "VOS", + "VOSE" + ] + }, { "id": "timeout_downloadpage", "type": "list", diff --git a/plugin.video.alfa/channels/divxtotal.py b/plugin.video.alfa/channels/divxtotal.py index 1a0deaf5..b5ed39f6 100644 --- a/plugin.video.alfa/channels/divxtotal.py +++ b/plugin.video.alfa/channels/divxtotal.py @@ -14,6 +14,15 @@ from core.item import Item from platformcode import config, logger from core import tmdb from lib import generictools +from channels import filtertools +from channels import autoplay + + +#IDIOMAS = {'CAST': 'Castellano', 'LAT': 'Latino', 'VO': 'Version Original'} +IDIOMAS = {'Castellano': 'CAST', 'Latino': 'LAT', 'Version Original': 'VO'} +list_language = IDIOMAS.values() +list_quality = [] +list_servers = ['torrent'] host = 'https://www.divxtotal3.net/' channel = 'divxtotal' @@ -33,6 +42,9 @@ def mainlist(item): thumb_series = get_thumb("channels_tvshow.png") thumb_buscar = get_thumb("search.png") thumb_separador = get_thumb("next.png") + thumb_settings = get_thumb("setting_0.png") + + autoplay.init(item.channel, list_servers, list_quality) item.url_plus = "peliculas/" itemlist.append(Item(channel=item.channel, title="Películas", action="categorias", url=host + item.url_plus, url_plus=item.url_plus, thumbnail=thumb_cartelera, extra="Películas")) @@ -49,7 +61,20 @@ def mainlist(item): itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", url=host + "?s=%s", thumbnail=thumb_buscar, extra="search")) + itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]Configuración:[/COLOR]", folder=False, thumbnail=thumb_separador)) + + itemlist.append(Item(channel=item.channel, action="configuracion", title="Configurar canal", thumbnail=thumb_settings)) + + autoplay.show_option(item.channel, itemlist) #Activamos Autoplay + return itemlist + + +def configuracion(item): + from platformcode import platformtools + ret = platformtools.show_channel_settings() + platformtools.itemlist_refresh() + return def submenu(item): @@ -174,7 +199,7 @@ def listado(item): cnt_tot = 40 # Poner el num. 
máximo de items por página cnt_title = 0 # Contador de líneas insertadas en Itemlist inicio = time.time() # Controlaremos que el proceso no exceda de un tiempo razonable - fin = inicio + 10 # Después de este tiempo pintamos (segundos) + fin = inicio + 5 # Después de este tiempo pintamos (segundos) timeout_search = timeout # Timeout para descargas if item.extra == 'search': timeout_search = timeout * 2 # Timeout un poco más largo para las búsquedas @@ -287,7 +312,7 @@ def listado(item): else: url = scrapedurl #No se encuentra la Serie, se trata como Episodio suelto - cnt_title += 1 + #cnt_title += 1 item_local = item.clone() #Creamos copia de Item para trabajar if item_local.tipo: #... y limpiamos del item_local.tipo @@ -458,7 +483,13 @@ def listado(item): item_local.contentSeason_save = item_local.contentSeason del item_local.infoLabels['season'] - itemlist.append(item_local.clone()) #Pintar pantalla + #Ahora se filtra por idioma, si procede, y se pinta lo que vale + if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra + itemlist = filtertools.get_link(itemlist, item_local, list_language) + else: + itemlist.append(item_local.clone()) #Si no, pintar pantalla + + cnt_title = len(itemlist) #Contador de líneas añadidas #logger.debug(item_local) @@ -483,6 +514,10 @@ def listado(item): def findvideos(item): logger.info() itemlist = [] + itemlist_t = [] #Itemlist total de enlaces + itemlist_f = [] #Itemlist de enlaces filtrados + if not item.language: + item.language = ['CAST'] #Castellano por defecto matches = [] item.category = categoria @@ -552,11 +587,26 @@ def findvideos(item): item_local.action = "play" #Visualizar vídeo item_local.server = "torrent" #Seridor Torrent - itemlist.append(item_local.clone()) #Pintar pantalla + itemlist_t.append(item_local.clone()) #Pintar pantalla, si no se filtran idiomas + + # Requerido para FilterTools + if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra + itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío #logger.debug("TORRENT: " + scrapedurl + " / title gen/torr: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName) #logger.debug(item_local) + if len(itemlist_f) > 0: #Si hay entradas filtradas... + itemlist.extend(itemlist_f) #Pintamos pantalla filtrada + else: + if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ... + thumb_separador = get_thumb("next.png") #... 
pintamos todo con aviso + itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador)) + itemlist.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado + + # Requerido para AutoPlay + autoplay.start(itemlist, item) #Lanzamos Autoplay + return itemlist diff --git a/plugin.video.alfa/channels/elitetorrent.json b/plugin.video.alfa/channels/elitetorrent.json index ec17889b..51604e55 100644 --- a/plugin.video.alfa/channels/elitetorrent.json +++ b/plugin.video.alfa/channels/elitetorrent.json @@ -18,9 +18,62 @@ "id": "include_in_global_search", "type": "bool", "label": "Incluir en busqueda global", - "default": false, + "default": true, "enabled": true, "visible": true + }, + { + "id": "modo_grafico", + "type": "bool", + "label": "Buscar información extra en TMDB", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "filter_languages", + "type": "list", + "label": "Mostrar enlaces en idioma...", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "No filtrar", + "CAST", + "LAT", + "VO", + "VOS", + "VOSE" + ] + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Peliculas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "timeout_downloadpage", + "type": "list", + "label": "Timeout (segs.) en descarga de páginas o verificación de servidores", + "default": 5, + "enabled": true, + "visible": true, + "lvalues": [ + "None", + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "10" + ] } - ] -} + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/elitetorrent.py b/plugin.video.alfa/channels/elitetorrent.py index 1a052a4a..2e2b6029 100644 --- a/plugin.video.alfa/channels/elitetorrent.py +++ b/plugin.video.alfa/channels/elitetorrent.py @@ -4,6 +4,7 @@ import re import sys import urllib import urlparse +import time from channelselector import get_thumb from core import httptools @@ -13,8 +14,22 @@ from core.item import Item from platformcode import config, logger from core import tmdb from lib import generictools +from channels import filtertools +from channels import autoplay + + +#IDIOMAS = {'CAST': 'Castellano', 'LAT': 'Latino', 'VO': 'Version Original'} +IDIOMAS = {'Castellano': 'CAST', 'Latino': 'LAT', 'Version Original': 'VO'} +list_language = IDIOMAS.values() +list_quality = [] +list_servers = ['torrent'] host = 'http://www.elitetorrent.biz' +channel = "elitetorrent" + +categoria = channel.capitalize() +__modo_grafico__ = config.get_setting('modo_grafico', channel) +timeout = config.get_setting('timeout_downloadpage', channel) def mainlist(item): @@ -26,19 +41,37 @@ def mainlist(item): thumb_series = get_thumb("channels_tvshow.png") thumb_series_hd = get_thumb("channels_tvshow_hd.png") thumb_buscar = get_thumb("search.png") + thumb_separador = get_thumb("next.png") + thumb_settings = get_thumb("setting_0.png") + + autoplay.init(item.channel, list_servers, list_quality) itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=host, extra="peliculas", thumbnail=thumb_pelis)) itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=host, extra="series", thumbnail=thumb_series)) - itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url=host, thumbnail=thumb_buscar)) + itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url=host, thumbnail=thumb_buscar, 
filter_lang=True)) + + itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]Configuración:[/COLOR]", folder=False, thumbnail=thumb_separador)) + + itemlist.append(Item(channel=item.channel, action="configuracion", title="Configurar canal", thumbnail=thumb_settings)) + + autoplay.show_option(item.channel, itemlist) #Activamos Autoplay return itemlist +def configuracion(item): + from platformcode import platformtools + ret = platformtools.show_channel_settings() + platformtools.itemlist_refresh() + return + + def submenu(item): logger.info() itemlist = [] + item.filter_lang = True data = '' try: @@ -84,10 +117,13 @@ def submenu(item): if "/serie" in scrapedurl: continue + if 'subtitulado' in scrapedtitle.lower() or 'latino' in scrapedtitle.lower() or 'original' in scrapedtitle.lower(): + item.filter_lang = False + itemlist.append(item.clone(action="listado", title=scrapedtitle, url=scrapedurl)) if item.extra == "series": #Añadimos Series VOSE que está fuera del menú principal - itemlist.append(item.clone(action="listado", title="Series VOSE", url=host + "/series-vose/")) + itemlist.append(item.clone(action="listado", title="Series VOSE", url=host + "/series-vose/", filter_lang=False)) return itemlist @@ -96,10 +132,18 @@ def listado(item): logger.info() itemlist = [] + inicio = time.time() # Controlaremos que el proceso no exceda de un tiempo razonable + fin = inicio + 5 # Después de este tiempo pintamos (segundos) + timeout_search = timeout # Timeout para descargas + if item.extra == 'search': + timeout_search = timeout * 2 # Timeout un poco más largo para las búsquedas + if timeout_search < 5: + timeout_search = 5 # Timeout un poco más largo para las búsquedas + # Descarga la página data = '' try: - data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) + data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url, timeout=timeout_search).data) except: pass @@ -168,6 +212,9 @@ def listado(item): item_local.language += ["VO"] if "dual" in scrapedcategory.lower() or "dual" in title.lower(): item_local.language[0:0] = ["DUAL"] + + if item_local.language == []: + item_local.language = ['CAST'] #Por defecto #Limpiamos el título de la basura innecesaria title = title.replace("Dual", "").replace("dual", "").replace("Subtitulada", "").replace("subtitulada", "").replace("Subt", "").replace("subt", "").replace("Sub", "").replace("sub", "").replace("(Proper)", "").replace("(proper)", "").replace("Proper", "").replace("proper", "").replace("#", "").replace("(Latino)", "").replace("Latino", "") @@ -215,10 +262,14 @@ def listado(item): item_local.infoLabels['year'] = "-" #Pasamos a TMDB cada Item, para evitar el efecto memoria de tmdb - if item.category: #Si este campo no existe es que viene de la primera pasada de una búsqueda global, pasamos - tmdb.set_infoLabels(item_local, True) + #if item.category: #Si este campo no existe es que viene de la primera pasada de una búsqueda global, pasamos + # tmdb.set_infoLabels(item_local, True) - itemlist.append(item_local.clone()) #Pintar pantalla + #Ahora se filtra por idioma, si procede, y se pinta lo que vale + if config.get_setting('filter_languages', channel) > 0 and item.filter_lang: #Si hay idioma seleccionado, se filtra + itemlist = filtertools.get_link(itemlist, item_local, list_language) + else: + itemlist.append(item_local.clone()) #Si no, pintar pantalla #if not item.category: #Si este campo no existe es que viene de la primera pasada de una búsqueda global # return itemlist #Retornamos sin 
pasar por la fase de maquillaje para ahorra tiempo @@ -250,7 +301,7 @@ def listado(item): else: title = '[COLOR gold]Página siguiente >>[/COLOR] %s' % (int(matches[1]) - 1) - itemlist.append(Item(channel=item.channel, action="listado", title=title, url=scrapedurl, extra=item.extra)) + itemlist.append(Item(channel=item.channel, action="listado", title=title, url=scrapedurl, extra=item.extra, filter_lang=item.filter_lang)) return itemlist @@ -258,11 +309,15 @@ def listado(item): def findvideos(item): logger.info() itemlist = [] + itemlist_t = [] #Itemlist total de enlaces + itemlist_f = [] #Itemlist de enlaces filtrados + if not item.language: + item.language = ['CAST'] #Castellano por defecto #Bajamos los datos de la página data = '' try: - data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) + data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url, timeout=timeout).data) except: pass @@ -308,12 +363,12 @@ def findvideos(item): if size: item.quality = '%s [%s]' % (item.quality, size) #Agregamos size al final de calidad item.quality = item.quality.replace("GB", "G B").replace("MB", "M B") #Se evita la palabra reservada en Unify - - #Generamos una copia de Item para trabajar sobre ella - item_local = item.clone() #Ahora pintamos el link del Torrent, si lo hay - if link_torrent: # Hay Torrent ? + if link_torrent: # Hay Torrent ? + #Generamos una copia de Item para trabajar sobre ella + item_local = item.clone() + if item_local.quality: item_local.quality += " " item_local.quality += "[Torrent]" @@ -332,10 +387,27 @@ def findvideos(item): item_local.action = "play" #Visualizar vídeo item_local.server = "torrent" #Seridor Torrent - itemlist.append(item_local.clone()) #Pintar pantalla + itemlist_t.append(item_local.clone()) #Pintar pantalla, si no se filtran idiomas + + # Requerido para FilterTools + if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra + itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío + + if len(itemlist_f) > 0: #Si hay entradas filtradas... + itemlist.extend(itemlist_f) #Pintamos pantalla filtrada + else: + if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ... + thumb_separador = get_thumb("next.png") #... pintamos todo con aviso + itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador)) + itemlist.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado #Ahora pintamos el link del Magnet, si lo hay + itemlist_t = [] #Itemlist total de enlaces + itemlist_f = [] #Itemlist de enlaces filtrados if link_magnet: # Hay Magnet ? 
+ #Generamos una copia de Item para trabajar sobre ella + item_local = item.clone() + if item_local.quality: item_local.quality += " " item_local.quality = item_local.quality.replace("[Torrent]", "") + "[Magnet]" @@ -347,11 +419,26 @@ def findvideos(item): item_local.action = "play" #Visualizar vídeo item_local.server = "torrent" #Seridor Torrent - itemlist.append(item_local.clone()) #Pintar pantalla + itemlist_t.append(item_local.clone()) #Pintar pantalla, si no se filtran idiomas + + # Requerido para FilterTools + if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra + itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío + + if len(itemlist_f) > 0: #Si hay entradas filtradas... + itemlist.extend(itemlist_f) #Pintamos pantalla filtrada + else: + if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ... + thumb_separador = get_thumb("next.png") #... pintamos todo con aviso + itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador)) + itemlist.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado #logger.debug("TORRENT: " + link_torrent + "MAGNET: " + link_magnet + " / title gen/torr: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / tamaño: " + size + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName) #logger.debug(item_local) + # Requerido para AutoPlay + autoplay.start(itemlist, item) #Lanzamos Autoplay + return itemlist @@ -387,7 +474,7 @@ def newest(categoria): itemlist = [] item = Item() try: - if categoria == 'torrent': + if categoria == 'peliculas': item.url = host item.extra = "peliculas" item.category_new= 'newest' diff --git a/plugin.video.alfa/channels/estrenosgo.json b/plugin.video.alfa/channels/estrenosgo.json index bf922a70..5ebbbafc 100755 --- a/plugin.video.alfa/channels/estrenosgo.json +++ b/plugin.video.alfa/channels/estrenosgo.json @@ -31,6 +31,22 @@ "type": "bool", "visible": true }, + { + "id": "filter_languages", + "type": "list", + "label": "Mostrar enlaces en idioma...", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "No filtrar", + "CAST", + "LAT", + "VO", + "VOS", + "VOSE" + ] + }, { "id": "timeout_downloadpage", "type": "list", diff --git a/plugin.video.alfa/channels/estrenosgo.py b/plugin.video.alfa/channels/estrenosgo.py index 52b00848..796c77e6 100644 --- a/plugin.video.alfa/channels/estrenosgo.py +++ b/plugin.video.alfa/channels/estrenosgo.py @@ -14,12 +14,23 @@ from core.item import Item from platformcode import config, logger, platformtools from core import tmdb from lib import generictools +from channels import filtertools +from channels import autoplay + + +#IDIOMAS = {'CAST': 'Castellano', 'LAT': 'Latino', 'VO': 'Version Original'} +IDIOMAS = {'Castellano': 'CAST', 'Latino': 'LAT', 'Version Original': 'VO'} +list_language = IDIOMAS.values() +list_quality = [] +list_servers = ['torrent'] host = 'http://estrenosby.net/' # 'http://estrenosli.org/' +channel = "estrenosgo" + color1, color2, color3 = ['0xFF58D3F7', '0xFF2E64FE', '0xFF0404B4'] -__modo_grafico__ = config.get_setting('modo_grafico', 'estrenosgo') -modo_ultima_temp = config.get_setting('seleccionar_ult_temporadda_activa', 'estrenosgo') #Actualización sólo últ. Temporada? 
-timeout = config.get_setting('timeout_downloadpage', 'estrenosgo') +__modo_grafico__ = config.get_setting('modo_grafico', channel) +modo_ultima_temp = config.get_setting('seleccionar_ult_temporadda_activa', channel) #Actualización sólo últ. Temporada? +timeout = config.get_setting('timeout_downloadpage', channel) def mainlist(item): @@ -37,28 +48,41 @@ def mainlist(item): thumb_buscar = get_thumb("search.png") thumb_separador = get_thumb("next.png") thumb_cabecera = get_thumb("nofolder.png") + thumb_settings = get_thumb("setting_0.png") + + autoplay.init(item.channel, list_servers, list_quality) itemlist.append(Item(channel=item.channel, url=host, title="PELÍCULAS: ", folder=False, thumbnail=thumb_pelis)) - itemlist.append(Item(channel=item.channel, title=" - Cartelera", action="categorias", url=item.url + "descarga-0-58126", thumbnail=thumb_cartelera, extra="cartelera")) - itemlist.append(Item(channel=item.channel, title=" - DVD-RIP", action="categorias", url=item.url + "descarga-0-581210", thumbnail=thumb_pelis, extra="DVD-RIP")) - itemlist.append(Item(channel=item.channel, title=" - HD-RIP", action="categorias", url=item.url + "descarga-0-58128", thumbnail=thumb_pelis_hd, extra="HD-RIP")) - itemlist.append(Item(channel=item.channel, title=" - Subtituladas", action="categorias", url=item.url + "descarga-0-58127", thumbnail=thumb_pelis_VO, extra="VOSE")) - itemlist.append(Item(channel=item.channel, title=" - Versión Original", action="categorias", url=item.url + "descarga-0-5812255", thumbnail=thumb_pelis_VO, extra="VO")) - - itemlist.append(Item(channel=item.channel, url=host, title="", folder=False, thumbnail=thumb_separador)) + itemlist.append(Item(channel=item.channel, title=" - Cartelera", action="categorias", url=item.url + "descarga-0-58126", thumbnail=thumb_cartelera, extra="cartelera", filter_lang=True)) + itemlist.append(Item(channel=item.channel, title=" - DVD-RIP", action="categorias", url=item.url + "descarga-0-581210", thumbnail=thumb_pelis, extra="DVD-RIP", filter_lang=True)) + itemlist.append(Item(channel=item.channel, title=" - HD-RIP", action="categorias", url=item.url + "descarga-0-58128", thumbnail=thumb_pelis_hd, extra="HD-RIP", filter_lang=True)) + itemlist.append(Item(channel=item.channel, title=" - Subtituladas", action="categorias", url=item.url + "descarga-0-58127", thumbnail=thumb_pelis_VO, extra="VOSE", filter_lang=False)) + itemlist.append(Item(channel=item.channel, title=" - Versión Original", action="categorias", url=item.url + "descarga-0-5812255", thumbnail=thumb_pelis_VO, extra="VO", filter_lang=False)) itemlist.append(Item(channel=item.channel, url=host, title="Series", action="submenu", thumbnail=thumb_series, extra="series")) - - itemlist.append(Item(channel=item.channel, url=host, title="", folder=False, thumbnail=thumb_separador)) itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", url=host + "descarga-0-0-0-0-fx-1-%s-sch-titulo-", thumbnail=thumb_buscar, extra="search")) + itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]Configuración:[/COLOR]", folder=False, thumbnail=thumb_separador)) + + itemlist.append(Item(channel=item.channel, action="configuracion", title="Configurar canal", thumbnail=thumb_settings)) + + autoplay.show_option(item.channel, itemlist) #Activamos Autoplay + return itemlist + + +def configuracion(item): + from platformcode import platformtools + ret = platformtools.show_channel_settings() + platformtools.itemlist_refresh() + return def submenu(item): logger.info() itemlist = [] 
+ item.filter_lang = True thumb_cartelera = get_thumb("now_playing.png") thumb_pelis = get_thumb("channels_movie.png") @@ -183,7 +207,7 @@ def listado(item): cnt_tot = 40 # Poner el num. máximo de items por página cnt_title = 0 # Contador de líneas insertadas en Itemlist inicio = time.time() # Controlaremos que el proceso no exceda de un tiempo razonable - fin = inicio + 10 # Después de este tiempo pintamos (segundos) + fin = inicio + 5 # Después de este tiempo pintamos (segundos) timeout_search = timeout # Timeout para descargas if item.extra == 'search': timeout_search = timeout * 2 # Timeout un poco más largo para las búsquedas @@ -306,7 +330,7 @@ def listado(item): elif "Archivo Torrent" not in scrapedenlace and "Video Online" not in scrapedenlace: #Si no tiene enlaces pasamos continue - cnt_title += 1 + #cnt_title += 1 item_local = item.clone() #Creamos copia de Item para trabajar if item_local.tipo: #... y limpiamos del item_local.tipo @@ -326,6 +350,8 @@ def listado(item): del item_local.text_bold item_local.text_color = True del item_local.text_color + item_local.filter_lang = True + del item_local.filter_lang title_subs = [] #creamos una lista para guardar info importante item_local.language = [] #creamos lista para los idiomas @@ -473,7 +499,13 @@ def listado(item): item_local.contentSeason_save = item_local.contentSeason del item_local.infoLabels['season'] - itemlist.append(item_local.clone()) #Pintar pantalla + #Ahora se filtra por idioma, si procede, y se pinta lo que vale + if config.get_setting('filter_languages', channel) > 0 and item.filter_lang: #Si hay idioma seleccionado, se filtra + itemlist = filtertools.get_link(itemlist, item_local, list_language) + else: + itemlist.append(item_local.clone()) #Si no, pintar pantalla + + cnt_title = len(itemlist) #Contador de líneas añadidas #logger.debug(item_local) @@ -490,7 +522,7 @@ def listado(item): else: title = '%s' % curr_page-1 - itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente " + title, title_lista=title_lista, url=item.url, extra=item.extra, extra2=item.extra2, last_page=str(last_page), curr_page=str(curr_page))) + itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente " + title, title_lista=title_lista, url=item.url, extra=item.extra, extra2=item.extra2, last_page=str(last_page), curr_page=str(curr_page), filter_lang=item.filter_lang)) return itemlist @@ -572,8 +604,12 @@ def listado_series(item): item_local.title = title.strip().lower().title() item_local.from_title = title.strip().lower().title() - itemlist.append(item_local.clone()) #Pintar pantalla - + #Ahora se filtra por idioma, si procede, y se pinta lo que vale + if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra + itemlist = filtertools.get_link(itemlist, item_local, list_language) + else: + itemlist.append(item_local.clone()) #Si no, pintar pantalla + #logger.debug(item_local) #if not item.category: #Si este campo no existe es que viene de la primera pasada de una búsqueda global @@ -598,6 +634,10 @@ def listado_series(item): def findvideos(item): logger.info() itemlist = [] + itemlist_t = [] #Itemlist total de enlaces + itemlist_f = [] #Itemlist de enlaces filtrados + if not item.language: + item.language = ['CAST'] #Castellano por defecto #logger.debug(item) @@ -682,7 +722,7 @@ def findvideos(item): #Ahora tratamos los enlaces .torrent itemlist_alt = [] #Usamos una lista intermedia para poder ordenar los episodios if matches_torrent: - for 
scrapedurl, scrapedquality, scrapedlang in matches_torrent: #leemos los torrents con la diferentes calidades + for scrapedurl, scrapedquality, scrapedlang in matches_torrent: #leemos los torrents con la diferentes calidades #Generamos una copia de Item para trabajar sobre ella item_local = item.clone() @@ -695,7 +735,7 @@ def findvideos(item): if not item_local.quality: item_local.quality = item.quality elif scrapertools.find_single_match(item.quality, '(\[\d+:\d+ h\])'): #Salvamos la duración - item_local.quality += ' [/COLOR][COLOR white]%s' % scrapertools.find_single_match(item.quality, '(\[\d+:\d+ h\])') #Copiamos duración + item_local.quality += ' [/COLOR][COLOR white]%s' % scrapertools.find_single_match(item.quality, '(\[\d+:\d+ h\])') #Copiamos duración if scrapedlang in IDIOMAS: #Salvamos el idioma, si lo hay item_local.language = ["%s" % IDIOMAS[scrapedlang]] @@ -718,6 +758,7 @@ def findvideos(item): #logger.debug(data) for scrapedtorrent, scrapedtitle in matches: + item_local = item_local.clone() quality = item_local.quality qualityscraped = '' if not item_local.contentEpisodeNumber and item_local.contentType == 'episode': @@ -773,11 +814,23 @@ def findvideos(item): item_local.action = "play" #Visualizar vídeo item_local.server = "torrent" #Seridor Torrent - itemlist_alt.append(item_local.clone(quality=quality)) #Pintar pantalla + itemlist_t.append(item_local.clone(quality=quality)) #Pintar pantalla, si no se filtran idiomas + + # Requerido para FilterTools + if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra + itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío #logger.debug("TORRENT: " + scrapedtorrent + " / title gen/torr: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / tamaño: " + scrapedsize + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName) #logger.debug(item_local) + if len(itemlist_f) > 0: #Si hay entradas filtradas... + itemlist_alt.extend(itemlist_f) #Pintamos pantalla filtrada + else: + if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ... + thumb_separador = get_thumb("next.png") #... 
pintamos todo con aviso + itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador)) + itemlist_alt.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado + #Si son múltiples episodios, ordenamos if len(itemlist_alt) > 1 and (item.contentType == 'episode' or item.contentType == 'season'): itemlist_alt = sorted(itemlist_alt, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber))) #clasificamos @@ -786,6 +839,8 @@ def findvideos(item): #Ahora tratamos los servidores directo itemlist_alt = [] + itemlist_t = [] #Itemlist total de enlaces + itemlist_f = [] #Itemlist de enlaces filtrados if matches_directo: for scrapedurl, scrapedquality, scrapedlang in matches_directo: #leemos los torrents con la diferentes calidades #Generamos una copia de Item para trabajar sobre ella @@ -824,6 +879,8 @@ def findvideos(item): #logger.debug(data) for scrapedtitle, scrapedenlace in matches: + item_local = item_local.clone() + enlace = '' devuelve = '' mostrar_server = '' @@ -918,18 +975,33 @@ def findvideos(item): item_local.action = "play" #Visualizar vídeo item_local.server = servidor #Seridor Directo - itemlist_alt.append(item_local.clone(quality=quality)) #Pintar pantalla + itemlist_t.append(item_local.clone(quality=quality)) #Pintar pantalla, si no se filtran idiomas + + # Requerido para FilterTools + if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra + itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío except: logger.error('ERROR al procesar enlaces DIRECTOS: ' + servidor + ' / ' + scrapedenlace) #logger.debug("DIRECTO: " + scrapedenlace + " / title gen/torr: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / tamaño: " + scrapedsize + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName) #logger.debug(item_local) + if len(itemlist_f) > 0: #Si hay entradas filtradas... + itemlist_alt.extend(itemlist_f) #Pintamos pantalla filtrada + else: + if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ... + thumb_separador = get_thumb("next.png") #... 
pintamos todo con aviso + itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador)) + itemlist_alt.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado + #Si son múltiples episodios, ordenamos if len(itemlist_alt) > 1 and (item.contentType == 'episode' or item.contentType == 'season'): itemlist_alt = sorted(itemlist_alt, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber))) #clasificamos tmdb.set_infoLabels(itemlist_alt, True) #TMDB de la lista de episodios itemlist.extend(itemlist_alt) + + # Requerido para AutoPlay + autoplay.start(itemlist, item) #Lanzamos Autoplay return itemlist diff --git a/plugin.video.alfa/channels/goovie.py b/plugin.video.alfa/channels/goovie.py index 2948cf73..4de00a9c 100644 --- a/plugin.video.alfa/channels/goovie.py +++ b/plugin.video.alfa/channels/goovie.py @@ -17,10 +17,10 @@ from channels import autoplay from platformcode import config, logger -IDIOMAS = {'1':'Cast', '2':'Lat', '3':'VOSE', '4':'VO'} +IDIOMAS = {'EspaL':'Cast', 'LatinoL':'Lat', 'SubL':'VOSE', 'OriL':'VO'} list_language = IDIOMAS.values() -CALIDADES = {'1':'1080','2':'720','3':'480','4':'360'} +CALIDADES = {'1080p':'1080','720p':'720','480p':'480','360p':'360'} list_quality = ['1080', '720', '480', '360'] @@ -89,17 +89,20 @@ def section(item): logger.info() itemlist=[] data = get_source(host+item.type) - if 'Genero' in item.title: - data = scrapertools.find_single_match(data, 'genero.*?') + data = scrapertools.find_single_match(data, 'Generos.*?') elif 'Año' in item.title: - data = scrapertools.find_single_match(data, 'año.*?') - patron = '(.*?)' + data = scrapertools.find_single_match(data, 'Años.*?') + patron = "
  • " matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedurl, scrapedtitle in matches: + for scrapedtitle in matches: title = scrapedtitle - itemlist.append(Item(channel=item.channel, url=scrapedurl, title=title, action='list_all', + if r'\d+' in scrapedtitle: + url = '%s%s/filtro/,/%s,' % (host, item.type, title) + else: + url = '%s%s/filtro/%s,/,' % (host, item.type, title) + itemlist.append(Item(channel=item.channel, url=url, title=title, action='list_all', type=item.type)) return itemlist @@ -109,46 +112,33 @@ def list_all(item): itemlist = [] data = get_source(item.url) - #logger.debug(data) - #return - if item.type == 'peliculas': - patron = '
.*?.*?(.*?).*?' - patron += "(.*?)

    (\d{4}) /.*?.*?'(\d+)'" - matches = re.compile(patron, re.DOTALL).findall(data) + patron = '
    ' + matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedthumbnail, scrapedurl, scrapedtitle, scrapedplot, year, video_id in matches: + for scrapedurl, scrapedthumbnail, scrapedtitle in matches: - title = '%s [%s]' % (scrapedtitle, year) - contentTitle = scrapedtitle - thumbnail = scrapedthumbnail - url = scrapedurl + title = scrapedtitle + thumbnail = scrapedthumbnail.strip() + url = scrapedurl + filter_thumb = thumbnail.replace("https://image.tmdb.org/t/p/w154", "") + filter_list = {"poster_path": filter_thumb} + filter_list = filter_list.items() + new_item = Item(channel=item.channel, + title=title, + url=url, + thumbnail=thumbnail, + plot=thumbnail, + infoLabels={'filtro':filter_list}) - itemlist.append(item.clone(action='findvideos', - title=title, - url=url, - thumbnail=thumbnail, - contentTitle=contentTitle, - video_id=video_id, - infoLabels={'year':year})) + if item.type == 'peliculas': + new_item.action = 'findvideos' + new_item.contentTitle = scrapedtitle + else: + new_item.action = 'seasons' + new_item.contentSerieName = scrapedtitle - elif item.type == 'series': - patron = '
.*?.*?.*?' - patron +='(.*?)(.*?)

    (\d{4}) /' - matches = re.compile(patron, re.DOTALL).findall(data) - - for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot, year in matches: - title = scrapedtitle - contentSerieName = scrapedtitle - thumbnail = scrapedthumbnail - url = scrapedurl - - itemlist.append(item.clone(action='seasons', - title=title, - url=url, - thumbnail=thumbnail, - plot=scrapedplot, - contentSerieName=contentSerieName, - infoLabels={'year':year})) + itemlist.append(new_item) tmdb.set_infoLabels(itemlist, seekTmdb=True) # Paginación @@ -199,21 +189,18 @@ def episodesxseasons(item): itemlist = [] data=get_source(item.url) - logger.debug(data) - patron= "ViewEpisode\('(\d+)', this\)>
%s - (\d+)" % item.infoLabels['season'] patron += ".*?src=(.*?) />.*?namep>(.*?)" - + patron= "]+)>%s - (\d+)([^>]+)

    " % item.infoLabels['season'] matches = re.compile(patron, re.DOTALL).findall(data) infoLabels = item.infoLabels - for video_id, scrapedepisode, scrapedthumbnail, scrapedtitle in matches: + for url, scrapedepisode, scrapedtitle in matches: infoLabels['episode'] = scrapedepisode title = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle) - itemlist.append(Item(channel=item.channel, title= title, url=item.url, thumbnail=scrapedthumbnail, - action='findvideos', video_id=video_id, infoLabels=infoLabels)) + itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', + infoLabels=infoLabels)) tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) @@ -224,87 +211,45 @@ def findvideos(item): logger.info() from lib import jsunpack itemlist = [] - headers = {'referer':item.url} - if item.video_id == '': - find_id = get_source(item.url) - #logger.debug(find_id) - #return - item.video_id = scrapertools.find_single_match(find_id, 'var centerClick = (\d+);') - url = 'https://goovie.co/api/links/%s' % item.video_id - data = httptools.downloadpage(url, headers=headers).data - video_list = jsontools.load(data) - for video_info in video_list: - logger.debug(video_info) - url = video_info['visor'] - plot = 'idioma: %s calidad: %s' % (video_info['idioma'], video_info['calidad']) + data = get_source(item.url) + + patron = "onclick=clickLink\(this, '([^']+)', '([^']+)', '([^']+)'\);>" + matches = re.compile(patron, re.DOTALL).findall(data) + headers = {'referer': item.url} + for url, quality, language in matches: + data = httptools.downloadpage(url, headers=headers, follow_redirects=False).data data = re.sub(r'"|\n|\r|\t| |
    |\s{2,}', "", data) packed = scrapertools.find_single_match(data, '(eval\(.*?);var') unpacked = jsunpack.unpack(packed) - logger.debug('unpacked %s' % unpacked) server = scrapertools.find_single_match(unpacked, "src:.'(http://\D+)/") id = scrapertools.find_single_match(unpacked, "src:.'http://\D+/.*?description:.'(.*?).'") if server == '': if 'powvideo' in unpacked: - id = scrapertools.find_single_match(unpacked ,",description:.'(.*?).'") - server= 'https://powvideo.net' + id = scrapertools.find_single_match(unpacked, ",description:.'(.*?).'") + server = 'https://powvideo.net' url = '%s/%s' % (server, id) if server != '' and id != '': - language = IDIOMAS[video_info['idioma']] - quality = CALIDADES[video_info['calidad']] + language = IDIOMAS[language] + quality = CALIDADES[quality] title = ' [%s] [%s]' % (language, quality) - itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', language=language, - quality=quality)) - - itmelist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) + itemlist.append(Item(channel=item.channel, title='%s' + title, url=url, action='play', language=language, + quality=quality, infoLabels=item.infoLabels)) + itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) return sorted(itemlist, key=lambda i: i.language) + def search(item, texto): logger.info() texto = texto.replace(" ", "+") item.url = item.url + texto item.type = 'peliculas' if texto != '': - return search_results(item) + return list_all(item) else: return [] -def search_results(item): - logger.info() - - itemlist=[] - - data=get_source(item.url) - logger.debug(data) - patron = '
.*?href=(.*?)>.*?typeContent>(.*?)<.*?' - patron += '(.*?)(.*?)

    (\d{4})<' - matches = re.compile(patron, re.DOTALL).findall(data) - - for scrapedurl, content_type ,scrapedthumb, scrapedtitle, scrapedplot, year in matches: - - title = scrapedtitle - url = scrapedurl - thumbnail = scrapedthumb - plot = scrapedplot - if content_type != 'Serie': - action = 'findvideos' - else: - action = 'seasons' - - new_item=Item(channel=item.channel, title=title, url=url, thumbnail=thumbnail, plot=plot, - action=action, type=content_type, infoLabels={'year':year}) - if new_item.action == 'findvideos': - new_item.contentTitle = new_item.title - else: - new_item.contentSerieName = new_item.title - - itemlist.append(new_item) - - tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) - - return itemlist - def newest(categoria): logger.info() itemlist = [] @@ -313,9 +258,9 @@ def newest(categoria): if categoria in ['peliculas']: item.url = host + 'peliculas' elif categoria == 'infantiles': - item.url = host + 'peliculas/generos/animación' + item.url = host + 'peliculas/filtro/Animación,/,' elif categoria == 'terror': - item.url = host + 'peliculas/generos/terror' + item.url = host + 'peliculas/filtro/Terror,/,' item.type='peliculas' itemlist = list_all(item) if itemlist[-1].title == 'Siguiente >>': diff --git a/plugin.video.alfa/channels/grantorrent.json b/plugin.video.alfa/channels/grantorrent.json index e3f8591e..37483759 100644 --- a/plugin.video.alfa/channels/grantorrent.json +++ b/plugin.video.alfa/channels/grantorrent.json @@ -22,6 +22,30 @@ "enabled": true, "visible": true }, + { + "id": "modo_grafico", + "type": "bool", + "label": "Buscar información extra (TMDB)", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "filter_languages", + "type": "list", + "label": "Mostrar enlaces en idioma...", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "No filtrar", + "CAST", + "LAT", + "VO", + "VOS", + "VOSE" + ] + }, { "id": "seleccionar_serie_temporada", "type": "list", @@ -43,12 +67,25 @@ "visible": true }, { - "id": "modo_grafico", - "type": "bool", - "label": "Buscar información extra (TMDB)", - "default": true, + "id": "timeout_downloadpage", + "type": "list", + "label": "Timeout (segs.) 
en descarga de páginas o verificación de servidores", + "default": 5, "enabled": true, - "visible": true + "visible": true, + "lvalues": [ + "None", + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "10" + ] } ] } diff --git a/plugin.video.alfa/channels/grantorrent.py b/plugin.video.alfa/channels/grantorrent.py index 072f81fb..9a42f6fb 100644 --- a/plugin.video.alfa/channels/grantorrent.py +++ b/plugin.video.alfa/channels/grantorrent.py @@ -4,6 +4,7 @@ import re import sys import urllib import urlparse +import time from channelselector import get_thumb from core import httptools @@ -13,13 +14,24 @@ from core.item import Item from platformcode import config, logger from core import tmdb from lib import generictools +from channels import filtertools +from channels import autoplay + + +#IDIOMAS = {'CAST': 'Castellano', 'LAT': 'Latino', 'VO': 'Version Original'} +IDIOMAS = {'Castellano': 'CAST', 'Latino': 'LAT', 'Version Original': 'VO'} +list_language = IDIOMAS.values() +list_quality = [] +list_servers = ['torrent'] host = "https://grantorrent.net/" +channel = "grantorrent" dict_url_seasons = dict() -__modo_grafico__ = config.get_setting('modo_grafico', 'grantorrent') -modo_serie_temp = config.get_setting('seleccionar_serie_temporada', 'grantorrent') -modo_ultima_temp = config.get_setting('seleccionar_ult_temporadda_activa', 'grantorrent') +__modo_grafico__ = config.get_setting('modo_grafico', channel) +timeout = config.get_setting('timeout_downloadpage', channel) +modo_serie_temp = config.get_setting('seleccionar_serie_temporada', channel) +modo_ultima_temp = config.get_setting('seleccionar_ult_temporadda_activa', channel) def mainlist(item): @@ -32,7 +44,10 @@ def mainlist(item): thumb_series = get_thumb("channels_tvshow.png") thumb_series_hd = get_thumb("channels_tvshow_hd.png") thumb_buscar = get_thumb("search.png") + thumb_separador = get_thumb("next.png") thumb_settings = get_thumb("setting_0.png") + + autoplay.init(item.channel, list_servers, list_quality) itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=host, extra="peliculas", thumbnail=thumb_pelis)) @@ -43,11 +58,12 @@ def mainlist(item): #Buscar series itemlist.append(Item(channel=item.channel, action="search", title="Buscar en Series >>", url=host + "series/", extra="series", thumbnail=thumb_buscar)) + + itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]Configuración:[/COLOR]", folder=False, thumbnail=thumb_separador)) - itemlist.append( - Item(channel=item.channel, action="", title="[COLOR yellow]Configuración del Canal:[/COLOR]", url="", thumbnail=thumb_settings)) - itemlist.append( - Item(channel=item.channel, action="settingCanal", title="Opciones de Videoteca y TMDB", url="", thumbnail=thumb_settings)) + itemlist.append(Item(channel=item.channel, action="settingCanal", title="Configurar canal", thumbnail=thumb_settings)) + + autoplay.show_option(item.channel, itemlist) #Activamos Autoplay return itemlist @@ -120,6 +136,8 @@ def listado(item): cnt_tot = 40 # Poner el num. 
máximo de items por página cnt_title = 0 # Contador de líneas insertadas en Itemlist result_mode = config.get_setting("result_mode", channel="search") # Búsquedas globales: listado completo o no + if not item.extra2: + item.extra2 = '' #Sistema de paginado para evitar páginas vacías o semi-vacías en casos de búsquedas con series con muchos episodios title_lista = [] # Guarda la lista de series que ya están en Itemlist, para no duplicar lineas @@ -133,18 +151,25 @@ def listado(item): cnt_top = 10 #max. num de páginas web a leer antes de pintar total_pag = 1 post_num = 1 #num pagina actual + inicio = time.time() # Controlaremos que el proceso no exceda de un tiempo razonable + fin = inicio + 5 # Después de este tiempo pintamos (segundos) + timeout_search = timeout # Timeout para descargas + if item.extra == 'search': + timeout_search = timeout * 2 # Timeout un poco más largo para las búsquedas + if timeout_search < 5: + timeout_search = 5 # Timeout un poco más largo para las búsquedas #Máximo num. de líneas permitidas por TMDB (40). Máx de 5 páginas por Itemlist para no degradar el rendimiento. #Si itemlist sigue vacío después de leer 5 páginas, se pueden llegar a leer hasta 10 páginas para encontrar algo - while cnt_title <= cnt_tot and cnt_next < cnt_top: + while cnt_title <= cnt_tot and cnt_next < cnt_top and fin > time.time(): # Descarga la página data = '' try: if not item.post: item.post = item.url video_section = '' - data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.post).data) + data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.post, timeout=timeout_search).data) video_section = scrapertools.find_single_match(data, '
    (.*?
    )') except: pass @@ -234,6 +259,10 @@ def listado(item): if scrapedurl_alt in title_lista_alt or scrapedurl_alt in title_lista_alt_for: # si ya se ha tratado, pasamos al siguiente item continue + + #Verificamos si el idioma está dentro del filtro, si no pasamos + if not lookup_idiomas_paginacion(item, scrapedurl, scrapedtitle, lang, list_language): + continue title_lista_alt_for += [scrapedurl_alt] cnt_title += 1 # Sería una línea real más para Itemlist @@ -278,7 +307,7 @@ def listado(item): if scrapedurl_alt in title_lista: # si ya se ha tratado, pasamos al siguiente item continue # solo guardamos la url para series y docus title_lista += [scrapedurl_alt] - cnt_title += 1 # Sería una línea real más para Itemlist + #cnt_title += 1 # Sería una línea real más para Itemlist item_local = item.clone() #Creamos copia de Item para trabajar y limpiamos campos innecesarios if item_local.media: #Viene de Búsquedas @@ -315,6 +344,9 @@ def listado(item): if "dual" in lang.lower() or "dual" in title.lower(): item_local.language[0:0] = ["DUAL"] + if item_local.language == []: + item_local.language = ['CAST'] #Por defecto + #Limpiamos el título de la basuna innecesaria title = title.replace("Dual", "").replace("dual", "").replace("Subtitulada", "").replace("subtitulada", "").replace("Subt", "").replace("subt", "").replace("Sub", "").replace("sub", "").replace("(Reparado)", "").replace("(Proper)", "").replace("(proper)", "").replace("Proper", "").replace("proper", "").replace("(Latino)", "").replace("Latino", "") title = title.replace("- HDRip", "").replace("(HDRip)", "").replace("- Hdrip", "").replace("(microHD)", "").replace("(DVDRip)", "").replace("(HDRip)", "").replace("(BR-LINE)", "").replace("(HDTS-SCREENER)", "").replace("(BDRip)", "").replace("(BR-Screener)", "").replace("(DVDScreener)", "").replace("TS-Screener", "").replace(" TS", "").replace(" Ts", "") @@ -346,8 +378,13 @@ def listado(item): item_local.from_title = title.strip() #Guardamos esta etiqueta para posible desambiguación de título item_local.infoLabels['year'] = "-" #Reseteamos el año para que TMDB nos lo de - #Agrega el item local a la lista itemlist - itemlist.append(item_local.clone()) + #Ahora se filtra por idioma, si procede, y se pinta lo que vale + if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra + itemlist = filtertools.get_link(itemlist, item_local, list_language) + else: + itemlist.append(item_local.clone()) #Si no, pintar pantalla + + cnt_title = len(itemlist) #Contador de líneas añadidas #if not item.category and result_mode == 0: #Si este campo no existe, viene de la primera pasada de una búsqueda global # return itemlist #Retornamos sin pasar por la fase de maquillaje para ahorrar tiempo @@ -373,7 +410,7 @@ def listado(item): else: title = '[COLOR gold]Página siguiente >>[/COLOR] %s' % post_num - itemlist.append(item.clone(action="listado", title=title, url=next_page, thumbnail=get_thumb("next.png"), title_lista=title_lista)) + itemlist.append(item.clone(action="listado", title=title, url=next_page, thumbnail=get_thumb("next.png"), title_lista=title_lista, language='')) return itemlist @@ -381,13 +418,17 @@ def listado(item): def findvideos(item): logger.info() itemlist = [] + itemlist_t = [] #Itemlist total de enlaces + itemlist_f = [] #Itemlist de enlaces filtrados + if not item.language: + item.language = ['CAST'] #Castellano por defecto #logger.debug(item) #Bajamos los datos de la página data = '' try: - data = re.sub(r"\n|\r|\t|\s{2,}", "", 
httptools.downloadpage(item.url).data) + data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url, timeout=timeout).data) except: pass @@ -420,6 +461,14 @@ def findvideos(item): #Ahora recorremos todos los links por calidades for lang, quality, size, scrapedurl in matches: temp_epi = '' + if scrapertools.find_single_match(quality, '\s?\(Contrase.+?: ]*>(.*?)<\/font>\)') + quality = re.sub(r'\s?\(Contrase.+?: ]*>(.*?)<\/font>\)'): + password = scrapertools.find_single_match(size, '\s?\(Contrase.+?: ]*>(.*?)<\/font>\)', '', size) + size += ' [Contraseña=%s]' % password if item.contentType == "episode": #En Series los campos están en otro orden. No hay size, en su lugar sxe temp_epi = quality quality = size @@ -485,10 +534,10 @@ def findvideos(item): else: item_local.quality = '%s [/COLOR][COLOR white][%s]' % (item_local.quality, size) if item_local.action == 'show_result': #Viene de una búsqueda global - channel = item_local.channel.capitalize() + channel_alt = item_local.channel.capitalize() if item_local.from_channel: - channel = item_local.from_channel.capitalize() - item_local.quality = '[COLOR yellow][%s][/COLOR] %s' % (channel, item_local.quality) + channel_alt = item_local.from_channel.capitalize() + item_local.quality = '[COLOR yellow][%s][/COLOR] %s' % (channel_alt, item_local.quality) #Salvamos la url del .torrent if scrapedurl: @@ -507,11 +556,26 @@ def findvideos(item): item_local.action = "play" #Visualizar vídeo item_local.server = "torrent" #Seridor Torrent - itemlist.append(item_local.clone()) #Pintar pantalla + itemlist_t.append(item_local.clone()) #Pintar pantalla, si no se filtran idiomas + + # Requerido para FilterTools + if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra + itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío #logger.debug("TORRENT: " + item_local.url + " / title gen/torr: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality) #logger.debug(item_local) + if len(itemlist_f) > 0: #Si hay entradas filtradas... + itemlist.extend(itemlist_f) #Pintamos pantalla filtrada + else: + if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ... + thumb_separador = get_thumb("next.png") #... 
pintamos todo con aviso + itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador)) + itemlist.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado + + # Requerido para AutoPlay + autoplay.start(itemlist, item) #Lanzamos Autoplay + return itemlist @@ -530,7 +594,7 @@ def episodios(item): data = '' try: - data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data) #Cargamos los datos de la página + data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url, timeout=timeout).data) #Cargamos los datos de la página patron_actual = ' 0 and item.extra2 != 'categorias': + itemlist = filtertools.get_link(itemlist, item, list_language) + + if len(itemlist) == 0: + estado = False + + #Volvemos a la siguiente acción en el canal + return estado def actualizar_titulos(item): diff --git a/plugin.video.alfa/channels/infoplus.py b/plugin.video.alfa/channels/infoplus.py index 0f2f8bfe..35af3db9 100755 --- a/plugin.video.alfa/channels/infoplus.py +++ b/plugin.video.alfa/channels/infoplus.py @@ -275,7 +275,11 @@ class main(xbmcgui.WindowDialog): skin = xbmc.getSkinDir() self.fonts = get_fonts(skin) - self.setCoordinateResolution(2) + + #### Compatibilidad con Kodi 18 #### + if config.get_platform(True)['num_version'] < 18: + self.setCoordinateResolution(2) + self.actorButton = xbmcgui.ControlButton(995, 475, 55, 55, '', font='Font40', alignment=0x00000006, noFocusTexture='https://s17.postimg.cc/40acsuihb/thumb_search_star_no.png', focusTexture='https://s33.postimg.cc/ikk0qyvrj/thumb_search_star.png', @@ -805,7 +809,10 @@ class related(xbmcgui.WindowDialog): import traceback logger.error(traceback.format_exc()) - self.setCoordinateResolution(2) + #### Compatibilidad con Kodi 18 #### + if config.get_platform(True)['num_version'] < 18: + self.setCoordinateResolution(2) + self.background = xbmcgui.ControlImage(178, 50, 1053, 634, self.infoLabels.get("fanart", "http://s6.postimg.cc/fflvear2p/nofanart.png")) self.addControl(self.background) @@ -1207,6 +1214,7 @@ def busqueda_global(item, infoLabels, org_title=False): cat = ["serie"] else: cat = ["movie"] + cat += ["infoPlus"] new_item = Item() new_item.extra = infoLabels.get("title", "") @@ -1546,7 +1554,10 @@ class ActorInfo(xbmcgui.WindowDialog): elif not actor_tmdb.result.get("biography"): actor_tmdb.result["biography"] = "Sin información" - self.setCoordinateResolution(2) + #### Compatibilidad con Kodi 18 #### + if config.get_platform(True)['num_version'] < 18: + self.setCoordinateResolution(2) + self.background = xbmcgui.ControlImage(30, -5, 1250, 730, 'http://imgur.com/7ccBX3g.png') self.addControl(self.background) if set_animation: @@ -1952,7 +1963,10 @@ class images(xbmcgui.WindowDialog): for imagen, title in self.mal: self.imagenes.append(imagen) - self.setCoordinateResolution(2) + #### Compatibilidad con Kodi 18 #### + if config.get_platform(True)['num_version'] < 18: + self.setCoordinateResolution(2) + self.shadow = xbmcgui.ControlImage(245, 10, 1011, 700, 'http://imgur.com/66VSLTo.png') self.addControl(self.shadow) if set_animation: @@ -2175,7 +2189,10 @@ class Trailer(xbmcgui.WindowXMLDialog): self.doModal() def onInit(self): - self.setCoordinateResolution(0) + #### Compatibilidad con Kodi 18 #### + if config.get_platform(True)['num_version'] < 18: + self.setCoordinateResolution(0) + if not self.video_url: platformtools.dialog_notification(config.get_localized_string(60507), config.get_localized_string(60508), 2) 
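Note on the recurring pattern in this patch: the grantorrent.py changes earlier in the diff (and the analogous mejortorrent1.py and newpct1.py changes further down) all wire the same filtertools/autoplay flow into findvideos(): every scraped link is kept in itemlist_t, links matching the "filter_languages" setting are accumulated in itemlist_f via filtertools.get_link(), and if the filter leaves nothing a red warning entry is painted before falling back to the unfiltered list; autoplay.start() is then launched on the result. The same channels also add a lookup_idiomas_paginacion() helper that tags each scraped title with VOS/LAT/CAST and runs it through filtertools.get_link() during pagination, so titles in unwanted languages are skipped before they reach the listing. Below is a minimal sketch of the findvideos() flow, assuming the module-level channel, list_language and host values declared in those channels; scrape_links() is a hypothetical stand-in for each channel's own scraping loop, not a function in this patch.

# -*- coding: utf-8 -*-
# Sketch only: summarises the filter/autoplay flow added to findvideos() in
# grantorrent.py, mejortorrent1.py and newpct1.py. scrape_links() is hypothetical.
from channels import autoplay, filtertools
from channelselector import get_thumb
from core.item import Item
from platformcode import config

def findvideos_sketch(item, channel, list_language, host):
    itemlist = []
    itemlist_t = []                                   # every link found
    itemlist_f = []                                   # links passing the language filter
    if not item.language:
        item.language = ['CAST']                      # Castilian by default
    for item_local in scrape_links(item):             # hypothetical per-channel scraping loop
        itemlist_t.append(item_local.clone())
        if config.get_setting('filter_languages', channel) > 0:
            # FilterTools keeps only the links whose language matches the setting
            itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language)
    if len(itemlist_f) > 0:
        itemlist.extend(itemlist_f)                   # paint only the filtered links
    else:
        if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0:
            # nothing matched the selected language: warn, then paint everything
            itemlist.append(Item(channel=item.channel, url=host,
                                 title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]",
                                 thumbnail=get_thumb("next.png")))
        itemlist.extend(itemlist_t)
    autoplay.start(itemlist, item)                    # required for AutoPlay
    return itemlist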
diff --git a/plugin.video.alfa/channels/maxipelis24.py b/plugin.video.alfa/channels/maxipelis24.py index 456cd828..525f8f84 100644 --- a/plugin.video.alfa/channels/maxipelis24.py +++ b/plugin.video.alfa/channels/maxipelis24.py @@ -4,6 +4,7 @@ import re import urlparse import urllib +from core import tmdb from core import servertools from core import httptools from core import scrapertools @@ -11,7 +12,7 @@ from core.item import Item from platformcode import config, logger from channelselector import get_thumb -host="http://maxipelis24.com" +host = "http://maxipelis24.com" def mainlist(item): @@ -19,11 +20,11 @@ def mainlist(item): itemlist = [] - itemlist.append(Item(channel=item.channel, title="peliculas", action="movies", url=host, thumbnail=get_thumb('movies', auto=True))) - itemlist.append(Item(channel=item.channel, action="category", title="Año de Estreno", url=host, cat='year', thumbnail=get_thumb('year', auto=True))) - itemlist.append(Item(channel=item.channel, action="category", title="Géneros", url=host, cat='genre', thumbnail=get_thumb('genres', auto=True))) - itemlist.append(Item(channel=item.channel, action="category", title="Calidad", url=host, cat='quality', thumbnail=get_thumb("quality", auto=True))) - itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+"?s=", thumbnail=get_thumb("search", auto=True))) + itemlist.append(Item(channel = item.channel, title = "peliculas", action = "movies", url = host, thumbnail = get_thumb('movies', auto = True))) + itemlist.append(Item(channel = item.channel, action = "category", title = "Año de Estreno", url = host, cat = 'year', thumbnail = get_thumb('year', auto = True))) + itemlist.append(Item(channel = item.channel, action = "category", title = "Géneros", url = host, cat = 'genre', thumbnail = get_thumb('genres', auto = True))) + itemlist.append(Item(channel = item.channel, action = "category", title = "Calidad", url = host, cat = 'quality', thumbnail = get_thumb("quality", auto = True))) + itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "?s=", thumbnail = get_thumb("search", auto = True))) return itemlist @@ -51,8 +52,8 @@ def category(item): patron = 'li>
    ([^<]+)<' matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedurl , scrapedtitle in matches: - itemlist.append(Item(channel=item.channel, action='movies', title=scrapedtitle, url=scrapedurl, type='cat', first=0)) + for scrapedurl, scrapedtitle in matches: + itemlist.append(Item(channel = item.channel, action = 'movies', title =scrapedtitle, url = scrapedurl, type = 'cat', first = 0)) return itemlist def movies(item): @@ -70,56 +71,60 @@ def movies(item): matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, img, scrapedtitle, ranking, resto, year, quality in matches: + scrapedtitle = re.sub(r'\d{4}|[()]','', scrapedtitle) plot = scrapertools.htmlclean(resto).strip() - title = '%s [COLOR yellow](%s)[/COLOR] [COLOR red][%s][/COLOR]'% (scrapedtitle, ranking, quality) - itemlist.append(Item(channel=item.channel, - title=title, - url=scrapedurl, - action="findvideos", - plot=plot, - thumbnail=img, + title = ' %s [COLOR yellow](%s)[/COLOR] [COLOR red][%s][/COLOR]' % (scrapedtitle, ranking, quality) + itemlist.append(Item(channel = item.channel, + title = title, + url = scrapedurl, + action = "findvideos", + plot = plot, + thumbnail = img, contentTitle = scrapedtitle, contentType = "movie", - quality=quality)) + quality = quality, + infoLabels = {'year': year})) + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True) #Paginacion - next_page = '
    Siguiente<' - matches = re.compile(next_page, re.DOTALL).findall(data) + matches = re.compile('
    Siguiente<', re.DOTALL).findall(data) if matches: url = urlparse.urljoin(item.url, matches[0]) - itemlist.append(Item(channel=item.channel, action = "movies", title = "Página siguiente >>",url = url)) + itemlist.append(Item(channel = item.channel, action = "movies", title = "Página siguiente >>", url = url)) return itemlist def findvideos(item): logger.info() - itemlist=[] + itemlist = [] data = httptools.downloadpage(item.url).data - data = scrapertools.get_match(data, '
    (.*?)
    ') - # Busca los enlaces a los videos listavideos = servertools.findvideos(data) - for video in listavideos: videotitle = scrapertools.unescape(video[0]) url = video[1] server = video[2] - - itemlist.append(Item(channel=item.channel, action="play", server=server, title=videotitle, url=url, - thumbnail=item.thumbnail, plot=item.plot, fulltitle=item.title, folder=False)) - + itemlist.append(Item(channel = item.channel, + action = "play", + server = server, + title = videotitle, + url = url, + thumbnail = item.thumbnail, + plot = item.plot, + contentTitle = item.contentTitle, + infoLabels = item.infoLabels, + folder = False)) # Opción "Añadir esta película a la biblioteca de KODI" if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos': - itemlist.append( - Item(channel=item.channel, - title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', - url=item.url, - action="add_pelicula_to_library", - extra="findvideos", - contentTitle=item.contentTitle, - thumbnail=item.thumbnail - )) + itemlist.append(Item(channel = item.channel, + title = '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', + url = item.url, + action = "add_pelicula_to_library", + extra = "findvideos", + contentTitle = item.contentTitle, + thumbnail = item.thumbnail + )) return itemlist diff --git a/plugin.video.alfa/channels/mejortorrent1.json b/plugin.video.alfa/channels/mejortorrent1.json index a06a4b5f..03dc4afa 100644 --- a/plugin.video.alfa/channels/mejortorrent1.json +++ b/plugin.video.alfa/channels/mejortorrent1.json @@ -38,6 +38,22 @@ "enabled": true, "visible": true }, + { + "id": "filter_languages", + "type": "list", + "label": "Mostrar enlaces en idioma...", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "No filtrar", + "CAST", + "LAT", + "VO", + "VOS", + "VOSE" + ] + }, { "id": "seleccionar_ult_temporadda_activa", "type": "bool", @@ -61,6 +77,27 @@ "default": true, "enabled": true, "visible": true + }, + { + "id": "timeout_downloadpage", + "type": "list", + "label": "Timeout (segs.) 
en descarga de páginas o verificación de servidores", + "default": 5, + "enabled": true, + "visible": true, + "lvalues": [ + "None", + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "10" + ] } ] } \ No newline at end of file diff --git a/plugin.video.alfa/channels/mejortorrent1.py b/plugin.video.alfa/channels/mejortorrent1.py index feffe91b..6814d79e 100644 --- a/plugin.video.alfa/channels/mejortorrent1.py +++ b/plugin.video.alfa/channels/mejortorrent1.py @@ -4,6 +4,7 @@ import re import sys import urllib import urlparse +import time from channelselector import get_thumb from core import httptools @@ -13,10 +14,23 @@ from core.item import Item from platformcode import config, logger from core import tmdb from lib import generictools +from channels import filtertools +from channels import autoplay -host = config.get_setting('domain_name', 'mejortorrent1') -__modo_grafico__ = config.get_setting('modo_grafico', 'mejortorrent1') +#IDIOMAS = {'CAST': 'Castellano', 'LAT': 'Latino', 'VO': 'Version Original'} +IDIOMAS = {'Castellano': 'CAST', 'Latino': 'LAT', 'Version Original': 'VO'} +list_language = IDIOMAS.values() +list_quality = [] +list_servers = ['torrent'] + +channel = "mejortorrent1" +host = config.get_setting('domain_name', channel) + +categoria = channel.capitalize() +__modo_grafico__ = config.get_setting('modo_grafico', channel) +timeout = config.get_setting('timeout_downloadpage', channel) + def mainlist(item): logger.info() @@ -30,7 +44,10 @@ def mainlist(item): thumb_series_az = get_thumb("channels_tvshow_az.png") thumb_docus = get_thumb("channels_documentary.png") thumb_buscar = get_thumb("search.png") + thumb_separador = get_thumb("next.png") thumb_settings = get_thumb("setting_0.png") + + autoplay.init(item.channel, list_servers, list_quality) #itemlist.append(Item(channel=item.channel, title="Novedades", action="listado_busqueda", extra="novedades", tipo=False, # url= host + "ultimos-torrents/", thumbnail=thumb_buscar)) @@ -46,19 +63,20 @@ def mainlist(item): itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", thumbnail=thumb_buscar, tipo=False)) - itemlist.append( - Item(channel=item.channel, action="", title="[COLOR yellow]Configuración del Canal:[/COLOR]", url="", thumbnail=thumb_settings)) - itemlist.append( - Item(channel=item.channel, action="settingCanal", title="URL del Canal y otros", url="", thumbnail=thumb_settings)) + itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]Configuración:[/COLOR]", folder=False, thumbnail=thumb_separador)) + + itemlist.append(Item(channel=item.channel, action="configuracion", title="Configurar canal", thumbnail=thumb_settings)) + + autoplay.show_option(item.channel, itemlist) #Activamos Autoplay return itemlist -def settingCanal(item): +def configuracion(item): from platformcode import platformtools - platformtools.show_channel_settings() + ret = platformtools.show_channel_settings() platformtools.itemlist_refresh() - return + return def submenu(item): @@ -126,7 +144,7 @@ def listado(item): try: data = '' - data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) + data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url, timeout=timeout).data) data = re.sub('\r\n', '', data).decode('utf8').encode('utf8') data = data.replace("'", '"') except: @@ -234,16 +252,15 @@ def listado(item): url_next_page = urlparse.urljoin(item.url, scrapertools.find_single_match(data, patron_next_page) + str(cnt_pag_num + 2)) #url_last_page = re.sub(r"\d+$", 
"9999", url_next_page) #data_last = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(url_last_page).data) - if "/documentales" in item.url: - patron_last_page = '
    \d+<\/a> <\/div>' - else: - patron_last_page = '&\w+;<\/a>&\w+;<\/div>' + #if "/documentales" in item.url: + #patron_last_page = '\d+<\/a> <\/div>' + patron_last_page = '&\w+;<\/a>&\w+;<\/div>' #patron_last_page = '(\d+)<\/span>' - if "/documentales" in item.url: - item.last_page = int(scrapertools.find_single_match(data, patron_last_page)) - else: + try: #item.last_page = int(scrapertools.find_single_match(data, patron_last_page)) * (len(matches) / cnt_tot) item.last_page = int(scrapertools.find_single_match(data, patron_last_page)) + except: + item.last_page = 0 if matches_cnt > cnt_tot and item.extra == "documentales" and pag: item.next_page = '' @@ -323,7 +340,7 @@ def listado(item): real_title, item_local.contentSeason, episodio, item_local.quality = scrapertools.find_single_match(scrapedurl, patron_title_ep) #Hay que buscar la raiz de la temporada - data_epi = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item_local.url).data) + data_epi = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item_local.url, timeout=timeout).data) url = scrapertools.find_single_match(data_epi, '.*

    time.time(): status = False # Calidad de los datos leídos data = '' try: - data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(url_next_page, post=item.post).data) + data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(url_next_page, post=item.post, timeout=timeout_search).data) data = re.sub('\r\n', '', data).decode('utf8').encode('utf8') data = data.replace("'", '"') except: @@ -566,8 +593,13 @@ def listado_busqueda(item): if len(matches_alt) > 0: status = True for scrapedurl, scrapedtitle, scrapedquality, scrapedtype in matches_alt: - if scrapedtype not in ['Juegos', 'Capitulos', 'Musica']: #limpiamos de contenidos no deseados - matches.append(matches_alt[i]) #acumulamos los títulos + if scrapedtype in ['Juegos', 'Capitulos', 'Musica']: #limpiamos de contenidos no deseados + i += 1 + continue + if not lookup_idiomas_paginacion(item, scrapedurl, scrapedtitle, scrapedquality, list_language): + i += 1 + continue + matches.append(matches_alt[i]) #acumulamos los títulos i += 1 cnt_title = len(matches) #número de títulos a pintar @@ -646,7 +678,7 @@ def listado_busqueda(item): title = title.replace(" Latino", "").replace(" latino", "").replace(" Argentina", "").replace(" argentina", "") title = title.replace("Castellano", "").replace("castellano", "").replace("inglés", "").replace("ingles", "").replace("Inglés", "").replace("Ingles", "") - if "audio" in title.lower(): #Reservamos info de audio para después de TMDB + if "audio" in title.lower(): #Reservamos info de audio para después de TMDB title_subs += ['[%s]' % scrapertools.find_single_match(title, r'(\[[a|A]udio.*?\])')] title = re.sub(r'\[[a|A]udio.*?\]', '', title) if "[dual" in title.lower(): @@ -655,6 +687,9 @@ def listado_busqueda(item): if scrapertools.find_single_match(title, r'-\s[m|M].*?serie'): title = re.sub(r'-\s[m|M].*?serie', '', title) title_subs += ["Miniserie"] + + if item_local.language == []: + item_local.language = ['CAST'] #Por defecto if title.endswith('.'): title = title[:-1] @@ -742,7 +777,13 @@ def listado_busqueda(item): item_local.contentSeason_save = item_local.contentSeason del item_local.infoLabels['season'] - itemlist.append(item_local.clone()) + #Ahora se filtra por idioma, si procede, y se pinta lo que vale + if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra + itemlist = filtertools.get_link(itemlist, item_local, list_language) + else: + itemlist.append(item_local.clone()) #Si no, pintar pantalla + + cnt_title = len(itemlist) #Contador de líneas añadidas #logger.debug(item_local) @@ -768,6 +809,10 @@ def listado_busqueda(item): def findvideos(item): logger.info() itemlist = [] + itemlist_t = [] #Itemlist total de enlaces + itemlist_f = [] #Itemlist de enlaces filtrados + if not item.language: + item.language = ['CAST'] #Castellano por defecto #logger.debug(item) @@ -781,7 +826,7 @@ def findvideos(item): #Bajamos los datos de la página de todo menos de Documentales y Varios if not item.post: try: - data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) + data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url, timeout=timeout).data) data = data.replace('"', "'") patron = "
    0: #Si hay idioma seleccionado, se filtra + itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío #logger.debug("title=[" + item.title + "], torrent=[ " + item_local.url + " ], url=[ " + url + " ], post=[" + item.post + "], thumbnail=[ " + item.thumbnail + " ]" + " size: " + size) + #logger.debug(item_local) + if len(itemlist_f) > 0: #Si hay entradas filtradas... + itemlist.extend(itemlist_f) #Pintamos pantalla filtrada + else: + if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ... + thumb_separador = get_thumb("next.png") #... pintamos todo con aviso + itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador)) + itemlist.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado + + # Requerido para AutoPlay + autoplay.start(itemlist, item) #Lanzamos Autoplay + return itemlist @@ -901,7 +962,7 @@ def episodios(item): # Carga la página data_ini = '' try: - data_ini = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) + data_ini = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url, timeout=timeout).data) data_ini = data_ini.replace('"', "'") except: #Algún error de proceso, salimos pass @@ -1016,8 +1077,34 @@ def episodios(item): item, itemlist = generictools.post_tmdb_episodios(item, itemlist) return itemlist + + +def lookup_idiomas_paginacion(item, url, title, calidad, list_language): + logger.info() + estado = True + item.language = [] + itemlist = [] + + if "[subs" in title.lower() or "[vos" in title.lower() or "v.o.s" in title.lower() or "vo" in title.lower(): + item.language += ["VOS"] + if "latino" in title.lower() or "argentina" in title.lower(): + item.language += ["LAT"] + if item.language == []: + item.language = ['CAST'] #Por defecto + + #Ahora se filtra por idioma, si procede, y se pinta lo que vale. Excluye categorías en otros idiomas. 
+ if config.get_setting('filter_languages', channel) > 0: + itemlist = filtertools.get_link(itemlist, item, list_language) + + if len(itemlist) == 0: + estado = False + + #Volvemos a la siguiente acción en el canal + return estado + + def actualizar_titulos(item): logger.info() diff --git a/plugin.video.alfa/channels/newpct1.json b/plugin.video.alfa/channels/newpct1.json index 7c6f0412..15295888 100644 --- a/plugin.video.alfa/channels/newpct1.json +++ b/plugin.video.alfa/channels/newpct1.json @@ -32,6 +32,22 @@ "enabled": true, "visible": true }, + { + "id": "filter_languages", + "type": "list", + "label": "Mostrar enlaces en idioma...", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "No filtrar", + "CAST", + "LAT", + "VO", + "VOS", + "VOSE" + ] + }, { "id": "clonenewpct1_channel_default", "type": "list", diff --git a/plugin.video.alfa/channels/newpct1.py b/plugin.video.alfa/channels/newpct1.py index 354f1b67..ceabe4ea 100644 --- a/plugin.video.alfa/channels/newpct1.py +++ b/plugin.video.alfa/channels/newpct1.py @@ -16,6 +16,15 @@ from core.item import Item from platformcode import config, logger from core import tmdb from lib import generictools +from channels import filtertools +from channels import autoplay + + +#IDIOMAS = {'CAST': 'Castellano', 'LAT': 'Latino', 'VO': 'Version Original'} +IDIOMAS = {'Castellano': 'CAST', 'Latino': 'LAT', 'Version Original': 'VO'} +list_language = IDIOMAS.values() +list_quality = [] +list_servers = ['torrent'] channel_py = 'newpct1' @@ -84,12 +93,15 @@ def mainlist(item): thumb_series_az = get_thumb("channels_tvshow_az.png") thumb_docus = get_thumb("channels_documentary.png") thumb_buscar = get_thumb("search.png") + thumb_separador = get_thumb("next.png") thumb_settings = get_thumb("setting_0.png") if channel_clone_name == "*** DOWN ***": #Ningún clones activo !!! 
itemlist.append(item.clone(action='', title="[COLOR yellow]Ningún canal NewPct1 activo[/COLOR]")) return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos y salimos + autoplay.init(item.channel, list_servers, list_quality) + itemlist.append(Item(channel=item.channel, action="submenu_novedades", title="Novedades", url=item.channel_host + "ultimas-descargas/", extra="novedades", thumbnail=thumb_pelis, category=item.category, channel_host=item.channel_host)) itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=item.channel_host, @@ -103,10 +115,11 @@ def mainlist(item): itemlist.append( Item(channel=item.channel, action="search", title="Buscar", url=item.channel_host + "buscar", thumbnail=thumb_buscar, category=item.category, channel_host=item.channel_host)) - itemlist.append( - Item(channel=item.channel, action="", title="[COLOR yellow]Configuración de Servidores:[/COLOR]", url="", thumbnail=thumb_settings, category=item.category, channel_host=item.channel_host)) - itemlist.append( - Item(channel=item.channel, action="settingCanal", title="Servidores para Ver Online y Descargas", url="", thumbnail=thumb_settings, category=item.category, channel_host=item.channel_host)) + itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]Configuración:[/COLOR]", folder=False, thumbnail=thumb_separador, category=item.category, channel_host=item.channel_host)) + + itemlist.append(Item(channel=item.channel, action="settingCanal", title="Configurar canal", thumbnail=thumb_settings, category=item.category, channel_host=item.channel_host)) + + autoplay.show_option(item.channel, itemlist) #Activamos Autoplay item.category = '%s / %s' % (channel_py.title(), item.category.title()) #Newpct1 / nombre de clone en pantalla de Mainlist @@ -124,6 +137,7 @@ def submenu(item): logger.info() itemlist = [] + item.extra2 = '' data = '' try: @@ -180,6 +194,13 @@ def submenu(item): #Preguntamos por las entradas que no corresponden al "extra" if item.extra in scrapedtitle.lower() or (item.extra == "peliculas" and ("cine" in scrapedurl or "anime" in scrapedurl)) or (item.extra == "varios" and ("documentales" in scrapedurl or "varios" in scrapedurl)): + + #Si tiene filtro de idiomas, marcamos estas páginas como no filtrables + if "castellano" in title.lower() or "latino" in title.lower() or "subtituladas" in title.lower() or "vo" in title.lower() or "v.o" in title.lower() or "- es" in title.lower(): + item.extra2 = "categorias" + else: + item.extra2 = "" + itemlist.append(item.clone(action="listado", title=title, url=scrapedurl)) itemlist.append(item.clone(action="alfabeto", title=title + " [A-Z]", url=scrapedurl)) @@ -195,6 +216,7 @@ def submenu_novedades(item): itemlist = [] itemlist_alt = [] + item.extra2 = '' data = '' timeout_search=timeout * 2 #Más tiempo para Novedades, que es una búsqueda @@ -273,6 +295,13 @@ def submenu_novedades(item): itemlist_alt = sorted(itemlist_alt, key=lambda it: it.title) #clasificamos for item_local in itemlist_alt: item_local.title = re.sub(r'^\d{2}', '', item_local.title) #Borramos la secuencia + + #Si tiene filtro de idiomas, marcamos estas páginas como no filtrables + if "castellano" in item_local.title.lower() or "latino" in item_local.title.lower() or "subtituladas" in item_local.title.lower() or "vo" in item_local.title.lower() or "v.o" in item_local.title.lower() or "- es" in item_local.title.lower(): + item_local.extra2 = "categorias" + else: + item_local.extra2 = "" + itemlist.append(item_local.clone()) 
itemlist.append( @@ -550,6 +579,9 @@ def listado(item): title = re.sub(r'- [m|M].*?serie ?\w+', '', title) title_subs += ["[Miniserie]"] + if not item_local.language: + item_local.language = ["CAST"] + #Limpiamos restos en título title = title.replace("Castellano", "").replace("castellano", "").replace("inglés", "").replace("ingles", "").replace("Inglés", "").replace("Ingles", "").replace("Ingl", "").replace("Engl", "").replace("Calidad", "").replace("de la Serie", "").replace("Spanish", "") title_alt = title_alt.replace("Castellano", "").replace("castellano", "").replace("inglés", "").replace("ingles", "").replace("Inglés", "").replace("Ingles", "").replace("Ingl", "").replace("Engl", "").replace("Calidad", "").replace("de la Serie", "").replace("Spanish", "") @@ -565,8 +597,9 @@ def listado(item): title = re.sub(r'\(\d{4}\)$', '', title) if re.sub(r'\d{4}$', '', title).strip(): title = re.sub(r'\d{4}$', '', title) - title = re.sub(r'\d+x\d+', '', title) - title = re.sub(r'x\d+', '', title).strip() + if item_local.contentType != "movie": + title = re.sub(r'\d+x\d+', '', title) + title = re.sub(r'x\d+', '', title).strip() if title.endswith("torrent gratis"): title = title[:-15] if title.endswith("gratis"): title = title[:-7] @@ -617,8 +650,11 @@ def listado(item): #Guarda la variable temporal que almacena la info adicional del título a ser restaurada después de TMDB item_local.title_subs = title_subs - #Agrega el item local a la lista itemlist - itemlist.append(item_local.clone()) + #Ahora se filtra por idioma, si procede, y se pinta lo que vale. Excluye categorías en otros idiomas. + #if config.get_setting('filter_languages', channel_py) > 0 and item.extra2 != 'categorias': + # itemlist = filtertools.get_link(itemlist, item_local, list_language) + #else: + itemlist.append(item_local.clone()) #Si no, pintar pantalla #logger.debug(item_local) @@ -802,6 +838,10 @@ def listado_busqueda(item): if "juego/" in scrapedurl: # no mostramos lo que no sean videos continue + + #Verificamos si el idioma está dentro del filtro, si no pasamos + if not lookup_idiomas_paginacion(item, scrapedurl, scrapedtitle, calidad, list_language): + continue cnt_title += 1 # Sería una línea real más para Itemlist #Control de página @@ -850,7 +890,7 @@ def listado_busqueda(item): if ("juego/" in scrapedurl or "xbox" in scrapedurl.lower()) and not "/serie" in scrapedurl or "xbox" in scrapedtitle.lower() or "windows" in scrapedtitle.lower() or "windows" in calidad.lower() or "nintendo" in scrapedtitle.lower() or "xbox" in calidad.lower() or "epub" in calidad.lower() or "pdf" in calidad.lower() or "pcdvd" in calidad.lower() or "crack" in calidad.lower(): # no mostramos lo que no sean videos continue - cnt_title += 1 # Sería una línea real más para Itemlist + #cnt_title += 1 # Sería una línea real más para Itemlist #Creamos una copia de Item para cada contenido item_local = item.clone() @@ -1022,6 +1062,9 @@ def listado_busqueda(item): title = re.sub(r'- [m|M].*?serie ?\w+', '', title) title_subs += ["[Miniserie]"] + if not item_local.language: + item_local.language = ["CAST"] + #Limpiamos restos en título title = title.replace("Castellano", "").replace("castellano", "").replace("inglés", "").replace("ingles", "").replace("Inglés", "").replace("Ingles", "").replace("Ingl", "").replace("Engl", "").replace("Calidad", "").replace("de la Serie", "").replace("Spanish", "") @@ -1036,8 +1079,9 @@ def listado_busqueda(item): title = re.sub(r'\(\d{4}\)$', '', title) if re.sub(r'\d{4}$', '', title).strip(): title = re.sub(r'\d{4}$', 
'', title) - title = re.sub(r'\d+x\d+', '', title) - title = re.sub(r'x\d+', '', title).strip() + if item_local.contentType != "movie": + title = re.sub(r'\d+x\d+', '', title) + title = re.sub(r'x\d+', '', title).strip() if "pelisyseries.com" in host and item_local.contentType == "tvshow": titulo = '' @@ -1152,11 +1196,11 @@ def listado_busqueda(item): data_serie = unicode(data_serie, "iso-8859-1", errors="replace").encode("utf-8") data_serie = data_serie.replace("chapters", "buscar-list") - if not scrapertools.find_single_match(data_serie, pattern): #No ha habido suerte ... - item_local.contentType = "movie" #tratarlo el capítulo como película + if not scrapertools.find_single_match(data_serie, pattern): #No ha habido suerte ... + item_local.contentType = "movie" #tratarlo el capítulo como película item_local.extra = "peliculas" else: - item_local.url = url_tvshow #Cambiamos url de episodio por el de serie + item_local.url = url_tvshow #Cambiamos url de episodio por el de serie else: item_local.url = url_id #Cambiamos url de episodio por el de serie @@ -1165,8 +1209,13 @@ def listado_busqueda(item): item_local.title = real_title_mps.replace('-', ' ').title().strip() #Esperemos que el nuevo título esté bien item_local.contentSerieName = item_local.title - #Agrega el item local a la lista itemlist - itemlist.append(item_local.clone()) + #Ahora se filtra por idioma, si procede, y se pinta lo que vale. Excluye categorías en otros idiomas. + if config.get_setting('filter_languages', channel_py) > 0 and item.extra2 != 'categorias': + itemlist = filtertools.get_link(itemlist, item_local, list_language) + else: + itemlist.append(item_local.clone()) #Si no, pintar pantalla + + cnt_title = len(itemlist) #Contador de líneas añadidas #logger.debug(item_local) @@ -1180,7 +1229,7 @@ def listado_busqueda(item): item, itemlist = generictools.post_tmdb_listado(item, itemlist) if post: - itemlist.append(item.clone(channel=item.channel, action="listado_busqueda", title="[COLOR gold][B]Pagina siguiente >> [/B][/COLOR]" + str(post_num) + " de " + str(total_pag), thumbnail=get_thumb("next.png"), title_lista=title_lista, cnt_pag=cnt_pag)) + itemlist.append(item.clone(channel=item.channel, action="listado_busqueda", title="[COLOR gold][B]Pagina siguiente >> [/B][/COLOR]" + str(post_num) + " de " + str(total_pag), thumbnail=get_thumb("next.png"), title_lista=title_lista, cnt_pag=cnt_pag, language='')) #logger.debug("Titulos: " + str(len(itemlist)) + " Matches: " + str(len(matches)) + " Post: " + str(item.post) + " / " + str(post_actual) + " / " + str(total_pag)) @@ -1189,6 +1238,10 @@ def listado_busqueda(item): def findvideos(item): logger.info() itemlist = [] + itemlist_t = [] #Itemlist total de enlaces + itemlist_f = [] #Itemlist de enlaces filtrados + if not item.language: + item.language = ['CAST'] #Castellano por defecto #logger.debug(item) @@ -1362,13 +1415,12 @@ def findvideos(item): data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") data = data.replace("$!", "#!").replace("'", "\"").replace("ñ", "ñ").replace("//pictures", "/pictures") - #Añadimos el tamaño para todos size = scrapertools.find_single_match(data, '
    Size:<\/strong>?\s(\d+?\.?\d*?\s\w[b|B])<\/span>') size = size.replace(".", ",") #sustituimos . por , porque Unify lo borra if not size: - size = scrapertools.find_single_match(item.quality, '\s\[(\d+,?\d*?\s\w\s?[b|B])\]') + size = scrapertools.find_single_match(item.quality, '\s?\[(\d+.?\d*?\s?\w\s?[b|B])\]') if not size: size = generictools.get_torrent_size(item.url) #Buscamos el tamaño en el .torrent if size: @@ -1400,7 +1452,7 @@ def findvideos(item): quality = '%s [%s]' % (item_local.quality, size) #Agregamos size al final del título else: quality = item_local.quality - item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (quality, str(item_local.language)) #Preparamos título de Torrent + item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (quality, str(item_local.language)) #Preparamos título de Torrent #Preparamos título y calidad, quitamos etiquetas vacías item_local.title = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.title) @@ -1408,18 +1460,32 @@ def findvideos(item): item_local.title = item_local.title.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip() quality = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', quality) quality = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', quality) - quality = quality.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip() + item_local.quality = quality.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip() - item_local.alive = "??" #Calidad del link sin verificar - item_local.action = "play" #Visualizar vídeo - item_local.server = "torrent" #Servidor - - itemlist.append(item_local.clone(quality=quality)) #Pintar pantalla + item_local.alive = "??" #Calidad del link sin verificar + item_local.action = "play" #Visualizar vídeo + item_local.server = "torrent" #Servidor + + itemlist_t.append(item_local.clone()) #Pintar pantalla, si no se filtran idiomas + + # Requerido para FilterTools + if config.get_setting('filter_languages', channel_py) > 0: #Si hay idioma seleccionado, se filtra + itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío logger.debug("TORRENT: " + item_local.url + " / title gen/torr: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / tamaño: " + size + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName) #logger.debug(item_local) + if len(itemlist_f) > 0: #Si hay entradas filtradas... + itemlist.extend(itemlist_f) #Pintamos pantalla filtrada + else: + if config.get_setting('filter_languages', channel_py) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ... + thumb_separador = get_thumb("next.png") #... pintamos todo con aviso + itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador)) + itemlist.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado + + itemlist_t = [] #Itemlist total de enlaces + itemlist_f = [] #Itemlist de enlaces filtrados # VER vídeos, descargar vídeos un link, o múltiples links data = scrapertools.find_single_match(data, '
    (?:)?<\/div><\/div><\/div>)') #Seleccionar el bloque para evitar duplicados @@ -1442,7 +1508,9 @@ def findvideos(item): for logo, servidor, idioma, calidad, enlace, title in enlaces_ver: if ver_enlaces_veronline == 0: #Si no se quiere Ver Online, se sale del bloque break + if "ver" in title.lower(): + item_local = item.clone() servidor = servidor.replace("streamin", "streaminto") if servidor.capitalize() in excluir_enlaces_veronline: #Servidor excluido, pasamos al siguiente @@ -1503,11 +1571,25 @@ def findvideos(item): item_local.quality = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.quality) item_local.quality = item_local.quality.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip() - itemlist.append(item_local.clone()) + itemlist_t.append(item_local.clone()) #Pintar pantalla, si no se filtran idiomas + + # Requerido para FilterTools + if config.get_setting('filter_languages', channel_py) > 0: #Si hay idioma seleccionado, se filtra + itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío except: logger.error('ERROR al procesar enlaces VER DIRECTOS: ' + servidor + ' / ' + enlace) + if len(itemlist_f) > 0: #Si hay entradas filtradas... + itemlist.extend(itemlist_f) #Pintamos pantalla filtrada + else: + if config.get_setting('filter_languages', channel_py) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ... + thumb_separador = get_thumb("next.png") #... pintamos todo con aviso + itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador)) + itemlist.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado + + itemlist_t = [] #Itemlist total de enlaces + itemlist_f = [] #Itemlist de enlaces filtrados #Ahora vemos los enlaces de DESCARGAR if len(enlaces_descargar) > 0 and ver_enlaces_descargas != 0: @@ -1525,6 +1607,7 @@ def findvideos(item): break if "Ver" not in title: + item_local = item.clone() servidor = servidor.replace("uploaded", "uploadedto") partes = enlace.split(" ") #Partimos el enlace en cada link de las partes title = "Descarga" #Usamos la palabra reservada de Unify para que no formatee el título @@ -1607,11 +1690,26 @@ def findvideos(item): item_local.quality = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.quality) item_local.quality = item_local.quality.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip() - itemlist.append(item_local.clone()) + itemlist_t.append(item_local.clone()) #Pintar pantalla, si no se filtran idiomas + + # Requerido para FilterTools + if config.get_setting('filter_languages', channel_py) > 0: #Si hay idioma seleccionado, se filtra + itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío except: logger.error('ERROR al procesar enlaces DESCARGAR DIRECTOS: ' + servidor + ' / ' + enlace) + if len(itemlist_f) > 0: #Si hay entradas filtradas... + itemlist.extend(itemlist_f) #Pintamos pantalla filtrada + else: + if config.get_setting('filter_languages', channel_py) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ... + thumb_separador = get_thumb("next.png") #... 
pintamos todo con aviso + itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador)) + itemlist.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado + + # Requerido para AutoPlay + autoplay.start(itemlist, item) #Lanzamos Autoplay + return itemlist @@ -1926,6 +2024,32 @@ def episodios(item): return itemlist +def lookup_idiomas_paginacion(item, url, title, calidad, list_language): + logger.info() + estado = True + item.language = [] + itemlist = [] + + if "[vos" in title.lower() or "v.o.s" in title.lower() or "vo" in title.lower() or "subs" in title.lower() or ".com/pelicula/" in url or ".com/series-vo" in url or "-vo/" in url or "vos" in calidad.lower() or "vose" in calidad.lower() or "v.o.s" in calidad.lower() or "sub" in calidad.lower() or ".com/peliculas-vo" in item.url: + item.language += ["VOS"] + + if "latino" in title.lower() or "argentina" in title.lower() or "-latino/" in url or "latino" in calidad.lower() or "argentina" in calidad.lower(): + item.language += ["LAT"] + + if item.language == []: + item.language = ['CAST'] #Por defecto + + #Ahora se filtra por idioma, si procede, y se pinta lo que vale. Excluye categorías en otros idiomas. + if config.get_setting('filter_languages', channel_py) > 0 and item.extra2 != 'categorias': + itemlist = filtertools.get_link(itemlist, item, list_language) + + if len(itemlist) == 0: + estado = False + + #Volvemos a la siguiente acción en el canal + return estado + + def actualizar_titulos(item): logger.info() diff --git a/plugin.video.alfa/channels/pelisipad.py b/plugin.video.alfa/channels/pelisipad.py index 63034e92..2abea066 100644 --- a/plugin.video.alfa/channels/pelisipad.py +++ b/plugin.video.alfa/channels/pelisipad.py @@ -77,10 +77,10 @@ def submenu(item): url=host % "list/ultimas-peliculas" + ext, text_color=color2, thumbnail=host % "list/ultimas-peliculas/thumbnail_167x250.jpg", fanart=host % "list/ultimas-peliculas/background_1080.jpg", viewmode="movie_with_plot")) - itemlist.append(Item(channel=item.channel, title="Destacados", action="entradas", - url=host % "list/000-novedades" + ext, text_color=color2, - thumbnail=host % "list/screener/thumbnail_167x250.jpg", - fanart=host % "list/screener/background_1080.jpg", viewmode="movie_with_plot")) + # itemlist.append(Item(channel=item.channel, title="Destacados", action="entradas", + # url=host % "list/000-novedades" + ext, text_color=color2, + # thumbnail=host % "list/screener/thumbnail_167x250.jpg", + # fanart=host % "list/screener/background_1080.jpg", viewmode="movie_with_plot")) itemlist.append(Item(channel=item.channel, title="Más vistas", action="entradas", url=host % "list/peliculas-mas-vistas" + ext, text_color=color2, thumbnail=host % "list/peliculas-mas-vistas/thumbnail_167x250.jpg", @@ -167,7 +167,7 @@ def entradas(item): #if child['year']: # title += " (" + child['year'] + ")" #title += quality - + thumbnail += "|User-Agent=%s" % httptools.get_user_agent video_urls = [] for k, v in child.get("video", {}).items(): for vid in v: @@ -232,6 +232,7 @@ def entradasconlistas(item): thumbnail = host % "list/%s/thumbnail_167x250.jpg" % child["id"] fanart = host % "list/%s/background_1080.jpg" % child["id"] + thumbnail += "|User-Agent=%s" % httptools.get_user_agent itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, show=show, infoLabels=infolabels, contentTitle=fulltitle, 
viewmode="movie_with_plot", @@ -295,7 +296,7 @@ def entradasconlistas(item): for vid in v: video_urls.append(["http://%s.pelisipad.com/s/transcoder/%s" % (vid["server"], vid["url"]) + "?%s", vid["height"]]) - + thumbnail += "|User-Agent=%s" % httptools.get_user_agent itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, video_urls=video_urls, thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, infoLabels=infolabels, contentTitle=fulltitle, viewmode="movie_with_plot", text_color=color3)) @@ -347,6 +348,7 @@ def series(item): if child.get("numberOfSeasons") and "- Temporada" not in title: title += " (Temps:%s)" % child['numberOfSeasons'] + thumbnail += "|User-Agent=%s" % httptools.get_user_agent itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, text_color=color3, thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, infoLabels=infolabels, contentTitle=fulltitle, viewmode="movie_with_plot", show=fulltitle)) @@ -414,6 +416,7 @@ def episodios(item): title = fulltitle = child['name'].rsplit(" ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1] except: title = fulltitle = child['id'].replace("-", " ") + thumbnail += "|User-Agent=%s" % httptools.get_user_agent itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, contentTitle=fulltitle, viewmode="movie", show=item.show, infoLabels=infoLabels, video_urls=video_urls, extra="episodios", @@ -491,6 +494,7 @@ def nuevos_cap(item): else: title = fulltitle = child['name'] + thumbnail += "|User-Agent=%s" % httptools.get_user_agent itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, contentTitle=fulltitle, viewmode="movie", show=item.fulltitle, infoLabels=infoLabels, video_urls=video_urls, extra="nuevos", @@ -571,6 +575,7 @@ def listas(item): infolabels['title'] = title try: from core import videolibrarytools + thumbnail += "|User-Agent=%s" % httptools.get_user_agent new_item = item.clone(title=title, url=url, fulltitle=title, fanart=fanart, extra="findvideos", thumbnail=thumbnail, infoLabels=infolabels, category="Cine") videolibrarytools.add_movie(new_item) diff --git a/plugin.video.alfa/channels/pelismagnet.json b/plugin.video.alfa/channels/pelismagnet.json index 03a2f6a1..b8c65189 100755 --- a/plugin.video.alfa/channels/pelismagnet.json +++ b/plugin.video.alfa/channels/pelismagnet.json @@ -17,7 +17,7 @@ "id": "include_in_global_search", "type": "bool", "label": "Incluir en busqueda global", - "default": false, + "default": true, "enabled": true, "visible": true }, @@ -29,6 +29,22 @@ "enabled": true, "visible": true }, + { + "id": "filter_languages", + "type": "list", + "label": "Mostrar enlaces en idioma...", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "No filtrar", + "CAST", + "LAT", + "VO", + "VOS", + "VOSE" + ] + }, { "id": "include_in_newest_torrent", "type": "bool", @@ -36,6 +52,35 @@ "default": true, "enabled": true, "visible": true + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Peliculas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "timeout_downloadpage", + "type": "list", + "label": "Timeout (segs.) 
en descarga de páginas o verificación de servidores", + "default": 10, + "enabled": true, + "visible": true, + "lvalues": [ + "None", + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "10" + ] } ] } \ No newline at end of file diff --git a/plugin.video.alfa/channels/pelismagnet.py b/plugin.video.alfa/channels/pelismagnet.py index 9a6f08a1..0157347b 100644 --- a/plugin.video.alfa/channels/pelismagnet.py +++ b/plugin.video.alfa/channels/pelismagnet.py @@ -1,372 +1,702 @@ -# -*- coding: utf-8 -*- - -import re -import urllib - -from core import httptools -from core import jsontools -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import config, logger - -host = 'http://pelismag.net' -api = host + '/api' -api_serie = host + "/seapi" -api_temp = host + "/sapi" -__modo_grafico__ = config.get_setting("modo_grafico", "pelismagnet") - - -def mainlist(item): - logger.info() - - itemlist = list() - itemlist.append(Item(channel=item.channel, action="pelis", title="[B]Peliculas[/B]", - url=api + "?sort_by=''&page=0")) - itemlist.append(Item(channel=item.channel, action="pelis", title=" Estrenos", - url=api + "?sort_by=date_added&page=0")) - itemlist.append(Item(channel=item.channel, action="pelis", title=" + Populares", url=api + "?page=0")) - itemlist.append(Item(channel=item.channel, action="pelis", title=" + Valoradas", - url=api + "?sort_by=rating&page=0")) - itemlist.append(Item(channel=item.channel, action="menu_ord", title=" Ordenado por...", - url=api)) - itemlist.append( - Item(channel=item.channel, action="search", title=" Buscar...", url=api + "?keywords=%s&page=0")) - itemlist.append(Item(channel=item.channel, action="series", title="[B]Series[/B]", - url=api_serie + "?sort_by=''&page=0")) - itemlist.append(Item(channel=item.channel, action="series", title=" Recientes", - url=api_serie + "?sort_by=date_added&page=0")) - itemlist.append(Item(channel=item.channel, action="series", title=" + Populares", url=api_serie + "?page=0")) - itemlist.append(Item(channel=item.channel, action="series", title=" + Valoradas", - url=api_serie + "?sort_by=rating&page=0")) - itemlist.append(Item(channel=item.channel, action="menu_ord", title=" Ordenado por...", - url=api_serie)) - itemlist.append(Item(channel=item.channel, action="search", title=" Buscar...", - url=api_serie + "?keywords=%s&page=0")) - itemlist.append(Item(channel=item.channel, action="configuracion", title="Configurar canal")) - - return itemlist - - -def configuracion(item): - from platformcode import platformtools - ret = platformtools.show_channel_settings() - platformtools.itemlist_refresh() - return ret - - -def menu_ord(item): - logger.info() - - itemlist = list() - itemlist.append(Item(channel=item.channel, action="menu_alf", title="Alfabético", - url=item.url)) - itemlist.append(Item(channel=item.channel, action="menu_genero", title="Género", - url=item.url)) - - return itemlist - - -def menu_alf(item): - logger.info() - - itemlist = [] - - for letra in ['[0-9]', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', - 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']: - if 'series' in item.url: - action = 'series' - else: - action = 'pelis' - itemlist.append(Item(channel=item.channel, action=action, title=letra, - url=item.url + "?keywords=^" + letra + "&page=0")) - - return itemlist - - -def menu_genero(item): - logger.info() - - itemlist = [] - # TODO: SOLO FUNCIONA POR AHORA A PARTIR DE KODI 17 - # 
httptools.downloadpage("https://kproxy.com/") - # url = "https://kproxy.com/doproxy.jsp" - # post = "page=%s&x=34&y=14" % urllib.quote(host + "/principal") - # response = httptools.downloadpage(url, post, follow_redirects=False).data - # url = scrapertools.find_single_match(response, '|
    |-\s", "", data) - - data = scrapertools.find_single_match(data, '

    ') patron = 'href="([^"]+)".*?>(.*?).*?(?:(.*?)|)' @@ -96,27 +93,22 @@ def listado(item): itemlist.append(Item(channel=item.channel, action="findvideos", url=url, title=title, thumbnail=thumb, contentTitle=contenttitle, show=contenttitle, contentType=tipo, infoLabels={'filtro': filtro_tmdb}, text_color=color1)) - if ("cat=4" in item.url or item.extra == "busqueda") and not item.extra == "novedades": from core import tmdb tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) - next_page = scrapertools.find_single_match(data, ".*? item.pagina + 20: pagina = item.pagina + 20 itemlist.append(Item(channel=item.channel, action="descargas", url=item.url, title=">> Página Siguiente", thumbnail=item.thumbnail, pagina=pagina, letra=item.letra, text_color=color2)) - return itemlist def letra(item): logger.info() - itemlist = list() data = httptools.downloadpage(item.url).data patron = '
  • (?:|)([A-z#]{1})(?:|)
  • ' @@ -163,20 +148,16 @@ def letra(item): for match in matches: itemlist.append(Item(channel=item.channel, title=match, action="descargas", letra=match, url=item.url, thumbnail=item.thumbnail, text_color=color1)) - return itemlist def torrents(item): logger.info() - itemlist = list() if not item.pagina: item.pagina = 0 - post = "utf8=%E2%9C%93&busqueda=puyasubs&search=Buscar&tab=anime&con_seeds=con_seeds" data = httptools.downloadpage(item.url, post).data - patron = ".*?href='([^']+)' title='descargar torrent'>.*?title='informacion de (.*?)'.*?.*?(.*?)" \ ".*?(\d+).*?(\d+)" matches = scrapertools.find_multiple_matches(data, patron) @@ -184,20 +165,15 @@ def torrents(item): contentTitle = title if "(" in contentTitle: contentTitle = contentTitle.split("(")[0] - size = size.strip() filtro_tmdb = {"original_language": "ja"}.items() title += " [COLOR %s][Semillas:%s[/COLOR]|[COLOR %s]Leech:%s[/COLOR]|%s]" % ( color4, seeds, color5, leechers, size) url = "https://www.frozen-layer.com" + url - itemlist.append(Item(channel=item.channel, action="play", url=url, title=title, contentTitle=contentTitle, server="torrent", show=contentTitle, contentType="tvshow", text_color=color1, infoLabels={'filtro': filtro_tmdb})) - - from core import tmdb tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) - if len(matches) > item.pagina + 25: pagina = item.pagina + 25 itemlist.append(Item(channel=item.channel, action="torrents", url=item.url, title=">> Página Siguiente", @@ -208,43 +184,39 @@ def torrents(item): next_page = "https://www.frozen-layer.com" + next_page itemlist.append(Item(channel=item.channel, action="torrents", url=next_page, title=">> Página Siguiente", thumbnail=item.thumbnail, pagina=0, text_color=color2)) - return itemlist def findvideos(item): logger.info() - if item.infoLabels["tmdb_id"] and not item.infoLabels["plot"]: - from core import tmdb - tmdb.set_infoLabels_item(item, True, idioma_busqueda="en") - - itemlist = list() - + itemlist = [] data = httptools.downloadpage(item.url).data + data2 = data.replace("\n","") idiomas = scrapertools.find_single_match(data, 'Subtitulo:\s*(.*?)
    ') - calidades = ['720p', '1080p'] - torrentes = scrapertools.find_multiple_matches(data, '
    720p" in data and ">1080p" in data: - try: - title = "[%s] %s" % (calidades[i], title) - except: - pass - itemlist.append(item.clone(title=title, action="play", url=enlace, server="torrent")) - + if ">720p" in data2 and ">1080p" in data2: + title = "[%s] %s" % (calidades[i], title) + if "nyaa" in enlace: + data1 = httptools.downloadpage(url=enlace).data + enlace = "https://nyaa.si" + scrapertools.find_single_match(data1, 'a href="(/do[^"]+)') + itemlist.append(item.clone(title=title, action="play", url=enlace, server="torrent")) + enlace = scrapertools.find_single_match(data1, '720p" in data and ">1080p" in data: + if ">720p" in data and ">1080p" in data2: try: title = "[%s] %s" % (calidades[i], title) except: pass itemlist.append(item.clone(title=title, action="play", url=enlace, server="onefichier")) - safelink = scrapertools.find_multiple_matches(data, '.*?href='([^']+)'") - if enlace: - itemlist.append(item.clone(url=enlace)) - else: - itemlist.append(item) - return itemlist @@ -365,7 +308,7 @@ def newest(categoria): logger.info() item = Item() try: - item.url = "http://puya.si/?cat=4" + item.url = host + "/?cat=4" item.extra = "novedades" itemlist = listado(item) @@ -373,12 +316,10 @@ def newest(categoria): itemlist.pop() for it in itemlist: it.contentTitle = it.title - # Se captura la excepción, para no interrumpir al canal novedades si un canal falla except: import sys for line in sys.exc_info(): logger.error("{0}".format(line)) return [] - return itemlist diff --git a/plugin.video.alfa/channels/renumbertools.py b/plugin.video.alfa/channels/renumbertools.py index 9c5c933e..4f4cbad4 100755 --- a/plugin.video.alfa/channels/renumbertools.py +++ b/plugin.video.alfa/channels/renumbertools.py @@ -304,10 +304,12 @@ if xbmcgui: def __init__(self, *args, **kwargs): logger.debug() - if xbmcgui.__version__ == "1.2": - self.setCoordinateResolution(1) - else: - self.setCoordinateResolution(5) + #### Compatibilidad con Kodi 18 #### + if config.get_platform(True)['num_version'] < 18: + if xbmcgui.__version__ == "1.2": + self.setCoordinateResolution(1) + else: + self.setCoordinateResolution(5) self.show = kwargs.get("show") self.channel = kwargs.get("channel") diff --git a/plugin.video.alfa/channels/repelis.py b/plugin.video.alfa/channels/repelis.py index dff9c978..508ed981 100644 --- a/plugin.video.alfa/channels/repelis.py +++ b/plugin.video.alfa/channels/repelis.py @@ -9,11 +9,11 @@ from channelselector import get_thumb from channels import autoplay from channels import filtertools from core import httptools +from core import jsontools from core import scrapertools from core import servertools from core import tmdb from core.item import Item -from lib import jsunpack from platformcode import config, logger, platformtools diff --git a/plugin.video.alfa/channels/search.py b/plugin.video.alfa/channels/search.py index 5daecf53..63075bbb 100644 --- a/plugin.video.alfa/channels/search.py +++ b/plugin.video.alfa/channels/search.py @@ -394,6 +394,10 @@ def show_result(item): return channel.search(item, tecleado) else: # Mostrar resultados: todos juntos + if item.infoPlus: #Si viene de una ventana de InfoPlus, hay que salir de esta forma... 
+ del item.infoPlus #si no, se mete en un bucle mostrando la misma pantalla, + item.title = item.title.strip() #dando error en "handle -1" + return getattr(channel, item.action)(item) try: from platformcode import launcher launcher.run(item) @@ -489,7 +493,7 @@ def do_search(item, categories=None): if categories: # Si no se ha seleccionado torrent no se muestra - if "torrent" not in categories: + if "torrent" not in categories and "infoPlus" not in categories: if "torrent" in channel_parameters["categories"]: logger.info("%s -torrent-" % basename_without_extension) continue @@ -601,6 +605,8 @@ def do_search(item, categories=None): for i in element["itemlist"]: if i.action: title = " " + i.title + if "infoPlus" in categories: #Se manrca vi viene de una ventana de InfoPlus + i.infoPlus = True itemlist.append(i.clone(title=title, from_action=i.action, from_channel=i.channel, channel="search", action="show_result", adult=element["adult"])) diff --git a/plugin.video.alfa/channels/seriecanal.json b/plugin.video.alfa/channels/seriecanal.json deleted file mode 100644 index e53459ae..00000000 --- a/plugin.video.alfa/channels/seriecanal.json +++ /dev/null @@ -1,61 +0,0 @@ -{ - "id": "seriecanal", - "name": "Seriecanal", - "active": false, - "adult": false, - "language": ["cast"], - "thumbnail": "http://i.imgur.com/EwMK8Yd.png", - "banner": "seriecanal.png", - "categories": [ - "tvshow", - "vos" - ], - "settings": [ - { - "id": "include_in_global_search", - "type": "bool", - "label": "Incluir en busqueda global", - "default": false, - "enabled": true, - "visible": true - }, - { - "id": "modo_grafico", - "type": "bool", - "label": "Buscar información extra", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "user", - "type": "text", - "label": "Usuario", - "color": "0xFFd50b0b", - "enabled": true, - "visible": true - }, - { - "id": "password", - "type": "text", - "label": "Contraseña", - "color": "0xFFd50b0b", - "enabled": true, - "visible": true, - "hidden": true - }, - { - "id": "perfil", - "type": "list", - "label": "Perfil de color", - "default": 2, - "enabled": true, - "visible": true, - "lvalues": [ - "Perfil 3", - "Perfil 2", - "Perfil 1" - ] - } - ] -} \ No newline at end of file diff --git a/plugin.video.alfa/channels/seriecanal.py b/plugin.video.alfa/channels/seriecanal.py deleted file mode 100644 index 843966c8..00000000 --- a/plugin.video.alfa/channels/seriecanal.py +++ /dev/null @@ -1,226 +0,0 @@ -# -*- coding: utf-8 -*- - -import re -import urllib -import urlparse - -from core import httptools -from core import scrapertools -from core import servertools -from core import tmdb -from platformcode import config, logger - -__modo_grafico__ = config.get_setting('modo_grafico', "seriecanal") -__perfil__ = config.get_setting('perfil', "seriecanal") - -# Fijar perfil de color -perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'], - ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'], - ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']] -color1, color2, color3 = perfil[__perfil__] - -host = "https://www.seriecanal.com/" - - -def login(): - logger.info() - data = httptools.downloadpage(host).data - if "Cerrar Sesion" in data: - return True, "" - usuario = config.get_setting("user", "seriecanal") - password = config.get_setting("password", "seriecanal") - if usuario == "" or password == "": - return False, 'Regístrate en www.seriecanal.com e introduce tus datos en "Configurar Canal"' - else: - post = urllib.urlencode({'username': usuario, 'password': password}) - data = 
httptools.downloadpage(host + "index.php?page=member&do=login&tarea=acceder", post=post).data - if "Bienvenid@, se ha identificado correctamente en nuestro sistema" in data: - return True, "" - else: - return False, "Error en el login. El usuario y/o la contraseña no son correctos" - - -def mainlist(item): - logger.info() - itemlist = [] - item.text_color = color1 - result, message = login() - if result: - itemlist.append(item.clone(action="series", title="Últimos episodios", url=host)) - itemlist.append(item.clone(action="genero", title="Series por género")) - itemlist.append(item.clone(action="alfabetico", title="Series por orden alfabético")) - itemlist.append(item.clone(action="search", title="Buscar...")) - else: - itemlist.append(item.clone(action="", title=message, text_color="red")) - itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False)) - return itemlist - - -def configuracion(item): - from platformcode import platformtools - ret = platformtools.show_channel_settings() - platformtools.itemlist_refresh() - return ret - - -def search(item, texto): - logger.info() - item.url = host + "index.php?page=portada&do=category&method=post&category_id=0&order=" \ - "C_Create&view=thumb&pgs=1&p2=1" - try: - post = "keyserie=" + texto - item.extra = post - return series(item) - # Se captura la excepción, para no interrumpir al buscador global si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def genero(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(host).data - data = scrapertools.find_single_match(data, '
      (.*?)
    ') - matches = scrapertools.find_multiple_matches(data, '([^"]+)
    ') - for scrapedurl, scrapedtitle in matches: - scrapedtitle = scrapedtitle.capitalize() - url = urlparse.urljoin(host, scrapedurl) - itemlist.append(item.clone(action="series", title=scrapedtitle, url=url)) - return itemlist - - -def alfabetico(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(host).data - data = scrapertools.find_single_match(data, '
      (.*?)
    ') - matches = scrapertools.find_multiple_matches(data, '([^"]+)') - for scrapedurl, scrapedtitle in matches: - url = urlparse.urljoin(host, scrapedurl) - itemlist.append(item.clone(action="series", title=scrapedtitle, url=url)) - return itemlist - - -def series(item): - logger.info() - itemlist = [] - item.infoLabels = {} - item.text_color = color2 - if item.extra != "": - data = httptools.downloadpage(item.url, post=item.extra).data - else: - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - - patron = '
    ([^"]+).*?([^"]+)

    .*?' \ - '

    (.*?)

    ' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedthumbnail, scrapedurl, scrapedplot, scrapedtitle, scrapedtemp, scrapedepi in matches: - title = scrapedtitle + " - " + scrapedtemp + " - " + scrapedepi - url = urlparse.urljoin(host, scrapedurl) - temporada = scrapertools.find_single_match(scrapedtemp, "\d+") - episode = scrapertools.find_single_match(scrapedepi, "\d+") - #item.contentType = "tvshow" - if temporada != "": - item.infoLabels['season'] = temporada - #item.contentType = "season" - if episode != "": - item.infoLabels['episode'] = episode - #item.contentType = "episode" - itemlist.append(item.clone(action="findvideos", title=title, url=url, - contentSerieName=scrapedtitle, - context=["buscar_trailer"])) - tmdb.set_infoLabels(itemlist) - # Extra marca siguiente página - next_page = scrapertools.find_single_match(data, 'Episodio - Enlaces de Descarga(.*?)') - patron = '

    ([^"]+)' - matches = scrapertools.find_multiple_matches(data_download, patron) - for scrapedurl, scrapedepi in matches: - new_item = item.clone() - if "Episodio" not in scrapedepi: - scrapedtitle = "[Torrent] Episodio " + scrapedepi - else: - scrapedtitle = "[Torrent] " + scrapedepi - scrapedtitle = scrapertools.htmlclean(scrapedtitle) - new_item.infoLabels['episode'] = scrapertools.find_single_match(scrapedtitle, "Episodio (\d+)") - logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "]") - itemlist.append(new_item.clone(action="play", title=scrapedtitle, url=scrapedurl, server="torrent", - contentType="episode")) - # Busca en la seccion online - data_online = scrapertools.find_single_match(data, "Enlaces de Visionado Online(.*?)") - patron = '([^"]+)' - matches = scrapertools.find_multiple_matches(data_online, patron) - for scrapedurl, scrapedthumb, scrapedtitle in matches: - # Deshecha enlaces de trailers - scrapedtitle = scrapertools.htmlclean(scrapedtitle) - if (scrapedthumb != "images/series/youtube.png") & (scrapedtitle != "Trailer"): - new_item = item.clone() - server = scrapertools.find_single_match(scrapedthumb, "images/series/(.*?).png") - title = "[" + server.capitalize() + "]" + " " + scrapedtitle - - new_item.infoLabels['episode'] = scrapertools.find_single_match(scrapedtitle, "Episodio (\d+)") - itemlist.append(new_item.clone(action="play", title=title, url=scrapedurl, contentType="episode")) - # Comprueba si hay otras temporadas - if not "No hay disponible ninguna Temporada adicional" in data: - data_temp = scrapertools.find_single_match(data, '

    (.*?)') - data_temp = re.sub(r"\n|\r|\t|\s{2}| ", "", data_temp) - patron = '

    ([^"]+)' - matches = scrapertools.find_multiple_matches(data_temp, patron) - for scrapedurl, scrapedtitle in matches: - new_item = item.clone() - url = urlparse.urljoin(host, scrapedurl) - scrapedtitle = scrapedtitle.capitalize() - temporada = scrapertools.find_single_match(scrapedtitle, "Temporada (\d+)") - if temporada != "": - new_item.infoLabels['season'] = temporada - new_item.infoLabels['episode'] = "" - itemlist.append(new_item.clone(action="findvideos", title=scrapedtitle, url=url, text_color="red", - contentType="season")) - tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) - new_item = item.clone() - if config.is_xbmc(): - new_item.contextual = True - itemlist.append(new_item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="", - text_color="magenta")) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - if item.extra == "torrent": - itemlist.append(item.clone()) - else: - # Extrae url de enlace bit.ly - if item.url.startswith("http://bit.ly/"): - item.url = scrapertools.getLocationHeaderFromResponse(item.url) - video_list = servertools.findvideos(item.url) - if video_list: - url = video_list[0][1] - server = video_list[0][2] - itemlist.append(item.clone(server=server, url=url)) - - return itemlist diff --git a/plugin.video.alfa/channels/seriesblanco.py b/plugin.video.alfa/channels/seriesblanco.py index 45a6f607..0eed39d9 100644 --- a/plugin.video.alfa/channels/seriesblanco.py +++ b/plugin.video.alfa/channels/seriesblanco.py @@ -212,21 +212,21 @@ def new_episodes(item): itemlist = [] data = get_source(item.url) - data = scrapertools.find_single_match(data, '

    Series Online : Capítulos estrenados recientemente
    .*?') - patron = '
  • .*?src="([^"]+)".*? data-original-title=" (\d+x\d+).*?' + patron = '
  • .*?src="([^"]+)"' matches = re.compile(patron, re.DOTALL).findall(data) - for lang_data, scrapedurl, scrapedthumbnail, scrapedinfo, in matches: + for lang_data, scrapedinfo, scrapedurl, scrapedthumbnail in matches: - url = host+scrapedurl + url =scrapedurl thumbnail = scrapedthumbnail scrapedinfo = scrapedinfo.split('x') season = scrapedinfo[0] episode = scrapedinfo[1] - scrapedtitle = scrapertools.find_single_match(url, 'capitulo/([^/]+)/').replace("-", " ") - title = '%s - %sx%s' % (scrapedtitle, season, episode ) + scrapedtitle = scrapertools.find_single_match(url, 'capitulo/([^/]+)/') + url = '%scapitulos/%s' % (host, scrapedtitle) + title = '%s - %sx%s' % (scrapedtitle.replace('-', ' '), season, episode ) title, language = add_language(title, lang_data) itemlist.append(Item(channel=item.channel, action='seasons', diff --git a/plugin.video.alfa/channels/seriesyonkis.json b/plugin.video.alfa/channels/seriesyonkis.json deleted file mode 100755 index c1f15fd5..00000000 --- a/plugin.video.alfa/channels/seriesyonkis.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "id": "seriesyonkis", - "name": "Seriesyonkis", - "active": false, - "adult": false, - "language": ["cast"], - "thumbnail": "seriesyonkis.png", - "banner": "seriesyonkis.png", - "fanart": "seriesyonkis.jpg", - "categories": [ - "tvshow", - "anime", - "vos" - ], - "settings": [ - { - "id": "include_in_global_search", - "type": "bool", - "label": "Incluir en busqueda global", - "default": false, - "enabled": true, - "visible": true - } - ] -} \ No newline at end of file diff --git a/plugin.video.alfa/channels/seriesyonkis.py b/plugin.video.alfa/channels/seriesyonkis.py deleted file mode 100755 index c9b7c9e4..00000000 --- a/plugin.video.alfa/channels/seriesyonkis.py +++ /dev/null @@ -1,197 +0,0 @@ -# -*- coding: utf-8 -*- - -import re -import urlparse - -from core import httptools -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import config, logger - -host = 'https://yonkis.to' - - -def mainlist(item): - logger.info() - - itemlist = list() - itemlist.append(Item(channel=item.channel, action="alfabetico", title="Listado alfabetico", url=host)) - itemlist.append(Item(channel=item.channel, action="mas_vistas", title="Series más vistas", - url=host + "/series-mas-vistas")) - itemlist.append(Item(channel=item.channel, action="ultimos", title="Últimos episodios añadidos", - url=host)) - itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url=host + "/buscar/serie")) - - return itemlist - - -def alfabetico(item): - logger.info() - - itemlist = list() - - itemlist.append(Item(channel=item.channel, action="series", title="0-9", url=host + "/lista-de-series/0-9")) - for letra in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ': - itemlist.append(Item(channel=item.channel, action="series", title=letra, url=host+"/lista-de-series/"+letra)) - - return itemlist - - -def mas_vistas(item): - logger.info() - - data = httptools.downloadpage(item.url).data - matches = re.compile('', re.S).findall(data) - - itemlist = [] - for scrapedtitle, scrapedurl, scrapedthumbnail in matches: - scrapedurl = urlparse.urljoin(item.url, scrapedurl) - scrapedthumbnail = urlparse.urljoin(item.url, scrapedthumbnail.replace("/90/", "/150/")) - - itemlist.append( - Item(channel=item.channel, action="episodios", title=scrapedtitle, fulltitle=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, show=scrapedtitle, fanart=item.fanart)) - - return itemlist - - -def search(item, texto): - logger.info() - - 
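The removed seriesyonkis search(), like the search() functions of the channels kept in this diff, follows the same POST-and-scrape shape. A minimal sketch of that pattern, assuming the addon's core helpers (httptools, scrapertools, Item); the form field and the regex below are placeholders, not taken from any specific channel:

from core import httptools, scrapertools
from core.item import Item

def search_sketch(item, texto):
    # Send the query as a form POST, as the channel search() functions do.
    post = "keyword=%s" % texto
    data = httptools.downloadpage(item.url, post=post).data
    # Placeholder pattern: each channel supplies its own scraping regex here.
    patron = 'href="([^"]+)"[^>]*>([^<]+)<'
    itemlist = []
    for scrapedurl, scrapedtitle in scrapertools.find_multiple_matches(data, patron):
        itemlist.append(Item(channel=item.channel, action="findvideos",
                             title=scrapedtitle.strip(), url=scrapedurl))
    return itemlist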
itemlist = [] - post = "keyword=%s&search_type=serie" % texto - data = httptools.downloadpage(item.url, post=post).data - - try: - patron = '([^<]+)
  • ' - matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedurl, scrapedtitle, scrapedthumb, scrapedplot in matches: - title = scrapedtitle.strip() - url = host + scrapedurl - thumb = host + scrapedthumb.replace("/90/", "/150/") - plot = re.sub(r"\n|\r|\t|\s{2,}", "", scrapedplot.strip()) - logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumb + "]") - - itemlist.append(Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=url, - thumbnail=thumb, plot=plot, show=title)) - - return itemlist - # Se captura la excepción, para no interrumpir al buscador global si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def ultimos(item): - logger.info() - - itemlist = [] - - data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data) - logger.debug("data %s" % data) - matches = re.compile('data-href="([^"]+)" data-src="([^"]+)" data-alt="([^"]+)".*?]+>(.*?)', re.S).findall(data) - - for url, thumb, show, title in matches: - - url = host + url - - itemlist.append(Item(channel=item.channel, title=title, url=url, thumbnail=thumb, show=show.strip(), - action="findvideos", fulltitle=title)) - - return itemlist - - -def series(item): - logger.info() - itemlist = [] - - data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data) - - matches = scrapertools.find_single_match(data, '
      (.*?)
    ') - matches = re.compile('title="([^"]+)" href="([^"]+)"', re.S).findall(matches) - for title, url in matches: - itemlist.append(Item(channel=item.channel, action="episodios", title=title, fulltitle=title, - url=urlparse.urljoin(item.url, url), thumbnail=item.thumbnail, show=title)) - - # Paginador - matches = re.compile('>', re.S).findall(data) - - paginador = None - if len(matches) > 0: - paginador = Item(channel=item.channel, action="series", title="!Página siguiente", - url=urlparse.urljoin(item.url, matches[0]), thumbnail=item.thumbnail, show=item.show) - - if paginador and len(itemlist) > 0: - itemlist.insert(0, paginador) - itemlist.append(paginador) - - return itemlist - - -def episodios(item): - logger.info() - - itemlist = [] - - # Descarga la pagina - data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data) - - pattern = '(.*?)(.*?)', re.S).findall(data) - - for url, s_e, title in matches: - url = host + url - title = s_e.strip() + title - itemlist.append(Item(channel=item.channel, title=title, url=url, thumbnail=thumb, show=item.show, plot=plot, - action="findvideos", fulltitle=title)) - - if config.get_videolibrary_support(): - itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url, - action="add_serie_to_library", extra="episodios", show=item.show)) - itemlist.append(Item(channel=item.channel, title="Descargar todos los episodios de la serie", url=item.url, - action="download_all_episodes", extra="episodios", show=item.show)) - - return itemlist - - -def findvideos(item): - logger.info() - itemlist = [] - - # Descarga la pagina - data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data) - - pattern = ']+>]+alt="([^"]+)" />|\s{2,}', "", data) - return data - - -def list_all(item): +def categorias(item): logger.info() + itemlist = [] + + data = '' + try: + data = re.sub(r"\n|\r|\t| |
    |\s{2}|()", "", httptools.downloadpage(item.url, timeout=timeout).data) + data = unicode(data, "utf-8", errors="replace").encode("utf-8") + except: + pass + + patron = '
  • ') - if url_next_page: - itemlist.append(item.clone(title="Siguiente >>", url=host+url_next_page, action='list_all')) - return itemlist - -def section(item): - logger.info() - itemlist = [] - - data = get_source(host) - patron = '
  • <\/a>' + elif item.extra2 == 'categorias': + patron = '
    ' + else: + patron = '
    .*?src="([^"]+)" onload' + + matches = re.compile(patron, re.DOTALL).findall(data) + if not matches and not 'Total: 0 resultados encontrados' in data: #error + item = generictools.web_intervenida(item, data) #Verificamos que no haya sido clausurada + if item.intervencion: #Sí ha sido clausurada judicialmente + item, itemlist = generictools.post_tmdb_episodios(item, itemlist) #Llamamos al método para el pintado del error + return itemlist #Salimos + + logger.error("ERROR 02: LISTADO: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data) + itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: LISTADO: Ha cambiado la estructura de la Web. Reportar el error con el log')) + break #si no hay más datos, algo no funciona, pintamos lo que tenemos + + #logger.debug("PATRON: " + patron) + #logger.debug(matches) + #logger.debug(data) + + #Buscamos la url de paginado y la última página + patron = 'Siguiente<\/a>' + try: + next_page_url, curr_page = scrapertools.find_single_match(data, patron) + curr_page = int(curr_page) / len(matches) + except: #Si no lo encuentra, lo ponemos a 1 + #logger.error('ERROR 03: LISTADO: Al obtener la paginación: ' + patron + ' / ' + data) + fin = 0 #Forzamos a salir del WHILE al final del FOR + cnt_title = 0 #Evitamos pié de página + curr_page = 1 + next_page_url = item.url + next_page_url = urlparse.urljoin(host, next_page_url) + #logger.debug('curr_page: ' + str(curr_page) + ' / url: ' + next_page_url) + + #Empezamos el procesado de matches + for scrapedtitle, scrapedurl, scrapedthumb in matches: + if item.extra2 == 'categorias': #Cambia el orden de tres parámetros (Categorías) + title = scrapedthumb + url = urlparse.urljoin(host, scrapedtitle) + thumb = scrapedurl + else: #lo estándar + title = scrapedtitle + url = urlparse.urljoin(host, scrapedurl) + thumb = scrapedthumb + + + quality = scrapertools.find_single_match(title, '\[(.*?)\]') #capturamos quality + title = re.sub(r'\[.*?\]', '', title) #y lo borramos de title + + title = title.replace("á", "a").replace("é", "e").replace("í", "i").replace("ó", "o").replace("ú", "u").replace("ü", "u").replace("�", "ñ").replace("ñ", "ñ").replace("ã", "a").replace("&etilde;", "e").replace("ĩ", "i").replace("õ", "o").replace("ũ", "u").replace("ñ", "ñ").replace("’", "'") + + item_local = item.clone() #Creamos copia de Item para trabajar + if item_local.tipo: #... 
y limpiamos + del item_local.tipo + if item_local.totalItems: + del item_local.totalItems + if item_local.post_num: + del item_local.post_num + if item_local.intervencion: + del item_local.intervencion + if item_local.viewmode: + del item_local.viewmode + item_local.text_bold = True + del item_local.text_bold + item_local.text_color = True + del item_local.text_color + + title_subs = [] #creamos una lista para guardar info importante + item_local.language = [] #iniciamos Lenguaje + item_local.quality = quality #guardamos la calidad, si la hay + item_local.url = url #guardamos el thumb + item_local.thumbnail = thumb #guardamos el thumb + item_local.context = "['buscar_trailer']" + + item_local.contentType = "movie" #por defecto, son películas + item_local.action = "findvideos" + + #Ajustamos los idiomas + if ("-latino-" in url.lower() or "(latino)" in title.lower()) and "LAT" not in item_local.language: + item_local.language += ['LAT'] + elif ('-vos-' in url.lower() or '-vose-' in url.lower() or '(vos)' in title.lower() or '(vose)' in title.lower()) and "VOSE" not in item_local.language: + item_local.language += ['VOSE'] + elif ('-vo-' in url.lower() or '(vo)' in title.lower()) and "VO" not in item_local.language: + item_local.language += ['VO'] + + if item_local.language == []: + item_local.language = ['CAST'] #Por defecto + + title = re.sub(r'\(.*?\)', '', title) #Limpiamos del idioma de title + + #Detectamos info interesante a guardar para después de TMDB + if scrapertools.find_single_match(title, '[m|M].*?serie'): + title = re.sub(r'[m|M]iniserie', '', title) + title_subs += ["Miniserie"] + if scrapertools.find_single_match(title, '[s|S]aga'): + title = re.sub(r'[s|S]aga', '', title) + title_subs += ["Saga"] + if scrapertools.find_single_match(title, '[c|C]olecc'): + title = re.sub(r'[c|C]olecc...', '', title) + title_subs += ["Colección"] + + if "duolog" in title.lower(): + title_subs += ["[Saga]"] + title = title.replace(" Duologia", "").replace(" duologia", "").replace(" Duolog", "").replace(" duolog", "") + if "trilog" in title.lower(): + title_subs += ["[Saga]"] + title = title.replace(" Trilogia", "").replace(" trilogia", "").replace(" Trilog", "").replace(" trilog", "") + if "extendida" in title.lower() or "v.e." in title.lower()or "v e " in title.lower(): + title_subs += ["[V. Extendida]"] + title = title.replace("Version Extendida", "").replace("(Version Extendida)", "").replace("V. 
Extendida", "").replace("VExtendida", "").replace("V Extendida", "").replace("V.Extendida", "").replace("V Extendida", "").replace("V.E.", "").replace("V E ", "").replace("V:Extendida", "") + + item_local.infoLabels["year"] = '-' #Reseteamos el año para TMDB + + #Limpiamos el título de la basura innecesaria + title = re.sub(r'- $', '', title) + title = re.sub(r'TV|Online|Spanish|Torrent|en Espa\xc3\xb1ol|Español|Latino|Subtitulado|Blurayrip|Bluray rip|\[.*?\]|R2 Pal|\xe3\x80\x90 Descargar Torrent \xe3\x80\x91|Completa|Temporada|Descargar|Torren', '', title, flags=re.IGNORECASE) + + #Terminamos de limpiar el título + title = re.sub(r'\??\s?\d*?\&.*', '', title) + title = re.sub(r'[\(|\[]\s+[\)|\]]', '', title) + title = title.replace('()', '').replace('[]', '').strip().lower().title() + + item_local.from_title = title.strip().lower().title() #Guardamos esta etiqueta para posible desambiguación de título + + #Salvamos el título según el tipo de contenido + if item_local.contentType == "movie": + item_local.contentTitle = title.strip().lower().title() + else: + item_local.contentSerieName = title.strip().lower().title() + + item_local.title = title.strip().lower().title() #Guardamos el título + + #Guarda la variable temporal que almacena la info adicional del título a ser restaurada después de TMDB + item_local.title_subs = title_subs + + #Ahora se filtra por idioma, si procede, y se pinta lo que vale + if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra + itemlist = filtertools.get_link(itemlist, item_local, list_language) + else: + itemlist.append(item_local.clone()) #Si no, pintar pantalla + + cnt_title = len(itemlist) #Contador de líneas añadidas + + #logger.debug(item_local) + + #Pasamos a TMDB la lista completa Itemlist + tmdb.set_infoLabels(itemlist, __modo_grafico__) + + #Llamamos al método para el maquillaje de los títulos obtenidos desde TMDB + item, itemlist = generictools.post_tmdb_listado(item, itemlist) + + # Si es necesario añadir paginacion + if cnt_title >= cnt_tot * cnt_pct: + + title = '%s' % curr_page + + itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente " + title, url=next_page_url, extra=item.extra, extra2=item.extra2)) return itemlist - + def findvideos(item): logger.info() - itemlist = [] - data = get_source(item.url) + itemlist_t = [] #Itemlist total de enlaces + itemlist_f = [] #Itemlist de enlaces filtrados + if not item.language: + item.language = ['CAST'] #Castellano por defecto - second_url = scrapertools.find_single_match(data, '

    )| ", "", httptools.downloadpage(item.url, timeout=timeout).data) + data = unicode(data, "utf-8", errors="replace").encode("utf-8") + except: + pass + + if not data: + logger.error("ERROR 01: FINDVIDEOS: La Web no responde o la URL es erronea: " + item.url) + itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: FINDVIDEOS:. La Web no responde o la URL es erronea. Si la Web está activa, reportar el error con el log')) + return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos - if url != '': - quality = item.quality - title = 'Torrent [%s]' % quality - itemlist.append(item.clone(title=title, url=url, quality=quality, action='play', server='torrent', - language='cast')) + matches = re.compile(patron, re.DOTALL).findall(data) + if not matches: #error + logger.error("ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web " + " / PATRON: " + patron + data) + itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log')) + return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos + + #logger.debug("PATRON: " + patron) + #logger.debug(matches) + #logger.debug(data) + + #Llamamos al método para crear el título general del vídeo, con toda la información obtenida de TMDB + item, itemlist = generictools.post_tmdb_findvideos(item, itemlist) - # Requerido para FilterTools + #Ahora tratamos los enlaces .torrent + for scrapedurl in matches: #leemos los torrents con la diferentes calidades + if 'javascript' in scrapedurl: #evitamos la basura + continue + + url = urlparse.urljoin(host, scrapedurl) + #Leemos la siguiente página, que es de verdad donde está el magnet/torrent + try: + data = re.sub(r"\n|\r|\t|\s{2}|()| ", "", httptools.downloadpage(url, timeout=timeout).data) + data = unicode(data, "utf-8", errors="replace").encode("utf-8") + except: + pass + + patron = "window.open\('([^']+)'" + url = scrapertools.find_single_match(data, patron) + if not url: #error + logger.error("ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web " + " / PATRON: " + patron + data) + itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web. 
Verificar en la Web esto último y reportar el error con el log')) + continue #si no hay más datos, algo no funciona, pasamos al siguiente + + #Generamos una copia de Item para trabajar sobre ella + item_local = item.clone() + + item_local.url = urlparse.urljoin(host, url) + + #Buscamos si ya tiene tamaño, si no, los buscamos en el archivo .torrent + size = scrapertools.find_single_match(item_local.quality, '\s\[(\d+,?\d*?\s\w\s?[b|B])\]') + if not size: + size = generictools.get_torrent_size(item_local.url) #Buscamos el tamaño en el .torrent + if size: + item_local.title = re.sub(r'\s\[\d+,?\d*?\s\w[b|B]\]', '', item_local.title) #Quitamos size de título, si lo traía + item_local.title = '%s [%s]' % (item_local.title, size) #Agregamos size al final del título + size = size.replace('GB', 'G B').replace('Gb', 'G b').replace('MB', 'M B').replace('Mb', 'M b') + item_local.quality = re.sub(r'\s\[\d+,?\d*?\s\w\s?[b|B]\]', '', item_local.quality) #Quitamos size de calidad, si lo traía + item_local.quality = '%s [%s]' % (item_local.quality, size) #Agregamos size al final de la calidad + + #Ahora pintamos el link del Torrent + item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language)) + + #Preparamos título y calidad, quitamos etiquetas vacías + item_local.title = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.title) + item_local.title = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.title) + item_local.title = item_local.title.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip() + item_local.quality = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.quality) + item_local.quality = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.quality).strip() + item_local.quality = item_local.quality.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip() - itemlist = filtertools.get_links(itemlist, item, list_language) + item_local.alive = "??" #Calidad del link sin verificar + item_local.action = "play" #Visualizar vídeo + item_local.server = "torrent" #Servidor Torrent + + itemlist_t.append(item_local.clone()) #Pintar pantalla, si no se filtran idiomas + + # Requerido para FilterTools + if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra + itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío + + #logger.debug("TORRENT: " + scrapedurl + " / title gen/torr: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName) + + #logger.debug(item_local) + + if len(itemlist_f) > 0: #Si hay entradas filtradas... + itemlist.extend(itemlist_f) #Pintamos pantalla filtrada + else: + if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ... + thumb_separador = get_thumb("next.png") #... 
pintamos todo con aviso + itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador)) + itemlist.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado # Requerido para AutoPlay - - autoplay.start(itemlist, item) - - if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos': - itemlist.append( - Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url, - action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle)) - + autoplay.start(itemlist, item) #Lanzamos Autoplay + return itemlist +def actualizar_titulos(item): + logger.info() + + item = generictools.update_title(item) #Llamamos al método que actualiza el título con tmdb.find_and_set_infoLabels + + #Volvemos a la siguiente acción en el canal + return item + + def search(item, texto): logger.info() texto = texto.replace(" ", "+") - item.url = item.url + texto - item.type = 'buscar' + + try: + item.url = item.url + texto - if texto != '': - return list_all(item) - else: + if texto != '': + return listado(item) + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) return [] - - + + def newest(categoria): logger.info() itemlist = [] item = Item() + try: if categoria in ['torrent', 'peliculas']: - item.url = host+'torrents' + item.url = host + 'torrents' elif categoria == '4k': - item.url = 'http://www.todo-peliculas.com/tags/4k' - item.type='section' - itemlist = list_all(item) + item.url = host + 'tags/4k' + item.extra2 = 'categorias' + item.extra = "peliculas" + item.channel = channel + item.category_new= 'newest' - if itemlist[-1].title == 'Siguiente >>': + itemlist = listado(item) + if ">> Página siguiente" in itemlist[-1].title: itemlist.pop() + + # Se captura la excepción, para no interrumpir al canal novedades si un canal falla except: import sys for line in sys.exc_info(): logger.error("{0}".format(line)) return [] - return itemlist + return itemlist \ No newline at end of file diff --git a/plugin.video.alfa/channels/tupornotv.json b/plugin.video.alfa/channels/tupornotv.json deleted file mode 100755 index d4fe5877..00000000 --- a/plugin.video.alfa/channels/tupornotv.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "id": "tupornotv", - "name": "tuporno.tv", - "active": true, - "adult": true, - "language": ["*"], - "banner": "tupornotv.png", - "thumbnail": "tupornotv.png", - "categories": [ - "adult" - ], - "settings": [ - { - "id": "include_in_global_search", - "type": "bool", - "label": "Incluir en busqueda global", - "default": true, - "enabled": true, - "visible": true - } - ] -} \ No newline at end of file diff --git a/plugin.video.alfa/channels/tupornotv.py b/plugin.video.alfa/channels/tupornotv.py deleted file mode 100755 index bd7edca4..00000000 --- a/plugin.video.alfa/channels/tupornotv.py +++ /dev/null @@ -1,264 +0,0 @@ -# -*- coding: utf-8 -*- - -import re -import urlparse - -from core import scrapertools -from core.item import Item -from platformcode import logger - - -def mainlist(item): - logger.info() - - itemlist = [] - itemlist.append(Item(channel=item.channel, title="Pendientes de Votación", action="novedades", - url="http://tuporno.tv/pendientes")) - itemlist.append( - Item(channel=item.channel, title="Populares", action="masVistos", url="http://tuporno.tv/", folder=True)) - itemlist.append( - Item(channel=item.channel, title="Categorias", action="categorias", 
url="http://tuporno.tv/categorias/", - folder=True)) - itemlist.append(Item(channel=item.channel, title="Videos Recientes", action="novedades", - url="http://tuporno.tv/videosRecientes/", folder=True)) - itemlist.append(Item(channel=item.channel, title="Top Videos (mas votados)", action="masVotados", - url="http://tuporno.tv/topVideos/", folder=True)) - itemlist.append(Item(channel=item.channel, title="Nube de Tags", action="categorias", url="http://tuporno.tv/tags/", - folder=True)) - itemlist.append(Item(channel=item.channel, title="Buscar", action="search")) - - return itemlist - - -def novedades(item): - logger.info() - url = item.url - # ------------------------------------------------------ - # Descarga la página - # ------------------------------------------------------ - data = scrapertools.cachePage(url) - # logger.info(data) - - # ------------------------------------------------------ - # Extrae las entradas - # ------------------------------------------------------ - # seccion novedades - ''' -
    - Cogiendo en el bosque -

    Cogiendo en el bosque

    - ''' - patronvideos = '
    (.*?)
    (.+?)<').findall(match)[0] - except: - try: - duracion = re.compile('\((.+?)\)Siguiente - patronsiguiente = 'Siguiente ' - siguiente = re.compile(patronsiguiente, re.DOTALL).findall(data) - if len(siguiente) > 0: - scrapedurl = urlparse.urljoin(url, siguiente[0]) - itemlist.append(Item(channel=item.channel, action="novedades", title="!Next page", url=scrapedurl, folder=True)) - - return itemlist - - -def masVistos(item): - logger.info() - - itemlist = [] - itemlist.append( - Item(channel=item.channel, title="Hoy", action="novedades", url="http://tuporno.tv/hoy", folder=True)) - itemlist.append(Item(channel=item.channel, title="Recientes", action="novedades", url="http://tuporno.tv/recientes", - folder=True)) - itemlist.append( - Item(channel=item.channel, title="Semana", action="novedades", url="http://tuporno.tv/semana", folder=True)) - itemlist.append( - Item(channel=item.channel, title="Mes", action="novedades", url="http://tuporno.tv/mes", folder=True)) - itemlist.append( - Item(channel=item.channel, title="Año", action="novedades", url="http://tuporno.tv/ano", folder=True)) - return itemlist - - -def categorias(item): - logger.info() - - url = item.url - # ------------------------------------------------------ - # Descarga la página - # ------------------------------------------------------ - data = scrapertools.cachePage(url) - # logger.info(data) - # ------------------------------------------------------ - # Extrae las entradas - # ------------------------------------------------------ - # seccion categorias - # Patron de las entradas - if url == "http://tuporno.tv/categorias/": - patronvideos = '
  • |
    |-\s", "", data) - patronvideos = '
  • ' - matches = re.compile(patronvideos, re.DOTALL).findall(data) - - if len(matches) > 0: - itemlist = [] - for match in matches: - # Titulo - scrapedtitle = match[2].replace("", "") - scrapedtitle = scrapedtitle.replace("", "") - scrapedurl = urlparse.urljoin("http://tuporno.tv/", match[0]) - scrapedthumbnail = urlparse.urljoin("http://tuporno.tv/", match[1]) - scrapedplot = "" - duracion = match[3] - - itemlist.append( - Item(channel=item.channel, action="play", title=scrapedtitle + " [" + duracion + "]", url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot, server="Directo", folder=False)) - - '''Siguiente ''' - patronsiguiente = 'Siguiente ' - siguiente = re.compile(patronsiguiente, re.DOTALL).findall(data) - if len(siguiente) > 0: - patronultima = '
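Several of the rewritten channels in this diff (allpeliculas, puyasubs, todopeliculas) close with the same newest() guard so that one failing channel cannot interrupt the global "Novedades" listing. A minimal sketch of that convention; host and listado below are stand-ins for the values each channel module defines itself:

from core.item import Item
from platformcode import logger

host = "http://example.com/"          # placeholder; each channel defines its own host

def listado(item):                    # stand-in for the channel's real listing action
    return [Item(title="ejemplo"), Item(title=">> Página siguiente")]

def newest_sketch(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria in ['peliculas', 'torrent']:
            item.url = host + 'torrents'
        item.extra = 'novedades'
        itemlist = listado(item)
        # Drop the trailing ">> Página siguiente" entry, as the channels above do.
        if itemlist and 'Página siguiente' in itemlist[-1].title:
            itemlist.pop()
    except:
        # Swallow the error so a broken channel does not interrupt the global
        # "Novedades" section; only the traceback is logged.
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist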