diff --git a/plugin.video.alfa/channels/animeflv.json b/plugin.video.alfa/channels/animeflv.json
index 01ed328b..81e1416a 100755
--- a/plugin.video.alfa/channels/animeflv.json
+++ b/plugin.video.alfa/channels/animeflv.json
@@ -37,6 +37,17 @@
         "enabled": true,
         "visible": true
     },
+    {
+        "id": "filter_languages",
+        "type": "list",
+        "label": "Mostrar enlaces en idioma...",
+        "default": 0,
+        "enabled": true,
+        "visible": true,
+        "lvalues": [
+            "No filtrar"
+        ]
+    },
     {
         "id": "include_in_newest_anime",
         "type": "bool",
diff --git a/plugin.video.alfa/channels/animeflv.py b/plugin.video.alfa/channels/animeflv.py
index ec68dd95..c7d80f6c 100644
--- a/plugin.video.alfa/channels/animeflv.py
+++ b/plugin.video.alfa/channels/animeflv.py
@@ -9,6 +9,22 @@ from core import jsontools
 from core import scrapertools
 from core.item import Item
 from platformcode import logger
+from channels import filtertools
+from channels import autoplay
+
+list_language = ['No filtrar']
+logger.debug('lista_language: %s' % list_language)
+
+list_quality = ['default']
+list_servers = [
+    'izanagi',
+    'yourupload',
+    'okru',
+    'netutv',
+    'openload',
+    'streamango',
+    'mp4upload'
+]
 
 HOST = "https://animeflv.net/"
 
@@ -16,6 +32,8 @@ HOST = "https://animeflv.net/"
 def mainlist(item):
     logger.info()
 
+    autoplay.init(item.channel, list_servers, list_quality)
+
     itemlist = list()
     itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Últimos episodios",
                          url=HOST))
@@ -35,6 +53,8 @@ def mainlist(item):
 
     itemlist = renumbertools.show_option(item.channel, itemlist)
 
+    autoplay.show_option(item.channel, itemlist)
+
     return itemlist
 
 
@@ -131,7 +151,7 @@ def novedades_episodios(item):
         thumbnail = urlparse.urljoin(HOST, thumbnail)
 
         new_item = Item(channel=item.channel, action="findvideos", title=title, url=url, show=show, thumbnail=thumbnail,
-                        fulltitle=title)
+                        fulltitle=title, context=autoplay.context)
 
         itemlist.append(new_item)
 
@@ -155,7 +175,7 @@ def novedades_anime(item):
         thumbnail = urlparse.urljoin(HOST, thumbnail)
 
         new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
-                        fulltitle=title, plot=plot)
+                        fulltitle=title, plot=plot, context=autoplay.context)
         if _type != "Película":
             new_item.show = title
             new_item.context = renumbertools.context(item)
@@ -189,7 +209,7 @@ def listado(item):
         thumbnail = urlparse.urljoin(HOST, thumbnail)
 
         new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
-                        fulltitle=title, plot=plot)
+                        fulltitle=title, plot=plot, context=autoplay.context)
         if _type == "Anime":
             new_item.show = title
 
@@ -242,7 +262,7 @@ def episodios(item):
             title = "%s: %sx%s" % (item.title, season, str(episode).zfill(2))
 
             itemlist.append(item.clone(action="findvideos", title=title, url=url, thumbnail=thumb, fulltitle=title,
-                                       fanart=item.thumbnail, contentType="episode"))
+                                       fanart=item.thumbnail, contentType="episode", context=autoplay.context))
     else:  # no hay thumbnail
         matches = re.compile(']+>(.*?)<', re.DOTALL).findall(data)
 
@@ -299,11 +319,15 @@ def findvideos(item):
             if video_urls:
                 video_urls.sort(key=lambda v: int(v[0]))
                 itemlist.append(item.clone(title="Enlace encontrado en %s" % server, action="play",
-                                           video_urls=video_urls))
+                                           video_urls=video_urls, language='No filtrar', quality='default',
+                                           server=server))
             else:
                 url = scrapertools.find_single_match(data, '"file":"([^"]+)"')
                 if url:
-                    itemlist.append(item.clone(title="Enlace encontrado en %s" % server, url=url, action="play"))
+                    if server == 'izanagi':
+                        server = 'directo'
+                    itemlist.append(item.clone(title="Enlace encontrado en %s" % server, url=url, action="play",
+                                               language='No filtrar', quality='default', server=server))
         else:
             aux_url.append(e)
 
@@ -315,6 +339,14 @@ def findvideos(item):
         videoitem.channel = item.channel
         videoitem.thumbnail = item.thumbnail
 
+    # Requerido para FilterTools
+
+    itemlist = filtertools.get_links(itemlist, item, list_language)
+
+    # Requerido para AutoPlay
+
+    autoplay.start(itemlist, item)
+
     return itemlist
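The animeflv changes above are the wiring pattern this changeset repeats in every channel it touches: declare the channel's server/quality/language lists, register them with AutoPlay, attach autoplay.context to listed items, tag each playable link with server/quality/language, then run FilterTools and AutoPlay before returning. A minimal sketch of that flow, assuming the repo's channels.autoplay and channels.filtertools modules exactly as they are called above; the channel servers, titles and URL are placeholders:

    # -*- coding: utf-8 -*-
    # Sketch of the AutoPlay/FilterTools wiring; 'openload'/'streamango' and the
    # example URL are placeholders, the calls mirror the diff above.
    from channels import autoplay
    from channels import filtertools
    from core.item import Item

    list_language = ['No filtrar']
    list_quality = ['default']
    list_servers = ['openload', 'streamango']


    def mainlist(item):
        # Register this channel's servers/qualities so AutoPlay can rank them.
        autoplay.init(item.channel, list_servers, list_quality)

        itemlist = [Item(channel=item.channel, action="episodios", title="Series",
                         url="https://example.com/", context=autoplay.context)]

        # Adds the AutoPlay settings entry to the channel menu.
        autoplay.show_option(item.channel, itemlist)
        return itemlist


    def findvideos(item):
        # Every playable item carries server/quality/language so the filters work.
        itemlist = [item.clone(title="Enlace encontrado en openload", action="play",
                               server='openload', quality='default', language='No filtrar')]

        # FilterTools drops languages the user filtered out; AutoPlay may then
        # play the best-ranked link directly instead of showing the list.
        itemlist = filtertools.get_links(itemlist, item, list_language)
        autoplay.start(itemlist, item)
        return itemlist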
diff --git a/plugin.video.alfa/channels/animeshd.json b/plugin.video.alfa/channels/animeshd.json
index fdf04e49..293ac85a 100755
--- a/plugin.video.alfa/channels/animeshd.json
+++ b/plugin.video.alfa/channels/animeshd.json
@@ -21,8 +21,24 @@
             "description": "First release"
         }
     ],
-    "categories": [
+    "categories": [
         "latino",
         "anime"
+    ],
+    "settings": [
+        {
+            "id": "filter_languages",
+            "type": "list",
+            "label": "Mostrar enlaces en idioma...",
+            "default": 0,
+            "enabled": true,
+            "visible": true,
+            "lvalues": [
+                "No filtrar",
+                "LAT",
+                "CAST",
+                "SUB"
+            ]
+        }
     ]
 }
diff --git a/plugin.video.alfa/channels/animeshd.py b/plugin.video.alfa/channels/animeshd.py
index 3fd34f84..99e51916 100755
--- a/plugin.video.alfa/channels/animeshd.py
+++ b/plugin.video.alfa/channels/animeshd.py
@@ -8,6 +8,19 @@ from core import scrapertools
 from core import servertools
 from core.item import Item
 from platformcode import logger
+from channels import autoplay
+from channels import filtertools
+
+IDIOMAS = {'Latino': 'LAT', 'Castellano': 'CAST', 'Subtitulado': 'VOS'}
+list_language = IDIOMAS.values()
+logger.debug('lista_language: %s' % list_language)
+
+list_quality = ['default']
+list_servers = [
+    'rapidvideo',
+    'downace',
+    'openload'
+]
 
 tgenero = {"Comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png",
            "Drama": "https://s16.postimg.org/94sia332d/drama.png",
@@ -35,6 +48,8 @@ headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/
 
 def mainlist(item):
     logger.info()
+
+    autoplay.init(item.channel, list_servers, list_quality)
 
     itemlist = []
     itemlist.append(item.clone(title="Ultimas",
@@ -65,6 +80,8 @@ def mainlist(item):
                                fanart='https://s30.postimg.org/pei7txpa9/buscar.png'
                                ))
 
+    autoplay.show_option(item.channel, itemlist)
+
     return itemlist
 
 
@@ -161,10 +178,16 @@ def episodios(item):
     matches = re.compile(patron, re.DOTALL).findall(data)
 
     for scrapedurl, scrapedlang, scrapedtitle in matches:
-        language = scrapedlang
+        language = IDIOMAS[scrapedlang]
+        title = scrapedtitle + ' (%s)' % language
         url = scrapedurl
-        itemlist.append(item.clone(title=title, url=url, action='findvideos', language=language))
+        itemlist.append(item.clone(title=title,
+                                   url=url,
+                                   action='findvideos',
+                                   language=language,
+                                   quality='default'
+                                   ))
 
     return itemlist
 
 
@@ -180,6 +203,13 @@ def findvideos(item):
         videoitem.channel = item.channel
         videoitem.title = title
         videoitem.action = 'play'
+        videoitem.language = item.language
+    # Requerido para FilterTools
+    itemlist = filtertools.get_links(itemlist, item, list_language)
+
+    # Requerido para AutoPlay
+
+    autoplay.start(itemlist, item)
 
     return itemlist
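The IDIOMAS map in animeshd.py is what ties the language label scraped from the page to the short codes FilterTools compares against the channel's filter_languages setting. A standalone sketch of that lookup; the labels and the .get() fallback are illustrative, the channel itself indexes IDIOMAS directly:

    # -*- coding: utf-8 -*-
    # Illustrative lookup; the real channel does IDIOMAS[scrapedlang] with no fallback.
    IDIOMAS = {'Latino': 'LAT', 'Castellano': 'CAST', 'Subtitulado': 'VOS'}
    list_language = list(IDIOMAS.values())

    def tag_language(scrapedlang):
        # Fall back to the raw label so an unexpected value does not raise KeyError.
        return IDIOMAS.get(scrapedlang, scrapedlang)

    print(tag_language('Latino'))       # -> LAT
    print(tag_language('Sub ingles'))   # -> Sub ingles (unknown label kept as-is)

Items tagged this way are then handed to filtertools.get_links(itemlist, item, list_language), which keeps only the languages the user left enabled.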
diff --git a/plugin.video.alfa/channels/autoplay.py b/plugin.video.alfa/channels/autoplay.py
index f42a46d5..450b134e 100755
--- a/plugin.video.alfa/channels/autoplay.py
+++ b/plugin.video.alfa/channels/autoplay.py
@@ -381,10 +381,10 @@ def check_value(channel, itemlist):
         quality_list = channel_node['quality'] = list()
 
     for item in itemlist:
-        if item.server not in server_list:
+        if item.server not in server_list and item.server != '':
             server_list.append(item.server)
             change = True
-        if item.quality not in quality_list:
+        if item.quality not in quality_list and item.quality != '':
             quality_list.append(item.quality)
             change = True
diff --git a/plugin.video.alfa/channels/hdfull.json b/plugin.video.alfa/channels/hdfull.json
index c1512fa4..f0e4b9f1 100755
--- a/plugin.video.alfa/channels/hdfull.json
+++ b/plugin.video.alfa/channels/hdfull.json
@@ -49,6 +49,21 @@
             "enabled": "!eq(-1,'')",
             "visible": true
         },
+        {
+            "id": "filter_languages",
+            "type": "list",
+            "label": "Mostrar enlaces en idioma...",
+            "default": 0,
+            "enabled": true,
+            "visible": true,
+            "lvalues": [
+                "No filtrar",
+                "Latino",
+                "Español",
+                "VOS",
+                "VOSE"
+            ]
+        },
         {
             "id": "include_in_global_search",
             "type": "bool",
@@ -58,4 +73,4 @@
             "visible": true
         }
     ]
-}
\ No newline at end of file
+}
diff --git a/plugin.video.alfa/channels/hdfull.py b/plugin.video.alfa/channels/hdfull.py
index 5156104b..19d1b90c 100755
--- a/plugin.video.alfa/channels/hdfull.py
+++ b/plugin.video.alfa/channels/hdfull.py
@@ -12,6 +12,20 @@ from core import servertools
 from core.item import Item
 from platformcode import config, logger
 from platformcode import platformtools
+from channels import filtertools
+from channels import autoplay
+
+IDIOMAS = {'LAT': 'Latino', 'ESP': 'Español', 'ESPSUB': 'VOS', 'ENGSUB': 'VOSE'}
+list_language = IDIOMAS.values()
+list_quality = ['RHDTV', 'HD0180M', 'HD720M', 'TS']
+list_servers = [
+    'openload',
+    'powvideo',
+    'streamplay',
+    'streamcloud',
+    'nowvideo'
+
+]
 
 host = "http://hdfull.tv"
 
@@ -42,7 +56,7 @@ def login():
 
 def mainlist(item):
     logger.info()
-
+    autoplay.init(item.channel, list_servers, list_quality)
     itemlist = []
 
     itemlist.append(Item(channel=item.channel, action="menupeliculas", title="Películas", url=host, folder=True))
@@ -56,6 +70,7 @@ def mainlist(item):
         login()
 
     itemlist.append(Item(channel=item.channel, action="settingCanal", title="Configuración...", url=""))
+    autoplay.show_option(item.channel, itemlist)
 
     return itemlist
 
@@ -465,7 +480,8 @@ def episodios(item):
                   'id'] + ";3"
 
         itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
-                             thumbnail=thumbnail, show=item.show, folder=True, contentType="episode"))
+                             thumbnail=thumbnail, show=item.show, folder=True, contentType="episode",
+                             context=autoplay.context))
 
     if config.get_videolibrary_support() and len(itemlist) > 0:
         itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=url_targets,
@@ -696,11 +712,13 @@ def findvideos(item):
     fanart = scrapertools.find_single_match(data, '(.*?)")
-    patron = "(.*?)"
+    pattern = 'id="hentai2">]+>(.*?)'
+    data = scrapertools.find_single_match(data, pattern)
+
+    patron = 'href="([^"]+)"[^>]+>(.*?)'
 
     matches = re.compile(patron, re.DOTALL).findall(data)
 
-    for scrapedurl, scrapedtitle in matches:
-        title = scrapertools.entityunescape(scrapedtitle)
-        url = urlparse.urljoin(item.url, scrapedurl)
+    for url, title in matches:
         # logger.debug("title=[{0}], url=[{1}]".format(title, url))
-
         itemlist.append(Item(channel=item.channel, action="series", title=title, url=url))
 
     return itemlist
 
-def search(item, texto):
-    logger.info()
-    if item.url == "":
-        item.url = urlparse.urljoin(CHANNEL_HOST, "animes/?buscar=")
-    texto = texto.replace(" ", "+")
-    item.url = "%s%s" % (item.url, texto)
-
-    try:
-        return series(item)
-    # Se captura la excepción, para no interrumpir al buscador global si un canal falla
-    except:
-        import sys
-        for line in sys.exc_info():
-            logger.error("%s" % line)
-        return []
-
-
 def series(item):
     logger.info()
 
-    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}", "", httptools.downloadpage(item.url).data)
 
-    patron = '' \
-             '(.*?)[^<]+[^<]+[^<]+ 0:
-        scrapedurl = match
-        scrapedtitle = ">> Pagina Siguiente"
+    if pagination:
+        page = scrapertools.find_single_match(pagination, '>Página\s*(\d+)\s*de\s*\d+<')
+        pattern = 'href="([^"]+)">%s<' % (int(page) + 1)
+        url_page = scrapertools.find_single_match(pagination, pattern)
-        itemlist.append(Item(channel=item.channel, action="series", title=scrapedtitle, url=scrapedurl,
-                             folder=True, viewmode="movies_with_plot"))
+        if url_page:
+            itemlist.append(Item(channel=item.channel, action="series", title=">> Página Siguiente", url=url_page))
 
     return itemlist
 
 
@@ -124,9 +89,11 @@ def episodios(item):
     logger.info()
     itemlist = []
 
-    data = httptools.downloadpage(item.url).data
-    data = scrapertools.find_single_match(data, '(.*?)')
-    patron = '([^<]+)'
+    data = re.sub(r"\n|\r|\t|\s{2}", "", httptools.downloadpage(item.url).data)
+    pattern = 'Lista de Capítulos(.*?)'
+
+    data = scrapertools.find_single_match(data, pattern)
+    patron = ']+>([^<]+)'
 
     matches = re.compile(patron, re.DOTALL).findall(data)
 
     for scrapedurl, scrapedtitle in matches:
@@ -136,10 +103,9 @@ def episodios(item):
         plot = item.plot
         # logger.debug("title=[{0}], url=[{1}], thumbnail=[{2}]".format(title, url, thumbnail))
-
         itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail,
                              plot=plot, show=item.show, fulltitle="%s %s" % (item.show, title),
-                             fanart=thumbnail, viewmode="movies_with_plot", folder=True))
+                             fanart=thumbnail))
 
     return itemlist
 
 
@@ -148,7 +114,8 @@ def findvideos(item):
     logger.info()
 
     data = httptools.downloadpage(item.url).data
-    patron = '[^<]+<[iframe|IFRAME].*?[src|SRC]="([^"]+)"'
+
+    patron = '<(?:iframe)?(?:IFRAME)?\s*(?:src)?(?:SRC)?="([^"]+)"'
 
     matches = re.compile(patron, re.DOTALL).findall(data)
 
     for url in matches:
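The rewritten series() pagination above no longer scrapes a ">> Pagina Siguiente" anchor; it reads the current page out of the "Página X de Y" counter and then looks for the link whose text is X + 1. A standalone sketch of that lookup; the HTML snippet is illustrative, not taken from the real site:

    # -*- coding: utf-8 -*-
    import re

    # Illustrative pagination block; the real markup differs.
    pagination = '<span>Página 2 de 7</span><a href="/animes/pag/1">1</a><a href="/animes/pag/3">3</a>'

    page = re.search(r'>Página\s*(\d+)\s*de\s*\d+<', pagination).group(1)
    next_link = re.search('href="([^"]+)">%s<' % (int(page) + 1), pagination)
    if next_link:
        print(next_link.group(1))  # -> /animes/pag/3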
diff --git a/plugin.video.alfa/channels/seriesblanco.py b/plugin.video.alfa/channels/seriesblanco.py
index e4ba57cc..fc770bf6 100644
--- a/plugin.video.alfa/channels/seriesblanco.py
+++ b/plugin.video.alfa/channels/seriesblanco.py
@@ -10,16 +10,21 @@ from core import scrapertoolsV2
 from core import servertools
 from core.item import Item
 from platformcode import config, logger
+from channels import autoplay
 
 HOST = "http://seriesblanco.com/"
 
 IDIOMAS = {'es': 'Español', 'en': 'Inglés', 'la': 'Latino', 'vo': 'VO', 'vos': 'VOS', 'vosi': 'VOSI', 'otro': 'OVOS'}
 list_idiomas = IDIOMAS.values()
 CALIDADES = ['SD', 'HDiTunes', 'Micro-HD-720p', 'Micro-HD-1080p', '1080p', '720p']
+list_servers = ['youwatch', 'powvideo', 'openload', 'streamplay', 'streaminto', 'flashx', 'gamovideo', 'nowvideo',
+                'rockfile']
 
 
 def mainlist(item):
     logger.info()
 
+    autoplay.init(item.channel, list_servers, CALIDADES)
+
     thumb_series = get_thumb("channels_tvshow.png")
     thumb_series_az = get_thumb("channels_tvshow_az.png")
     thumb_buscar = get_thumb("search.png")
@@ -44,6 +49,7 @@ def mainlist(item):
                          thumbnail=thumb_buscar))
 
     itemlist = filtertools.show_option(itemlist, item.channel, list_idiomas, CALIDADES)
+    autoplay.show_option(item.channel, itemlist)
 
     return itemlist
 
@@ -199,7 +205,8 @@ def episodios(item):
             display_title = "%s - %s %s" % (item.show, title, idiomas)
             # logger.debug("Episode found %s: %s" % (display_title, urlparse.urljoin(HOST, url)))
             itemlist.append(item.clone(title=display_title, url=urlparse.urljoin(HOST, url),
-                                       action="findvideos", plot=plot, fanart=fanart, language=filter_lang))
+                                       action="findvideos", plot=plot, fanart=fanart, language=filter_lang,
+                                       context=autoplay.context))
 
     itemlist = filtertools.get_links(itemlist, item, list_idiomas, CALIDADES)
 
@@ -248,7 +255,7 @@ def parse_videos(item, type_str, data):
             itemlist.append(
                 item.clone(title=title, fulltitle=item.title, url=urlparse.urljoin(HOST, v_fields.get("link")),
                            action="play", language=IDIOMAS.get(v_fields.get("language"), "OVOS"),
-                           quality=quality))
+                           quality=quality, server=v_fields.get("server")))
 
     if len(itemlist) > 0:
         return itemlist
 
@@ -284,6 +291,14 @@ def findvideos(item):
 
     list_links = filtertools.get_links(list_links, item, list_idiomas, CALIDADES)
 
+    # Requerido para FilterTools
+
+    itemlist = filtertools.get_links(list_links, item, list_idiomas)
+
+    # Requerido para AutoPlay
+
+    autoplay.start(list_links, item)
+
     return list_links
 
 
diff --git a/plugin.video.alfa/channels/x18hentai.py b/plugin.video.alfa/channels/x18hentai.py
index 5ed87510..b76e8605 100755
--- a/plugin.video.alfa/channels/x18hentai.py
+++ b/plugin.video.alfa/channels/x18hentai.py
@@ -6,8 +6,9 @@ from core import httptools
 from core import scrapertools
 from core.item import Item
 from platformcode import logger
+from core import servertools
 
-host = 'http://www.18hentaionline.eu/'
+host = 'http://www.18hentaionline.net/'
 
 headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
            ['Referer', host]]
@@ -92,17 +93,58 @@ def episodios(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url, headers=headers).data
-    patron = '([^<]+)<\/td>.([^<]+)<\/td>.([^<]+)<\/td>.([^<]+)<\/td>.Ver Capitulo<\/a><\/td>'
+    old_mode = scrapertools.find_single_match(data, 'Censura<\/th>')
+    if old_mode:
+        patron = '(\d+)<\/td>(.*?)<\/td>(.*?)<\/td>(.*?)<\/td>Ver Capitulo<\/a><\/td>'
 
-    matches = re.compile(patron, re.DOTALL).findall(data)
+        matches = re.compile(patron, re.DOTALL).findall(data)
 
-    for scrapedcap, scrapedaud, scrapedsub, scrapedcen, scrapedurl in matches:
-        url = scrapedurl
-        title = 'CAPITULO ' + scrapedcap + ' AUDIO: ' + scrapedaud + ' SUB:' + scrapedsub + ' ' + censura[scrapedcen]
-        thumbnail = ''
-        plot = ''
-        fanart = ''
-        itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=item.fulltitle, url=url,
-                             thumbnail=item.thumbnail, plot=plot))
+        for scrapedcap, scrapedaud, scrapedsub, scrapedcen, scrapedurl in matches:
+            url = scrapedurl
+            title = 'CAPITULO ' + scrapedcap + ' AUDIO: ' + scrapedaud + ' SUB:' + scrapedsub + ' ' + censura[scrapedcen]
+            thumbnail = ''
+            plot = ''
+            fanart = ''
+            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=item.fulltitle, url=url,
+                                 thumbnail=item.thumbnail, plot=plot))
+    else:
+        patron = '<\/i>.*?(.\d+)<\/td>MP4<\/td>(.*?)<\/td>.*?'
+        patron += ''
+
+        matches = re.compile(patron, re.DOTALL).findall(data)
+
+        for scrapedcap, scrapedsub, scrapedurl in matches:
+            url = scrapedurl
+            if scrapedsub != '':
+                subs = scrapedsub
+            else:
+                subs = 'No'
+            title = 'CAPITULO %s SUB %s' % (scrapedcap, subs)
+            thumbnail = ''
+            plot = ''
+            fanart = ''
+            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=item.fulltitle, url=url,
+                                 thumbnail=item.thumbnail, plot=plot))
 
     return itemlist
+
+def findvideos(item):
+    logger.info()
+
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    gvideo = scrapertools.find_single_match(data, '  • ')
+    headers = {'Host': 'www.18hentaionline.net', 'Referer': item.url}
+    gvideo_data = httptools.downloadpage(gvideo, headers=headers).data
+    gvideo_url = scrapertools.find_single_match(gvideo_data, 'file: "(.*?)"')
+    server = 'directo'
+    new_item = item.clone(url=gvideo_url, server=server)
+    itemlist.append(new_item)
+    itemlist.extend(servertools.find_video_items(data=data))
+    for videoitem in itemlist:
+        videoitem.channel = item.channel
+        videoitem.title = item.title + ' (%s)' % videoitem.server
+        videoitem.action = 'play'
+    return itemlist
+
+
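The new x18hentai findvideos() resolves the page's embedded player by re-requesting it with the episode page as Referer and pulling the `file:` URL out of the player config. A minimal sketch of that flow, assuming the repo's core.httptools and core.scrapertools modules; the iframe pattern is a hypothetical placeholder, since the real one depends on the site's markup:

    # -*- coding: utf-8 -*-
    # Sketch only: EMBED_PATTERN is a placeholder for the site's real embed markup.
    from core import httptools
    from core import scrapertools

    EMBED_PATTERN = '<iframe[^>]+src="([^"]+)"'

    def resolve_direct_stream(episode_url):
        data = httptools.downloadpage(episode_url).data
        embed_url = scrapertools.find_single_match(data, EMBED_PATTERN)
        # The embedded player rejects requests without the episode page as Referer.
        headers = {'Host': 'www.18hentaionline.net', 'Referer': episode_url}
        player = httptools.downloadpage(embed_url, headers=headers).data
        # The player config exposes the stream as: file: "https://..."
        return scrapertools.find_single_match(player, 'file: "(.*?)"')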
diff --git a/plugin.video.alfa/servers/mp4upload.py b/plugin.video.alfa/servers/mp4upload.py
index 39b27a56..2efbcf56 100755
--- a/plugin.video.alfa/servers/mp4upload.py
+++ b/plugin.video.alfa/servers/mp4upload.py
@@ -1,17 +1,23 @@
 # -*- coding: utf-8 -*-
 
+import re
+
+from core import httptools
 from core import scrapertools
+from lib import jsunpack
 from platformcode import logger
 
 
 def get_video_url(page_url, premium=False, user="", password="", video_password=""):
     logger.info("(page_url='%s')" % page_url)
-    data = scrapertools.cache_page(page_url)
-    logger.info("data=" + data)
-    media_url = scrapertools.find_single_match(data, '"file": "(.+?)"')
-    logger.info("media_url=" + media_url)
-    media_url = media_url.replace("?start=0", "")
+    data = re.sub(r"\n|\r|\t|\s{2}", "", httptools.downloadpage(page_url).data)
+
+    match = scrapertools.find_single_match(data, "")
+    data = jsunpack.unpack(match)
+    data = data.replace("\\'", "'")
+
+    media_url = scrapertools.find_single_match(data, '{type:"video/mp4",src:"([^"]+)"}')
     logger.info("media_url=" + media_url)
 
     video_urls = list()
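mp4upload now hides its player config inside a packed eval(function(p,a,c,k,e,d){...}) script, which is why the connector unpacks the page with lib.jsunpack before looking for the mp4 source. A minimal sketch of that flow, assuming the repo's httptools, scrapertools and jsunpack modules; the script-matching pattern is a hypothetical placeholder, not the connector's exact regex:

    # -*- coding: utf-8 -*-
    # Sketch only: the <script> pattern below is an assumption.
    from core import httptools
    from core import scrapertools
    from lib import jsunpack

    def extract_mp4(page_url):
        data = httptools.downloadpage(page_url).data
        # Grab the packed player script: eval(function(p,a,c,k,e,d){...})
        packed = scrapertools.find_single_match(
            data, r"<script[^>]*>\s*(eval\(function\(p,a,c,k,e,d.*?)</script>")
        unpacked = jsunpack.unpack(packed).replace("\\'", "'")
        # The unpacked JS declares the source as {type:"video/mp4",src:"..."}
        return scrapertools.find_single_match(unpacked, '{type:"video/mp4",src:"([^"]+)"}')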