diff --git a/channels/dreamsub.py b/channels/dreamsub.py
index e85cb9c0..b4a671c6 100644
--- a/channels/dreamsub.py
+++ b/channels/dreamsub.py
@@ -14,8 +14,8 @@ from platformcode import logger, config
 
 __channel__ = "dreamsub"
 host = config.get_channel_url(__channel__)
-list_servers = ['animeworld', 'verystream', 'streamango', 'openload', 'directo']
-list_quality = ['default', '480p', '720p', '1080p']
+list_servers = ['verystream', 'streamango', 'openload']
+list_quality = ['default']
 
 
 def mainlist(item):
@@ -25,7 +25,7 @@ def mainlist(item):
     menu(itemlist, 'Anime / Cartoni', 'peliculas', host + '/anime', 'tvshow')
     menu(itemlist, 'Categorie', 'categorie', host + '/filter?genere=', 'tvshow')
     menu(itemlist, 'Ultimi Episodi', 'last', host, 'episode')
-    menu(itemlist, 'Cerca', 'search')
+    menu(itemlist, 'Cerca...', 'search')
 
     support.aplay(item, itemlist, list_servers, list_quality)
     support.channel_config(item, itemlist)
diff --git a/servers/wstream.json b/servers/wstream.json
index ec73269c..eaf9de76 100644
--- a/servers/wstream.json
+++ b/servers/wstream.json
@@ -9,11 +9,7 @@
             {
                 "pattern": "wstream.video/(?:embed-|videos/|video/)?([a-z0-9A-Z]+)",
                 "url": "http:\/\/wstream.video\/\\1"
-            },
-            {
-                "pattern": "wstream.video/(?:embed-|videos/|video/)?([a-z0-9A-Z]+)",
-                "url": "http:\/\/wstream.video\/video\/\\1"
-            }
+            }
         ],
         "ignore_urls": [ ]
     },
diff --git a/servers/wstream.py b/servers/wstream.py
index 5b0d4cd3..35ff751e 100644
--- a/servers/wstream.py
+++ b/servers/wstream.py
@@ -24,15 +24,14 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     video_urls = []
 
     data = httptools.downloadpage(page_url, headers=headers).data.replace('https', 'http')
-    # logger.info("[wstream.py] data=" + data)
+    logger.info("[wstream.py] data=" + data)
 
     vid = scrapertools.find_multiple_matches(data, 'download_video.*?>.*?<.*?([^\,,\s]+)')
-    headers.append(['Referer', page_url])
     post_data = scrapertools.find_single_match(data,"\s*<script type='text/javascript'>(eval.*?)</script>")
     if post_data != "":
         from lib import jsunpack
         data = jsunpack.unpack(post_data)
-
+        logger.info("[wstream.py] data=" + data)
     block = scrapertools.find_single_match(data, 'sources:\s*\[[^\]]+\]')
     if block:
         data = block
@@ -41,7 +40,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
 
     i = 0
     for media_url in media_urls:
-        video_urls.append([vid[i] + " mp4 [wstream] ", media_url + '|' + _headers])
+        video_urls.append([(vid[i] if vid else 'video') + " mp4 [wstream] ", media_url + '|' + _headers])
         i = i + 1
 
     for video_url in video_urls:
@@ -54,7 +53,7 @@ def find_videos(data):
     encontrados = set()
     devuelve = []
 
-    patronvideos = r"wstream.video/(?:embed-)?([a-z0-9A-Z]+)"
+    patronvideos = r"wstream.video/(?:embed-|videos/|video/)?([a-z0-9A-Z]+)"
     logger.info("[wstream.py] find_videos #" + patronvideos + "#")
     matches = re.compile(patronvideos, re.DOTALL).findall(data)
 
@@ -69,4 +68,4 @@
         else:
             logger.info(" url duplicada=" + url)
 
-    return devuelve
+    return devuelve
\ No newline at end of file
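
Reviewer note on the new label line in get_video_url: Python's conditional expression binds more loosely than +, so an unparenthesized vid[i] if vid else 'video' + " mp4 [wstream] " attaches the suffix only to the 'video' fallback and drops it whenever vid is non-empty; the hunk above therefore parenthesizes the conditional. A minimal standalone illustration (the sample value is made up):

    vid = ['360p']
    # without parentheses the suffix is swallowed by the else branch
    label_unparenthesized = vid[0] if vid else 'video' + " mp4 [wstream] "
    # with parentheses the suffix is appended in both branches
    label_parenthesized = (vid[0] if vid else 'video') + " mp4 [wstream] "
    print(label_unparenthesized)  # 360p
    print(label_parenthesized)    # 360p mp4 [wstream]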
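
Reviewer note on the widened wstream pattern (servers/wstream.json and find_videos): the optional non-capturing group now accepts the embed-, /videos/ and /video/ URL shapes in addition to the bare id. A standalone sanity check follows; the sample URLs are invented for illustration, only the regex itself comes from the diff:

    import re

    pattern = r"wstream.video/(?:embed-|videos/|video/)?([a-z0-9A-Z]+)"
    samples = [
        "http://wstream.video/embed-a1b2c3",   # embed form
        "http://wstream.video/videos/d4e5f6",  # /videos/ form
        "http://wstream.video/video/g7h8i9",   # /video/ form
        "http://wstream.video/j0k1l2",         # bare id
    ]
    for url in samples:
        match = re.search(pattern, url)
        # every sample should yield the trailing id, never 'videos' or 'video'
        print(url, "->", match.group(1) if match else None)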