From 342bb070da78d3054163aeb9f09328727526ac2f Mon Sep 17 00:00:00 2001
From: marco
Date: Thu, 4 Jun 2020 20:22:24 +0200
Subject: [PATCH] wstream: added support for the links found on
 geniodellostreaming

---
 channels/ilgeniodellostreaming.py | 28 ++++++++--------------------
 servers/wstream.json              |  8 ++++++--
 servers/wstream.py                | 18 +++++++++++-------
 3 files changed, 25 insertions(+), 29 deletions(-)

diff --git a/channels/ilgeniodellostreaming.py b/channels/ilgeniodellostreaming.py
index b88c76dc..e5291fac 100644
--- a/channels/ilgeniodellostreaming.py
+++ b/channels/ilgeniodellostreaming.py
@@ -173,7 +173,6 @@ def newest(categoria):
 
 
 def findvideos(item):
-    from lib import unshortenit
     log()
     matches = support.match(item, patron=[r'class="metaframe rptss" src="([^"]+)"',r' href="#option-\d">([^\s]+)\s*([^\s]+)']).matches
     itemlist = []
@@ -188,24 +187,13 @@ def findvideos(item):
         if 'player.php' in match:
             match = support.httptools.downloadpage(match, follow_redirect=True).url
         list_url.append(match)
-
-    def unshorten(i, url):
-        url = unshortenit.unshorten(url)[0]
-        return support.Item(
-            channel=item.channel,
-            title=list_servers[i],
-            url=url,
-            action='play',
-            quality=list_quality[i],
-            infoLabels=item.infoLabels)
-    import sys
-    if sys.version_info[0] >= 3:
-        from concurrent import futures
-    else:
-        from concurrent_py2 import futures
-    with futures.ThreadPoolExecutor() as executor:
-        unshList = [executor.submit(unshorten, i, url) for i, url in enumerate(list_url)]
-        for it in futures.as_completed(unshList):
-            itemlist.append(it.result())
+    for i, url in enumerate(list_url):
+        itemlist.append(support.Item(
+            channel=item.channel,
+            title=list_servers[i],
+            url=url,
+            action='play',
+            quality=list_quality[i],
+            infoLabels=item.infoLabels))
 
     return support.server(item, itemlist=itemlist)

diff --git a/servers/wstream.json b/servers/wstream.json
index 86a41c0c..517b6ce0 100644
--- a/servers/wstream.json
+++ b/servers/wstream.json
@@ -7,12 +7,16 @@
   "find_videos": {
     "patterns": [
       {
-        "pattern": "wstream\\.video(?!<)(?:=|/)(?:video[a-zA-Z0-9.?_]*|embed[a-zA-Z0-9]*|)?(?!api|swembedid)(?:-|/|=)?(?:[a-z0-9A-Z]+/)?([a-z0-9A-Z]+)",
+        "pattern": "(?:wstream\\.video)(?!<)(?:=|/)(?:video[a-zA-Z0-9.?_]*|embed[a-zA-Z0-9]*|)?(?!api|swembedid)(?:-|/|=)?(?:[a-z0-9A-Z]+/)?([a-z0-9A-Z]+)",
         "url": "http://wstream.video/video.php?file_code=\\1"
       },
       {
-        "pattern": "wstream\\.video/((?:api/vcmod/fastredirect/streaming\\.php\\?id=|swembedid/)[$0-9]+)",
+        "pattern": "(?:wstream\\.video)/((?:api/vcmod/fastredirect/streaming\\.php\\?id=|swembedid/)[$0-9]+)",
         "url": "http://wstream.video/\\1"
+      },
+      {
+        "pattern": "https://nored.icu/swembedid/([0-9]+)",
+        "url": "http://nored.icu/swembedid/\\1"
       }
     ],
     "ignore_urls": []
diff --git a/servers/wstream.py b/servers/wstream.py
index ceda52ce..479f5087 100644
--- a/servers/wstream.py
+++ b/servers/wstream.py
@@ -13,16 +13,19 @@ except ImportError:
 from core import httptools, scrapertools
 from platformcode import logger, config, platformtools
 
-headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0'], ['Host', 'wstream.video']]
 
 def test_video_exists(page_url):
+    global headers
+    headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0'],
+               ['Host', scrapertools.get_domain_from_url(page_url)]]
+
     logger.info("(page_url='%s')" % page_url)
-    resp = httptools.downloadpage(page_url.replace('wstream.video', '116.202.226.34'), headers=headers, verify=False)
+    resp = httptools.downloadpage(page_url.replace(headers[1][1], '116.202.226.34'), headers=headers, verify=False)
     global data, real_url
     data = resp.data
-    page_url = resp.url.replace('wstream.video', '116.202.226.34')
+    page_url = resp.url.replace(headers[1][1], '116.202.226.34')
     if '/streaming.php' in page_url:
         code = httptools.downloadpage(page_url, headers=headers, follow_redirects=False, only_headers=True, verify=False).headers['location'].split('/')[-1].replace('.html', '')
         # logger.info('WCODE=' + code)
@@ -30,7 +33,7 @@ def test_video_exists(page_url):
         data = httptools.downloadpage(page_url, headers=headers, follow_redirects=True, verify=False).data
         real_url = page_url
 
-    if "Not Found" in data or "File was deleted" in data or 'Video is processing' in data:
+    if "Not Found" in data or "File was deleted" in data or 'Video is processing' in data or 'Sorry this video is unavailable' in data:
         return False, config.get_localized_string(70449) % 'Wstream'
     else:
         return True, ""
@@ -75,11 +78,12 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     logger.info("[Wstream] url=" + page_url)
     video_urls = []
 
-    global data, real_url
+    global data, real_url, headers
     # logger.info(data)
+
     sitekey = scrapertools.find_multiple_matches(data, """data-sitekey=['"] *([^"']+)""")
     if sitekey:
         sitekey = sitekey[-1]
-    captcha = platformtools.show_recaptcha(sitekey, page_url.replace('116.202.226.34', 'wstream.video')) if sitekey else ''
+    captcha = platformtools.show_recaptcha(sitekey, page_url.replace('116.202.226.34', headers[1][1])) if sitekey else ''
 
     possibleParam = scrapertools.find_multiple_matches(data, r"""|>)""")
     if possibleParam[0][0]:
@@ -95,7 +99,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
         platformtools.dialog_ok(config.get_localized_string(20000), config.get_localized_string(707434))
         return []
 
-    headers.append(['Referer', real_url.replace('116.202.226.34', 'wstream.video')])
+    headers.append(['Referer', real_url.replace('116.202.226.34', headers[1][1])])
     _headers = urllib.urlencode(dict(headers))
 
     post_data = scrapertools.find_single_match(data, r"\s*")
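
Note on the new servers/wstream.json entry: the third pattern is what picks up
the geniodellostreaming links hosted on nored.icu, while the (?!api|swembedid)
lookahead in the first pattern keeps it from claiming matches that belong to
the more specific second entry. A minimal standalone sketch of how the three
patterns resolve a link; the sample URLs are hypothetical, and the backslashes
are single here because the JSON escaping ("\\.") is already undone:

    import re

    # (pattern, url template) pairs copied from the updated find_videos block.
    PATTERNS = [
        (r"(?:wstream\.video)(?!<)(?:=|/)(?:video[a-zA-Z0-9.?_]*|embed[a-zA-Z0-9]*|)?"
         r"(?!api|swembedid)(?:-|/|=)?(?:[a-z0-9A-Z]+/)?([a-z0-9A-Z]+)",
         r"http://wstream.video/video.php?file_code=\1"),
        (r"(?:wstream\.video)/((?:api/vcmod/fastredirect/streaming\.php\?id=|swembedid/)[$0-9]+)",
         r"http://wstream.video/\1"),
        (r"https://nored.icu/swembedid/([0-9]+)",
         r"http://nored.icu/swembedid/\1"),
    ]

    SAMPLES = [
        "https://wstream.video/embed-abc123.html",                           # hypothetical
        "https://wstream.video/api/vcmod/fastredirect/streaming.php?id=42",  # hypothetical
        "https://nored.icu/swembedid/12345",                                 # hypothetical
    ]

    for sample in SAMPLES:
        for pattern, template in PATTERNS:
            m = re.search(pattern, sample)
            if m:
                # Mimic the \1 back-reference substitution done on the url template.
                print(sample, "->", template.replace("\\1", m.group(1)))
                break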
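
Note on the servers/wstream.py change: the Host header is no longer hard-coded
to wstream.video; test_video_exists rebuilds headers on every call, so
headers[1][1] always holds the domain the link actually came from, and each
replace(..., '116.202.226.34') call swaps that domain for the IP. A sketch of
the idea, with urlparse standing in for the project's
scrapertools.get_domain_from_url (an assumption, not its actual implementation):

    from urllib.parse import urlparse

    def build_headers(page_url):
        # Derive the Host header from the incoming URL instead of assuming
        # wstream.video, so nored.icu embeds get a matching Host as well.
        host = urlparse(page_url).netloc
        return [['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0'],
                ['Host', host]]

    headers = build_headers("https://nored.icu/swembedid/12345")  # hypothetical URL
    assert headers[1][1] == 'nored.icu'
    # page_url.replace(headers[1][1], '116.202.226.34') now targets whichever
    # domain was matched, not just wstream.video.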