From f7d661dabf6fafc70a74c078612db94eb4a2c54a Mon Sep 17 00:00:00 2001
From: marco <10120390+mac12m99@users.noreply.github.com>
Date: Wed, 22 Jun 2022 20:08:46 +0200
Subject: [PATCH] New streamlare and streamsb domains, possible streamsb fix

---
 core/scrapertools.py    |  7 ++++---
 servers/streamlare.json |  2 +-
 servers/streamsb.json   |  2 +-
 servers/streamsb.py     | 23 +++++++++++------------
 4 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/core/scrapertools.py b/core/scrapertools.py
index 07b59bea..26ed4985 100644
--- a/core/scrapertools.py
+++ b/core/scrapertools.py
@@ -513,7 +513,7 @@ def girc(page_data, url, co):
         and https://github.com/addon-lab/addon-lab_resolver_Project
         Copyright (C) 2021 ADDON-LAB, KAR10S
     """
-    import re
+    import re, random, string
     from core import httptools
     hdrs = {'Referer': url}
     rurl = 'https://www.google.com/recaptcha/api.js'
@@ -527,10 +527,11 @@ def girc(page_data, url, co):
     rdata = {'ar': 1,
              'k': key,
              'co': co,
-             'hl': 'en',
+             'hl': 'it',
              'v': v,
              'size': 'invisible',
-             'cb': '123456789'}
+             'sa': 'submit',
+             'cb': ''.join([random.choice(string.ascii_lowercase + string.digits) for i in range(12)])}
     page_data2 = httptools.downloadpage('{0}/anchor?{1}'.format(aurl, httptools.urlparse.urlencode(rdata)), headers=hdrs).data
     rtoken = re.search('recaptcha-token.+?="([^"]+)', page_data2)
     if rtoken:
diff --git a/servers/streamlare.json b/servers/streamlare.json
index 3ed6c586..31906389 100644
--- a/servers/streamlare.json
+++ b/servers/streamlare.json
@@ -4,7 +4,7 @@
   "ignore_urls": [],
   "patterns": [
     {
-      "pattern": "https://streamlare.com/[ve]/(\\w+)",
+      "pattern": "https?://(?:streamlare|sl(?:maxed|tube|watch))\\.(?:com?|org)/[ve]/(\\w+)",
       "url": "https://streamlare.com/e/\\1"
     }
   ]
diff --git a/servers/streamsb.json b/servers/streamsb.json
index e233e353..83905fba 100644
--- a/servers/streamsb.json
+++ b/servers/streamsb.json
@@ -4,7 +4,7 @@
   "ignore_urls": [],
   "patterns": [
     {
-      "pattern": "(?:streamsb|sbembed|sbembed1|sbplay1|sbplay|pelistop|tubesb|playersb|embedsb|watchsb|streamas|sbfast|sbfull|viewsb).\\w{2,5}/(?:embed-|d/|e/)?([A-z0-9]+)",
+      "pattern": "(?:streamsb|sbembed|sbembed1|sbplay1|sbplay|pelistop|tubesb|playersb|embedsb|watchsb|streamas|sbfast|sbfull|viewsb|sbvideo|cloudemb|sbplay2|japopav|javplaya|ssbstream)\\.\\w{2,5}/(?:embed-|d/|e/)?([A-z0-9]+)",
       "url": "https://streamsb.net/d/\\1"
     },
     {
diff --git a/servers/streamsb.py b/servers/streamsb.py
index e8d0ed32..d1bdfcd1 100644
--- a/servers/streamsb.py
+++ b/servers/streamsb.py
@@ -24,18 +24,17 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     if sources:
         sources.sort(key=lambda x: int(x[1]), reverse=True)
         sources = [(x[1] + 'p', x[0]) for x in sources]
-        for s in sources:
-            code, mode, hash = eval(s[1])
-            dl_url = 'https://{0}/dl?op=download_orig&id={1}&mode={2}&hash={3}'.format(host, code, mode, hash)
-            data = httptools.downloadpage(dl_url).data
-
-            captcha = scrapertools.girc(data, 'https://{0}/'.format(host), base64.b64encode('https://{0}:443'.format(host).encode('utf-8')).decode('utf-8').replace('=', ''))
-            if captcha:
-                data = httptools.downloadpage(dl_url, post={'op': 'download_orig', 'id': code, 'mode': mode,
-                                                            'hash': hash, 'g-recaptcha-response': captcha}, timeout=10).data
-            media_url = scrapertools.find_single_match(data, 'href="([^"]+)">Direct')
-            if media_url:
-                video_urls.append([s[0], media_url])
+        s = sources[0]  # only the first, to reduce the number of requests to Google reCAPTCHA
+        code, mode, hash = eval(s[1])
+        dl_url = 'https://{0}/dl?op=download_orig&id={1}&mode={2}&hash={3}'.format(host, code, mode, hash)
+        data = httptools.downloadpage(dl_url).data
+        captcha = scrapertools.girc(data, 'https://{0}/'.format(host), base64.b64encode('https://{0}:443'.format(host).encode('utf-8')).decode('utf-8').replace('=', ''))
+        if captcha:
+            data = httptools.downloadpage(dl_url, post={'op': 'download_orig', 'id': code, 'mode': mode,
+                                                        'hash': hash, 'g-recaptcha-response': captcha}, timeout=10).data
+        media_url = scrapertools.find_single_match(data, 'href="([^"]+)"[^>]+>Download')
+        if media_url:
+            video_urls.append([s[0], media_url])
     return video_urls
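
Note (not part of the patch): a quick way to sanity-check the widened host patterns is to run them against a couple of mirror URLs with Python's re module. This is only a minimal sketch: the sample URLs and video ids below are made up for illustration, and the pattern/url pairs are copied from the patched servers/streamlare.json and servers/streamsb.json with the JSON backslash escaping collapsed into raw strings.

    import re

    # pattern/url pairs taken from the patched server JSON files
    rules = [
        (r"https?://(?:streamlare|sl(?:maxed|tube|watch))\.(?:com?|org)/[ve]/(\w+)",
         r"https://streamlare.com/e/\1"),
        (r"(?:streamsb|sbembed|sbembed1|sbplay1|sbplay|pelistop|tubesb|playersb|embedsb"
         r"|watchsb|streamas|sbfast|sbfull|viewsb|sbvideo|cloudemb|sbplay2|japopav"
         r"|javplaya|ssbstream)\.\w{2,5}/(?:embed-|d/|e/)?([A-z0-9]+)",
         r"https://streamsb.net/d/\1"),
    ]

    # hypothetical mirror URLs and video ids, for illustration only
    samples = [
        "https://slwatch.co/e/abc123def",
        "https://ssbstream.net/e/xyz789",
    ]

    for url in samples:
        for pattern, template in rules:
            m = re.search(pattern, url)
            if m:
                # rewrite the mirror URL to its canonical form, as the resolver would
                print(url, "->", m.expand(template))
                break

Under those assumptions the first sample rewrites to https://streamlare.com/e/abc123def and the second to https://streamsb.net/d/xyz789, which is the behaviour the new domain alternations are meant to add.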