Fix StreamSB

This commit is contained in:
marco
2023-02-11 13:39:10 +01:00
parent 5fe60b7b83
commit e3ab536339
2 changed files with 26 additions and 39 deletions

View File

@@ -3,12 +3,12 @@
"find_videos": {
"ignore_urls": [],
"patterns": [{
"pattern": "(?:streamsb|sblanh|sbembed|sbembed1|sbplay1|sbplay|pelistop|tubesb|playersb|embedsb|watchsb|streamas|sbfast|sbfull|viewsb|sbvideo|cloudemb|sbplay2|japopav|javplaya|ssbstream|sbthe|sbspeed|sbanh|sblongvu|sbchill)\\.\\w{2,5}/(?:embed-|d/|e/)?([A-z0-9]+)",
"url": "https://streamsb.net/d/\\1"
"pattern": "(?:streamsb|sblanh|sbembed|sbembed1|sbplay1|sbplay|pelistop|tubesb|playersb|embedsb|watchsb|streamas|sbfast|sbfull|viewsb|sbvideo|cloudemb|sbplay2|japopav|javplaya|ssbstream|sbthe|sbspeed|sbanh|sblongvu|sbchill|sbhight)\\.\\w{2,5}/(?:embed-|d/|e/)?([A-z0-9]+)",
"url": "https://streamas.cloud/e/\\1.html"
},
{
"pattern": "(?:cloudemb.com)/([A-z0-9]+)",
"url": "https://streamsb.net/d/\\1"
"url": "https://streamas.cloud/e/\\1.html"
}
]
},

View File

@@ -1,50 +1,37 @@
# adapted from
# https://github.com/tvaddonsco/script.module.urlresolver/blob/master/lib/urlresolver/plugins/streamsb.py
from core import httptools
from platformcode import config, logger
import random, string
import codecs
from core import httptools, scrapertools
from platformcode import config
import base64
def get_sources(page_url):
    """Fetch the JSON stream descriptor for a StreamSB embed/page url.

    The server exposes an obfuscated "sources50" endpoint whose path is the
    hex encoding of ``rand||code||rand||streamsb``, where ``code`` is the
    video id taken from the tail of *page_url*.

    Returns the decoded JSON response (a dict, expected to contain a
    'stream_data' entry on success or an 'error' entry on failure).
    """
    # video id: last path segment, with any trailing '.html' stripped
    code = page_url.split('/')[-1].split('.html')[0]
    # two random 12-letter padding tokens required by the endpoint format
    rand1 = "".join([random.choice(string.ascii_letters) for y in range(12)])
    rand2 = "".join([random.choice(string.ascii_letters) for y in range(12)])
    _0x470d0b = '{}||{}||{}||streamsb'.format(rand1, code, rand2)
    sources = 'https://streamas.cloud/sources50/' + codecs.getencoder('hex')(_0x470d0b.encode())[0].decode()
    # the endpoint rejects requests carrying headers other than 'watchsb'
    # and the user agent, hence replace_headers=True
    ret = httptools.downloadpage(sources, headers={'watchsb': 'sbstream', 'User-Agent': httptools.get_user_agent()}, replace_headers=True).json
    # NOTE(review): requires `logger`; confirm `from platformcode import ... logger`
    # is still present in this file after the refactor
    logger.debug(ret)
    return ret
def test_video_exists(page_url):
    """Check that the video behind *page_url* is still available.

    Fetches the stream descriptor once and caches it in the module-global
    ``sources`` so that a subsequent get_video_url() call does not need to
    hit the server again.

    Returns a ``(exists, message)`` tuple: ``(True, "")`` when playable,
    ``(False, localized_error)`` when the server reports an error.
    """
    global sources
    sources = get_sources(page_url)
    # the API returns an 'error' key when the file is gone
    if 'error' in sources:
        return False, config.get_localized_string(70449) % "StreamSB"
    else:
        return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Return the playable urls for the video.

    Relies on the module-global ``sources`` populated by a prior
    test_video_exists() call (the framework always calls that first).

    Returns a list of ``[label, url]`` pairs: the main m3u8 stream and an
    alternate backup stream. *premium*, *user*, *password* and
    *video_password* are part of the standard server-plugin signature and
    are unused here.
    """
    global sources
    # 'stream_data' holds the HLS manifests resolved by get_sources()
    stream_file = sources['stream_data']['file']
    stream_backup = sources['stream_data']['backup']
    return [["m3u8 [StreamSB]", stream_file], ["m3u8-altern [StreamSB]", stream_backup]]
def get_filename(page_url):
    """Return the video title reported by the StreamSB API for *page_url*.

    Performs its own get_sources() request because this entry point may be
    called independently of test_video_exists().
    """
    # NOTE(review): assumes 'stream_data'/'title' are always present on
    # success; an 'error' response would raise KeyError — confirm callers
    # only invoke this for existing videos
    return get_sources(page_url)['stream_data']['title']