Fix vcrypt and wstream

This commit is contained in:
mac12m99
2021-05-06 21:24:26 +02:00
parent 36ea5be436
commit be9832e934
3 changed files with 34 additions and 29 deletions

View File

@@ -509,7 +509,10 @@ class UnshortenIt(object):
if 'shield' in uri.split('/')[-2]:
uri = decrypt_aes(uri.split('/')[-1], b"naphajU2usWUswec")
else:
if 'sb/' in uri or 'akv/' in uri or 'wss/' in uri or 'wsd/' in uri:
spl = uri.split('/')
spl[0] = 'http:'
if 'sb/' in uri or 'akv/' in uri or 'wss/' in uri:
import datetime, hashlib
from base64 import b64encode
# ip = urlopen('https://api.ipify.org/').read()
@@ -519,11 +522,11 @@ class UnshortenIt(object):
headers = {
"Cookie": hashlib.md5(ip+day).hexdigest() + "=1;saveMe=1"
}
spl = uri.split('/')
spl[3] += '1'
if spl[3] in ['wss1', 'sb1']:
spl[4] = b64encode(spl[4].encode('utf-8')).decode('utf-8')
uri = '/'.join(spl)
uri = '/'.join(spl)
r = httptools.downloadpage(uri, timeout=self._timeout, headers=headers, follow_redirects=False, verify=False)
if 'Wait 1 hour' in r.data:
uri = ''
@@ -637,10 +640,12 @@ class UnshortenIt(object):
return uri, str(e)
def _unshorten_snip(self, uri):
new_uri = ''
if 'out_generator' in uri:
uri = re.findall('url=(.*)$', uri)[0]
elif '/decode/' in uri:
new_uri = re.findall('url=(.*)$', uri)[0]
if not new_uri.startswith('http'):
new_uri = httptools.downloadpage(uri, follow_redirects=False).headers['Location']
uri = new_uri
if '/decode/' in uri:
uri = decrypt_aes(uri.split('/')[-1], b"whdbegdhsnchdbeh")
# scheme, netloc, path, query, fragment = urlsplit(uri)
@@ -712,22 +717,22 @@ def findlinks(text):
text += '\n' + unshorten(matches[0])[0]
elif matches:
# non threaded for webpdb
for match in matches:
sh = unshorten(match)[0]
text += '\n' + sh
# import sys
# if sys.version_info[0] >= 3:
# from concurrent import futures
# else:
# from concurrent_py2 import futures
# with futures.ThreadPoolExecutor() as executor:
# unshList = [executor.submit(unshorten, match) for match in matches]
# for link in futures.as_completed(unshList):
# if link.result()[0] not in matches:
# links = link.result()[0]
# if type(links) == list:
# for l in links:
# text += '\n' + l
# else:
# text += '\n' + str(link.result()[0])
# for match in matches:
# sh = unshorten(match)[0]
# text += '\n' + sh
import sys
if sys.version_info[0] >= 3:
from concurrent import futures
else:
from concurrent_py2 import futures
with futures.ThreadPoolExecutor() as executor:
unshList = [executor.submit(unshorten, match) for match in matches]
for link in futures.as_completed(unshList):
if link.result()[0] not in matches:
links = link.result()[0]
if type(links) == list:
for l in links:
text += '\n' + l
else:
text += '\n' + str(link.result()[0])
return text

View File

@@ -11,7 +11,7 @@
"url": "http://wstream.video/video.php?file_code=\\1"
},
{
"pattern": "(?:wstream\\.video|wstream\\.icu|krask\\.xyz)/((?:api/vcmod/fastredirect/[a-z]+\\.php\\?id=|swembedid|swvideoid/)[$0-9]+)",
"pattern": "(?:wstream\\.video|wstream\\.icu|krask\\.xyz)/((?:api/vcmod/fastredirect/streaming[a-z]*\\.php\\?id=|swembedid|swvideoid/)[$0-9]+)",
"url": "http://wstream.video/\\1"
},
{

View File

@@ -24,7 +24,7 @@ def test_video_exists(page_url):
resp = httptools.downloadpage(page_url, verify=False, disable_directIP=disable_directIP, follow_redirects=False)
while resp.headers.get('location'):
page_url = resp.headers.get('location')
resp = httptools.downloadpage(page_url, verify=False, disable_directIP=disable_directIP, follow_redirects=False)
resp = httptools.downloadpage(page_url.replace('https:', 'http:'), verify=False, disable_directIP=disable_directIP, follow_redirects=False)
global data, real_url
data = resp.data
@@ -32,7 +32,7 @@ def test_video_exists(page_url):
if '/streaming.php' in page_url in page_url:
code = httptools.downloadpage(page_url, follow_redirects=False, only_headers=True, verify=False).headers['location'].split('/')[-1].replace('.html', '')
# logger.debug('WCODE=' + code)
page_url = 'https://wstream.video/video.php?file_code=' + code
page_url = 'http://wstream.video/video.php?file_code=' + code
data = httptools.downloadpage(page_url, follow_redirects=True, verify=False).data
if 'nored.icu' in page_url:
@@ -42,7 +42,7 @@ def test_video_exists(page_url):
dec = ''
for v in var.split(','):
dec += chr(int(v) - int(value))
page_url = 'https://wstream.video/video.php?file_code=' + scrapertools.find_single_match(dec, "src='([^']+)").split('/')[-1].replace('.html','')
page_url = 'http://wstream.video/video.php?file_code=' + scrapertools.find_single_match(dec, "src='([^']+)").split('/')[-1].replace('.html','')
new_data = httptools.downloadpage(page_url, follow_redirects=True, verify=False).data
logger.debug('NEW DATA: \n' + new_data)
if new_data:
@@ -64,7 +64,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
if not page_url:
page_url = scrapertools.find_single_match(data, r"""<form action=['"]([^'"]+)['"]""")
if page_url.startswith('/'):
page_url = 'https://wstream.video' + page_url
page_url = 'http://wstream.video' + page_url
if page_url:
data = httptools.downloadpage(page_url, follow_redirects=True, post={'g-recaptcha-response': captcha}, verify=False).data