Fix streamz and add support for new URL shorteners
@@ -43,10 +43,12 @@ class UnshortenIt(object):
     _stayonline_regex = r'stayonline\.pro'
     # for services that only include real link inside iframe
     _simple_iframe_regex = r'cryptmango|xshield\.net|vcrypt\.club'
+    # for services that only do redirects
+    _simple_redirect = r'streamcrypt\.net/[^/]+'
 
     listRegex = [_adfly_regex, _linkbucks_regex, _adfocus_regex, _lnxlu_regex, _shst_regex, _hrefli_regex, _anonymz_regex,
                  _shrink_service_regex, _rapidcrypt_regex, _simple_iframe_regex, _vcrypt_regex, _linkup_regex, _linkhub_regex,
-                 _swzz_regex, _stayonline_regex]
+                 _swzz_regex, _stayonline_regex, _simple_redirect]
 
     _maxretries = 5
 
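Note: the new _simple_redirect pattern is intentionally narrow; it requires at least one path segment after the streamcrypt.net host, so a bare domain is not treated as a shortened link. A minimal standalone check (illustrative, not part of the commit):

    import re

    _simple_redirect = r'streamcrypt\.net/[^/]+'

    # a link with a path segment matches
    print(bool(re.search(_simple_redirect, 'https://streamcrypt.net/embed/abc', re.IGNORECASE)))  # True
    # the bare host does not
    print(bool(re.search(_simple_redirect, 'https://streamcrypt.net/', re.IGNORECASE)))  # False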
@@ -92,6 +94,10 @@ class UnshortenIt(object):
             uri, code = self._unshorten_swzz(uri)
         if re.search(self._stayonline_regex, uri, re.IGNORECASE):
             uri, code = self._unshorten_stayonline(uri)
+        if re.search(self._simple_redirect, uri, re.IGNORECASE):
+            p = httptools.downloadpage(uri)
+            uri = p.url
+            code = p.code
 
         if oldUri == uri:
             break
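These "simple redirect" services need no scraping at all: the new branch just downloads the page and keeps whatever URL the HTTP layer lands on, relying on the surrounding loop (which exits once oldUri == uri) to unshorten chains of redirects. A rough equivalent with plain requests (a sketch only; the project's httptools wrapper already follows redirects and exposes .url and .code):

    import requests

    def resolve_redirect(uri, timeout=10):
        # follow the 3xx chain and report where it ends
        r = requests.get(uri, timeout=timeout, allow_redirects=True)
        return r.url, r.status_code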
@@ -127,7 +133,7 @@ class UnshortenIt(object):
         if not r.success:
             return uri, -1
 
-        if '4snip' not in r.url and 'location' in r.headers and retries < self._maxretries:
+        if 'snip.' not in r.url and 'location' in r.headers and retries < self._maxretries:
             r = httptools.downloadpage(
                 r.headers['location'],
                 cookies=False,
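The guard is loosened from the literal '4snip' to the substring 'snip.', so the follow-redirect loop now also stops on sibling snip-style hosts instead of redirecting past them. For example (the second host is hypothetical):

    print('snip.' in 'https://4snip.pw/out_generator?url=x')   # True
    print('snip.' in 'https://foosnip.net/decode/abc')         # True (any host containing 'snip.')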
@@ -538,8 +544,9 @@ class UnshortenIt(object):
             if uri == prev_uri:
                 logger.info('Use Cloudscraper')
                 uri = httptools.downloadpage(uri, timeout=self._timeout, headers=headers, follow_redirects=False, cf=True).headers['location']
-
-            if "4snip" in uri:
+            # from core import support
+            # support.dbg()
+            if "snip." in uri:
                 if 'out_generator' in uri:
                     uri = re.findall('url=(.*)$', uri)[0]
                 elif '/decode/' in uri:
@@ -578,7 +585,7 @@ class UnshortenIt(object):
         if not r:
             r = httptools.downloadpage(uri, follow_redirects=False, timeout=self._timeout, cookies=False)
         uri = r.headers['location']
-        if "4snip" in uri:
+        if "snip." in uri:
             if 'out_generator' in uri:
                 uri = re.findall('url=(.*)$', uri)[0]
             elif '/decode/' in uri:
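Both call sites now branch on the same two snip URL shapes: an out_generator link, where the real target sits in the query string, and a /decode/ link. The query-string case reduces to the one-line extraction used above (values illustrative):

    import re

    uri = 'https://4snip.pw/out_generator?url=https://example.com/video'
    target = re.findall('url=(.*)$', uri)[0]
    # target == 'https://example.com/video'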
@@ -688,22 +695,22 @@ def findlinks(text):
         text += '\n' + unshorten(matches[0])[0]
     elif matches:
         # non threaded for webpdb
-        for match in matches:
-            sh = unshorten(match)[0]
-            text += '\n' + sh
-        # import sys
-        # if sys.version_info[0] >= 3:
-        #     from concurrent import futures
-        # else:
-        #     from concurrent_py2 import futures
-        # with futures.ThreadPoolExecutor() as executor:
-        #     unshList = [executor.submit(unshorten, match) for match in matches]
-        #     for link in futures.as_completed(unshList):
-        #         if link.result()[0] not in matches:
-        #             links = link.result()[0]
-        #             if type(links) == list:
-        #                 for l in links:
-        #                     text += '\n' + l
-        #             else:
-        #                 text += '\n' + str(link.result()[0])
+        # for match in matches:
+        #     sh = unshorten(match)[0]
+        #     text += '\n' + sh
+        import sys
+        if sys.version_info[0] >= 3:
+            from concurrent import futures
+        else:
+            from concurrent_py2 import futures
+        with futures.ThreadPoolExecutor() as executor:
+            unshList = [executor.submit(unshorten, match) for match in matches]
+            for link in futures.as_completed(unshList):
+                if link.result()[0] not in matches:
+                    links = link.result()[0]
+                    if type(links) == list:
+                        for l in links:
+                            text += '\n' + l
+                    else:
+                        text += '\n' + str(link.result()[0])
     return text
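findlinks goes back to resolving matches concurrently: the sequential loop stays behind comments for webpdb debugging sessions, while the thread pool fans the unshorten calls out and collects results as they complete. The core pattern, reduced to a standalone sketch (unshorten here stands for any callable returning a (url, code) pair):

    from concurrent import futures  # Python 2 builds fall back to the bundled concurrent_py2

    def unshorten_all(matches, unshorten):
        results = []
        with futures.ThreadPoolExecutor() as executor:
            pending = [executor.submit(unshorten, m) for m in matches]
            for fut in futures.as_completed(pending):
                results.append(fut.result()[0])  # first element is the resolved URL
        return results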
@@ -4,14 +4,14 @@
         "ignore_urls": [],
         "patterns": [
             {
-                "pattern": "(https://streamz.cc/[A-z0-9]+)",
+                "pattern": "(https://streamz(?:z|).\\w{2,3}/[A-z0-9]+)",
                 "url": "\\1"
             }
         ]
     },
     "free": true,
     "id": "streamz",
-    "name": "streamZ",
+    "name": "streamz",
     "settings": [
         {
             "default": false,
@@ -38,4 +38,4 @@
             "visible": false
         }
     ]
-}
+}
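The widened pattern accepts both the single- and double-z spelling of the host and any two- or three-letter TLD, so mirror domains keep resolving without further JSON changes. Unescaped from JSON into a Python raw string, it behaves like this (the second host is an illustrative mirror):

    import re

    pattern = r'(https://streamz(?:z|).\w{2,3}/[A-z0-9]+)'
    print(bool(re.match(pattern, 'https://streamz.cc/abc123')))   # True
    print(bool(re.match(pattern, 'https://streamzz.to/abc123')))  # True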
@@ -1,19 +1,33 @@
 # -*- coding: utf-8 -*-
 import re
 from core import httptools
 from core import scrapertools
+from platformcode import logger, config
 from lib import jsunpack
-from platformcode import logger
 
 
+def test_video_exists(page_url):
+    global data
+    logger.info("(page_url='%s')" % page_url)
+    data = httptools.downloadpage(page_url).data
+
+    if "<font color=\"red\"><b>File not found, sorry!" in data:
+        return False, config.get_localized_string(70449) % "Streamz"
+    return True, ""
+
+
 def get_video_url(page_url, video_password):
     logger.info("(page_url='%s')" % page_url)
     video_urls = []
     url = httptools.downloadpage(page_url).url
     data = httptools.downloadpage(url).data
-    data = scrapertools.find_single_match(data, '<footer id="Footer" class="clearfix">(.*?)</html>')
-    packed = scrapertools.find_single_match(data, r'(eval.*?)</script>')
-    unpacked = jsunpack.unpack(packed)
-    url = scrapertools.find_single_match(unpacked, '(https://streamz.cc/getlink.*?.dll)')
-    video_urls.append(["[streamz]", url])
-    return video_urls
+    packed = scrapertools.find_single_match(data, r'(eval\(function\(p,a,c,k,e,d\).*?)\s+</script>')
+    unpacked = jsunpack.unpack(packed)
+
+    url = scrapertools.find_single_match(unpacked, '(https://streamz.*?/get.*?.dll)')
+
+    url = url.replace("getmp4", "getlink").replace("getIink", "getlink")
+
+    url += "|User-Agent=%s" % httptools.get_user_agent()
+    video_urls.append(["[streamz]", url])
+
+    return video_urls
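The rewritten get_video_url unpacks the p,a,c,k,e,d script, grabs whichever get*.dll variant it advertises, normalizes it to getlink, and appends the player's User-Agent with the pipe-header convention. The normalization step in isolation (URL value illustrative):

    url = 'https://streamz.ws/getmp4XYZ.dll'
    url = url.replace("getmp4", "getlink").replace("getIink", "getlink")
    url += "|User-Agent=%s" % "Mozilla/5.0"
    # -> 'https://streamz.ws/getlinkXYZ.dll|User-Agent=Mozilla/5.0'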