Fix maxstream

This commit is contained in:
mac12m99
2021-10-22 19:03:30 +02:00
parent ebf520aaf2
commit ec4510aea4
2 changed files with 45 additions and 42 deletions

View File

@@ -49,7 +49,6 @@ directIP = {
     'wstream.icu': '31.220.1.77',
    'wstream.video': '31.220.1.77',
    'krask.xyz': '31.220.1.77',
-    'maxstream.video': '109.236.81.23'
}
# Maximum wait time for downloadpage, if nothing is specified

View File

@@ -4,6 +4,8 @@
 # --------------------------------------------------------
 import ast, sys
+import requests
 from core import httptools, scrapertools, support
 from lib import jsunpack
 from platformcode import logger, config, platformtools
@@ -29,7 +31,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=""):
     logger.debug("url=" + page_url)
     video_urls = []
     global data
+    if 'captcha' in data:
+        httptools.set_cookies(requests.get('http://lozioangie.altervista.org/maxcookie.php').json())
+        data = httptools.downloadpage(page_url).data
     # sitekey = scrapertools.find_multiple_matches(data, """data-sitekey=['"] *([^"']+)""")
     # if sitekey: sitekey = sitekey[-1]
@@ -46,44 +46,44 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=""):
     # platformtools.dialog_ok(config.get_localized_string(20000), config.get_localized_string(707434))
     # return []
-    # packed = support.match(data, patron=r"(eval\(function\(p,a,c,k,e,d\).*?)\s*</script").match
-    # unpack = jsunpack.unpack(packed)
-    # url = scrapertools.find_single_match(unpack, 'src:\s*"([^"]+)')
-    # if url:
-    #     video_urls.append(['m3u8 [MaxStream]', url])
+    packed = support.match(data, patron=r"(eval\(function\(p,a,c,k,e,d\).*?)\s*</script").match
+    unpack = jsunpack.unpack(packed)
+    url = scrapertools.find_single_match(unpack, 'src:\s*"([^"]+)')
+    if url:
+        video_urls.append(['m3u8 [MaxStream]', url])
+    return video_urls
     # support.dbg()
-    possible_cast_url = support.match('http://maxstream.video/?op=page&tmpl=../../download1', patron='<a[^<]+href="(?:https://maxstream\.video/)?([^".?]+/)"').matches
-    for cast_url in possible_cast_url:
-        data = httptools.downloadpage('http://maxstream.video/' + cast_url + page_url.split('/')[-1]).data
-        url_video = ''
-
-        lastIndexStart = data.rfind('<script>')
-        lastIndexEnd = data.rfind('</script>')
-
-        script = data[ (lastIndexStart + len('<script>')):lastIndexEnd ]
-
-        match = scrapertools.find_single_match(script, r'(\[[^\]]+\])[^\{]*\{[^\(]+\(parseInt\(value\)\s?-\s?([0-9]+)')
-        if match:
-            char_codes = ast.literal_eval(match[0])
-            hidden_js = "".join([chr(c - int(match[1])) for c in char_codes])
-
-            newurl = scrapertools.find_single_match(hidden_js, r'\$.get\(\'([^\']+)').replace('https://', 'http://')
-            castpage = httptools.downloadpage(newurl, headers={'x-requested-with': 'XMLHttpRequest', 'Referer': page_url}).data
-            url_video = scrapertools.find_single_match(castpage, r"cc\.cast\('(http[s]?.[^']+)'")
-        else:
-            logger.debug('Something wrong: no url found before that :(')
-
-        if url_video and url_video.split('/')[-1] == page_url.split('/')[-1]:
-            import random, string
-            parse = urlparse.urlparse(url_video)
-            video_urls = [['mp4 [MaxStream]', url_video]]
-            try:
-                r1 = ''.join(random.choice(string.ascii_letters + string.digits) for i in range(19))
-                r2 = ''.join(random.choice(string.ascii_letters + string.digits) for i in range(19))
-                r3 = ''.join(random.choice(string.ascii_letters + string.digits) for i in range(19))
-                video_urls.append(['m3u8 [MaxStream]', '{}://{}/hls/{},{},{},{},.urlset/master.m3u8'.format(parse.scheme, parse.netloc, parse.path.split('/')[1], r1, r2, r3)])
-                # video_urls.append(['m3u8 [MaxStream]', '{}://{}/hls/{},wpsc2hllm5g5fkjvslq,4jcc2hllm5gzykkkgha,fmca2hllm5jtpb7cj5q,.urlset/master.m3u8'.format(parse.scheme, parse.netloc, parse.path.split('/')[1])])
-            except:
-                logger.debug('Something wrong: Impossible get HLS stream')
-    return video_urls
+    # possible_cast_url = support.match('http://maxstream.video/?op=page&tmpl=../../download1', patron='<a[^<]+href="(?:https://maxstream\.video/)?([^".?]+/)"').matches
+    # for cast_url in possible_cast_url:
+    #     data = httptools.downloadpage('http://maxstream.video/' + cast_url + page_url.split('/')[-1]).data
+    #     url_video = ''
+    #
+    #     lastIndexStart = data.rfind('<script>')
+    #     lastIndexEnd = data.rfind('</script>')
+    #
+    #     script = data[ (lastIndexStart + len('<script>')):lastIndexEnd ]
+    #
+    #     match = scrapertools.find_single_match(script, r'(\[[^\]]+\])[^\{]*\{[^\(]+\(parseInt\(value\)\s?-\s?([0-9]+)')
+    #     if match:
+    #         char_codes = ast.literal_eval(match[0])
+    #         hidden_js = "".join([chr(c - int(match[1])) for c in char_codes])
+    #
+    #         newurl = scrapertools.find_single_match(hidden_js, r'\$.get\(\'([^\']+)').replace('https://', 'http://')
+    #         castpage = httptools.downloadpage(newurl, headers={'x-requested-with': 'XMLHttpRequest', 'Referer': page_url}).data
+    #         url_video = scrapertools.find_single_match(castpage, r"cc\.cast\('(http[s]?.[^']+)'")
+    #     else:
+    #         logger.debug('Something wrong: no url found before that :(')
+    #
+    #     if url_video and url_video.split('/')[-1] == page_url.split('/')[-1]:
+    #         import random, string
+    #         parse = urlparse.urlparse(url_video)
+    #         video_urls = [['mp4 [MaxStream]', url_video]]
+    #         try:
+    #             r1 = ''.join(random.choice(string.ascii_letters + string.digits) for i in range(19))
+    #             r2 = ''.join(random.choice(string.ascii_letters + string.digits) for i in range(19))
+    #             r3 = ''.join(random.choice(string.ascii_letters + string.digits) for i in range(19))
+    #             video_urls.append(['m3u8 [MaxStream]', '{}://{}/hls/{},{},{},{},.urlset/master.m3u8'.format(parse.scheme, parse.netloc, parse.path.split('/')[1], r1, r2, r3)])
+    #             # video_urls.append(['m3u8 [MaxStream]', '{}://{}/hls/{},wpsc2hllm5g5fkjvslq,4jcc2hllm5gzykkkgha,fmca2hllm5jtpb7cj5q,.urlset/master.m3u8'.format(parse.scheme, parse.netloc, parse.path.split('/')[1])])
+    #         except:
+    #             logger.debug('Something wrong: Impossible get HLS stream')
+    # return video_urls