Changes to Alpha
This commit is contained in:
25
servers/maxstream.json
Normal file
@@ -0,0 +1,25 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "https?://maxstream.video/(?:e/|embed-|cast/)?([a-z0-9]+)",
                "url": "https://maxstream.video/cast/\\1"
            }
        ]
    },
    "free": true,
    "id": "maxstream",
    "name": "MaxStream",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@70708",
            "type": "bool",
            "visible": true
        }
    ]
}
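The find_videos pattern above normalizes every recognized MaxStream link to its /cast/ form. A minimal sketch of the rewrite, with made-up sample URLs and plain re.sub standing in for whatever the core resolver actually does:

import re

pattern = r"https?://maxstream.video/(?:e/|embed-|cast/)?([a-z0-9]+)"
template = r"https://maxstream.video/cast/\1"

for sample in ("https://maxstream.video/e/abc123",
               "https://maxstream.video/embed-abc123"):
    # Both forms collapse to https://maxstream.video/cast/abc123
    print(re.sub(pattern, template, sample))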
65
servers/maxstream.py
Normal file
@@ -0,0 +1,65 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# MaxStream connector
# --------------------------------------------------------

from core import httptools, scrapertools, support
from platformcode import logger, config
import ast, sys

if sys.version_info[0] >= 3:
    import urllib.parse as urlparse
else:
    import urlparse


def test_video_exists(page_url):
    logger.debug("(page_url='%s')" % page_url)

    global data
    data = httptools.downloadpage(page_url).data

    if "file was deleted" in data:
        return False, config.get_localized_string(70449) % "MaxStream"

    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.debug("url=" + page_url)
    video_urls = []
    url_video = ''

    # Isolate the last <script> block, which holds the obfuscated player code.
    lastIndexStart = data.rfind('<script>')
    lastIndexEnd = data.rfind('</script>')
    script = data[(lastIndexStart + len('<script>')):lastIndexEnd]

    match = scrapertools.find_single_match(script, r'(\[[^\]]+\])[^\{]*\{[^\(]+\(parseInt\(value\)\s?-\s?([0-9]+)')
    if match:
        # The script is an array of character codes shifted by a fixed offset;
        # subtracting the offset from each code recovers the hidden JS.
        char_codes = ast.literal_eval(match[0])
        hidden_js = "".join([chr(c - int(match[1])) for c in char_codes])

        newurl = scrapertools.find_single_match(hidden_js, r'\$\.get\(\'([^\']+)')
        castpage = httptools.downloadpage(newurl, headers={'x-requested-with': 'XMLHttpRequest', 'Referer': page_url}).data
        url_video = scrapertools.find_single_match(castpage, r"cc\.cast\('(http[s]?.[^']+)'")
    else:
        logger.debug('Something went wrong: no URL found in the last script block')

    if url_video:
        import random, string
        parse = urlparse.urlparse(url_video)
        video_urls.append(['mp4 [MaxStream]', url_video])
        try:
            # The HLS master URL accepts arbitrary 19-character alphanumeric tokens.
            r1 = ''.join(random.choice(string.ascii_letters + string.digits) for i in range(19))
            r2 = ''.join(random.choice(string.ascii_letters + string.digits) for i in range(19))
            r3 = ''.join(random.choice(string.ascii_letters + string.digits) for i in range(19))
            video_urls.append(['m3u8 [MaxStream]', '{}://{}/hls/{},{},{},{},.urlset/master.m3u8'.format(parse.scheme, parse.netloc, parse.path.split('/')[1], r1, r2, r3)])
        except Exception:
            logger.debug('Something went wrong: unable to build the HLS stream URL')
    return video_urls
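The deobfuscation step above is just a character-code shift. A tiny self-contained illustration, with a fabricated array and offset (real pages use much longer arrays):

import ast

# Hypothetical captures: match[0] is the "[...]" char-code array,
# match[1] is the offset taken from "parseInt(value) - N".
match = ("[109, 106, 113, 113, 116]", "5")

char_codes = ast.literal_eval(match[0])
hidden_js = "".join(chr(c - int(match[1])) for c in char_codes)
print(hidden_js)  # -> hello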
15
servers/streamingcommunityws.json
Normal file
@@ -0,0 +1,15 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
        ]
    },
    "free": true,
    "id": "streamingcommunityws",
    "name": "StreamingCommunityWS",
    "premium": [
    ],
    "settings": [
    ]
}
30
servers/streamingcommunityws.py
Normal file
@@ -0,0 +1,30 @@
# -*- coding: utf-8 -*-
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int

import json
import random
from core import httptools, support, scrapertools
from platformcode import platformtools, logger
from lib.streamingcommunity import Client as SCClient

files = None


def test_video_exists(page_url):

    # page_url is the {VIDEO_ID}, e.g. 5957
    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):

    video_urls = []

    global c
    c = SCClient("", video_id=page_url, is_playing_fnc=platformtools.is_playing)

    media_url = c.get_manifest_url()

    video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [Streaming Community]", media_url])

    return video_urls
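The playback label is built from the last four characters of the manifest filename, which for an HLS manifest comes out as "m3u8". A quick illustration with a stand-in URL (plain string slicing here; scrapertools.get_filename_from_url is assumed to return the path's final component):

media_url = "https://vixcloud.example/playlist/master.m3u8"  # hypothetical URL
filename = media_url.rsplit("/", 1)[-1]                       # "master.m3u8"
print(filename[-4:] + " [Streaming Community]")               # m3u8 [Streaming Community]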
17
servers/streamon.json
Normal file
@@ -0,0 +1,17 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [{
            "pattern": "streamon\\.to/d/(\\w+)",
            "url": "https://streamon.to/d/\\1"
        }]
    },
    "free": true,
    "id": "streamon",
    "name": "Streamon",
    "premium": [
    ],
    "settings": [
    ]
}
124
servers/streamon.py
Normal file
@@ -0,0 +1,124 @@
# -*- coding: utf-8 -*-
from core import httptools, scrapertools, config
import base64
import math
import re

files = None


def test_video_exists(page_url):

    global htmldata
    htmldata = httptools.downloadpage(page_url).data

    if 'Oops! video not found' in htmldata:
        return False, config.get_localized_string(70449) % "Streamon"
    else:
        return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):

    # tabber.js ships the packed decoder; unpack it first.
    tabbler = httptools.downloadpage('https://streamon.to/assets/js/tabber.js').data
    params_tabber = scrapertools.find_single_match(tabbler, r'\}\(([^\)]+)')

    params_tabber_decoder = params_tabber.split(',')
    decoded_tabler = eval_fn(
        params_tabber_decoder[0].replace('"', ''),
        int(params_tabber_decoder[1]),
        params_tabber_decoder[2].replace('"', ''),
        int(params_tabber_decoder[3]),
        int(params_tabber_decoder[4]),
        int(params_tabber_decoder[5])
    )

    # The unpacked tabber code reveals the variable name and the two junk
    # substrings that must be stripped before base64-decoding the URL.
    decoder = scrapertools.find_single_match(decoded_tabler, r'var res = ([a-z]{12})\.replace\("([^"]+)[^\.]+\.replace\("([^"]+)')

    params_from_page = scrapertools.find_single_match(htmldata, r'<script\s+?type=[\'"].*?[\'"]>\s?var.*?\}\((.*?)\)\)</script>')

    params_from_page_decoder = params_from_page.split(',')

    first_decoder_fn = eval_fn(
        params_from_page_decoder[0].replace('"', ''),
        int(params_from_page_decoder[1]),
        params_from_page_decoder[2].replace('"', ''),
        int(params_from_page_decoder[3]),
        int(params_from_page_decoder[4]),
        int(params_from_page_decoder[5])
    )

    variable_value = scrapertools.find_single_match(first_decoder_fn, 'var {}="([^"]+)"'.format(decoder[0]))

    res = variable_value.replace(decoder[1], "")
    res2 = res.replace(decoder[2], "")
    media_url = base64.b64decode(res2).decode('ascii')

    video_urls = []
    video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [Streamon]", media_url])

    return video_urls


"""
" I don't know what the following lines do. Maybe not even God knows...
" but they seem to work :)
"""

def loop_reduce(lst, h, e):
    """
    " Simulate the Array.reduce functionality
    """
    acc = 0
    for index, val in enumerate(lst):
        indexOf = h.find(val)
        if indexOf > -1:
            pow = int(math.pow(e, index))
            acc = acc + indexOf * pow

    return acc


def decrypt_string(d, e, f):
    """
    " Decrypt a character sequence with the given key (base-e to base-f conversion)
    """
    g = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/'

    h = g[0:e]
    i = g[0:f]

    j = loop_reduce(list(d)[::-1], h, e)
    k = ''
    while j > 0:
        j = int(j)
        k = i[j % f] + k
        j = (j - (j % f)) / f

    return k or ''


def eval_fn(h, u, n, t, e, r):
    """
    " Extract the decrypter key and convert the decrypted string into an ASCII string
    """
    r = ""
    i = -1
    while i < len(h) - 1:
        i = i + 1
        s = ''
        # Collect symbols until the group delimiter n[e] is reached.
        while h[i] != n[e]:
            s += h[i]
            i = i + 1
        # Map each symbol n[j] back to its digit j.
        for j in range(0, len(n)):
            reg = re.compile(n[j])
            s = re.sub(reg, str(j), s)

        res = decrypt_string(s, e, 10)
        r += chr(int(res) - t)

    return r
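The three helpers above are a Python port of the page's JS packer: eval_fn splits the packed string on the delimiter symbol n[e], maps the remaining symbols back to digits, reads each group as a base-e number via decrypt_string, and offsets the result by t to get a character code. A self-contained sanity check, assuming the functions above are in scope (the packed string and the "abc" alphabet are hand-made, not from a real page):

# decrypt_string is base conversion: "101" read in base 2 is 5 in base 10.
assert decrypt_string("101", 2, 10) == "5"

# Hand-packed "Hi": 'a'/'b' encode binary digits, 'c' ends each group,
# base e = 2, offset t = 0; ord('H') = 72 = 0b1001000, ord('i') = 105 = 0b1101001.
assert eval_fn("baabaaacbbabaabc", 0, "abc", 0, 2, 0) == "Hi"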
@@ -2,10 +2,11 @@
# --------------------------------------------------------
# Streamtape connector by Alfa Development Group
# --------------------------------------------------------
from core import httptools, scrapertools
from core import httptools
from platformcode import logger, config
from core.support import match
import sys
from lib import js2py

PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
@@ -27,17 +28,10 @@ def test_video_exists(page_url):

def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.debug("url=" + page_url)
    # from core.support import dbg; dbg()

    video_urls = []
    possible_url = ''
    find_url = match(data, patron=r'''innerHTML = ["']([^"]+)["'](?:\s*\+\s*['"]([^"']+))?''').match
    for m in find_url:
        possible_url += m

    if not possible_url:
        possible_url = match(data, patron=r"innerHTML\\'\]=\\'([^']+)").match
    find_url = match(data, patron=r'innerHTML = ([^;]+)').match
    possible_url = js2py.eval_js(find_url)
    url = "https:" + possible_url
    url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
    video_urls.append(['MP4 [Streamtape]', url])
    return video_urls
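The rewritten Streamtape extractor no longer splices innerHTML fragments together by hand; it captures the whole right-hand expression and lets js2py evaluate it. A minimal sketch with a fabricated page snippet (js2py imported directly here; the connector uses the copy bundled under lib):

import re
import js2py

# Fabricated example of the obfuscated assignment found in the page HTML.
data = "document.getElementById('ideoolink').innerHTML = '//streamta' + 'pe.com/get_video?id=abc123';"

expression = re.search(r"innerHTML = ([^;]+)", data).group(1)
possible_url = js2py.eval_js(expression)  # JS concatenation evaluated -> //streamtape.com/get_video?id=abc123
url = "https:" + possible_url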