KoD 1.7.7

- routine fixes to channels/servers
marco
2023-06-30 19:39:03 +02:00
parent c3e02636fb
commit d29efd4ec2
68 changed files with 1784 additions and 543 deletions

servers/doodstream.json

@@ -4,8 +4,8 @@
     "ignore_urls": [],
     "patterns": [
       {
-        "pattern": "dood(?:stream)?.[^/]+/+(?:e|d)/([a-z0-9]+)",
-        "url": "https://dood.yt/e/\\1"
+        "pattern": "(do*d(?:stream)?.[^/]+)/+(?:e|d)/([a-z0-9]+)",
+        "url": "https://\\1/e/\\2"
       }
     ]
   },
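Note: the old pattern rewrote every DooD mirror to the hard-coded dood.yt host, while the new one captures the mirror's own host and reuses it in the replacement. A quick sanity check of the new regex (the URLs are illustrative):

import re

pattern = r"(do*d(?:stream)?.[^/]+)/+(?:e|d)/([a-z0-9]+)"
for link in ("https://doodstream.com/e/abc123", "https://dood.so/d/xyz789"):
    m = re.search(pattern, link)
    if m:
        # group 1 keeps the matched host, group 2 carries the video id
        print("https://{}/e/{}".format(m.group(1), m.group(2)))
# -> https://doodstream.com/e/abc123
# -> https://dood.so/e/xyz789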

servers/doodstream.py

@@ -1,15 +1,20 @@
 # -*- coding: utf-8 -*-
-import time, string, random
-from core import httptools, support, servertools
+import time, string, random, sys
+from core import httptools, support
 from platformcode import logger, config
+if sys.version_info[0] >= 3:
+    from urllib.parse import urlparse
+else:
+    from urlparse import urlparse  # on Python 2, urlparse is its own module, not part of urllib
 def test_video_exists(page_url):
     global data
     logger.debug('page url=', page_url)
-    response = httptools.downloadpage(page_url)
+    response = httptools.downloadpage(page_url, cloudscraper=True)
     if response.code == 404 or 'dsplayer' not in response.data:
         return False, config.get_localized_string(70449) % 'DooD Stream'
     else:
@@ -22,14 +27,13 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     logger.debug("URL", page_url)
     video_urls = []
-    host = 'https://' + servertools.get_server_host('doodstream')[0]
-    headers = {'User-Agent': httptools.get_user_agent(), 'Referer': page_url}
-    support.dbg()
+    host = 'https://{}'.format(urlparse(page_url).netloc)
+    headers = {'User-Agent': httptools.get_user_agent(), 'Referer': host}
     match = support.match(data, patron=r'''dsplayer\.hotkeys[^']+'([^']+).+?function\s*makePlay.+?return[^?]+([^"]+)''').match
     if match:
         url, token = match
-        ret = httptools.downloadpage(host + url, headers=headers).data
+        ret = httptools.downloadpage(host + url, headers=headers, cloudscraper=True).data
         video_urls.append(['mp4 [DooD Stream]', '{}{}{}{}|Referer={}'.format(randomize(ret), url, token, int(time.time() * 1000), host)])
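Note: the host is now derived from the page URL itself rather than from the configured server list, so the referer always matches the mirror actually being used. A rough sketch of that derivation (URL is illustrative):

from urllib.parse import urlparse  # Python 3 branch of the import above

page_url = 'https://dood.so/e/abc123'  # illustrative mirror URL
host = 'https://{}'.format(urlparse(page_url).netloc)
print(host)  # -> https://dood.so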

servers/filemoon.py

@@ -20,5 +20,5 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     packed = support.match(data, patron=r'(eval\(function\(p.*?)</').match
     if packed:
         data = jsunpack.unpack(packed).replace("\\", "")
-    video_urls = support.get_jwplayer_mediaurl(data, 'filemoon')
+    video_urls = support.get_jwplayer_mediaurl(data, 'filemoon', hls=True)
     return video_urls
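Note: the patron above grabs the whole P.A.C.K.E.R. blob, everything from eval(function(p up to the closing script tag, before handing it to jsunpack. A minimal illustration of what it matches, with made-up page data:

import re

data = "<script>eval(function(p,a,c,k,e,d){...}('x',1,1,'y'.split('|')))</script>"  # made-up HTML
packed = re.search(r"(eval\(function\(p.*?)</", data)
if packed:
    print(packed.group(1))  # this string is what gets passed to jsunpack.unpack()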

servers/mixdrop.json

@@ -4,7 +4,7 @@
     "ignore_urls": [],
     "patterns": [
       {
-        "pattern": "mixdro?ps?.[^/]+/(?:f|e)/([a-z0-9]+)",
+        "pattern": "mixdro?o?ps?.[^/]+/(?:f|e)/([a-z0-9]+)",
         "url": "https://mixdrop.co/e/\\1"
       },
       {

servers/streamhide.json Normal file

@@ -0,0 +1,27 @@
+{
+  "active": true,
+  "find_videos": {
+    "ignore_urls": [],
+    "patterns": [
+      {
+        "pattern": "ahvsh.com/[a-z]/([\\d\\w]+)",
+        "url": "https://ahvsh.com/e/\\1"
+      }
+    ]
+  },
+  "free": true,
+  "id": "streamhide",
+  "name": "StreamHide",
+  "settings": [
+    {
+      "default": false,
+      "enabled": true,
+      "id": "black_list",
+      "label": "@70708",
+      "type": "bool",
+      "visible": true
+    }
+  ],
+  "cloudflare": true
+}

servers/streamhide.py Normal file

@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+##
+from core import httptools, support
+from core import scrapertools
+from platformcode import logger, config
+def test_video_exists(page_url):
+    logger.debug("(page_url='%s')" % page_url)
+    global data
+    data = httptools.downloadpage(page_url).data
+    if "File is no longer available" in data:
+        return False, config.get_localized_string(70449) % "StreamHide"
+    return True, ""
+def get_video_url(page_url, premium=False, user="", password="", video_password=""):
+    logger.debug("url=" + page_url)
+    global data
+    return support.get_jwplayer_mediaurl(data, 'StreamHide', hls=True)
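Note: like the other server modules, the new resolver caches the downloaded page in a module-level global: test_video_exists() fetches it once and get_video_url() reuses it. A hypothetical caller, loosely mimicking how the resolver framework drives these two functions (the import path and URL are illustrative, not the framework's actual code):

from servers import streamhide  # assumes the servers package is importable

page_url = 'https://ahvsh.com/e/abcd1234'  # illustrative video id
ok, message = streamhide.test_video_exists(page_url)  # downloads and caches `data`
if ok:
    video_urls = streamhide.get_video_url(page_url)   # parses the cached `data`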

servers/streamingcommunity.py

@@ -1,68 +1,77 @@
 # -*- coding: utf-8 -*-
+import urllib.parse
+import ast
+import xbmc
 from core import httptools, support, filetools
 from platformcode import logger, config
-UA = httptools.random_useragent()
+from concurrent import futures
+from urllib.parse import urlparse
+vttsupport = False if int(xbmc.getInfoLabel('System.BuildVersion').split('.')[0]) < 20 else True
 def test_video_exists(page_url):
-    global scws_id
-    logger.debug('page url=', page_url)
-    scws_id = ''
+    global iframe
+    global iframeParams
-    if page_url.isdigit():
-        scws_id = page_url
-    else:
-        page = httptools.downloadpage(page_url)
-        if page.url == page_url:  # if it does not exist, it redirects to the last existing url that was called
-            scws_id = support.scrapertools.find_single_match(page.data, r'scws_id[^:]+:(\d+)')
-        else:
-            return 'StreamingCommunity', 'Prossimamente'
+    iframe = support.scrapertools.decodeHtmlentities(support.match(page_url, patron='<iframe [^>]+src="([^"]+)').match)
+    iframeParams = support.match(iframe, patron='window\.masterPlaylistParams\s=\s({.*?})').match
+    if not iframeParams:
+        return 'StreamingCommunity', 'Prossimamente'
-    if not scws_id:
-        return False, config.get_localized_string(70449) % 'StreamingCommunityWS'
     return True, ""
 def get_video_url(page_url, premium=False, user="", password="", video_password=""):
-    from time import time
-    from base64 import b64encode
-    from hashlib import md5
-    global scws_id
+    urls = list()
+    subs = list()
+    local_subs = list()
     video_urls = list()
-    # clientIp = httptools.downloadpage(f'https://scws.work/videos/{scws_id}').json.get('client_ip')
-    clientIp = httptools.downloadpage('http://ip-api.com/json/').json.get('query')
-    if clientIp:
-        expires = int(time() + 172800)
-        token = b64encode(md5('{}{} Yc8U6r8KjAKAepEA'.format(expires, clientIp).encode('utf-8')).digest()).decode('utf-8').replace('=', '').replace('+', '-').replace('/', '_')
-        url = 'https://scws.work/master/{}?token={}&expires={}&n=1'.format(scws_id, token, expires)
-        if page_url.isdigit():
-            video_urls.append(['m3u8', '{}|User-Agent={}'.format(url, UA)])
-        else:
-            video_urls = compose(url)
-    return video_urls
+    scws_id = urlparse(iframe).path.split('/')[-1]
+    masterPlaylistParams = ast.literal_eval(iframeParams)
+    url = 'https://scws.work/v2/playlist/{}?{}&n=1'.format(scws_id, urllib.parse.urlencode(masterPlaylistParams))
+    info = support.match(url, patron=r'LANGUAGE="([^"]+)",\s*URI="([^"]+)|(http.*?rendition=(\d+)[^\s]+)').matches
-def compose(url):
-    subs = []
-    video_urls = []
-    info = support.match(url, patron=r'LANGUAGE="([^"]+)",\s*URI="([^"]+)|RESOLUTION=\d+x(\d+).*?(http[^"\s]+)', headers={'User-Agent':UA}).matches
-    if info and not logger.testMode:  # the tests do not like this part
-        for lang, sub, res, url in info:
-            if sub:
-                while True:
-                    match = support.match(sub, patron=r'(http[^\s\n]+)').match
-                    if match:
-                        sub = httptools.downloadpage(match).data
-                    else:
-                        break
+    if info:
+        for lang, sub, url, res in info:
+            if sub:
-                if lang == 'auto': lang = 'ita-forced'
-                s = config.get_temp_file(lang + '.srt')
-                subs.append(s)
-                filetools.write(s, support.vttToSrt(sub))
-            elif url:
-                video_urls.append(['m3u8 [{}]'.format(res), '{}|User-Agent={}'.format(url, UA), 0, subs])
+                subs.append([lang, sub])
+            elif not 'token=&' in url:
+                urls.append([res, url])
+        if subs:
+            local_subs = subs_downloader(subs)
+            video_urls = [['m3u8 [{}]'.format(res), url, 0, local_subs] for res, url in urls]
+        else:
+            video_urls = [['m3u8 [{}]'.format(res), url] for res, url in urls]
     else:
-        video_urls.append(['m3u8', '{}|User-Agent={}'.format(url, UA)])
+        video_urls = [['hls', url]]
     return video_urls
+def subs_downloader(subs):
+    def subs_downloader_thread(n, s):
+        lang, url = s
+        match = support.match(url, patron=r'(http[^\s\n]+)').match
+        if match:
+            data = httptools.downloadpage(match).data
+            if lang == 'auto': lang = 'ita-forced'
+            sub = config.get_temp_file('{}.{}'.format(lang, 'vtt' if vttsupport else 'srt'))
+            filetools.write(sub, data if vttsupport else support.vttToSrt(data))
+            return n, sub
+    local_subs = list()
+    with futures.ThreadPoolExecutor() as executor:
+        itlist = [executor.submit(subs_downloader_thread, n, s) for n, s in enumerate(subs)]
+        for res in futures.as_completed(itlist):
+            if res.result():
+                local_subs.append(res.result())
+    return [s[1] for s in sorted(local_subs, key=lambda n: n[0])]
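Note: subs_downloader() tags each job with its submission index so the results, which futures.as_completed() yields in completion order, can be re-sorted afterwards. The generic shape of that pattern, stripped of the download logic:

from concurrent import futures

def work(n, item):
    return n, item.upper()  # stand-in for the actual subtitle download

items = ['ita', 'eng', 'spa']
results = []
with futures.ThreadPoolExecutor() as executor:
    jobs = [executor.submit(work, n, s) for n, s in enumerate(items)]
    for job in futures.as_completed(jobs):
        if job.result():
            results.append(job.result())
ordered = [r[1] for r in sorted(results, key=lambda r: r[0])]  # back in submission order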

servers/streamsb.json

@@ -3,16 +3,16 @@
   "find_videos": {
     "ignore_urls": [],
     "patterns": [{
-        "pattern": "(?:streamsb|sblanh|sbembed|sbembed1|sbplay1|sbplay|pelistop|tubesb|playersb|embedsb|watchsb|streamas|sbfast|sbfull|viewsb|sbvideo|cloudemb|sbplay2|japopav|javplaya|ssbstream|sbthe|sbspeed|sbanh|sblongvu|sbchill|sbhight|sbbrisk)\\.\\w{2,5}/(?:embed-|d/|e/)?([A-z0-9]+)",
-        "url": "https://streamas.cloud/e/\\1.html"
+        "pattern": "(?:streamsb|sblanh|sblona|sbembed|sbembed1|sbplay1|sbplay|pelistop|tubesb|playersb|embedsb|watchsb|streamas|sbfast|sbfull|viewsb|sbvideo|cloudemb|sbplay2|japopav|javplaya|ssbstream|sbthe|sbspeed|sbanh|sblongvu|sbchill|sbhight|sbbrisk)\\.\\w{2,5}/(?:embed-|d/|e/)?([A-z0-9]+)",
+        "url": "https://streamas.cloud/d/\\1.html"
     },
     {
       "pattern": "(?:cloudemb.com)/([A-z0-9]+)",
-      "url": "https://streamas.cloud/e/\\1.html"
+      "url": "https://streamas.cloud/d/\\1.html"
     },
     {
       "pattern": "animeworld.biz/(?:embed-|d/|e/)?([A-z0-9]+)",
-      "url": "https://streamas.cloud/e/\\1.html"
+      "url": "https://streamas.cloud/d/\\1.html"
     }
   ]
 },

servers/streamsb.py

@@ -1,53 +1,81 @@
-from core import httptools
-import re
+from core import httptools, support, scrapertools
 from platformcode import config, logger, platformtools
-import random, string
-import codecs
-try:
-    import urllib.parse as urllib
-except ImportError:
-    import urllib
+import re, sys
+if sys.version_info[0] >= 3:
+    from concurrent import futures
+else:
+    from concurrent_py2 import futures
+from base64 import b64encode
+host = 'https://streamas.cloud'
 def get_sources(page_url):
-    code = page_url.split('/')[-1].split('.html')[0]
-    rand1 = "".join([random.choice(string.ascii_letters) for y in range(12)])
-    rand2 = "".join([random.choice(string.ascii_letters) for y in range(12)])
-    _0x470d0b = '{}||{}||{}||streamsb'.format(rand1, code, rand2)
-    prefix = 'https://streamas.cloud/sources'
-    suffix = '/' + codecs.getencoder('hex')(_0x470d0b.encode())[0].decode()
-    number = config.get_setting('number', server='streamsb')
-    sources = prefix + str(number) + suffix
-    # does not accept headers other than watchsb and the user agent
-    ret = httptools.downloadpage(sources, headers={'watchsb': 'sbstream', 'User-Agent': httptools.get_user_agent()}, replace_headers=True).json
-    if not ret:  # the number probably changed
-        wait = platformtools.dialog_progress('StreamSB', config.get_localized_string(60293))
-        for number in range(100):
-            if httptools.downloadpage(prefix + str(number) + '/').code == 200:
-                config.set_setting('number', server='streamsb', value=number)
-                sources = prefix + str(number) + suffix
-                # does not accept headers other than watchsb and the user agent
-                ret = httptools.downloadpage(sources,
-                                             headers={'watchsb': 'sbstream', 'User-Agent': httptools.get_user_agent()},
-                                             replace_headers=True).json
-                break
-        wait.close()
-    logger.debug(ret)
-    return ret
+    sources = support.match(page_url, headers={'watchsb': 'sbstream', 'User-Agent': httptools.get_user_agent()}, replace_headers=True, patron=r'download_video([^"]+).*?<span>\s*(\d+)').matches
+    if sources:
+        sources = {s[1]: s[0].replace('(', '').replace(')', '').replace("'", '').split(',') for s in sources}
+    return sources
 def test_video_exists(page_url):
     global sources
     sources = get_sources(page_url)
-    if 'error' in sources:
-        return False, config.get_localized_string(70449) % "StreamSB"
-    else:
+    if sources:
         return True, ""
+    else:
+        return False, config.get_localized_string(70449) % "StreamSB"
 def get_video_url(page_url, premium=False, user="", password="", video_password=""):
     global sources
-    file = sources['stream_data']['file']
-    backup = sources['stream_data']['backup']
-    return [["m3u8 [StreamSB]", file], ["m3u8-altern [StreamSB]", backup]]
+    video_urls = list()
+    if sources:
+        action = config.get_setting('default_action')
+        if action == 0:
+            progress = platformtools.dialog_progress_bg("StreamSB", message="Risoluzione URLs")
+            step = int(100 / len(sources))
+            percent = 0
+            for res, url in sources.items():
+                progress.update(percent, "Risoluzione URL: {}p".format(res))
+                r, u = resolve_url(res, url)
+                percent += step
+                progress.update(percent, "Risoluzione URL: {}p".format(res))
+                video_urls.append(['{} [{}]'.format(u.split('.')[-1], r), u])
+            progress.close()
+        else:
+            res = sorted(sources)[0 if action == 1 else -1]
+            progress = platformtools.dialog_progress_bg("StreamSB", message="Risoluzione URL: {}p".format(res))
+            url = sources[res]
+            r, u = resolve_url(res, url)
+            progress.close()
+            video_urls.append(['{} [{}]'.format(u.split('.')[-1], r), u])
+    return video_urls
 def get_filename(page_url):
     return get_sources(page_url)['stream_data']['title']
+def get_payloads(data, token):
+    # support.dbg()
+    payloads = {'g-recaptcha-response': token}
+    for name, value in support.match(data, patron=r'input type="hidden" name="([^"]+)" value="([^"]+)').matches:
+        payloads[name] = value
+    return payloads
+def resolve_url(res, params):
+    url = ''
+    source_url = '{}/dl?op=download_orig&id={}&mode={}&hash={}'.format(host, params[0], params[1], params[2])
+    data = httptools.downloadpage(source_url).data
+    co = b64encode((host + ':443').encode('utf-8')).decode('utf-8').replace('=', '')
+    token = scrapertools.girc(data, host, co)
+    payload = get_payloads(data, token)
+    if token:
+        url = support.match(source_url, patron=r'href="([^"]+)"\s*class="btn\s*btn-light', post=payload).match
+    return res, url
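Note: resolve_url() rebuilds the site's download form: it fetches the download_orig page, solves the reCAPTCHA via scrapertools.girc(), copies every hidden input into the payload alongside the token, and posts it back to read the final href. The hidden-field harvesting in get_payloads() boils down to this (the HTML is made up):

import re

html = '''<input type="hidden" name="op" value="download_orig">
<input type="hidden" name="id" value="abc123">
<input type="hidden" name="hash" value="deadbeef">'''

payload = {'g-recaptcha-response': 'TOKEN'}  # token from the captcha solver
for name, value in re.findall(r'input type="hidden" name="([^"]+)" value="([^"]+)', html):
    payload[name] = value
# payload now mirrors the form a browser would submit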

servers/youtube.py

@@ -21,7 +21,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     logger.debug("(page_url='%s')" % page_url)
     video_urls = []
     video_id = scrapertools.find_single_match(page_url, '(?:v=|embed/)([A-z0-9_-]{11})')
-    inputstream = platformtools.install_inputstream()
     try: