fix wstream
@@ -8,11 +8,11 @@
     "patterns": [
         {
             "pattern": "wstream\\.video(?!<)(?:=|/)(?:video[a-zA-Z0-9.?_]*|embed[a-zA-Z0-9]*|)?(?!api|swembedid)(?:-|/|=)?(?:[a-z0-9A-Z]+/)?([a-z0-9A-Z]+)",
-            "url": "http://116.202.226.34/video.php?file_code=\\1"
+            "url": "http://wstream.video/video.php?file_code=\\1"
         },
         {
             "pattern": "wstream\\.video/(api/vcmod/fastredirect/streaming\\.php\\?id=[$0-9]+)",
-            "url": "http://116.202.226.34/\\1"
+            "url": "http://wstream.video/\\1"
         }
     ],
     "ignore_urls": []
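For context, each JSON entry above is a URL-rewriting rule: the resolver is assumed to apply "pattern" with Python's re module and expand "url" with the captured file code (the \1 backreference). A minimal sketch, using the first rule and a hypothetical embed link:

import re

# Pattern and URL template taken from the JSON above (JSON escaping removed).
pattern = r"wstream\.video(?!<)(?:=|/)(?:video[a-zA-Z0-9.?_]*|embed[a-zA-Z0-9]*|)?(?!api|swembedid)(?:-|/|=)?(?:[a-z0-9A-Z]+/)?([a-z0-9A-Z]+)"
url_template = r"http://wstream.video/video.php?file_code=\1"

embed = "https://wstream.video/embed-abc123xyz.html"  # hypothetical embed URL
m = re.search(pattern, embed)
if m:
    print(m.expand(url_template))  # http://wstream.video/video.php?file_code=abc123xyz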
@@ -2,7 +2,9 @@
 # Kodi on Demand - Kodi Addon
 # by DrZ3r0 - Fix Alhaziel
+
-import re, json
+import json
+import re
 
 try:
     import urllib.parse as urllib
 except ImportError:
@@ -15,17 +17,17 @@ headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20
 
 def test_video_exists(page_url):
     logger.info("(page_url='%s')" % page_url)
-    resp = httptools.downloadpage(page_url, headers=headers)
+    resp = httptools.downloadpage(page_url.replace('wstream.video', '116.202.226.34'), headers=headers, verify=False)
 
     global data, real_url
     data = resp.data
 
     page_url = resp.url.replace('wstream.video', '116.202.226.34')
     if '/streaming.php' in page_url:
-        code = httptools.downloadpage(page_url, headers=headers, follow_redirects=False).headers['location'].split('/')[-1].replace('.html', '')
+        code = httptools.downloadpage(page_url, headers=headers, follow_redirects=False, only_headers=True, verify=False).headers['location'].split('/')[-1].replace('.html', '')
         # logger.info('WCODE=' + code)
         page_url = 'https://116.202.226.34/video.php?file_code=' + code
-        data = httptools.downloadpage(page_url, headers=headers, follow_redirects=True).data
+        data = httptools.downloadpage(page_url, headers=headers, follow_redirects=True, verify=False).data
 
     real_url = page_url
     if "Not Found" in data or "File was deleted" in data:
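The recurring change in this hunk is verify=False plus routing through the raw IP: once requests go to 116.202.226.34 instead of the wstream.video hostname, the TLS certificate presumably no longer validates, so certificate verification is disabled. A rough equivalent of the streaming.php branch, sketched with the requests library rather than KoD's httptools wrapper (the id and User-Agent are placeholders):

import requests

headers = {'User-Agent': 'Mozilla/5.0'}  # any desktop UA; placeholder
page_url = 'https://116.202.226.34/api/vcmod/fastredirect/streaming.php?id=12345'  # hypothetical id

# Fetch only the redirect target, without following it and without verifying the certificate.
resp = requests.get(page_url, headers=headers, allow_redirects=False, verify=False)
code = resp.headers['location'].split('/')[-1].replace('.html', '')
video_page = 'https://116.202.226.34/video.php?file_code=' + code
data = requests.get(video_page, headers=headers, verify=False).text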
@@ -40,7 +42,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     global data,headers
     page_url = scrapertools.find_single_match(data, r"""<center><a href='(https?:\/\/wstream[^']+)'\s*title='bkg'""")
     if page_url:
-        data = httptools.downloadpage(page_url, headers=headers, follow_redirects=True, post={'g-recaptcha-response': captcha}).data
+        data = httptools.downloadpage(page_url, headers=headers, follow_redirects=True, post={'g-recaptcha-response': captcha}, verify=False).data
 
     def getSources(data):
         possibileSources = scrapertools.find_multiple_matches(data, r'sources:\s*(\[[^\]]+\])')
@@ -57,6 +59,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
                 video_urls.append(['%s [%s]' % (key['type'].replace('video/', ''), key['label']),
                                    key['src'].replace('https', 'http') + '|' + _headers])
             else:
+                if not 'src' in key and 'file' in key: key['src'] = key['file']
+                if key['file'].split('.')[-1] == 'mpd': pass
                 video_urls.append([key['src'].split('.')[-1], key['src'].replace('https', 'http') + '|' + _headers])
         except:
             pass
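The two added lines cope with player entries that expose the stream as 'file' instead of 'src'. A small sketch of that fallback on a hypothetical, already-parsed sources entry (the real code takes its entries from the sources: [...] block matched above):

_headers = ''  # placeholder for the urlencoded header string the addon appends

key = {'file': 'https://host.example/stream.mp4', 'label': '720p'}  # hypothetical entry
if not 'src' in key and 'file' in key:
    key['src'] = key['file']
ext = key['src'].split('.')[-1]                       # 'mp4'; an .mpd entry would be skipped
video_url = key['src'].replace('https', 'http') + '|' + _headers
print([ext, video_url])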
@@ -74,7 +78,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     post = {param[0]: param[1] for param in possibleParam if param[0]}
     if captcha: post['g-recaptcha-response'] = captcha
     if post:
-        data = httptools.downloadpage(real_url, headers=headers, post=post, follow_redirects=True).data
+        data = httptools.downloadpage(real_url, headers=headers, post=post, follow_redirects=True, verify=False).data
     elif captcha:
         int_bckup_method()
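For reference, the post dict in this last hunk is rebuilt from (name, value) pairs, presumably scraped from the page's hidden form fields (an assumption; only the comprehension itself appears in the diff), dropping unnamed ones, and the captcha token is added on top when present. A tiny illustration with hypothetical values:

possibleParam = [('op', 'view'), ('file_code', 'abc123xyz'), ('', 'dropped')]  # hypothetical scraped pairs
captcha = 'recaptcha-token'                                                    # hypothetical token

post = {param[0]: param[1] for param in possibleParam if param[0]}
if captcha: post['g-recaptcha-response'] = captcha
# post == {'op': 'view', 'file_code': 'abc123xyz', 'g-recaptcha-response': 'recaptcha-token'}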