diff --git a/servers/wstream.json b/servers/wstream.json index 7b331fd7..97e73570 100644 --- a/servers/wstream.json +++ b/servers/wstream.json @@ -1,9 +1,9 @@ { "id": "wstream", - "name": "wstream", + "name": "Wstream", "active": true, "free": true, - "thumbnail": "http:\/\/media.tvalacarta.info\/servers\/server_wstream.png", + "thumbnail": "server_wstream.png", "find_videos": { "patterns": [ { @@ -23,7 +23,7 @@ "url": "https://wstream.video/video.php?file_code=\\1" }, { - "pattern": "wstream\\.video/(?!api/|stream/)([a-z0-9A-Z]+)", + "pattern": "wstream\\.video/(?!api/|stream/|embed-)([a-z0-9A-Z]+)", "url": "https://wstream.video/video.php?file_code=\\1" } ], diff --git a/servers/wstream.py b/servers/wstream.py index 1a80faf5..71b15c0c 100644 --- a/servers/wstream.py +++ b/servers/wstream.py @@ -2,11 +2,10 @@ # Kodi on Demand - Kodi Addon - Kodi Addon # by DrZ3r0 - Fix Alhaziel -import re -import urllib +import re, json, urllib from core import httptools, scrapertools -from platformcode import logger +from platformcode import logger, config headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0']] @@ -14,13 +13,13 @@ def test_video_exists(page_url): logger.info("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url).data if "Not Found" in data or "File was deleted" in data: - return False, "[wstream.py] Il File Non esiste" + return False, config.get_localized_string(70449) % 'Wstream' return True, "" # Returns an array of possible video url's from the page_url def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("[wstream.py] url=" + page_url) + logger.info("[Wstream] url=" + page_url) video_urls = [] if '/streaming.php' in page_url: @@ -28,33 +27,27 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= page_url = 'https://wstream.video/video.php?file_code=' + code code = page_url.split('=')[-1] - post = urllib.urlencode({ - 
'videox': code - }) + post = urllib.urlencode({'videox': code}) - data = httptools.downloadpage(page_url, headers=headers, post=post, follow_redirects=True).data.replace('https','http') - logger.info("[wstream.py] data=" + data) - vid = scrapertools.find_multiple_matches(data, 'download_video.*?>.*?<.*?([^\,,\s]+)') + data = httptools.downloadpage(page_url, headers=headers, post=post, follow_redirects=True).data headers.append(['Referer', page_url]) - post_data = scrapertools.find_single_match(data,"\s*") + _headers = urllib.urlencode(dict(headers)) + + post_data = scrapertools.find_single_match(data, r"\s*") if post_data != "": from lib import jsunpack data = jsunpack.unpack(post_data) - logger.info("[wstream.py] data=" + data) - block = scrapertools.find_single_match(data, 'sources:\s*\[[^\]]+\]') - data = block - media_urls = scrapertools.find_multiple_matches(data, '(http.*?\.mp4)') - _headers = urllib.urlencode(dict(headers)) - i = 0 + data = scrapertools.find_single_match(data, r'sources:\s*(\[[^\]]+\])') + data = re.sub('([A-Za-z_]+):(?!/)','"\\1":',data) + keys = json.loads(data) - for media_url in media_urls: - video_urls.append([vid[i] if vid else 'video' + " mp4 [wstream] ", media_url + '|' + _headers]) - i = i + 1 + for key in keys: + video_urls.append(['%s [%sp]' % (key['type'].replace('video/',''), key['label']), key['src'].replace('https','http') + '|' + _headers]) + else: + media_urls = scrapertools.find_multiple_matches(data, '(http.*?\.mp4)') - for video_url in video_urls: - logger.info("[wstream.py] %s - %s" % (video_url[0], video_url[1])) - - logger.info(video_urls) + for media_url in media_urls: + video_urls.append(['video' + " mp4 [wstream] ", media_url + '|' + _headers]) return video_urls