diff --git a/servers/akstream.py b/servers/akstream.py
index 99b04900..4c13701b 100644
--- a/servers/akstream.py
+++ b/servers/akstream.py
@@ -11,10 +11,10 @@ from platformcode import logger, config
 def test_video_exists(page_url):
     logger.info("(page_url='%s')" % page_url)
     return True, ""
-    data = httptools.downloadpage(page_url).data
-    if "File was deleted" in data or "Page Cannot Be Found" in data:
-        return False, config.get_localized_string(70449) % "Akvideo"
-    return True, ""
+    # data = httptools.downloadpage(page_url).data
+    # if "File was deleted" in data or "Page Cannot Be Found" in data:
+    #     return False, config.get_localized_string(70449) % "Akvideo"
+    # return True, ""
 
 
 def get_video_url(page_url, premium=False, user="", password="", video_password=""):
diff --git a/servers/wstream.py b/servers/wstream.py
index c8910e73..4993fb75 100644
--- a/servers/wstream.py
+++ b/servers/wstream.py
@@ -11,6 +11,7 @@ headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20
 
 def test_video_exists(page_url):
     logger.info("(page_url='%s')" % page_url)
+    global data
     data = httptools.downloadpage(page_url).data
     if "Not Found" in data or "File was deleted" in data:
         return False, config.get_localized_string(70449) % 'Wstream'
@@ -21,14 +22,18 @@ def test_video_exists(page_url):
 
 
 def get_video_url(page_url, premium=False, user="", password="", video_password=""):
     logger.info("[Wstream] url=" + page_url)
     video_urls = []
+    global data
     if '/streaming.php' in page_url or 'html' in page_url:
-        code = httptools.downloadpage(page_url, headers=headers, follow_redirects=False).headers['location'].split('/')[-1].replace('.html','')
-        logger.info('WCODE='+code)
-        page_url = 'https://wstream.video/video.php?file_code=' + code
-
+        try:
+            code = httptools.downloadpage(page_url, headers=headers, follow_redirects=False).headers['location'].split('/')[-1].replace('.html','')
+            logger.info('WCODE='+code)
+            page_url = 'https://wstream.video/video.php?file_code=' + code
+            data = httptools.downloadpage(page_url, headers=headers, follow_redirects=True).data
+        except:
+            pass
+
     code = page_url.split('=')[-1]
-    data = httptools.downloadpage(page_url, headers=headers, follow_redirects=False).data
     ID = scrapertools.find_single_match(data, r'''input\D*id=(?:'|")([^'"]+)(?:'|")''')
     post = urllib.urlencode({ID: code})
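
Note on the wstream hunk: the new `try`/`except` wraps the redirect probe, so a missing or malformed `Location` header no longer raises; the code then falls back to reading the file code from the query string, reusing whatever `data` was fetched earlier via the new `global data`. Below is a minimal standalone sketch of that resolution flow, using `requests` in place of the project's `httptools` wrapper purely so the snippet is self-contained; the host URL and the split/replace logic are copied from the diff, while the function name `resolve_file_code` is invented for illustration:

```python
# Sketch of the file-code resolution added by this patch (not the
# project's actual code): probe the redirect, fall back to the query
# string on any failure.
import requests

def resolve_file_code(page_url):
    """Return (page_url, code) for a wstream link."""
    if '/streaming.php' in page_url or 'html' in page_url:
        try:
            # Request the page without following the redirect, then read
            # the target from the Location header: its last path segment,
            # minus a trailing '.html', is the file code.
            location = requests.get(page_url, allow_redirects=False).headers['Location']
            code = location.split('/')[-1].replace('.html', '')
            page_url = 'https://wstream.video/video.php?file_code=' + code
        except (requests.RequestException, KeyError):
            pass  # fall through to the query-string fallback below
    # Fallback, and the normal path for direct links: the code is the
    # value after the last '=' in the URL.
    return page_url, page_url.split('=')[-1]
```

This mirrors the patch's design choice of never failing hard on the probe: any exception leaves `page_url` untouched and lets the query-string parse supply the code.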