Little Fix to Wstream
@@ -28,17 +28,19 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     vid = scrapertools.find_multiple_matches(data, 'download_video.*?>.*?<.*?<td>([^\,,\s]+)')
 
     headers.append(['Referer', page_url])
-    post_data = scrapertools.find_single_match(data,
-                                               "</div>\s*<script type='text/javascript'>(eval.function.p,a,c,k,e,.*?)\s*</script>")
+    post_data = scrapertools.find_single_match(data,"</div>\s*<script type='text/javascript'>(eval.function.p,a,c,k,e,.*?)\s*</script>")
     if post_data != "":
         from lib import jsunpack
         data = jsunpack.unpack(post_data)
 
-    media_url = scrapertools.find_multiple_matches(data, '(http.*?\.mp4)')
+    block = scrapertools.find_single_match(data, 'sources:\s*\[[^\]]+\]')
+    if block: data = block
+
+    media_urls = scrapertools.find_multiple_matches(data, '(http.*?\.mp4)')
     _headers = urllib.urlencode(dict(headers))
     i = 0
 
-    for media_url in media_url:
+    for media_url in media_urls:
         video_urls.append([vid[i] + " mp4 [wstream] ", media_url + '|' + _headers])
         i = i + 1
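For context, the fix does three things: it collapses the wrapped find_single_match call onto one line, renames the result list to media_urls so the loop variable no longer shadows it, and, before scraping .mp4 links, narrows data to the player's sources: [...] block so unrelated .mp4 URLs elsewhere in the unpacked script are ignored. Below is a minimal, standalone sketch of that narrowing step using plain re instead of the addon's scrapertools helpers; the function name and sample input are illustrative assumptions, while the two regexes are the ones from the diff.

# Sketch only (assumed names and sample data, not the addon's code):
# mirror the new extraction logic from this commit.
import re

def extract_mp4_urls(unpacked_js):
    # Prefer the player's sources: [...] block, so stray .mp4 links elsewhere
    # in the unpacked script are ignored (this is what the added `block` and
    # `if block: data = block` lines do).
    block = re.search(r'sources:\s*\[[^\]]+\]', unpacked_js)
    data = block.group(0) if block else unpacked_js
    # Same pattern the addon feeds to find_multiple_matches.
    return re.findall(r'(http.*?\.mp4)', data)

if __name__ == "__main__":
    # Illustrative input resembling an unpacked jwplayer setup script.
    sample = ("var ad = 'http://example.com/banner.mp4';"
              "jwplayer().setup({sources: [{file:\"http://cdn.example.com/v/360.mp4\"},"
              "{file:\"http://cdn.example.com/v/720.mp4\"}]});")
    print(extract_mp4_urls(sample))
    # -> ['http://cdn.example.com/v/360.mp4', 'http://cdn.example.com/v/720.mp4']

Without the sources-block narrowing, the same findall would also pick up the unrelated banner.mp4 link, which is the behaviour this commit avoids.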