Added vcrypt/4snip unshorten, fixes for zcrypt
Avoid the page download if a vcrypt link is found in support
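
In short, vcrypt and 4snip links now resolve through lib/unshortenit instead of ad-hoc code in each server module. A minimal usage sketch of the new path (the short id is a placeholder, and it assumes the addon's lib package is importable):

    from lib import unshortenit

    # 'abc123' is a hypothetical id, not a real vcrypt link
    url, status = unshortenit.unshorten('http://vcrypt.net/abc123')
    if status == 200:
        print(url)  # destination after the cookie + meta-refresh hops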
@@ -273,7 +273,8 @@ def swzz_get_url(item):
     data = match
     if data.startswith('/'):
         data = urlparse.urljoin("http://swzz.xyz", data)
-        data = httptools.downloadpage(data).data
+        if not "vcrypt" in data:
+            data = httptools.downloadpage(data).data
         logger.debug("##### play /link/ data ##\n%s\n##" % data)
     else:
         data = item.url

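The new guard implements the commit description: when the matched link already points at vcrypt there is no need to download the page, since the unshortener introduced below resolves the vcrypt URL directly.
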
@@ -36,6 +36,7 @@ class UnshortenIt(object):
     _shrink_service_regex = r'shrink-service\.it'
     _rapidcrypt_regex = r'rapidcrypt\.net'
     _cryptmango_regex = r'cryptmango'
+    _vcrypt_regex = r'vcrypt\.net'

     _maxretries = 5

@@ -75,6 +76,8 @@ class UnshortenIt(object):
             return self._unshorten_rapidcrypt(uri)
         if re.search(self._cryptmango_regex, uri, re.IGNORECASE):
             return self._unshorten_cryptmango(uri)
+        if re.search(self._vcrypt_regex, uri, re.IGNORECASE):
+            return self._unshorten_vcrypt(uri)

         return uri, 0

@@ -464,6 +467,33 @@ class UnshortenIt(object):
         except Exception as e:
             return uri, str(e)

+    def _unshorten_vcrypt(self, uri):
+        try:
+            req = httptools.downloadpage(uri, timeout=self._timeout, follow_redirects=False)
+            idata = req.data
+            from core import scrapertools
+
+            patron = r"document.cookie\s=\s.*?'(.*)'"
+            match_str = re.compile(patron, re.MULTILINE).findall(idata)[0]
+
+            patron = r';URL=([^\"]+)\">'
+            dest = scrapertools.find_single_match(idata, patron)
+            http_headers = {"Cookie": match_str}
+            r = httptools.downloadpage(dest, post=' ', headers=http_headers)
+            uri = r.url
+
+            if "4snip" in uri:
+                desturl = uri.replace("/out/", "/outlink/")
+                import os
+                par = os.path.basename(desturl)
+                post = 'url=' + par
+                r = httptools.downloadpage(desturl, post=post)
+                uri = r.url
+
+            return uri, r.code
+
+        except Exception as e:
+            return uri, str(e)
+
 def unwrap_30x_only(uri, timeout=10):
     unshortener = UnshortenIt()

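The new method exploits how the vcrypt interstitial works: the page sets a session cookie from inline JavaScript, then bounces to the destination through a meta refresh, so both values can be scraped with the two patterns above. A self-contained sketch against a made-up page body (cookie name and URLs are illustrative, not captured from vcrypt):

    import re

    sample = ("<script>document.cookie = 'sedKey=abc123; path=/';</script>\n"
              '<meta http-equiv="refresh" content="0;URL=http://example.com/dest">')

    # the same two extractions _unshorten_vcrypt performs
    cookie = re.compile(r"document.cookie\s=\s.*?'(.*)'", re.MULTILINE).findall(sample)[0]
    dest = re.search(r';URL=([^"]+)">', sample).group(1)
    print(cookie)  # sedKey=abc123; path=/
    print(dest)    # http://example.com/dest
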
@@ -483,4 +513,4 @@ def unshorten(uri, type=None, timeout=10):
     uri, status = unshortener.unshorten(uri, type=type)
     if status == 200:
         uri, status = unshortener.unwrap_30x(uri, timeout=timeout)
-        return uri, status
+    return uri, status

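Dedenting the final return means unshorten now reports the URL/status pair even when the handler comes back with something other than HTTP 200, instead of falling off the end of the function and returning None.
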
@@ -32,9 +32,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     logger.info(" url=" + url)
     encontrados.add(url)

-    import requests
-    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:59.0) Gecko/20100101 Firefox/59.0'}
-
     if host == 'gestyy':
         resp = httptools.downloadpage(
             url,
@@ -45,44 +42,12 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
             headers={'User-Agent': 'curl/7.59.0'})
         data = resp.headers.get("location", "")
     elif 'vcrypt.net' in url:
-        # req = httptools.downloadpage(url)
-        req = requests.get(url, headers=headers)
-        idata = req.content
-        print(idata)
-        patron = r"document.cookie\s=\s.*?'(.*)'"
-        # matches = re.compile(patron, re.IGNORECASE).findall(idata)
-        matches = re.finditer(patron, idata, re.MULTILINE)
-        mcookie = {}
-        for matchNum, match in enumerate(matches, start=1):
-            for c in match.group(1).split("; "):
-                c, v = c.split('=')
-                mcookie[c] = v
+        from lib import unshortenit
+        data, status = unshortenit.unshorten(url)

-        try:
-            patron = r';URL=([^\"]+)\">'
-            dest = scrapertools.get_match(idata, patron)
-            r = requests.post(dest, cookies=mcookie, headers=headers)
-            url = r.url
-        except:
-            r = requests.get(req.url, headers=headers)
-            if r.url == url:
-                url = ""
-
-        if "4snip" in url:
-            desturl = url.replace("/out/", "/outlink/")
-            import os
-            par = os.path.basename(desturl)
-            rdata = requests.post(desturl, data={'url': par})
-            url = rdata.url
-
-        if "wstream" in url:
-
-            url = url.replace("/video/", "/")
-
-        data = url
     elif 'linkup' in url:
         idata = httptools.downloadpage(url).data
-        data = scrapertoolsV2.get_match(idata, "<iframe[^<>]*src=\\'([^'>]*)\\'[^<>]*>")
+        data = scrapertoolsV2.find_single_match(idata, "<iframe[^<>]*src=\\'([^'>]*)\\'[^<>]*>")
     else:
         data = ""
     while host in url:

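The net effect in zcrypt: the hand-rolled requests-based cookie dance collapses into a single call to lib/unshortenit, which also covers the 4snip hop. For reference, a standalone sketch of that 4snip outlink hop, doing with requests what the removed code did (domain and short id are hypothetical):

    import os
    import requests

    url = 'https://4snip.example/out/abcdef'
    desturl = url.replace('/out/', '/outlink/')    # swap to the POST endpoint
    par = os.path.basename(desturl)                # the short id, 'abcdef'
    r = requests.post(desturl, data={'url': par})  # server answers with a redirect
    print(r.url)                                   # final unshortened destination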