Ip diretto per alcuni siti

This commit is contained in:
marco
2020-12-20 13:32:13 +01:00
parent e55da824fc
commit 923beb5bba
5 changed files with 33 additions and 18 deletions

View File

@@ -36,6 +36,20 @@ default_headers["Accept-Language"] = "it-IT,it;q=0.8,en-US;q=0.5,en;q=0.3"
default_headers["Accept-Charset"] = "UTF-8"
default_headers["Accept-Encoding"] = "gzip"
# Direct IP access for some hosts: when a request's domain appears here,
# downloadpage() rewrites the URL's netloc to the mapped IP and sets the
# original domain in the 'Host' header (DNS-bypass for blocked hosts).
directIP = {
    'akki.monster': '31.220.1.77',
    'akvi.club': '31.220.1.77',
    'akvi.icu': '31.220.1.77',
    'akvideo.stream': '31.220.1.77',
    'vcrypt.net': '31.220.1.77',
    'vcrypt.pw': '31.220.1.77',
    # BUGFIX: removed stray trailing double quote from the IP string
    # ('94.75.219.1"' would produce an invalid netloc when substituted).
    'vidtome.host': '94.75.219.1',
    'nored.icu': '31.220.1.77',
    'wstream.icu': '31.220.1.77',
    'wstream.video': '31.220.1.77',
}
# Maximum wait time for downloadpage, if nothing is specified
HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT = config.get_setting('httptools_timeout', default=15)
if HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT == 0: HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT = None
@@ -267,7 +281,9 @@ def downloadpage(url, **opt):
"""
url = scrapertools.unescape(url)
domain = urlparse.urlparse(url).netloc
parse = urlparse.urlparse(url)
domain = parse.netloc
from lib import requests
session = requests.session()
@@ -284,6 +300,10 @@ def downloadpage(url, **opt):
else:
req_headers = dict(opt['headers'])
if domain in directIP.keys():
req_headers['Host'] = domain
url = urlparse.urlunparse(parse._replace(netloc=directIP.get(domain)))
if opt.get('random_headers', False) or HTTPTOOLS_DEFAULT_RANDOM_HEADERS:
req_headers['User-Agent'] = random_useragent()
url = urllib.quote(url, safe="%/:=&?~#+!$,;'@()*[]")

View File

@@ -493,7 +493,8 @@ class UnshortenIt(object):
return uri, str(e)
def _unshorten_vcrypt(self, uri):
uri = uri.replace('.net', '.pw')
httptools.set_cookies({'domain': 'vcrypt.net', 'name': 'saveMe', 'value': '1'})
httptools.set_cookies({'domain': 'vcrypt.pw', 'name': 'saveMe', 'value': '1'})
try:
headers = {}
if 'myfoldersakstream.php' in uri or '/verys/' in uri:

View File

@@ -5,11 +5,11 @@
"patterns": [
{
"pattern": "https?://(?:akvideo\\.stream|akvi\\.club)/[^\\s]+[/=]([$0-9]+)(?:[\\s<]|$|\\.html)",
"pattern": "https?://(?:akvideo\\.stream|akvi\\.club|akvi\\.icu|akki\\.monster)/[^\\s]+[/=]([$0-9]+)(?:[\\s<]|$|\\.html)",
"url": "http://akvideo.stream/swembedid/\\1"
},
{
"pattern": "https?://(?:akvideo\\.stream|akvi\\.club)/(?:embed-|video/|video\\.php\\?file_code=)?([a-z0-9]+)(?:[\\s<]|$|\\.html)",
"pattern": "https?://(?:akvideo\\.stream|akvi\\.club|akvi\\.icu|akki\\.monster)/(?:embed-|video/|video\\.php\\?file_code=)?([a-z0-9]+)(?:[\\s<]|$|\\.html)",
"url": "http://akvi.club/embed-\\1.html"
}
]

View File

@@ -7,11 +7,11 @@
"find_videos": {
"patterns": [
{
"pattern": "(?:wstream\\.video)(?!<)(?:=|/)(?:video[a-zA-Z0-9.?_]*|embed[a-zA-Z0-9]*|)?(?!api|swembedid)(?:-|/|=)?(?:[a-z0-9A-Z]+/)?([a-z0-9A-Z]+)",
"pattern": "(?:wstream\\.video|wstream\\.icu)(?!<)(?:=|/)(?:video[a-zA-Z0-9.?_]*|embed[a-zA-Z0-9]*|)?(?!api|swembedid)(?:-|/|=)?(?:[a-z0-9A-Z]+/)?([a-z0-9A-Z]+)",
"url": "http://wstream.video/video.php?file_code=\\1"
},
{
"pattern": "(?:wstream\\.video)/((?:api/vcmod/fastredirect/streaming\\.php\\?id=|swembedid/)[$0-9]+)",
"pattern": "(?:wstream\\.video|wstream\\.icu)/((?:api/vcmod/fastredirect/streaming\\.php\\?id=|swembedid/)[$0-9]+)",
"url": "http://wstream.video/\\1"
},
{

View File

@@ -18,24 +18,18 @@ errorsStr = ['Sorry this file is not longer available', 'Sorry this video is una
def test_video_exists(page_url):
global headers
real_host = '116.202.226.34'
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0'],
['Host', scrapertools.get_domain_from_url(page_url)]]
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0']]
logger.debug("(page_url='%s')" % page_url)
if 'wstream' in page_url:
resp = httptools.downloadpage(page_url.replace(headers[1][1], real_host), headers=headers, verify=False)
else:
resp = httptools.downloadpage(page_url, headers=headers, verify=False)
resp = httptools.downloadpage(page_url, headers=headers, verify=False)
global data, real_url
data = resp.data
page_url = resp.url.replace(headers[1][1], real_host)
if '/streaming.php' in page_url in page_url:
code = httptools.downloadpage(page_url, headers=headers, follow_redirects=False, only_headers=True, verify=False).headers['location'].split('/')[-1].replace('.html', '')
# logger.debug('WCODE=' + code)
page_url = 'https://' + real_host + '/video.php?file_code=' + code
page_url = 'https://wstream.video/video.php?file_code=' + code
data = httptools.downloadpage(page_url, headers=headers, follow_redirects=True, verify=False).data
if 'nored.icu' in str(headers):
@@ -45,7 +39,7 @@ def test_video_exists(page_url):
dec = ''
for v in var.split(','):
dec += chr(int(v) - int(value))
page_url = 'https://' + real_host + '/video.php?file_code=' + scrapertools.find_single_match(dec, "src='([^']+)").split('/')[-1].replace('.html','')
page_url = 'https://wstream.video/video.php?file_code=' + scrapertools.find_single_match(dec, "src='([^']+)").split('/')[-1].replace('.html','')
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0'],['Host', 'wstream.video']]
new_data = httptools.downloadpage(page_url, headers=headers, follow_redirects=True, verify=False).data
logger.debug('NEW DATA: \n' + new_data)
@@ -102,7 +96,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
sitekey = scrapertools.find_multiple_matches(data, """data-sitekey=['"] *([^"']+)""")
if sitekey: sitekey = sitekey[-1]
captcha = platformtools.show_recaptcha(sitekey, page_url.replace('116.202.226.34', headers[1][1]).replace('nored.icu', headers[1][1])) if sitekey else ''
captcha = platformtools.show_recaptcha(sitekey, page_url) if sitekey else ''
possibleParam = scrapertools.find_multiple_matches(data,r"""<input.*?(?:name=["']([^'"]+).*?value=["']([^'"]*)['"]>|>)""")
if possibleParam and possibleParam[0][0]:
@@ -118,7 +112,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
platformtools.dialog_ok(config.get_localized_string(20000), config.get_localized_string(707434))
return []
headers.append(['Referer', real_url.replace('116.202.226.34', headers[1][1]).replace('nored.icu', headers[1][1])])
headers.append(['Referer', real_url])
_headers = urllib.urlencode(dict(headers))
post_data = scrapertools.find_single_match(data, r"<script type='text/javascript'>(eval.function.p,a,c,k,e,.*?)\s*</script>")