zcrypt included into unshortenit

This commit is contained in:
mac12m99
2020-02-01 14:39:30 +01:00
committed by marco
parent 3bd2560c2a
commit afee68c01d
3 changed files with 60 additions and 122 deletions

View File

@@ -14,7 +14,8 @@ from core import jsontools
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
from servers.decrypters import zcrypt
# from servers.decrypters import zcrypt
from lib import unshortenit
dict_servers_parameters = {}
@@ -40,7 +41,7 @@ def find_video_items(item=None, data=None):
if data is None:
data = httptools.downloadpage(item.url).data
data = zcrypt.get_video_url(data)
data = unshortenit.findlinks(data)
# Crea un item si no hay item
if item is None:

View File

@@ -39,6 +39,9 @@ class UnshortenIt(object):
_vcrypt_regex = r'vcrypt\.net'
_linkup_regex = r'linkup\.pro|buckler.link'
listRegex = [_adfly_regex, _linkbucks_regex, _adfocus_regex, _lnxlu_regex, _shst_regex, _hrefli_regex, _anonymz_regex,
_shrink_service_regex, _rapidcrypt_regex, _cryptmango_regex, _linkup_regex]
_maxretries = 5
_this_dir, _this_filename = os.path.split(__file__)
@@ -473,14 +476,16 @@ class UnshortenIt(object):
def _unshorten_vcrypt(self, uri):
try:
if 'myfoldersakstream.php' in uri or '/verys/' in uri:
return uri, 0
r = None
import base64, pyaes
import pyaes
def decrypt(str):
str = str.replace("_ppl_", "+").replace("_eqq_", "=").replace("_sll_", "/")
iv = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
key = "naphajU2usWUswec"
decoded = base64.b64decode(str)
decoded = b64decode(str)
decoded = decoded + '\0' * (len(decoded) % 16)
crypt_object = pyaes.AESModeOfOperationCBC(key, iv)
decrypted = ''
@@ -521,8 +526,31 @@ class UnshortenIt(object):
def _unshorten_linkup(self, uri):
    """Resolve a linkup.pro / buckler.link short URL to its real target.

    Known path styles ('/tv/', '/delta/', '/ga/', '/speedx/') are rewritten
    locally without a network round-trip; anything else is fetched and the
    destination is scraped from the page.  Returns (resolved_uri, http_code)
    on success or (last_uri, error_string) on failure.
    """
    try:
        r = None
        if '/tv/' in uri:
            uri = uri.replace('/tv/', '/tva/')
        elif 'delta' in uri:
            uri = uri.replace('/delta/', '/adelta/')
        elif '/ga/' in uri:
            # '/ga/<b64>' carries the target URL base64-encoded in the last segment
            uri = b64decode(uri.split('/')[-1]).strip()
        elif '/speedx/' in uri:
            uri = uri.replace('http://linkup.pro/speedx', 'http://speedvideo.net')
        else:
            r = httptools.downloadpage(uri, follow_redirect=True, timeout=self._timeout, cookies=False)
            uri = r.url
            link = re.findall("<iframe[^<>]*src=\\'([^'>]*)\\'[^<>]*>", r.data)
            # fix by greko start: some pages embed the id in a form action instead of an iframe
            if not link:
                link = re.findall('action="(?:[^/]+.*?/[^/]+/([a-zA-Z0-9_]+))">', r.data)
            if link:
                # re.findall returns a list; take the first match — assigning the
                # list itself would make the re.findall on `uri` below raise TypeError
                uri = link[0]
        # some results wrap the real link inside another URL; keep the inner one
        short = re.findall('^https?://.*?(https?://.*)', uri)
        if short:
            uri = short[0]
        if not r:
            # locally-rewritten URIs still need one fetch to follow redirects
            r = httptools.downloadpage(uri, follow_redirect=True, timeout=self._timeout, cookies=False)
            uri = r.url
        return uri, r.code
    except Exception as e:
        return uri, str(e)
@@ -546,3 +574,28 @@ def unshorten(uri, type=None, timeout=10):
if status == 200:
uri, status = unshortener.unwrap_30x(uri, timeout=timeout)
return uri, status
def findlinks(text):
    """Scan *text* for known shortener URLs and append their resolved targets.

    Every URL matching one of UnshortenIt's service regexes is unshortened;
    each resolved link not already present among the matches is appended to
    the text on its own line.  Returns the (possibly extended) text.
    """
    unshortener = UnshortenIt()
    matches = []
    for service_regex in unshortener.listRegex:
        # anchor the bare service pattern to an optional scheme/subdomain and a path
        pattern = r'(?:https?://(?:[\w\d]+\.)?)?(?:' + service_regex + r')/[a-zA-Z0-9_=/]+'
        matches.extend(re.findall(pattern, text))
    if len(matches) == 1:
        resolved = unshorten(matches[0])[0]
        # keep behaviour consistent with the threaded branch below: skip a
        # result that is just the original short link again
        if resolved not in matches:
            text += '\n' + resolved
    elif matches:
        # non threaded for webpdb
        # for match in matches:
        #     sh = unshorten(match)[0]
        #     text += '\n' + sh
        from concurrent import futures
        with futures.ThreadPoolExecutor() as executor:
            pending = [executor.submit(unshorten, match) for match in matches]
            for done in futures.as_completed(pending):
                resolved = done.result()[0]
                if resolved not in matches:
                    text += '\n' + resolved
    return text

View File

@@ -1,116 +0,0 @@
# -*- coding: utf-8 -*-
# Ringraziamo errmax e dr-z3r0
import re
from core import httptools, scrapertools
from platformcode import logger
from servers.decrypters import expurl
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Find known link-protector/shortener URLs inside *page_url* (raw text)
    and resolve each one to its final destination.

    Returns the original text followed by the str() of the list of resolved
    links, or the unchanged text when nothing was resolved.  The
    premium/user/password arguments are unused here; presumably they exist
    to match the standard server-connector signature — confirm with callers.
    """
    # Pre-seeded with URLs that must never be treated as video links
    # (static assets of the shortener sites); also tracks already-seen URLs.
    encontrados = {
        'https://vcrypt.net/images/logo', 'https://vcrypt.net/css/out',
        'https://vcrypt.net/images/favicon', 'https://vcrypt.net/css/open',
        'http://linkup.pro/js/jquery', 'https://linkup.pro/js/jquery'#,
        #'http://www.rapidcrypt.net/open'
    }
    devuelve = []  # resolved links collected so far
    # Each pattern captures (full URL, host keyword) for a protector service.
    patronvideos = [
        r'(https?://(gestyy|rapidteria|sprysphere)\.com/[a-zA-Z0-9]+)',
        r'(https?://(?:www\.)?(vcrypt|linkup)\.[^/]+/[^/]+/[a-zA-Z0-9_=]+)',
        r'(https?://(?:www\.)?(bit|buckler)\.[^/]+/[a-zA-Z0-9]+)',
        r'(https?://(?:www\.)?(xshield)\.[^/]+/[^/]+/[^/]+/[a-zA-Z0-9_\.]+)'
    ]
    for patron in patronvideos:
        # from core.support import dbg; dbg()
        logger.info(" find_videos #" + patron + "#")
        matches = re.compile(patron).findall(page_url)
        for url, host in matches:
            if url not in encontrados:
                logger.info(" url=" + url)
                encontrados.add(url)
                if host == 'gestyy':
                    # gestyy reveals the target only via the Location header;
                    # headers are replaced so the request goes out with curl's UA
                    resp = httptools.downloadpage(
                        url,
                        follow_redirects=False,
                        cookies=False,
                        only_headers=True,
                        replace_headers=True,
                        headers={'User-Agent': 'curl/7.59.0'})
                    data = resp.headers.get("location", "")
                elif 'xshield' in url:
                    from lib import unshortenit
                    data, status = unshortenit.unshorten(url)
                    logger.info("Data - Status zcrypt xshield.net: [%s] [%s] " %(data, status))
                elif 'vcrypt.net' in url:
                    # these vcrypt paths are known dead ends — skip them entirely
                    if 'myfoldersakstream.php' in url or '/verys/' in url:
                        continue
                    else:
                        from lib import unshortenit
                        sh = unshortenit.UnshortenIt()
                        data, status = sh.unshorten(url)
                        logger.info("Data - Status zcrypt vcrypt.net: [%s] [%s] " %(data, status))
                elif 'linkup' in url or 'bit.ly' in url or 'buckler' in url:
                    logger.info("DATA LINK {}".format(url))
                    # known linkup path styles can be rewritten locally …
                    if '/tv/' in url:
                        url = url.replace('/tv/','/tva/')
                    elif 'delta' in url:
                        url = url.replace('/delta/','/adelta/')
                    elif '/ga/' in url:
                        # '/ga/<b64>' carries the target base64-encoded in the last segment
                        import base64
                        url = base64.b64decode(url.split('/')[-1]).strip()
                    else:
                        # … otherwise fetch the page and scrape the embedded iframe src
                        idata = httptools.downloadpage(url).data
                        url = scrapertools.find_single_match(idata, "<iframe[^<>]*src=\\'([^'>]*)\\'[^<>]*>")
                        # fix by greko start: fall back to the form-action id
                        if not url:
                            url = scrapertools.find_single_match(idata, 'action="(?:[^/]+.*?/[^/]+/([a-zA-Z0-9_]+))">')
                    from lib import unshortenit
                    data, status = unshortenit.unshorten(url)
                    # some results wrap the real link inside another URL; keep the inner one
                    short = scrapertools.find_single_match(data, '^https?://.*?(https?://.*)')
                    if short:
                        data = short
                    if '/speedx/' in data: # added for the speedvideo server
                        data = data.replace('http://linkup.pro/speedx', 'http://speedvideo.net')
                    # fix by greko end
                else:
                    data = ""
                    # follow Location redirects while they stay on the same host;
                    # the first off-host hop (or a final page body) is the result
                    while host in url:
                        resp = httptools.downloadpage(
                            url, follow_redirects=False)
                        url = resp.headers.get("location", "")
                        if not url:
                            data = resp.data
                        elif host not in url:
                            data = url
                if data:
                    devuelve.append(data)
            else:
                logger.info(" url duplicada=" + url)
    # Second pass: plain ad-shorteners resolved via expurl, single-group pattern.
    patron = r"""(https?://(?:www\.)?(?:threadsphere\.bid|adf\.ly|q\.gs|j\.gs|u\.bb|ay\.gy|linkbucks\.com|any\.gs|cash4links\.co|cash4files\.co|dyo\.gs|filesonthe\.net|goneviral\.com|megaline\.co|miniurls\.co|qqc\.co|seriousdeals\.net|theseblogs\.com|theseforums\.com|tinylinks\.co|tubeviral\.com|ultrafiles\.net|urlbeat\.net|whackyvidz\.com|yyv\.co|adfoc\.us|lnx\.lu|sh\.st|href\.li|anonymz\.com|shrink-service\.it|rapidcrypt\.net|ecleneue\.com)/[^"']+)"""
    logger.info(" find_videos #" + patron + "#")
    matches = re.compile(patron).findall(page_url)
    for url in matches:
        if url not in encontrados:
            # rapidcrypt open/verys pages are not real targets — skip
            if 'https://rapidcrypt.net/open/' in url or 'https://rapidcrypt.net/verys/' in url:
                continue
            logger.info(" url=" + url)
            encontrados.add(url)
            long_url = expurl.expand_url(url)
            if long_url:
                devuelve.append(long_url)
        else:
            logger.info(" url duplicada=" + url)
    # Append every resolved link to the original text so downstream regexes can find them.
    ret = page_url+" "+str(devuelve) if devuelve else page_url
    logger.info(" RET=" + str(ret))
    return ret