Merge branch 'stable' of github.com:kodiondemand/addon into stable
@@ -1,13 +1,10 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per AnimeSaturn
# Thanks to 4l3x87
# ----------------------------------------------------------

from core import support

# __channel__ = "animesaturn"
# host = support.config.get_setting("channel_host", __channel__)
host = support.config.get_channel_url()
headers={'X-Requested-With': 'XMLHttpRequest'}

@@ -31,11 +28,8 @@ def search(item, texto):
search = texto
item.contentType = 'tvshow'
anime = True
patron = r'href="(?P<url>[^"]+)"[^>]+>[^>]+>(?P<title>[^<|(]+)(?:(?P<lang>\(([^\)]+)\)))?<|\)'
patron = r'<a href="(?P<url>[^"]+)"[^>]+> <span>(?P<title>[^<\(]+)(?:\s*\((?P<year>\d+)\))?(?:\s*\((?P<lang>[A-Za-z-]+)\))?'
action = 'check'
def itemHook(item):
item.url = item.url.replace('www.','')
return item
return locals()

@@ -59,37 +53,52 @@ def newest(categoria):

@support.scrape
def menu(item):
patronMenu = r'u>(?P<title>[^<]+)<u>(?P<url>.*?)</div> </div>'
patronMenu = r'<div class="col-md-13 bg-dark-as-box-shadow p-2 text-white text-center">(?P<title>[^"<]+)<(?P<url>.*?)(?:"lista-top"|"clearfix")'
action = 'peliculas'
def itemHook(item):
item.url = item.url.replace('www.','')
return item
item.args = 'top'
return locals()

@support.scrape
def peliculas(item):
anime = True

deflang= 'Sub-ITA'
if item.args == 'updated':
post = "page=" + str(item.page if item.page else 1) if item.page > 1 else None
page= support.match(item, patron=r'data-page="(\d+)" title="Next">', post=post, headers=headers).match
patron = r'<img alt="[^"]+" src="(?P<thumb>[^"]+)" [^>]+></div></a>\s*<a href="(?P<url>[^"]+)"><div class="testo">(?P<title>[^\(<]+)(?:(?P<lang>\(([^\)]+)\)))?</div></a>\s*<a href="[^"]+"><div class="testo2">[^\d]+(?P<episode>\d+)</div></a>'
if page: nextpage = page
item.contentType='episode'
action = 'findvideos'
elif item.args == 'top':
data = item.url
patron = r'<a href="(?P<url>[^"]+)">[^>]+>(?P<title>[^<\(]+)(?:\((?P<year>[0-9]+)\))?(?:\((?P<lang>[A-Za-z]+)\))?</div></a><div class="numero">(?P<title2>[^<]+)</div>.*?src="(?P<thumb>[^"]+)"'
action = 'check'
action = 'check'

page = None
post = "page=" + str(item.page if item.page else 1) if item.page > 1 else None

if item.args == 'top':
data=item.url
patron = r'light">(?P<title2>[^<]+)</div>\s(?P<title>[^<]+)[^>]+>[^>]+>\s<a href="(?P<url>[^"]+)">(?:<a[^>]+>|\s*)<img alt="[^"]+" src="(?P<thumb>[^"]+)"'

else:
pagination = ''
if item.args == 'incorso': patron = r'"slider_title"\s*href="(?P<url>[^"]+)"><img src="(?P<thumb>[^"]+)"[^>]+>(?P<title>[^\(<]+)(?:\((?P<year>\d+)\))?</a>'
else: patron = r'href="(?P<url>[^"]+)"[^>]+>[^>]+>(?P<title>.+?)(?:\((?P<lang>ITA)\))?(?:(?P<year>\((\d+)\)))?</span>'
action = 'check'
def itemHook(item):
item.url = item.url.replace('www.','')
return item
data = support.match(item, post=post, headers=headers).data
if item.args == 'updated':
page= support.match(data, patron=r'data-page="(\d+)" title="Next">').match
patron = r'<a href="(?P<url>[^"]+)" title="(?P<title>[^"(]+)(?:\s*\((?P<year>\d+)\))?(?:\s*\((?P<lang>[A-Za-z-]+)\))?"><img src="(?P<thumb>[^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s\s*(?P<type>[^\s]+)\s*(?P<episode>\d+)'
typeContentDict = {'Movie':'movie', 'Episodio':'episode'} #item.contentType='episode'
action = 'findvideos'
def itemlistHook(itemlist):
if page:
itemlist.append(
support.Item(channel=item.channel,
action = item.action,
contentType=item.contentType,
title=support.typo(support.config.get_localized_string(30992), 'color kod bold'),
url=item.url,
page= page,
args=item.args,
thumbnail=support.thumb()))
return itemlist
else:
pagination = ''
if item.args == 'incorso':
patron = r'<a href="(?P<url>[^"]+)"[^>]+>(?P<title>[^<(]+)(?:\s*\((?P<year>\d+)\))?(?:\s*\((?P<lang>[A-za-z-]+)\))?</a>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s*<img width="[^"]+" height="[^"]+" src="(?P<thumb>[^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<plot>[^<]+)<'
else:
patron = r'href="(?P<url>[^"]+)"[^>]+>[^>]+>(?P<title>.+?)(?:\((?P<lang>ITA)\))?(?:(?P<year>\((\d+)\)))?</span>'

return locals()

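The `patron` strings in the channel functions above are ordinary regular expressions whose named groups (`url`, `title`, `year`, `lang`, `episode`, ...) the `support.scrape` decorator turns into list items. A minimal, self-contained sketch of that idea follows; the sample markup and the `parse_items` helper are illustrative only and are not part of the addon:

import re

# Illustrative markup, shaped like the search results the new patron above targets.
SAMPLE_HTML = '<a href="https://example.org/anime/one-piece" class="badge"> <span>One Piece (1999) (ITA)</span></a>'

# Same style of pattern as the channel's patron: named groups become item fields.
PATRON = r'<a href="(?P<url>[^"]+)"[^>]+> <span>(?P<title>[^<\(]+)(?:\s*\((?P<year>\d+)\))?(?:\s*\((?P<lang>[A-Za-z-]+)\))?'

def parse_items(html, patron):
    # Mirrors what the scrape decorator does with each match: collect the named groups.
    return [m.groupdict() for m in re.finditer(patron, html)]

print(parse_items(SAMPLE_HTML, PATRON))
# [{'url': 'https://example.org/anime/one-piece', 'title': 'One Piece ', 'year': '1999', 'lang': 'ITA'}]
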
@@ -110,21 +119,18 @@ def check(item):
@support.scrape
def episodios(item):
if item.contentType != 'movie': anime = True
patron = r'<strong" style="[^"]+">(?P<title>[^<]+)</b></strong></td>\s*<td style="[^"]+"><a href="(?P<url>[^"]+)"'
def itemHook(item):
item.url = item.url.replace('www.','')
return item
patron = r'<a href="(?P<url>[^"]+)"[^>]+>\s*(?P<title>[^<]+)</a>'
return locals()

def findvideos(item):
support.log()
itemlist = []
# support.dbg()
urls = support.match(item, patron=r'<a href="([^"]+)"><div class="downloadestreaming">', headers=headers, debug=False).matches
if urls:
links = support.match(urls[0].replace('www.',''), patron=r'(?:<source type="[^"]+"\s*src=|file:\s*)"([^"]+)"', headers=headers, debug=False)
for link in links.matches:
url = support.match(item, patron=r'<a href="([^"]+)">[^>]+>[^>]+>G', headers=headers, debug=False).match
support.log(url)
if url:
links = support.match(url, patron=r'(?:<source type="[^"]+"\s*src=|file:\s*)"([^"]+)"', headers=headers, debug=False).matches
for link in links:
itemlist.append(
support.Item(channel=item.channel,
action="play",

@@ -136,7 +142,7 @@ def findvideos(item):
show=item.show,
contentType=item.contentType,
folder=False))
return support.server(item, links, itemlist=itemlist)
return support.server(item, itemlist=itemlist)

@@ -106,7 +106,7 @@ def episodios(item):
anime = True
data = support.match(item, headers=headers).data
if 'https://vcrypt.net' in data:
patron = r'(?:<br /> |<p>)(?P<title>[^<]+)<a href="(?P<url>[^"]+)"'
patron = r'(?: /> |<p>)(?P<title>[^<]+)<a (?P<url>.*?)(?:<br|</p)'
else:
patron = r'<br />\s*<a href="(?P<url>[^"]+)" target="_blank" rel="noopener[^>]+>(?P<title>[^<]+)</a>'

@@ -43,7 +43,7 @@ HTTPTOOLS_DEFAULT_RANDOM_HEADERS = False

domainCF = list()
channelsCF = ['guardaserieclick', 'casacinema', 'dreamsub', 'ilgeniodellostreaming', 'piratestreaming', 'altadefinizioneclick', 'altadefinizione01_link']
otherCF = ['altadefinizione-nuovo.link', 'wstream.video', 'akvideo.stream', 'backin.net']
otherCF = ['altadefinizione-nuovo.link', 'wstream.video', 'akvideo.stream', 'backin.net', 'vcrypt.net']
for ch in channelsCF:
domainCF.append(urlparse.urlparse(config.get_channel_url(name=ch)).hostname)
domainCF.extend(otherCF)

@@ -261,7 +261,7 @@ def downloadpage(url, **opt):
domain = urlparse.urlparse(url).netloc
global domainCF
CF = False
if domain in domainCF:
if domain in domainCF or opt.get('cf', False):
from lib import cloudscraper
session = cloudscraper.create_scraper()
CF = True

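The last hunk above makes the cloudscraper path opt-in per request: besides the `domainCF` whitelist, any caller can now pass `cf=True` to `downloadpage` (the vcrypt unshortener further down uses exactly that when a redirect loops). A hedged sketch of such a call from inside the addon; the URL is a placeholder, and `.code`/`.data` are the response fields this codebase normally exposes:

from core import httptools  # the module patched in the hunk above

# Force the Cloudflare-aware (cloudscraper) session even if the domain
# is not listed in domainCF; 'cf' is the new keyword option added above.
response = httptools.downloadpage('https://example.org/protected-page', cf=True)
print(response.code, len(response.data or ''))
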
@@ -54,7 +54,7 @@ except ImportError:
# ------------------------------------------------------------------------------- #

__version__ = '1.2.30'
__version__ = '1.2.32'

# ------------------------------------------------------------------------------- #

@@ -148,6 +148,15 @@ class CloudScraper(Session):
def __getstate__(self):
return self.__dict__

# ------------------------------------------------------------------------------- #
# Raise an Exception with no stacktrace and reset depth counter.
# ------------------------------------------------------------------------------- #

def simpleException(self, exception, msg):
self._solveDepthCnt = 0
sys.tracebacklimit = 0
raise exception(msg)

# ------------------------------------------------------------------------------- #
# debug the request via the response
# ------------------------------------------------------------------------------- #
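The `simpleException()` helper added above captures the pattern that the remaining cloudscraper hunks keep substituting in: reset the challenge-depth counter, suppress the traceback, and raise the requested exception. A standalone sketch of that pattern (stand-in class and exception, not the library's own code):

import sys

class CloudflareLoopProtection(Exception):
    # Stand-in for the cloudscraper exception classes referenced in this diff.
    pass

class Solver:
    def __init__(self):
        self._solveDepthCnt = 3  # pretend a few solve attempts already happened

    def simpleException(self, exception, msg):
        # Same shape as the helper added above: reset depth, hide traceback, raise.
        self._solveDepthCnt = 0
        sys.tracebacklimit = 0
        raise exception(msg)

try:
    Solver().simpleException(CloudflareLoopProtection,
                             "!!Loop Protection!! We have tried to solve 3 time(s) in a row.")
except CloudflareLoopProtection as e:
    print(e)
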
@@ -219,9 +228,8 @@ class CloudScraper(Session):
if self._solveDepthCnt >= self.solveDepth:
_ = self._solveDepthCnt
self._solveDepthCnt = 0
sys.tracebacklimit = 0
raise CloudflareLoopProtection(
self.simpleException(
CloudflareLoopProtection,
"!!Loop Protection!! We have tried to solve {} time(s) in a row.".format(_)
)

@@ -303,8 +311,10 @@ class CloudScraper(Session):

def is_Challenge_Request(self, resp):
if self.is_Firewall_Blocked(resp):
sys.tracebacklimit = 0
raise CloudflareCode1020('Cloudflare has blocked this request (Code 1020 Detected).')
self.simpleException(
CloudflareCode1020,
'Cloudflare has blocked this request (Code 1020 Detected).'
)

if self.is_reCaptcha_Challenge(resp) or self.is_IUAM_Challenge(resp):
return True
@@ -317,16 +327,29 @@ class CloudScraper(Session):
def IUAM_Challenge_Response(self, body, url, interpreter):
try:
challengeUUID = re.search(
r'id="challenge-form" action="(?P<challengeUUID>\S+)"',
body, re.M | re.DOTALL
).groupdict().get('challengeUUID', '')
formPayload = re.search(
r'<form (?P<form>id="challenge-form" action="(?P<challengeUUID>.*?'
r'__cf_chl_jschl_tk__=\S+)"(.*?)</form>)',
body,
re.M | re.DOTALL
).groupdict()

payload = OrderedDict(re.findall(r'name="(r|jschl_vc|pass)"\svalue="(.*?)"', body))
if not all(key in formPayload for key in ['form', 'challengeUUID']):
self.simpleException(
CloudflareIUAMError,
"Cloudflare IUAM detected, unfortunately we can't extract the parameters correctly."
)

payload = OrderedDict(
re.findall(
r'name="(r|jschl_vc|pass)"\svalue="(.*?)"',
formPayload['form']
)
)

except AttributeError:
sys.tracebacklimit = 0
raise CloudflareIUAMError(
self.simpleException(
CloudflareIUAMError,
"Cloudflare IUAM detected, unfortunately we can't extract the parameters correctly."
)

@@ -337,8 +360,8 @@ class CloudScraper(Session):
interpreter
).solveChallenge(body, hostParsed.netloc)
except Exception as e:
sys.tracebacklimit = 0
raise CloudflareIUAMError(
self.simpleException(
CloudflareIUAMError,
'Unable to parse Cloudflare anti-bots page: {}'.format(
getattr(e, 'message', e)
)

@@ -348,7 +371,7 @@ class CloudScraper(Session):
'url': '{}://{}{}'.format(
hostParsed.scheme,
hostParsed.netloc,
self.unescape(challengeUUID)
self.unescape(formPayload['challengeUUID'])
),
'data': payload
}
@@ -359,34 +382,62 @@ class CloudScraper(Session):
def reCaptcha_Challenge_Response(self, provider, provider_params, body, url):
try:
payload = re.search(
r'(name="r"\svalue="(?P<r>\S+)"|).*?challenge-form" action="(?P<challengeUUID>\S+)".*?'
r'data-ray="(?P<data_ray>\S+)".*?data-sitekey="(?P<site_key>\S+)"',
body, re.M | re.DOTALL
formPayload = re.search(
r'<form class="challenge-form" (?P<form>id="challenge-form" '
r'action="(?P<challengeUUID>.*?__cf_chl_captcha_tk__=\S+)"(.*?)</form>)',
body,
re.M | re.DOTALL
).groupdict()
except (AttributeError):
sys.tracebacklimit = 0
raise CloudflareReCaptchaError(

if not all(key in formPayload for key in ['form', 'challengeUUID']):
self.simpleException(
CloudflareReCaptchaError,
"Cloudflare reCaptcha detected, unfortunately we can't extract the parameters correctly."
)

payload = OrderedDict(
re.findall(
r'(name="r"\svalue|data-ray|data-sitekey|name="cf_captcha_kind"\svalue)="(.*?)"',
formPayload['form']
)
)

captchaType = 'reCaptcha' if payload['name="cf_captcha_kind" value'] == 're' else 'hCaptcha'

except (AttributeError, KeyError):
self.simpleException(
CloudflareReCaptchaError,
"Cloudflare reCaptcha detected, unfortunately we can't extract the parameters correctly."
)

captchaResponse = reCaptcha.dynamicImport(
provider.lower()
).solveCaptcha(
captchaType,
url,
payload['data-sitekey'],
provider_params
)

dataPayload = OrderedDict([
('r', payload.get('name="r" value', '')),
('cf_captcha_kind', payload['name="cf_captcha_kind" value']),
('id', payload.get('data-ray')),
('g-recaptcha-response', captchaResponse)
])

if captchaType == 'hCaptcha':
dataPayload.update({'h-captcha-response': captchaResponse})

hostParsed = urlparse(url)

return {
'url': '{}://{}{}'.format(
hostParsed.scheme,
hostParsed.netloc,
self.unescape(payload.get('challengeUUID', ''))
self.unescape(formPayload['challengeUUID'])
),
'data': OrderedDict([
('r', payload.get('r', '')),
('id', payload.get('data_ray')),
(
'g-recaptcha-response',
reCaptcha.dynamicImport(
provider.lower()
).solveCaptcha(url, payload.get('site_key'), provider_params)
)
])
'data': dataPayload
}

# ------------------------------------------------------------------------------- #

@@ -412,8 +463,8 @@ class CloudScraper(Session):
# ------------------------------------------------------------------------------- #

if not self.recaptcha or not isinstance(self.recaptcha, dict) or not self.recaptcha.get('provider'):
sys.tracebacklimit = 0
raise CloudflareReCaptchaProvider(
self.simpleException(
CloudflareReCaptchaProvider,
"Cloudflare reCaptcha detected, unfortunately you haven't loaded an anti reCaptcha provider "
"correctly via the 'recaptcha' parameter."
)

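The check above refuses to continue unless a captcha-solving provider was configured through the `recaptcha` parameter. A hedged sketch of how the addon could wire one up through the bundled copy of cloudscraper patched in this commit; the provider name matches the 2captcha module changed below, and the API key is a placeholder:

from lib import cloudscraper  # bundled copy patched by this commit

# 'recaptcha' is the parameter the check above validates; provider/api_key
# values are illustrative placeholders, not real credentials.
scraper = cloudscraper.create_scraper(
    recaptcha={
        'provider': '2captcha',
        'api_key': 'YOUR_2CAPTCHA_KEY'
    }
)
response = scraper.get('https://example.org/')  # CloudScraper subclasses requests.Session
print(response.status_code)
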
@@ -448,8 +499,10 @@ class CloudScraper(Session):
if isinstance(delay, (int, float)):
self.delay = delay
except (AttributeError, ValueError):
sys.tracebacklimit = 0
raise CloudflareIUAMError("Cloudflare IUAM possibility malformed, issue extracing delay value.")
self.simpleException(
CloudflareIUAMError,
"Cloudflare IUAM possibility malformed, issue extracing delay value."
)

sleep(self.delay)

@@ -507,6 +560,7 @@ class CloudScraper(Session):

if not challengeSubmitResponse.is_redirect:
return challengeSubmitResponse

else:
cloudflare_kwargs = deepcopy(kwargs)
cloudflare_kwargs['headers'] = updateAttr(

@@ -535,6 +589,7 @@ class CloudScraper(Session):
# ------------------------------------------------------------------------------- #

return self.request(resp.request.method, resp.url, **kwargs)

# ------------------------------------------------------------------------------- #

@classmethod

@@ -587,8 +642,8 @@ class CloudScraper(Session):
cookie_domain = d
break
else:
sys.tracebacklimit = 0
raise CloudflareIUAMError(
cls.simpleException(
CloudflareIUAMError,
"Unable to find Cloudflare cookies. Does the site actually "
"have Cloudflare IUAM (I'm Under Attack Mode) enabled?"
)

@@ -2,7 +2,6 @@ from __future__ import absolute_import
import requests

from ..exceptions import (
reCaptchaServiceUnavailable,
reCaptchaAPIError,

@@ -81,7 +80,7 @@ class captchaSolver(reCaptcha):
}
}

if response.json().get('status') is False and response.json().get('request') in errors.get(request_type):
if response.json().get('status') == 0 and response.json().get('request') in errors.get(request_type):
raise reCaptchaAPIError(
'{} {}'.format(
response.json().get('request'),

@@ -113,7 +112,8 @@ class captchaSolver(reCaptcha):
'action': 'reportbad',
'id': jobID,
'json': '1'
}
},
timeout=30
),
check_success=_checkRequest,
step=5,

@@ -149,7 +149,8 @@ class captchaSolver(reCaptcha):
'action': 'get',
'id': jobID,
'json': '1'
}
},
timeout=30
),
check_success=_checkRequest,
step=5,

@@ -165,7 +166,7 @@ class captchaSolver(reCaptcha):

# ------------------------------------------------------------------------------- #

def requestSolve(self, site_url, site_key):
def requestSolve(self, captchaType, url, siteKey):
def _checkRequest(response):
if response.ok and response.json().get("status") == 1 and response.json().get('request'):
return response
@@ -174,18 +175,29 @@ class captchaSolver(reCaptcha):
return None

data = {
'key': self.api_key,
'pageurl': url,
'json': 1,
'soft_id': 5507698
}

data.update(
{
'method': 'userrcaptcha',
'googlekey': siteKey
} if captchaType == 'reCaptcha' else {
'method': 'hcaptcha',
'sitekey': siteKey
}
)

response = polling.poll(
lambda: self.session.post(
'{}/in.php'.format(self.host),
data={
'key': self.api_key,
'method': 'userrecaptcha',
'googlekey': site_key,
'pageurl': site_url,
'json': '1',
'soft_id': '5507698'
},
allow_redirects=False
data=data,
allow_redirects=False,
timeout=30
),
check_success=_checkRequest,
step=5,

@@ -201,7 +213,7 @@ class captchaSolver(reCaptcha):

# ------------------------------------------------------------------------------- #

def getCaptchaAnswer(self, site_url, site_key, reCaptchaParams):
def getCaptchaAnswer(self, captchaType, url, siteKey, reCaptchaParams):
jobID = None

if not reCaptchaParams.get('api_key'):

@@ -215,7 +227,7 @@ class captchaSolver(reCaptcha):
self.session.proxies = reCaptchaParams.get('proxies')

try:
jobID = self.requestSolve(site_url, site_key)
jobID = self.requestSolve(captchaType, url, siteKey)
return self.requestJob(jobID)
except polling.TimeoutException:
try:

@@ -12,6 +12,7 @@ except ImportError:
)

from ..exceptions import (
reCaptchaException,
reCaptchaServiceUnavailable,
reCaptchaAPIError,
reCaptchaTimeout,

@@ -143,7 +144,7 @@ class captchaSolver(reCaptcha):

# ------------------------------------------------------------------------------- #

def requestSolve(self, site_url, site_key):
def requestSolve(self, url, siteKey):
def _checkRequest(response):
if response.ok and response.text.startswith('{') and response.json().get('captchaid'):
return response

@@ -159,9 +160,9 @@ class captchaSolver(reCaptcha):
'apikey': self.api_key,
'action': 'usercaptchaupload',
'interactive': 1,
'file-upload-01': site_key,
'file-upload-01': siteKey,
'oldsource': 'recaptchav2',
'pageurl': site_url,
'pageurl': url,
'maxtimeout': self.maxtimeout,
'json': 1
},

@@ -179,12 +180,17 @@ class captchaSolver(reCaptcha):

# ------------------------------------------------------------------------------- #

def getCaptchaAnswer(self, site_url, site_key, reCaptchaParams):
def getCaptchaAnswer(self, captchaType, url, siteKey, reCaptchaParams):
jobID = None

if not reCaptchaParams.get('api_key'):
raise reCaptchaParameter("9kw: Missing api_key parameter.")

if captchaType == 'hCaptcha':
raise reCaptchaException(
'Provider does not support hCaptcha.'
)

self.api_key = reCaptchaParams.get('api_key')

if reCaptchaParams.get('maxtimeout'):

@@ -194,7 +200,7 @@ class captchaSolver(reCaptcha):
self.session.proxies = reCaptchaParams.get('proxies')

try:
jobID = self.requestSolve(site_url, site_key)
jobID = self.requestSolve(url, siteKey)
return self.requestJob(jobID)
except polling.TimeoutException:
raise reCaptchaTimeout(

@@ -37,10 +37,10 @@ class reCaptcha(ABC):
# ------------------------------------------------------------------------------- #

@abc.abstractmethod
def getCaptchaAnswer(self, site_url, site_key, reCaptchaParams):
def getCaptchaAnswer(self, captchaType, url, siteKey, reCaptchaParams):
pass

# ------------------------------------------------------------------------------- #

def solveCaptcha(self, site_url, site_key, reCaptchaParams):
return self.getCaptchaAnswer(site_url, site_key, reCaptchaParams)
def solveCaptcha(self, captchaType, url, siteKey, reCaptchaParams):
return self.getCaptchaAnswer(captchaType, url, siteKey, reCaptchaParams)

@@ -1,16 +1,22 @@
from __future__ import absolute_import

from ..exceptions import reCaptchaParameter
from ..exceptions import (
reCaptchaParameter,
reCaptchaTimeout,
reCaptchaAPIError
)

try:
from python_anticaptcha import (
AnticaptchaClient,
NoCaptchaTaskProxylessTask
NoCaptchaTaskProxylessTask,
HCaptchaTaskProxyless,
AnticaptchaException
)
except ImportError:
raise ImportError(
"Please install the python module 'python_anticaptcha' via pip or download it from "
"https://github.com/ad-m/python-anticaptcha"
"Please install/upgrade the python module 'python_anticaptcha' via "
"pip install python-anticaptcha or https://github.com/ad-m/python-anticaptcha/"
)

from . import reCaptcha

@@ -23,7 +29,7 @@ class captchaSolver(reCaptcha):

# ------------------------------------------------------------------------------- #

def getCaptchaAnswer(self, site_url, site_key, reCaptchaParams):
def getCaptchaAnswer(self, captchaType, url, siteKey, reCaptchaParams):
if not reCaptchaParams.get('api_key'):
raise reCaptchaParameter("anticaptcha: Missing api_key parameter.")

@@ -32,16 +38,30 @@ class captchaSolver(reCaptcha):
if reCaptchaParams.get('proxy'):
client.session.proxies = reCaptchaParams.get('proxies')

task = NoCaptchaTaskProxylessTask(site_url, site_key)
captchaMap = {
'reCaptcha': NoCaptchaTaskProxylessTask,
'hCaptcha': HCaptchaTaskProxyless
}

task = captchaMap[captchaType](url, siteKey)

if not hasattr(client, 'createTaskSmee'):
raise NotImplementedError(
"Please upgrade 'python_anticaptcha' via pip or download it from "
"https://github.com/ad-m/python-anticaptcha"
"https://github.com/ad-m/python-anticaptcha/tree/hcaptcha"
)

job = client.createTaskSmee(task)
return job.get_solution_response()

try:
job.join(maximum_time=180)
except (AnticaptchaException) as e:
raise reCaptchaTimeout('{}'.format(getattr(e, 'message', e)))

if 'solution' in job._last_result:
return job.get_solution_response()
else:
raise reCaptchaAPIError('Job did not return `solution` key in payload.')

# ------------------------------------------------------------------------------- #

@@ -12,6 +12,7 @@ except ImportError:
)

from ..exceptions import (
reCaptchaException,
reCaptchaServiceUnavailable,
reCaptchaAccountError,
reCaptchaTimeout,

@@ -154,7 +155,7 @@ class captchaSolver(reCaptcha):

# ------------------------------------------------------------------------------- #

def requestSolve(self, site_url, site_key):
def requestSolve(self, url, siteKey):
def _checkRequest(response):
if response.ok and response.json().get("is_correct") and response.json().get('captcha'):
return response

@@ -172,8 +173,8 @@ class captchaSolver(reCaptcha):
'password': self.password,
'type': '4',
'token_params': json.dumps({
'googlekey': site_key,
'pageurl': site_url
'googlekey': siteKey,
'pageurl': url
})
},
allow_redirects=False

@@ -192,7 +193,7 @@ class captchaSolver(reCaptcha):

# ------------------------------------------------------------------------------- #

def getCaptchaAnswer(self, site_url, site_key, reCaptchaParams):
def getCaptchaAnswer(self, captchaType, url, siteKey, reCaptchaParams):
jobID = None

for param in ['username', 'password']:

@@ -202,11 +203,16 @@ class captchaSolver(reCaptcha):
)
setattr(self, param, reCaptchaParams.get(param))

if captchaType == 'hCaptcha':
raise reCaptchaException(
'Provider does not support hCaptcha.'
)

if reCaptchaParams.get('proxy'):
self.session.proxies = reCaptchaParams.get('proxies')

try:
jobID = self.requestSolve(site_url, site_key)
jobID = self.requestSolve(url, siteKey)
return self.requestJob(jobID)
except polling.TimeoutException:
try:

@@ -19,8 +19,7 @@
"ECDHE-RSA-CHACHA20-POLY1305",
"AES128-GCM-SHA256",
"AES256-GCM-SHA384",
"AES128-SHA",
"AES256-SHA"
"AES128-SHA"
],
"releases": {
"Chrome/50.0.0.0": {

@@ -12825,8 +12824,7 @@
"ECDHE-ECDSA-AES128-SHA",
"DHE-RSA-AES128-SHA",
"DHE-RSA-AES256-SHA",
"AES128-SHA",
"AES256-SHA"
"AES128-SHA"
],
"releases": {
"Firefox/50.0": {

@@ -482,6 +482,7 @@ class UnshortenIt(object):
def _unshorten_vcrypt(self, uri):
uri = uri.replace('.net','.pw')
try:
headers = {}
if 'myfoldersakstream.php' in uri or '/verys/' in uri:

@@ -519,12 +520,18 @@ class UnshortenIt(object):
uri = ''
logger.info('IP bannato da vcrypt, aspetta un ora')
else:
prev_uri = uri
uri = r.headers['location']
if uri == prev_uri:
logger.info('Use Cloudscraper')
uri = httptools.downloadpage(uri, timeout=self._timeout, headers=headers, follow_redirects=False, cf=True).headers['location']

if "4snip" in uri:
if 'out_generator' in uri:
uri = re.findall('url=(.*)$', uri)[0]
elif '/decode/' in uri:
uri = decrypt(uri.split('/')[-1])
uri = httptools.downloadpage(uri, follow_redirects=True).url
# uri = decrypt(uri.split('/')[-1])

return uri, r.code if r else 200
except Exception as e:

@@ -10,10 +10,6 @@
"pattern": "wstream\\.video(?!<)(?:=|/)(?:video[a-zA-Z0-9.?_]*|embed[a-zA-Z0-9]*|)?(?!api|swembedid)(?:-|/|=)?(?:[a-z0-9A-Z]+/)?([a-z0-9A-Z]+)",
"url": "https://wstream.video/video.php?file_code=\\1"
},
{
"pattern": "wstream\\.video/(?!api)([a-zA-Z0-9/]+.html)",
"url": "https://wstream.video/\\1"
},
{
"pattern": "wstream\\.video/(api/vcmod/fastredirect/streaming\\.php\\?id=[$0-9]+)",
"url": "https://wstream.video/\\1"

@@ -52,11 +52,16 @@ TAG_TYPE = "Type"
def renumber(itemlist, item='', typography=''):
log()
dict_series = load(itemlist[0])
dict_series = load(itemlist[0]) if len(itemlist) > 0 else {}

if item:
item.channel = item.from_channel if item.from_channel else item.channel
title = item.fulltitle.rstrip()
if item.channel in item.channel_prefs and TAG_TVSHOW_RENUMERATE in item.channel_prefs[item.channel] and title not in dict_series:
from core.videolibrarytools import check_renumber_options
from specials.videolibrary import update_videolibrary
check_renumber_options(item)
update_videolibrary(item)

if inspect.stack()[2][3] == 'find_episodes':
return itemlist