Fix altadefinizionecommunity
@@ -6,8 +6,6 @@ from lib.fakeMail import Gmailnator
 from platformcode import config, platformtools, logger
 from core import scrapertools, httptools
-
-from lib import cloudscraper


 def findhost(url):
     return support.match(url, patron=r'<a href="([^"]+)/\w+">Accedi').match
@@ -17,7 +15,6 @@ host = config.get_channel_url(findhost)
 register_url = 'https://altaregistrazione.com'
 headers = {'Referer': host, 'x-requested-with': 'XMLHttpRequest'}

-cf = cloudscraper.create_scraper()

 @support.menu
 def mainlist(item):
@@ -44,8 +41,8 @@ def mainlist(item):


 def login():
-    r = cf.get(host)
-    Token = support.match(r.text, patron=r'name=\s*"_token"\s*value=\s*"([^"]+)').match
+    r = support.httptools.downloadpage(host, cloudscraper=True)
+    Token = support.match(r.data, patron=r'name=\s*"_token"\s*value=\s*"([^"]+)', cloudscraper=True).match
     if 'id="logged"' in r.text:
         logger.info('Già loggato')
     else:
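Note on the hunk above: only the page fetch changes, the CSRF token is still scraped from the login form with the same patron regex. A standalone check of that regex with Python's re module, run against an illustrative HTML snippet (the markup below is invented, not taken from the live site):

import re

# Illustrative form markup; the real page layout may differ.
html = '<form method="POST"><input type="hidden" name="_token" value="a1b2c3d4e5"></form>'
token = re.search(r'name=\s*"_token"\s*value=\s*"([^"]+)', html).group(1)
print(token)  # -> a1b2c3d4e5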
@@ -55,7 +52,7 @@ def login():
                 'email': config.get_setting('username', channel='altadefinizionecommunity'),
                 'password':config.get_setting('password', channel='altadefinizionecommunity')}

-        r = cf.post(host + '/login', json=post, headers={'referer': host})
+        r = support.httptools.downloadpage(host + '/login', post=post, headers={'referer': host}, cloudscraper=True)
         if not r.status_code in [200, 302] or 'Email o Password non validi' in r.text:
             platformtools.dialog_ok('AltadefinizioneCommunity', 'Username/password non validi')
             return False
@@ -95,7 +92,7 @@ def registerOrLogin():
     reg = platformtools.dialog_register(register_url, email=True, password=True, email_default=mailbox.address, password_default=randPsw)
     if not reg:
         return False
-    regPost = httptools.downloadpage(register_url, post={'email': reg['email'], 'password': reg['password']})
+    regPost = httptools.downloadpage(register_url, post={'email': reg['email'], 'password': reg['password']}, cloudscraper=True)

     if regPost.url == register_url:
         error = scrapertools.htmlclean(scrapertools.find_single_match(regPost.data, 'Impossibile proseguire.*?</div>'))
@@ -111,7 +108,7 @@ def registerOrLogin():
     if mail:
         checkUrl = scrapertools.find_single_match(mail.body, '<a href="([^"]+)[^>]+>Verifica').replace(r'\/', '/')
         logger.debug('CheckURL: ' + checkUrl)
-        httptools.downloadpage(checkUrl)
+        httptools.downloadpage(checkUrl, cloudscraper=True)
         config.set_setting('username', mailbox.address, channel='altadefinizionecommunity')
         config.set_setting('password', randPsw, channel='altadefinizionecommunity')
         platformtools.dialog_ok('AltadefinizioneCommunity',
@@ -136,13 +133,13 @@ def peliculas(item):
     support.info(item)
     if '/load-more-film' not in item.url and '/search' not in item.url: # generi o altri menu, converto
         import ast
-        ajax = support.match(item.url, patron='ajax_data\s*=\s*"?\s*([^;]+)').match
+        ajax = support.match(item.url, patron='ajax_data\s*=\s*"?\s*([^;]+)', cloudscraper=True).match
         item.url = host + '/load-more-film?' + support.urlencode(ast.literal_eval(ajax)) + '&page=1'
     if not '/search' in item.url:
-        json = support.httptools.downloadpage(item.url, headers=headers).json
+        json = support.httptools.downloadpage(item.url, headers=headers, cloudscraper=True).json
         data = "\n".join(json['data'])
     else:
-        data = support.httptools.downloadpage(item.url, headers=headers).data
+        data = support.httptools.downloadpage(item.url, headers=headers, cloudscraper=True).data
     patron = r'wrapFilm">\s*<a href="(?P<url>[^"]+)">\s*<span class="year">(?P<year>[0-9]{4})</span>\s*<span[^>]+>[^<]+</span>\s*<span class="qual">(?P<quality>[^<]+).*?<img src="(?P<thumbnail>[^"]+)[^>]+>\s*<h3>(?P<title>[^<[]+)(?:\[(?P<lang>[sSuUbBiItTaA-]+))?'

     # paginazione
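The peliculas() hunk keeps the menu-to-AJAX conversion intact: the page's embedded ajax_data object is matched with the same patron, parsed with ast.literal_eval and re-encoded as the /load-more-film query string. A minimal standalone sketch of that conversion, using made-up keys and values (the real ajax_data fields are not shown in this diff, and the standard-library urlencode stands in for support.urlencode):

import ast
import re
from urllib.parse import urlencode

# Hypothetical page snippet; only the 'ajax_data = {...};' shape matters here.
page_js = 'var ajax_data = {"genre": "azione", "year": "2023"};'
raw = re.search(r'ajax_data\s*=\s*"?\s*([^;]+)', page_js).group(1)
params = ast.literal_eval(raw)  # same step the channel performs on the matched text
print('/load-more-film?' + urlencode(params) + '&page=1')
# -> /load-more-film?genre=azione&year=2023&page=1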
@@ -176,7 +173,7 @@ def search(item, texto):
 @support.scrape
 def genres(item):
     support.info(item)
-    data = cf.get(item.url).text
+    data = support.httptools.downloadpage(item.url, cloudscraper=True).data

     patronMenu = r'<a href="(?P<url>[^"]+)">(?P<title>[^<]+)'
     if item.args == 'quality':
@@ -209,7 +206,7 @@ def findvideos(item):
     itemlist = []
     video_url = item.url
     if '/watch-unsubscribed' not in video_url:
-        playWindow = support.match(cf.get(item.url).text, patron='playWindow" href="([^"]+)')
+        playWindow = support.match(support.httptools.downloadpage(item.url, cloudscraper=True).data, patron='playWindow" href="([^"]+)')
         video_url = playWindow.match
         if '/tvshow' in video_url:
             item.data = playWindow.data
@@ -217,7 +214,7 @@ def findvideos(item):
             return episodios(item)
     item.contentType = 'movie'
     itemlist.append(item.clone(action='play', url=support.match(video_url.replace('/watch-unsubscribed', '/watch-external'),
-                                patron='allowfullscreen[^<]+src="([^"]+)"').match, quality=''))
+                                patron='allowfullscreen[^<]+src="([^"]+)"', cloudscraper=True).match, quality=''))
     # itemlist.append(item.clone(action='play', server='directo', title=support.config.get_localized_string(30137),
     #                            url=video_url.replace('/watch-unsubscribed', '/watch')))
     return support.server(item, itemlist=itemlist)
@@ -226,7 +223,7 @@ def findvideos(item):
 def play(item):
     if host in item.url: # intercetto il server proprietario
         if registerOrLogin():
-            return support.get_jwplayer_mediaurl(cf.get(item.url).text, 'Diretto')
+            return support.get_jwplayer_mediaurl(support.httptools.downloadpage(item.url, cloudscraper=True).data, 'Diretto')
     else:
         platformtools.play_canceled = True
     return []

@@ -285,12 +285,16 @@ def downloadpage(url, **opt):
     parse = urlparse.urlparse(url)
     domain = parse.netloc

-    from lib import requests
-    session = requests.session()
+    if opt.get('cloudscraper'):
+        from lib import cloudscraper
+        session = cloudscraper.create_scraper()
+    else:
+        from lib import requests
+        session = requests.session()

-    if config.get_setting('resolver_dns') and not opt.get('use_requests', False):
-        from core import resolverdns
-        session.mount('https://', resolverdns.CipherSuiteAdapter(domain))
+        if config.get_setting('resolver_dns') and not opt.get('use_requests', False):
+            from core import resolverdns
+            session.mount('https://', resolverdns.CipherSuiteAdapter(domain))

     req_headers = default_headers.copy()
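The httptools hunk above is what makes the channel-side cloudscraper=True calls work: downloadpage() now builds its session from cloudscraper when the option is set, and otherwise falls back to a plain requests session. A minimal standalone sketch of that selection, assuming the pip packages requests and cloudscraper rather than the add-on's bundled lib copies; resolverdns exists only inside the add-on, so it appears here as a comment:

import requests

def make_session(cloudscraper_opt=False, resolver_dns=False):
    # Sketch of the new branch in downloadpage(): a cloudscraper session when
    # requested, otherwise a plain requests session with the optional
    # DNS-resolver adapter mount.
    if cloudscraper_opt:
        import cloudscraper
        session = cloudscraper.create_scraper()
    else:
        session = requests.Session()
        if resolver_dns:
            # add-on only: session.mount('https://', resolverdns.CipherSuiteAdapter(domain))
            pass
    return session

print(type(make_session(cloudscraper_opt=True)).__name__)  # CloudScraper
print(type(make_session()).__name__)                       # Session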