Commit 1e9c8debcb by Alhaziel01, 2022-10-01 19:48:04 +02:00 (parent 754ebcab61)


@@ -18,7 +18,8 @@ else:
host = support.config.get_channel_url()
- headers = {}
+ headers = {'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36 Edg/105.0.1343.53',
+            'referer': host + '/browse'}
def getHeaders(forced=False):
global headers
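The new default headers above impersonate a desktop browser on every request. A minimal standalone sketch of the same idea with plain requests; the host value and endpoint below are placeholder assumptions, not the addon's code:

import requests

host = 'https://example.org'  # placeholder; the addon resolves this via support.config.get_channel_url()
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                         '(KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36 Edg/105.0.1343.53',
           'referer': host + '/browse'}

# the same dict is then attached to every request made against the site
response = requests.get(host + '/browse', headers=headers)
print(response.status_code)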
@@ -31,7 +32,7 @@ def getHeaders(forced=False):
# host = support.config.get_channel_url(findhost, forceFindhost=True)
csrf_token = support.match(response.data, patron='name="csrf-token" content="([^"]+)"').match
headers = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14',
- 'content-type': 'application/json;charset=UTF-8',
+ # 'content-type': 'application/json;charset=UTF-8',
'Referer': host,
'x-csrf-token': csrf_token,
'Cookie': '; '.join([x.name + '=' + x.value for x in response.cookies])}
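getHeaders() above scrapes a csrf-token meta tag and forwards the cookies it received. A rough equivalent using only requests and re, assuming the page exposes the token as <meta name="csrf-token" content="...">; the host is a placeholder:

import re
import requests

host = 'https://example.org'  # placeholder host
first = requests.get(host)
match = re.search(r'name="csrf-token" content="([^"]+)"', first.text)
csrf_token = match.group(1) if match else ''

headers = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14',
           'Referer': host,
           'x-csrf-token': csrf_token,
           # forward the cookies from the first response as a single header, as the hunk does
           'Cookie': '; '.join(c.name + '=' + c.value for c in first.cookies)}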
@@ -39,7 +40,7 @@ def getHeaders(forced=False):
# host = support.config.get_channel_url(findhost, forceFindhost=True)
# if not forced: getHeaders(True)
- getHeaders()
+ # getHeaders()
@support.menu
def mainlist(item):
@@ -59,7 +60,7 @@ def mainlist(item):
def genres(item):
# getHeaders()
- logger.debug()
+ # logger.debug()
itemlist = []
data = support.scrapertools.decodeHtmlentities(support.match(item).data)
args = support.match(data, patronBlock=r'genre-options-json="([^\]]+)\]', patron=r'name"\s*:\s*"([^"]+)').matches
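genres() pulls the genre names out of a JSON blob embedded in an HTML attribute. An illustrative sketch of that extraction with the standard library only; the sample markup is invented for the example:

import html
import re

sample = 'genre-options-json="[{&quot;name&quot;: &quot;Azione&quot;}, {&quot;name&quot;: &quot;Commedia&quot;}]"'
decoded = html.unescape(sample)                               # roughly what decodeHtmlentities does
block = re.search(r'genre-options-json="([^\]]+)\]', decoded).group(1) + ']'
names = re.findall(r'"name"\s*:\s*"([^"]+)', block)
print(names)  # ['Azione', 'Commedia']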
@@ -130,11 +131,12 @@ def peliculas(item):
data = support.scrapertools.decodeHtmlentities(support.match(item).data)
records = json.loads(support.match(data, patron=r'slider-title titles-json="(.*?)"\s*slider-name="').matches[item.args])
elif not item.search:
- payload = json.dumps({'type': videoType, 'offset':offset, 'genre':item.args})
- records = httptools.downloadpage(host + '/api/browse', headers=headers, post=payload).json['records']
+ payload = {'type': videoType, 'offset':offset, 'genre':item.args}
+ records = requests.post(host + '/api/browse', headers=headers, json=payload).json()['records']
else:
- payload = json.dumps({'q': item.search})
- records = httptools.downloadpage(host + '/api/search', headers=headers, post=payload).json['records']
+ payload = {'q': item.search}
+ headers['referer'] = host + '/search'
+ records = requests.post(host + '/api/search', headers=headers, json=payload).json()['records']
if records and type(records[0]) == list:
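This hunk is the core of the change: json.dumps plus httptools.downloadpage(post=...) is replaced by requests.post(..., json=payload), which serializes the dict and sets Content-Type: application/json itself (hence the content-type entry commented out earlier). A self-contained sketch of that pattern against a stand-in echo endpoint:

import requests

host = 'https://httpbin.org'  # stand-in echo service, not the real site
headers = {'user-agent': 'Mozilla/5.0', 'referer': host + '/browse'}
payload = {'type': 'movie', 'offset': 0, 'genre': 'Azione'}

# json=payload serializes the body and sets the JSON content type automatically
resp = requests.post(host + '/post', headers=headers, json=payload)
print(resp.json()['json'])  # httpbin echoes the parsed JSON body back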
@@ -150,6 +152,8 @@ def peliculas(item):
else:
recordlist.append(it)
- itlist = [makeItem(i, it, item) for i, it in enumerate(items)]
+ with futures.ThreadPoolExecutor() as executor:
+     itlist = [executor.submit(makeItem, i, it, item) for i, it in enumerate(items)]
+     for res in futures.as_completed(itlist):
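This hunk parallelizes the per-title makeItem calls with concurrent.futures. A minimal sketch of the same pattern, with a hypothetical fetch_preview standing in for makeItem; since as_completed yields results in completion order, the submission index is carried along and restored afterwards:

from concurrent import futures

def fetch_preview(index, title):
    # hypothetical stand-in for makeItem / the per-title preview request
    return index, title.upper()

titles = ['primo', 'secondo', 'terzo']
with futures.ThreadPoolExecutor() as executor:
    pending = [executor.submit(fetch_preview, i, t) for i, t in enumerate(titles)]
    results = [f.result() for f in futures.as_completed(pending)]

results.sort(key=lambda pair: pair[0])  # restore submission order
print(results)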
@@ -170,7 +174,7 @@ def peliculas(item):
def makeItem(n, it, item):
info = httptools.downloadpage(host + '/api/titles/preview/{}'.format(it['id']), headers=headers, post={}).json
logger.debug(it)
logger.debug(jsontools.dump(info))
title = info['name']
lang = 'Sub-ITA' if 'sub-ita' in title.lower() else 'ITA'
title = support.cleantitle(re.sub('\[|\]|[Ss][Uu][Bb]-[Ii][Tt][Aa]', '', title))
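makeItem derives the language tag and then strips the brackets and the Sub-ITA marker from the display title. A standalone sketch of that clean-up on an invented sample title:

import re

raw_title = 'Nome del Film [Sub-ITA]'  # invented sample
lang = 'Sub-ITA' if 'sub-ita' in raw_title.lower() else 'ITA'
clean = re.sub(r'\[|\]|[Ss][Uu][Bb]-[Ii][Tt][Aa]', '', raw_title).strip()
print(clean, '-', lang)  # Nome del Film - Sub-ITA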