diff --git a/channels/animeunity.py b/channels/animeunity.py index 79954d89..641be941 100644 --- a/channels/animeunity.py +++ b/channels/animeunity.py @@ -5,7 +5,7 @@ import cloudscraper, json, copy, inspect from core import jsontools, support -from platformcode import autorenumber, logger +from platformcode import autorenumber session = cloudscraper.create_scraper() @@ -154,15 +154,20 @@ def peliculas(item): payload = json.dumps(item.args) records = session.post(host + '/archivio/get-animes', headers=headers, data=payload).json()['records'] for it in records: + if not it['title']: + it['title'] = '' lang = support.match(it['title'], patron=r'\(([It][Tt][Aa])\)').match title = support.re.sub(r'\s*\([^\)]+\)', '', it['title']) if 'ita' in lang.lower(): language = 'ITA' else: language = 'Sub-ITA' - itm = item.clone(title=support.typo(title,'bold') + support.typo(language,'_ [] color kod') + (support.typo(it['title_eng'],'_ ()') if it['title_eng'] else '')) + if title: + itm = item.clone(title=support.typo(title,'bold') + support.typo(language,'_ [] color kod') + (support.typo(it['title_eng'],'_ ()') if it['title_eng'] else '')) + else: + itm = item.clone(title=support.typo(it['title_eng'],'bold') + support.typo(language,'_ [] color kod')) itm.contentLanguage = language itm.type = it['type'] itm.thumbnail = it['imageurl'] diff --git a/channels/cinemalibero.py b/channels/cinemalibero.py index 99a3ef57..c7b0f3fc 100644 --- a/channels/cinemalibero.py +++ b/channels/cinemalibero.py @@ -155,7 +155,11 @@ def episodios(item): for s in servers: executor.submit(get_ep, s) # logger.debug(it.contentLanguage) - ret.extend([it.clone(title=typo(ep, 'bold')+typo(it.contentLanguage, '_ [] color kod bold'), contentSeason=int(ep.split('x')[0]), contentEpisodeNumber=int(ep.split('x')[1]), servers=[srv.tourl() for srv in episodes[ep]]) for ep in episodes]) + ret.extend([it.clone(title=typo(ep, 'bold')+typo(it.contentLanguage, '_ [] color kod bold'), servers=[srv.tourl() for srv in 
episodes[ep]]) for ep in episodes]) + if item.args != 'anime': + for r, ep in zip(ret, episodes): + r.contentSeason = int(ep.split('x')[0]) + r.contentEpisodeNumber = int(ep.split('x')[1]) elif ep: ret.append(it) return sorted(ret, key=lambda i: i.title) diff --git a/core/httptools.py b/core/httptools.py index 93e7b2c6..00722b16 100755 --- a/core/httptools.py +++ b/core/httptools.py @@ -420,7 +420,7 @@ def downloadpage(url, **opt): if 'Px-Host' in req_headers: # first try with proxy logger.debug("CF retry with google translate for domain: %s" % domain) from lib import proxytranslate - gResp = proxytranslate.process_request_proxy(url) + gResp = proxytranslate.process_request_proxy(opt.get('real-url', url)) if gResp: req = gResp['result'] response_code = req.status_code @@ -431,7 +431,10 @@ def downloadpage(url, **opt): if not opt.get('headers'): opt['headers'] = [] opt['headers'].extend([['Px-Host', domain], ['Px-Token', cf_proxy['token']]]) - return downloadpage(urlparse.urlunparse((parse.scheme, cf_proxy['url'], parse.path, parse.params, parse.query, parse.fragment)), **opt) + opt['real-url'] = url + ret = downloadpage(urlparse.urlunparse((parse.scheme, cf_proxy['url'], parse.path, parse.params, parse.query, parse.fragment)), **opt) + ret.url = url + return ret if not response['data']: response['data'] = '' diff --git a/servers/doodstream.py b/servers/doodstream.py index bad906b2..8aa00e05 100644 --- a/servers/doodstream.py +++ b/servers/doodstream.py @@ -3,19 +3,17 @@ import time, string, random from core import httptools, support from platformcode import logger, config -import cloudscraper -scraper = cloudscraper.create_scraper() def test_video_exists(page_url): global data logger.debug('page url=', page_url) - response = scraper.get(page_url) - # support.dbg() - if response.status_code == 404 or 'dsplayer' not in response.text: + response = httptools.downloadpage(page_url) + # support.dbg() + if response.code == 404 or 'dsplayer' not in 
response.data: return False, config.get_localized_string(70449) % 'DooD Stream' else: - data = response.text + data = response.data return True, "" @@ -30,7 +28,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= match = support.match(data, patron=r'''dsplayer\.hotkeys[^']+'([^']+).+?function\s*makePlay.+?return[^?]+([^"]+)''').match if match: url, token = match - ret = scraper.get(host + url, headers=headers).text + ret = httptools.downloadpage(host + url, headers=headers).data video_urls.append(['mp4 [DooD Stream]', '{}{}{}{}|Referer={}'.format(randomize(ret), url, token, int(time.time() * 1000), host)])