diff --git a/channels.json b/channels.json
index 01df889e..db050f12 100644
--- a/channels.json
+++ b/channels.json
@@ -1,8 +1,7 @@
 {
     "altadefinizione01": "https://www.altadefinizione01.tel",
     "altadefinizione01_link": "https://altadefinizione01.date",
-    "altadefinizioneclick": "https://altadefinizione.date",
-    "animeforce": "https://ww1.animeforce.org",
+    "animeforce": "https://ww1.animeforce.org",
     "animeleggendari": "https://animepertutti.com",
     "animespace": "https://animespace.tv",
     "animestream": "https://www.animeworld.it",
@@ -10,7 +9,7 @@
     "animetubeita": "http://www.animetubeita.com",
     "animeworld": "https://www1.animeworld.tv",
     "casacinema": "https://www.casacinema.cloud",
-    "casacinemainfo": "https://www.casacinema.info",
+    "casacinemainfo": "https://casacinema.kim",
     "cb01anime": "https://www.cineblog01.ink",
     "cinemalibero": "https://www.cinemalibero.live",
     "cinetecadibologna" : "http://cinestore.cinetecadibologna.it",
diff --git a/core/httptools.py b/core/httptools.py
index 869c5f7b..c8d35764 100755
--- a/core/httptools.py
+++ b/core/httptools.py
@@ -356,7 +356,6 @@ def downloadpage(url, **opt):
     except Exception as e:
         from lib import requests
         if not opt.get('ignore_response_code', False) and not proxy_data.get('stat', ''):
-            req = requests.Response()
             response['data'] = ''
             response['sucess'] = False
             info_dict.append(('Success', 'False'))
diff --git a/core/support.py b/core/support.py
index f5fe9e4c..a0f6954a 100755
--- a/core/support.py
+++ b/core/support.py
@@ -381,9 +381,11 @@ def scrape(func):
         # if url may be changed and channel has findhost to update
         if (not page.data or scrapertools.get_domain_from_url(page.url) != scrapertools.get_domain_from_url(item.url)) and 'findhost' in func.__globals__:
             host = func.__globals__['findhost']()
+            parse = list(urlparse.urlparse(item.url))
             from core import jsontools
             jsontools.update_node(host, func.__module__.split('.')[-1], 'url')
-            item.url = item.url.replace(scrapertools.get_domain_from_url(item.url), scrapertools.get_domain_from_url(host))
+            parse[1] = scrapertools.get_domain_from_url(host)
+            item.url = urlparse.urlunparse(parse)
             page = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True, session=item.session)
             data = page.data.replace("'", '"')
 
diff --git a/specials/resolverdns.py b/specials/resolverdns.py
index 7e25c32e..61b17b82 100644
--- a/specials/resolverdns.py
+++ b/specials/resolverdns.py
@@ -102,8 +102,14 @@ class session(requests.Session):
         return self.request(method, realUrl, flushedDns=True, **kwargs)
 
     def request(self, method, url, headers=None, flushedDns=False, **kwargs):
-        parse = urlparse.urlparse(url)
-        domain = parse.netloc
+        try:
+            parse = urlparse.urlparse(url)
+        except Exception:
+            raise requests.exceptions.InvalidURL
+        if parse.netloc:
+            domain = parse.netloc
+        else:
+            raise requests.exceptions.URLRequired
         ip = self.getIp(domain)
         self.mount('https://', CipherSuiteAdapter(domain, cipherSuite='ALL'))
         realUrl = url
@@ -145,4 +151,6 @@ class session(requests.Session):
             logger.info('Flushing dns cache for ' + domain)
             return self.flushDns(method, realUrl, domain, **kwargs)
 
+        if not ret:
+            raise requests.exceptions.RequestException
         return ret