diff --git a/core/httptools.py b/core/httptools.py
old mode 100644
new mode 100755
index f1de34dc..8eb67264
--- a/core/httptools.py
+++ b/core/httptools.py
@@ -407,8 +407,8 @@ def downloadpage(url, **opt):
     """
     load_cookies()

-    import requests
-    from lib import cloudscraper
+    if not opt.get('session', False):
+        from lib import cloudscraper

     # Headers by default, if nothing is specified
     req_headers = default_headers.copy()
@@ -435,8 +435,10 @@ def downloadpage(url, **opt):
     files = {}
     file_name = ''
     opt['proxy_retries_counter'] += 1
-
-    session = cloudscraper.create_scraper()
+    if not opt.get('session', False):
+        session = cloudscraper.create_scraper()
+    else:
+        session = opt['session']
     # session.verify = False
     if opt.get('cookies', True):
         session.cookies = cj
@@ -511,6 +513,7 @@ def downloadpage(url, **opt):

     except Exception as e:
         if not opt.get('ignore_response_code', False) and not proxy_data.get('stat', ''):
+            import requests
             req = requests.Response()
             response['data'] = ''
             response['sucess'] = False
diff --git a/core/support.py b/core/support.py
old mode 100644
new mode 100755
index 292114fa..43bff71b
--- a/core/support.py
+++ b/core/support.py
@@ -377,7 +377,7 @@ def scrape(func):
         log('PATRON= ', patron)

         if not data:
-            data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data.replace("'", '"')
+            data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True, session=item.session).data.replace("'", '"')
            data = re.sub('\n|\t', ' ', data)
            data = re.sub(r'>\s+<', '> <', data)
            # replace all ' with " and eliminate newline, so we don't need to worry about
diff --git a/specials/search.py b/specials/search.py
old mode 100644
new mode 100755
index 2c90501e..30988d68
--- a/specials/search.py
+++ b/specials/search.py
@@ -424,7 +424,7 @@ def show_result(item):
     return getattr(channel, item.action)(item)


-def channel_search(search_results, channel_parameters, tecleado):
+def channel_search(search_results, channel_parameters, tecleado, session):
     try:
         exec("from channels import " + channel_parameters["channel"] + " as module")
         mainlist = module.mainlist(Item(channel=channel_parameters["channel"]))
@@ -433,6 +433,7 @@ def channel_search(search_results, channel_parameters, tecleado):
     search_items = [Item(channel=channel_parameters["channel"], action="search")]

     for item in search_items:
+        item.session = session
         result = module.search(item.clone(), tecleado)
         if result is None:
             result = []
@@ -548,6 +549,8 @@ def do_search(item, categories=None):
             logger.error(traceback.format_exc())
             continue


+    from lib import cloudscraper
+    session = cloudscraper.create_scraper()
     for index, infile in enumerate(list_channels_search):
         try:
@@ -566,7 +569,7 @@ def do_search(item, categories=None):
             logger.info("Búsqueda cancelada")
             return itemlist
         if multithread:
-            t = Thread(target=channel_search, args=[search_results, channel_parameters, tecleado],
+            t = Thread(target=channel_search, args=[search_results, channel_parameters, tecleado, session],
                        name=channel_parameters["title"])
             t.setDaemon(True)
             t.start()
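
Taken together, the change creates one cloudscraper session in `do_search()` and threads it through `channel_search()` → `item.session` → `support.scrape()` → `httptools.downloadpage()`, so all threaded channel searches reuse the same Cloudflare clearance cookies instead of each request spinning up a fresh scraper. Below is a minimal sketch of that session-sharing pattern, assuming only the public `cloudscraper` package; `fetch`, `fetch_all`, and `WORKER_URLS` are illustrative names, not part of this codebase.

```python
# Sketch of the pattern introduced by this diff: one shared scraper
# session reused across worker threads. Hypothetical demo names;
# only cloudscraper.create_scraper() and session.get() are real APIs.
from threading import Thread

import cloudscraper

WORKER_URLS = [
    'https://example.com/search?q=a',
    'https://example.com/search?q=b',
]


def fetch(session, url, results):
    # Reuses the shared session: clearance cookies obtained by the
    # first request are sent automatically on every later request.
    results.append(session.get(url).status_code)


def fetch_all():
    # Created once for the whole search, mirroring do_search().
    session = cloudscraper.create_scraper()
    results, threads = [], []
    for url in WORKER_URLS:
        t = Thread(target=fetch, args=[session, url, results])
        t.start()
        threads.append(t)
    for t in threads:
        t.join()
    return results
```

One caveat worth noting in review: sharing a `requests.Session` (which the cloudscraper object subclasses) across threads is common practice but not formally guaranteed thread-safe by `requests`, so the cookie jar is the main shared state to keep an eye on.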