diff --git a/channels.json b/channels.json
index fc00a85e..01df889e 100644
--- a/channels.json
+++ b/channels.json
@@ -15,8 +15,7 @@
     "cinemalibero": "https://www.cinemalibero.live",
     "cinetecadibologna" : "http://cinestore.cinetecadibologna.it",
     "documentaristreamingda": "https://documentari-streaming-da.com",
-    "dreamsub": "https://www.dreamsub.stream",
-    "eurostreaming": "https://eurostreaming.pink",
+    "dreamsub": "https://www.dreamsub.stream",
     "fastsubita": "https://fastsubita.com",
     "filmgratis": "https://www.filmaltadefinizione.org",
     "filmigratis": "https://filmigratis.org",
diff --git a/channels/eurostreaming.py b/channels/eurostreaming.py
index e19accdd..2101e2d4 100644
--- a/channels/eurostreaming.py
+++ b/channels/eurostreaming.py
@@ -14,12 +14,10 @@ import re
 
 from core import scrapertools, httptools, support
 from core.item import Item
-from platformcode import config
 
-#impostati dinamicamente da findhost()
 def findhost():
     permUrl = httptools.downloadpage('https://eurostreaming.link/', follow_redirects=False).headers
-    host = 'https://www.'+permUrl['location'].replace('https://www.google.it/search?q=site:', '')
+    host = 'https://'+permUrl['location'].replace('https://www.google.it/search?q=site:', '')
     return host
 
 host = support.config.get_channel_url(findhost)
diff --git a/core/httptools.py b/core/httptools.py
index e2460e01..c51d0c73 100755
--- a/core/httptools.py
+++ b/core/httptools.py
@@ -251,19 +251,18 @@ def downloadpage(url, **opt):
     """
 
     load_cookies()
 
-    if config.get_setting('resolver_dns'):
+    if opt.get('session', False):
+        session = opt['session'] # same session to speed up search
+        logger.info('same session')
+    elif opt.get('use_requests', False):
+        from lib import requests
+        session = requests.session()
+    elif urlparse.urlparse(url).netloc in ['www.guardaserie.media']:
+        from lib import cloudscraper
+        session = cloudscraper.create_scraper()
+    elif config.get_setting('resolver_dns'):
         from specials import resolverdns
         session = resolverdns.session()
-    else:
-        if opt.get('session', False):
-            session = opt['session'] # same session to speed up search
-            logger.info('same session')
-        elif opt.get('use_requests', False):
-            from lib import requests
-            session = requests.session()
-        else:
-            from lib import cloudscraper
-            session = cloudscraper.create_scraper()
 
     req_headers = default_headers.copy()
diff --git a/core/support.py b/core/support.py
index 3cf3e6f5..f5fe9e4c 100755
--- a/core/support.py
+++ b/core/support.py
@@ -173,7 +173,7 @@ def cleantitle(title):
 
 
 def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, typeContentDict, typeActionDict, blacklist, search, pag, function, lang):
     itemlist = []
-    log("scrapeBlock qui", block, patron)
+    log("scrapeBlock qui")
     matches = scrapertools.find_multiple_matches_groups(block, patron)
     log('MATCHES =', matches)
diff --git a/specials/resolverdns.py b/specials/resolverdns.py
index f980a5bf..bbde6348 100644
--- a/specials/resolverdns.py
+++ b/specials/resolverdns.py
@@ -4,10 +4,10 @@ import ssl
 import urlparse
 
 from lib.requests_toolbelt.adapters import host_header_ssl
-from lib import cloudscraper
+# from lib import cloudscraper
 from lib import doh
 from platformcode import logger, config
-import re
+import requests
 
 try:
     import _sqlite3 as sql
@@ -37,21 +37,29 @@ class CustomContext(ssl.SSLContext):
             _context=self)
 
 
-class CipherSuiteAdapter(host_header_ssl.HostHeaderSSLAdapter, cloudscraper.CipherSuiteAdapter):
+class CipherSuiteAdapter(host_header_ssl.HostHeaderSSLAdapter):
 
     def __init__(self, hostname, *args, **kwargs):
-        self.cipherSuite = kwargs.get('cipherSuite', None)
+        self.ssl_context = kwargs.pop('ssl_context', None)
+        self.cipherSuite = kwargs.pop('cipherSuite', None)
         self.hostname = hostname
-        self.ssl_context = CustomContext(ssl.PROTOCOL_TLS, hostname)
-        self.ssl_context.set_ciphers(self.cipherSuite)
-        self.ssl_context.options |= (ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1)
+
+        if not self.ssl_context:
+            self.ssl_context = CustomContext(ssl.PROTOCOL_TLS, hostname)
+            self.ssl_context.set_ciphers(self.cipherSuite)
+
+        super(CipherSuiteAdapter, self).__init__(**kwargs)
+
+    def init_poolmanager(self, *args, **kwargs):
 
         kwargs['ssl_context'] = self.ssl_context
+        return super(CipherSuiteAdapter, self).init_poolmanager(*args, **kwargs)
 
-        cloudscraper.CipherSuiteAdapter.__init__(self, *args, **kwargs)
+    def proxy_manager_for(self, *args, **kwargs):
+        kwargs['ssl_context'] = self.ssl_context
+        return super(CipherSuiteAdapter, self).proxy_manager_for(*args, **kwargs)
 
-
-class session(cloudscraper.CloudScraper):
+class session(requests.Session):
     def __init__(self, *args, **kwargs):
         self.conn = sql.connect(db)
         self.cur = self.conn.cursor()
@@ -97,7 +105,7 @@ class session(cloudscraper.CloudScraper):
         parse = urlparse.urlparse(url)
         domain = headers['Host'] if headers and 'Host' in headers.keys() else parse.netloc
         ip = self.getIp(domain)
-        self.mount('https://', CipherSuiteAdapter(domain, cipherSuite=self.adapters['https://'].cipherSuite))
+        self.mount('https://', CipherSuiteAdapter(domain, cipherSuite='ALL'))
 
         realUrl = url
         if headers: