resolverdns senza cloudscrape + fix findhost eurostreaming

This commit is contained in:
marco
2020-01-05 23:13:55 +01:00
parent e4762e882b
commit 4212bef5ff
5 changed files with 32 additions and 28 deletions

View File

@@ -15,8 +15,7 @@
"cinemalibero": "https://www.cinemalibero.live",
"cinetecadibologna" : "http://cinestore.cinetecadibologna.it",
"documentaristreamingda": "https://documentari-streaming-da.com",
"dreamsub": "https://www.dreamsub.stream",
"eurostreaming": "https://eurostreaming.pink",
"dreamsub": "https://www.dreamsub.stream",
"fastsubita": "https://fastsubita.com",
"filmgratis": "https://www.filmaltadefinizione.org",
"filmigratis": "https://filmigratis.org",

View File

@@ -14,12 +14,10 @@
import re
from core import scrapertools, httptools, support
from core.item import Item
from platformcode import config
# Set dynamically by findhost()
def findhost():
    """Resolve the current eurostreaming domain and return it as a base URL.

    The stable entry point 'https://eurostreaming.link/' answers with a
    redirect whose Location header looks like
    'https://www.google.it/search?q=site:<real-domain>'; stripping the
    google-search prefix leaves the real domain.

    Returns:
        str: the resolved host, e.g. 'https://eurostreaming.pink'.
    """
    permUrl = httptools.downloadpage('https://eurostreaming.link/', follow_redirects=False).headers
    # NOTE: no 'www.' prefix is added — the domain embedded in the Location
    # header is already the canonical one (this was the eurostreaming fix).
    host = 'https://' + permUrl['location'].replace('https://www.google.it/search?q=site:', '')
    return host
host = support.config.get_channel_url(findhost)

View File

@@ -251,19 +251,18 @@ def downloadpage(url, **opt):
"""
load_cookies()
if config.get_setting('resolver_dns'):
if opt.get('session', False):
session = opt['session'] # same session to speed up search
logger.info('same session')
elif opt.get('use_requests', False):
from lib import requests
session = requests.session()
elif urlparse.urlparse(url).netloc in ['www.guardaserie.media']:
from lib import cloudscraper
session = cloudscraper.create_scraper()
elif config.get_setting('resolver_dns'):
from specials import resolverdns
session = resolverdns.session()
else:
if opt.get('session', False):
session = opt['session'] # same session to speed up search
logger.info('same session')
elif opt.get('use_requests', False):
from lib import requests
session = requests.session()
else:
from lib import cloudscraper
session = cloudscraper.create_scraper()
req_headers = default_headers.copy()

View File

@@ -173,7 +173,7 @@ def cleantitle(title):
def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, typeContentDict, typeActionDict, blacklist, search, pag, function, lang):
itemlist = []
log("scrapeBlock qui", block, patron)
log("scrapeBlock qui")
matches = scrapertools.find_multiple_matches_groups(block, patron)
log('MATCHES =', matches)

View File

@@ -4,10 +4,10 @@ import ssl
import urlparse
from lib.requests_toolbelt.adapters import host_header_ssl
from lib import cloudscraper
# from lib import cloudscraper
from lib import doh
from platformcode import logger, config
import re
import requests
try:
import _sqlite3 as sql
@@ -37,21 +37,29 @@ class CustomContext(ssl.SSLContext):
_context=self)
class CipherSuiteAdapter(host_header_ssl.HostHeaderSSLAdapter, cloudscraper.CipherSuiteAdapter):
class CipherSuiteAdapter(host_header_ssl.HostHeaderSSLAdapter):
def __init__(self, hostname, *args, **kwargs):
self.cipherSuite = kwargs.get('cipherSuite', None)
self.ssl_context = kwargs.pop('ssl_context', None)
self.cipherSuite = kwargs.pop('cipherSuite', None)
self.hostname = hostname
self.ssl_context = CustomContext(ssl.PROTOCOL_TLS, hostname)
self.ssl_context.set_ciphers(self.cipherSuite)
self.ssl_context.options |= (ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1)
if not self.ssl_context:
self.ssl_context = CustomContext(ssl.PROTOCOL_TLS, hostname)
self.ssl_context.set_ciphers(self.cipherSuite)
super(CipherSuiteAdapter, self).__init__(**kwargs)
def init_poolmanager(self, *args, **kwargs):
kwargs['ssl_context'] = self.ssl_context
return super(CipherSuiteAdapter, self).init_poolmanager(*args, **kwargs)
cloudscraper.CipherSuiteAdapter.__init__(self, *args, **kwargs)
def proxy_manager_for(self, *args, **kwargs):
kwargs['ssl_context'] = self.ssl_context
return super(CipherSuiteAdapter, self).proxy_manager_for(*args, **kwargs)
class session(cloudscraper.CloudScraper):
class session(requests.Session):
def __init__(self, *args, **kwargs):
self.conn = sql.connect(db)
self.cur = self.conn.cursor()
@@ -97,7 +105,7 @@ class session(cloudscraper.CloudScraper):
parse = urlparse.urlparse(url)
domain = headers['Host'] if headers and 'Host' in headers.keys() else parse.netloc
ip = self.getIp(domain)
self.mount('https://', CipherSuiteAdapter(domain, cipherSuite=self.adapters['https://'].cipherSuite))
self.mount('https://', CipherSuiteAdapter(domain, cipherSuite='ALL'))
realUrl = url
if headers: