Fix DNS override and other improvements
@@ -1,20 +1,19 @@
 {
 "altadefinizione01": "https://www.altadefinizione01.tel",
-"altadefinizione01_link": "https://altadefinizione01.date",
+"altadefinizione01_link": "https://altadefinizione01.cam",
 "animeforce": "https://ww1.animeforce.org",
 "animeleggendari": "https://animepertutti.com",
-"animespace": "https://animespace.tv",
+"animestream": "https://www.animeworld.it",
 "animesubita": "http://www.animesubita.org",
 "animetubeita": "http://www.animetubeita.com",
 "animeworld": "https://www1.animeworld.tv",
 "casacinema": "https://www.casacinema.cloud",
-"casacinemaInfo": "https://casacinema.space",
+"casacinemaInfo": "https://casacinema.kim",
 "cb01anime": "https://www.cineblog01.ink",
 "cinemalibero": "https://www.cinemalibero.live",
-"cinetecadibologna" : "http://cinestore.cinetecadibologna.it",
+"cinetecadibologna": "http://cinestore.cinetecadibologna.it",
 "documentaristreamingda": "https://documentari-streaming-da.com",
 "dreamsub": "https://www.dreamsub.stream",
 "fastsubita": "https://fastsubita.com",
 "filmgratis": "https://www.filmaltadefinizione.org",
 "filmigratis": "https://filmigratis.org",
@@ -22,9 +21,9 @@
 "filmsenzalimiticc": "https://www.filmsenzalimiti.monster",
 "filmstreaming01": "https://filmstreaming01.com",
 "guardarefilm": "https://www.guardarefilm.red",
-"guardaserie_stream": "https://guardaserie.co",
+"guardaserie_stream": "https://guardaserie.store",
 "guardaserieclick": "https://www.guardaserie.media",
-"ilgeniodellostreaming": "https://igds.red",
+"ilgeniodellostreaming": "https://igds.one",
 "italiaserie": "https://italiaserie.org",
 "mondoserietv": "https://mondoserietv.com",
 "netfreex": "https://www.netfreex.online",
@@ -38,6 +37,6 @@
 "streamtime": "https://t.me/s/StreamTime",
 "tantifilm": "https://www.tantifilm.eu",
 "toonitalia": "https://toonitalia.org",
-"vedohd": "https://vedohd.video",
+"vedohd": "https://vedohd.uno",
 "vvvvid": "https://www.vvvvid.it"
 }
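The three hunks above update what appears to be the addon's domain-override map: a flat JSON object mapping each channel id to its current mirror URL. A minimal sketch of how such a map could be consumed — the file name and helper below are hypothetical and not part of this commit; only the JSON shape (id to base URL) comes from the diff:

    import json

    def get_channel_host(channel_id, path='domains.json'):
        # Hypothetical loader: 'domains.json' and this function are assumptions,
        # not part of the commit. Only the map shape is taken from the diff.
        with open(path) as f:
            domains = json.load(f)
        return domains.get(channel_id)  # None when a channel has no override

    # e.g. get_channel_host('vedohd') -> 'https://vedohd.uno' after this commit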
@@ -1,7 +1,7 @@
 {
 "id": "animespace",
 "name": "AnimeSpace",
-"active": true,
+"active": false,
 "adult": false,
 "language": [],
 "thumbnail": "",
@@ -48,7 +48,6 @@ def peliculas(item):
         patron = r'<a href="(?P<url>(?:https:\/\/.+?\/(?P<title>[^\/]+[a-zA-Z0-9\-]+)(?P<year>\d{4})))/".+?url\((?P<thumb>[^\)]+)\)">'
     elif item.contentType == 'tvshow':
         if item.args == 'update':
-            debug = True
             patron = r'<a href="(?P<url>[^"]+)".+?url\((?P<thumb>.+?)\)">\s<div class="titolo">(?P<title>.+?)(?: – Serie TV)?(?:\([sSuUbBiItTaA\-]+\))?[ ]?(?P<year>\d{4})?</div>[ ]<div class="genere">(?:[\w]+?\.?\s?[\s|S]?[\dx\-S]+?\s\(?(?P<lang>[iItTaA]+|[sSuUbBiItTaA\-]+)\)?\s?(?P<quality>[HD]+)?|.+?\(?(?P<lang2>[sSuUbBiItTaA\-]+)?\)?</div>)'
             pagination = 25
     else:
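Both `patron` regexes above rely on named groups (`url`, `title`, `year`, `thumb`, `lang`, `quality`), so the scraper can read matches by name instead of position. A minimal sketch of how a named-group pattern like the first one can be consumed with plain `re` — the real code presumably goes through the project's own scraper helpers, and the sample HTML line is invented:

    import re

    # Invented one-line sample shaped like the markup the first patron targets.
    html = '<a href="https://example.com/some-title-2020/" style="url(thumb.jpg)">'
    patron = (r'<a href="(?P<url>(?:https:\/\/.+?\/(?P<title>[^\/]+[a-zA-Z0-9\-]+)'
              r'(?P<year>\d{4})))/".+?url\((?P<thumb>[^\)]+)\)">')

    for match in re.finditer(patron, html):
        entry = match.groupdict()  # {'url': ..., 'title': ..., 'year': ..., 'thumb': ...}
        print(entry['year'], entry['thumb'])  # -> 2020 thumb.jpg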
@@ -251,21 +251,24 @@ def downloadpage(url, **opt):
     """
     load_cookies()
-    if urlparse.urlparse(url).netloc in ['www.guardaserie.media', 'casacinema.space']:
+    domain = urlparse.urlparse(url).netloc
+    CF = False
+    if domain in ['www.guardaserie.media', 'casacinema.space']:
         from lib import cloudscraper
         session = cloudscraper.create_scraper()
+        CF = True
     elif opt.get('session', False):
         session = opt['session']  # same session to speed up search
         logger.info('same session')
-    elif config.get_setting('resolver_dns') and not opt.get('use_requests', False):
-        from specials import resolverdns
-        session = resolverdns.session()
     else:
         from lib import requests
         session = requests.session()
 
+    if config.get_setting('resolver_dns') and not opt.get('use_requests', False):
+        from specials import resolverdns
+        session.mount('https://', resolverdns.CipherSuiteAdapter(domain, CF))
+
     req_headers = default_headers.copy()
-    verify = opt.get('verify', True)
 
     # Headers passed as parameters
     if opt.get('headers', None) is not None:
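The shape of this change is the commit's main fix: instead of returning a dedicated `resolverdns.session()`, `downloadpage` now mounts the DNS-resolving adapter onto whichever session it already has (plain requests, cloudscraper, or a reused search session). This leans on a standard `requests` mechanism: each request is dispatched to the adapter registered for the longest matching URL prefix. A self-contained sketch of that mechanism, with a stand-in adapter:

    import requests
    from requests.adapters import HTTPAdapter

    class InterceptAdapter(HTTPAdapter):
        """Stand-in for CipherSuiteAdapter: sees every request on its prefix."""
        def send(self, request, **kwargs):
            print('adapter handling:', request.url)  # hook point for DNS override
            return super(InterceptAdapter, self).send(request, **kwargs)

    session = requests.Session()
    session.mount('https://', InterceptAdapter())  # all https:// traffic routed here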
@@ -287,7 +290,7 @@ def downloadpage(url, **opt):
     files = {}
     file_name = ''
 
-    session.verify = opt.get('verify', verify)
+    session.verify = opt.get('verify', True)
 
     if opt.get('cookies', True):
         session.cookies = cj
@@ -726,6 +726,8 @@ def play_video(item, strm=False, force_direct=False, autoplay=False):
     mediaurl, view, mpd = get_video_seleccionado(item, seleccion, video_urls)
     if mediaurl == "":
         return
+    # no certificate verification
+    mediaurl = mediaurl.replace('https://', 'http://')
 
     # the video information is retrieved.
     if not item.contentThumbnail:
@@ -10,9 +10,9 @@ from platformcode import logger, config
 
 def test_video_exists(page_url):
     logger.info("(page_url='%s')" % page_url)
-    return True, ""
+    global data
     data = httptools.downloadpage(page_url).data
-    if "File was deleted" in data or "Page Cannot Be Found" in data:
+    if "File Not Found" in data:
         return False, config.get_localized_string(70449) % "Akvideo"
     return True, ""
@@ -21,8 +21,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=""):
     logger.info(" url=" + page_url)
     video_urls = []
 
-    data = httptools.downloadpage(page_url).data.replace('https','http')
-
+    global data
     vres = scrapertools.find_multiple_matches(data, 'nowrap[^>]+>([^,]+)')
     data_pack = scrapertools.find_single_match(data, "</div>\n\s*<script[^>]+>(eval.function.p,a,c,k,e,.*?)\s*</script>")
     if data_pack != "":
@@ -31,7 +31,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=""):
     sources = scrapertools.find_single_match(data, r'sources: \[([^\]]+)\]')
 
     for media_url in scrapertools.find_multiple_matches(sources, '"([^"]+)"'):
-        media_url = media_url.replace('https:', 'http:')
         ext = scrapertools.get_filename_from_url(media_url)[-4:]
         video_urls.append(["%s [%s]" % (ext, server), media_url])
     return video_urls
@@ -11,6 +11,7 @@ headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20
 
 def test_video_exists(page_url):
     logger.info("(page_url='%s')" % page_url)
+    global data
     data = httptools.downloadpage(page_url).data
     if "Not Found" in data or "File was deleted" in data:
         return False, config.get_localized_string(70449) % 'Wstream'
@@ -21,14 +22,18 @@ def test_video_exists(page_url):
 
 def get_video_url(page_url, premium=False, user="", password="", video_password=""):
     logger.info("[Wstream] url=" + page_url)
     video_urls = []
+    global data
 
     if '/streaming.php' in page_url or 'html' in page_url:
-        code = httptools.downloadpage(page_url, headers=headers, follow_redirects=False).headers['location'].split('/')[-1].replace('.html','')
-        logger.info('WCODE='+code)
-        page_url = 'https://wstream.video/video.php?file_code=' + code
+        try:
+            code = httptools.downloadpage(page_url, headers=headers, follow_redirects=False).headers['location'].split('/')[-1].replace('.html','')
+            logger.info('WCODE='+code)
+            page_url = 'https://wstream.video/video.php?file_code=' + code
+            data = httptools.downloadpage(page_url, headers=headers, follow_redirects=True).data
+        except:
+            pass
 
     code = page_url.split('=')[-1]
-    data = httptools.downloadpage(page_url, headers=headers, follow_redirects=False).data
     ID = scrapertools.find_single_match(data, r'''input\D*id=(?:'|")([^'"]+)(?:'|")''')
     post = urllib.urlencode({ID: code})
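The Wstream change wraps the redirect probe in try/except because only some link formats answer with a `location` header; when it is missing, the old code raised and aborted. A sketch of the underlying idea, using plain `requests` in place of the project's `httptools` (illustrative only):

    import requests

    def extract_wstream_code(page_url, headers=None):
        # Request without following the redirect: the Location target's last
        # path segment carries the file code used to build the video.php URL.
        resp = requests.get(page_url, headers=headers, allow_redirects=False)
        location = resp.headers.get('location')
        if not location:
            return None  # no redirect for this link format
        return location.split('/')[-1].replace('.html', '')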
@@ -4,7 +4,6 @@ import ssl
 import urlparse
 
 from lib.requests_toolbelt.adapters import host_header_ssl
-# from lib import cloudscraper
 from lib import doh
 from platformcode import logger, config
 import requests
@@ -16,7 +15,6 @@ except:
 
 db = os.path.join(config.get_data_path(), 'kod_db.sqlite')
 
-
 class CustomSocket(ssl.SSLSocket):
     def __init__(self, *args, **kwargs):
         super(CustomSocket, self).__init__(*args, **kwargs)
@@ -39,31 +37,19 @@ class CustomContext(ssl.SSLContext):
 
 class CipherSuiteAdapter(host_header_ssl.HostHeaderSSLAdapter):
 
-    def __init__(self, hostname, *args, **kwargs):
-        self.ssl_context = kwargs.pop('ssl_context', None)
-        self.cipherSuite = kwargs.pop('cipherSuite', None)
-        self.hostname = hostname
-
-        if not self.ssl_context:
-            self.ssl_context = CustomContext(ssl.PROTOCOL_TLS, hostname)
-            self.ssl_context.set_ciphers(self.cipherSuite)
+    def __init__(self, domain, CF=False, *args, **kwargs):
+        self.conn = sql.connect(db)
+        self.cur = self.conn.cursor()
+        self.ssl_context = CustomContext(ssl.PROTOCOL_TLS, domain)
+        self.CF = CF  # if cloudscraper is in action
+        self.cipherSuite = kwargs.pop('cipherSuite', ssl._DEFAULT_CIPHERS)
 
         super(CipherSuiteAdapter, self).__init__(**kwargs)
 
-    def init_poolmanager(self, *args, **kwargs):
-        kwargs['ssl_context'] = self.ssl_context
-        return super(CipherSuiteAdapter, self).init_poolmanager(*args, **kwargs)
-
-    def proxy_manager_for(self, *args, **kwargs):
-        kwargs['ssl_context'] = self.ssl_context
-        return super(CipherSuiteAdapter, self).proxy_manager_for(*args, **kwargs)
-
-
-class session(requests.Session):
-    def __init__(self, *args, **kwargs):
-        self.conn = sql.connect(db)
-        self.cur = self.conn.cursor()
-        super(session, self).__init__(*args, **kwargs)
+    def flushDns(self, request, domain, **kwargs):
+        self.cur.execute('delete from dnscache where domain=?', (domain,))
+        self.conn.commit()
+        return self.send(request, flushedDns=True, **kwargs)
 
     def getIp(self, domain):
         ip = None
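The adapter now always builds a `CustomContext` pinned to the logical domain. The reason, implicit in this diff, is that once the request is later aimed at a raw IP, the TLS handshake must still present the real hostname so SNI and certificate matching line up. The core trick, sketched with just the standard library:

    import socket
    import ssl

    def connect_with_sni(ip, hostname, port=443):
        # Open the TCP connection to the resolved IP, but handshake as if we
        # were talking to `hostname`: SNI and the certificate check both use it.
        ctx = ssl.create_default_context()
        sock = socket.create_connection((ip, port))
        return ctx.wrap_socket(sock, server_hostname=hostname)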
@@ -96,61 +82,50 @@ class session(requests.Session):
             );""")
             self.conn.commit()
 
-    def flushDns(self, method, realUrl, domain, **kwargs):
-        self.cur.execute('delete from dnscache where domain=?', (domain,))
-        self.conn.commit()
-        return self.request(method, realUrl, flushedDns=True, **kwargs)
+    def init_poolmanager(self, *args, **kwargs):
+        kwargs['ssl_context'] = self.ssl_context
+        return super(CipherSuiteAdapter, self).init_poolmanager(*args, **kwargs)
 
-    def request(self, method, url, headers=None, flushedDns=False, **kwargs):
+    def proxy_manager_for(self, *args, **kwargs):
+        kwargs['ssl_context'] = self.ssl_context
+        return super(CipherSuiteAdapter, self).proxy_manager_for(*args, **kwargs)
+
+    def send(self, request, flushedDns=False, **kwargs):
         try:
-            parse = urlparse.urlparse(url)
+            parse = urlparse.urlparse(request.url)
         except:
             raise requests.exceptions.InvalidURL
         if parse.netloc:
             domain = parse.netloc
         else:
             raise requests.exceptions.URLRequired
+        self.ssl_context = CustomContext(ssl.PROTOCOL_TLS, domain)
+        if self.CF:
+            self.ssl_context.options |= (ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1)
+        self.ssl_context.set_ciphers(self.cipherSuite)
+        self.init_poolmanager(self._pool_connections, self._pool_maxsize, block=self._pool_block)
         ip = self.getIp(domain)
-        self.mount('https://', CipherSuiteAdapter(domain, cipherSuite='ALL'))
-        realUrl = url
-
-        if headers:
-            headers["Host"] = domain
+        realUrl = request.url
+
+        if request.headers:
+            request.headers["Host"] = domain
         else:
-            headers = {"Host": domain}
+            request.headers = {"Host": domain}
         ret = None
         tryFlush = False
 
         parse = list(parse)
         parse[1] = ip
-        url = urlparse.urlunparse(parse)
-
-        allow_redirects = kwargs.get('allow_redirects', True)
-        if 'allow_redirects' in kwargs:
-            del kwargs['allow_redirects']
+        request.url = urlparse.urlunparse(parse)
         try:
-            ret = super(session, self).request(method, url, headers=headers, allow_redirects=False, **kwargs)
-            newUrl = urlparse.urlparse(ret.headers.get('Location', realUrl))
-            if not newUrl.netloc and not newUrl.scheme:
-                newUrl = list(newUrl)
-                newUrl[0] = 'https://'
-                newUrl[1] = domain
-                newUrl = urlparse.urlunparse(newUrl)
-            if allow_redirects:
-                redirectN = 0
-                while newUrl != realUrl and redirectN < self.max_redirects:
-                    ret = self.request(method, newUrl, headers=headers, **kwargs)
-                    newUrl = ret.headers.get('Location', realUrl)
-                    redirectN += 1
-            ret.url = newUrl
+            ret = super(CipherSuiteAdapter, self).send(request, **kwargs)
         except Exception as e:
             logger.info('Request for ' + domain + ' with ip ' + ip + ' failed')
             logger.info(e)
             tryFlush = True
-        if (tryFlush or not ret) and not flushedDns:  # re-request ips and update cache
+        if tryFlush and not flushedDns:  # re-request ips and update cache
             logger.info('Flushing dns cache for ' + domain)
-            return self.flushDns(method, realUrl, domain, **kwargs)
-
-        if not ret:
-            raise requests.exceptions.RequestException
+            return self.flushDns(request, domain, **kwargs)
         ret.url = realUrl
         return ret
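This rewritten `send` is the heart of the DNS override: the hostname is resolved out of band (`getIp`, backed by the sqlite `dnscache` table above), the URL's netloc is swapped for the raw IP, the real domain is restored via the Host header, and on failure the cache entry is flushed once and the request retried. The same flow reduced to plain `requests` — the DoH lookup is stubbed here, and certificate verification is disabled because the URL no longer carries the hostname, a problem the real adapter solves at the TLS layer instead:

    import requests

    def fetch_via_ip(url, domain, ip):
        # Swap the hostname for a pre-resolved IP so the system resolver is
        # never consulted, and keep the real domain in the Host header.
        ip_url = url.replace(domain, ip, 1)
        return requests.get(ip_url, headers={'Host': domain}, verify=False)

    # Hypothetical usage: in the adapter above, `ip` comes from the cached
    # DoH lookup, not from a hardcoded value.
    # fetch_via_ip('https://example.org/page', 'example.org', '93.184.216.34')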
@@ -181,12 +181,8 @@ def channel_search(item):
 
     channel_list, channel_titles = get_channels(item)
 
-    if config.get_setting('resolver_dns'):
-        from specials import resolverdns
-        session = resolverdns.session()
-    else:
-        import requests
-        session = requests.Session()
+    import requests
+    session = requests.Session()
 
     searching += channel_list
     searching_titles += channel_titles