From f5a6384653a24714f151e40c762f65b6f512b7e0 Mon Sep 17 00:00:00 2001
From: marco <10120390+mac12m99@users.noreply.github.com>
Date: Thu, 29 Dec 2022 15:50:57 +0100
Subject: [PATCH] Improvements to cinemalibero and new HexUpload server.
 Disabled streamingita

---
 channels/cinemalibero.py   | 51 ++++++++++----------
 channels/streamingita.json |  2 +-
 lib/proxytranslate.py      |  1 +
 lib/unshortenit.py         | 95 +++++++++++++++++++++++++-------------
 servers/hexupload.json     | 26 +++++++++++
 servers/hexupload.py       | 29 ++++++++++++
 servers/mixdrop.json       |  4 +-
 servers/streamsb.json      |  2 +-
 8 files changed, 150 insertions(+), 60 deletions(-)
 create mode 100644 servers/hexupload.json
 create mode 100644 servers/hexupload.py

diff --git a/channels/cinemalibero.py b/channels/cinemalibero.py
index c087422c..ceb2e09c 100644
--- a/channels/cinemalibero.py
+++ b/channels/cinemalibero.py
@@ -97,10 +97,11 @@ def peliculas(item):
 @support.scrape
 def episodios(item):
     data = item.data
+    # debug=True
 
     if item.args == 'anime':
         logger.debug("Anime :", item)
-        patron = r'<a target=(?P<url>[^>]+>(?P<title>Episodio\s(?P<episode>\d+))(?::)?(?:(?P<title2>[^<]+))?.*?(?:<br|</p))'
+        patron = r'<a target=(?P<url>[^>]+>(?P<title>Episodio\s(?P<episode>\d+))(?::)?(?:(?P<title2>[^<]+))?.*?(?:<br|</p))|(?P<data>.+)'
         patronBlock = r'(?:Stagione (?P<season>\d+))?(?:</span><br />|</span></p>|strong></p>)(?P<block>.*?)(?:<div style="margin-left|<span class="txt_dow">)'
         item.contentType = 'tvshow'
     elif item.args == 'sport':
@@ -110,7 +111,6 @@ def episodios(item):
         item.contentType = 'tvshow'
     elif item.args == 'serie' or item.contentType == 'tvshow':
         logger.debug("Serie :", item)
-        # debug=True
         patron = r'(?:/>|<p>)\s*(?:(?P<episode>\d+(?:x|×|×)\d+|Puntata \d+)(?:-(?P<episode2>\d+))?[;]?[ ]?(?P<title>[^<–-]+))?(?P<data>.*?)(?:<br|</p)'
         patronBlock = r'Stagione\s(?:[Uu]nica)?(?:(?P<lang>iTA|ITA|Sub-ITA|Sub-iTA))?.*?</strong>(?P<block>.+?)(?:strong>|<div class="at-below)'
         item.contentType = 'tvshow'
@@ -132,24 +132,28 @@ def episodios(item):
         if not ep and 'http' in it.data:  # stagione intera
             from lib import unshortenit
             data = unshortenit.findlinks(it.data)
+            episodes = {}
+
             def get_ep(s):
                 srv_mod = __import__('servers.%s' % s.server, None, None, ["servers.%s" % s.server])
                 if hasattr(srv_mod, 'get_filename'):
                     title = srv_mod.get_filename(s.url)
-                    ep = scrapertools.get_season_and_episode(title)
+                    if item.args == 'anime':
+                        ep = title
+                    else:
+                        ep = scrapertools.get_season_and_episode(title)
                     if ep:
                         if ep not in episodes:
                             episodes[ep] = []
                         episodes[ep].append(s)
 
             servers = support.server(item, data, CheckLinks=False, Download=False, Videolibrary=False)
-            episodes = {}
-            for s in servers:
-                get_ep(s)
+            # for s in servers:
+            #     get_ep(s)
             # ottengo l'episodio dal nome del file
-            # with futures.ThreadPoolExecutor() as executor:
-            #     for s in servers:
-            #         executor.submit(get_ep, s)
+            with futures.ThreadPoolExecutor() as executor:
+                for s in servers:
+                    executor.submit(get_ep, s)
             # logger.debug(it.contentLanguage)
             ret.extend([it.clone(title=typo(ep, 'bold')+typo(it.contentLanguage, '_ [] color kod bold'), contentSeason=int(ep.split('x')[0]), contentEpisodeNumber=int(ep.split('x')[1]), servers=[srv.tourl() for srv in episodes[ep]]) for ep in episodes])
         elif ep:
@@ -253,21 +257,20 @@ def findvideos(item):
     item.data = data
 
     servers = []
-    if item.args == 'anime':
-        if item.urls:  # this is a episode
-            return support.server(item, itemlist=[Item(url=support.unshortenit.FileCrypt().unshorten(u)) for u in item.urls])
-        itemlist = []
-        episodes = {}
-        # support.dbg()
-        for uri in support.unshortenit.FileCrypt().find(item.data):
-            for ep in support.unshortenit.FileCrypt(uri).list_files():
-                ep = ('.'.join(ep[0].split('.')[:-1]), ep[1])  # remove extension
-                if not ep[0] in episodes:
-                    episodes[ep[0]] = []
-                episodes[ep[0]].append(ep[1])
-        for ep in episodes.keys():
-            itemlist.append(item.clone(title=ep, urls=episodes[ep], action='findvideos', data=''))
-        return itemlist
+    # if item.args == 'anime':
+    #     if item.urls:  # this is a episode
+    #         return support.server(item, itemlist=[Item(url=support.unshortenit.FileCrypt().unshorten(u)) for u in item.urls])
+    #     itemlist = []
+    #     episodes = {}
+    #     for uri in support.unshortenit.FileCrypt().find(item.data):
+    #         for ep in support.unshortenit.FileCrypt(uri).list_files():
+    #             ep = ('.'.join(ep[0].split('.')[:-1]), ep[1])  # remove extension
+    #             if not ep[0] in episodes:
+    #                 episodes[ep[0]] = []
+    #             episodes[ep[0]].append(ep[1])
+    #     for ep in episodes.keys():
+    #         itemlist.append(item.clone(title=ep, urls=episodes[ep], action='findvideos', data=''))
+    #     return itemlist
 
     total_servers = support.server(item, data=item.data)
     if item.contentType == 'episode' and len(set([srv.server for srv in total_servers])) < len([srv.server for srv in total_servers]):
diff --git a/channels/streamingita.json b/channels/streamingita.json
index 940536e8..52e2fa18 100644
--- a/channels/streamingita.json
+++ b/channels/streamingita.json
@@ -2,7 +2,7 @@
   "id": "streamingita",
   "name": "StreamingITA",
   "language": ["ita"],
-  "active": true,
+  "active": false,
   "thumbnail": "streamingita.png",
   "banner": "streamingita.png",
   "categories": ["tvshow", "movie"],
diff --git a/lib/proxytranslate.py b/lib/proxytranslate.py
index e6025f15..addf83e9 100644
--- a/lib/proxytranslate.py
+++ b/lib/proxytranslate.py
@@ -98,6 +98,7 @@ def process_request_proxy(url):
     data = re.sub('https://translate\.googleusercontent\.com/.*?u=(.*?)&usg=[A-Za-z0-9_-]+', '\\1', data)
     data = re.sub('https?://[a-zA-Z0-9-]*' + domain.replace('.', '-') + '\.translate\.goog(/[a-zA-Z0-9#/-]+)', 'https://' + domain + '\\1', data)
     data = re.sub('\s+<', '<', data)
+    data = re.sub('https://translate\.google\.com/website\?[^ ]+u=', '', data)
     data = data.replace('&amp;', '&').replace('https://translate.google.com/website?sl=' + SL + '&tl=' + TL + '&ajax=1&u=', '')
 
     return {'url': url.strip(), 'result': result, 'data': data}
diff --git a/lib/unshortenit.py b/lib/unshortenit.py
index 3c5d5c73..393d3b0c 100644
--- a/lib/unshortenit.py
+++ b/lib/unshortenit.py
@@ -51,9 +51,10 @@ class UnshortenIt(object):
     _filecrypt_regex = r'filecrypt\.cc'
 
     listRegex = [_adfly_regex, _linkbucks_regex, _adfocus_regex, _lnxlu_regex, _shst_regex, _hrefli_regex, _anonymz_regex,
-                 _shrink_service_regex, _rapidcrypt_regex, _simple_iframe_regex, _linkup_regex, _linkhub_regex,
+                 _shrink_service_regex, _rapidcrypt_regex, _simple_iframe_regex, _linkup_regex,
                  _swzz_regex, _stayonline_regex, _snip_regex, _linksafe_regex, _protectlink_regex, _uprot_regex, _simple_redirect,
-                 _filecrypt_regex]
+                 ]
+    folderRegex = [_filecrypt_regex, _linkhub_regex]
 
     _maxretries = 5
 
@@ -95,8 +96,6 @@ class UnshortenIt(object):
             #     uri, code = self._unshorten_vcrypt(uri)
             if re.search(self._linkup_regex, uri, re.IGNORECASE):
                 uri, code = self._unshorten_linkup(uri)
-            if re.search(self._linkhub_regex, uri, re.IGNORECASE):
-                uri, code = self._unshorten_linkhub(uri)
             if re.search(self._swzz_regex, uri, re.IGNORECASE):
                 uri, code = self._unshorten_swzz(uri)
             if re.search(self._stayonline_regex, uri, re.IGNORECASE):
@@ -109,8 +108,6 @@ class UnshortenIt(object):
                 uri, code = self._unshorten_protectlink(uri)
             if re.search(self._uprot_regex, uri, re.IGNORECASE):
                 uri, code = self._unshorten_uprot(uri)
-            if re.search(self._filecrypt_regex, uri, re.IGNORECASE):
-                uri, code = self._unshorten_filecrypt(uri)
             if re.search(self._simple_redirect, uri, re.IGNORECASE):
                 p = httptools.downloadpage(uri)
                 uri = p.url
@@ -125,6 +122,14 @@ class UnshortenIt(object):
                 raise Exception('Not un-shortened link: ' + uri)
         return uri, code
 
+    def expand_folder(self, uri):
+        links = []
+        if re.search(self._linkhub_regex, uri, re.IGNORECASE):
+            links = self._unshorten_linkhub(uri)
+        if re.search(self._filecrypt_regex, uri, re.IGNORECASE):
+            links = self._unshorten_filecrypt(uri)
+        return links
+
     def unwrap_30x(self, uri, timeout=10):
 
         def unwrap_30x(uri, timeout=10):
@@ -602,13 +607,9 @@ class UnshortenIt(object):
             logger.info(uri)
             r = httptools.downloadpage(uri, follow_redirect=True, timeout=self._timeout, cookies=False)
             links = re.findall('<a href="(http[^"]+)', r.data)
-            if len(links) == 1:
-                uri = links[0]
-            else:
-                uri = "\n".join(links)  # folder
-            return uri, r.code
+            return links
         except Exception as e:
-            return uri, str(e)
+            return []
 
     def _unshorten_swzz(self, uri):
         try:
@@ -705,19 +706,24 @@ class UnshortenIt(object):
                 return link, 200
         return uri, 200
 
-    # container, for returning only the first result
     def _unshorten_filecrypt(self, uri):
-        url = ''
+        import sys
+        if sys.version_info[0] >= 3:
+            from concurrent import futures
+        else:
+            from concurrent_py2 import futures
+        links = []
         try:
             fc = FileCrypt(uri)
-            url = fc.unshorten(fc.list_files()[0][1])
+            # links = [fc.unshorten(f[2]) for f in fc.list_files()]
+            with futures.ThreadPoolExecutor() as executor:
+                unshList = [executor.submit(fc.unshorten, f[2]) for f in fc.list_files()]
+                for link in futures.as_completed(unshList):
+                    links.append(link.result())
         except:
             import traceback
             logger.error(traceback.format_exc())
-        if url:
-            return url, 200
-        else:
-            return uri, 200
+        return links
 
 
 def decrypt_aes(text, key):
@@ -758,9 +764,30 @@ def unshorten(uri, type=None, timeout=10):
 
 
 def findlinks(text):
+    import sys
+    if sys.version_info[0] >= 3:
+        from concurrent import futures
+    else:
+        from concurrent_py2 import futures
+
     unshortener = UnshortenIt()
     matches = []
+    # expand folders
+    regex = '(?:https?://(?:[\w\d]+\.)?)?(?:'
+    for rg in unshortener.folderRegex:
+        regex += rg + '|'
+    regex = regex[:-1] + ')/[a-zA-Z0-9_=/]+'
+    for match in re.findall(regex, text):
+        matches.append(match)
+
+    with futures.ThreadPoolExecutor() as executor:
+        unshList = [executor.submit(unshortener.expand_folder, match) for match in matches]
+        for ret in futures.as_completed(unshList):
+            text += '\n'.join(ret.result())
+
+    # unshorten
+    matches = []
 
     regex = '(?:https?://(?:[\w\d]+\.)?)?(?:'
     for rg in unshortener.listRegex:
         regex += rg + '|'
@@ -776,11 +803,6 @@ def findlinks(text):
     # for match in matches:
     #     sh = unshorten(match)[0]
     #     text += '\n' + sh
-    import sys
-    if sys.version_info[0] >= 3:
-        from concurrent import futures
-    else:
-        from concurrent_py2 import futures
     with futures.ThreadPoolExecutor() as executor:
         unshList = [executor.submit(unshorten, match) for match in matches]
         for link in futures.as_completed(unshList):
@@ -795,22 +817,31 @@ def findlinks(text):
 
 
 class FileCrypt:
+    toFilter = ('https://nitroflare.com',)
+
     def __init__(self, uri=None):
         self.uri = uri
 
     def find(self, data):
-        _filecrypt_regex = r'https?://\w+\.filecrypt\.cc/[a-zA-Z0-9_=/]+'
+        _filecrypt_regex = r'https?://(?:\w+\.)?filecrypt\.cc/[a-zA-Z0-9_=/]+'
         return scrapertools.find_multiple_matches(data, _filecrypt_regex)
 
     def list_files(self):
-        reg = """<td title="([^"]+).*?<button onclick="openLink\('([^']+)"""
+        reg = """<td title="([^"]+).*?<a href="([^"]+).*?<button onclick="openLink\('([^']+)"""
         data = httptools.downloadpage(self.uri).data
+        if 'Richiamo alla sicurezza' in data:  # captcha, try with gtranslate
+            from lib import proxytranslate
+            data = proxytranslate.process_request_proxy(self.uri).get('data', '')
         ret = scrapertools.find_multiple_matches(data, reg)
-        return ret
+        return [r for r in ret if r[1] not in self.toFilter]
 
     def unshorten(self, link):
-        link_data = httptools.downloadpage('https://www.filecrypt.cc/Link/' + link + '.html').data
-        time.sleep(0.1)
-        url = httptools.downloadpage(scrapertools.find_single_match(link_data, "location.href='([^']+)"), headers={'Referer': 'http://www.filecrypt.cc/'}, only_headers=True).url
-        logger.info(url)
-        return url
+        link_data = httptools.downloadpage('https://filecrypt.cc/Link/' + link + '.html').data
+        link_url = scrapertools.find_single_match(link_data, "location.href='([^']+)")
+        if link_url:
+            time.sleep(0.1)
+            url = httptools.downloadpage(link_url, headers={'Referer': 'http://filecrypt.cc/'}, only_headers=True).url
+            logger.info(url)
+            return url
+        else:
+            return ''
diff --git a/servers/hexupload.json b/servers/hexupload.json
new file mode 100644
index 00000000..ea3964e1
--- /dev/null
+++ b/servers/hexupload.json
@@ -0,0 +1,26 @@
+{
+    "active": true,
+    "find_videos": {
+        "ignore_urls": [],
+        "patterns": [{
+                "pattern": "hexupload\\.net/([A-z0-9]{12})",
+                "url": "https://hexupload.net/\\1"
+            },
+            {
+                "pattern": "hexupload\\.net/embed-([A-z0-9]{12}).html",
+                "url": "https://hexupload.net/\\1"
+            }
+        ]
+    },
+    "free": true,
+    "id": "hexupload",
+    "name": "HexUpload",
+    "settings": [{
+        "default": false,
+        "enabled": true,
+        "id": "black_list",
+        "label": "@70708",
+        "type": "bool",
+        "visible": true
+    }]
+}
\ No newline at end of file
diff --git a/servers/hexupload.py b/servers/hexupload.py
new file mode 100644
index 00000000..ed0f3f77
--- /dev/null
+++ b/servers/hexupload.py
@@ -0,0 +1,29 @@
+from core import httptools, scrapertools
+from platformcode import config
+import base64
+
+
+def test_video_exists(page_url):
+    global data
+    data = httptools.downloadpage(page_url).data
+
+    if 'File Not Found' in data:
+        return False, config.get_localized_string(70449) % "HexUpload"
+    else:
+        return True, ""
+
+
+def get_video_url(page_url, premium=False, user="", password="", video_password=""):
+    video_urls = []
+    global data
+    source = scrapertools.find_single_match(data, r'b4aa\.buy\("([^"]+)')
+    if source:
+        media_url = base64.b64decode(source).decode()
+        video_urls.append(["mp4", media_url])
+    return video_urls
+
+
+def get_filename(page_url):
+    # from core.support import dbg;dbg()
+    title = httptools.downloadpage(page_url).data.split('<h2 style="word-break: break-all;">')[1].split('</h2>')[0]
+    return title
diff --git a/servers/mixdrop.json b/servers/mixdrop.json
index 3bd631a1..201530b5 100644
--- a/servers/mixdrop.json
+++ b/servers/mixdrop.json
@@ -4,11 +4,11 @@
     "ignore_urls": [],
     "patterns": [
       {
-        "pattern": "mixdrop[s]?.[^/]+/(?:f|e)/([a-z0-9]+)",
+        "pattern": "mixdro?p[s]?.[^/]+/(?:f|e)/([a-z0-9]+)",
         "url": "https://mixdrop.co/e/\\1"
       },
       {
-        "pattern": "(mixdrop[s]?.[^/]+/player\\.php\\?id=[a-z0-9-]+)",
+        "pattern": "(mixdro?p[s]?.[^/]+/player\\.php\\?id=[a-z0-9-]+)",
         "url": "https://\\1"
       }
     ]
diff --git a/servers/streamsb.json b/servers/streamsb.json
index 597adad3..ec353d0b 100644
--- a/servers/streamsb.json
+++ b/servers/streamsb.json
@@ -3,7 +3,7 @@
   "find_videos": {
     "ignore_urls": [],
     "patterns": [{
-      "pattern": "(?:streamsb|sblanh|sbembed|sbembed1|sbplay1|sbplay|pelistop|tubesb|playersb|embedsb|watchsb|streamas|sbfast|sbfull|viewsb|sbvideo|cloudemb|sbplay2|japopav|javplaya|ssbstream|sbthe|sbspeed|sbanh)\\.\\w{2,5}/(?:embed-|d/|e/)?([A-z0-9]+)",
+      "pattern": "(?:streamsb|sblanh|sbembed|sbembed1|sbplay1|sbplay|pelistop|tubesb|playersb|embedsb|watchsb|streamas|sbfast|sbfull|viewsb|sbvideo|cloudemb|sbplay2|japopav|javplaya|ssbstream|sbthe|sbspeed|sbanh|sblongvu|sbchill)\\.\\w{2,5}/(?:embed-|d/|e/)?([A-z0-9]+)",
       "url": "https://streamsb.net/d/\\1"
     },
     {
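
Note on the unshortenit.py change: findlinks() now works in two passes. Folder services (filecrypt.cc and linkhub.to), which hide a whole list of links behind a single URL, move out of listRegex into folderRegex; expand_folder() turns each folder into the links it contains and appends them to the text, and only then does the usual concurrent unshorten pass run over every candidate. A minimal standalone sketch of that two-pass pattern, assuming Python 3's concurrent.futures (the patch itself also supports Python 2 via concurrent_py2); expand_folder() and unshorten() below are hypothetical stand-ins, not the library code:

    from concurrent.futures import ThreadPoolExecutor, as_completed
    import re

    FOLDER_RE = re.compile(r'https?://(?:\w+\.)?filecrypt\.cc/[a-zA-Z0-9_=/]+')

    def expand_folder(url):
        # hypothetical stand-in: the real code scrapes the container page
        return ['https://example.com/file1', 'https://example.com/file2']

    def unshorten(url):
        # hypothetical stand-in: the real code follows the redirect chain
        return url

    def findlinks(text):
        # pass 1: expand folder containers concurrently, feeding results back into text
        with ThreadPoolExecutor() as pool:
            jobs = [pool.submit(expand_folder, f) for f in FOLDER_RE.findall(text)]
            for job in as_completed(jobs):
                text += '\n' + '\n'.join(job.result())
        # pass 2: unshorten every candidate link concurrently
        with ThreadPoolExecutor() as pool:
            jobs = [pool.submit(unshorten, u) for u in re.findall(r'https?://\S+', text)]
            return [job.result() for job in as_completed(jobs)]

    print(findlinks('episodes: https://www.filecrypt.cc/Container/ABCDEF.html'))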
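
Note on the FileCrypt changes: list_files() now captures three groups per container row (file title, direct mirror href, openLink id) instead of two, drops rows whose href exactly matches an entry in toFilter (currently only the bare nitroflare.com URL), and unshorten() is fed the third group (f[2], the openLink id) rather than the second. When the container answers with its 'Richiamo alla sicurezza' captcha page, the page is re-fetched through the Google Translate proxy from lib/proxytranslate.py. A small sketch of the row parsing and filtering, using the regex from the patch against canned sample HTML (the sample row is illustrative):

    import re

    # one illustrative row of a filecrypt container page
    sample = """<td title="Show.1x01.mkv"><a href="https://nitroflare.com">mirror</a><button onclick="openLink('abc123')">"""

    reg = r"""<td title="([^"]+).*?<a href="([^"]+).*?<button onclick="openLink\('([^']+)"""
    toFilter = ('https://nitroflare.com',)

    rows = re.findall(reg, sample)
    print(rows)                                       # [('Show.1x01.mkv', 'https://nitroflare.com', 'abc123')]
    print([r for r in rows if r[1] not in toFilter])  # [] -> blacklisted mirror rows are dropped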
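
Note on the new HexUpload resolver: the embed page hands the stream to its player through a call like b4aa.buy("<base64>"), where the payload is just the base64-encoded mp4 URL, so get_video_url() extracts the argument and decodes it. A sketch of that extraction against canned sample data (the page snippet and decoded URL are illustrative, not a live response):

    import base64
    import re

    sample_page = 'player.setup(); b4aa.buy("aHR0cHM6Ly9leGFtcGxlLmNvbS92aWRlby5tcDQ=");'

    match = re.search(r'b4aa\.buy\("([^"]+)', sample_page)
    if match:
        media_url = base64.b64decode(match.group(1)).decode()
        print([["mp4", media_url]])  # [['mp4', 'https://example.com/video.mp4']]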