diff --git a/channels/cinemalibero.py b/channels/cinemalibero.py index e7be9b5a..1500fb80 100644 --- a/channels/cinemalibero.py +++ b/channels/cinemalibero.py @@ -7,6 +7,7 @@ import re from core import httptools, support, scrapertools from core.item import Item +from core.support import typo from platformcode import config import sys if sys.version_info[0] >= 3: @@ -57,13 +58,13 @@ def peliculas(item): patron = r'
[^>]+>[^>]+>\s[^\)]+)\)">[^>]+>(?P[^<]+)<[^>]+>[^>]+>(?:[^>]+>)?\s?(?P<rating>[\d\.]+)?[^>]+>.+?(?:[ ]\((?P<year>\d{4})\))?<[^>]+>[^>]+>(.?[\d\-x]+\s\(?(?P<lang>[sSuUbBiItTaA\-]+)?\)?\s?(?P<quality>[\w]+)?[|]?\s?(?:[fFiInNeE]+)?\s?\(?(?P<lang2>[sSuUbBiItTaA\-]+)?\)?)?' pagination = 25 elif item.contentType == 'movie': - action = 'findvideos' + # action = 'findvideos' patron = r'<a href="(?P<url>[^"]+)" title="(?P<title>.+?)(?:[ ]\[(?P<lang>[sSuUbB\-iItTaA]+)\])?(?:[ ]\((?P<year>\d{4})\))?"\s*alt="[^"]+"\s*class="[^"]+"(?: style="background-image: url\((?P<thumb>.+?)\)">)?\s*<div class="voto">[^>]+>[^>]+>.(?P<rating>[\d\.a-zA-Z\/]+)?[^>]+>[^>]+>[^>]+>(?:<div class="genere">(?P<quality>[^<]+)</div>)?' if item.args == 'update': patronBlock = r'<section id="slider">(?P<block>.*?)</section>' patron = r'<a href="(?P<url>(?:https:\/\/.+?\/(?P<title>[^\/]+[a-zA-Z0-9\-]+)(?P<year>\d{4})))/".+?url\((?P<thumb>[^\)]+)\)">' elif item.contentType == 'tvshow': - action = 'episodios' + # action = 'episodios' if item.args == 'update': patron = r'<a href="(?P<url>[^"]+)"[^<]+?url\((?P<thumb>.+?)\)">\s*?<div class="titolo">(?P<title>.+?)(?: – Serie TV)?(?:\([sSuUbBiItTaA\-]+\))?[ ]?(?P<year>\d{4})?</div>\s*?(?:<div class="genere">)?(?:[\w]+?\.?\s?[\s|S]?[\dx\-S]+?\s\(?(?P<lang>[iItTaA]+|[sSuUbBiItTaA\-]+)\)?\s?(?P<quality>[HD]+)?|.+?\(?(?P<lang2>[sSuUbBiItTaA\-]+)?\)?</div>)' pagination = 25 @@ -94,7 +95,7 @@ def peliculas(item): @support.scrape def episodios(item): data = item.data - # debug = True + # debugBlock = True if item.args == 'anime': support.info("Anime :", item) # blacklist = ['Clipwatching', 'Verystream', 'Easybytez', 'Flix555', 'Cloudvideo'] @@ -134,7 +135,7 @@ def episodios(item): with futures.ThreadPoolExecutor() as executor: for s in servers: executor.submit(get_ep, s) - ret.extend([it.clone(title=ep, contentSeason=int(ep.split('x')[0]), contentEpisodeNumber=int(ep.split('x')[1]), servers=[srv.tourl() for srv in episodes[ep]]) for ep in episodes]) + 
ret.extend([it.clone(title=ep+typo(it.contentLanguage, '_ [] color kod'), contentSeason=int(ep.split('x')[0]), contentEpisodeNumber=int(ep.split('x')[1]), servers=[srv.tourl() for srv in episodes[ep]]) for ep in episodes]) else: ret.append(it) return sorted(ret, key=lambda i: i.title) @@ -144,7 +145,6 @@ def episodios(item): @support.scrape def genres(item): - action='peliculas' patron_block=r'<div id="bordobar" class="dropdown-menu(?P<block>.*?)</li>' patronMenu=r'<a class="dropdown-item" href="(?P<url>[^"]+)" title="(?P<title>[A-z]+)"' @@ -191,8 +191,6 @@ def newest(categoria): def check(item): support.info() - # support.dbg() - from platformcode.launcher import run data = support.match(item.url, headers=headers).data if data: ck = support.match(data, patron=r'Supportaci condividendo quest[oa] ([^:]+)').match.lower() @@ -207,13 +205,17 @@ def check(item): item.contentType = 'tvshow' item.args = 'anime' item.data = data - return episodios(item) + itemlist = episodios(item) + if not itemlist: + item.data = data + item.action = 'findvideos' + return findvideos(item) elif ck == 'film': item.contentType = 'movie' item.data = data item.action = 'findvideos' - return run(item) + return findvideos(item) else: item.contentType = 'tvshow' @@ -223,7 +225,7 @@ def check(item): item.contentType = 'movie' item.data = data item.action = 'findvideos' - return run(item) + return findvideos(item) @@ -240,9 +242,27 @@ def findvideos(item): if item.servers: return support.server(item, itemlist=[Item().fromurl(s) for s in item.servers]) if not item.data: - item.data = support.match(item, patron='<p>\s*<strong>\s*<u>.*?</p>').match + item.data = httptools.downloadpage(item.url).data + item.data = scrapertools.find_single_match(item.data, '<div class="at-above-post addthis_tool"(.*?)<div class="at-below-post') + servers = [] + if item.args == 'anime': + if item.urls: # this is an episode + return support.server(item, itemlist=[Item(url=support.unshortenit.FileCrypt().unshorten(u)) for u in 
item.urls]) + itemlist = [] + episodes = {} + # support.dbg() + for uri in support.unshortenit.FileCrypt().find(item.data): + for ep in support.unshortenit.FileCrypt(uri).list_files(): + ep = ('.'.join(ep[0].split('.')[:-1]), ep[1]) # remove extension + if not ep[0] in episodes: + episodes[ep[0]] = [] + episodes[ep[0]].append(ep[1]) + for ep in episodes.keys(): + itemlist.append(item.clone(title=ep, urls=episodes[ep], action='findvideos', data='')) + return itemlist total_servers = support.server(item, data=item.data) + if item.contentType == 'episode' and len(set([srv.server for srv in total_servers])) < len([srv.server for srv in total_servers]): # i link contengono più puntate, cerco quindi quella selezionata with futures.ThreadPoolExecutor() as executor: diff --git a/lib/unshortenit.py b/lib/unshortenit.py index 729cf154..25b83381 100644 --- a/lib/unshortenit.py +++ b/lib/unshortenit.py @@ -695,14 +695,13 @@ class UnshortenIt(object): return httptools.downloadpage(uri, only_headers=True, follow_redirects=False).headers.get('location', uri), 200 def _unshorten_uprot(self, uri): - # from core.support import dbg - # dbg() for link in scrapertools.find_multiple_matches(httptools.downloadpage(uri, cloudscraper=True).data, '<a[^>]+href="([^"]+)'): if link.startswith('https://maxstream.video') or link.startswith('https://uprot.net') and link != uri: return link, 200 return uri, 200 + def decrypt_aes(text, key): try: from Cryptodome.Cipher import AES @@ -775,3 +774,25 @@ def findlinks(text): else: text += '\n' + str(link.result()[0]) return text + + +class FileCrypt: + def __init__(self, uri=None): + self.uri = uri + + def find(self, data): + _filecrypt_regex = r'https?://\w+\.filecrypt\.cc/[a-zA-Z0-9_=/]+' + return scrapertools.find_multiple_matches(data, _filecrypt_regex) + + def list_files(self): + reg = """<td title="([^"]+).*?<button onclick="openLink\('([^']+)""" + data = httptools.downloadpage(self.uri).data + ret = scrapertools.find_multiple_matches(data, reg) 
+ return ret + + def unshorten(self, link): + link_data = httptools.downloadpage('https://www.filecrypt.cc/Link/' + link + '.html').data + time.sleep(0.1) + url = httptools.downloadpage(scrapertools.find_single_match(link_data, "location.href='([^']+)"), headers={'Referer': 'http://www.filecrypt.cc/'}, only_headers=True).url + logger.info(url) + return url diff --git a/servers/rapidgator.json b/servers/rapidgator.json deleted file mode 100644 index f3288ce4..00000000 --- a/servers/rapidgator.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "active": true, - "find_videos": { - "ignore_urls": [], - "patterns": [ - { - "pattern": "(rapidgator.net/file/\\w+(?:\\.html|))", - "url": "http://\\1" - } - ] - }, - "free": false, - "id": "rapidgator", - "name": "Rapid Gator", - "premium": [ - "realdebrid", - "alldebrid" - ], - "settings": [ - { - "default": false, - "enabled": true, - "id": "black_list", - "label": "@70708", - "type": "bool", - "visible": true - } - ] -} diff --git a/servers/rapidgator.py b/servers/rapidgator.py deleted file mode 100644 index fdc262e3..00000000 --- a/servers/rapidgator.py +++ /dev/null @@ -1,13 +0,0 @@ -# -*- coding: utf-8 -*- - -from platformcode import logger - - -def test_video_exists(page_url): - return True, "" - - -def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.debug("(page_url='%s')" % page_url) - video_urls = [] - return video_urls