Partial fix for cinemalibero (SerieTV)
@@ -8,7 +8,11 @@ import re

 from core import httptools, support, scrapertools
 from core.item import Item
 from platformcode import config

+import sys
+if sys.version_info[0] >= 3:
+    from concurrent import futures
+else:
+    from concurrent_py2 import futures

 # redirects to .today, which contains all the links to .plus
 # def findhost(url):
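Both branches of this import expose the same executor API (`concurrent_py2` being the bundled Python 2 backport of `concurrent.futures`); a minimal usage sketch, assuming Python 3:

    from concurrent import futures

    # fan three tasks out to a thread pool and gather the results in order
    with futures.ThreadPoolExecutor() as executor:
        results = list(executor.map(str.upper, ['a', 'b', 'c']))
    print(results)  # ['A', 'B', 'C']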
@@ -89,26 +93,51 @@ def peliculas(item):


 @support.scrape
 def episodios(item):
-    data=item.data
-    # debug=True
+    data = item.data
+    # debug = True
     if item.args == 'anime':
         support.info("Anime :", item)
         # blacklist = ['Clipwatching', 'Verystream', 'Easybytez', 'Flix555', 'Cloudvideo']
         patron = r'<a target=(?P<url>[^>]+>(?P<title>Episodio\s(?P<episode>\d+))(?::)?(?:(?P<title2>[^<]+))?.*?(?:<br|</p))'
         patronBlock = r'(?:Stagione (?P<season>\d+))?(?:</span><br />|</span></p>|strong></p>)(?P<block>.*?)(?:<div style="margin-left|<span class="txt_dow">)'
         item.contentType = 'tvshow'
-    elif item.args == 'serie':
+    elif item.args == 'serie' or item.contentType == 'tvshow':
         support.info("Serie :", item)
-        patron = r'(?:>| )(?P<episode>\d+(?:x|×|×)\d+)[;]?[ ]?(?:(?P<title>[^<–-]+)(?P<data>.*?)|(\2[ ])(?:<(\3.*?)))(?:</a><br /|</a></p|$)'
+        patron = r'(?:>| )(?P<episode>\d+(?:x|×|×)\d+|Puntata \d+)[;]?[ ]?(?:(?P<title>[^<–-]+)?(?P<data>.*?)|(\2[ ])(?:<(\3.*?)))(?:</a><br /|</a></p|$)|(?P<stagione>.+)'
         patronBlock = r'>(?:[^<]+[Ss]tagione\s|[Ss]tagione [Uu]nica)(?:(?P<lang>iTA|ITA|Sub-ITA|Sub-iTA))?.*?</strong>(?P<block>.+?)(?:<strong|<div class="at-below)'
         item.contentType = 'tvshow'
     else:
         patron = r'(?P<title>\s*[0-9]{2}/[0-9]{2}/[0-9]{4})(?P<data>.*?)(?:<br|</p)'

-    def itemHook(item):
-        if not scrapertools.find_single_match(item.title, r'(\d+x\d+)'):
-            item.title = re.sub(r'(\d+) -', '1x\\1', item.title)
-        return item
+    def itemHook(it):
+        if not scrapertools.find_single_match(it.title, r'(\d+x\d+)'):
+            it.title = re.sub(r'(\d+) -', '1x\\1', it.title)
+        return it
+
+    def itemlistHook(itl):
+        ret = []
+        for it in itl:
+            if it.stagione:  # whole season behind a single link list
+                def get_ep(s):
+                    srv_mod = __import__('servers.%s' % s.server, None, None, ["servers.%s" % s.server])
+                    if hasattr(srv_mod, 'get_filename'):
+                        title = srv_mod.get_filename(s.url)
+                        ep = scrapertools.get_season_and_episode(title)
+                        if ep:
+                            if ep not in episodes:
+                                episodes[ep] = []
+                            episodes[ep].append(s)
+
+                servers = support.server(item, it.stagione, AutoPlay=False, CheckLinks=False, Download=False, Videolibrary=False)
+                episodes = {}
+
+                # get each episode number from the hosted file name, in parallel
+                with futures.ThreadPoolExecutor() as executor:
+                    for s in servers:
+                        executor.submit(get_ep, s)
+
+                ret.extend([it.clone(title=ep, contentSeason=int(ep.split('x')[0]), contentEpisodeNumber=int(ep.split('x')[1]), servers=[srv.tourl() for srv in episodes[ep]]) for ep in episodes])
+            else:
+                ret.append(it)
+        return sorted(ret, key=lambda i: i.title)

     return locals()
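The new `itemlistHook` turns a single whole-season item into one item per episode: it resolves every hosted link, asks the server module for the file name, parses the `SxEE` marker out of it, and groups the links per episode. A self-contained sketch of that grouping step (file names and server ids are made up; `get_season_and_episode` stands in for the scrapertools helper):

    import re
    from concurrent.futures import ThreadPoolExecutor

    def get_season_and_episode(title):
        # stand-in for scrapertools.get_season_and_episode
        m = re.search(r'(\d+)\s*x\s*(\d+)', title, re.I)
        return '' if not m else str(int(m.group(1))) + 'x' + str(int(m.group(2))).zfill(2)

    # hypothetical resolved file names, one per hosted link
    filenames = {'srv1': 'Show 1x01.mkv', 'srv2': 'Show 1x02.mkv', 'srv3': 'Show 1x01 mirror.mkv'}
    episodes = {}

    def get_ep(server):
        ep = get_season_and_episode(filenames[server])
        if ep:
            episodes.setdefault(ep, []).append(server)

    with ThreadPoolExecutor() as executor:
        for s in filenames:
            executor.submit(get_ep, s)

    print(sorted(episodes))  # ['1x01', '1x02']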
@@ -138,6 +167,7 @@ def search(item, texto):
         support.info("%s" % line)
     return []


 def newest(categoria):
     support.info('newest ->', categoria)
     itemlist = []
@@ -198,6 +228,29 @@ def check(item):


 def findvideos(item):
+    def filter_ep(s):
+        srv_mod = __import__('servers.%s' % s.server, None, None, ["servers.%s" % s.server])
+        if hasattr(srv_mod, 'get_filename'):
+            title = srv_mod.get_filename(s.url)
+            # support.dbg()
+            if scrapertools.get_season_and_episode(title) == str(item.contentSeason) + "x" + str(item.contentEpisodeNumber).zfill(2):
+                servers.append(s)
+
     support.info()
     item.data = item.data.replace('http://rapidcrypt.net/verys/', '').replace('http://rapidcrypt.net/open/', '')  # blocks the search
-    return support.server(item, data=item.data)
+    if item.servers:
+        return support.server(item, itemlist=[Item().fromurl(s) for s in item.servers])
+    if not item.data:
+        item.data = support.match(item, patron='<p>\s*<strong>\s*<u>.*?</p>').match
+    servers = []
+    total_servers = support.server(item, data=item.data)
+    if item.contentType == 'episode' and len(set([srv.server for srv in total_servers])) < len([srv.server for srv in total_servers]):
+        # the links bundle several episodes, so pick out the selected one
+        with futures.ThreadPoolExecutor() as executor:
+            for s in total_servers:
+                if s.server:
+                    executor.submit(filter_ep, s)
+                else:
+                    servers.append(s)
+        return servers
+    else:
+        return total_servers
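The trigger for the per-episode filtering above is the duplicate test on hoster names: the same hoster appearing more than once among one episode's links is taken to mean the page bundles a whole batch of episodes. The heuristic in isolation:

    # same hoster listed twice -> the links probably cover several episodes
    server_names = ['mixdrop', 'streamtape', 'mixdrop']
    print(len(set(server_names)) < len(server_names))  # True -> filter by file name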
@@ -26,10 +26,10 @@ def mainlist(item):

 @support.scrape
 def peliculas(item):
-    # debug = True
-    patronBlock = r'movies-list movies-list-full(?P<block>.*?)footer>'
+    debug = True
+    # patronBlock = r'movies-list movies-list-full(?P<block>.*?)footer>'
     if item.args == 'search':
-        patron = r'<div data-movie-id[^>]+>\s*<a href="(?P<url>[^"]+)"[^>]+>(?:\s*<span class="mli-quality">(?P<quality>[^>]+)</span>)?\s*<img src="(?P<thumbnail>[^"]+)[^>]+>[^>]+>[^>]+>(?P<title>[^<]+).*?jt-info[^>]+>[^:]+:\s*(?P<rating>[^<]+)[^>]+>[^>]+>[^>]+>(?P<year>\d*)[^>]+>[^>]+>[^>]+>(?P<duration>\d*).*?"f-desc">\s*<p>(?P<plot>[^<]+)'
+        patron = r'<div data-movie-id[^>]+>\s*<a href="(?P<url>[^"]+)"[^>]+>(?:\s*<span class="mli-quality">(?P<quality>[^>]+)</span>)?\s*<img src="(?P<thumbnail>[^"]+)[^>]+>[^>]+>[^>]+>(?P<title>[^<]+).*?jt-info[^>]+>[^:]+:\s*(?P<rating>[^<]+)[^>]+>[^>]+>[^>]+>(?P<year>\d*)[^>]+>[^>]+>[^>]+>(?P<duration>\d*).*?"f-desc">(?:\s*<p>(?P<plot>[^<]+))?'
     else:
         patron = r'<div data-movie-id[^>]+>\s*<a href="(?P<url>[^"]+)"[^>]+>[^>]+>[^>]+><img src="(?P<thumbnail>[^"]+)[^>]+>[^>]+>[^>]+>[^>]+>(?P<title>[^<]+).*?jt-info[^>]+>[^:]+:\s*(?P<rating>[^<]+)[^>]+>[^>]+>[^>]+>(?P<year>\d*)[^>]+>[^>]+>[^>]+>(?P<duration>\d*)'
     patronNext = '<li class=.active.>.*?href=.(.*?).>'
@@ -438,15 +438,17 @@ def get_season_and_episode(title):
     @return: season and episode number in "1x01" format, or an empty string if not found
     """
     filename = ""

-    patrons = ["(\d+)\s*[x-]\s*(\d+)", "(\d+)\s*×\s*(\d+)", "(?:[Ss]|[Tt])(\d+)\s?(?:[Ee]|Ep\.?)(\d+)",
-               "(?:[Ss]tag|[Ss]eason|[Ss]tagione\w*)\s*(\d+)\s*(?:[Ee]pi|[Ee]pisode|[Ee]pisodio\w*)\s*(\d+)"]
+    patrons = ["[ .](\d+)\s*[x-]\s*(\d+)[ .]", "(\d+)\s*×\s*(\d+)", "(?:s|t)(\d+)[ .]?(?:e|Ep\.?)(\d+)",
+               "(?:(?:stag|season|stagione\w*)\s*(\d+))?\s*(?:ep|epi|epis|episod[ioe]?|puntata)[ .-]*(\d+)"]

     for patron in patrons:
         try:
             matches = re.compile(patron, re.I).search(title)
             if matches:
-                filename = str(int(matches.group(1))) + "x" + str(int(matches.group(2))).zfill(2)
+                season = matches.group(1)
+                if not season:
+                    season = 1
+                filename = str(int(season)) + "x" + str(int(matches.group(2))).zfill(2)
                 break
         except:
             pass
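The reworked patterns make the season group optional and accept bare `Puntata`/`ep` style numbering, with the season defaulting to 1. A self-contained spot check of the new matching loop (copied from the lines above, minus the KOD plumbing):

    import re

    patrons = [r"[ .](\d+)\s*[x-]\s*(\d+)[ .]", r"(\d+)\s*×\s*(\d+)", r"(?:s|t)(\d+)[ .]?(?:e|Ep\.?)(\d+)",
               r"(?:(?:stag|season|stagione\w*)\s*(\d+))?\s*(?:ep|epi|epis|episod[ioe]?|puntata)[ .-]*(\d+)"]

    def get_season_and_episode(title):
        for patron in patrons:
            m = re.compile(patron, re.I).search(title)
            if m:
                season = m.group(1) or 1
                return str(int(season)) + "x" + str(int(m.group(2))).zfill(2)
        return ""

    print(get_season_and_episode("Show 2x05.mkv"))  # 2x05
    print(get_season_and_episode("S01E02"))         # 1x02
    print(get_season_and_episode("Puntata 5"))      # 1x05 (season defaults to 1)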
@@ -44,14 +44,15 @@ class UnshortenIt(object):
     _snip_regex = r'[0-9a-z]+snip\.|uprotector\.xyz'
     _linksafe_regex = r'linksafe\.cc'
     _protectlink_regex = r'(?:s\.)?protectlink\.stream'
+    _uprot_regex = r'uprot\.net'
     # for services that only embed the real link inside an iframe
     _simple_iframe_regex = r'cryptmango|xshield\.net|vcrypt\.club|isecure\.link'
     # for services that only do redirects
-    _simple_redirect = r'streamcrypt\.net/[^/]+|uprot\.net|is\.gd'
+    _simple_redirect = r'streamcrypt\.net/[^/]+|is\.gd|www\.vedere\.stream'

     listRegex = [_adfly_regex, _linkbucks_regex, _adfocus_regex, _lnxlu_regex, _shst_regex, _hrefli_regex, _anonymz_regex,
                  _shrink_service_regex, _rapidcrypt_regex, _simple_iframe_regex, _linkup_regex, _linkhub_regex,
-                 _swzz_regex, _stayonline_regex, _snip_regex, _linksafe_regex, _protectlink_regex, _simple_redirect]
+                 _swzz_regex, _stayonline_regex, _snip_regex, _linksafe_regex, _protectlink_regex, _uprot_regex, _simple_redirect]

     _maxretries = 5
@@ -105,6 +106,8 @@ class UnshortenIt(object):
             uri, code = self._unshorten_linksafe(uri)
         if re.search(self._protectlink_regex, uri, re.IGNORECASE):
             uri, code = self._unshorten_protectlink(uri)
+        if re.search(self._uprot_regex, uri, re.IGNORECASE):
+            uri, code = self._unshorten_uprot(uri)
         if re.search(self._simple_redirect, uri, re.IGNORECASE):
             p = httptools.downloadpage(uri)
             uri = p.url
@@ -593,7 +596,11 @@ class UnshortenIt(object):
             uri = 'https://linkhub.icu/view/' + re.search('\.\./view/([^"]+)', r.data).group(1)
             logger.info(uri)
             r = httptools.downloadpage(uri, follow_redirect=True, timeout=self._timeout, cookies=False)
-            uri = re.search('<div id="text-url".*\n\s+<a href="([^"]+)', r.data).group(0)
+            links = re.findall('<a href="(http[^"]+)', r.data)
+            if len(links) == 1:
+                uri = links[0]
+            else:
+                uri = "\n".join(links)  # folder
             return uri, r.code
         except Exception as e:
             return uri, str(e)
@@ -641,6 +648,7 @@ class UnshortenIt(object):
         id = uri.split('/')[-2]
         reqUrl = 'https://stayonline.pro/ajax/linkView.php'
         p = urlencode({"id": id, "ref": ""})
+        time.sleep(1)
         r = httptools.downloadpage(reqUrl, post=p, headers={'Referer': uri})
         data = r.data
         try:
@@ -686,6 +694,15 @@ class UnshortenIt(object):
         else:
             return httptools.downloadpage(uri, only_headers=True, follow_redirects=False).headers.get('location', uri), 200

+    def _unshorten_uprot(self, uri):
+        # from core.support import dbg; dbg()  # leftover breakpoint, keep disabled
+        for link in scrapertools.find_multiple_matches(httptools.downloadpage(uri, cloudscraper=True).data, '<a[^>]+href="([^"]+)'):
+            if (link.startswith('https://maxstream.video') or link.startswith('https://uprot.net')) and link != uri:
+                return link, 200
+        return uri, 200
+

 def decrypt_aes(text, key):
     try:
         from Cryptodome.Cipher import AES
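One note on the filter in `_unshorten_uprot`: `and` binds tighter than `or`, so the condition needs the parentheses shown above; the unparenthesized form in the original commit would let any maxstream link through, including the input page itself. A quick demonstration of the difference:

    link = uri = 'https://maxstream.video/abc'

    # unparenthesized: A or (B and C) -- True even though link == uri
    print(link.startswith('https://maxstream.video')
          or link.startswith('https://uprot.net') and link != uri)    # True

    # parenthesized: (A or B) and C -- the input link is rejected
    print((link.startswith('https://maxstream.video')
           or link.startswith('https://uprot.net')) and link != uri)  # False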
@@ -186,6 +186,8 @@ def run(item=None):
     # the db must be closed when not in use, otherwise it causes freezes
     from core import db
     db.close()
+    import threading
+    logger.debug(threading.enumerate())


 def new_search(item, channel=None):
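`threading.enumerate()` lists every thread still alive, which is presumably the point of this debug line: spotting stray worker threads behind the freezes mentioned in the comment. For example:

    import threading

    t = threading.Timer(60, lambda: None)
    t.start()
    print(threading.enumerate())  # MainThread plus the pending Timer thread
    t.cancel()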
@@ -46,3 +46,11 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
         video_urls.append(["%s [Mixdrop]" % ext, media_url])

     return video_urls
+
+
+def get_filename(page_url):
+    title = httptools.downloadpage(page_url.replace('/e/', '/f/')).data.split('<title>')[1].split('</title>')[0]
+    prefix = 'MixDrop - Watch '
+    if title.startswith(prefix):
+        return title[len(prefix):]
+    return ""
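The `get_filename` helpers this commit adds to the server modules (here and in the hunks below) share one shape: fetch the page, slice out `<title>`, strip a known prefix. A hypothetical hardened variant that returns "" instead of raising `IndexError` when the tag is missing (it takes raw HTML so it runs standalone):

    import re

    def get_filename(data, prefix='MixDrop - Watch '):
        # data is the raw page HTML; returns the bare file name or ""
        m = re.search(r'<title>(.*?)</title>', data, re.S)
        if not m:
            return ""
        title = m.group(1)
        return title[len(prefix):] if title.startswith(prefix) else ""

    print(get_filename('<title>MixDrop - Watch Show 1x01.mkv</title>'))  # Show 1x01.mkv
    print(get_filename('<html></html>'))                                 # (empty)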
@@ -26,3 +26,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     media_url = data["result"]["Original"]["src"]
     video_urls.append(["MP4", media_url])
     return video_urls
+
+
+def get_filename(page_url):
+    from core import jsontools
+    file = jsontools.load(scrapertools.decodeHtmlentities(httptools.downloadpage(page_url).data.split(':file="')[1].split('"')[0]))
+    return file['name']
@@ -4,7 +4,7 @@
     "ignore_urls": [],
     "patterns": [
       {
-        "pattern": "(?:streamsb|sbembed|sbembed1|sbplay1|sbplay|pelistop|tubesb|playersb|embedsb).\\w{2,4}/(?:embed-|d/|e/)?([A-z0-9]+)",
+        "pattern": "(?:streamsb|sbembed|sbembed1|sbplay1|sbplay|pelistop|tubesb|playersb|embedsb|watchsb).\\w{2,4}/(?:embed-|d/|e/)?([A-z0-9]+)",
         "url": "https://streamsb.net/\\1"
       },
       {
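The only change in this pattern is the extra `watchsb` alternative in the domain group. A quick check that the updated pattern extracts the id from such an embed (the URL is made up for illustration):

    import re

    pattern = r"(?:streamsb|sbembed|sbembed1|sbplay1|sbplay|pelistop|tubesb|playersb|embedsb|watchsb).\w{2,4}/(?:embed-|d/|e/)?([A-z0-9]+)"
    m = re.search(pattern, "https://watchsb.com/e/abc123XYZ")
    print(m.group(1) if m else None)  # abc123XYZ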
@@ -34,3 +34,11 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     if media_url:
         video_urls.append([s[0], media_url])
     return video_urls
+
+
+def get_filename(page_url):
+    title = httptools.downloadpage(page_url).data.split('<title>')[1].split('</title>')[0]
+    prefix = 'Watch '
+    if title.startswith(prefix):
+        return title[len(prefix):]
+    return ""
@@ -36,3 +36,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
     video_urls.append(['MP4 [Streamtape]', url])
     return video_urls
+
+
+def get_filename(page_url):
+    return httptools.downloadpage(page_url).data.split('<meta name="og:title" content="')[1].split('"')[0]
@@ -34,3 +34,11 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     video_urls.append([" [Voe]", url])

     return video_urls
+
+
+def get_filename(page_url):
+    title = httptools.downloadpage(page_url).data.split('<title>')[1].split('</title>')[0]
+    prefix = 'Watch '
+    if title.startswith(prefix):
+        return title[len(prefix):]
+    return ""