diff --git a/channels/cineblog01.py b/channels/cineblog01.py
index 64ce9996..7aea5cc9 100644
--- a/channels/cineblog01.py
+++ b/channels/cineblog01.py
@@ -194,7 +194,6 @@ def findvideos(item):
def load_links(itemlist, re_txt, desc_txt, quality=""):
streaming = scrapertools.find_single_match(data, re_txt).replace('"', '')
- logger.debug('STREAMING', streaming)
logger.debug('STREAMING=', streaming)
matches = support.match(streaming, patron = r'<a href=([^ ]+)[^>]+>([^<]+)<').matches
for scrapedurl, scrapedtitle in matches:
@@ -218,12 +217,9 @@ def findvideos(item):
# Extract the contents - Streaming 3D
load_links(itemlist, 'Streamin?g 3D[^<]+(.*?)cbtable', "Streaming 3D")
- itemlist = support.server(item, itemlist=itemlist)
# Extract the quality format
patronvideos = r'([\w.]+) | '
- support.addQualityTag(item, itemlist, data, patronvideos)
-
- return support.server(item, itemlist=itemlist)
+ return support.server(item, itemlist=itemlist, patronTag=patronvideos)
# Extract the contents - Download
# load_links(itemlist, 'Download:(.*?)', "aqua", "Download")
diff --git a/channels/streamingcommunity.py b/channels/streamingcommunity.py
index b1f2972d..50aabb9d 100644
--- a/channels/streamingcommunity.py
+++ b/channels/streamingcommunity.py
@@ -263,7 +263,7 @@ def play(item):
info = support.match(url, patron=r'LANGUAGE="([^"]+)",\s*URI="([^"]+)|RESOLUTION=\d+x(\d+).*?(http[^"\s]+)').matches
if info:
for lang, sub, res, url in info:
- if sub:
+ if sub and not logger.testMode: # tests don't like this part
if lang == 'auto': lang = 'ita-forced'
s = config.get_temp_file(lang +'.srt')
subs.append(s)
diff --git a/lib/unshortenit.py b/lib/unshortenit.py
index bec346c6..11271085 100644
--- a/lib/unshortenit.py
+++ b/lib/unshortenit.py
@@ -17,7 +17,6 @@ from base64 import b64decode
from core import httptools, scrapertools
from platformcode import config, logger
-
def find_in_text(regex, text, flags=re.IGNORECASE | re.DOTALL):
rec = re.compile(regex, flags=flags)
match = rec.search(text)
@@ -48,11 +47,13 @@ class UnshortenIt(object):
# for services that only include real link inside iframe
_simple_iframe_regex = r'cryptmango|xshield\.net|vcrypt\.club|isecure\.link'
# for services that only do redirects
- _simple_redirect = r'streamcrypt\.net/[^/]+|is\.gd|www\.vedere\.stream'
+ _simple_redirect = r'streamcrypt\.net/[^/]+|is\.gd|www\.vedere\.stream|isecure\.link'
+ _filecrypt_regex = r'filecrypt\.cc'
listRegex = [_adfly_regex, _linkbucks_regex, _adfocus_regex, _lnxlu_regex, _shst_regex, _hrefli_regex, _anonymz_regex,
_shrink_service_regex, _rapidcrypt_regex, _simple_iframe_regex, _linkup_regex, _linkhub_regex,
- _swzz_regex, _stayonline_regex, _snip_regex, _linksafe_regex, _protectlink_regex, _uprot_regex, _simple_redirect]
+ _swzz_regex, _stayonline_regex, _snip_regex, _linksafe_regex, _protectlink_regex, _uprot_regex, _simple_redirect,
+ _filecrypt_regex]
_maxretries = 5
@@ -108,6 +109,8 @@ class UnshortenIt(object):
uri, code = self._unshorten_protectlink(uri)
if re.search(self._uprot_regex, uri, re.IGNORECASE):
uri, code = self._unshorten_uprot(uri)
+ if re.search(self._filecrypt_regex, uri, re.IGNORECASE):
+ uri, code = self._unshorten_filecrypt(uri)
if re.search(self._simple_redirect, uri, re.IGNORECASE):
p = httptools.downloadpage(uri)
uri = p.url
@@ -700,6 +703,19 @@ class UnshortenIt(object):
return link, 200
return uri, 200
+ # for container links, return only the first result
+ def _unshorten_filecrypt(self, uri):
+ url = ''
+ try:
+ fc = FileCrypt(uri)
+ url = fc.unshorten(fc.list_files()[0][1])
+ except:
+ import traceback
+ logger.error(traceback.format_exc())
+ if url:
+ return url, 200
+ else:
+ return uri, 200
def decrypt_aes(text, key):
diff --git a/servers/streamsb.json b/servers/streamsb.json
index 289e1b41..e233e353 100644
--- a/servers/streamsb.json
+++ b/servers/streamsb.json
@@ -4,7 +4,7 @@
"ignore_urls": [],
"patterns": [
{
- "pattern": "(?:streamsb|sbembed|sbembed1|sbplay1|sbplay|pelistop|tubesb|playersb|embedsb|watchsb|streamas|sbfast).\\w{2,5}/(?:embed-|d/|e/)?([A-z0-9]+)",
+ "pattern": "(?:streamsb|sbembed|sbembed1|sbplay1|sbplay|pelistop|tubesb|playersb|embedsb|watchsb|streamas|sbfast|sbfull|viewsb).\\w{2,5}/(?:embed-|d/|e/)?([A-z0-9]+)",
"url": "https://streamsb.net/d/\\1"
},
{
diff --git a/tests/test_generic.py b/tests/test_generic.py
index 74b7fd8b..1705beca 100644
--- a/tests/test_generic.py
+++ b/tests/test_generic.py
@@ -64,7 +64,7 @@ validUrlRegex = re.compile(
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
-chBlackList = ['url', 'mediasetplay', 'metalvideo', 'altadefinizionecommunity']
+chBlackList = ['url', 'mediasetplay', 'metalvideo', 'accuradio']
srvBlacklist = ['mega', 'hdmario', 'torrent', 'youtube']
chNumRis = {
'altadefinizione01': {
@@ -186,7 +186,7 @@ for chItem in channel_list:
else:
itemlist = getattr(module, it.action)(it)
- if itemlist and itemlist[0].action in ('findvideos', 'episodios'):
+ if not firstContent and itemlist and itemlist[0].action in ('findvideos', 'episodios'):
firstContent = re.match('[ \w]*', itemlist[0].fulltitle).group(0)
# some sites might have no link inside, but if all results are without servers, there's something wrong
@@ -195,7 +195,7 @@ for chItem in channel_list:
if hasattr(module, resIt.action):
serversFound[it.title] = getattr(module, resIt.action)(resIt)
if serversFound[it.title] and resIt.action == 'episodios':
- getattr(module, serversFound[it.title][0].action)(serversFound[it.title][0])
+ serversFound[it.title] = getattr(module, serversFound[it.title][0].action)(serversFound[it.title][0])
else:
serversFound[it.title] = [resIt]