fix server cb01

This commit is contained in:
marco
2020-04-27 18:03:01 +02:00
parent 61b67a4781
commit 68eea5ec87
5 changed files with 91 additions and 49 deletions

View File

@@ -259,34 +259,4 @@ def findvid_serie(item):
def play(item):
    """Build the list of playable server items for *item*.

    The legacy cb01 wrapper handling (/film/ redirects, /goto/ base64
    links, go.php pages, swzz links) was removed from here; link
    unwrapping is now delegated to servertools / unshortenit, so the
    raw item URL is handed over directly.

    Returns the itemlist produced by servertools.find_video_items.
    """
    support.log()
    return servertools.find_video_items(data=item.url)

View File

@@ -573,13 +573,16 @@ def dooplay_menu(item, type):
def swzz_get_url(item):
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:59.0) Gecko/20100101 Firefox/59.0'}
# dbg()
if "/link/" in item.url:
data = httptools.downloadpage(item.url, headers=headers).data
if "link =" in data:
data = scrapertools.find_single_match(data, 'link = "([^"]+)"')
if 'http' not in data:
data = 'https:' + data
elif 'linkId = ' in data:
id = scrapertools.find_single_match(data, 'linkId = "([^"]+)"')
data = stayonline(id)
else:
match = scrapertools.find_single_match(data, r'<meta name="og:url" content="([^"]+)"')
match = scrapertools.find_single_match(data, r'URL=([^"]+)">') if not match else match
@@ -604,24 +607,26 @@ def swzz_get_url(item):
if not "vcrypt" in data:
data = httptools.downloadpage(data).data
logger.debug("##### play /link/ data ##\n%s\n##" % data)
elif 'stayonline.pro' in item.url:
# dbg()
id = item.url.split('/')[-2]
reqUrl = 'https://stayonline.pro/ajax/linkView.php'
p = urlencode({"id": id})
data = httptools.downloadpage(reqUrl, post=p).data
try:
import json
data = json.loads(data)['data']['value']
except:
data = scrapertools.find_single_match(data, r'"value"\s*:\s*"([^"]+)"')
else:
return ''
data = stayonline(id)
else:
data = item.url
return data.replace('\\','')
def stayonline(id):
    """Resolve a stayonline.pro link id to its target URL.

    POSTs the id to the site's ajax linkView endpoint and extracts the
    'value' field from the JSON reply; falls back to a regex scrape when
    the reply is not valid JSON.

    Note: parameter name `id` shadows the builtin but is kept for
    backward compatibility with existing callers.
    """
    import json
    reqUrl = 'https://stayonline.pro/ajax/linkView.php'
    p = urlencode({"id": id})
    data = httptools.downloadpage(reqUrl, post=p).data
    try:
        data = json.loads(data)['data']['value']
    # Narrowed from a bare `except:` — only JSON/shape failures should
    # trigger the scraping fallback, not e.g. KeyboardInterrupt.
    except (ValueError, KeyError, TypeError):
        data = scrapertools.find_single_match(data, r'"value"\s*:\s*"([^"]+)"')
    return data
def menuItem(itemlist, filename, title='', action='', url='', contentType='movie', args=[]):
# Function to simplify menu creation

View File

@@ -13,7 +13,7 @@ import time
import urllib
from base64 import b64decode
from core import httptools
from core import httptools, scrapertools
from platformcode import config, logger
@@ -38,11 +38,14 @@ class UnshortenIt(object):
_vcrypt_regex = r'vcrypt\.net|vcrypt\.pw'
_linkup_regex = r'linkup\.pro|buckler.link'
_linkhub_regex = r'linkhub\.icu'
_swzz_regex = r'swzz\.xyz'
_stayonline_regex = r'stayonline\.pro'
# for services that only include real link inside iframe
_simple_iframe_regex = r'cryptmango|xshield\.net|vcrypt\.club'
listRegex = [_adfly_regex, _linkbucks_regex, _adfocus_regex, _lnxlu_regex, _shst_regex, _hrefli_regex, _anonymz_regex,
_shrink_service_regex, _rapidcrypt_regex, _simple_iframe_regex, _vcrypt_regex, _linkup_regex, _linkhub_regex]
_shrink_service_regex, _rapidcrypt_regex, _simple_iframe_regex, _vcrypt_regex, _linkup_regex, _linkhub_regex,
_swzz_regex, _stayonline_regex]
_maxretries = 5
@@ -88,10 +91,16 @@ class UnshortenIt(object):
uri, code = self._unshorten_linkup(uri)
if re.search(self._linkhub_regex, uri, re.IGNORECASE):
uri, code = self._unshorten_linkhub(uri)
if re.search(self._swzz_regex, uri, re.IGNORECASE):
uri, code = self._unshorten_swzz(uri)
if re.search(self._stayonline_regex, uri, re.IGNORECASE):
uri, code = self._unshorten_stayonline(uri)
if oldUri == uri:
break
logger.info(uri)
return uri, code
def unwrap_30x(self, uri, timeout=10):
@@ -582,6 +591,59 @@ class UnshortenIt(object):
except Exception as e:
return uri, str(e)
def _unshorten_swzz(self, uri):
    """Resolve a swzz.xyz shortlink to the real target URL.

    Returns a tuple (uri, http_status_code) on success, or
    (original_uri, error_string) on failure.
    """
    try:
        r = httptools.downloadpage(uri)
        # An HTTP redirect already resolved the link for us.
        if r.url != uri:
            return r.url, r.code
        data = r.data
        if "link =" in data or 'linkId = ' in data:
            uri = scrapertools.find_single_match(data, 'link(?:Id)? = "([^"]+)"')
            # BUGFIX: test the extracted link, not the whole page body —
            # almost any page contains 'http' somewhere, so the original
            # check on `data` never prepended the missing scheme.
            if 'http' not in uri:
                uri = 'https:' + uri
        else:
            match = scrapertools.find_single_match(data, r'<meta name="og:url" content="([^"]+)"')
            match = scrapertools.find_single_match(data, r'URL=([^"]+)">') if not match else match
            if not match:
                # Link is hidden inside packed javascript; unpack it first.
                from lib import jsunpack
                try:
                    data = scrapertools.find_single_match(data.replace('\n', ''),
                                                          r"(eval\s?\(function\(p,a,c,k,e,d.*?)</script>")
                    data = jsunpack.unpack(data)
                    logger.debug("##### play /link/ unpack ##\n%s\n##" % data)
                except Exception:
                    # Narrowed from a bare `except:`; content may already
                    # be unpacked, in which case the regex below still works.
                    logger.debug("##### The content is yet unpacked ##\n%s\n##" % data)
                uri = scrapertools.find_single_match(data, r'var link(?:\s)?=(?:\s)?"([^"]+)";')
            else:
                uri = match
            if uri.startswith('/'):
                # og:url / refresh targets may be site-relative.
                uri = "http://swzz.xyz" + uri
            if not "vcrypt" in data:
                # BUGFIX: follow the extracted link (`uri`), not the raw
                # page HTML (`data`), mirroring the swzz_get_url code path.
                uri = httptools.downloadpage(uri).data
        return uri, r.code
    except Exception as e:
        return uri, str(e)
def _unshorten_stayonline(self, uri):
    """Resolve a stayonline.pro link via the site's ajax linkView endpoint.

    Returns a tuple (real_url, http_status_code) on success, or
    (original_uri, error_string) on failure.
    """
    try:
        import json
        # The link id is the second-to-last path segment of the URL.
        link_id = uri.split('/')[-2]
        reqUrl = 'https://stayonline.pro/ajax/linkView.php'
        payload = urllib.urlencode({"id": link_id})
        r = httptools.downloadpage(reqUrl, post=payload)
        data = r.data
        try:
            uri = json.loads(data)['data']['value']
        # Narrowed from a bare `except:` — only JSON/shape failures
        # should fall back to scraping the 'value' field.
        except (ValueError, KeyError, TypeError):
            uri = scrapertools.find_single_match(data, r'"value"\s*:\s*"([^"]+)"')
        return uri, r.code
    except Exception as e:
        return uri, str(e)
def unwrap_30x_only(uri, timeout=10):
unshortener = UnshortenIt()
@@ -612,6 +674,7 @@ def findlinks(text):
regex = '(?:https?://(?:[\w\d]+\.)?)?(?:' + regex + ')/[a-zA-Z0-9_=/]+'
for match in re.findall(regex, text):
matches.append(match)
logger.info('matches=' + str(matches))
if len(matches) == 1:
text += '\n' + unshorten(matches[0])[0]
elif matches:

View File

@@ -4,8 +4,8 @@
"ignore_urls": ["http://akvideo.stream/video", "http://akvideo.stream/video/api"],
"patterns": [
{
"pattern": "(https?://akvideo.stream/swembedid/\\d+)",
"url": "\\1"
"pattern": "akvideo\\.stream/((?:api/vcmod/fastredirect/streaming\\.php\\?id=|swembedid/)[$0-9]+)",
"url": "http://akvideo.stream/\\1"
},
{
"pattern": "(https://akvideo\\.stream/api/vcmod/fastredirect/embed_ak\\.php\\?id=[0-9]+)",

View File

@@ -7,20 +7,24 @@ from core import httptools
from core import scrapertools
from platformcode import logger, config
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0']]
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
# page_url = re.sub('akvideo.stream/(?:video/|video\\.php\\?file_code=)?(?:embed-)?([a-zA-Z0-9]+)','akvideo.stream/video/\\1',page_url)
global data
page = httptools.downloadpage(page_url)
page = httptools.downloadpage(page_url, headers=headers)
if 'embed_ak.php' in page_url or '/embed-' in page.url:
code = scrapertools.find_single_match(page.url, '/embed-([0-9a-z]+)\.html')
if not code:
code = scrapertools.find_single_match(page.data, r"""input\D*id=(?:'|")[^'"]+(?:'|").*?value='([a-z0-9]+)""")
if code:
page = httptools.downloadpage('http://akvideo.stream/video/' + code)
page = httptools.downloadpage('http://akvideo.stream/video/' + code, headers=headers)
else:
return False, config.get_localized_string(70449) % "Akvideo"
if 'video.php?file_code=' in page.url:
page = httptools.downloadpage(page.url.replace('video.php?file_code=', 'video/'), headers=headers)
data = page.data
# ID, code = scrapertools.find_single_match(data, r"""input\D*id=(?:'|")([^'"]+)(?:'|").*?value='([a-z0-9]+)""")