fix cb01 series, vcrypt on Kodi 19 and deltabit
TV-series episodes sorted
@@ -127,40 +127,57 @@ def peliculas(item):
 @support.scrape
 def episodios(item):
     # support.dbg()
-    data = support.match(item.url, headers=headers).data
-    support.info(data)
-    if 'TUTTA LA ' in data:
-        folderUrl = scrapertools.find_single_match(data, r'TUTTA LA \w+\s+(?:–|-)\s+<a href="?([^" ]+)')
+    @support.scrape
+    def folder(item, data):
+        """
+        When the page links a vcrypt folder containing multiple seasons
+        """
+        actLike = 'episodios'
+        addVideolibrary = False
+        downloadEnabled = False
+
+        folderUrl = scrapertools.find_single_match(data, r'TUTTA LA \w+\s+(?:–|-)\s+<a href="?([^" ]+)').replace(
+            '.net/', '.pw/')  # vcrypt.pw has no Cloudflare
         data = httptools.downloadpage(folderUrl).data
-        patron = r'<a href="(?P<url>[^"]+)[^>]+>(?P<title>[^<]+)'
+        patron = r'><a href="(?P<url>[^"]+)[^>]+>(?P<title>[^<]+)'
         sceneTitle = True

         def itemHook(item):
             item.serieFolder = True
             return item
-    else:
-        patronBlock = r'(?P<block>sp-head[^>]+>\s*(?:STAGION[EI]\s*(?:DA\s*[0-9]+\s*A)?\s*[0-9]+|MINISERIE) - (?P<lang>[^-<]+)(?:- (?P<quality>[^-<]+))?.*?<\/div>.*?)spdiv[^>]*>'
-        patron = r'(?:/>|<p>|<strong>)(?P<url>.*?(?P<episode>[0-9]+(?:×|Ã)[0-9]+)\s*(?P<title2>.*?)?(?:\s*–|\s*-|\s*<).*?)(?:<\/p>|<br)'
-        def itemlistHook(itemlist):
-            title_dict = {}
-            itlist = []
-            for item in itemlist:
-                item.title = re.sub(r'\.(\D)',' \\1', item.title)
-                match = support.match(item.title, patron=r'(\d+.\d+)').match.replace('x','')
-                item.order = match
-                if match not in title_dict:
-                    title_dict[match] = item
-                elif match in title_dict and item.contentLanguage == title_dict[match].contentLanguage \
-                        or item.contentLanguage == 'ITA' and not title_dict[match].contentLanguage \
-                        or title_dict[match].contentLanguage == 'ITA' and not item.contentLanguage:
-                    title_dict[match].url = item.url
-                else:
-                    title_dict[match + '1'] = item
+        return locals()

-            for key, value in title_dict.items():
-                itlist.append(value)
-            # debug=True
+    data = support.match(item.url, headers=headers).data
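+    # scrape the whole-season vcrypt folder when the page advertises one ('TUTTA LA ...')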
+    folderItemlist = folder(item, data) if 'TUTTA LA ' in data else []

-            return sorted(itlist, key=lambda it: (it.contentLanguage, int(it.order)))
+    patronBlock = r'(?P<block>sp-head[^>]+>\s*(?:STAGION[EI]\s*(?:DA\s*[0-9]+\s*A)?\s*[0-9]+|MINISERIE) - (?P<lang>[^-<]+)(?:- (?P<quality>[^-<]+))?.*?<\/div>.*?)spdiv[^>]*>'
+    patron = r'(?:/>|<p>|<strong>)(?P<other>.*?(?P<episode>[0-9]+(?:×|Ã)[0-9]+)\s*(?P<title2>.*?)?(?:\s*–|\s*-|\s*<).*?)(?:<\/p>|<br)'
+    def itemlistHook(itemlist):
+        title_dict = {}
+        itlist = []
+        for i in itemlist:
+            i.url = item.url
+            i.title = re.sub(r'\.(\D)',' \\1', i.title)
+            match = support.match(i.title, patron=r'(\d+.\d+)').match.replace('x','')
+            i.order = match
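+            # dedupe by episode number: keep one entry per SxE, prefer ITA over unlabeled entries, and store true duplicates under a '1'-suffixed key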
+            if match not in title_dict:
+                title_dict[match] = i
+            elif match in title_dict and i.contentLanguage == title_dict[match].contentLanguage \
+                    or i.contentLanguage == 'ITA' and not title_dict[match].contentLanguage \
+                    or title_dict[match].contentLanguage == 'ITA' and not i.contentLanguage:
+                title_dict[match].url = i.url
+            else:
+                title_dict[match + '1'] = i
+
+        for key, value in title_dict.items():
+            itlist.append(value)
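+
+        # sort by language first, then by the numeric SxE key built above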
+        itlist = sorted(itlist, key=lambda it: (it.contentLanguage, int(it.order)))
+
+        itlist.extend(folderItemlist)
+
+        return itlist
     return locals()

@@ -211,32 +228,10 @@ def findvideos(item):


 def findvid_serie(item):
-    def load_vid_series(html, item, itemlist, blktxt):
-        support.info('HTML',html)
-        # Extract the contents
-        matches = support.match(html, patron=r'<a href=(?:")?([^ "]+)[^>]+>(?!<!--)(.*?)(?:</a>|<img)').matches
-        for url, server in matches:
-            item = item.clone(action="play", title=server, url=url, server=server, quality=blktxt)
-            if 'swzz' in item.url: item.url = support.swzz_get_url(item)
-            itemlist.append(item)

     support.info()
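+    # drop the '1×01 ...' episode label kept in item.other, leaving only the host links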
+    data = re.sub(r'((?:<p>|<strong>)?[^\d]*\d*(?:×|Ã)[0-9]+[^<]+)', '', item.other)

     itemlist = []

-    data = re.sub(r'((?:<p>|<strong>)?[^\d]*\d*(?:×|Ã)[0-9]+[^<]+)', '' ,item.url)

-    # Blocks with split
-    blk = re.split(r"(?:>\s*)?([A-Za-z\s0-9]*):\s*<", data, re.S)
-    blktxt = ""
-    for b in blk:
-        if b[0:3] == "a h" or b[0:4] == "<a h":
-            load_vid_series("<%s>" % b, item, itemlist, blktxt)
-            blktxt = ""
-        elif len(b.strip()) > 1:
-            blktxt = b.strip()
-
-    return support.server(item, itemlist=itemlist)
+    return support.server(item, data=data)


 def play(item):

@@ -17,7 +17,7 @@ else:
     from urllib import urlencode

 from time import time
-from core import httptools, scrapertools, servertools, tmdb, channeltools, autoplay
+from core import httptools, scrapertools, servertools, tmdb, channeltools, autoplay, scraper
 from core.item import Item
 from lib import unshortenit
 from platformcode import config

@@ -391,6 +391,9 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t


 def html_uniform(data):
+    """
+    replace all ' with " and eliminate newline, so we don't need to worry about quote style or line breaks
+    """
     return re.sub("='([^']+)'", '="\\1"', data.replace('\n', ' ').replace('\t', ' ').replace('  ', ' '))

@@ -431,6 +434,7 @@ def scrape(func):
         lang = args.get('deflang', '')
         sceneTitle = args.get('sceneTitle')
         group = args.get('group', False)
+        downloadEnabled = args.get('downloadEnabled', True)
         pag = item.page if item.page else 1 # pagination
         matches = []

@@ -438,8 +442,8 @@ def scrape(func):
         logger.debug('PATRON= ', patron)
         if not data:
             page = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True)
-            # replace all ' with " and eliminate newline, so we don't need to worry about
-            data = html_uniform(page.data)
+            data = page.data
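+        # normalize quotes/whitespace for both downloaded and caller-supplied data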
+        data = html_uniform(data)
         scrapingTime = time()
         if patronBlock:
             if debugBlock:
@@ -506,6 +510,9 @@ def scrape(func):
         if patronNext and inspect.stack()[1][3] not in ['newest']:
             nextPage(itemlist, item, data, patronNext, function)

+        if function == 'episodios':
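+            # scraper.sort_episode_list (import added above) puts the episode items in numeric order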
+            scraper.sort_episode_list(itemlist)
+
         # next page for pagination
         if pagination and len(matches) > pag * pagination and not search:
             if inspect.stack()[1][3] not in ['newest','get_newest']:
@@ -533,7 +540,7 @@ def scrape(func):
         if addVideolibrary and (item.infoLabels["title"] or item.fulltitle):
             # item.fulltitle = item.infoLabels["title"]
             videolibrary(itemlist, item, function=function)
-        if function == 'episodios' or function == 'findvideos':
+        if downloadEnabled and function == 'episodios' or function == 'findvideos':
             download(itemlist, item, function=function)

         if 'patronMenu' in args and itemlist:
@@ -645,63 +652,6 @@ def dooplay_menu(item, type):
     return locals()


-def swzz_get_url(item):
-    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:59.0) Gecko/20100101 Firefox/59.0'}
-    # dbg()
-    if "/link/" in item.url:
-        data = httptools.downloadpage(item.url, headers=headers).data
-        if "link =" in data:
-            data = scrapertools.find_single_match(data, 'link = "([^"]+)"')
-            if 'http' not in data:
-                data = 'https:' + data
-        elif 'linkId = ' in data:
-            id = scrapertools.find_single_match(data, 'linkId = "([^"]+)"')
-            data = stayonline(id)
-        else:
-            match = scrapertools.find_single_match(data, r'<meta name="og:url" content="([^"]+)"')
-            match = scrapertools.find_single_match(data, r'URL=([^"]+)">') if not match else match
-
-            if not match:
-                from lib import jsunpack
-
-                try:
-                    data = scrapertools.find_single_match(data.replace('\n', ''), r"(eval\s?\(function\(p,a,c,k,e,d.*?)</script>")
-                    data = jsunpack.unpack(data)
-
-                    logger.debug("##### play /link/ unpack ##\n%s\n##" % data)
-                except:
-                    logger.debug("##### The content is yet unpacked ##\n%s\n##" % data)
-
-                data = scrapertools.find_single_match(data, r'var link(?:\s)?=(?:\s)?"([^"]+)";')
-                data, c = unshortenit.unwrap_30x_only(data)
-            else:
-                data = match
-            if data.startswith('/'):
-                data = urlparse.urljoin("http://swzz.xyz", data)
-            if not "vcrypt" in data:
-                data = httptools.downloadpage(data).data
-            logger.debug("##### play /link/ data ##\n%s\n##" % data)
-
-    elif 'stayonline.pro' in item.url:
-        id = item.url.split('/')[-2]
-        data = stayonline(id)
-    else:
-        data = item.url
-
-    return data.replace('\\','')
-
-def stayonline(id):
-    reqUrl = 'https://stayonline.pro/ajax/linkView.php'
-    p = urlencode({"id": id})
-    data = httptools.downloadpage(reqUrl, post=p).data
-    try:
-        import json
-        data = json.loads(data)['data']['value']
-    except:
-        data = scrapertools.find_single_match(data, r'"value"\s*:\s*"([^"]+)"')
-    return data


 def menuItem(itemlist, filename, title='', action='', url='', contentType='undefined', args=[], style=True):
     # Function to simplify menu creation

@@ -492,7 +492,6 @@ class UnshortenIt(object):
         except Exception as e:
             return uri, str(e)

-
     def _unshorten_vcrypt(self, uri):
         uri = uri.replace('.net', '.pw')
         try:
@@ -508,15 +507,15 @@ class UnshortenIt(object):
             from Crypto.Cipher import AES

             str = str.replace("_ppl_", "+").replace("_eqq_", "=").replace("_sll_", "/")
-            iv = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
-            key = "naphajU2usWUswec"
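+            # Py3: the AES cipher needs bytes for key and IV (zero IV, fixed 16-byte key)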
+            iv = b"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
+            key = b"naphajU2usWUswec"
             decoded = b64decode(str)
-            decoded = decoded + '\0' * (len(decoded) % 16)
+            decoded = decoded + b'\0' * (len(decoded) % 16)
             crypt_object = AES.new(key, AES.MODE_CBC, iv)
-            decrypted = ''
+            decrypted = b''
             for p in range(0, len(decoded), 16):
-                decrypted += crypt_object.decrypt(decoded[p:p + 16]).replace('\0', '')
-            return decrypted
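+                # Py3: work in bytes while decrypting, then strip the zero padding and decode to str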
+                decrypted += crypt_object.decrypt(decoded[p:p + 16]).replace(b'\0', b'')
+            return decrypted.decode('ascii')
             if 'shield' in uri.split('/')[-2]:
                 uri = decrypt(uri.split('/')[-1])
             else:
@@ -537,7 +536,7 @@ class UnshortenIt(object):
             r = httptools.downloadpage(uri, timeout=self._timeout, headers=headers, follow_redirects=False)
             if 'Wait 1 hour' in r.data:
                 uri = ''
-                logger.info('IP bannato da vcrypt, aspetta un ora')
+                logger.error('IP bannato da vcrypt, aspetta un ora')
             else:
                 prev_uri = uri
                 uri = r.headers['location']
@@ -549,7 +548,11 @@ class UnshortenIt(object):
             if 'out_generator' in uri:
                 uri = re.findall('url=(.*)$', uri)[0]
             elif '/decode/' in uri:
-                uri = httptools.downloadpage(uri, follow_redirects=True).url
+                scheme, netloc, path, query, fragment = urlsplit(uri)
+                splitted = path.split('/')
+                splitted[1] = 'outlink'
+                uri = httptools.downloadpage(scheme + '://' + netloc + "/".join(splitted) + query + fragment, follow_redirects=False,
+                                             post={'url': splitted[2]}).headers['location']
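+                # /decode/<id>/... becomes /outlink/...; POSTing the id returns the real target in the Location header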
                 # uri = decrypt(uri.split('/')[-1])

             return uri, r.code if r else 200
@@ -557,7 +560,6 @@ class UnshortenIt(object):
             logger.error(e)
             return uri, 0

-
     def _unshorten_linkup(self, uri):
         try:
             r = None

@@ -1,36 +1,31 @@
 # -*- coding: utf-8 -*-

 import time, sys
 if sys.version_info[0] >= 3:
     import urllib.parse as urllib
 else:
     import urllib

-from core import httptools, scrapertools
+from core import httptools, scrapertools, support
 from lib import jsunpack
 from platformcode import logger, config


 def test_video_exists(page_url):
     logger.info("(page_url='%s')" % page_url)
-    global data
-    data = httptools.downloadpage(page_url).data.replace('"', "'")
+    global data, real_url
+    page = httptools.downloadpage(page_url)
+    data = page.data.replace('"', "'")
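+    # keep the final post-redirect URL for the POST in get_video_url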
+    real_url = page.url

     if "Not Found" in data or "File Does not Exist" in data:
         return False, config.get_localized_string(70449) % "DeltaBit"
     return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
|
||||
logger.info("(deltabit page_url='%s')" % page_url)
|
||||
video_urls = []
|
||||
global data
|
||||
|
||||
post = urllib.urlencode({k: v for k, v in scrapertools.find_multiple_matches(data, "name='([^']+)' value='([^']*)'")})
|
||||
global data, real_url
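+    # collect the hidden form fields; the dict is handed straight to httptools for the confirmation POST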
+    post = {k: v for k, v in scrapertools.find_multiple_matches(data, "name='([^']+)' value='([^']*)'")}
     time.sleep(2.5)
-    data = httptools.downloadpage(page_url, post=post).data
+    data = httptools.downloadpage(real_url, post=post).data

-    videos_packed = scrapertools.find_single_match(data, r"</div>\s*<script type='text/javascript'>(eval.function.p,a,c,k,e,.*?)\s*</script>")
-
-    video_unpacked = jsunpack.unpack(videos_packed)
-    videos = scrapertools.find_single_match(video_unpacked, r'sources:\["([^"]+)"\]')
-    video_urls.append([videos.split('.')[-1] + ' [DeltaBit]', videos.replace('https:','http:')])
-    return video_urls
+    # videos_packed = scrapertools.find_single_match(data, r"<script type='text/javascript'>(eval.function.p,a,c,k,e,.*?)\s*</script>")
+    # video_unpacked = jsunpack.unpack(videos_packed)
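+    # support.get_jwplayer_mediaurl extracts the jwplayer sources; the True flag should cover the unpack step sketched above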
+    return support.get_jwplayer_mediaurl(data, 'DeltaBit', True)