fix hdpass (altadefinizioneclick, la casa del cinema and seriehd)
disabled vedohd (site closed, it now redirects to cb01)
@@ -19,7 +19,6 @@
"""

from core import support
from core import scrapertools, httptools
from core.item import Item
@@ -119,7 +118,7 @@ def newest(categoria):
    except:
        import sys
        for line in sys.exc_info():
            log('newest log: ', {0}.format(line))
            support.log('newest log: ', {0}.format(line))
        return []

    return itemlist
@@ -83,17 +83,41 @@ def peliculas(item):

@support.scrape
def episodios(item):
    data = ''
    url = support.match(item, patron=r'<iframe id="iframeVid" width="[^"]+" height="[^"]+" src="([^"]+)" allowfullscreen').match
    seasons = support.match(url, patron=r'<a href="([^"]+)">(\d+)<', patronBlock=r'<h3>STAGIONE</h3><ul>(.*?)</ul>', headers=headers).matches
    for season_url, season in seasons:
        season_url = support.urlparse.urljoin(url, season_url)
        episodes = support.match(season_url, patron=r'<a href="([^"]+)">(\d+(?:-\d+)?)<', patronBlock=r'<h3>EPISODIO</h3><ul>(.*?)</ul>', headers=headers).matches
        for episode_url, episode in episodes:
            episode_url = support.urlparse.urljoin(url, episode_url)
    def get_season(pageData, seas_url, season):
        data = ''
        if pageData:  # to avoid re-downloading the page
            episodes = pageData
            pageData = ''
        else:
            episodes = httptools.downloadpage(seas_url).data
        episodes = scrapertools.find_single_match(episodes, patron_episode)
        for episode_url, episode in scrapertools.find_multiple_matches(episodes, patron_option):
            episode_url = support.urlparse.urljoin(item.url, episode_url)
            if '-' in episode: episode = episode.split('-')[0].zfill(2) + 'x' + episode.split('-')[1].zfill(2)
            title = season + "x" + episode.zfill(2) + ' - ' + item.fulltitle
            data += title + '|' + episode_url + '\n'
        return data

    def itemlistHook(itemlist):
        itemlist.sort(key=lambda item: item.title)
        return itemlist
    url = support.match(item,
                        patron=r'<iframe id="iframeVid" width="[^"]+" height="[^"]+" src="([^"]+)" allowfullscreen').match
    pageData = httptools.downloadpage(url).data
    patron_season = '<div class="buttons-bar seasons">(.*?)<div class="buttons'
    patron_episode = '<div class="buttons-bar episodes">(.*?)<div class="buttons'
    patron_option = r'<a href="([^"]+?)".*?>([^<]+?)</a>'
    data = ''

    seasons = scrapertools.find_single_match(pageData, patron_season)
    from concurrent import futures
    with futures.ThreadPoolExecutor() as executor:
        thL = []
        for seas_url, season in scrapertools.find_multiple_matches(seasons, patron_option):
            thL.append(executor.submit(get_season, pageData, seas_url, season))
        for res in futures.as_completed(thL):
            if res.result():
                data += res.result()
    patron = r'(?P<title>[^\|]+)\|(?P<url>[^\n]+)\n'
    action = 'findvideos'
    return locals()
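The new episodios builds on the @support.scrape convention visible above: the function assembles a synthetic data string (one "title|url" pair per line), a patron regex with named groups, an action and an optional itemlistHook, then returns locals() so the decorator can presumably turn the matches into menu items. The snippet below is a hypothetical, heavily simplified stand-in for that decorator, written only to illustrate the convention; the real support.scrape in core/support.py does much more.

# Hypothetical, simplified stand-in for support.scrape (illustration only, not the actual code).
import re
from types import SimpleNamespace

def scrape(func):
    def wrapper(item):
        env = func(item)                      # the locals() dict returned by the channel function
        itemlist = []
        for m in re.finditer(env['patron'], env['data']):
            fields = m.groupdict()
            itemlist.append(SimpleNamespace(title=fields['title'],
                                            url=fields['url'],
                                            action=env.get('action', 'findvideos')))
        hook = env.get('itemlistHook')        # e.g. the sort-by-title hook defined above
        return hook(itemlist) if hook else itemlist
    return wrapper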
@@ -2,7 +2,7 @@
    "id": "vedohd",
    "name": "VedoHD",
    "language": ["ita"],
    "active": true,
    "active": false,
    "adult": false,
    "thumbnail": "vedohd.png",
    "banner": "vedohd.png",
@@ -250,6 +250,7 @@ def downloadpage(url, **opt):
        HTTPResponse.time: float    Time taken to make the request

    """
    url = scrapertools.unescape(url)
    load_cookies()
    domain = urlparse.urlparse(url).netloc
    CF = False
core/support.py
@@ -5,9 +5,13 @@ import base64
import inspect
import os
import re
import urllib
import urlparse
import xbmcaddon
from concurrent import futures

try:
    import urllib.request as urllib
    import urllib.parse as urlparse
except ImportError:
    import urllib, urlparse

from channelselector import thumb
from core import httptools, scrapertools, servertools, tmdb, channeltools
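The try/except block added above is the usual Python 2/3 compatibility shim: on Python 3 the urllib sub-modules are bound to the old Python 2 module names, so calls such as urlparse.urljoin and urlparse.urlparse keep working unchanged on both interpreters. A tiny standalone illustration (not part of the commit):

# Illustration only: after the shim, urlparse.urljoin is available under the same name on both interpreters.
try:
    import urllib.request as urllib   # Python 3
    import urllib.parse as urlparse
except ImportError:
    import urllib, urlparse           # Python 2

print(urlparse.urljoin('https://example.com/iframe/', 'urlEmbed?id=1'))
# -> https://example.com/iframe/urlEmbed?id=1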
@@ -17,76 +21,60 @@ from platformcode import logger, config
from specials import autoplay


def hdpass_get_servers(item):
    def get_url(mir_url, srv):
        data = httptools.downloadpage(urlparse.urljoin(url, mir_url)).data  # .replace('\n', '')
        for media_url in scrapertools.find_multiple_matches(data, patron_media):
            log("video -> ", res_video)
            return Item(channel=item.channel,
                        action="play",
                        fulltitle=item.fulltitle,
                        quality=res_video,
                        show=item.show,
                        thumbnail=item.thumbnail,
                        contentType=item.contentType,
                        url=base64.b64decode(media_url))

    # load the page
    itemlist = []
    data = httptools.downloadpage(item.url).data.replace('\n', '')
    patron = r'<iframe(?: id="[^"]+")? width="[^"]+" height="[^"]+" src="([^"]+)"[^>]+><\/iframe>'
    url = scrapertools.find_single_match(data, patron).replace("?alta", "")
    url = url.replace("&download=1", "")
    if 'hdpass' in item.url or 'hdplayer' in item.url:
        url = item.url
    else:
        data = httptools.downloadpage(item.url).data.replace('\n', '')
        patron = r'<iframe(?: id="[^"]+")? width="[^"]+" height="[^"]+" src="([^"]+)"[^>]+><\/iframe>'
        url = scrapertools.find_single_match(data, patron).replace("?alta", "")
        url = url.replace("&download=1", "")
        if 'hdpass' not in url and 'hdplayer' not in url:
            return itemlist
    if 'https' not in url:
        url = 'https:' + url

    if 'hdpass' or 'hdplayer' in url:
        data = httptools.downloadpage(url).data
    data = httptools.downloadpage(url).data
    patron_res = '<div class="buttons-bar resolutions-bar">(.*?)<div class="buttons-bar'
    patron_mir = '<div class="buttons-bar hosts-bar">(.*?)<div id="fake'
    patron_media = r'<iframe allowfullscreen custom-src="([^"]+)'
    patron_option = r'<a href="([^"]+?)".*?>([^<]+?)</a>'

    start = data.find('<div class="row mobileRes">')
    end = data.find('<div id="playerFront">', start)
    data = data[start:end]
    res = scrapertools.find_single_match(data, patron_res)

    patron_res = '<div class="row mobileRes">(.*?)</div>'
    patron_mir = '<div class="row mobileMirrs">(.*?)</div>'
    patron_media = r'<input type="hidden" name="urlEmbed" data-mirror="([^"]+)" id="urlEmbed"\s*value="([^"]+)"\s*/>'
    itemlist = []

    res = scrapertools.find_single_match(data, patron_res)

    itemlist = []

    for res_url, res_video in scrapertools.find_multiple_matches(res, '<option.*?value="([^"]+?)">([^<]+?)</option>'):

        data = httptools.downloadpage(urlparse.urljoin(url, res_url)).data.replace('\n', '')

        mir = scrapertools.find_single_match(data, patron_mir)

        for mir_url, srv in scrapertools.find_multiple_matches(mir, '<option.*?value="([^"]+?)">([^<]+?)</value>'):

            data = httptools.downloadpage(urlparse.urljoin(url, mir_url)).data.replace('\n', '')
            for media_label, media_url in scrapertools.find_multiple_matches(data, patron_media):
                itemlist.append(Item(channel=item.channel,
                                     action="play",
                                     fulltitle=item.fulltitle,
                                     quality=res_video,
                                     show=item.show,
                                     thumbnail=item.thumbnail,
                                     contentType=item.contentType,
                                     url=url_decode(media_url)))
                log("video -> ", res_video)
    with futures.ThreadPoolExecutor() as executor:
        thL = []
        for res_url, res_video in scrapertools.find_multiple_matches(res, patron_option):
            if data:  # to avoid re-downloading the page
                page = data
                data = ''
            else:
                page = httptools.downloadpage(urlparse.urljoin(url, res_url)).data
            mir = scrapertools.find_single_match(page, patron_mir)

            for mir_url, srv in scrapertools.find_multiple_matches(mir, patron_option):
                thL.append(executor.submit(get_url, mir_url, srv))
        for res in futures.as_completed(thL):
            if res.result():
                itemlist.append(res.result())
    return server(item, itemlist=itemlist)

def url_decode(url_enc):
    lenght = len(url_enc)
    if lenght % 2 == 0:
        len2 = lenght / 2
        first = url_enc[0:len2]
        last = url_enc[len2:lenght]
        url_enc = last + first
        reverse = url_enc[::-1]
        return base64.b64decode(reverse)

    last_car = url_enc[lenght - 1]
    url_enc[lenght - 1] = ' '
    url_enc = url_enc.strip()
    len1 = len(url_enc)
    len2 = len1 / 2
    first = url_enc[0:len2]
    last = url_enc[len2:len1]
    url_enc = last + first
    reverse = url_enc[::-1]
    reverse = reverse + last_car
    return base64.b64decode(reverse)


def color(text, color):
    return "[COLOR " + color + "]" + text + "[/COLOR]"
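url_decode, used by the older hdpass code path shown above, undoes a simple obfuscation: for an even-length string it swaps the two halves, reverses the result and base64-decodes it. Below is a Python 3 round-trip illustration of that even-length branch; the url_encode helper is hypothetical and exists only to demonstrate the inverse, and // is used so the slicing also works on Python 3.

# Illustration only, not part of the commit.
import base64

def url_decode_demo(url_enc):
    half = len(url_enc) // 2                     # swap halves, reverse, base64-decode
    swapped = url_enc[half:] + url_enc[:half]
    return base64.b64decode(swapped[::-1])

def url_encode_demo(url):                        # hypothetical inverse, for the demo only
    rev = base64.b64encode(url).decode()[::-1]
    half = len(rev) // 2
    return rev[half:] + rev[:half]

enc = url_encode_demo(b'https://example.com/video.mp4')
print(url_decode_demo(enc))                      # b'https://example.com/video.mp4'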
@@ -1005,13 +993,17 @@ def server(item, data='', itemlist=[], headers='', AutoPlay=True, CheckLinks=Tru
    verifiedItemlist = []
    for videoitem in itemlist:
        if not videoitem.server:
            videoitem.url = unshortenit.unshorten(videoitem.url)[0]
            findS = servertools.findvideos(videoitem.url)
            if findS:
                findS = findS[0]
            else:
                log(videoitem, 'Non supportato')
                continue
        videoitem.url = unshortenit.unshorten(videoitem.url)[0]
        findS = servertools.findvideos(videoitem.url)
        if findS:
            findS = findS[0]
        else:
            log(videoitem, 'Non supportato')
            continue
        videoitem.server = findS[2]
        videoitem.title = findS[0]
        videoitem.url = findS[1]
@@ -4,12 +4,12 @@
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "vcstream.to/(?:embed|f)/([A-z0-9]+)/([A-z0-9.]+)",
                "pattern": "vcstream.to/(?:embed|f|v)/([A-z0-9]+)/([A-z0-9.]+)",
                "url": "https://vcstream.to/embed/\\1/\\2"
            },
            {
                "pattern": "https://vidcloud.co/v/([a-z0-9A-Z]+)",
                "url": "https:\/\/vidcloud.co\/v\/\\1"
                "pattern": "vidcloud.co/(?:embed|f|v)/([a-z0-9A-Z]+)",
                "url": "https://vidcloud.co\/v\/\\1"
            }
        ]
    },
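Each entry above pairs a "pattern" with a "url" template; the server resolver presumably scans page data for the pattern and rebuilds a canonical embed URL from the captured groups. A rough, self-contained illustration of that mechanism follows; find_embed_url is a hypothetical helper, not the actual servertools code.

# Illustration only.
import re

def find_embed_url(page_data, pattern, url_template):
    match = re.search(pattern, page_data)
    if match:
        # expand \1, \2 ... from the template, tolerating JSON-escaped slashes
        return match.expand(url_template.replace('\\/', '/'))
    return None

page = '<iframe src="https://vcstream.to/v/abc123/movie.mp4"></iframe>'
print(find_embed_url(page,
                     r'vcstream.to/(?:embed|f|v)/([A-z0-9]+)/([A-z0-9.]+)',
                     'https://vcstream.to/embed/\\1/\\2'))
# -> https://vcstream.to/embed/abc123/movie.mp4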