Server response as Dict and various changes

This commit is contained in:
Alhaziel01
2021-08-28 18:10:47 +02:00
parent 8975f950c5
commit 60ec158008
108 changed files with 330 additions and 361 deletions
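The core change across these files: entries in the video_urls lists returned by the servers move from positional lists to dicts with explicit keys ('type', 'res', 'url', 'wait', 'sub'), matching the format comment added in platformtools further down. A minimal before/after sketch of a single entry; the URL and labels are made up for illustration:

# Old positional form: [label, url, (optional) wait seconds, (optional) subtitle url]
old_entry = [".mp4 720p [someserver]", "https://example.com/video.mp4", 0, "https://example.com/sub.srt"]

# New dict form used throughout this commit; only 'url' is always present.
new_entry = {
    'type': 'mp4',                           # container/extension
    'res': '720p',                           # optional resolution label
    'url': 'https://example.com/video.mp4',
    'wait': 0,                               # optional seconds to wait before playback
    'sub': 'https://example.com/sub.srt',    # optional subtitle url
}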

View File

@@ -218,13 +218,13 @@ def findvideos(item):
def videourls(res):
newurl = '{}/{}{}'.format(url, res, token)
if requests.head(newurl, headers=headers).status_code == 200:
video_urls.append(["m3u8 {} [StreamingCommunity]".format(res), newurl])
video_urls.append({'type':'m3u8', 'res':res, 'url':newurl})
with futures.ThreadPoolExecutor() as executor:
for res in ['480p', '720p', '1080p']:
executor.submit(videourls, res)
if not video_urls: video_urls = [["m3u8 [StreamingCommunity]", url + token]]
if not video_urls: video_urls = [{'type':'m3u8', 'url':url + token}]
else: video_urls.sort(key=lambda url: int(support.match(url['res'], patron=r'(\d+)p').match))
itemlist = [item.clone(title = channeltools.get_channel_parameters(item.channel)['title'], server='directo', video_urls=video_urls, thumbnail=channeltools.get_channel_parameters(item.channel)["thumbnail"], forcethumb=True)]
return support.server(item, itemlist=itemlist)
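The hunk above probes the 480p/720p/1080p m3u8 variants in parallel and keeps the ones that answer. A self-contained sketch of that pattern, assuming a hypothetical base_url/res + token URL layout rather than the channel's exact endpoints:

from concurrent import futures
import requests

def probe_variants(base_url, token, resolutions=('480p', '720p', '1080p')):
    # HEAD each candidate variant concurrently; keep those that return 200.
    found = []
    def check(res):
        candidate = '{}/{}{}'.format(base_url, res, token)
        if requests.head(candidate).status_code == 200:
            found.append({'type': 'm3u8', 'res': res, 'url': candidate})
    with futures.ThreadPoolExecutor() as executor:
        for res in resolutions:
            executor.submit(check, res)
    # Fall back to the bare playlist when no variant was confirmed.
    return found or [{'type': 'm3u8', 'url': base_url + token}]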

View File

@@ -9,13 +9,6 @@ __channel__ = "autoplay"
PLAYED = False
quality_list = ['4k', '2160p', '2160', '4k2160p', '4k2160', '4k 2160p', '4k 2160', '2k',
'fullhd', 'fullhd 1080', 'fullhd 1080p', 'full hd', 'full hd 1080', 'full hd 1080p', 'hd1080', 'hd1080p', 'hd 1080', 'hd 1080p', '1080', '1080p',
'hd', 'hd720', 'hd720p', 'hd 720', 'hd 720p', '720', '720p', 'hdtv',
'sd', '480p', '480', '360p', '360', '240p', '240',
'default']
def start(itemlist, item):
'''
Main method from which the links are automatically played

View File

@@ -254,7 +254,7 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo
if isinstance(video_password, list):
return video_password, len(video_password) > 0, "<br/>".join(error_messages)
logger.info("Server: %s, url is good" % server)
video_urls.append(["%s [%s]" % (urlparse.urlparse(url)[2][-4:], config.get_localized_string(30137)), url])
video_urls.append({'type':urlparse.urlparse(url)[2].split('.')[-1], 'url':url})
# Find out the video URL
else:
@@ -270,7 +270,7 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo
# Count the available options, to calculate the percentage
orden = [
order = [
["free"] + [server] + [premium for premium in server_parameters["premium"] if not premium == server],
[server] + [premium for premium in server_parameters["premium"] if not premium == server] + ["free"],
[premium for premium in server_parameters["premium"] if not premium == server] + [server] + ["free"]
@@ -282,7 +282,7 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo
[premium for premium in server_parameters["premium"] if config.get_setting("premium", server=premium)])
priority = int(config.get_setting("resolve_priority"))
opciones = sorted(opciones, key=lambda x: orden[priority].index(x))
opciones = sorted(opciones, key=lambda x: order[priority].index(x))
logger.info("Available options: %s | %s" % (len(opciones), opciones))
else:
@@ -685,6 +685,7 @@ def sort_servers(servers_list):
blacklisted_servers = config.get_setting("black_list", server='servers', default=[])
favorite_servers = config.get_setting('favorites_servers_list', server='servers', default=[])
favorite_servers = [s for s in favorite_servers if s not in blacklisted_servers]
if isinstance(servers_list[0], str):
servers_list = sorted(servers_list, key=lambda x: favorite_servers.index(x) if x in favorite_servers else 999)
return servers_list
@@ -720,8 +721,9 @@ def sort_servers(servers_list):
continue
element["index_server"] = index(favorite_servers, item.server.lower())
element["index_quality"] = platformtools.calcResolution(item.quality)
element["index_quality"] = index(favorite_quality, item.quality.lower())
element['index_language'] = 0 if item.contentLanguage == 'ITA' else 1
element['videoitem'] = item
sorted_list.append(element)

View File

@@ -1445,9 +1445,9 @@ def get_jwplayer_mediaurl(data, srvName, onlyHttp=False, dataIsBlock=False):
for url, quality in sources:
quality = 'auto' if not quality else quality
if url.split('.')[-1] != 'mpd':
video_urls.append(['.' + url.split('.')[-1].split('?')[0] + ' [' + quality + '] [' + srvName + ']', url.replace(' ', '%20') if not onlyHttp else url.replace('https://', 'http://')])
video_urls.append({'type':url.split('.')[-1], 'res':quality, 'url':url if not onlyHttp else url.replace('https://', 'http://')})
video_urls.sort(key=lambda x: x[0].split()[1])
# video_urls.sort(key=lambda x: x[0].split()[1])
return video_urls
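Since the entries no longer carry a sortable label at position 0, the old x[0]-based sorts are commented out here and in several servers below. If ordering is still wanted it could be keyed on 'res' instead; an illustrative helper, not part of the commit:

import re

def sort_by_res(video_urls):
    # Order dict entries by the numeric part of their 'res' label
    # (e.g. '720p' -> 720); entries without one sort last.
    def res_value(entry):
        m = re.search(r'(\d+)', str(entry.get('res', '')))
        return int(m.group(1)) if m else 0
    return sorted(video_urls, key=res_value, reverse=True)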

View File

@@ -30,11 +30,6 @@ if not FOLDER_MOVIES or not FOLDER_TVSHOWS or not VIDEOLIBRARY_PATH or not filet
addon_name = "plugin://plugin.video.%s/" % config.PLUGIN_NAME
quality_order = ['4k', '2160p', '2160', '4k2160p', '4k2160', '4k 2160p', '4k 2160', '2k',
'fullhd', 'fullhd 1080', 'fullhd 1080p', 'full hd', 'full hd 1080', 'full hd 1080p', 'hd1080', 'hd1080p', 'hd 1080', 'hd 1080p', '1080', '1080p',
'hd', 'hd720', 'hd720p', 'hd 720', 'hd 720p', '720', '720p', 'hdtv',
'sd', '480p', '480', '360p', '360', '240p', '240']
video_extensions = ['3g2', '3gp', '3gp2', 'asf', 'avi', 'divx', 'flv', 'iso', 'm4v', 'mk2', 'mk3d', 'mka', 'mkv', 'mov', 'mp4', 'mp4a', 'mpeg', 'mpg', 'ogg', 'ogm', 'ogv', 'qt', 'ra', 'ram', 'rm', 'ts', 'vob', 'wav', 'webm', 'wma', 'wmv']
subtitle_extensions = ['srt', 'idx', 'sub', 'ssa', 'ass']
image_extensions = ['.jpg', '.jpeg', '.png']

View File

@@ -301,8 +301,8 @@ def itemlist_refresh(offset=0):
ctl = win.getControl(cid)
pos = Item().fromurl(xbmc.getInfoLabel('ListItem.FileNameAndPath')).itemlistPosition + offset
logger.debug('ID:', _id, 'POSITION:', pos)
# xbmc.executebuiltin("Container.Refresh")
xbmc.executebuiltin('ReloadSkin()')
xbmc.executebuiltin("Container.Refresh")
# xbmc.executebuiltin('ReloadSkin()')
while xbmcgui.getCurrentWindowDialogId() != 10138:
pass
@@ -989,8 +989,7 @@ def get_window():
def play_video(item, strm=False, force_direct=False, autoplay=False):
logger.debug()
logger.debug(item.tostring('\n'))
logger.debug(item)
def play():
if item.channel == 'downloads':
@@ -1010,23 +1009,27 @@ def play_video(item, strm=False, force_direct=False, autoplay=False):
httptools.default_headers['Referer'] = item.referer
# Open the selection dialog to see the available options
opciones, video_urls, seleccion, salir = get_dialogo_opciones(item, default_action, strm, autoplay)
if salir: return
options, video_urls, selection, _exit = get_options_dialog(item, default_action, strm, autoplay)
if _exit: return
# get default option of addon configuration
seleccion = get_seleccion(default_action, opciones, seleccion, video_urls)
if seleccion < 0: return # Canceled box
selection = get_selection(default_action, options, selection, video_urls)
logger.debug("selection=%d" % seleccion)
logger.debug("selection=%s" % opciones[seleccion])
# Canceled box
if selection < 0:
prevent_busy(item)
return
logger.debug("selection=%d" % selection)
logger.debug("selection=%s" % options[selection])
# run the available option, jdownloader, download, favorites, add to the video library ... IF IT IS NOT PLAY
salir = set_opcion(item, seleccion, opciones, video_urls)
if salir:
_exit = set_option(item, selection, options, video_urls)
if _exit:
return
# we get the selected video
mediaurl, view, mpd = get_video_seleccionado(item, seleccion, video_urls, autoplay)
mediaurl, view, mpd, m3u8 = get_selected_video(item, selection, video_urls, autoplay)
if not mediaurl: return
# video information is obtained.
@@ -1035,8 +1038,7 @@ def play_video(item, strm=False, force_direct=False, autoplay=False):
set_infolabels(xlistitem, item, True)
# if it is a video in mpd format, the listitem is configured to play it with the inputstream addon implemented in Kodi 17
# from core.support import dbg;dbg()
if mpd or item.manifest =='mpd':
if mpd or item.manifest == 'mpd':
if not install_inputstream():
return
xlistitem.setProperty('inputstream' if PY3 else 'inputstreamaddon', 'inputstream.adaptive')
@@ -1046,7 +1048,7 @@ def play_video(item, strm=False, force_direct=False, autoplay=False):
xlistitem.setProperty("inputstream.adaptive.license_type", item.drm)
xlistitem.setProperty("inputstream.adaptive.license_key", item.license)
xlistitem.setMimeType('application/dash+xml')
elif item.manifest == 'hls' or (mediaurl.split('|')[0].endswith('m3u8') and mediaurl.startswith('http')):
elif m3u8 or item.manifest == 'hls':
if not install_inputstream():
return
xlistitem.setProperty('inputstream' if PY3 else 'inputstreamaddon', 'inputstream.adaptive')
@@ -1068,62 +1070,22 @@ def stop_video():
xbmc_player.stop()
def get_seleccion(default_action, opciones, seleccion, video_urls):
fixpri = False
# to know what priority you work on
priority = int(config.get_setting("resolve_priority"))
# will be used to check for premium or debrider links
check = []
# Check if resolve stop is disabled
if config.get_setting("resolve_stop") == False:
fixpri = True
# ask
def get_selection(default_action, options, selection, video_urls):
resolutions = []
for url in video_urls:
resolutions.append(calcResolution(url['res']) if 'res' in url else 0)
resolutions.sort()
if default_action == 2: resolutions.reverse()
# ask
if default_action == 0:
# "Choose an option"
seleccion = dialog_select(config.get_localized_string(30163), opciones)
# View in low quality
elif default_action == 1:
resolutions = []
for url in video_urls:
if "debrid]" in url[0] or "Premium)" in url[0]:
check.append(True)
res = calcResolution(url[0])
if res:
resolutions.append(res)
if resolutions:
if (fixpri == True and
check and
priority == 2):
seleccion = 0
else:
seleccion = resolutions.index(min(resolutions))
else:
seleccion = 0
# See in high quality
elif default_action == 2:
resolutions = []
for url in video_urls:
if "debrid]" in url[0] or "Premium)" in url[0]:
check.append(True)
res = calcResolution(url[0])
if res:
resolutions.append(res)
if resolutions:
if (fixpri == True and
check and
priority == 2):
seleccion = 0
else:
seleccion = resolutions.index(max(resolutions))
else:
if fixpri == True and check:
seleccion = 0
else:
seleccion = len(video_urls) - 1
selection = dialog_select(config.get_localized_string(30163), options)
else:
seleccion = 0
return seleccion
selection = 0
return selection
def calcResolution(option):
@@ -1215,12 +1177,12 @@ def handle_wait(time_to_wait, title, text):
return True
def get_dialogo_opciones(item, default_action, strm, autoplay):
def get_options_dialog(item, default_action, strm, autoplay):
logger.debug()
# logger.debug(item.tostring('\n'))
from core import servertools
opciones = []
options = []
error = False
try:
@@ -1244,37 +1206,41 @@ def get_dialogo_opciones(item, default_action, strm, autoplay):
item.server, item.url, item.password, muestra_dialogo)
if play_canceled:
return opciones, [], 0, True
return options, [], 0, True
seleccion = 0
selection = 0
# If you can see the video, present the options
if puedes:
video_urls = sorted(video_urls, key=lambda k: calcResolution(k['res']) if 'res' in k else 0)
video_urls.reverse()
for video_url in video_urls:
opciones.append(config.get_localized_string(60221) + " " + video_url[0])
name = '{} {} [{}]'.format(config.get_localized_string(60221), video_url.get('type'), servertools.get_server_parameters(item.server)['name'])
if video_url.get('res',''): name += ' [{}]'.format(video_url.get('res',''))
options.append(name)
if item.server == "local":
opciones.append(config.get_localized_string(30164))
options.append(config.get_localized_string(30164))
else:
# "Download"
downloadenabled = config.get_setting('downloadenabled')
if downloadenabled != False and item.channel != 'videolibrary':
opcion = config.get_localized_string(30153)
opciones.append(opcion)
options.append(opcion)
if item.isFavourite:
# "Remove from favorites"
opciones.append(config.get_localized_string(30154))
options.append(config.get_localized_string(30154))
else:
# "Add to Favorites"
opciones.append(config.get_localized_string(30155))
options.append(config.get_localized_string(30155))
if default_action == 3:
seleccion = len(opciones) - 1
selection = len(options) - 1
# Search for trailers
if item.channel not in ["trailertools"]:
# "Search Trailer"
opciones.append(config.get_localized_string(30162))
options.append(config.get_localized_string(30162))
# If you can't see the video it informs you
else:
@@ -1291,21 +1257,21 @@ def get_dialogo_opciones(item, default_action, strm, autoplay):
(sys.argv[0], Item(action="open_browser", url=item.url).tourl()))
if item.channel == "favorites":
# "Remove from favorites"
opciones.append(config.get_localized_string(30154))
options.append(config.get_localized_string(30154))
if len(opciones) == 0:
if len(options) == 0:
error = True
return opciones, video_urls, seleccion, error
return options, video_urls, selection, error
def set_opcion(item, seleccion, opciones, video_urls):
def set_option(item, selection, options, video_urls):
logger.debug()
# logger.debug(item.tostring('\n'))
salir = False
_exit = False
# You have not chosen anything, most likely because you pressed ESC
if seleccion == -1:
if selection == -1:
# To avoid the error "One or more elements failed" when deselecting from strm file
listitem = xbmcgui.ListItem(item.title)
@@ -1318,62 +1284,74 @@ def set_opcion(item, seleccion, opciones, video_urls):
xbmcplugin.setResolvedUrl(int(sys.argv[1]), False, listitem)
# "Download"
elif opciones[seleccion] == config.get_localized_string(30153):
elif options[selection] == config.get_localized_string(30153):
from specials import downloads
if item.contentType == "list" or item.contentType == "tvshow":
item.contentType = "video"
item.play_menu = True
downloads.save_download(item)
salir = True
_exit = True
# "Remove from favorites"
elif opciones[seleccion] == config.get_localized_string(30154):
elif options[selection] == config.get_localized_string(30154):
from specials import favorites
favorites.delFavourite(item)
salir = True
_exit = True
# "Add to Favorites":
elif opciones[seleccion] == config.get_localized_string(30155):
elif options[selection] == config.get_localized_string(30155):
from specials import favorites
item.from_channel = "favorites"
favorites.addFavourite(item)
salir = True
_exit = True
# "Search Trailer":
elif opciones[seleccion] == config.get_localized_string(30162):
elif options[selection] == config.get_localized_string(30162):
config.set_setting("subtitulo", False)
xbmc.executebuiltin("RunPlugin(%s?%s)" % (sys.argv[0], item.clone(channel="trailertools", action="buscartrailer", contextual=True).tourl()))
salir = True
_exit = True
return salir
return _exit
def get_video_seleccionado(item, seleccion, video_urls, autoplay=False):
def get_selected_video(item, selection, video_urls, autoplay=False):
logger.debug()
mediaurl = ""
view = False
wait_time = 0
file_type = ''
mpd = False
m3u8 = False
# video_urls Format:
# [{'type':'Video Extension', 'url': 'Video url', 'wait':seconds to wait, 'sub':'subtitle url'}]
# You have chosen one of the videos
if seleccion < len(video_urls):
mediaurl = video_urls[seleccion][1]
if len(video_urls[seleccion]) > 4:
wait_time = video_urls[seleccion][2]
if not item.subtitle:
item.subtitle = video_urls[seleccion][3]
mpd = True
elif len(video_urls[seleccion]) > 3:
wait_time = video_urls[seleccion][2]
if not item.subtitle:
item.subtitle = video_urls[seleccion][3]
elif len(video_urls[seleccion]) > 2:
wait_time = video_urls[seleccion][2]
if selection < len(video_urls):
video_url = video_urls[selection]
mediaurl = video_url.get('url', '')
wait_time = video_url.get('wait', 0)
file_type = video_url.get('type', 'Video').lower()
if not item.subtitle: item.subtitle = video_url.get('sub', '')
view = True
# if selection < len(video_urls):
# mediaurl = video_urls[selection][1]
# if len(video_urls[selection]) > 4:
# wait_time = video_urls[selection][2]
# if not item.subtitle:
# item.subtitle = video_urls[selection][3]
# mpd = True
# elif len(video_urls[selection]) > 3:
# wait_time = video_urls[selection][2]
# if not item.subtitle:
# item.subtitle = video_urls[selection][3]
# elif len(video_urls[selection]) > 2:
# wait_time = video_urls[selection][2]
# view = True
if 'mpd' in video_urls[seleccion][0]:
if 'mpd' in file_type:
mpd = True
elif 'm3u8' in file_type:
m3u8 = True
# If there is no mediaurl it is because the video is not there :)
logger.debug("mediaurl=" + mediaurl)
@@ -1389,7 +1367,7 @@ def get_video_seleccionado(item, seleccion, video_urls, autoplay=False):
if not continuar:
mediaurl = ""
return mediaurl, view, mpd
return mediaurl, view, mpd, m3u8
def set_player(item, xlistitem, mediaurl, view, strm):
@@ -1842,10 +1820,18 @@ def set_played_time(item):
del db['viewed'][ID]
# def prevent_busy(item):
# logger.debug()
# if not item.autoplay and not item.window:
# xbmc.Player().play(os.path.join(config.get_runtime_path(), "resources", "kod.mp4"))
# xbmc.sleep(200)
# xbmc.Player().stop()
def prevent_busy(item):
logger.debug()
if not item.autoplay and not item.window:
xbmc.Player().play(os.path.join(config.get_runtime_path(), "resources", "kod.mp4"))
if item.globalsearch: xbmc.Player().play(os.path.join(config.get_runtime_path(), "resources", "kod.mp4"))
else: xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, xbmcgui.ListItem(path=os.path.join(config.get_runtime_path(), "resources", "kod.mp4")))
xbmc.sleep(200)
xbmc.Player().stop()
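On the consumer side, get_selected_video() now reads each entry with dict lookups and derives the mpd/m3u8 flags from 'type'. A condensed, self-contained restatement of that handling; the sample entry is hypothetical:

# One entry in the new format (made-up URL).
sample = {'type': 'm3u8', 'res': '1080p', 'url': 'https://example.com/master.m3u8', 'wait': 0, 'sub': ''}

mediaurl = sample.get('url', '')
wait_time = sample.get('wait', 0)
file_type = sample.get('type', 'Video').lower()
subtitle = sample.get('sub', '')
mpd = 'mpd' in file_type    # routed to inputstream.adaptive (DASH)
m3u8 = 'm3u8' in file_type  # routed to inputstream.adaptive (HLS)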

View File

@@ -61,4 +61,4 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
video_urls = support.get_jwplayer_mediaurl(data, 'akvideo', onlyHttp=True)
return sorted(video_urls, key=lambda x: int(x[0].split('x')[0])) if vres else video_urls
return video_urls

View File

@@ -22,5 +22,5 @@ def get_video_url(page_url, user="", password="", video_password=""):
label, videourl = scrapertools.find_single_match(data, 'label":"([^"]+)".*?file":"([^"]+)')
if "animeid.tv" in videourl:
videourl = httptools.downloadpage(videourl, follow_redirects=False, only_headers=True).headers.get("location", "")
video_urls.append([".MP4 " + label + " [animeid]", videourl])
video_urls.append({'type':'mp4', 'res':label, 'url':videourl})
return video_urls

View File

@@ -24,6 +24,5 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
match = scrapertools.find_multiple_matches(data, patron)
for media_url in match:
media_url += "|Referer=%s" %page_url
title = "mp4 [anonfile]"
video_urls.append([title, media_url])
video_urls.append({'type':'mp4', 'url':media_url})
return video_urls

View File

@@ -23,5 +23,5 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
patron = '<meta property="og:video" content="([^"]+)">'
matches = scrapertools.find_multiple_matches(data, patron)
for url in matches:
video_urls.append(['.MP4 [ArchiveOrg]', url])
video_urls.append({'type':'mp4', 'url':url})
return video_urls

View File

@@ -45,9 +45,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.debug("URL=" + str(url))
# Video URL
video_urls.append([".mp4" + " [backin]", url])
video_urls.append({'type':'mp4', 'url':url})
for video_url in video_urls:
logger.debug("%s - %s" % (video_url[0], httptools.get_url_headers(video_url[1])))
# for video_url in video_urls:
# logger.debug("%s - %s" % (video_url[0], httptools.get_url_headers(video_url[1])))
return video_urls

View File

@@ -28,6 +28,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
data = re.sub(r'\n|\r|\t|\s{2,}', "", data)
media_url, ext = scrapertools.find_single_match(data, r'file:\s*"([^"]+)",type:\s*"([^"]+)"')
video_urls.append(["%s [Badshare]" % ext, media_url])
video_urls.append({'type':ext, 'url':media_url})
return video_urls

View File

@@ -34,5 +34,5 @@ def get_video_url(page_url, user="", password="", video_password=""):
file += "&Host=fs30.indifiles.com:182"
video_urls = []
videourl = file
video_urls.append([".MP4 [bdupload]", videourl])
video_urls.append({'type':'mp4', 'url':videourl})
return video_urls

View File

@@ -27,5 +27,5 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
matches = scrapertools.find_multiple_matches(data, patron)
for url in matches:
url += "|Referer=%s" %page_url
video_urls.append(['.m3u8 [CinemaUpload]', url])
video_urls.append({'type':'m3u8', 'url':url})
return video_urls

View File

@@ -49,9 +49,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
# Only the last part of the url needs to be encoded
url_strip = urllib.quote(media.rsplit('/', 1)[1])
media_url = media.rsplit('/', 1)[0] + "/" + url_strip
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [clicknupload]", media_url])
for video_url in video_urls:
logger.debug("%s - %s" % (video_url[0], video_url[1]))
video_urls.append({'type':scrapertools.get_filename_from_url(media_url).split('.')[-1], 'url':media_url})
# for video_url in video_urls:
# logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -35,7 +35,7 @@ def get_video_url(page_url, user="", password="", video_password=""):
else:
label = video.split('.')[-1]
multires = False
video_urls.append([label + " [clipwatching]", video])
if multires:
video_urls.sort(key=lambda it: int(it[0].split("p ", 1)[0]))
video_urls.append({'type':label, 'url':video})
# if multires:
# video_urls.sort(key=lambda it: int(it[0].split("p ", 1)[0]))
return video_urls

View File

@@ -37,5 +37,5 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
url = url.split(',')
video_url = url[0]
Type = url[1].replace('label:','')
video_urls.append(['%s [CloudVideo]' % Type, video_url])
video_urls.append({'type':Type, 'url':video_url})
return video_urls

View File

@@ -92,9 +92,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
import traceback
logger.error(traceback.format_exc())
file_sub = ""
video_urls.append(["%s %sp [crunchyroll]" % (filename, quality), media_url, 0, file_sub])
for video_url in video_urls:
logger.debug("%s - %s" % (video_url[0], video_url[1]))
video_urls.append({'type':filename, 'res':quality, 'url':media_url, 'sub':file_sub})
# for video_url in video_urls:
# logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -38,7 +38,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
stream_url_http = scrapertools.find_single_match(data_m3u8, r'PROGRESSIVE-URI="([^"]+)"')
if stream_url_http:
stream_url = stream_url_http
video_urls.append(["%sp .%s [dailymotion]" % (calidad, stream_type), stream_url, 0, subtitle])
video_urls.append({'type':stream_type, 'res':calidad, 'url':stream_url, 'sub':subtitle})
# for video_url in video_urls:
# logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
from platformcode import logger, config
from platformcode import logger
from core import scrapertools
def test_video_exists(page_url):
@@ -9,7 +10,6 @@ def test_video_exists(page_url):
# Returns an array of possible video url's from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.debug("(page_url='%s')" % page_url)
video_urls = [["%s %s" % (page_url[-4:], config.get_localized_string(30137)), page_url]]
video_urls=[{'type':scrapertools.get_filename_from_url(page_url).split('.')[-1], 'url':page_url}]
return video_urls

View File

@@ -31,7 +31,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
if match:
url, token = match
ret = scraper.get(host + url, headers=headers).text
video_urls.append(['mp4 [DooD Stream]', '{}{}{}{}|Referer={}'.format(randomize(ret), url, token, int(time.time() * 1000), host)])
video_urls.append({'type':'mp4', 'url':'{}{}{}{}|Referer={}'.format(randomize(ret), url, token, int(time.time() * 1000), host)})
return video_urls

View File

@@ -24,6 +24,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
patron += '"src":"(http.*?)".*?'
matches = scrapertools.find_multiple_matches(data, patron)
for label, url in matches:
video_urls.append(['%s [dostream]' %label, url])
video_urls.sort(key=lambda it: int(it[0].split("p ")[0]))
video_urls.append({'type':label, 'url':url})
# video_urls.sort(key=lambda it: int(it[0].split("p ")[0]))
return video_urls

View File

@@ -22,6 +22,6 @@ def get_video_url(page_url, user="", password="", video_password=""):
data = httptools.downloadpage(page_url).data
video_urls = []
videourl = scrapertools.find_single_match(data, 'controls preload.*?src="([^"]+)')
video_urls.append([".MP4 [downace]", videourl])
video_urls.append({'type':'mp4', 'url':videourl})
return video_urls

View File

@@ -31,7 +31,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
videourl = match
videourl = videourl.replace('%5C', '')
videourl = urllib.unquote(videourl)
video_urls.append(["[facebook]", videourl])
for video_url in video_urls:
logger.debug("%s - %s" % (video_url[0], video_url[1]))
video_urls.append({'url':videourl})
# for video_url in video_urls:
# logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -32,7 +32,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
for video_url, video_calidad in videos:
extension = scrapertools.get_filename_from_url(video_url)[-4:]
if extension not in [".vtt", ".srt"]:
video_urls.append(["%s %s [fastplay]" % (extension, video_calidad), video_url, 0, subtitulo])
video_urls.append({'type':extension, 'res':video_calidad, 'url':video_url, 'sub':subtitulo})
try:
video_urls.sort(key=lambda it: int(it[0].split("p ", 1)[0].rsplit(" ")[1]))
except:

View File

@@ -32,6 +32,6 @@ def get_video_url(page_url, user="", password="", video_password=""):
media_url = file['file']
label = file['label']
extension = file['type']
video_urls.append([ extension + ' ' + label + ' [Fembed]', media_url])
video_urls.sort(key=lambda x: int(x[0].split()[1].replace('p','')))
video_urls.append({'type':extension, 'res':label, 'url':media_url})
# video_urls.sort(key=lambda x: int(x[0].split()[1].replace('p','')))
return video_urls

View File

@@ -23,5 +23,5 @@ def get_video_url(page_url, user="", password="", video_password=""):
data = httptools.downloadpage(page_url, follow_redirects=False, only_headers=True)
logger.debug(data.headers)
url = data.headers['location']
video_urls.append(['Fex', url])
video_urls.append({'url':url})
return video_urls

View File

@@ -26,15 +26,15 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
qualities = scrapertools.find_multiple_matches(qualities, ' "([^"]+)')
for calidad in qualities:
media = media_url
title = "%s [filepup]" % (calidad)
# title = "%s [filepup]" % (calidad)
if "480" not in calidad:
med = media_url.split(".mp4")
media = med[0] + "-%s.mp4" %calidad + med[1]
media = med[0] + "-%s.mp4" % calidad + med[1]
media += "|Referer=%s" %page_url
media += "&User-Agent=" + httptools.get_user_agent()
video_urls.append([title, media, int(calidad.replace("p", ""))])
video_urls.sort(key=lambda x: x[2])
for video_url in video_urls:
video_url[2] = 0
logger.debug("%s - %s" % (video_url[0], video_url[1]))
video_urls.append({'type':'mp4', 'res':calidad, 'url':media})
# video_urls.sort(key=lambda x: x[2])
# for video_url in video_urls:
# video_url[2] = 0
# logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -21,6 +21,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
data = httptools.downloadpage(page_url).data
url = scrapertools.find_single_match(data, '(?i)link:\s*"(https://.*?filescdn\.com.*?mp4)"')
url = url.replace(':443', '')
video_urls.append(['filescdn', url])
video_urls.append({'url':url})
return video_urls

View File

@@ -1,5 +1,5 @@
{
"active": true,
"active": false,
"find_videos": {
"ignore_urls": [],
"patterns": [

View File

@@ -123,10 +123,10 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
for media_url, label in media_urls:
if not media_url.endswith("png") and not media_url.endswith(".srt"):
video_urls.append(["." + media_url.rsplit('.', 1)[1] + " [flashx]", media_url, 0, subtitle])
video_urls.append({'type':media_url.rsplit('.', 1)[1], 'url':media_url, 'sub':subtitle})
for video_url in video_urls:
logger.debug("%s - %s" % (video_url[0], video_url[1]))
# for video_url in video_urls:
# logger.debug("%s - %s" % (video_url[0], video_url[1]))
except:
pass

View File

@@ -22,13 +22,13 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
matches = re.compile(patron, re.DOTALL).findall(page_url)
try:
video_urls.append(["[fourshared]", matches[0]])
video_urls.append({'url':matches[0]})
except:
pass
else:
video_urls.append(["[fourshared]", page_url])
video_urls.append({'url':page_url})
for video_url in video_urls:
logger.debug("%s - %s" % (video_url[0], video_url[1]))
# for video_url in video_urls:
# logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -83,11 +83,11 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
0] + " playpath=" + playpath + " swfUrl=http://gamovideo.com/player61/jwplayer.flash.swf"
video_urls = []
video_urls.append(["RTMP [gamovideo]", rtmp_url])
video_urls.append([scrapertools.get_filename_from_url(mediaurl)[-4:] + " [gamovideo]", mediaurl])
video_urls.append({'type':'rtmp', 'url':rtmp_url})
video_urls.append({'type':scrapertools.get_filename_from_url(mediaurl).split('.')[-1], 'url':mediaurl})
for video_url in video_urls:
logger.debug("%s - %s" % (video_url[0], video_url[1]))
# for video_url in video_urls:
# logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -1,5 +1,5 @@
{
"active": true,
"active": false,
"find_videos": {
"ignore_urls": [],
"patterns": [

View File

@@ -21,9 +21,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
newpatron = '</script>.*?<a href="(.*?)" title="Click to Download">'
newmatches = re.compile(newpatron, re.DOTALL).findall(data)
if len(newmatches) > 0:
video_urls.append(["[googlevideo]", newmatches[0]])
video_urls.append({'url':newmatches[0]})
for video_url in video_urls:
logger.debug("%s - %s" % (video_url[0], video_url[1]))
# for video_url in video_urls:
# logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -36,5 +36,5 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
for url in matches:
if url.startswith('//'): url= 'http:' + url
url += "|Referer=%s" %page_url
video_urls.append(['mp4 [Go Unlimited]', url])
video_urls.append({'type':'mp4', 'url':url})
return video_urls

View File

@@ -3,10 +3,6 @@
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(?s)https://youtube.googleapis.com.*?docid=([A-z0-9-_=]+)",
"url": "http://docs.google.com/get_video_info?docid=\\1"
},
{
"pattern": "(?s)http://docs.google.com/get_video_info.*?docid=([A-z0-9-_=]+)",
"url": "http://docs.google.com/get_video_info?docid=\\1"

View File

@@ -77,8 +77,8 @@ def get_video_url(page_url, user="", password="", video_password=""):
for itag, video_url in streams:
if not video_url in urls:
video_url += headers_string
video_urls.append([itags.get(itag, ''), video_url])
video_urls.append({'res':itags.get(itag, ''), 'type':video_url.split('.')[-1], 'url':video_url})
urls.append(video_url)
video_urls.sort(key=lambda video_urls: int(video_urls[0].replace("p", "")))
# video_urls.sort(key=lambda video_urls: int(video_urls[0].replace("p", "")))
return video_urls

View File

@@ -25,6 +25,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.debug(data)
url = base64.b64decode(data)
itemlist.append([".mp4 [HDLoad]", url])
itemlist.append({'type':'mp4', 'url':url})
return itemlist

View File

@@ -140,6 +140,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
data = httptools.downloadpage(baseUrl + '/pl/' + page_url.split('/')[-1].replace('?', '') + '.m3u8', headers=[['X-Secure-Proof', secureProof]]).data
filetools.write(xbmc.translatePath('special://temp/hdmario.m3u8'), data, 'w')
video_urls = [['.m3u8 [HDmario]', 'special://temp/hdmario.m3u8']]
video_urls = [{'type':'m3u8', 'url':'special://temp/hdmario.m3u8'}]
return video_urls

View File

@@ -42,9 +42,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
mediaurl = r[0]
video_urls = []
video_urls.append([scrapertools.get_filename_from_url(mediaurl)[-4:] + " [hugefiles]", mediaurl])
video_urls.append({'type':scrapertools.get_filename_from_url(mediaurl).split('.')[-1], 'url':mediaurl})
for video_url in video_urls:
logger.debug("%s - %s" % (video_url[0], video_url[1]))
# for video_url in video_urls:
# logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -34,6 +34,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
ext = ext.split("/")[1]
except:
ext = ".mp4"
video_urls.append(["%s (%s) [idtbox]" % (ext, res), url])
video_urls.append({'type':ext, 'res':res, 'url':url})
return video_urls

View File

@@ -18,6 +18,6 @@ def get_video_url(page_url, user="", password="", video_password=""):
data = httptools.downloadpage(page_url).data
video_urls = []
videourl = scrapertools.find_single_match(data, 'source src="([^"]+)')
video_urls.append([".MP4 [jawcloud]", videourl])
video_urls.append({'type':'mp4', 'url':videourl})
return video_urls

View File

@@ -25,9 +25,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
video_urls = []
media_url = scrapertools.find_single_match(data, '<video src="([^"]+)"')
if media_url:
ext = media_url[-4:]
ext = media_url.split('.')[-1]
if ext == 'm3u8':
media_url = ''
video_urls.append(["%s [Jetload]" % (ext), media_url])
video_urls.append({'type':ext, 'url':media_url})
return video_urls

View File

@@ -38,13 +38,13 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
if not media_url.startswith("http"):
media_url = "http:" + media_url
quality = " %s" % videos['key']
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + quality + " [mail.ru]", media_url])
try:
video_urls.sort(key=lambda video_urls: int(video_urls[0].rsplit(" ", 2)[1][:-1]))
except:
pass
video_urls.append({'type':scrapertools.get_filename_from_url(media_url).split('.')[-1], 'res':quality, 'url':media_url})
# try:
# video_urls.sort(key=lambda video_urls: int(video_urls[0].rsplit(" ", 2)[1][:-1]))
# except:
# pass
for video_url in video_urls:
logger.debug("%s - %s" % (video_url[0], video_url[1]))
# for video_url in video_urls:
# logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -49,12 +49,12 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
if url_video:
import random, string
parse = urlparse.urlparse(url_video)
video_urls.append(['mp4 [MaxStream]', url_video])
video_urls.append({'type':'mp4', 'url':url_video})
try:
r1 = ''.join(random.choice(string.ascii_letters + string.digits) for i in range(19))
r2 = ''.join(random.choice(string.ascii_letters + string.digits) for i in range(19))
r3 = ''.join(random.choice(string.ascii_letters + string.digits) for i in range(19))
video_urls.append(['m3u8 [MaxStream]', '{}://{}/hls/{},{},{},{},.urlset/master.m3u8'.format(parse.scheme, parse.netloc, parse.path.split('/')[1], r1, r2, r3)])
video_urls.append({'type':'m3u8', 'url':'{}://{}/hls/{},{},{},{},.urlset/master.m3u8'.format(parse.scheme, parse.netloc, parse.path.split('/')[1], r1, r2, r3)})
# video_urls.append(['m3u8 [MaxStream]', '{}://{}/hls/{},wpsc2hllm5g5fkjvslq,4jcc2hllm5gzykkkgha,fmca2hllm5jtpb7cj5q,.urlset/master.m3u8'.format(parse.scheme, parse.netloc, parse.path.split('/')[1])])
except:
logger.debug('Something wrong: Impossible get HLS stream')

View File

@@ -25,7 +25,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
patron = 'Download file.*?href="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
if len(matches) > 0:
video_urls.append([matches[0][-4:] + " [mediafire]", matches[0]])
for video_url in video_urls:
logger.debug("%s - %s" % (video_url[0], video_url[1]))
video_urls.append({'type':matches[0].split('.')[-1], 'url':matches[0]})
# for video_url in video_urls:
# logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -86,10 +86,10 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
# This function (the playlist) does not work; check megaserver/handler.py, although the call is in client.py
if len(files) > 5:
media_url = c.get_play_list()
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [mega]", media_url])
video_urls.append({'type':scrapertools.get_filename_from_url(media_url).split('.')[-1], 'url':media_url})
else:
for f in files:
media_url = f["url"]
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [mega]", media_url])
video_urls.append({'type':scrapertools.get_filename_from_url(media_url).split('.')[-1], 'url':media_url})
return video_urls

View File

@@ -27,7 +27,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.debug("url=" + page_url)
video_urls = []
ext = '.mp4'
ext = 'mp4'
global data
packed = scrapertools.find_single_match(data, r'(eval.*?)</script>')
@@ -42,7 +42,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
else:
media_url = ''
if not media_url.startswith('http'):
media_url = 'http:%s' % media_url
video_urls.append(["%s [Mixdrop]" % ext, media_url])
media_url = 'http:' + media_url
video_urls.append({'type':ext, 'url': media_url})
return video_urls

View File

@@ -25,7 +25,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
media_url = scrapertools.find_single_match(data, '"file":"([^"]+)')
logger.debug("media_url=" + media_url)
video_urls = list()
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [mp4upload]", media_url])
for video_url in video_urls:
logger.debug("%s - %s" % (video_url[0], video_url[1]))
video_urls.append({'type':scrapertools.get_filename_from_url(media_url).split('.')[-1], 'url':media_url})
# for video_url in video_urls:
# logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -30,5 +30,5 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
if not url.startswith("http"):
url = "http:%s" % url
if not "Default" in quality:
video_urls.append(["[mydaddy] %s" % quality, url])
video_urls.append({'res':quality, 'url':url})
return video_urls

View File

@@ -25,7 +25,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
video_urls = []
global page_data
video_url = scrapertools.find_single_match(decode(page_data), r"'src',\s*'([^']+)")
video_urls.append([video_url.split('.')[-1] + ' [MyStream]', video_url])
video_urls.append({'type':video_url.split('.')[-1], 'url':video_url})
return video_urls
def decode(data):

View File

@@ -29,5 +29,5 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
matches = scrapertools.find_multiple_matches(data, 'tracker: "([^"]+)"')
for scrapedurl in matches:
url = base64.b64decode(scrapedurl)
video_urls.append(["[myupload]", url])
video_urls.append({'url':url})
return video_urls

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
import sys
import sys, random
from platformcode import config
@@ -101,7 +101,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
link_m3u8 = 'http://hqq.watch/player/get_md5.php?ver=2&at=%s&adb=0&b=1&link_1=%s&server_2=%s&vid=%s&ext=%s' % (at, link_1, server_2, vid, ext)
# ~ logger.debug(link_m3u8)
video_urls.append(["[netu.tv]", link_m3u8])
video_urls.append({'type':'m3u8', 'url':link_m3u8})
return video_urls

View File

@@ -34,7 +34,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
# support.dbg()
url = data.get('result',{}).get('playlist')
video_urls.append([url.split('.')[-1], url + '|Referer=' + page_url])
video_urls.append({'type':url.split('.')[-1], 'url':url + '|Referer=' + page_url})
return video_urls

View File

@@ -64,12 +64,12 @@ def find_videos(data):
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for match in matches:
titulo = "[nowvideo]"
# titulo = "[nowvideo]"
url = 'http://nowvideo.club/%s' % match
if url not in encontrados:
logger.debug(" url=" + url)
devuelve.append([titulo, url, 'nowvideo'])
devuelve.append({'url':url})
encontrados.add(url)
else:
logger.debug(" url duplicada=" + url)

View File

@@ -28,6 +28,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
# Video URL
for type, url in re.findall(r'\{"name":"([^"]+)","url":"([^"]+)"', data, re.DOTALL):
url = url.replace("%3B", ";").replace("u0026", "&")
video_urls.append([type + " [OKru]", url])
video_urls.append({'type':type, 'url':url})
return video_urls

View File

@@ -23,6 +23,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
protection = support.match(data, patron=r'>var protection="([^"]+)"').match
url = httptools.downloadpage("https://www.okstream.cc/request/", post='&morocco={}&mycountry={}'.format(keys, protection), headers={'Referer':page_url}).data
url = url.strip()
video_urls.append([url.split('.')[-1] + " [OkStream]", url])
video_urls.append({'type':url.split('.')[-1], 'url':url})
return video_urls

View File

@@ -57,9 +57,10 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.debug("location=" + location)
video_urls = []
video_urls.append([filename[-4:] + " (Premium) [1fichier]", location])
# video_urls.append([filename[-4:] + " (Premium) [1fichier]", location])
video_urls.append({'type':filename.split('.')[-1], 'url':location})
for video_url in video_urls:
logger.debug("%s - %s" % (video_url[0], video_url[1]))
# for video_url in video_urls:
# logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -30,6 +30,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
quality = quality.split('x')[0]
if quality not in qualities:
qualities.append(quality)
video_urls.append(["m3u8 {}p [Paramount]".format(quality), url])
video_urls.sort(key=lambda url: int(support.match(url[0], patron=r'(\d+)p').match))
video_urls.append({'type':'m3u8', 'res':quality, 'url':url})
# video_urls.sort(key=lambda url: int(support.match(url[0], patron=r'(\d+)p').match))
return video_urls

View File

@@ -23,5 +23,5 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
pack = scrapertools.find_single_match(data.data, 'p,a,c,k,e,d.*?</script>')
unpacked = jsunpack.unpack(pack)
url = scrapertools.find_single_match(unpacked, 'file:"([^"]+)') + "|Referer=%s" % page_url
video_urls.append(['m3u8 [PlayTube]', url] )
video_urls.append({'type':'m3u8', 'url':url})
return video_urls

View File

@@ -1,5 +1,5 @@
{
"active": true,
"active": false,
"find_videos": {
"ignore_urls": [],
"patterns": [

View File

@@ -38,12 +38,12 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
video_url = video_url.replace("\\", "")
if extension not in [".vtt", ".srt"]:
video_urls.append(["%s %s [rcdnme]" % (extension, video_calidad), video_url, 0, subtitulo])
try:
video_urls.sort(key=lambda it: int(it[0].split("p ", 1)[0].rsplit(" ")[1]))
except:
pass
for video_url in video_urls:
logger.debug(" %s - %s" % (video_url[0], video_url[1]))
video_urls.append({'type':extension, 'res':video_calidad, 'url':video_url, 'sub':subtitulo})
# try:
# video_urls.sort(key=lambda it: int(it[0].split("p ", 1)[0].rsplit(" ")[1]))
# except:
# pass
# for video_url in video_urls:
# logger.debug(" %s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -55,5 +55,5 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
new_link = base_link + '/?format=json&sqr4374_compat=1&no_404=true&%s&%s' % (referer, id)
data = httptools.downloadpage(new_link).data
json_data = jsontools.load(data)
video_urls.append(['Rutube', json_data['video_balancer']['m3u8']])
video_urls.append({'type':'m3u8', 'url':json_data['video_balancer']['m3u8']})
return video_urls

View File

@@ -30,6 +30,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
#media_url += "|Referer=%s" %page_url
if "m3u8" in media_url:
ext = "m3u8"
video_urls.append(["%s [samaup]" % (ext), media_url])
video_urls.append({'type':ext, 'url':media_url})
return video_urls

View File

@@ -14,15 +14,13 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
data = scrapertools.httptools.downloadpage(page_url).data
media_url = scrapertools.find_single_match(data, 'var\s+video_source\s+\=\s+"([^"]+)"')
if "cache-1" in media_url:
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " (cache1) [sendvid]", media_url])
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " (cache2) [sendvid]",
media_url.replace("cache-1", "cache-2")])
video_urls.append({'type':scrapertools.get_filename_from_url(media_url).split('.')[-1], 'url':media_url})
video_urls.append({'type':scrapertools.get_filename_from_url(media_url).split('.')[-1], 'url':media_url.replace("cache-1", "cache-2")})
elif "cache-2" in media_url:
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " (cache1) [sendvid]",
media_url.replace("cache-2", "cache-1")])
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " (cache2) [sendvid]", media_url])
video_urls.append({'type':scrapertools.get_filename_from_url(media_url).split('.')[-1], 'url':media_url.replace("cache-2", "cache-1")})
video_urls.append({'type':scrapertools.get_filename_from_url(media_url).split('.')[-1], 'url':media_url})
else:
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [sendvid]", media_url])
for video_url in video_urls:
logger.debug("%s - %s" % (video_url[0], video_url[1]))
video_urls.append({'type':scrapertools.get_filename_from_url(media_url).split('.')[-1], 'url':media_url})
# for video_url in video_urls:
# logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -32,10 +32,10 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
media_url = httptools.downloadpage(media_url, only_headers=True, follow_redirects=False).headers.get("location", "")
if media_url:
video_urls.append([media_url.split('.')[-1] + ' - ' + label + ' - ' + ' [Speedvideo]', media_url])
logger.debug("speed video - media urls: %s " % video_urls)
video_urls.append({'type':media_url.split('.')[-1], 'res':label, 'url':media_url})
# logger.debug("speed video - media urls: %s " % video_urls)
return sorted(video_urls, key=lambda x: quality[x[0].split(' - ')[1]])
return video_urls
##,

View File

@@ -25,6 +25,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
media_url = c.get_manifest_url()
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [Streaming Community]", media_url])
video_urls.append({'type':scrapertools.get_filename_from_url(media_url).split('.')[-1] , 'url':media_url})
return video_urls

View File

@@ -56,7 +56,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
video_urls = []
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [Streamon]", media_url])
video_urls.append({'type':scrapertools.get_filename_from_url(media_url).split('.')[-1], 'url':media_url})
return video_urls

View File

@@ -33,5 +33,5 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
possible_url = js2py.eval_js(find_url)
url = "https:" + possible_url
url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
video_urls.append(['MP4 [Streamtape]', url])
video_urls.append({'type':'mp4', 'url':url})
return video_urls

View File

@@ -35,6 +35,6 @@ def get_video_url(page_url, video_password=""):
else:
url = re.sub(r'(\.\w{2,3})/\w', '\\1/getl1nk-', data.url) + '.dll'
url += "|Referer=https://streamz.ws/&User-Agent=Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'"
video_urls.append(["mp4 [streamZ]", url])
video_urls.append({'type':'mp4', 'url':url})
return video_urls

View File

@@ -43,15 +43,15 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
for source in lSrc:
quality = source['label'] if 'label' in source else 'auto'
video_urls.append(['.' + source['file'].split('.')[-1] + ' [' + quality + '] [SuperVideo]', source['file']])
video_urls.append({'type':source['file'].split('.')[-1], 'res':quality, 'url':source['file']})
else:
matches = scrapertools.find_multiple_matches(data, r'src:\s*"([^"]+)",\s*type:\s*"[^"]+"(?:\s*, res:\s(\d+))?')
for url, quality in matches:
if url.split('.')[-1] != 'm3u8':
video_urls.append([url.split('.')[-1] + ' [' + quality + '] [SuperVideo]', url])
video_urls.append({'type':url.split('.')[-1], 'res':quality, 'url':url})
else:
video_urls.append([url.split('.')[-1], url])
video_urls.append({'type':url.split('.')[-1], 'url':url})
video_urls.sort(key=lambda x: x[0].split()[-2])
# video_urls.sort(key=lambda x: x[0].split()[-2])
return video_urls

View File

@@ -30,6 +30,6 @@ def get_video_url(page_url, user="", password="", video_password=""):
if not video.startswith("//"):
continue
video = "https:" + video
video_urls.append(["mp4 [Thevid]", video])
logger.debug("Url: %s" % videos)
video_urls.append({'type':'mp4', 'url':video})
# logger.debug("Url: %s" % videos)
return video_urls

View File

@@ -21,6 +21,6 @@ def get_video_url(page_url, user="", password="", video_password=""):
data = httptools.downloadpage(page_url).data
video_urls = []
videourl = scrapertools.find_single_match(data, 'src: "([^"]+)')
video_urls.append([".MP4 [thevideobee]", videourl])
video_urls.append({'type':'mp4', 'url':videourl})
return video_urls

View File

@@ -29,9 +29,9 @@ def get_video_url(page_url, premium=False, user='', password='', video_password=
info('server=torrent, the url is the good')
if page_url.startswith('magnet:'):
video_urls = [['magnet: [torrent]', page_url]]
video_urls = [{'type':'magnet', 'url':page_url}]
else:
video_urls = [['.torrent [torrent]', page_url]]
video_urls = [{'type':'torrent', 'url':page_url}]
return video_urls

View File

@@ -21,6 +21,6 @@ def get_video_url(page_url, user="", password="", video_password=""):
data = httptools.downloadpage(page_url).data
video_urls = []
videourl = scrapertools.find_single_match(data, 'source src="([^"]+)')
video_urls.append([".MP4 [tusfiles]", videourl])
video_urls.append({'type':'mp4', 'url':videourl})
return video_urls

View File

@@ -71,10 +71,10 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.error(traceback.format_exc())
extension = ""
video_urls.append([extension + " (Premium) [uploaded.to]", location])
video_urls.append({'type':extension, 'url':location})
for video_url in video_urls:
logger.debug("%s - %s" % (video_url[0], video_url[1]))
# for video_url in video_urls:
# logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -27,6 +27,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
data = httptools.downloadpage(page_url, post=post).data
media_url = scrapertools.find_single_match(data, '<a href="([^"]+)">http')
ext = scrapertools.get_filename_from_url(media_url)
video_urls.append(["%s [Uppom]" % ext, media_url])
video_urls.append({'type':ext, 'url':media_url})
return video_urls

View File

@@ -24,7 +24,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
if new_data != "":
from lib import jsunpack
data = jsunpack.unpack(new_data)
media_url = scrapertools.find_single_match(data, r'file:"([^"]+)"') + '|Referer=' + page_url
video_urls.append(["%s [UPstream]" % media_url.split('.')[-1], media_url])
media_url = scrapertools.find_single_match(data, r'file:"([^"]+)"')
video_urls.append({'type':media_url.split('.')[-1], 'url':media_url + '|Referer=' + page_url})
return video_urls

View File

@@ -74,8 +74,8 @@ def uptostream(data):
tipo = tipo.replace("video/","")
if lang: extension = "{} - {} [{}]".format(tipo, res, lang.upper())
else: extension = "{} - {}".format(tipo, res)
video_urls.append([extension + " [UPtoStream]", media_url, 0, subtitle])
video_urls.sort(key=lambda url: int(match(url[0], patron=r'(\d+)p').match))
video_urls.append({'type':extension, 'url':media_url, 'sub':subtitle})
# video_urls.sort(key=lambda url: int(match(url[0], patron=r'(\d+)p').match))
return video_urls
def atob(s):
@@ -93,6 +93,6 @@ def uptobox(url, data):
media = match(url, post=post[:-1], patron=r'<a href="([^"]+)">\s*<span class="button_upload green">').match
url_strip = media.rsplit('/', 1)[1]
media_url = media.rsplit('/', 1)[0] + "/" + url_strip
video_urls.append([media_url[-4:] + " [UPtoStream]", media_url])
video_urls.append({'type':media_url.split('.')[-1], 'url':media_url})
return video_urls

View File

@@ -43,7 +43,7 @@ def get_video_url(page_url, premium = False, user = "", password = "", video_pas
oculto = re.findall('<input type=hidden value=([^ ]+) id=func', data, flags=re.DOTALL)[0]
funciones = resuelve(clave, base64.b64decode(oculto))
url, type = scrapertools.find_single_match(funciones, "setAttribute\('src', '(.*?)'\);\s.*?type', 'video/(.*?)'")
video_urls.append(['upvid [%s]' % type ,url])
video_urls.append({'type':type, 'url':url})
return video_urls

View File

@@ -31,6 +31,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
for url in matches:
url = url+'|Referer='+page_url
video_urls.append(["[uqload]", url])
video_urls.append({'url':url})
return video_urls

View File

@@ -32,6 +32,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.debug(post)
url = support.match('https://userload.co/api/request/', post=post, patron=r'([^\s\r\n]+)').match
if url:
video_urls.append(["{} [Userload]".format(url.split('.')[-1]), url])
video_urls.append({'type':url.split('.')[-1], 'url':url})
return video_urls

View File

@@ -33,10 +33,10 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
data = httptools.downloadpage(page_url, post=post).data
media_url = scrapertools.find_single_match(data, 'name="down_script".*?<a href="([^"]+)"')
ext = scrapertools.get_filename_from_url(media_url)[-4:]
video_urls.append(["%s [userscloud]" % ext, media_url])
ext = scrapertools.get_filename_from_url(media_url).split('.')[-1]
video_urls.append({'type':ext, 'url':media_url})
for video_url in video_urls:
logger.debug("%s - %s" % (video_url[0], video_url[1]))
# for video_url in video_urls:
# logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -36,5 +36,5 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
matches = scrapertools.find_multiple_matches(bloque, '"([^"]+)":"([^"]+)')
for res, media_url in matches:
video_urls.append(
[scrapertools.get_filename_from_url(media_url)[-4:] + " (" + res + ") [vevio.me]", media_url])
{'type':scrapertools.get_filename_from_url(media_url).split('.')[-1],'res':res, 'url':media_url})
return video_urls

View File

@@ -53,9 +53,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
ext = "mp4"
if "m3u8" in media_url:
ext = "m3u8"
video_urls.append(["%s [Vidcloud" % ext, media_url])
video_urls.append({'type':ext, 'url':media_url})
for video_url in video_urls:
logger.debug("%s - %s" % (video_url[0], video_url[1]))
# for video_url in video_urls:
# logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -25,6 +25,6 @@ def get_video_url(page_url, user="", password="", video_password=""):
bloque = scrapertools.find_single_match(data, 'sources:.\[.*?]')
matches = scrapertools.find_multiple_matches(bloque, '(http.*?)"')
for videourl in matches:
extension = extension = scrapertools.get_filename_from_url(videourl)[-4:]
video_urls.append(["%s [videobin]" %extension, videourl])
extension = scrapertools.get_filename_from_url(videourl).split('.')[-1]
video_urls.append({'type':extension, 'url':videourl})
return video_urls

View File

@@ -15,6 +15,6 @@ def get_video_url(page_url, video_password):
url = url.replace(" ", "")
data=httptools.downloadpage(url).data
url = scrapertools.find_single_match(data, '<source src="([^"]+)"')
video_urls.append(["[videomega]", url])
video_urls.append({'url':url})
return video_urls

View File

@@ -26,7 +26,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.debug("Intel11 %s" %data)
media_url = scrapertools.find_single_match(data, 'file:"([^"]+)')
if media_url:
ext = media_url[-4:]
video_urls.append(["%s [vidfast]" % (ext), media_url])
ext = media_url.split('.')[-1]
video_urls.append({'type':ext, 'url':media_url})
return video_urls

View File

@@ -27,9 +27,9 @@ def get_video_url(page_url, user="", password="", video_password=""):
bloque = scrapertools.find_single_match(data, 'sources:.\[.*?]')
matches = scrapertools.find_multiple_matches(bloque, '(http.*?)"')
for videourl in matches:
extension = videourl[-4:]
extension = videourl.split('.')[-1]
if extension == 'm3u8':
continue
video_urls.append(["%s [vidlox]" % extension, videourl])
video_urls.reverse()
video_urls.append({'type':extension, 'url':videourl})
# video_urls.reverse()
return video_urls

View File

@@ -18,8 +18,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.debug("url=" + page_url)
global data
video_urls = support.get_jwplayer_mediaurl(data, 'Vidmoly')
for url in video_urls:
for url in video_urls:
logger.debug(url)
url[-1] = url[-1].replace(',','').replace('.urlset','').replace('/hls','') + '|Referer=' + page_url
url['url'] = url['url'].replace(',','').replace('.urlset','').replace('/hls','') + '|Referer=' + page_url
return video_urls

View File

@@ -31,13 +31,14 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
for enlace in data:
if 'src' in enlace or 'file' in enlace:
url = enlace['src'] if 'src' in enlace else enlace['file']
tit = ''
if 'label' in enlace: tit += ' [%s]' % enlace['label']
if 'res' in enlace: tit += ' [%s]' % enlace['res']
if tit == '' and 'type' in enlace: tit = enlace['type']
if tit == '': tit = '.mp4'
ext = ''
res = ''
if 'type' in enlace: tit = enlace['type'].split('/')[-1]
else: tit = 'mp4'
if 'res' in enlace: res = enlace['res']
elif 'label' in enlace: res = enlace['label']
video_urls.append(["%s [Vidoza]" % tit, url])
video_urls.append({'type':tit, 'res':res, 'url':url})
except:
logger.debug('No json detected %s' % s)
pass

View File

@@ -38,8 +38,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
scrapertools.find_single_match(data, '"Watch video ([^"]+")').replace(' ', '.') + ".mp4"
for playpath, inf in playpaths:
h = scrapertools.find_single_match(playpath, 'h=([a-z0-9]+)')
video_urls.append([".mp4 [%s] %s" % (id_server, inf), mp4 % h])
video_urls.append(["RTMP [%s] %s" % (id_server, inf), "%s playpath=%s" % (rtmp, playpath)])
for video_url in video_urls:
logger.debug("video_url: %s - %s" % (video_url[0], video_url[1]))
video_urls.append({'type':'mp4', 'res':inf, 'url':mp4 % h})
video_urls.append({'type':'rtmp', 'res':inf, 'url':"%s playpath=%s" % (rtmp, playpath)})
# for video_url in video_urls:
# logger.debug("video_url: %s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -40,7 +40,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
bloque = scrapertools.find_single_match(data, 'qualities":\{(.*?)\}')
matches = scrapertools.find_multiple_matches(bloque, '"([^"]+)":"([^"]+)')
for res, media_url in matches:
video_urls.append(
[scrapertools.get_filename_from_url(media_url)[-4:] + " (" + res + ") [vidup.tv]", media_url])
video_urls.reverse()
video_urls.append({'type':scrapertools.get_filename_from_url(media_url).split('.')[-1], 'res':res, 'url':media_url})
# video_urls.reverse()
return video_urls

View File

@@ -32,12 +32,12 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
patron += '.*?quality":"([^"]+)"'
match = scrapertools.find_multiple_matches(data, patron)
for mime, media_url, calidad in match:
title = "%s (%s) [Vimeo]" % (mime.replace("video/", "."), calidad)
video_urls.append([title, media_url, int(calidad.replace("p", ""))])
# title = "%s (%s) [Vimeo]" % (mime.replace("video/", "."), calidad)
video_urls.append({'type':mime.replace("video/", ""), 'url':media_url, 'res':calidad})
video_urls.sort(key=lambda x: x[2])
for video_url in video_urls:
video_url[2] = 0
logger.debug("%s - %s" % (video_url[0], video_url[1]))
# video_urls.sort(key=lambda x: x[2])
# for video_url in video_urls:
# video_url[2] = 0
# logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -31,9 +31,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
"&Cookie=%s; %s" % (cfduid, univid)
video_urls = []
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [vimple.ru]", media_url])
video_urls.append({'type':scrapertools.get_filename_from_url(media_url).split('.')[-1], 'url':media_url})
for video_url in video_urls:
logger.debug("%s - %s" % (video_url[0], video_url[1]))
# for video_url in video_urls:
# logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -22,5 +22,5 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
data = httptools.downloadpage(page_url).data
enc_data = scrapertools.find_single_match(data, 'data-stream="([^"]+)')
dec_data = base64.b64decode(enc_data)
video_urls.append(['vivo', dec_data])
video_urls.append({'url':dec_data})
return video_urls

View File

@@ -34,10 +34,10 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
matches = scrapertools.find_multiple_matches(data, '<source src="([^"]+)" type="video/(\w+)')
for media_url, ext in matches:
calidad = scrapertools.find_single_match(media_url, '(\d+)\.%s' % ext)
video_urls.append([calidad + "p ." + ext + " [vk]", media_url])
video_urls.sort(key=lambda it: int(it[0].split("p ", 1)[0]))
for video_url in video_urls:
logger.debug("%s - %s" % (video_url[0], video_url[1]))
video_urls.append({'res':calidad, 'type':ext, 'url':media_url})
# video_urls.sort(key=lambda it: int(it[0].split("p ", 1)[0]))
# for video_url in video_urls:
# logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -39,9 +39,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
videoSources = re.findall("<source[\s]+src=[\"'](?P<url>[^\"']+)[^>]+label=[\"'](?P<label>[^\"']+)", strResult)
for url, label in videoSources:
url += "|Referer=%s" %page_url
video_urls.append([label, url])
video_urls.sort(key=lambda i: int(i[0].replace("p","")))
video_urls.append({'res':label, 'url':url})
# video_urls.sort(key=lambda i: int(i[0].replace("p","")))
except:
url = scrapertools.find_single_match(data,'<source src="([^"]+)')
video_urls.append(["MP4", url])
video_urls.append({'type':'mp4', 'url':url})
return video_urls

View File

@@ -56,12 +56,12 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
# Search for the correct episode
for episode in json_file['data']:
if episode['video_id'] == int(video_id):
ep_title = '[B]' + episode['title'] + '[/B]'
# ep_title = '[B]' + episode['title'] + '[/B]'
embed_info = vvvvid_decoder.dec_ei(episode['embed_info'])
embed_info = embed_info.replace('manifest.f4m','master.m3u8').replace('http://','https://').replace('/z/','/i/')
key_url = 'https://www.vvvvid.it/kenc?action=kt&conn_id=' + conn_id + '&url=' + embed_info.replace(':','%3A').replace('/','%2F')
key = vvvvid_decoder.dec_ei(current_session.get(key_url, headers=headers, params=payload).json()['message'])
video_urls.append([ep_title, str(embed_info) + '?' + key])
video_urls.append({'type':'m3u8', 'url':str(embed_info) + '?' + key})
return video_urls
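
Several hunks above also comment out the per-server sorting and logger loops that worked on the old [label, url] pairs. With the dict format that logic can live once on the consumer side; a hedged sketch, assuming the caller receives the list of dicts unchanged (the function name and the server_name parameter are illustrative, not an API of this codebase):

import re

def label_and_sort(video_urls, server_name):
    # Sort by the numeric part of the optional 'res' key (entries without a
    # resolution sort first), then rebuild an old-style "ext res [Server]"
    # display label next to each URL.
    def res_value(entry):
        m = re.search(r'(\d+)', str(entry.get('res', '')))
        return int(m.group(1)) if m else 0

    labelled = []
    for entry in sorted(video_urls, key=res_value):
        parts = [entry.get('type', ''), entry.get('res', ''), '[%s]' % server_name]
        labelled.append([' '.join(p for p in parts if p), entry['url']])
    return labelled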

Some files were not shown because too many files have changed in this diff.