added vidmoly server, improved altadefinizioneclick
@@ -138,5 +138,5 @@ def findvideos(item):
     support.log('findvideos', item)
     return support.hdpass_get_servers(item)
 
-def play(item):
-    return support.hdpass_get_url(item)
+# def play(item):
+#     return support.hdpass_get_url(item)
@@ -34,30 +34,35 @@ from specials import autoplay
 def hdpass_get_servers(item):
     def get_hosts(url, quality):
         ret = []
-        page = httptools.downloadpage(url).data
-
+        page = httptools.downloadpage(url, CF=False).data
         mir = scrapertools.find_single_match(page, patron_mir)
 
-        for mir_url, srv in scrapertools.find_multiple_matches(mir, patron_option):
-            mir_url = scrapertools.decodeHtmlentities(mir_url)
-            log(mir_url)
-            ret.append(Item(channel=item.channel,
-                            action="play",
-                            fulltitle=item.fulltitle,
-                            quality=quality,
-                            show=item.show,
-                            thumbnail=item.thumbnail,
-                            contentType=item.contentType,
-                            title=srv,
-                            server=srv,
-                            url= mir_url))
+        with futures.ThreadPoolExecutor() as executor:
+            thL = []
+            for mir_url, srv in scrapertools.find_multiple_matches(mir, patron_option):
+                mir_url = scrapertools.decodeHtmlentities(mir_url)
+                log(mir_url)
+                it = Item(channel=item.channel,
+                          action="play",
+                          fulltitle=item.fulltitle,
+                          quality=quality,
+                          show=item.show,
+                          thumbnail=item.thumbnail,
+                          contentType=item.contentType,
+                          title=srv,
+                          # server=srv,
+                          url= mir_url)
+                thL.append(executor.submit(hdpass_get_url, it))
+            for res in futures.as_completed(thL):
+                if res.result():
+                    ret.append(res.result()[0])
         return ret
     # Carica la pagina
     itemlist = []
     if 'hdpass' in item.url or 'hdplayer' in item.url:
         url = item.url
     else:
-        data = httptools.downloadpage(item.url).data.replace('\n', '')
+        data = httptools.downloadpage(item.url, CF=False).data.replace('\n', '')
         patron = r'<iframe(?: id="[^"]+")? width="[^"]+" height="[^"]+" src="([^"]+)"[^>]+><\/iframe>'
         url = scrapertools.find_single_match(data, patron)
         url = url.replace("&download=1", "")
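Note: the hunk above swaps the serial mirror loop for concurrent resolution of each mirror via `hdpass_get_url`. A minimal sketch of that pattern, assuming `futures` is `concurrent.futures` (as the `as_completed` call suggests); `resolve_all`, `resolver`, and `items` are placeholder names, not part of the code base:

```python
# Minimal sketch of the concurrent-resolution pattern adopted above.
# Assumes `futures` is concurrent.futures; resolver/items are placeholders.
from concurrent import futures

def resolve_all(resolver, items):
    results = []
    with futures.ThreadPoolExecutor() as executor:
        pending = [executor.submit(resolver, it) for it in items]  # one job per mirror
        for job in futures.as_completed(pending):                  # collect as they finish
            if job.result():                                       # skip failed resolutions
                results.append(job.result()[0])
    return results
```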
@@ -66,7 +71,7 @@ def hdpass_get_servers(item):
     if not url.startswith('http'):
         url = 'https:' + url
 
-    data = httptools.downloadpage(url).data
+    data = httptools.downloadpage(url, CF=False).data
     patron_res = '<div class="buttons-bar resolutions-bar">(.*?)<div class="buttons-bar'
     patron_mir = '<div class="buttons-bar hosts-bar">(.*?)<div id="fake'
     patron_option = r'<a href="([^"]+?)".*?>([^<]+?)</a>'
@@ -85,7 +90,7 @@ def hdpass_get_servers(item):
 
 def hdpass_get_url(item):
     patron_media = r'<iframe allowfullscreen custom-src="([^"]+)'
-    data = httptools.downloadpage(item.url).data
+    data = httptools.downloadpage(item.url, CF=False).data
     item.url = base64.b64decode(scrapertools.find_single_match(data, patron_media))
     return [item]
 
@@ -1292,3 +1297,17 @@ def addQualityTag(item, itemlist, data, patron):
                              folder=False))
     else:
         log('nessun tag qualità trovato')
+
+def get_jwplayer_mediaurl(data, srvName):
+    video_urls = []
+    block = scrapertools.find_single_match(data, r'sources: \[([^\]]+)\]')
+    sources = scrapertools.find_multiple_matches(block, r'file:\s*"([^"]+)"(?:,label:\s*"([^"]+)")?')
+    if not sources:
+        sources = scrapertools.find_multiple_matches(data,
+                                                     r'src:\s*"([^"]+)",\s*type:\s*"[^"]+",[^,]+,\s*label:\s*"([^"]+)"')
+    for url, quality in sources:
+        quality = 'auto' if not quality else quality
+        video_urls.append(['.' + url.split('.')[-1] + ' [' + quality + '] [' + srvName + ']', url])
+
+    video_urls.sort(key=lambda x: x[0].split()[1])
+    return video_urls
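The new `support.get_jwplayer_mediaurl` helper centralizes the jwplayer `sources: [...]` parsing that the connector hunks below (Onlystream, Turbovid, Vidmoly) now delegate to. An illustrative stand-in using plain `re` instead of `scrapertools`, with the secondary `src:` fallback omitted; the function name and the sample page snippet are hypothetical:

```python
# Illustrative stand-in for support.get_jwplayer_mediaurl using plain re;
# the sample data in the call below is made up.
import re

def parse_jwplayer_sources(data, srv_name):
    video_urls = []
    block = re.search(r'sources: \[([^\]]+)\]', data)
    pairs = re.findall(r'file:\s*"([^"]+)"(?:,label:\s*"([^"]+)")?',
                       block.group(1) if block else '')
    for url, quality in pairs:
        quality = quality or 'auto'  # missing label -> 'auto'
        video_urls.append(['.%s [%s] [%s]' % (url.split('.')[-1], quality, srv_name), url])
    video_urls.sort(key=lambda x: x[0].split()[1])
    return video_urls

print(parse_jwplayer_sources('sources: [{file: "https://cdn.example/v.m3u8"}]', 'Vidmoly'))
# [['.m3u8 [auto] [Vidmoly]', 'https://cdn.example/v.m3u8']]
```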
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 
-from core import httptools
+from core import httptools, support
 from core import scrapertools
 from platformcode import config, logger
 
@@ -17,15 +17,7 @@ def test_video_exists(page_url):
 
 def get_video_url(page_url, premium=False, user="", password="", video_password=""):
     logger.info("url=" + page_url)
-    video_urls = []
     data = httptools.downloadpage(page_url).data
     # logger.info(data)
-    block = scrapertools.find_single_match(data, r'sources: \[([^\]]+)\]')
-    sources = scrapertools.find_multiple_matches(block, r'file:\s*"([^"]+)"(?:,label:\s*"([^"]+)")?')
-    if not sources:
-        sources = scrapertools.find_multiple_matches(data, r'src:\s*"([^"]+)",\s*type:\s*"[^"]+",[^,]+,\s*label:\s*"([^"]+)"')
-    for url, quality in sources:
-        quality = 'auto' if not quality else quality
-        video_urls.append(['.' + url.split('.')[-1] + ' [' + quality + '] [Onlystream]', url])
-    video_urls.sort(key=lambda x: x[0].split()[1])
+    video_urls = support.get_jwplayer_mediaurl(data, 'Onlystream')
     return video_urls
@@ -1,7 +1,4 @@
 # -*- coding: utf-8 -*-
-# --------------------------------------------------------
-# Conector Mixdrop By Alfa development Group
-# --------------------------------------------------------
 
 from core import httptools
 from core import scrapertools
@@ -22,7 +19,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     video_urls = []
     global data
 
-    media_url = 'https://' + scrapertools.find_single_match(data, 'id="videolink"[^>]+>\n?\s*//(.*?)<') + '&stream=1'
-    video_urls.append([".mp4 [Streamtape]", media_url])
+    url = scrapertools.find_single_match(data, 'id="videolink"[^>]+>\n?\s*//(.*?)<')
+    if url:
+        media_url = 'https://' + url + '&stream=1'
+        video_urls.append([".mp4 [Streamtape]", media_url])
 
     return video_urls
@@ -3,7 +3,7 @@
 import time
 import urllib
 
-from core import httptools
+from core import httptools, support
 from core import scrapertools
 from platformcode import logger, config
 
@@ -29,9 +29,5 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     time.sleep(6)
     data = httptools.downloadpage(page_url_post, post=post).data
     logger.info("(data page_url='%s')" % data)
-    sources = scrapertools.find_single_match(data, r'sources: \[([^\]]+)\]')
-
-    for media_url in scrapertools.find_multiple_matches(sources, '"([^"]+)"'):
-        ext = scrapertools.get_filename_from_url(media_url)[-4:]
-        video_urls.append(["%s [%s]" % (ext, server), media_url])
+    video_urls = support.get_jwplayer_mediaurl(data, 'Turbovid')
     return video_urls
servers/vidmoly.json (new file, 46 lines added)
@@ -0,0 +1,46 @@
+{
+    "active": true,
+    "find_videos": {
+        "ignore_urls": [],
+        "patterns": [
+            {
+                "pattern": "https?://vidmoly.net/(?:embed-)?(\\w+)\\.html",
+                "url": "https://vidmoly.net/embed-\\1.html"
+            },
+            {
+                "pattern": "https?://fasturl.ga/(?:embed-)?(\\w+)\\.html",
+                "url": "https://vidmoly.net/embed-\\1.html"
+            }
+        ]
+    },
+    "free": true,
+    "id": "vidmoly",
+    "name": "Vidmoly",
+    "settings": [
+        {
+            "default": false,
+            "enabled": true,
+            "id": "black_list",
+            "label": "@60654",
+            "type": "bool",
+            "visible": true
+        },
+        {
+            "default": 0,
+            "enabled": true,
+            "id": "favorites_servers_list",
+            "label": "@60655",
+            "lvalues": [
+                "No",
+                "1",
+                "2",
+                "3",
+                "4",
+                "5"
+            ],
+            "type": "list",
+            "visible": false
+        }
+    ],
+    "thumbnail": ""
+}
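Each `find_videos` entry above pairs a detection regex with a rewrite template. Presumably the framework applies them roughly like `re.sub` with a backreference (this is an assumption about the matching logic, and the sample URL is made up):

```python
# Assumed application of a pattern/url pair from vidmoly.json; the sample URL is made up.
import re

pattern = r"https?://vidmoly.net/(?:embed-)?(\w+)\.html"
template = r"https://vidmoly.net/embed-\1.html"

print(re.sub(pattern, template, "https://vidmoly.net/abc123.html"))
# https://vidmoly.net/embed-abc123.html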
servers/vidmoly.py (new file, 21 lines added)
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+
+from core import httptools, support
+from platformcode import logger, config
+
+
+def test_video_exists(page_url):
+    logger.info("(page_url='%s')" % page_url)
+    global data
+    data = httptools.downloadpage(page_url).data
+    if "Not Found" in data:
+        return False, config.get_localized_string(70449) % "Vidmoly"
+    return True, ""
+
+
+def get_video_url(page_url, premium=False, user="", password="", video_password=""):
+    logger.info("url=" + page_url)
+    global data
+    video_urls = support.get_jwplayer_mediaurl(data, 'Vidmoly')
+
+    return video_urls
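Usage sketch for the new connector: `test_video_exists()` downloads the page once into the module-level `data` global, which `get_video_url()` then parses. The import path and the embed id below are assumptions for illustration only:

```python
# Hedged usage sketch for servers/vidmoly.py; the embed id is made up and the
# call order (test_video_exists before get_video_url) is what the global relies on.
from servers import vidmoly

page_url = "https://vidmoly.net/embed-abc123.html"
ok, msg = vidmoly.test_video_exists(page_url)  # fetches the page into the module global
if ok:
    for label, url in vidmoly.get_video_url(page_url):
        print(label, url)
```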