Eliminados y actualizados servers

cloudsany, nowvideo, oboom, playwatch, playwire, rutube, streame, veoh, vidabc, videowood, vidgg, vidgot, vidzella, watchers, wholecloud: eliminados
gounlimited, thevideome, watchvideo fix
This commit is contained in:
Intel1
2018-09-01 10:15:42 -05:00
parent 9892784e1d
commit c5dffe09da
33 changed files with 54 additions and 1223 deletions

View File

@@ -1,42 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "cloudsany.com/i/([A-z0-9]+)",
"url": "https://cloudsany.com/i/\\1"
}
]
},
"free": true,
"id": "cloudsany",
"name": "cloudsany",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://s1.postimg.cc/6wixo35myn/cloudsany1.png"
}

View File

@@ -1,33 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Alfa addon - KODI Plugin
# Conector para cloudsany
# https://github.com/alfa-addon
# ------------------------------------------------------------
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger
def test_video_exists(page_url):
    """Check whether the cloudsany video is still available.

    Returns (exists, message); the message is only set when the file is gone.
    """
    logger.info("(page_url='%s')" % page_url)
    page_data = httptools.downloadpage(page_url).data
    if "no longer exists" not in page_data:
        return True, ""
    return False, "[Cloudsany] El fichero ha sido borrado"
def get_video_url(page_url, user="", password="", video_password=""):
    """Extract the direct MP4 url from a cloudsany embed page.

    The link is hidden inside a p,a,c,k,e,d javascript block that must be
    unpacked first. Returns a list of [label, url] pairs for the player.

    Fixes: dropped the leftover `logger.info("Intel11 ...")` debug dump of
    the unpacked script, and no longer appends an entry when no url matched.
    """
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    data = scrapertools.find_single_match(data, 'p,a,c,k,e.*?</script>')
    unpack = jsunpack.unpack(data)
    video_urls = []
    videourl = scrapertools.find_single_match(unpack, 'config={file:"([^"]+)')
    if videourl:
        video_urls.append([".MP4 [Cloudsany]", videourl])
    return video_urls

View File

@@ -24,9 +24,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
packed_data = scrapertools.find_single_match(data, "javascript'>(eval.*?)</script>")
unpacked = jsunpack.unpack(packed_data)
patron = "file:(.*?),label:(.*?)}"
patron = "sources..([^\]]+)"
matches = re.compile(patron, re.DOTALL).findall(unpacked)
for url, quality in matches:
video_urls.append(['%s' % quality, url])
video_urls.sort(key=lambda x: int(x[0]))
for url in matches:
url += "|Referer=%s" %page_url
video_urls.append(['mp4', url])
return video_urls

View File

@@ -1,71 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "nowvideo.../(?:video/|embed.php\\?.*v=)([A-z0-9]+)",
"url": "http://www.nowvideo.sx/video/\\1"
}
]
},
"free": true,
"id": "nowvideo",
"name": "nowvideo",
"premium": [
"nowvideo",
"realdebrid"
],
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
},
{
"default": false,
"enabled": true,
"id": "premium",
"label": "Activar cuenta premium",
"type": "bool",
"visible": true
},
{
"default": "",
"enabled": "eq(-1,true)",
"id": "user",
"label": "@30014",
"type": "text",
"visible": true
},
{
"default": "",
"enabled": "eq(-2,true)+!eq(-1,'')",
"hidden": true,
"id": "password",
"label": "@30015",
"type": "text",
"visible": true
}
],
"thumbnail": "server_nowvideo.png"
}

View File

@@ -1,59 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
    """Probe the nowvideo embed page and report the file's availability."""
    logger.info("(page_url='%s')" % page_url)
    embed_url = page_url.replace("http://www.nowvideo.sx/video/",
                                 "http://embed.nowvideo.sx/embed/?v=")
    page = httptools.downloadpage(embed_url).data
    if "The file is being converted" in page or "Please try again later" in page:
        return False, "El fichero está en proceso"
    if "no longer exists" in page:
        return False, "El fichero ha sido borrado"
    return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve playable urls for a nowvideo link.

    When premium is True, logs into nowvideo.eu and asks the player API for
    the direct link; otherwise scrapes the public embed page.
    Returns a list of [label, url] pairs.
    """
    logger.info("(page_url='%s')" % page_url)
    video_urls = []
    if premium:
        # First GET obtains session cookies, then credentials are POSTed.
        login_url = "http://www.nowvideo.eu/login.php"
        data = httptools.downloadpage(login_url).data
        login_url = "http://www.nowvideo.eu/login.php?return="
        post = "user=" + user + "&pass=" + password + "&register=Login"
        headers = {"Referer": "http://www.nowvideo.eu/login.php"}
        data = httptools.downloadpage(login_url, post, headers=headers).data
        data = httptools.downloadpage(page_url).data
        # flashvars.filekey holds the NAME of a js variable; a second match
        # reads that variable's value.
        flashvar_file = scrapertools.get_match(data, 'flashvars.file="([^"]+)"')
        flashvar_filekey = scrapertools.get_match(data, 'flashvars.filekey=([^;]+);')
        flashvar_filekey = scrapertools.get_match(data, 'var ' + flashvar_filekey + '="([^"]+)"')
        flashvar_user = scrapertools.get_match(data, 'flashvars.user="([^"]+)"')
        flashvar_key = scrapertools.get_match(data, 'flashvars.key="([^"]+)"')
        flashvar_type = scrapertools.get_match(data, 'flashvars.type="([^"]+)"')
        # The player API answers "url=<direct link>&..."; '.'/'-' in the key
        # are percent-escaped by hand.
        url = "http://www.nowvideo.eu/api/player.api.php?user=" + flashvar_user + "&file=" + flashvar_file + "&pass=" + flashvar_key + "&cid=1&cid2=undefined&key=" + flashvar_filekey.replace(
            ".", "%2E").replace("-", "%2D") + "&cid3=undefined"
        data = httptools.downloadpage(url).data
        location = scrapertools.get_match(data, 'url=([^\&]+)&')
        location = location + "?client=FLASH"
        video_urls.append([scrapertools.get_filename_from_url(location)[-4:] + " [premium][nowvideo]", location])
    else:
        url = page_url.replace("http://www.nowvideo.sx/video/", "http://embed.nowvideo.sx/embed/?v=")
        data = httptools.downloadpage(url).data
        videourls = scrapertools.find_multiple_matches(data, 'src\s*:\s*[\'"]([^\'"]+)[\'"]')
        if not videourls:
            videourls = scrapertools.find_multiple_matches(data, '<source src=[\'"]([^\'"]+)[\'"]')
        for videourl in videourls:
            if videourl.endswith(".mpd"):
                # DASH manifests are rewritten into the mp4 download url.
                id = scrapertools.find_single_match(videourl, '/dash/(.*?)/')
                videourl = "http://www.nowvideo.sx/download.php%3Ffile=mm" + "%s.mp4" % id
                videourl = re.sub(r'/dl(\d)*/', '/dl/', videourl)
            ext = scrapertools.get_filename_from_url(videourl)[-4:]
            videourl = videourl.replace("%3F", "?") + \
                       "|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0"
            video_urls.append([ext + " [nowvideo]", videourl])
    return video_urls

View File

@@ -1,44 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(oboom.com/[a-zA-Z0-9]+)",
"url": "https://www.\\1"
}
]
},
"free": false,
"id": "oboom",
"name": "oboom",
"premium": [
"realdebrid"
],
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
]
}

View File

@@ -1,9 +0,0 @@
# -*- coding: utf-8 -*-
from platformcode import logger
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Stub resolver for oboom: free playback is not supported, so no urls."""
    logger.info("(page_url='%s')" % page_url)
    return []

View File

@@ -1,42 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "playwatch.me/(?:embed/|)([A-z0-9]+)",
"url": "http://playwatch.me/embed/\\1"
}
]
},
"free": true,
"id": "playwatch",
"name": "playwatch",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "http://i.imgur.com/c7LwCTc.png?1"
}

View File

@@ -1,34 +0,0 @@
# -*- coding: utf-8 -*-
import base64
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
    """Report whether the playwatch file is reachable.

    A redirect ("location" header) or a failed request means the file is gone.
    """
    logger.info("(page_url='%s')" % page_url)
    response = httptools.downloadpage(page_url, follow_redirects=False)
    # NOTE: 'sucess' is the spelling used by the project's httptools response
    # object — presumably intentional there; do not "correct" it here.
    if response.sucess and not response.headers.get("location"):
        return True, ""
    return False, "[Playwatch] El fichero no existe o ha sido borrado"
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Decode the base64 'tracker' field of the embed page into the media url."""
    logger.info("url=" + page_url)
    html = httptools.downloadpage(page_url, follow_redirects=False).data
    encoded = scrapertools.find_single_match(html, ' tracker:\s*"([^"]+)"')
    media_url = base64.b64decode(encoded)
    extension = scrapertools.get_filename_from_url(media_url)[-4:]
    video_urls = [["%s [playwatch]" % extension, media_url]]
    for label, url in video_urls:
        logger.info("%s - %s" % (label, url))
    return video_urls

View File

@@ -1,41 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(?:cdn|config).playwire.com(?:/v2|)/(\\d+)/(?:embed|videos/v2|config)/(\\d+)",
"url": "http://config.playwire.com/\\1/videos/v2/\\2/zeus.json"
}
]
},
"free": true,
"id": "playwire",
"name": "playwire",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
]
}

View File

@@ -1,45 +0,0 @@
# -*- coding: utf-8 -*-
import xml.etree.ElementTree as ET
from core import jsontools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
    """Check that the playwire config file still exists.

    Bug fix: the old check was `("File was deleted" or "Not Found") in data`,
    which evaluates the parenthesised `or` first and therefore only ever
    tested "File was deleted". Both markers are tested now.
    """
    logger.info("(page_url='%s')" % page_url)
    data = scrapertools.cachePage(page_url)
    if "File was deleted" in data or "Not Found" in data:
        return False, "[playwire] El archivo no existe o ha sido borrado"
    return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Parse the playwire zeus.json config and its f4m manifest into media urls."""
    logger.info("url=" + page_url)
    ns = '{http://ns.adobe.com/f4m/1.0}'
    config = jsontools.load(scrapertools.cachePage(page_url))
    manifest = scrapertools.downloadpageGzip(config['content']['media']['f4m'])
    root = ET.fromstring(manifest)
    base_url = root.find(ns + 'baseURL').text
    video_urls = []
    for media in root.findall(ns + 'media'):
        rel_url = media.get('url')
        if ".m3u8" in rel_url:
            continue  # HLS variants are skipped
        media_url = base_url + "/" + rel_url
        try:
            # Concatenation raises when width/height attributes are absent,
            # which leaves the label empty via the except branch.
            label = "(" + media.get('width') + "x" + media.get('height') + ")"
        except:
            label = ""
        title = scrapertools.get_filename_from_url(media_url)[-4:] + " " + label + " [playwire]"
        video_urls.append([title, media_url])
    for entry in video_urls:
        logger.info("%s - %s" % (entry[0], entry[1]))
    return video_urls

View File

@@ -1,45 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "rutube.ru\\/(?:video\\/([\\da-zA-Z]{32})|play\\/embed\\/([\\d]+))",
"url": "http://rutube.ru/api/play/options/\\1/?format=json"
}
]
},
"free": true,
"id": "rutube",
"name": "rutube",
"premium": [
"realdebrid"
],
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "server_rutube.png"
}

View File

@@ -1,37 +0,0 @@
# -*- coding: utf-8 -*-
from core import jsontools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
    """Check that the rutube play-options endpoint still knows the video.

    Bug fix: `("File was deleted" or "Not Found") in data` only ever checked
    the first string (the parenthesised `or` yields "File was deleted");
    both markers are tested independently now.
    """
    logger.info("(page_url='%s')" % page_url)
    data = scrapertools.cachePage(page_url)
    if "File was deleted" in data or "Not Found" in data:
        return False, "[rutube] El archivo no existe o ha sido borrado"
    return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve rutube media urls via the play-options API and m3u8 balancer.

    Returns a list of [label, url] pairs.
    """
    logger.info("url=" + page_url)
    data = scrapertools.cachePage(page_url)
    if "embed" in page_url:
        # Embed pages only expose the 32-char video id; rebuild the API url
        # from the canonical link and re-fetch.
        link = scrapertools.find_single_match(data, '<link rel="canonical" href="https://rutube.ru/video/([\da-z]{32})')
        url = "http://rutube.ru/api/play/options/%s/?format=json" % link
        data = scrapertools.cachePage(url)
    data = jsontools.load(data)
    m3u8 = data['video_balancer']['m3u8']
    data = scrapertools.downloadpageGzip(m3u8)
    video_urls = []
    # Each playlist entry looks like "<url>?i=<label>_..."; keep url + label.
    mediaurls = scrapertools.find_multiple_matches(data, '(http://.*?)\?i=(.*?)_')
    for media_url, label in mediaurls:
        video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " (" + label + ") [rutube]", media_url])
    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))
    return video_urls

View File

@@ -1,41 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "streame.net/(?:embed-|)([a-z0-9]+)",
"url": "http://streame.net/embed-\\1.html"
}
]
},
"free": true,
"id": "streame",
"name": "streame",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
]
}

View File

@@ -1,29 +0,0 @@
# -*- coding: utf-8 -*-
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
    """Check that the streame file still exists.

    Bug fix: the old `("File was deleted" or "Not Found") in data` expression
    only ever tested "File was deleted" — the parenthesised `or` short-circuits
    to its first truthy operand. Both markers are tested now.
    """
    logger.info("(page_url='%s')" % page_url)
    data = scrapertools.cache_page(page_url)
    if "File was deleted" in data or "Not Found" in data:
        return False, "[Streame] El archivo no existe o ha sido borrado"
    return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Scrape {file:..., label:...} source pairs from the streame embed page."""
    logger.info("url=" + page_url)
    html = scrapertools.cache_page(page_url)
    pairs = scrapertools.find_multiple_matches(html, '\{file:"([^"]+)",label:"([^"]+)"\}')
    video_urls = []
    for media_url, label in pairs:
        title = scrapertools.get_filename_from_url(media_url)[-4:] + " (" + label + ") [streame]"
        video_urls.append([title, media_url])
    for entry in video_urls:
        logger.info("%s - %s" % (entry[0], entry[1]))
    return video_urls

View File

@@ -8,9 +8,9 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
return True, ""
page_url = httptools.downloadpage(page_url, follow_redirects=False, only_headers=True).headers.get("location", "")
data = httptools.downloadpage(page_url).data
if "File was deleted" in data or "Page Cannot Be Found" in data:
if "File was deleted" in data or "Page Cannot Be Found" in data or "<title>Video not found" in data:
return False, "[thevideo.me] El archivo ha sido eliminado o no existe"
return True, ""

View File

@@ -1,45 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "href=\"http://www.veoh.com/.*?permalinkId=([^&\"]+)\"",
"url": "\\1"
},
{
"pattern": "pattern=\"http://www.veoh.com/static/swf/webplayer/WebPlayer.swf.*?permalinkId=([^&]+)=videodetailsembedded=0=anonymous\"",
"url": "\\1"
}
]
},
"free": true,
"id": "veoh",
"name": "veoh",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
]
}

View File

@@ -1,32 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import scrapertools
from platformcode import logger
# Returns an array of possible video url's from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve a veoh video through flashvideodownloader.org."""
    logger.info("(page_url='%s')" % page_url)
    video_urls = []
    # page_url may be a full url or just the veoh permalink id.
    if page_url.startswith("http://"):
        url = 'http://www.flashvideodownloader.org/download.php?u=' + page_url
    else:
        url = 'http://www.flashvideodownloader.org/download.php?u=http://www.veoh.com/watch/' + page_url
    logger.info("url=" + url)
    data = scrapertools.cachePage(url)
    # Only the first content.veoh.com link is kept.
    matches = re.findall('<a href="(http://content.veoh.com.*?)"', data, re.DOTALL)
    if matches:
        video_urls.append(["[veoh]", matches[0]])
    for entry in video_urls:
        logger.info("%s - %s" % (entry[0], entry[1]))
    return video_urls

View File

@@ -1,41 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "vidabc.com/(?:embed-|)([a-z0-9]+)",
"url": "http://vidabc.com/embed-\\1.html"
}
]
},
"free": true,
"id": "vidabc",
"name": "vidabc",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
]
}

View File

@@ -1,54 +0,0 @@
# -*- coding: utf-8 -*-
from core.httptools import downloadpage
from core.scrapertools import get_match, find_multiple_matches
from platformcode import logger
host = "http://vidabc.com"
id_server = "vidabc"
def test_video_exists(page_url):
    """Report vidabc availability: still converting, deleted, or ok."""
    logger.info("(page_url='%s')" % page_url)
    html = downloadpage(page_url).data
    if "Video is processing now" in html:
        return False, "[vidabc] El archivo se está procesando"
    if "File was deleted" in html:
        return False, "[vidabc] El archivo ha sido borrado"
    return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Extract mp4/m3u8/rtmp sources from a vidabc embed page.

    Falls back to unpacking the p,a,c,k,e,d javascript when the sources
    array is not present in clear text. Returns [label, url] pairs.

    Fix: removed the unused local `h = get_match(playpath, 'h=...')` — its
    value was never read, and get_match would raise on playpaths without an
    'h=' component.
    """
    logger.info("(page_url='%s')" % page_url)
    data = downloadpage(page_url).data
    try:
        sources = get_match(data, 'sources\s*:\s* \[([^\]]+)\]')
    except:
        from lib import jsunpack
        sources = jsunpack.unpack(get_match(data, '<script[^>]*>(eval.function.p,a,c,k,e,.*?)</script>'))
        sources = get_match(sources, 'sources\s*:\s*\[([^\]]+)\]')
    video_urls = []
    for media_url in find_multiple_matches(sources, '"([^"]+)"'):
        if media_url.endswith(".mp4"):
            video_urls.append([".mp4 [%s]" % id_server, media_url])
        if media_url.endswith(".m3u8"):
            video_urls.append(["M3U8 [%s]" % id_server, media_url])
        if media_url.endswith(".smil"):
            # SMIL playlists list rtmp playpaths together with their heights.
            smil_data = downloadpage(media_url).data
            rtmp = get_match(smil_data, 'base="([^"]+)"')
            playpaths = find_multiple_matches(smil_data, 'src="([^"]+)" height="(\d+)"')
            for playpath, inf in playpaths:
                video_urls.append(["RTMP [%s] %s" % (id_server, inf), "%s playpath=%s" % (rtmp, playpath)])
    for video_url in video_urls:
        logger.info("video_url: %s - %s" % (video_url[0], video_url[1]))
    return video_urls

View File

@@ -1,41 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(https?://(?:www.)?videowood.tv/)(?:embed|video)(/[0-9a-z]+)",
"url": "\\1embed\\2"
}
]
},
"free": true,
"id": "videowood",
"name": "videowood",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
]
}

View File

@@ -1,26 +0,0 @@
# -*- coding: utf-8 -*-
from aadecode import decode as aadecode
from core import scrapertools
from core import httptools
from platformcode import logger
def test_video_exists(page_url):
    """Check whether the videowood page still hosts the video."""
    logger.info("(page_url='%s')" % page_url)
    html = httptools.downloadpage(page_url).data
    if "This video doesn't exist." not in html:
        return True, ""
    return False, '[videowood] El video no puede ser encontrado o ha sido eliminado.'
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Decode the aa-encoded player script and pull out the media url."""
    logger.info("url=" + page_url)
    html = httptools.downloadpage(page_url).data
    encoded = scrapertools.find_single_match(html, "(eval\(function\(p,a,c,k,e,d.*?)</script>")
    decoded = aadecode(encoded)
    # The first single-quoted string of the decoded script is the media url.
    media_url = scrapertools.find_single_match(decoded, "'([^']+)'")
    return [[media_url[-4:] + " [Videowood]", media_url]]

View File

@@ -1,41 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(?:vidgg.to|vid.gg)/(?:embed/|video/)([a-z0-9]+)",
"url": "http://vidgg.to/video/\\1"
}
]
},
"free": true,
"id": "vidgg",
"name": "vidgg",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
]
}

View File

@@ -1,53 +0,0 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import jsontools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
    """Query the vidgg alive API and translate its status to (exists, msg)."""
    logger.info("(page_url='%s')" % page_url)
    api_url = "http://www.vidgg.to/api-v2/alive.php?link=" + page_url
    status = jsontools.load(httptools.downloadpage(api_url).data)["data"]
    if status in ("NOT_FOUND", "FAILED"):
        return False, "[Vidgg] El archivo no existe o ha sido borrado"
    if status == "CONVERTING":
        return False, "[Vidgg] El archivo se está procesando"
    return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve playable urls for a vidgg video.

    Tries the page's <source> tags first; otherwise talks to the flash
    player API, which requires a deliberately failing first request to
    hand out the real links on the second one.
    """
    logger.info("(page_url='%s')" % page_url)
    video_urls = []
    data = httptools.downloadpage(page_url).data
    mediaurls = scrapertools.find_multiple_matches(data, '<source src="([^"]+)"')
    if not mediaurls:
        id_file = page_url.rsplit("/", 1)[1]
        # The file key may be inline or stored in a separate js variable.
        key = scrapertools.find_single_match(data, 'flashvars\.filekey\s*=\s*"([^"]+)"')
        if not key:
            varkey = scrapertools.find_single_match(data, 'flashvars\.filekey\s*=\s*([^;]+);')
            key = scrapertools.find_single_match(data, varkey + '\s*=\s*"([^"]+)"')
        # First call (numOfErrors=0) returns a bogus url that must be echoed
        # back as errorUrl to obtain the real link.
        url = "http://www.vidgg.to//api/player.api.php?cid2=undefined&cid=undefined&numOfErrors=0&user=undefined&cid3=undefined&key=%s&file=%s&pass=undefined" % (
            key, id_file)
        data = httptools.downloadpage(url).data
        url_error = scrapertools.find_single_match(data, 'url=([^&]+)&')
        url = "http://www.vidgg.to//api/player.api.php?cid2=undefined&cid=undefined&numOfErrors=1&errorUrl=%s&errorCode=404&user=undefined&cid3=undefined&key=%s&file=%s&pass=undefined" % (
            url_error, key, id_file)
        data = httptools.downloadpage(url).data
        mediaurls = scrapertools.find_multiple_matches(data, 'url=([^&]+)&')
    for i, mediaurl in enumerate(mediaurls):
        title = scrapertools.get_filename_from_url(mediaurl)[-4:] + " Mirror %s [vidgg]" % str(i + 1)
        mediaurl += "|User-Agent=Mozilla/5.0"
        video_urls.append([title, mediaurl])
    for video_url in video_urls:
        logger.info(" %s - %s" % (video_url[0], video_url[1]))
    return video_urls

View File

@@ -1,36 +0,0 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger
def test_video_exists(page_url):
    """Check whether the vidgot file still exists.

    Fix: the old message read "borrado de novamov" — a copy-paste leftover
    from the novamov connector; the message now names vidgot only.
    """
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if "File was deleted" in data:
        return False, "[Vidgot] El fichero ha sido borrado"
    return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Unpack the player javascript and keep only mp4/m3u8 file entries."""
    logger.info("(page_url='%s')" % page_url)
    html = httptools.downloadpage(page_url).data
    packed = scrapertools.find_single_match(html, "<script type='text/javascript'>(eval\(function.*?)</script>")
    unpacked = jsunpack.unpack(packed)
    video_urls = []
    for mediaurl in scrapertools.find_multiple_matches(unpacked, '\{file\s*:\s*"([^"]+)"\}'):
        ext = scrapertools.get_filename_from_url(mediaurl)[-4:]
        if "mp4" in ext or "m3u8" in ext:
            video_urls.append([ext + " [vidgot]", mediaurl])
    return video_urls

View File

@@ -1,42 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(?i)(https://vidlox.(?:tv|me)/embed-.*?.html)",
"url": "\\1"
}
]
},
"free": true,
"id": "vidlox",
"name": "vidlox",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://s1.postimg.cc/wathgtvin/logo-vidlox1.png"
}
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(?i)(https://vidlox.(?:tv|me)/embed-.*?.html)",
"url": "\\1"
}
]
},
"free": true,
"id": "vidlox",
"name": "vidlox",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://s1.postimg.cc/wathgtvin/logo-vidlox1.png"
}

View File

@@ -1,42 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https://vidzella.me/e/([a-zA-Z0-9]+)",
"url": "https://vidzella.me/e/\\1"
}
]
},
"free": true,
"id": "vidzella",
"name": "vidzella",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://s15.postimg.cc/albqao5pn/vidzella.png"
}

View File

@@ -1,33 +0,0 @@
# Conector Vidzella By Alfa development Group
# --------------------------------------------------------
import re
from core import httptools
from platformcode import logger
def test_video_exists(page_url):
    """A 404 response from vidzella means the file is gone."""
    logger.info("(page_url='%s')" % page_url)
    response = httptools.downloadpage(page_url)
    if response.code != 404:
        return True, ""
    return False, "[Vidzella] El archivo no existe o ha sido borrado"
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Scrape src/type pairs from the vidzella embed page.

    Fixes: dropped the leftover `logger.debug(data)` full-page dump, and
    renamed the loop variable that shadowed the builtin `type`.
    """
    logger.info("url=" + page_url)
    video_urls = []
    data = httptools.downloadpage(page_url).data
    # Strip quotes and whitespace noise so the pattern matches reliably.
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    patron = "src=([^ ]+) type='.*?/(.*?)'"
    for url, media_type in re.compile(patron, re.DOTALL).findall(data):
        video_urls.append(['vidzella %s' % media_type, url])
    return video_urls

View File

@@ -1,42 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "watchers.to/(?:embed-|)([A-z0-9]+)",
"url": "http://watchers.to/embed-\\1.html"
}
]
},
"free": true,
"id": "watchers",
"name": "watchers",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "http://i.imgur.com/WApzSMn.png?1"
}

View File

@@ -1,36 +0,0 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger
def test_video_exists(page_url):
    """Report whether the watchers.to file is still hosted."""
    logger.info("(page_url='%s')" % page_url)
    html = httptools.downloadpage(page_url).data
    if "File Not Found" not in html:
        return True, ""
    return False, "[Watchers] El archivo no existe o ha sido borrado"
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Unpack the watchers player script and collect its sources list."""
    logger.info("url=%s" % page_url)
    html = httptools.downloadpage(page_url).data
    packed = scrapertools.find_single_match(html, '(eval\(function\(p,a,c,k,e.*?)</script>').strip()
    unpacked = jsunpack.unpack(packed)
    sources = scrapertools.find_single_match(unpacked, 'sources:\[(.*?)\}\]')
    video_urls = []
    for media_url, calidad in scrapertools.find_multiple_matches(sources, 'file:"([^"]+)"(?:,label:"([^"]+)"|\})'):
        label = scrapertools.get_filename_from_url(media_url)[-4:]
        if calidad:
            label += " " + calidad + "p"
        # The page url is attached as Referer via the url|Header=value syntax.
        video_urls.append([label + ' [watchers]', media_url + "|Referer=%s" % page_url])
    return video_urls

View File

@@ -18,8 +18,13 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.info("url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
media_urls = scrapertools.find_multiple_matches(data, 'file:"([^"]+)"')
packed = scrapertools.find_single_match(data, "text/javascript'>(.*?)\s*</script>")
unpacked = jsunpack.unpack(packed)
media_urls = scrapertools.find_multiple_matches(unpacked, 'file:"([^"]+)"')
for media_url in media_urls:
media_url += "|Referer=%s" %page_url
if ".png" in media_url:
continue
ext = "mp4"
if "m3u8" in media_url:
ext = "m3u8"

View File

@@ -1,42 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "wholecloud.net/(?:video/|embed/?v=)([A-z0-9]+)",
"url": "http://wholecloud.net/embed/?v=\\1"
}
]
},
"free": true,
"id": "wholecloud",
"name": "wholecloud",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "http://i.imgur.com/yIAQurm.png"
}

View File

@@ -1,38 +0,0 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
    """Distinguish deleted wholecloud files from ones still being converted."""
    logger.info("(page_url='%s')" % page_url)
    html = httptools.downloadpage(page_url).data
    if "This file no longer exists on our servers" in html:
        return False, "[wholecloud] El archivo ha sido eliminado o no existe"
    if "This video is not yet ready" in html:
        return False, "[wholecloud] El archivo no está listo, se está subiendo o convirtiendo"
    return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Collect <source> urls, or fall back to the toker.php download link."""
    logger.info("url=" + page_url)
    html = httptools.downloadpage(page_url).data
    video_urls = []
    media_urls = scrapertools.find_multiple_matches(html, '<source src="([^"]+)"')
    if media_urls:
        for media_url in media_urls:
            ext = scrapertools.get_filename_from_url(media_url)[-4:]
            video_urls.append([ext + " [wholecloud]", media_url + "|User-Agent=Mozilla/5.0"])
    else:
        # No <source> tags: rebuild the download url from the toker.php token.
        media_url = scrapertools.find_single_match(html, 'src="/api/toker.php\?f=([^"]+)"')
        ext = scrapertools.get_filename_from_url(media_url)[-4:]
        media_url = "http://wholecloud.net/download.php?file=%s|User-Agent=Mozilla/5.0" % media_url
        video_urls.append([ext + " [wholecloud]", media_url])
    return video_urls