removed servers that no longer exist

marco
2020-05-31 18:19:00 +02:00
parent 2749687509
commit b44709ff5a
43 changed files with 0 additions and 2019 deletions

View File

@@ -1,47 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(http://b.ter.tv/v/[A-z0-9]+)",
"url": "\\1"
},
{
"pattern": "(https://byter.tv/v/[A-z0-9]+)",
"url": "\\1"
}
]
},
"free": true,
"id": "bitertv",
"name": "Bitertv",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://s18.postimg.cc/f56rayqq1/logo-bitertv.png",
"version": 1
}
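A minimal sketch (assumed, not the addon's actual code) of how servertools presumably applies a find_videos entry like the one above: the "pattern" regex is searched in page text and the "url" template is expanded from the captured groups.

# Illustrative only: the assumed pattern -> url rewriting behind find_videos.
import re

pattern = "(http://b.ter.tv/v/[A-z0-9]+)"  # entry from the bitertv config above
url_template = r"\1"
page = "<iframe src='http://b.ter.tv/v/abc123'></iframe>"  # hypothetical page
match = re.search(pattern, page)
if match:
    video_url = match.expand(url_template)  # -> "http://b.ter.tv/v/abc123"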

View File

@@ -1,24 +0,0 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from platformcode import config
from platformcode import logger
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "Archive no Encontrado" in data or "File has been removed" in data:
return False, config.get_localized_string(70449) % "bitertv"
return True, ""
def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
patron = "(?s)file: '([^']+)"
file = scrapertools.find_single_match(data, patron)
video_urls.append([".MP4 [bitertv]", file])
return video_urls

View File

@@ -1,53 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(bitshare.com/files/[^/]+/[\\w.+\\-_]+)",
"url": "http://\\1"
},
{
"pattern": "(bitshare.com/files/[a-z0-9]+)[^/a-z0-9]",
"url": "http://\\1"
},
{
"pattern": "(bitshare.com/\\?f=[\\w+]+)",
"url": "http://\\1"
}
]
},
"free": false,
"id": "bitshare",
"name": "bitshare",
"premium": [
"realdebrid",
"alldebrid"
],
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
]
}

View File

@@ -1,19 +0,0 @@
# -*- coding: utf-8 -*-
from core import httptools
from platformcode import logger
def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if "Error - Archivo no disponible" in data or "Por favor seleccione el archivo a cargar" in data:
        return False, "File not found"
    return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
return video_urls

View File

@@ -1,46 +0,0 @@
{
"active": false,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https://estream.to/embed-([a-z0-9]+).html",
"url": "https://estream.to/\\1.html"
},
{
"pattern": "https://estream.xyz/embed-([a-z0-9]+).html",
"url": "https://estream.to/\\1.html"
}
]
},
"free": true,
"id": "estream",
"name": "estream",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://s14.postimg.cc/ibd54ayf5/estream.png"
}

View File

@@ -1,35 +0,0 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Estream connector By Alfa development Group
# --------------------------------------------------------
import re
from core import httptools
from platformcode import config
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url)
if data.code == 404:
return False, config.get_localized_string(70449) % "Estream"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = "<source src=([^ ]+) type='video/mp4' label='.*?x(.*?)'"
matches = re.compile(patron, re.DOTALL).findall(data)
for url, quality in matches:
video_urls.append(["%sp [estream]" % quality, url])
return video_urls

View File

@@ -1,42 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(https://filebebo.com/(?:e|d)/[a-zA-Z0-9]+)",
"url": "\\1"
}
]
},
"free": true,
"id": "filebebo",
"name": "filebebo",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://filebebo.com/images/logo.png"
}

View File

@@ -1,36 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Server Filebebo -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from core import httptools
from core import scrapertools
from platformcode import config
from platformcode import logger
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = get_source(page_url)
if "File was deleted" in data or "File Not Found" in data:
return False, config.get_localized_string(70449) % "Filebebo"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
data = get_source(page_url)
url = scrapertools.find_single_match(data, "<source src=(.*?) type='video/.*?'")
video_urls.append(['Filebebo', url])
return video_urls

View File

@@ -1,42 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https://www.filevideo.net/embed-(?:embed-|)([A-z0-9]+)",
"url": "http://filevideo.net/embed-\\1.html"
}
]
},
"free": true,
"id": "filevideo",
"name": "filevideo",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://s15.postimg.cc/b7jj9dbbf/filevideo.png"
}

View File

@@ -1,42 +0,0 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger, config
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "Not Found" in data or "File was deleted" in data:
return False, config.get_localized_string(70449) % "Filevideo"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
data = httptools.downloadpage(page_url).data
enc_data = scrapertools.find_single_match(data, "type='text/javascript'>(eval.*?)\s*</script>")
dec_data = jsunpack.unpack(enc_data)
video_urls = []
media_urls = scrapertools.find_multiple_matches(dec_data, '\{file\s*:\s*"([^"]+)",label\s*:\s*"([^"]+)"\}')
for media_url, label in media_urls:
ext = scrapertools.get_filename_from_url(media_url)[-4:]
video_urls.append(["%s %sp [filevideo]" % (ext, label), media_url])
video_urls.reverse()
m3u8 = scrapertools.find_single_match(dec_data, '\{file\:"(.*?.m3u8)"\}')
if m3u8:
title = video_urls[-1][0].split(" ", 1)[1]
video_urls.insert(0, [".m3u8 %s" % title, m3u8])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls
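The flow above (download the page, jsunpack the eval-packed script, regex out the file/label pairs) recurs in several of these connectors. A standalone sketch of just the extraction step, run against already-unpacked player JS (the sample string is hypothetical):

import re

unpacked = ('jwplayer("vplayer").setup({sources:['
            '{file:"https://example.com/v_480.mp4",label:"480"},'
            '{file:"https://example.com/v_720.mp4",label:"720"}]});')
pairs = re.findall(r'\{file\s*:\s*"([^"]+)",label\s*:\s*"([^"]+)"\}', unpacked)
# -> [('https://example.com/v_480.mp4', '480'), ('https://example.com/v_720.mp4', '720')]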

View File

@@ -1,42 +0,0 @@
{
"active": false,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "flix555.com/(?:embed-|)([A-z0-9]+)",
"url": "https://flix555.com/embed-\\1.html"
}
]
},
"free": true,
"id": "flix555",
"name": "flix555",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://flix555.com/img/logo.png"
}

View File

@@ -1,44 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import httptools, scrapertools
from lib import jsunpack
from platformcode import config
from platformcode import logger
data = ""
def test_video_exists(page_url):
resp = httptools.downloadpage(page_url)
global data
data = resp.data
if resp.code == 404 or '<b>File Not Found</b>' in resp.data or "<b>File is no longer available" in resp.data:
return False, config.get_localized_string(70449) % "flix555"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info()
itemlist = []
packed = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>(eval.*?)</script>")
unpacked = jsunpack.unpack(packed)
# ~ logger.info(unpacked)
unpacked = re.sub(r'\n|\r|\t|\s{2,}', "", unpacked)
subtitles = scrapertools.find_single_match(unpacked, r'tracks:\s*\[\{\s*file\s*:\s*"([^"]*)"\s*,\s*label')
if "empty." in subtitles: subtitles = ""
matches = scrapertools.find_multiple_matches(unpacked, 'file\s*:\s*"([^"]*)"\s*,\s*label\s*:\s*"([^"]*)"')
if matches:
for url, lbl in matches:
if url.endswith('.srt') or url.endswith('.vtt'):
#subtitles += url
continue
itemlist.append(['.mp4 (%s) [flix555]' % lbl, url, 0, subtitles])
    url = scrapertools.find_single_match(unpacked, 'file\s*:\s*"([^"]*)"\s*')
    if url:
        # only treat the bare file match as a playable stream, not a subtitle
        if not url.endswith('.srt') and not url.endswith('.vtt'):
            itemlist.append(['.m3u8 [flix555]', url, 0, subtitles])
    return itemlist

View File

@@ -1,45 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(freakshare.com/files/[^/]+/[^\"'\\n ]+)",
"url": "http://\\1"
}
]
},
"free": false,
"id": "freakshare",
"name": "freakshare",
"premium": [
"realdebrid",
"alldebrid"
],
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
]
}

View File

@@ -1,18 +0,0 @@
# -*- coding: utf-8 -*-
from core import httptools
from platformcode import logger
def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if "Este archivo no existe" in data:
        return False, "File does not exist"
    return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
return video_urls

View File

@@ -1,42 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "jplayer.net/v/([A-z0-9_-]+)",
"url": "https://www.jplayer.net/api/source/\\1"
}
]
},
"free": true,
"id": "jplayer",
"name": "jplayer",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": ""
}

View File

@@ -1,43 +0,0 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# jplayer connector By Alfa development Group
# --------------------------------------------------------
import sys
from platformcode import config
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
if PY3:
    #from future import standard_library
    #standard_library.install_aliases()
    import urllib.parse as urllib  # very slow on PY2; on PY3 it is native
else:
    import urllib  # use the native PY2 module, which is faster
from core import httptools
from core import jsontools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "no longer exists" in data or "to copyright issues" in data:
return False, config.get_localized_string(70449) % "jplayer"
return True, ""
def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
post = urllib.urlencode({"r":"", "d":"www.jplayer.net"})
data = httptools.downloadpage(page_url, post=post).data
json = jsontools.load(data)["data"]
for _url in json:
url = _url["file"]
label = _url["label"]
video_urls.append([label +" [jplayer]", url])
return video_urls
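For reference, a hypothetical shape of the JSON body this parser expects from the jplayer source API; only the "data" list and its "file"/"label" keys are read above, everything else here is an assumption.

sample = {
    "success": True,  # assumed wrapper field, not read by the connector
    "data": [
        {"file": "https://example.com/360.mp4", "label": "360p"},
        {"file": "https://example.com/720.mp4", "label": "720p"},
    ],
}
for item in sample["data"]:
    print(item["label"] + " [jplayer]", item["file"])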

View File

@@ -1,42 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https://manyvideos.xyz/embed/([A-z0-9]+)",
"url": "https://manyvideos.xyz/embed/\\1"
}
]
},
"free": true,
"id": "manyvideos",
"name": "manyvideos",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": ""
}

View File

@@ -1,38 +0,0 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# manyvideos connector By Alfa development Group
# --------------------------------------------------------
import base64
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import config
from platformcode import logger
def test_video_exists(page_url):
response = httptools.downloadpage(page_url)
if not response.sucess or \
"Not Found" in response.data \
or "File was deleted" in response.data \
or "is no longer available" in response.data:
return False, config.get_localized_string(70449) % "manyvideos"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info()
video_urls = []
    data = httptools.downloadpage(page_url).data
    # the player config is passed to JuicyCodes.Run() as quoted, "+"-joined
    # base64 chunks; dropping the "+" leaves base64 (the stray quotes are
    # non-alphabet characters that b64decode ignores)
    data = scrapertools.find_single_match(data, 'JuicyCodes.Run\(([^\)]+)\)')
    data = data.replace("+", "")
    data = base64.b64decode(data)
    unpack = jsunpack.unpack(data)
matches = scrapertools.find_multiple_matches(unpack, '"file":"([^"]+)","label":"([^"]+)"')
for url,quality in matches:
url = url.replace("v2.", "v1.")
video_urls.append(["[manyvideos] %s" % quality, url])
return video_urls
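A self-contained sketch of the JuicyCodes unwrapping performed above, using a hypothetical payload (plain JS here instead of a packed player config):

import base64

juicy_arg = '"YWxlcnQoJ2hp"+"Jyk="'  # hypothetical JuicyCodes.Run(...) argument
b64 = juicy_arg.replace("+", "")     # quotes are non-alphabet characters,
print(base64.b64decode(b64))         # so b64decode skips them -> b"alert('hi')"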

View File

@@ -1,42 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "megadrive.co/embed/([A-z0-9]+)",
"url": "https://megadrive.co/embed/\\1"
}
]
},
"free": true,
"id": "megadrive",
"name": "megadrive",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://s8.postimg.cc/kr5olxmad/megadrive1.png"
}

View File

@@ -1,27 +0,0 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from platformcode import config
from platformcode import logger
def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if "no longer exists" in data or "to copyright issues" in data:
        return False, "[Megadrive] The video has been removed"
    if "please+try+again+later." in data:
        return False, "[Megadrive] Megadrive error: the video link cannot be generated"
    if "File has been removed due to inactivity" in data:
        return False, config.get_localized_string(70449) % "Megadrive"
    return True, ""
def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
video_urls = []
videourl = scrapertools.find_single_match(data, "<source.*?src='([^']+)")
video_urls.append([".MP4 [megadrive]", videourl])
return video_urls

View File

@@ -1,45 +0,0 @@
{
"active": false,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(?:openload|oload|openloads)[^/]+/(?:embed|f|e|f[0-9]|c|c[0-9])?/([0-9a-zA-Z-_]+)",
"url": "https://oload.stream/f/\\1/"
}
]
},
"free": true,
"id": "openload",
"name": "openload",
"premium": [
"realdebrid"
],
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "server_openload.png"
}

View File

@@ -1,183 +0,0 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import jsontools
from core import scrapertools
from core.servertools import get_server_host
from platformcode import config, logger
from platformcode import platformtools  # needed by the dialogs in login()
host = "https://" + get_server_host('openload')[0]
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
header = {}
if "|" in page_url:
page_url, referer = page_url.split("|", 1)
header = {'Referer': referer}
data = httptools.downloadpage(page_url, headers=header, cookies=False).data
if 'Were Sorry!' in data:
data = httptools.downloadpage(page_url.replace("/embed/", "/f/"), headers=header, cookies=False).data
if 'Were Sorry!' in data:
return False, config.get_localized_string(70449) % "Openload"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info()
itemlist = []
header = {}
if "|" in page_url:
page_url, referer = page_url.split("|", 1)
header = {'Referer': referer}
data = httptools.downloadpage(page_url, cookies=False, headers=header).data
logger.info('OP DATA= ' + data)
subtitle = scrapertools.find_single_match(data, '<track kind="captions" src="([^"]+)" srclang="es"')
    try:
        code = scrapertools.find_single_match(data, '<p style="" id="[^"]+">(.*?)</p>')
        if not code:
            code = scrapertools.find_single_match(data, '<p id="[^"]+" style="">(.*?)</p>')
        # evaluate the obfuscated JS arithmetic, mapping JS parseInt onto Python int
        _0x59ce16 = eval(scrapertools.find_single_match(data, '_0x59ce16=([^;]+)').replace('parseInt', 'int'))
        _1x4bfb36 = eval(scrapertools.find_single_match(data, '_1x4bfb36=([^;]+)').replace('parseInt', 'int'))
        parseInt = eval(scrapertools.find_single_match(data, '_0x30725e,(\(parseInt.*?)\),').replace('parseInt', 'int'))
url = decode(code, parseInt, _0x59ce16, _1x4bfb36)
url = httptools.downloadpage(url, only_headers=True, follow_redirects=False).headers.get('location')
extension = scrapertools.find_single_match(url, '(\..{,3})\?')
itemlist.append([extension, url, 0,subtitle])
except Exception:
logger.info()
if config.get_setting('api', __file__):
url = get_link_api(page_url)
extension = scrapertools.find_single_match(url, '(\..{,3})\?')
if url:
itemlist.append([extension, url, 0,subtitle])
logger.debug(itemlist)
return itemlist
def decode(code, parseInt, _0x59ce16, _1x4bfb36):
    logger.info()
    import math
    _0x1bf6e5 = ''
    # the first 72 hex chars are nine 32-bit key words
    ke = []
    for i in range(0, len(code[0:9 * 8]), 8):
        ke.append(int(code[i:i + 8], 16))
    _0x439a49 = 0
    _0x145894 = 0
    while _0x439a49 < len(code[9 * 8:]):
        _0x5eb93a = 64
        _0x896767 = 0
        _0x1a873b = 0
        _0x3c9d8e = 0
        # accumulate a variable-length group: each byte contributes 6 payload
        # bits, and a value below the continuation threshold ends the group
        while True:
            if _0x439a49 + 1 >= len(code[9 * 8:]):
                _0x5eb93a = 143
            _0x3c9d8e = int(code[9 * 8 + _0x439a49:9 * 8 + _0x439a49 + 2], 16)
            _0x439a49 += 2
            if _0x1a873b < 6 * 5:
                _0x332549 = _0x3c9d8e & 63
                _0x896767 += _0x332549 << _0x1a873b
            else:
                _0x332549 = _0x3c9d8e & 63
                _0x896767 += int(_0x332549 * math.pow(2, _0x1a873b))
            _0x1a873b += 6
            if not _0x3c9d8e >= _0x5eb93a: break
        # _0x30725e = _0x896767 ^ ke[_0x145894 % 9] ^ _0x59ce16 ^ parseInt ^ _1x4bfb36
        _0x30725e = _0x896767 ^ ke[_0x145894 % 9] ^ parseInt ^ _1x4bfb36
        _0x2de433 = _0x5eb93a * 2 + 127
        # unpack four 8-bit characters from the XORed group ('$' is filler)
        for i in range(4):
            _0x3fa834 = chr(((_0x30725e & _0x2de433) >> (9 * 8 // 9) * i) - 1)
            if _0x3fa834 != '$':
                _0x1bf6e5 += _0x3fa834
            _0x2de433 = (_0x2de433 << (9 * 8 // 9))
        _0x145894 += 1
    url = host + "/stream/%s?mime=true" % _0x1bf6e5
    return url
def login():
logger.info()
data = httptools.downloadpage(host).data
_csrf = scrapertools.find_single_match(data, '<input type="hidden" name="_csrf" value="([^"]+)">')
post = {
'LoginForm[email]' : config.get_setting('user', __file__),
'LoginForm[password]' : config.get_setting('password', __file__),
'LoginForm[rememberMe]' : 1,
'_csrf' : _csrf
}
data = httptools.downloadpage(host + '/login', post = post).data
    if 'Login key has already been sent.' in data:
        while True:
            if 'Invalid login key.' in data:
                platformtools.dialog_ok('openload', 'The code entered is not valid\ncheck your e-mail and enter the correct code')
            code = platformtools.dialog_input(post.get('LoginForm[loginkey]', ''),
                                              'Enter the code that was sent to \'%s\'' % 'r_dav')
            if not code:
                break
            else:
                post['LoginForm[loginkey]'] = code
                data = httptools.downloadpage(host + '/login', post=post).data
                if 'Welcome back,' in data: break
def get_api_keys():
logger.info()
api_login = config.get_setting('api_login', __file__)
api_key = config.get_setting('api_key', __file__)
if not api_key or not api_login:
login()
data = httptools.downloadpage(host + '/account').data
post = {
'FTPKey[password]' : config.get_setting('password', __file__),
'_csrf' : scrapertools.find_single_match(data, '<input type="hidden" name="_csrf" value="([^"]+)">')
}
data = httptools.downloadpage(host + '/account', post = post).data
api_login = scrapertools.find_single_match(data, '<tr><td>ID:</td><td>([^<]+)</td></tr>')
api_key = scrapertools.find_single_match(data, 'Your FTP Password/API Key is: ([^<]+) </div>')
config.set_setting('api_login', api_login, __file__)
config.set_setting('api_key', api_key, __file__)
return api_login, api_key
def get_link_api(page_url):
logger.info()
api_login, api_key = get_api_keys()
file_id = scrapertools.find_single_match(page_url, '(?:embed|f)/([0-9a-zA-Z-_]+)')
data = httptools.downloadpage(host + "/api/1/file/dlticket?file=%s&login=%s&key=%s" % (file_id, api_login, api_key)).data
data = jsontools.load(data)
# logger.info(data)
if data["status"] == 200:
ticket = data["result"]["ticket"]
data = httptools.downloadpage(host + "/api/1/file/dl?file=%s&ticket=%s" % (file_id, ticket)).data
data = jsontools.load(data)
return data['result']['url'].replace("https", "http")
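For reference, hypothetical shapes of the two API responses read by get_link_api(); only "status", the result "ticket" and the result "url" are accessed above, the rest is assumed.

dlticket_response = {"status": 200, "result": {"ticket": "abc123xyz"}}
dl_response = {"status": 200, "result": {"url": "https://oload.stream/dl/d/abc123xyz/v.mp4"}}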

View File

@@ -1,42 +0,0 @@
{
"active": false,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(?:rapidvideo|rapidvid).(?:org|com|to|is)/(?:\\?v=|e/|embed/|v/|d/)([A-z0-9]+)",
"url": "https://www.rapidvideo.com/e/\\1"
}
]
},
"free": true,
"id": "rapidvideo",
"name": "rapidvideo",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://s26.postimg.cc/y5arjad1l/rapidvideo1.png"
}

View File

@@ -1,53 +0,0 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from platformcode import config, logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
response = httptools.downloadpage(page_url)
if response.code == 404:
return False, config.get_localized_string(70449) % "RapidVideo"
if not response.data or "urlopen error [Errno 1]" in str(response.code):
if config.is_xbmc():
return False, config.get_localized_string(70302) % "RapidVideo"
elif config.get_platform() == "plex":
return False, config.get_localized_string(70303) % "RapidVideo"
elif config.get_platform() == "mediaserver":
return False, config.get_localized_string(70304) % "RapidVideo"
if "Object not found" in response.data:
return False, config.get_localized_string(70449) % "RapidVideo"
if response.code == 500:
return False, config.get_localized_string(70524) % "RapidVideo"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
post = "confirm.x=77&confirm.y=76&block=1"
if "Please click on this button to open this video" in data:
data = httptools.downloadpage(page_url, post=post).data
patron = 'https://www.rapidvideo.com/e/[^"]+'
match = scrapertools.find_multiple_matches(data, patron)
if match:
for url1 in match:
res = scrapertools.find_single_match(url1, '=(\w+)')
data = httptools.downloadpage(url1).data
if "Please click on this button to open this video" in data:
data = httptools.downloadpage(url1, post=post).data
url = scrapertools.find_single_match(data, 'source src="([^"]+)')
ext = scrapertools.get_filename_from_url(url)[-4:]
video_urls.append(['%s %s [rapidvideo]' % (ext, res), url])
else:
patron = 'src="([^"]+)" type="video/([^"]+)" label="([^"]+)"'
match = scrapertools.find_multiple_matches(data, patron)
if match:
for url, ext, res in match:
video_urls.append(['.%s %s [Rapidvideo]' % (ext, res), url])
return video_urls

View File

@@ -1,54 +0,0 @@
{
"active": false,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "streamango.com/(?:embed|f)/([A-z0-9]+)",
"url": "http://streamango.com/embed/\\1"
},
{
"pattern": "https://fruitadblock.net/embed/([A-z0-9]+)",
"url": "http://streamango.com/embed/\\1"
},
{
"pattern": "https://streamangos.com/e/([A-z0-9]+)",
"url": "http://streamango.com/embed/\\1"
}
]
},
"free": true,
"id": "streamango",
"name": "streamango",
"premium": [
"realdebrid",
"alldebrid"
],
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "http://i.imgur.com/o8XR8fL.png"
}

View File

@@ -1,74 +0,0 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from platformcode import config, logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "We are unable to find the video" in data:
return False, config.get_localized_string(70449) % "Streamango"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
video_urls = []
matches = scrapertools.find_multiple_matches(data, "type:\"video/([^\"]+)\",src:d\('([^']+)',(.*?)\).+?height:(\d+)")
for ext, encoded, code, quality in matches:
media_url = decode(encoded, int(code))
media_url = media_url.replace("@","")
if not media_url.startswith("http"):
media_url = "http:" + media_url
video_urls.append([".%s %sp [streamango]" % (ext, quality), media_url])
video_urls.reverse()
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls
def decode(encoded, code):
    logger.info("encoded '%s', code '%s'" % (encoded, code))
    _0x59b81a = ""
    # reversed base64 alphabet used by the player's obfuscated d() function
    k = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/='
    k = k[::-1]
    count = 0
    # standard base64 regrouping (4 symbols -> 3 bytes), except that the first
    # byte of every group is XORed with the per-source code
    while count <= len(encoded) - 1:
        _0x4a2f3a = k.index(encoded[count])
        count += 1
        _0x29d5bf = k.index(encoded[count])
        count += 1
        _0x3b6833 = k.index(encoded[count])
        count += 1
        _0x426d70 = k.index(encoded[count])
        count += 1
        _0x2e4782 = ((_0x4a2f3a << 2) | (_0x29d5bf >> 4))
        _0x2c0540 = (((_0x29d5bf & 15) << 4) | (_0x3b6833 >> 2))
        _0x5a46ef = ((_0x3b6833 & 3) << 6) | _0x426d70
        _0x2e4782 = _0x2e4782 ^ code
        _0x59b81a = str(_0x59b81a) + chr(_0x2e4782)
        if _0x3b6833 != 64:
            _0x59b81a = str(_0x59b81a) + chr(_0x2c0540)
        if _0x3b6833 != 64:
            _0x59b81a = str(_0x59b81a) + chr(_0x5a46ef)
    return _0x59b81a
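To make the scheme concrete, a hypothetical encoder that round-trips with decode() above (reversed base64 alphabet; the first byte of each 3-byte group is XORed with the code; the input length is kept a multiple of 3 so no padding is involved):

def encode_demo(plain, code):
    # illustrative counterpart of decode(); not part of the original connector
    k = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/='[::-1]
    out = ""
    for i in range(0, len(plain), 3):
        b1 = ord(plain[i]) ^ code  # only the first byte of the group is XORed
        b2, b3 = ord(plain[i + 1]), ord(plain[i + 2])
        out += k[b1 >> 2] + k[((b1 & 3) << 4) | (b2 >> 4)]
        out += k[((b2 & 15) << 2) | (b3 >> 6)] + k[b3 & 63]
    return out

assert decode(encode_demo("//cdn.example.com/video.mp4", 7), 7) == "//cdn.example.com/video.mp4"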

View File

@@ -1,45 +0,0 @@
{
"active": false,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "streamcherry.com/(?:embed|f)/([A-z0-9]+)",
"url": "http://streamcherry.com/embed/\\1"
}
]
},
"free": true,
"id": "streamcherry",
"name": "streamcherry",
"premium": [
"realdebrid"
],
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "http://i.imgur.com/l45Tk0G.png"
}

View File

@@ -1,77 +0,0 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Connector for streamcherry
# --------------------------------------------------------
from core import httptools
from core import scrapertools
from platformcode import config
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "We are unable to find the video" in data:
return False, config.get_localized_string(70449) % "streamcherry"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
video_urls = []
matches = scrapertools.find_multiple_matches(data, "type:\"video/([^\"]+)\",src:d\('([^']+)',(.*?)\).+?height:(\d+)")
for ext, encoded, code, quality in matches:
media_url = decode(encoded, int(code))
media_url = media_url.replace("@","")
if not media_url.startswith("http"):
media_url = "http:" + media_url
video_urls.append([".%s %sp [streamcherry]" % (ext, quality), media_url])
video_urls.reverse()
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls
def decode(encoded, code):
    logger.info("encoded '%s', code '%s'" % (encoded, code))
    _0x59b81a = ""
    # reversed base64 alphabet used by the player's obfuscated d() function
    k = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/='
    k = k[::-1]
    count = 0
    # standard base64 regrouping (4 symbols -> 3 bytes), except that the first
    # byte of every group is XORed with the per-source code
    while count <= len(encoded) - 1:
        _0x4a2f3a = k.index(encoded[count])
        count += 1
        _0x29d5bf = k.index(encoded[count])
        count += 1
        _0x3b6833 = k.index(encoded[count])
        count += 1
        _0x426d70 = k.index(encoded[count])
        count += 1
        _0x2e4782 = ((_0x4a2f3a << 2) | (_0x29d5bf >> 4))
        _0x2c0540 = (((_0x29d5bf & 15) << 4) | (_0x3b6833 >> 2))
        _0x5a46ef = ((_0x3b6833 & 3) << 6) | _0x426d70
        _0x2e4782 = _0x2e4782 ^ code
        _0x59b81a = str(_0x59b81a) + chr(_0x2e4782)
        if _0x3b6833 != 64:
            _0x59b81a = str(_0x59b81a) + chr(_0x2c0540)
        if _0x3b6833 != 64:
            _0x59b81a = str(_0x59b81a) + chr(_0x5a46ef)
    return _0x59b81a

View File

@@ -1,59 +0,0 @@
{
"active": false,
"find_videos": {
"ignore_urls": [
"http://streamcloud.eu/stylesheets",
"http://streamcloud.eu/control",
"http://streamcloud.eu/xupload",
"http://streamcloud.eu/js",
"http://streamcloud.eu/favicon",
"http://streamcloud.eu/reward",
"http://streamcloud.eu/login",
"http://streamcloud.eu/deliver",
"http://streamcloud.eu/faq",
"http://streamcloud.eu/tos",
"http://streamcloud.eu/checkfiles",
"http://streamcloud.eu/contact",
"http://streamcloud.eu/serve"
],
"patterns": [
{
"pattern": "(streamcloud.eu/[a-z0-9]+)",
"url": "http://\\1"
}
]
},
"free": true,
"id": "streamcloud",
"name": "streamcloud",
"premium": [
"alldebrid"
],
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "server_streamcloud.png"
}

View File

@@ -1,78 +0,0 @@
# -*- coding: utf-8 -*-
from __future__ import print_function
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(url=page_url).data
    if ("<h1>404 Not Found</h1>" in data or "<h1>File Not Found</h1>" in data
            or "<h1>Archivo no encontrado</h1>" in data):
        return False, "The file does not exist<br/>on streamcloud or has been removed."
    return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
    # Request it once
headers = [
['User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14']]
data = httptools.downloadpage(page_url, headers=headers).data
media_url = scrapertools.find_single_match(data, 'file\: "([^"]+)"')
if len(media_url) == 0:
post = ""
matches = scrapertools.find_multiple_matches(data, '<input.*?name="([^"]+)".*?value="([^"]*)">')
for inputname, inputvalue in matches:
post += inputname + "=" + inputvalue + "&"
post = post.replace("op=download1", "op=download2")
data = httptools.downloadpage(page_url, post=post).data
        if 'id="justanotice"' in data:
            logger.info("data=" + data)
            logger.info("The adblock detector was triggered")
            return []
        # Extract the URL
        media_url = scrapertools.find_single_match(data, 'file\: "([^"]+)"')
video_urls = []
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [streamcloud]", media_url+"|Referer="+page_url])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls
if __name__ == "__main__":
import getopt
import sys
options, arguments = getopt.getopt(sys.argv[1:], "", ["video_url=", "login=", "password="])
video_url = ""
login = ""
password = ""
logger.info("%s %s" % (str(options), str(arguments)))
for option, argument in options:
print(option, argument)
if option == "--video_url":
video_url = argument
elif option == "--login":
login = argument
elif option == "--password":
password = argument
else:
            assert False, "Unknown option"
    if video_url == "":
        print("usage example:")
        print("streamcloud --video_url http://xxx --login user --password secret")
else:
if login != "":
premium = True
else:
premium = False
print(get_video_url(video_url, premium, login, password))
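A standalone sketch (simplified, with a hypothetical form) of the hidden-input repost that get_video_url() above performs when the first page does not yet contain the file URL:

import re

html = ('<input type="hidden" name="op" value="download1">'
        '<input type="hidden" name="id" value="abcd1234">')
fields = re.findall(r'<input.*?name="([^"]+)".*?value="([^"]*)">', html)
post = "&".join("%s=%s" % (name, value) for name, value in fields)
post = post.replace("op=download1", "op=download2")
# -> "op=download2&id=abcd1234", ready to POST back to the same page_url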

View File

@@ -1,42 +0,0 @@
{
"active": false,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(?:thevideo.me|tvad.me|thevideo.ch|thevideo.us)/(?:embed-|)([A-z0-9]+)",
"url": "https://thevideo.me/embed-\\1.html"
}
]
},
"free": true,
"id": "thevideome",
"name": "thevideome",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://s26.postimg.cc/fzmu2c761/thevideo.me1.png"
}

View File

@@ -1,45 +0,0 @@
# -*- coding: utf-8 -*-
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
if PY3:
    #from future import standard_library
    #standard_library.install_aliases()
    import urllib.parse as urllib  # very slow on PY2; on PY3 it is native
else:
    import urllib  # use the native PY2 module, which is faster
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    # Server disabled until further notice
    return False, "[Thevideo.me] Server disabled"
    page_url = httptools.downloadpage(page_url, follow_redirects=False, only_headers=True).headers.get("location", "")
    data = httptools.downloadpage(page_url).data
    if "File was deleted" in data or "Page Cannot Be Found" in data or "<title>Video not found" in data:
        return False, "[thevideo.me] The file has been removed or does not exist"
    return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
post = {}
post = urllib.urlencode(post)
if not "embed" in page_url:
page_url = page_url.replace("https://thevideo.me/", "https://thevideo.me/embed-") + ".html"
url = httptools.downloadpage(page_url, follow_redirects=False, only_headers=True).headers.get("location", "")
data = httptools.downloadpage("https://vev.io/api/serve/video/" + scrapertools.find_single_match(url, "embed/([A-z0-9]+)"), post=post).data
bloque = scrapertools.find_single_match(data, 'qualities":\{(.*?)\}')
matches = scrapertools.find_multiple_matches(bloque, '"([^"]+)":"([^"]+)')
for res, media_url in matches:
video_urls.append(
[scrapertools.get_filename_from_url(media_url)[-4:] + " (" + res + ") [thevideo.me]", media_url])
return video_urls

View File

@@ -1,41 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "thevideos.tv/(?:embed-|)([a-z0-9A-Z]+)",
"url": "http://thevideos.tv/embed-\\1.html"
}
]
},
"free": true,
"id": "thevideos",
"name": "thevideos",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
]
}

View File

@@ -1,29 +0,0 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
data = httptools.downloadpage(page_url).data
match = scrapertools.find_single_match(data, "<script type='text/javascript'>(.*?)</script>")
if match.startswith("eval"):
match = jsunpack.unpack(match)
    # Extract the URL
    # {file:"http://95.211.81.229/kj2vy4rle46vtaw52bsj4ooof6meikcbmwimkrthrahbmy4re3eqg3buhoza/v.mp4",label:"240p"
video_urls = []
media_urls = scrapertools.find_multiple_matches(match, '\{file\:"([^"]+)",label:"([^"]+)"')
subtitle = scrapertools.find_single_match(match, 'tracks: \[\{file: "([^"]+)", label: "Spanish"')
for media_url, quality in media_urls:
video_urls.append([media_url[-4:] + " [thevideos] " + quality, media_url, 0, subtitle])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -1,45 +0,0 @@
{
"active": false,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(?:verystream|woof.tube).*?/(?:e|stream)/([0-9a-zA-Z-_]+)",
"url": "https://verystream.com/e/\\1/"
}
]
},
"free": true,
"id": "verystream",
"name": "verystream",
"premium": [
"realdebrid"
],
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "server_verystream.png"
}

View File

@@ -1,55 +0,0 @@
# -*- coding: utf-8 -*-
# Verystream server tool
# Developed by KOD for KOD
# KOD - Kodi on Demand Team
from core import httptools
from core import scrapertools
from platformcode import config, logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
header = {}
if "|" in page_url:
page_url, referer = page_url.split("|", 1)
header = {'Referer': referer}
data = httptools.downloadpage(page_url, headers=header, cookies=False).data
if 'not found!' in data:
data = httptools.downloadpage(page_url.replace("/e/", "/stream/"), headers=header, cookies=False).data
if 'not found!' in data:
return False, config.get_localized_string(70449) % "Verystream"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info()
itemlist = []
header = {}
if "|" in page_url:
page_url, referer = page_url.split("|", 1)
header = {'Referer': referer}
data = httptools.downloadpage(page_url, cookies=False, headers=header).data
subtitle = scrapertools.find_single_match(data, '<track kind="captions" src="([^"]+)" srclang="it"')
try:
code = scrapertools.find_single_match(data, '<p style="" class="" id="videolink">(.*?)</p>' )
url = "https://verystream.com/gettoken/" + code + "?mime=true"
url = httptools.downloadpage(url, only_headers=True, follow_redirects=False).headers.get('location')
extension = scrapertools.find_single_match(url, '(\..{,3})\?')
itemlist.append([extension, url, 0,subtitle])
except Exception:
logger.info()
if config.get_setting('api', __file__):
url = get_link_api(page_url)
extension = scrapertools.find_single_match(url, '(\..{,3})\?')
if url:
itemlist.append([extension, url, 0,subtitle])
logger.debug(itemlist)
return itemlist

View File

@@ -1,42 +0,0 @@
{
"active": false,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https://videofiles.net/(?:embed-|)([A-z0-9]+)",
"url": "https://videofiles.net/embed-\\1.html"
}
]
},
"free": true,
"id": "videofiles",
"name": "videofiles",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://videofiles.net/img/logo.png"
}

View File

@@ -1,39 +0,0 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Videofiles connector By Alfa development Group
# --------------------------------------------------------
import re
from core import httptools
from platformcode import config
from platformcode import logger
data = ""
def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    global data
    response = httptools.downloadpage(page_url)
    # "sucess" is the spelling used by this project's httptools response object
    if not response.sucess or "Not Found" in response.data or "File was deleted" in response.data or "is no longer available" in response.data:
        return False, config.get_localized_string(70449) % "Videofiles"
    data = response.data
    return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
patron = 'src: "([^"]+)", type: "([^"]+)", res: (\d+),'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, ext, res in matches:
res = res+'p'
try:
ext = ext.split("/")[1]
except:
pass
video_urls.append(["%s (%s) [videofiles]" % (ext, res), url])
return video_urls

View File

@@ -1,42 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "videoz.me/(?:embed-|)([A-z0-9]+)",
"url": "https://videoz.me/embed-\\1.html"
}
]
},
"free": true,
"id": "videoz",
"name": "videoz",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://i.imgur.com/FAPDkF6.png"
}

View File

@@ -1,34 +0,0 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Videoz connector By Alfa development Group
# --------------------------------------------------------
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import config
from platformcode import logger
data = ""
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
global data
data = httptools.downloadpage(page_url).data
if "Not Found" in data or "File was deleted" in data:
return False, config.get_localized_string(70449) % "Videoz"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
packed = scrapertools.find_single_match(data, "text/javascript'>(eval.*?)\s*</script>")
unpacked = jsunpack.unpack(packed)
media_url = scrapertools.find_single_match(unpacked, 'file:"([^"]+)"')
#media_url += "|Referer=%s" %page_url
if "m3u8" in media_url:
ext = "m3u8"
video_urls.append(["%s [videoz]" % (ext), media_url])
return video_urls

View File

@@ -1,42 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "vidgot.com/(?:embed-|)([A-z0-9]+)",
"url": "http://www.vidgot.com/embed-\\1.html"
}
]
},
"free": true,
"id": "vidgot",
"name": "vidgot",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "http://i.imgur.com/tAI34bF.png?1"
}

View File

@@ -1,46 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "vidzi.tv/embed-([a-z0-9A-Z]+)",
"url": "http://vidzi.tv/embed-\\1.html"
},
{
"pattern": "vidzi.tv/([a-z0-9A-Z]+)",
"url": "http://vidzi.tv/embed-\\1.html"
}
]
},
"free": true,
"id": "vidzi",
"name": "vidzi",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://s1.postimg.cc/597or3a31b/vidzi1.png"
}

View File

@@ -1,38 +0,0 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import config
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
response = httptools.downloadpage(page_url)
if not response.sucess:
return False, config.get_localized_string(70449) % "Vidzi"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
if not "embed" in page_url:
page_url = page_url.replace("http://vidzi.tv/", "http://vidzi.tv/embed-") + ".html"
data = httptools.downloadpage(page_url).data
media_urls = scrapertools.find_multiple_matches(data, 'file\s*:\s*"([^"]+)"')
if not media_urls:
data = scrapertools.find_single_match(data,
"<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script>")
data = jsunpack.unpack(data)
media_urls = scrapertools.find_multiple_matches(data, 'file\s*:\s*"([^"]+)"')
video_urls = []
for media_url in media_urls:
ext = scrapertools.get_filename_from_url(media_url)[-4:]
if not media_url.endswith("vtt"):
video_urls.append(["%s [vidzi]" % ext, media_url])
return video_urls