Unused servers

auroravid
backin
bigfile
streaminto
tunepk
tutv
Intel1
2018-08-17 09:52:04 -05:00
parent 79a4f1408b
commit 6b412ba67b
12 changed files with 0 additions and 532 deletions

@@ -1,41 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "(?:embed.|)auroravid.to/(?:video/|embed/\\?v=)([A-z0-9]{13})",
                "url": "http://www.auroravid.to/embed/?v=\\1"
            }
        ]
    },
    "free": true,
    "id": "auroravid",
    "name": "auroravid",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ]
}
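
For context, every "find_videos" block in these files works the same way: "pattern" is run over page HTML and each captured ID is expanded into the "url" template. A minimal sketch, using a hypothetical embed snippet (not from the addon):

import re

pattern = r"(?:embed.|)auroravid.to/(?:video/|embed/\?v=)([A-z0-9]{13})"
url_template = r"http://www.auroravid.to/embed/?v=\1"

# Hypothetical HTML; the 13-character ID is made up
sample_html = 'iframe src="http://www.auroravid.to/embed/?v=abcdefgh12345"'
for match in re.finditer(pattern, sample_html):
    print(match.expand(url_template))
# -> http://www.auroravid.to/embed/?v=abcdefgh12345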

@@ -1,43 +0,0 @@
# -*- coding: utf-8 -*-

import re

from core import httptools
from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if "This file no longer exists on our servers" in data:
        return False, "[Auroravid] The file has been deleted"
    elif "is being converted" in data:
        return False, "[Auroravid] The file is still being processed"
    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    video_urls = []
    videourls = scrapertools.find_multiple_matches(data, 'src\s*:\s*[\'"]([^\'"]+)[\'"]')
    if not videourls:
        videourls = scrapertools.find_multiple_matches(data, '<source src=[\'"]([^\'"]+)[\'"]')
    for videourl in videourls:
        if videourl.endswith(".mpd"):
            # Map the DASH manifest to its progressive MP4 download URL
            id = scrapertools.find_single_match(videourl, '/dash/(.*?)/')
            videourl = "http://www.auroravid.to/download.php%3Ffile=mm" + "%s.mp4" % id
        # Normalize numbered mirrors (/dl1/, /dl2/, ...) to the plain /dl/ host path
        videourl = re.sub(r'/dl(\d)*/', '/dl/', videourl)
        ext = scrapertools.get_filename_from_url(videourl)[-4:]
        # Kodi convention: extra request headers ride after "|" in the URL
        videourl = videourl.replace("%3F", "?") + \
                   "|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0"
        video_urls.append([ext + " [auroravid]", videourl])
    return video_urls
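
The ".mpd" branch above is a fixed URL rewrite from the DASH manifest to the progressive MP4 download. A standalone sketch with an invented manifest URL (only the transformation mirrors the code):

import re

# Hypothetical DASH manifest URL as found in the player config
videourl = "http://www.auroravid.to/dash/abc123def4567/manifest.mpd"
id = re.search(r"/dash/(.*?)/", videourl).group(1)
videourl = "http://www.auroravid.to/download.php%3Ffile=mm" + "%s.mp4" % id
print(videourl.replace("%3F", "?"))
# -> http://www.auroravid.to/download.php?file=mmabc123def4567.mp4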

@@ -1,41 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "(?:backin).net/([A-Z0-9]+)",
                "url": "http://backin.net/s/generating.php?code=\\1"
            }
        ]
    },
    "free": true,
    "id": "backin",
    "name": "backin",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ]
}

@@ -1,39 +0,0 @@
# -*- coding: utf-8 -*-

from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = scrapertools.cache_page(page_url)
    # if '<meta property="og:title" content=""/>' in data:
    #     return False, "The video has been cancelled from Backin.net"
    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)
    video_urls = []
    headers = []
    headers.append(["User-Agent",
                    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_5) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.52 Safari/537.17"])
    # First access
    data = scrapertools.cache_page(page_url, headers=headers)
    logger.info("data=" + data)
    # Direct MP4 URL from the player markup
    url = scrapertools.find_single_match(data, 'type="video/mp4" src="([^"]+)"')
    logger.info("url=" + url)
    # Video URL
    video_urls.append([".mp4" + " [backin]", url])
    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))
    return video_urls

@@ -1,44 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "bigfile.to/((?:list|file)/[\\w]+)",
                "url": "https://www.bigfile.to/\\1"
            }
        ]
    },
    "free": false,
    "id": "bigfile",
    "name": "bigfile",
    "premium": [
        "realdebrid"
    ],
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ]
}

@@ -1,15 +0,0 @@
# -*- coding: utf-8 -*-

from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    # bigfile has no free mode ("free": false in the JSON); links are resolved
    # through the configured debrid service, so the free resolver returns nothing.
    logger.info("(page_url='%s')" % page_url)
    video_urls = []
    return video_urls

@@ -1,58 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [
            "http://streamin.to/embed-theme.html",
            "http://streamin.to/embed-jquery.html",
            "http://streamin.to/embed-s.html",
            "http://streamin.to/embed-images.html",
            "http://streamin.to/embed-faq.html",
            "http://streamin.to/embed-embed.html",
            "http://streamin.to/embed-ri.html",
            "http://streamin.to/embed-d.html",
            "http://streamin.to/embed-css.html",
            "http://streamin.to/embed-js.html",
            "http://streamin.to/embed-player.html",
            "http://streamin.to/embed-cgi.html"
        ],
        "patterns": [
            {
                "pattern": "streamin.to/(?:embed-)?([a-z0-9A-Z]+)",
                "url": "http://streamin.to/embed-\\1.html"
            }
        ]
    },
    "free": true,
    "id": [
        "streaminto",
        "streamin"
    ],
    "name": "streamin",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "server_streaminto.png"
}

@@ -1,77 +0,0 @@
# -*- coding: utf-8 -*-

import re

from core import httptools
from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if "File was deleted" in data:
        return False, "The file does not exist<br/>on streaminto or has been deleted."
    elif "Video is processing now" in data:
        return False, "The file is being processed<br/>Try again in a while."
    else:
        return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)
    data = re.sub(r'\n|\t|\s+', '', httptools.downloadpage(page_url).data)
    video_urls = []
    try:
        media_url = scrapertools.get_match(data, """.setup\({file:"([^"]+)",image""")
    except Exception:
        # The player setup is hidden inside a packed JS block; unpack it first
        js_data = scrapertools.find_single_match(data, "(eval.function.p,a,c,k,e.*?)</script>")
        js_data = unPack(js_data)
        media_url = scrapertools.get_match(js_data, """.setup\({file:"([^"]+)",image""")
    # Offer both container variants when the server exposes v.mp4/v.flv twins
    if media_url.endswith("v.mp4"):
        media_url_mp42flv = re.sub(r'/v.mp4$', '/v.flv', media_url)
        video_urls.append(
            [scrapertools.get_filename_from_url(media_url_mp42flv)[-4:] + " [streaminto]", media_url_mp42flv])
    if media_url.endswith("v.flv"):
        media_url_flv2mp4 = re.sub(r'/v.flv$', '/v.mp4', media_url)
        video_urls.append(
            [scrapertools.get_filename_from_url(media_url_flv2mp4)[-4:] + " [streaminto]", media_url_flv2mp4])
    video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [streaminto]", media_url])
    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))
    return video_urls


def unPack(packed):
    # Undo Dean Edwards' p.a.c.k.e.r.: pull out the payload, radix, word count
    # and keyword table, then swap every base-encoded token for its keyword.
    pattern = r"}\('(.*)', *(\d+), *(\d+), *'(.*)'\.split\('([^']+)'\)"
    p, a, c, k, sep = re.search(pattern, packed, re.DOTALL).groups()
    a = int(a)
    c = int(c)
    k = k.split(sep)
    if a <= 62:
        toString = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    else:
        toString = """ !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[]^_`abcdefghijklmnopqrstuvwxyz{|}~"""

    def e(c):
        # Base-"a" encoding of an index, as produced by the packer
        return toString[c] if c < a else toString[c // a] + toString[c % a]

    while c > 0:
        c -= 1
        if k[c]:
            # Empty table slots mean the token stands for itself; skip them
            p = re.sub(r"\b%s\b" % e(c), k[c], p)
    return p
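
As a sanity check, unPack round-trips a toy p.a.c.k.e.r. payload (the input below is fabricated; real pages wrap much longer payloads in the same envelope):

packed = "eval(function(p,a,c,k,e,d){}('0 1', 62, 2, 'hello|world'.split('|')))"
print(unPack(packed))
# -> hello world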

@@ -1,41 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "tune.pk/player/embed_player.php\\?vid\\=(\\d+)",
                "url": "http://embed.tune.pk/play/\\1?autoplay=no"
            }
        ]
    },
    "free": true,
    "id": "tunepk",
    "name": "tunepk",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ]
}

@@ -1,32 +0,0 @@
# -*- coding: utf-8 -*-

import re

from core import scrapertools
from platformcode import logger


# Returns an array of possible video URLs from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    video_urls = []
    data = scrapertools.cache_page(page_url)
    logger.info(data)
    # One (url, quality, format) tuple per entry in the player's source list
    patron = 'file: "([^"]+)",\s+'
    patron += 'width: "[^"]+",\s+'
    patron += 'height: "[^"]+",\s+'
    patron += 'label : "([^"]+)",\s+'
    patron += 'type : "([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for url, calidad, formato in matches:
        video_url = ["%s %s [tune.pk]" % (calidad, formato), url]
        video_urls.append(video_url)
    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))
    return video_urls
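
The pattern above targets a jwplayer-style source list. Against a fabricated snippet of that shape, findall returns the (url, quality, format) tuples:

import re

# Fabricated player snippet mimicking the layout the pattern expects
data = '''file: "http://video.tune.pk/files/123/480.mp4",
width: "640",
height: "360",
label : "480p",
type : "video/mp4"'''
patron = (r'file: "([^"]+)",\s+width: "[^"]+",\s+height: "[^"]+",\s+'
          r'label : "([^"]+)",\s+type : "([^"]+)"')
print(re.compile(patron, re.DOTALL).findall(data))
# -> [('http://video.tune.pk/files/123/480.mp4', '480p', 'video/mp4')]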

@@ -1,45 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "(http://(?:www.)?tu.tv[^\"]+)",
                "url": "\\1"
            },
            {
                "pattern": "tu.tv/(iframe/\\d+)",
                "url": "http://tu.tv/\\1"
            }
        ]
    },
    "free": true,
    "id": "tutv",
    "name": "tutv",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ]
}

@@ -1,56 +0,0 @@
# -*- coding: utf-8 -*-

import re
import urllib

from core import scrapertools
from platformcode import logger


# Returns an array of possible video URLs from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    # Look for the ID in the URL
    id = extract_id(page_url)
    # If it is not there, extract it from the page itself
    if id == "":
        # Download the page
        data = scrapertools.cache_page(page_url)
        patron = '<link rel="video_src" href="([^"]+)"/>'
        matches = re.compile(patron, re.DOTALL).findall(data)
        if len(matches) > 0:
            id = extract_id(matches[0])
        else:
            id = ""
    # Last resort: iframe-style URLs carry the ID directly
    if id == "":
        id = scrapertools.get_match(page_url, "tu.tv/iframe/(\d+)")
    # Download the descriptor
    url = "http://tu.tv/visualizacionExterna2.php?web=undefined&codVideo=" + id
    data = scrapertools.cache_page(url)
    # Get the video link
    patronvideos = 'urlVideo0=([^\&]+)\&'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    # scrapertools.printMatches(matches)
    url = urllib.unquote_plus(matches[0])
    video_urls = [["[tu.tv]", url]]
    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))
    return video_urls


def extract_id(text):
    patron = "xtp\=([a-zA-Z0-9]+)"
    matches = re.compile(patron, re.DOTALL).findall(text)
    if len(matches) > 0:
        devuelve = matches[0]
    else:
        devuelve = ""
    return devuelve
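
extract_id just pulls the "xtp" query value out of a URL; with made-up inputs:

print(extract_id("http://tu.tv/visor.swf?xtp=a1b2c3"))  # -> a1b2c3
print(extract_id("http://tu.tv/iframe/12345"))          # -> "" (no xtp parameter)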