Actualizados 1

animeflv.me: desactivado, no funciona la web
peliculasrey: desactivado, no funciona la web
yaske: desactivado, no funciona la web
httptools: fix error certificado SSL
strings.po: fix languages
adnstream: eliminado, no existe server
bitvidsx: eliminado, no existe server
cloudy: eliminado, no existe server
thevideome: fix
vshare: update test_video_exists
This commit is contained in:
Intel1
2018-08-04 12:42:12 -05:00
parent 8ddfdaad1a
commit 1c751fc1b7
18 changed files with 44 additions and 271 deletions

View File

@@ -1,7 +1,7 @@
{
"id": "animeflv_me",
"name": "Animeflv.ME",
"active": true,
"active": false,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "http://i.imgur.com/x9AdvBx.png",

View File

@@ -1,7 +1,7 @@
{
"id": "peliculasrey",
"name": "peliculasrey",
"active": true,
"active": false,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "peliculasrey.png",

View File

@@ -1,7 +1,7 @@
{
"id": "yaske",
"name": "Yaske",
"active": true,
"active": false,
"adult": false,
"language": ["cast", "lat"],
"banner": "yaske.png",

View File

@@ -3,6 +3,24 @@
# httptools
# --------------------------------------------------------------------------------
# Fix para error de validación del certificado del tipo:
# [downloadpage] Response code: <urlopen error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:661)>
# [downloadpage] Response error: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:661)
# Fix desde la página: https://stackoverflow.com/questions/27835619/urllib-and-ssl-certificate-verify-failed-error
#-----------------------------------------------------------------------
import ssl
# Work around SSL: CERTIFICATE_VERIFY_FAILED errors by making the default
# HTTPS context skip certificate verification.  On legacy Pythons that do not
# verify certificates by default, the private hook is absent and this is a
# no-op (same outcome either way).
_unverified_context = getattr(ssl, "_create_unverified_context", None)
if _unverified_context is not None:
    ssl._create_default_https_context = _unverified_context
#-----------------------------------------------------------------------
import inspect
import cookielib
import gzip

View File

@@ -846,7 +846,7 @@ msgid "Enter URL"
msgstr ""
msgctxt "#60089"
msgid "Enter the URL [Link to server / download]"
msgid "Enter the URL [Link to server/download]"
msgstr ""
msgctxt "#60090"

View File

@@ -834,8 +834,8 @@ msgid "Enter URL"
msgstr "Inserisci URL"
msgctxt "#60089"
msgid "Enter the URL [Link to server / download]"
msgstr "Inserire l'URL [Link a server / download]"
msgid "Enter the URL [Link to server/download]"
msgstr "Inserire l'URL [Link a server/download]"
msgctxt "#60090"
msgid "Enter the URL [Direct link to video]."

View File

@@ -3845,14 +3845,6 @@ msgctxt "#70288"
msgid "Configure Downloads"
msgstr "Configurar Descargas"
msgctxt "#70289"
msgid "Alfa\nCorrected an error in the adult section, the password has been reset to "
msgstr "Alfa\nCorregido un error en la seccion adultos, se ha reseteado la contrasena a por "
msgctxt "#70290"
msgid "default, you will have to change it again if you want.\ n Type 's', if you have understood it: "
msgstr "defecto, tendra que cambiarla de nuevo si lo desea.\n Escriba 's', si lo ha entendido: "
msgctxt "#70291"
msgid "Error, during conversion"
msgstr "Error, en conversión"

View File

@@ -3845,14 +3845,6 @@ msgctxt "#70288"
msgid "Configure Downloads"
msgstr "Configurar Descargas"
msgctxt "#70289"
msgid "Alfa\nCorrected an error in the adult section, the password has been reset to "
msgstr "Alfa\nCorregido un error en la seccion adultos, se ha reseteado la contrasena a por "
msgctxt "#70290"
msgid "default, you will have to change it again if you want.\ n Type 's', if you have understood it: "
msgstr "defecto, tendra que cambiarla de nuevo si lo desea.\n Escriba 's', si lo ha entendido: "
msgctxt "#70291"
msgid "Error, during conversion"
msgstr "Error, en conversión"

View File

@@ -1,41 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "adnstream.com/video/([a-zA-Z]+)",
"url": "http://www.adnstream.com/video/\\1/"
}
]
},
"free": true,
"id": "adnstream",
"name": "adnstream",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
]
}

View File

@@ -1,30 +0,0 @@
# -*- coding: utf-8 -*-
from core import scrapertools
from platformcode import logger
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve the final media URL for an adnstream video page.

    Returns a list of [label, media_url] pairs, as expected by callers.
    """
    logger.info("(page_url='%s')" % page_url)
    # Derive the internal video code: either parse it from a full URL,
    # or assume the caller already passed the bare code.
    if not page_url.startswith("http://"):
        code = page_url
    else:
        try:
            code = scrapertools.get_match(page_url, "http\://www.adnstream.com/video/([a-zA-Z]+)/")
        except:
            # Fall back to the .tv domain variant.
            code = scrapertools.get_match(page_url, "http\://www.adnstream.tv/video/([a-zA-Z]+)/")
    # Fetch the playlist XML that carries the real media location.
    playlist = scrapertools.cache_page("http://www.adnstream.com/get_playlist.php?lista=video&param=" + code + "&c=463")
    media_url = scrapertools.get_match(playlist, "<jwplayer:file>([^<]+)</jwplayer:file>")
    extension = scrapertools.get_filename_from_url(media_url)[-4:]
    video_urls = [[extension + ' [adnstream]', media_url]]
    for label, url in video_urls:
        logger.info("%s - %s" % (label, url))
    return video_urls

View File

@@ -1,45 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(http://www.(?:videoweed|bitvid)\\.[a-z]+/file/[a-zA-Z0-9]+)",
"url": "\\1"
},
{
"pattern": "(http://embed.(?:videoweed|bitvid)\\.[a-z]+/embed.php?v=[a-zA-Z0-9]+)",
"url": "\\1"
}
]
},
"free": true,
"id": "bitvidsx",
"name": "bitvidsx",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
]
}

View File

@@ -1,40 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
    """Report whether the bitvid file is ready, as an (ok, message) pair."""
    logger.info("(page_url='%s')" % page_url)
    page = httptools.downloadpage(page_url).data
    # The site shows this banner both while encoding and after deletion.
    if "This video is not yet ready" not in page:
        return True, ""
    return False, "[Bitvid] El fichero está en proceso todavía o ha sido eliminado"
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Extract playable media URLs from a bitvid page.

    Returns a list of [label, url] pairs; the url carries a piped
    User-Agent suffix so the player sends the expected header.
    """
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    video_urls = []
    # Primary pattern: JS player config entries (src: "...").
    videourls = scrapertools.find_multiple_matches(data, 'src\s*:\s*[\'"]([^\'"]+)[\'"]')
    if not videourls:
        # Fallback: plain HTML5 <source> tags.
        videourls = scrapertools.find_multiple_matches(data, '<source src=[\'"]([^\'"]+)[\'"]')
    for videourl in videourls:
        if videourl.endswith(".mpd"):
            # DASH manifest: rebuild a direct .mp4 download URL from the id.
            # "%3F" is used here so the re.sub below can't touch the query part;
            # it is decoded back to "?" after the substitution.
            id = scrapertools.find_single_match(videourl, '/dash/(.*?)/')
            videourl = "http://www.bitvid.sx/download.php%3Ffile=mm" + "%s.mp4" % id
        # Normalise numbered mirror paths (/dl3/ -> /dl/).
        videourl = re.sub(r'/dl(\d)*/', '/dl/', videourl)
        ext = scrapertools.get_filename_from_url(videourl)[-4:]
        videourl = videourl.replace("%3F", "?") + \
            "|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0"
        video_urls.append([ext + " [bitvid]", videourl])
    return video_urls

View File

@@ -1,42 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "cloudy.ec/(?:embed.php\\?id=|v/)([A-z0-9]+)",
"url": "https://www.cloudy.ec/embed.php?id=\\1&playerPage=1"
}
]
},
"free": true,
"id": "cloudy",
"name": "cloudy",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://s1.postimg.cc/9e6doboo2n/cloudy1.png"
}

View File

@@ -1,29 +0,0 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
    """Report whether the cloudy video is available, as an (ok, message) pair."""
    logger.info("(page_url='%s')" % page_url)
    page = httptools.downloadpage(page_url).data
    if "This video is being prepared" not in page:
        return True, ""
    return False, "[Cloudy] El archivo no existe o ha sido borrado"
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Collect playable media URLs from a cloudy embed page.

    Returns a list of [label, url] pairs; each url gets a piped
    User-Agent suffix appended for the player.
    """
    logger.info("(page_url='%s')" % page_url)
    page = httptools.downloadpage(page_url).data
    results = []
    # Each HTML5 <source> tag holds one candidate stream.
    for media_url in scrapertools.find_multiple_matches(page, '<source src="([^"]+)"'):
        label = "%s [cloudy]" % scrapertools.get_filename_from_url(media_url)[-4:]
        results.append([label, media_url + "|User-Agent=Mozilla/5.0"])
    return results

View File

@@ -5,7 +5,7 @@
"patterns": [
{
"pattern": "(?:thevideo.me|tvad.me|thevid.net|thevideo.ch|thevideo.us)/(?:embed-|)([A-z0-9]+)",
"url": "http://thevideo.me/embed-\\1.html"
"url": "https://thevideo.me/embed-\\1.html"
}
]
},

View File

@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
import urllib
from core import httptools
from core import scrapertools
from platformcode import logger
@@ -7,6 +8,7 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
return True, ""
data = httptools.downloadpage(page_url).data
if "File was deleted" in data or "Page Cannot Be Found" in data:
return False, "[thevideo.me] El archivo ha sido eliminado o no existe"
@@ -15,19 +17,16 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
if not "embed" in page_url:
page_url = page_url.replace("http://thevideo.me/", "http://thevideo.me/embed-") + ".html"
data = httptools.downloadpage(page_url).data
var = scrapertools.find_single_match(data, 'vsign.player.*?\+ (\w+)')
mpri_Key = scrapertools.find_single_match(data, "%s='([^']+)'" %var)
data_vt = httptools.downloadpage("https://thevideo.me/vsign/player/%s" % mpri_Key).data
vt = scrapertools.find_single_match(data_vt, 'function\|([^\|]+)\|')
if "fallback" in vt:
vt = scrapertools.find_single_match(data_vt, 'jwConfig\|([^\|]+)\|')
media_urls = scrapertools.find_multiple_matches(data, '\{"file"\s*\:\s*"([^"]+)"\s*,\s*"label"\s*\:\s*"([^"]+)"')
video_urls = []
for media_url, label in media_urls:
media_url += "?direct=false&ua=1&vt=%s" % vt
post= {}
post = urllib.urlencode(post)
if not "embed" in page_url:
page_url = page_url.replace("https://thevideo.me/", "https://thevideo.me/embed-") + ".html"
url = httptools.downloadpage(page_url, follow_redirects=False, only_headers=True).headers.get("location", "")
data = httptools.downloadpage("https://vev.io/api/serve/video/" + scrapertools.find_single_match(url, "embed/([A-z0-9]+)"), post=post).data
bloque = scrapertools.find_single_match(data, 'qualities":\{(.*?)\}')
matches = scrapertools.find_multiple_matches(bloque, '"([^"]+)":"([^"]+)')
for res, media_url in matches:
video_urls.append(
[scrapertools.get_filename_from_url(media_url)[-4:] + " (" + label + ") [thevideo.me]", media_url])
[scrapertools.get_filename_from_url(media_url)[-4:] + " (" + res + ") [thevideo.me]", media_url])
return video_urls

View File

@@ -9,6 +9,10 @@
{
"pattern": "(vshare.eu/embed-[a-zA-Z0-9/-]+.html)",
"url": "http://\\1"
},
{
"pattern": "(vshare.eu/[a-zA-Z0-9/-]+.htm)",
"url": "http://\\1"
}
]
},

View File

@@ -10,22 +10,19 @@ from lib import jsunpack
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
if httptools.downloadpage(page_url).code != 200:
return False, "El archivo no existe en vShare o ha sido borrado."
response = httptools.downloadpage(page_url)
if response.code != 200 or "No longer available!" in response.data:
return False, "[vshare] El archivo no existe o ha sido borrado."
else:
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url = " + page_url)
data = httptools.downloadpage(page_url).data
flowplayer = re.search("url: [\"']([^\"']+)", data)
if flowplayer:
return [["FLV", flowplayer.group(1)]]
video_urls = []
try:
jsUnpack = jsunpack.unpack(data)
@@ -35,7 +32,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.debug("Values: " + fields.group(1))
logger.debug("Substract: " + fields.group(2))
substract = int(fields.group(2))
arrayResult = [chr(int(value) - substract) for value in fields.group(1).split(",")]
strResult = "".join(arrayResult)
logger.debug(strResult)
@@ -46,5 +42,4 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
except:
url = scrapertools.find_single_match(data,'<source src="([^"]+)')
video_urls.append(["MP4", url])
return video_urls