Merge pull request #88 from Intel11/ultimo

Updated
Authored by Alfa on 2017-09-16 00:20:56 +02:00, committed by GitHub
5 changed files with 48 additions and 27 deletions

View File

@@ -111,6 +111,8 @@ def findvideos(item):
     match = scrapertools.find_multiple_matches(bloque, '(?is)(?:iframe|script) .*?src="([^"]+)')
     for url in match:
         titulo = "Ver en: %s"
+        if "goo.gl" in url:
+            url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
         if "youtube" in url:
             titulo = "[COLOR = yellow]Ver trailer: %s[/COLOR]"
         if "ad.js" in url or "script" in url:
@@ -123,7 +125,6 @@ def findvideos(item):
             title = titulo,
             fulltitle = item.fulltitle,
             thumbnail = item.thumbnail,
-            server = "",
             url = url
             ))
     itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
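Reviewer note: the two added lines resolve goo.gl short links before server detection by requesting only the headers, not following the redirect, and reading the target from the Location header; the second hunk then drops the hard-coded server = "" so that servertools.get_servers_itemlist can detect the server per URL and fill the "Ver en: %s" / trailer titles via the lambda. A minimal stand-alone sketch of the redirect-resolution idea using only the Python 3 standard library (the patch itself uses the project's httptools wrapper; resolve_short_url is a hypothetical helper name):

```python
# Sketch: expand a short URL by asking for headers only and reading the redirect
# target without following it (mirrors only_headers=True, follow_redirects=False).
import http.client
from urllib.parse import urlparse

def resolve_short_url(url):
    parts = urlparse(url)
    conn_cls = http.client.HTTPSConnection if parts.scheme == "https" else http.client.HTTPConnection
    conn = conn_cls(parts.netloc, timeout=10)
    conn.request("HEAD", parts.path or "/")        # headers only
    response = conn.getresponse()
    return response.getheader("Location", "")      # empty string if it is not a redirect

# e.g. resolve_short_url("https://goo.gl/abc123") returns whatever the short link points to.
```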

View File

@@ -8,10 +8,12 @@ from platformcode import logger
 def test_video_exists(page_url):
     logger.info("(page_url='%s')" % page_url)
-    data = httptools.downloadpage(page_url).data
-    if "Object not found" in data or "longer exists on our servers" in data:
+    data = httptools.downloadpage(page_url)
+    if "Object not found" in data.data or "longer exists on our servers" in data.data:
         return False, "[Fastplay] El archivo no existe o ha sido borrado"
+    if data.code == 500:
+        return False, "[Fastplay] Error interno del servidor"
     return True, ""
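Reviewer note: the fastplay change keeps the whole response object instead of just the body, so the connector can inspect both the page text (.data) and the HTTP status (.code), and it adds a check for a 500 response. A rough equivalent written against the requests library, for readers outside the addon (the real code must keep using httptools):

```python
# Rough equivalent using requests instead of the addon's httptools wrapper.
import requests

def test_video_exists(page_url):
    response = requests.get(page_url, timeout=10)
    body = response.text                            # httptools exposes this as .data
    if "Object not found" in body or "longer exists on our servers" in body:
        return False, "[Fastplay] El archivo no existe o ha sido borrado"
    if response.status_code == 500:                 # httptools exposes this as .code
        return False, "[Fastplay] Error interno del servidor"
    return True, ""
```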

View File

@@ -52,5 +52,6 @@
             "visible": false
         }
     ],
+    "thumbnail": "https://s26.postimg.org/y5arjad1l/rapidvideo1.png",
     "version": 1
 }
 }

View File

@@ -26,12 +26,16 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     data = httptools.downloadpage(page_url).data
     video_urls = []
-    matches = scrapertools.find_multiple_matches(data, 'type\s*:\s*"([^"]+)"\s*,\s*src:"([^"]+)",height\s*:\s*(\d+)')
-    for ext, media_url, calidad in matches:
-        ext = ext.replace("video/", "")
+    matches = scrapertools.find_multiple_matches(data, "type:\"video/([^\"]+)\",src:d\('([^']+)',(.*?)\).+?height:(\d+)")
+    for ext, encoded, code, quality in matches:
+        media_url = decode(encoded, int(code))
         if not media_url.startswith("http"):
-            media_url = "http:%s" % media_url
-        video_urls.append([".%s %sp [streamcherry]" % (ext, calidad), media_url])
+            media_url = "http:" + media_url
+        video_urls.append([".%s %sp [streamango]" % (ext, quality), media_url])
     video_urls.reverse()
     for video_url in video_urls:
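Reviewer note: the rewritten pattern now captures four groups from the embed page: the container extension, the obfuscated src token passed to the page's d(...) call, the numeric key given as its second argument, and the height. A quick demonstration against a fabricated page fragment (the token and key below are made up; the connector itself runs the pattern through scrapertools.find_multiple_matches):

```python
# Fabricated snippet shaped like the embed page; token and key are invented.
import re

sample = "type:\"video/mp4\",src:d('kZxLmN0pQrSt',184),width:1280,height:720"
pattern = r"type:\"video/([^\"]+)\",src:d\('([^']+)',(.*?)\).+?height:(\d+)"
print(re.findall(pattern, sample))
# [('mp4', 'kZxLmN0pQrSt', '184', '720')]
```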
@@ -40,23 +44,36 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     return video_urls
-# Encuentra vídeos del servidor en el texto pasado
-def find_videos(data):
-    encontrados = set()
-    devuelve = []
-    patronvideos = 'streamcherry.com/(?:embed|f)/([A-z0-9]+)'
-    logger.info("#" + patronvideos + "#")
-    matches = re.compile(patronvideos, re.DOTALL).findall(data)
-    for match in matches:
-        titulo = "[streamcherry]"
-        url = "http://streamcherry.com/embed/%s" % match
-        if url not in encontrados:
-            logger.info(" url=" + url)
-            devuelve.append([titulo, url, 'streamcherry'])
-            encontrados.add(url)
-        else:
-            logger.info(" url duplicada=" + url)
-    return devuelve
+def decode(encoded, code):
+    logger.info("encoded '%s', code '%s'" % (encoded, code))
+    _0x59b81a = ""
+    k = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/='
+    k = k[::-1]
+    count = 0
+    for index in range(0, len(encoded) - 1):
+        while count <= len(encoded) - 1:
+            _0x4a2f3a = k.index(encoded[count])
+            count += 1
+            _0x29d5bf = k.index(encoded[count])
+            count += 1
+            _0x3b6833 = k.index(encoded[count])
+            count += 1
+            _0x426d70 = k.index(encoded[count])
+            count += 1
+            _0x2e4782 = ((_0x4a2f3a << 2) | (_0x29d5bf >> 4))
+            _0x2c0540 = (((_0x29d5bf & 15) << 4) | (_0x3b6833 >> 2))
+            _0x5a46ef = ((_0x3b6833 & 3) << 6) | _0x426d70
+            _0x2e4782 = _0x2e4782 ^ code
+            _0x59b81a = str(_0x59b81a) + chr(_0x2e4782)
+            if _0x3b6833 != 64:
+                _0x59b81a = str(_0x59b81a) + chr(_0x2c0540)
+            if _0x3b6833 != 64:
+                _0x59b81a = str(_0x59b81a) + chr(_0x5a46ef)
+    return _0x59b81a
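Reviewer note: the new decode() replaces the removed find_videos() and is a literal translation of the page's obfuscated JavaScript: it walks the token four characters at a time, looks each character up in a reversed base64 alphabet, repacks the sextets into three bytes, and XORs the first byte of every group with the numeric key before appending. To make the transform concrete, here is a hypothetical inverse (encode is not part of the patch) plus a round trip; it assumes decode() from the hunk above is defined in the same module (so the addon's logger is available) and sidesteps padding by using an input whose length is a multiple of 3:

```python
def encode(decoded, code):
    # Hypothetical inverse of decode(), for illustration only.
    # Assumes len(decoded) is a multiple of 3, so no padding handling is needed.
    k = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/='
    k = k[::-1]
    out = ""
    for i in range(0, len(decoded), 3):
        b1 = ord(decoded[i]) ^ code          # first byte of each group is XORed with the key
        b2 = ord(decoded[i + 1])
        b3 = ord(decoded[i + 2])
        out += k[b1 >> 2]
        out += k[((b1 & 3) << 4) | (b2 >> 4)]
        out += k[((b2 & 15) << 2) | (b3 >> 6)]
        out += k[b3 & 63]
    return out

sample = "//cdn.example.org/video.mp4"       # 27 characters, a multiple of 3
token = encode(sample, 42)
assert decode(token, 42) == sample           # round trip through the patch's decode()
```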

View File

@@ -36,7 +36,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     for video_url in matches:
         _hash = scrapertools.find_single_match(video_url, '[A-z0-9\_\-]{40,}')
         hash = _hash[::-1]
-        hash = hash.replace(hash[2:3],"",1)
+        hash = hash.replace(hash[0:1],"",1)
         video_url = video_url.replace(_hash, hash)
         filename = scrapertools.get_filename_from_url(video_url)[-4:]
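Reviewer note: the one-line change alters how the captured hash is mangled. After reversing the token, the new code removes the first occurrence of its first character, which is simply the leading character, whereas the old code removed the first occurrence of whatever character sat at index 2. A toy illustration with a made-up token:

```python
# Made-up token, only to show the string operations; real hashes are 40+ characters.
_hash = "abcXabcXabcXabcXabcXabcXabcXabcXabcX1234"
token = _hash[::-1]                        # reversed copy of the captured hash
old = token.replace(token[2:3], "", 1)     # removes the first occurrence of token[2]
new = token.replace(token[0:1], "", 1)     # always drops the leading character
print(old, new, sep="\n")
```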