Updated

- Code update
This commit is contained in:
Intel1
2019-03-22 14:58:19 -05:00
parent c1571914b7
commit 5048c9f0a1
18 changed files with 4986 additions and 5074 deletions

View File

@@ -147,7 +147,7 @@ def findvideos(item):
def play(item):
logger.info()
-data = scrapertools.cachePage(item.url)
+data = httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.fulltitle

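The recurring change across these files replaces scrapertools.cachePage / cache_page with httptools.downloadpage, which returns a response object whose .data attribute carries the page body. Below is a minimal Python 2 stub of that interface, only to illustrate the calling convention seen in the diff; the real core/httptools also handles cookies, retries and Cloudflare, so this stand-in is an assumption, not the project code.

import urllib2

class HTTPResponse(object):
    # hypothetical stand-in for the response object httptools returns
    def __init__(self, data, code):
        self.data = data  # page body, what every migrated call reads
        self.code = code  # HTTP status code

def downloadpage(url, post=None, headers=None):
    # POST when a body is given, GET otherwise; headers come as
    # [name, value] pairs, matching the calls in this commit
    req = urllib2.Request(url, data=post)
    for name, value in headers or []:
        req.add_header(name, value)
    handle = urllib2.urlopen(req)
    return HTTPResponse(handle.read(), handle.getcode())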
View File

@@ -34,6 +34,8 @@ class Cloudflare:
re.compile('var s,t,o,p,b,r,e,a,k,i,n,g,f[^:]+"([^"]+)":([^\n]+)};', re.DOTALL).findall(response["data"])[0]
self.js_data["op"] = re.compile(var + "([\+|\-|\*|\/])=([^;]+)", re.MULTILINE).findall(response["data"])
self.js_data["wait"] = int(re.compile("\}, ([\d]+)\);", re.MULTILINE).findall(response["data"])[0]) / 1000
self.js_data["params"]["s"] = \
re.compile('<input type="hidden" name="s" value="([^"]+)"').findall(response["data"])[0]
except:
logger.debug("Metodo #1 (javascript): NO disponible")
self.js_data = {}

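The two added lines track a change in Cloudflare's IUAM challenge page: besides the arithmetic puzzle, the answer form now carries a hidden input named "s" that must be posted back with the solution. A short sketch applying the same regex to an invented challenge form:

import re

# invented minimal challenge form, for illustration only
html = '<form id="challenge-form"><input type="hidden" name="s" value="abc123def"></form>'
s_value = re.compile('<input type="hidden" name="s" value="([^"]+)"').findall(html)[0]
print s_value  # -> abc123def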
File diff suppressed because it is too large

View File

@@ -4921,5 +4921,5 @@ msgstr "Cerca Simili"
msgctxt "#70562"
msgid "autoplay"
msgstr "Abilita autoplay in tutti i canali"
msgid "Autoplay (Enable autoplay in all channels)"
msgstr "Autoplay (Abilita autoplay in tutti i canali)"

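The next few files are Kodi localization catalogs in gettext format: msgctxt carries the numeric Kodi string id, msgid the English source string, msgstr the translation; this commit rewords the #70562 autoplay label in each language. A quick sketch of how one such entry splits into its three fields (stdlib only, not how Kodi itself parses these files):

import re

entry = '''msgctxt "#70562"
msgid "Autoplay (Enable autoplay in all channels)"
msgstr "Autoplay (Abilita autoplay in tutti i canali)"'''

ctxt, msgid, msgstr = re.findall(r'msg(?:ctxt|id|str) "([^"]*)"', entry)
print ctxt, "->", msgstr  # -> #70562 -> Autoplay (Abilita autoplay in tutti i canali)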
View File

@@ -4931,11 +4931,6 @@ msgctxt "#70561"
msgid "Search Similar"
msgstr "Buscar Similares"
msgctxt "#70562"
msgid "Autoplay (Enable autoplay on all supported channels)"
msgstr "Autoplay (Habilitar autoplay en todos los canales soportados)"

View File

@@ -4931,11 +4931,6 @@ msgctxt "#70561"
msgid "Search Similar"
msgstr "Buscar Similares"
msgctxt "#70562"
msgid "Autoplay (Enable autoplay on all supported channels)"
msgstr "Autoplay (Habilitar autoplay en todos los canales soportados)"

View File

@@ -4932,12 +4932,5 @@ msgid "Search Similar"
msgstr "Buscar Similares"
msgctxt "#70562"
msgid "autoplay"
msgstr "Habilitar reproducción automática en todos los canales"
msgid "Autoplay (Enable autoplay on all supported channels)"
msgstr "Autoplay (Habilitar autoplay en todos los canales soportados)"

View File

@@ -2,32 +2,17 @@
import re
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
# Existe: http://bitshare.com/files/v1ehsvu3/Nikita.S02E15.HDTV.XviD-ASAP.avi.html
# No existe: http://bitshare.com/files/tn74w9tm/Rio.2011.DVDRip.LATiNO.XviD.by.Glad31.avi.html
-data = scrapertools.cache_page(page_url)
-patron = '<h1>Descargando([^<]+)</h1>'
-matches = re.compile(patron, re.DOTALL).findall(data)
-if len(matches) > 0:
-    return True, ""
-patron = '<h1>(Error - Archivo no disponible)</h1>'
-matches = re.compile(patron, re.DOTALL).findall(data)
-if len(matches) > 0:
-    return False, "File not found"
-patron = '<b>(Por favor seleccione el archivo a cargar)'
-matches = re.compile(patron, re.DOTALL).findall(data)
-if len(matches) > 0:
-    return False, "Enlace no válido"
+data = httptools.downloadpage(page_url).data
+if "Error - Archivo no disponible" in data or "Por favor seleccione el archivo a cargar" in data:
+    return False, "Archivo no encontrado"
+return True, ""

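Besides the httptools switch, the bitshare check collapses three per-message regexes into plain substring tests on the downloaded page, which is both faster and easier to read. The new shape of the check, as a self-contained sketch (the fetch function is passed in so the snippet stands alone):

def test_video_exists(page_url, fetch):
    # fetch is any callable returning an object with a .data body,
    # e.g. the downloadpage stub sketched earlier
    data = fetch(page_url).data
    if ("Error - Archivo no disponible" in data  # file removed
            or "Por favor seleccione el archivo a cargar" in data):  # bad link
        return False, "Archivo no encontrado"
    return True, ""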
View File

@@ -3,30 +3,25 @@
import re
import urllib
from core import httptools
from core import scrapertools
from platformcode import logger
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
page_url = page_url.replace("amp;", "")
-data = scrapertools.cache_page(page_url)
+data = httptools.downloadpage(page_url).data
logger.info("data=" + data)
video_urls = []
patron = "video_src.*?(http.*?)%22%2C%22video_timestamp"
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for match in matches:
videourl = match
logger.info(match)
videourl = videourl.replace('%5C', '')
videourl = urllib.unquote(videourl)
video_urls.append(["[facebook]", videourl])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls

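The facebook extractor pulls video_src out of percent-encoded page source, so each match needs the %5C backslash escapes stripped and then a urllib.unquote pass. A worked example on an invented capture:

import urllib

raw = 'http%3A%5C%2F%5C%2Fvideo.example.com%5C%2Fv.mp4'  # hypothetical match
videourl = urllib.unquote(raw.replace('%5C', ''))
print videourl  # -> http://video.example.com/v.mp4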
View File

@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
+from core import httptools
from core import scrapertools
from platformcode import logger
@@ -11,8 +12,8 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
-data = scrapertools.cache_page(page_url)
-media_url = scrapertools.get_match(data, '\'file\': \'([^"]+)\',')
+data = httptools.downloadpage(page_url).data
+media_url = scrapertools.find_single_match(data, '\'file\': \'([^"]+)\',')
video_urls = []
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [fakingstv]", media_url])

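Several files in this commit also swap scrapertools.get_match for scrapertools.find_single_match. In common scrapertools implementations (an assumption here, not something this diff shows) the difference is failure behavior: get_match raises when the pattern misses, while find_single_match returns an empty string. A sketch of the latter:

import re

def find_single_match(data, patron, index=0):
    try:
        # DOTALL so '.' spans newlines, matching patterns like 'file\: "([^"]+)"'
        return re.findall(patron, data, flags=re.DOTALL)[index]
    except:
        return ""  # no match: empty string instead of an exception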
View File

@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
+from core import httptools
from core import scrapertools
from platformcode import logger
@@ -15,7 +16,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
enlace = "no"
post2 = "username=" + user + "&password=" + password
login_url = "http://filesmonster.com/api/public/login"
-data1 = scrapertools.cache_page(login_url, post=post2)
+data1 = httptools.downloadpage(login_url, post=post2).data
partes1 = data1.split('"')
estado = partes1[3]
if estado != 'success': alerta = "[error de filesmonster premium]: " + estado
@@ -24,7 +25,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
id = id.replace("http://filesmonster.com/download.php", "")
post = id.replace("?", "")
url = 'http://filesmonster.com/api/public/premiumDownload'
-data2 = scrapertools.cache_page(url, post=post)
+data2 = httptools.downloadpage(url, post=post).data
partes = data2.split('"')

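The filesmonster premium path is a two-step API exchange: POST the credentials to /api/public/login, check the status, then POST the file id to /api/public/premiumDownload. The diff parses both responses by splitting on double quotes; if the endpoints return JSON (an assumption suggested by that parsing, not confirmed here), json.loads is the sturdier route. A hedged sketch with guessed key names:

import json
import urllib

def premium_download(fetch, user, password, file_id):
    # fetch(url, post=...) returns an object with a .data body
    post = urllib.urlencode({"username": user, "password": password})
    login = json.loads(fetch("http://filesmonster.com/api/public/login", post=post).data)
    if login.get("status") != "success":  # key name is a guess
        return None
    post = urllib.urlencode({"id": file_id})  # parameter name is a guess
    return json.loads(fetch("http://filesmonster.com/api/public/premiumDownload", post=post).data)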
View File

@@ -2,27 +2,16 @@
import re
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
# Existe: http://freakshare.com/files/wy6vs8zu/4x01-mundo-primitivo.avi.html
# No existe:
-data = scrapertools.cache_page(page_url)
-patron = '<h1 class="box_heading" style="text-align:center;">([^<]+)</h1>'
-matches = re.compile(patron, re.DOTALL).findall(data)
-if len(matches) > 0:
-    return True, ""
-else:
-    patron = '<div style="text-align:center;"> (Este archivo no existe)'
-    matches = re.compile(patron, re.DOTALL).findall(data)
-    if len(matches) > 0:
-        return False, matches[0]
+data = httptools.downloadpage(page_url).data
+if "Este archivo no existe" in data:
+    return False, "Archivo no existe"
+return True, ""

View File

@@ -1,19 +1,16 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
# Vídeo borrado: http://www.gigasize.com/get/097fadecgh7pf
# Video erróneo:
-data = scrapertools.cache_page(page_url)
+data = httptools.downloadpage(page_url).data
if '<h2 class="error">Download error</h2>' in data:
    return False, "El enlace no es válido<br/>o ha sido borrado de gigasize"
-else:
-    return True, ""
+return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):

View File

@@ -2,23 +2,21 @@
import re
from core import httptools
from core import scrapertools
from platformcode import logger
# Returns an array of possible video url's from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
# Lo extrae a partir de flashvideodownloader.org
if page_url.startswith("http://"):
url = 'http://www.flashvideodownloader.org/download.php?u=' + page_url
else:
url = 'http://www.flashvideodownloader.org/download.php?u=http://video.google.com/videoplay?docid=' + page_url
logger.info("url=" + url)
-data = scrapertools.cache_page(url)
+data = httptools.downloadpage(url).data
# Extrae el vídeo
newpatron = '</script>.*?<a href="(.*?)" title="Click to Download">'

View File

@@ -3,6 +3,7 @@
import re
import urllib
+from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger
@@ -10,17 +11,13 @@ from platformcode import logger
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
-data = scrapertools.cache_page(page_url)
+data = httptools.downloadpage(page_url).data
# Submit
post = {}
r = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', data)
for name, value in r:
post[name] = value
post.update({'method_free': 'Free Download'})
-data = scrapertools.cache_page(page_url, post=urllib.urlencode(post))
+data = httptools.downloadpage(page_url, post=urllib.urlencode(post)).data
# Get link
sPattern = '''<div id="player_code">.*?<script type='text/javascript'>(eval.+?)</script>'''
r = re.findall(sPattern, data, re.DOTALL | re.I)

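This extractor shows the common two-request free-download dance: scrape the hidden inputs of the first page, add the 'Free Download' marker, POST the form back, then unpack the eval-packed player JavaScript (the diff imports lib.jsunpack for that last step). A sketch of the form-repost half, reusing the diff's own hidden-input regex:

import re
import urllib

def build_free_post(data):
    post = {}
    # same hidden-input pattern the diff uses
    for name, value in re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', data):
        post[name] = value
    post.update({'method_free': 'Free Download'})
    return urllib.urlencode(post)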
View File

@@ -2,6 +2,7 @@
import urllib
+from core import httptools
from core import scrapertools
from platformcode import config, logger
@@ -12,18 +13,16 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
if config.get_setting("premium", server="onefichier"):
user = config.get_setting("user", server="onefichier")
password = config.get_setting("password", server="onefichier")
url = "https://1fichier.com/login.pl"
logger.info("url=" + url)
post_parameters = {"mail": user, "pass": password, "lt": "on", "purge": "on", "valider": "Send"}
post = urllib.urlencode(post_parameters)
logger.info("post=" + post)
-data = scrapertools.cache_page(url, post=post)
+data = httptools.downloadpage(url, post=post).data
# logger.info("data="+data)
cookies = config.get_cookie_data()

View File

@@ -1,13 +1,13 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("page_url='%s')" % page_url)
data = scrapertools.cache_page(url=page_url)
data = httptools.downloadpage(url=page_url).data
if "<h1>404 Not Found</h1>" in data:
return False, "El archivo no existe<br/>en streamcloud o ha sido borrado."
elif "<h1>File Not Found</h1>" in data:
@@ -20,52 +20,40 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
# Lo pide una vez
headers = [
['User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14']]
-data = scrapertools.cache_page(page_url, headers=headers)
+data = httptools.downloadpage(page_url, headers=headers).data
try:
-    media_url = scrapertools.get_match(data, 'file\: "([^"]+)"')
+    media_url = scrapertools.find_single_match(data, 'file\: "([^"]+)"')
except:
post = ""
matches = scrapertools.find_multiple_matches(data, '<input.*?name="([^"]+)".*?value="([^"]*)">')
for inputname, inputvalue in matches:
post += inputname + "=" + inputvalue + "&"
post = post.replace("op=download1", "op=download2")
-data = scrapertools.cache_page(page_url, post=post)
+data = httptools.downloadpage(page_url, post=post).data
if 'id="justanotice"' in data:
logger.info("data=" + data)
logger.info("Ha saltado el detector de adblock")
return []
# Extrae la URL
-media_url = scrapertools.get_match(data, 'file\: "([^"]+)"')
+media_url = scrapertools.find_single_match(data, 'file\: "([^"]+)"')
video_urls = []
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [streamcloud]", media_url+"|Referer="+page_url])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls
if __name__ == "__main__":
import getopt
import sys
options, arguments = getopt.getopt(sys.argv[1:], "", ["video_url=", "login=", "password="])
video_url = ""
login = ""
password = ""
logger.info("%s %s" % (str(options), str(arguments)))
for option, argument in options:
print option, argument
if option == "--video_url":
@@ -76,15 +64,12 @@ if __name__ == "__main__":
password = argument
else:
assert False, "Opcion desconocida"
if video_url == "":
print "ejemplo de invocacion"
print "streamcloud --video_url http://xxx --login usuario --password secreto"
else:
if login != "":
premium = True
else:
premium = False
print get_video_url(video_url, premium, login, password)

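Two details worth noting in the streamcloud flow: the form repost flips op=download1 to op=download2 (the server's marker for the post-wait request), and the final media URL keeps a "|Referer=" suffix, which is Kodi's convention for attaching HTTP headers to a playable URL after a pipe. A one-liner illustrating the suffix:

def with_referer(media_url, page_url):
    # Kodi reads key=value header pairs after '|' on a media URL
    return media_url + "|Referer=" + page_url

print with_referer("http://cdn.example.com/v.mp4", "http://streamcloud.eu/abcd")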
View File

@@ -1,48 +1,39 @@
# -*- coding: utf-8 -*-
from core.scrapertools import *
import urllib
from core import httptools
from core import scrapertools
host = "http://vidtodo.com"
id_server = "vidtodo"
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
-data = cache_page(page_url).replace('"', "'")
video_urls = []
+data = httptools.downloadpage(page_url).data
+data = data.replace('"', "'")
page_url_post = find_single_match(data, "<Form method='POST' action='([^']+)'>")
imhuman = "&imhuman=" + find_single_match(data, "name='imhuman' value='([^']+)'").replace(" ", "+")
import urllib
post = urllib.urlencode({k: v for k, v in find_multiple_matches(data, "name='([^']+)' value='([^']*)'")}) + imhuman
time.sleep(1)
-data = cache_page(page_url_post, post=post)
-sources = get_match(data, 'sources: \[([^\]]+)\]')
video_urls = []
+data = httptools.downloadpage(page_url_post, post=post).data
+sources = scrapertools.find_single_match(data, 'sources: \[([^\]]+)\]')
for media_url in find_multiple_matches(sources, '"([^"]+)"'):
if media_url.endswith(".mp4"):
video_urls.append([".mp4 [%s]" % id_server, media_url])
if media_url.endswith(".m3u8"):
video_urls.append(["M3U8 [%s]" % id_server, media_url])
if media_url.endswith(".smil"):
-smil_data = cache_page(media_url)
-rtmp = get_match(smil_data, 'base="([^"]+)"')
-playpaths = find_multiple_matches(smil_data, 'src="([^"]+)" height="(\d+)"')
-mp4 = "http:" + get_match(rtmp, '(//[^:]+):') + "/%s/" + \
-get_match(data, '"Watch video ([^"]+")').replace(' ', '.') + ".mp4"
+smil_data = httptools.downloadpage(media_url).data
+rtmp = scrapertools.find_single_match(smil_data, 'base="([^"]+)"')
+playpaths = scrapertools.find_multiple_matches(smil_data, 'src="([^"]+)" height="(\d+)"')
+mp4 = "http:" + scrapertools.find_single_match(rtmp, '(//[^:]+):') + "/%s/" + \
+scrapertools.find_single_match(data, '"Watch video ([^"]+")').replace(' ', '.') + ".mp4"
for playpath, inf in playpaths:
-h = scrapertools.find_single_match(playpath, 'h=([a-z0-9]+)')
+h = scrapertools.find_single_match(playpath, 'h=([a-z0-9]+)')
video_urls.append([".mp4 [%s] %s" % (id_server, inf), mp4 % h])
video_urls.append(["RTMP [%s] %s" % (id_server, inf), "%s playpath=%s" % (rtmp, playpath)])
for video_url in video_urls:
logger.info("video_url: %s - %s" % (video_url[0], video_url[1]))
return video_urls
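For .smil playlists, the vidtodo code reads an rtmp base URL plus one playpath per quality from the SMIL body, and emits each as "base playpath=src" for Kodi's RTMP input. A compact sketch on an invented SMIL document:

import re

smil_data = ('<smil><meta base="rtmp://cdn.example.com/vod"/>'
             '<video src="mp4:v_480.mp4?h=ab12" height="480"/></smil>')
rtmp = re.findall('base="([^"]+)"', smil_data)[0]
for playpath, height in re.findall('src="([^"]+)" height="(\d+)"', smil_data):
    print "%sp -> %s playpath=%s" % (height, rtmp, playpath)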