Merge pull request #488 from Intel11/master

Updated
This commit is contained in:
Alfa
2018-11-15 10:33:31 -05:00
committed by GitHub
8 changed files with 53 additions and 198 deletions

View File

@@ -219,173 +219,17 @@ def idioma(item):
return itemlist
def findvideos(item):
logger.info()
itemlist = []
try:
filtro_idioma = config.get_setting("filterlanguages", item.channel)
filtro_enlaces = config.get_setting("filterlinks", item.channel)
except:
filtro_idioma = 3
filtro_enlaces = 2
dict_idiomas = {'Español': 2, 'Latino': 1, 'Subtitulado': 0}
data = httptools.downloadpage(item.url).data
if filtro_enlaces != 0:
list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas, "online", item)
if list_enlaces:
itemlist.append(item.clone(action="", title="Enlaces Online", text_color=color1,
text_bold=True))
itemlist.extend(list_enlaces)
if filtro_enlaces != 1:
list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas, "descarga", item)
if list_enlaces:
itemlist.append(item.clone(action="", title="Enlaces Descarga", text_color=color1,
text_bold=True))
itemlist.extend(list_enlaces)
tmdb.set_infoLabels(item, __modo_grafico__)
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required by AutoPlay
autoplay.start(itemlist, item)
if itemlist:
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
text_color="magenta"))
if item.extra != "library":
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, fulltitle = item.fulltitle
))
return itemlist
# def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
# logger.info()
# lista_enlaces = []
# matches = []
# if type == "online": t_tipo = "Ver Online"
# if type == "descarga": t_tipo = "Descargar"
# data = data.replace("\n", "")
# if type == "online":
# patron = '(?is)class="playex.*?sheader'
# bloque1 = scrapertools.find_single_match(data, patron)
# patron = '(?is)#(option-[^"]+).*?png">([^<]+)'
# match = scrapertools.find_multiple_matches(data, patron)
# for scrapedoption, language in match:
# scrapedserver = ""
# lazy = ""
# if "lazy" in bloque1:
# lazy = "lazy-"
# patron = '(?s)id="%s".*?metaframe.*?%ssrc="([^"]+)' % (scrapedoption, lazy)
# url = scrapertools.find_single_match(bloque1, patron)
# if "goo.gl" in url:
# url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
# if "drive.php" in url:
# scrapedserver = "gvideo"
# if "player" in url:
# scrapedserver = scrapertools.find_single_match(url, 'player/(\w+)')
# if "ok" in scrapedserver: scrapedserver = "okru"
# matches.append([url, scrapedserver, "", language.strip(), t_tipo])
# bloque2 = scrapertools.find_single_match(data, '(?s)box_links.*?dt_social_single')
# bloque2 = bloque2.replace("\t", "").replace("\r", "")
# patron = '(?s)optn" href="([^"]+)'
# patron += '.*?alt="([^\.]+)'
# patron += '.*?src.*?src="[^>]+"?/>([^<]+)'
# patron += '.*?src="[^>]+"?/>([^<]+)'
# patron += '.*?/span>([^<]+)'
# matches.extend(scrapertools.find_multiple_matches(bloque2, patron))
# filtrados = []
# for match in matches:
# scrapedurl = match[0]
# scrapedserver = match[1]
# scrapedcalidad = match[2]
# language = match[3]
# scrapedtipo = match[4]
# if t_tipo.upper() not in scrapedtipo.upper():
# continue
# title = " Mirror en %s (" + language + ")"
# if len(scrapedcalidad.strip()) > 0:
# title += " (Calidad " + scrapedcalidad.strip() + ")"
#
# if filtro_idioma == 3 or item.filtro:
# lista_enlaces.append(item.clone(title=title, action="play", text_color=color2,
# url=scrapedurl, server=scrapedserver,
# extra=item.url, contentThumbnail = item.thumbnail,
# language=language))
# else:
# idioma = dict_idiomas[language]
# if idioma == filtro_idioma:
# lista_enlaces.append(item.clone(title=title, action="play", text_color=color2,
# url=scrapedurl, server=scrapedserver,
# extra=item.url, contentThumbnail = item.thumbnail,
# language=language))
# else:
# if language not in filtrados:
# filtrados.append(language)
# lista_enlaces = servertools.get_servers_itemlist(lista_enlaces, lambda i: i.title % i.server.capitalize())
# if filtro_idioma != 3:
# if len(filtrados) > 0:
# title = "Mostrar también enlaces filtrados en %s" % ", ".join(filtrados)
# lista_enlaces.append(item.clone(title=title, action="findvideos", url=item.url, text_color=color3,
# filtro=True))
# return lista_enlaces
#
#
# def play(item):
# logger.info()
# itemlist = []
# if "api.cinetux" in item.url or item.server == "okru" or "drive.php" in item.url or "youtube" in item.url:
# data = httptools.downloadpage(item.url, headers={'Referer': item.extra}).data.replace("\\", "")
# id = scrapertools.find_single_match(data, 'img src="[^#]+#(.*?)"')
# item.url = "http://docs.google.com/get_video_info?docid=" + id
# if item.server == "okru":
# item.url = "https://ok.ru/videoembed/" + id
# if item.server == "youtube":
# item.url = "https://www.youtube.com/embed/" + id
# elif "links" in item.url or "www.cinetux.me" in item.url:
# data = httptools.downloadpage(item.url).data
# scrapedurl = scrapertools.find_single_match(data, '<a href="(http[^"]+)')
# if scrapedurl == "":
# scrapedurl = scrapertools.find_single_match(data, '(?i)frame.*?src="(http[^"]+)')
# if scrapedurl == "":
# scrapedurl = scrapertools.find_single_match(data, 'replace."([^"]+)"')
# elif "goo.gl" in scrapedurl:
# scrapedurl = httptools.downloadpage(scrapedurl, follow_redirects=False, only_headers=True).headers.get(
# "location", "")
# item.url = scrapedurl
# item.server = ""
# itemlist.append(item.clone())
# itemlist = servertools.get_servers_itemlist(itemlist)
# for i in itemlist:
# i.thumbnail = i.contentThumbnail
# return itemlist
def get_source(url, referer=None):
logger.info()
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
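
Note: get_source() flattens newlines, tabs and &nbsp; before scraping so that attribute-walking regexes need no newline handling. A minimal illustration; the HTML snippet is invented for the example:

    import re

    # Invented snippet: the site spreads the attributes over several lines.
    html = '<span class="title">Latino\n</span>\n\t<a data-type="movie" data-post="7" data-nume="1">'
    flat = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", html)
    print(re.findall(r'data-type="([^"]+)" data-post="(\d+)" data-nume="(\d+)', flat))
    # [('movie', '7', '1')]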
def findvideos(item):
import urllib
logger.info()
itemlist=[]
data = httptools.downloadpage(item.url).data
patron = 'class="title">.*?src.*?/>([^>]+)</span>.*?data-type="([^"]+).*?data-post="(\d+)".*?data-nume="(\d+)'
matches = re.compile(patron, re.DOTALL).findall(data)
for language, tp, pt, nm in matches:
language = language.strip()
post = {'action':'doo_player_ajax', 'post':pt, 'nume':nm, 'type':tp}
post = urllib.urlencode(post)
new_data = httptools.downloadpage(CHANNEL_HOST+'wp-admin/admin-ajax.php', post=post, headers={'Referer':item.url}).data
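
The post above targets wp-admin/admin-ajax.php with action=doo_player_ajax, the stock AJAX player endpoint of the DooPlay WordPress theme. A standalone sketch of the same round trip; the requests library and the fetch_embed name are assumptions, the channel itself must keep using Alfa's httptools:

    import re
    import requests  # assumption: stand-in for Alfa's httptools

    def fetch_embed(host, referer, post_id, nume, dtype):
        # DooPlay answers admin-ajax.php posts with a small HTML fragment
        # containing the player iframe; host is assumed to end with '/'.
        resp = requests.post(
            host + "wp-admin/admin-ajax.php",
            data={"action": "doo_player_ajax", "post": post_id,
                  "nume": nume, "type": dtype},
            headers={"Referer": referer})
        # Pull the iframe src out of the fragment, as the channel does.
        match = re.search(r"src='([^']+)'", resp.text)
        return match.group(1) if match else ""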
@@ -398,23 +242,45 @@ def findvideos(item):
else:
title = ''
url = scrapertools.find_single_match(new_data, "src='([^']+)'")
url = get_url(url)
if "mega" not in url and "mediafire" not in url:
itemlist.append(Item(channel=item.channel, title='%s' + title, url=url, action='play', quality=item.quality,
language=IDIOMAS[language], infoLabels=item.infoLabels))
patron = "<a class='optn' href='([^']+)'.*?<img src='.*?>([^<]+)<.*?<img src='.*?>([^<]+)<"
matches = re.compile(patron, re.DOTALL).findall(data)
#logger.info("Intel66a")
#scrapertools.printMatches(matches)
for hidden_url, quality, language in matches:
if not config.get_setting('unify'):
title = ' [%s][%s]' % (quality, IDIOMAS[language])
else:
title = ''
new_data = httptools.downloadpage(hidden_url).data
url = scrapertools.find_single_match(new_data, 'id="link" href="([^"]+)"')
url = url.replace('\\/', '/')
url = get_url(url)
if "mega" not in url and "mediafire" not in url:
itemlist.append(Item(channel=item.channel, title='%s' + title, url=url, action='play', quality=quality,
language=IDIOMAS[language], infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
return itemlist
def get_url(url):
if "cinetux.me" in url:
d1 = httptools.downloadpage(url).data
if "mail" in url:
id = scrapertools.find_single_match(d1, '<img src="[^#]+#(\w+)')
#logger.info("Intel77b %s" %id)
url = "https://my.mail.ru/video/embed/" + id
else:
url = scrapertools.find_single_match(d1, 'document.location.replace\("([^"]+)')
#logger.info("Intel22a %s" %d1)
#logger.info("Intel77a %s" %url)
url = url.replace("povwideo","powvideo")
return url

View File

@@ -75,7 +75,7 @@ def lista(item):
else:
scrapedurl = urlparse.urljoin(host, scrapedurl)
if not scrapedthumbnail.startswith("https"):
scrapedthumbnail = "https:%s" % scrapedthumbnail
scrapedthumbnail = host + "%s" % scrapedthumbnail
if duration:
scrapedtitle = "%s - %s" % (duration, scrapedtitle)
if '>HD<' in quality:
@@ -83,7 +83,6 @@ def lista(item):
itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, contentThumbnail=scrapedthumbnail,
fanart=scrapedthumbnail))
# Extract the next-page marker
if item.extra:
next_page = scrapertools.find_single_match(data, '<li class="next">.*?from_videos\+from_albums:(\d+)')

View File

@@ -27,7 +27,7 @@ list_servers = ['rapidvideo', 'streamango', 'fastplay', 'openload', 'netu', 'vid
__channel__='repelis'
host = "https://repelis.io"
host = "https://repelisgo.io"
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
@@ -93,7 +93,7 @@ def peliculas(item):
bloquex = scrapertools.find_single_match(data, 'window.__NUXT__={.*?movies":(.*?\])')
dict = jsontools.load(bloquex)
else:
dd = httptools.downloadpage("https://repelis.io/graph", post=jsontools.dump(item.post), headers=headers).data
dd = httptools.downloadpage(host + "/graph", post=jsontools.dump(item.post), headers=headers).data
dict = jsontools.load(dd)["data"]["movies"]
for datos in dict:
scrapedurl = host + "/pelicula/" + datos["slug"] + "-" + datos["id"]
@@ -222,6 +222,7 @@ def findvideos(item):
def play(item):
logger.info()
itemlist = []
url1 = httptools.downloadpage(host + item.url, follow_redirects=False, only_headers=True).headers.get("location", "")
if "storage" in url1:

View File

@@ -64,7 +64,7 @@ def list_all(item):
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
url = scrapedurl
scrapedtitle = scrapedtitle.lower().replace('enlace permanente a', '').capitalize()
contentSerieName = scrapedtitle
action = 'seasons'

View File

@@ -16,9 +16,9 @@ def test_video_exists(page_url):
def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
page_url = page_url.replace("/v/","/api/sources/")
page_url = page_url.replace("/v/","/api/source/")
data = httptools.downloadpage(page_url, post={}).data
data = jsontools.load(data)
for videos in data["data"]:
video_urls.append([videos["label"] + " [fembed]", videos["file"]])
video_urls.append([videos["label"] + " [fembed]", "https://www.fembed.com" + videos["file"]])
return video_urls
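
For reference, the patched fembed lookup as a self-contained sketch. The /api/source/ endpoint and label/file response shape come from the code above; the use of requests, the fixed www.fembed.com host and the fembed_sources name are assumptions:

    import requests  # assumption: stand-in for Alfa's httptools

    def fembed_sources(video_id):
        # POST (empty body) to /api/source/<id>; the JSON reply lists
        # label/file pairs, "file" being relative to the fembed host.
        resp = requests.post("https://www.fembed.com/api/source/%s" % video_id, data={})
        out = []
        for src in resp.json().get("data", []):
            out.append([src["label"] + " [fembed]", "https://www.fembed.com" + src["file"]])
        return out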

View File

@@ -20,10 +20,12 @@ def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
packed = scrapertools.find_multiple_matches(data, "(?s)<script>\s*eval(.*?)\s*</script>")
for pack in packed:
unpacked = jsunpack.unpack(pack)
if "ldaa" in unpacked:
videos = scrapertools.find_multiple_matches(unpacked, 'lda.="([^"]+)')
video_urls = []
for video in videos:
if not video.startswith("//"):

View File

@@ -5,10 +5,10 @@
"patterns": [
{
"pattern": "uptobox.com/([a-z0-9]+)",
"url": "http://uptobox.com/\\1"
"url": "http://uptostream.com/iframe/\\1"
},
{
"pattern": "uptostream.com/iframe/([a-z0-9]+)",
"pattern": "uptostream.com/(?:iframe/|)([a-z0-9]+)",
"url": "http://uptostream.com/iframe/\\1"
}
]
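
Pattern tables like this map a recognized URL onto a canonical embed URL via the captured group. A sketch of how such a table is typically applied; normalize() is a hypothetical helper, not Alfa's actual servertools code:

    import re

    # The two entries above, as Python literals:
    patterns = [
        {"pattern": r"uptobox.com/([a-z0-9]+)",
         "url": r"http://uptostream.com/iframe/\1"},
        {"pattern": r"uptostream.com/(?:iframe/|)([a-z0-9]+)",
         "url": r"http://uptostream.com/iframe/\1"},
    ]

    def normalize(url):
        # First entry whose pattern matches wins; \1 carries the video id.
        for entry in patterns:
            match = re.search(entry["pattern"], url)
            if match:
                return match.expand(entry["url"])
        return url

    print(normalize("http://uptobox.com/abc123"))
    # -> http://uptostream.com/iframe/abc123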

View File

@@ -8,32 +8,19 @@ from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url)
if data.code == 404:
return False, "[Vivo] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
enc_data = scrapertools.find_single_match(data, "Core.InitializeStream \('(.*?)'\)")
logger.debug(enc_data)
enc_data = scrapertools.find_single_match(data, 'data-stream="([^"]+)')
dec_data = base64.b64decode(enc_data)
logger.debug(dec_data)
for url in eval(dec_data):
video_urls.append(['vivo', url])
video_urls.append(['vivo', dec_data])
return video_urls
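
The patched extractor reads the stream URL from the player's data-stream attribute and base64-decodes it. As a self-contained sketch; requests and the vivo_stream name are assumptions:

    import base64
    import re
    import requests  # assumption: stand-in for Alfa's httptools

    def vivo_stream(page_url):
        html = requests.get(page_url).text
        # The player stores the stream URL base64-encoded in data-stream="...".
        enc = re.search(r'data-stream="([^"]+)', html)
        return base64.b64decode(enc.group(1)) if enc else None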