Merge pull request #4 from Intel11/patch-1

New: downace, vernovelasonline, gvideo; updated: cinetux
Authored by Alfa, 2017-07-31 17:02:13 -04:00; committed by GitHub
9 changed files with 502 additions and 64 deletions

View File

@@ -142,7 +142,7 @@ def findvideos(item):
# Opción "Añadir esta película a la biblioteca de XBMC"
if item.extra != "library":
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la biblioteca", text_color="green",
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
filtro=True, action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle,
extra="library"))

View File

@@ -9,6 +9,10 @@
"fanart": "cinetux.jpg",
"version": 1,
"changes": [
{
"date": "31/07/2017",
"description": "Actualizado por cambio de estructura de la página"
},
{
"date": "12/05/2017",
"description": "Arreglada paginación y enlaces directos"
@@ -121,4 +125,4 @@
            ]
        }
    ]
-}
+}

View File

@@ -10,6 +10,7 @@ from core import servertools
from core import tmdb
from core.item import Item
CHANNEL_HOST = "http://www.cinetux.net/"
# Configuracion del canal
@@ -31,12 +32,15 @@ def mainlist(item):
    itemlist = []
    item.viewmode = viewmode
-   itemlist.append(item.clone(title="Películas", text_color=color2, action="", text_bold=True))
-   itemlist.append(item.clone(action="peliculas", title=" Novedades", url=CHANNEL_HOST,
+   data = httptools.downloadpage(CHANNEL_HOST).data
+   total = scrapertools.find_single_match(data, "TENEMOS\s<b>(.*?)</b>")
+   titulo = "Peliculas (%s)" %total
+   itemlist.append(item.clone(title=titulo, text_color=color2, action="", text_bold=True))
+   itemlist.append(item.clone(action="peliculas", title=" Novedades", url=CHANNEL_HOST + "pelicula",
                               thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres"
                                         "/0/Directors%20Chair.png",
                               text_color=color1))
-   itemlist.append(item.clone(action="vistas", title=" Más vistas", url="http://www.cinetux.net/mas-vistos/",
+   itemlist.append(item.clone(action="destacadas", title=" Destacadas", url="http://www.cinetux.net/mas-vistos/",
                               thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres"
                                         "/0/Favorites.png",
                               text_color=color1))
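
The rewritten mainlist now scrapes the site's movie counter into the header entry. A rough illustration of what the new regex expects (the sample HTML is invented):

data = "ACTUALMENTE TENEMOS <b>9.241</b> PELICULAS"  # invented sample
total = scrapertools.find_single_match(data, "TENEMOS\s<b>(.*?)</b>")  # -> "9.241"
titulo = "Peliculas (%s)" %total                                       # -> "Peliculas (9.241)"
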
@@ -130,22 +134,22 @@ def peliculas(item):
    # Descarga la página
    data = httptools.downloadpage(item.url).data
    # Extrae las entradas (carpetas)
-   patron = '<div class="item">.*?<div class="audio">\s*([^<]*)<.*?href="([^"]+)".*?src="([^"]+)"' \
-            '.*?<h3 class="name"><a.*?>([^<]+)</a>'
+   patron = '(?s)class="(?:result-item|item movies)">.*?<img src="([^"]+)'
+   patron += '.*?alt="([^"]+)"'
+   patron += '(.*?)'
+   patron += 'href="([^"]+)"'
+   patron += '.*?(?:<span>|<span class="year">)([^<]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
-   for calidad, scrapedurl, scrapedthumbnail, scrapedtitle in matches:
+   for scrapedthumbnail, scrapedtitle, calidad, scrapedurl, scrapedyear in matches:
+       calidad = scrapertools.find_single_match(calidad, '.*?quality">([^<]+)')
        try:
-           fulltitle, year = scrapedtitle.rsplit("(", 1)
-           year = scrapertools.get_match(year, '(\d{4})')
+           fulltitle = scrapedtitle
+           year = scrapedyear.replace("&nbsp;","")
            if "/" in fulltitle:
                fulltitle = fulltitle.split(" /", 1)[0]
-               scrapedtitle = "%s (%s)" % (fulltitle, year)
+           scrapedtitle = "%s (%s)" % (fulltitle, year)
        except:
            fulltitle = scrapedtitle
            year = ""
        if calidad:
            scrapedtitle += " [%s]" % calidad
        new_item = item.clone(action="findvideos", title=scrapedtitle, fulltitle=fulltitle,
@@ -155,12 +159,13 @@ def peliculas(item):
        new_item.infoLabels['year'] = int(year)
        itemlist.append(new_item)
    try:
-       tmdb.set_infoLabels(itemlist, __modo_grafico__)
+       #tmdb.set_infoLabels(itemlist, __modo_grafico__)
+       a = 1
    except:
        pass
    # Extrae el paginador
-   next_page_link = scrapertools.find_single_match(data, '<a href="([^"]+)"\s*><span [^>]+>&raquo;</span>')
+   next_page_link = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)')
    if next_page_link:
        itemlist.append(item.clone(action="peliculas", title=">> Página siguiente", url=next_page_link,
                                   text_color=color3))
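
The pagination change is worth a note: instead of scraping the visible » arrow, the new regex reads the page's own <link rel="next"> hint. A minimal sketch (sample HTML invented):

data = '<link rel="next" href="http://www.cinetux.net/pelicula/page/2/" />'  # invented sample
next_page_link = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)')
# -> "http://www.cinetux.net/pelicula/page/2/"
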
@@ -168,7 +173,7 @@ def peliculas(item):
    return itemlist


-def vistas(item):
+def destacadas(item):
    logger.info()
    itemlist = []
    item.text_color = color2
@@ -177,10 +182,14 @@ def vistas(item):
    data = httptools.downloadpage(item.url).data
    # Extrae las entradas (carpetas)
-   patron = '<li class="item">.*?href="([^"]+)".*?src="([^"]+)"' \
-            '.*?<h3 class="name"><a.*?>([^<]+)</a>'
-   matches = scrapertools.find_multiple_matches(data, patron)
-   for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
+   bloque = scrapertools.find_single_match(data, 'peliculas_destacadas.*?class="single-page')
+   patron = '(?s)title="([^"]+)"'
+   patron += '.href="([^"]+)"'
+   patron += '.*?src="([^"]+)'
+   matches = scrapertools.find_multiple_matches(bloque, patron)
+   for scrapedtitle, scrapedurl, scrapedthumbnail in matches:
+       scrapedurl = "http://www.cinetux.net" + scrapedurl
+       scrapedtitle = scrapedtitle.replace("Ver ","")
        new_item = item.clone(action="findvideos", title=scrapedtitle, fulltitle=scrapedtitle,
                              url=scrapedurl, thumbnail=scrapedthumbnail,
                              contentTitle=scrapedtitle, contentType="movie")
@@ -189,7 +198,7 @@ def vistas(item):
    # Extrae el paginador
    next_page_link = scrapertools.find_single_match(data, '<a href="([^"]+)"\s+><span [^>]+>&raquo;</span>')
    if next_page_link:
-       itemlist.append(item.clone(action="vistas", title=">> Página siguiente", url=next_page_link, text_color=color3))
+       itemlist.append(item.clone(action="destacadas", title=">> Página siguiente", url=next_page_link, text_color=color3))
    return itemlist
@@ -200,15 +209,15 @@ def generos(item):
    # Descarga la página
    data = httptools.downloadpage(item.url).data
-   bloque = scrapertools.find_single_match(data, '<div class="sub_title">Géneros</div>(.*?)</ul>')
+   bloque = scrapertools.find_single_match(data, '(?s)dos_columnas">(.*?)</ul>')
    # Extrae las entradas
-   patron = '<li><a href="([^"]+)">(.*?)</li>'
+   patron = '<li><a href="/([^"]+)">(.*?)</li>'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for scrapedurl, scrapedtitle in matches:
        scrapedurl = CHANNEL_HOST + scrapedurl
        scrapedtitle = scrapertools.htmlclean(scrapedtitle).strip()
        scrapedtitle = unicode(scrapedtitle, "utf8").capitalize().encode("utf8")
-       if scrapedtitle == "Erotico" and config.get_setting("adult_mode") == '0':
+       if scrapedtitle == "Erotico" and config.get_setting("adult_mode") == 0:
            continue
        itemlist.append(item.clone(action="peliculas", title=scrapedtitle, url=scrapedurl))
@@ -226,7 +235,7 @@ def idioma(item):
    return itemlist


def findvideos(item):
    logger.info()
    itemlist = []
@@ -241,10 +250,10 @@ def findvideos(item):
    # Busca el argumento
    data = httptools.downloadpage(item.url).data
-   year = scrapertools.find_single_match(data, '<h1><span>.*?rel="tag">([^<]+)</a>')
+   year = scrapertools.find_single_match(item.title, "\(([0-9]+)")
    if year and item.extra != "library":
-       item.infoLabels['year'] = int(year)
+       item.infoLabels['year'] = int(year)
    # Ampliamos datos en tmdb
    if not item.infoLabels['plot']:
        try:
@@ -271,15 +280,15 @@ def findvideos(item):
    if itemlist:
        itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
-                                  text_color="magenta"))
-   # Opción "Añadir esta película a la videoteca de XBMC"
+                                  text_color="magenta"))
+   # Opción "Añadir esta película a la videoteca"
    if item.extra != "library":
        if config.get_videolibrary_support():
            itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
                                 filtro=True, action="add_pelicula_to_library", url=item.url,
                                 infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle,
                                 extra="library"))
    else:
        itemlist.append(item.clone(title="No hay enlaces disponibles", action="", text_color=color3))
@@ -289,66 +298,84 @@ def findvideos(item):
def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
    logger.info()
    lista_enlaces = []
    matches = []
+   if type == "online" : t_tipo = "Ver Online"
+   if type == "descarga": t_tipo = "Descargar"
+   data = data.replace("\n","")
    if type == "online":
-       patron = '<a href="#([^"]+)" data-toggle="tab">([^<]+)</a>'
-       bloques = scrapertools.find_multiple_matches(data, patron)
-       for id, language in bloques:
-           patron = 'id="' + id + '">.*?<iframe src="([^"]+)"'
-           url = scrapertools.find_single_match(data, patron)
-           matches.append([url, "", language])
-   bloque2 = scrapertools.find_single_match(data, '<div class="table-link" id="%s">(.*?)</table>' % type)
-   patron = 'tr>[^<]+<td>.*?href="([^"]+)".*?src.*?title="([^"]+)"' \
-            '.*?src.*?title="([^"]+)".*?src.*?title="(.*?)"'
+       patron = '(?is)class="playex.*?visualizaciones'
+       bloque1 = scrapertools.find_single_match(data, patron)
+       patron = '(?is)#(option-[^"]+).*?png">([^<]+)'
+       match = scrapertools.find_multiple_matches(data, patron)
+       for scrapedoption, language in match:
+           lazy = ""
+           if "lazy" in bloque1:
+               lazy = "lazy-"
+           patron = '(?s)id="%s".*?metaframe.*?%ssrc="([^"]+)' %(scrapedoption, lazy)
+           #logger.info("Intel22 %s" %patron)
+           url = scrapertools.find_single_match(bloque1, patron)
+           if "goo.gl" in url:
+               url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location","")
+           server = servertools.get_server_from_url(url)
+           matches.append([url, server, "", language.strip(), t_tipo])
+   bloque2 = scrapertools.find_single_match(data, '(?s)box_links.*?dt_social_single')
+   bloque2 = bloque2.replace("\t","").replace("\r","")
+   patron = '(?s)optn" href="([^"]+)'
+   patron += '.*?title="([^"]+)'
+   patron += '.*?src.*?src="[^>]+"\s/>([^<]+)'
+   patron += '.*?src="[^>]+"\s/>([^<]+)'
+   patron += '.*?/span>([^<]+)'
+   matches.extend(scrapertools.find_multiple_matches(bloque2, patron))
    filtrados = []
    for match in matches:
        scrapedurl = match[0]
-       language = match[2].strip()
-       title = " Mirror en %s (" + language + ")"
-       if len(match) == 4:
-           title += " (Calidad " + match[3].strip() + ")"
+       scrapedserver = match[1]
+       scrapedcalidad = match[2]
+       scrapedlanguage = match[3]
+       scrapedtipo = match[4]
+       if t_tipo.upper() not in scrapedtipo.upper():
+           continue
+       title = " Mirror en " + scrapedserver.split(".")[0] + " (" + scrapedlanguage + ")"
+       if len(scrapedcalidad.strip()) > 0:
+           title += " (Calidad " + scrapedcalidad.strip() + ")"
        if filtro_idioma == 3 or item.filtro:
            lista_enlaces.append(item.clone(title=title, action="play", text_color=color2,
-                                           url=scrapedurl, idioma=language, extra=item.url))
+                                           url=scrapedurl, server=scrapedserver, idioma=scrapedlanguage, extra=item.url))
        else:
            idioma = dict_idiomas[language]
            if idioma == filtro_idioma:
-               lista_enlaces.append(item.clone(title=title, text_color=color2, action="play", url=scrapedurl,
+               lista_enlaces.append(item.clone(title=title, text_color=color2, action="play", url=scrapedurl,
                                                extra=item.url))
            else:
                if language not in filtrados:
                    filtrados.append(language)
+   lista_enlaces = servertools.get_servers_itemlist(lista_enlaces, lambda i: i.title % i.server)
    if filtro_idioma != 3:
        if len(filtrados) > 0:
            title = "Mostrar enlaces filtrados en %s" % ", ".join(filtrados)
            lista_enlaces.append(item.clone(title=title, action="findvideos", url=item.url, text_color=color3,
                                            filtro=True))
    return lista_enlaces
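
A detail from the rewritten extraction: goo.gl short links are resolved without downloading the target page, by requesting only the headers, not following the redirect, and reading Location. A minimal sketch (the short URL is invented):

url = "https://goo.gl/Abc123"  # invented example
response = httptools.downloadpage(url, follow_redirects=False, only_headers=True)
url = response.headers.get("location", "")  # redirect target, or "" if none
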
def play(item):
    logger.info()
    itemlist = []
    video_urls = []
    if "api.cinetux" in item.url:
        data = httptools.downloadpage(item.url, headers={'Referer': item.extra}).data.replace("\\", "")
        bloque = scrapertools.find_single_match(data, 'sources:\s*(\[.*?\])')
        if bloque:
            bloque = eval(bloque)
            video_urls = []
            for b in bloque:
                ext = b["type"].replace("video/", "")
                video_urls.append([".%s %sp [directo]" % (ext, b["label"]), b["file"], b["label"]])
            video_urls.sort(key=lambda vdu: vdu[2])
            for v in video_urls:
                itemlist.append([v[0], v[1]])
        id = scrapertools.find_single_match(data, 'img src="[^#]+#(.*?)"')
        item.url = "https://youtube.googleapis.com/embed/?status=ok&hl=es&allow_embed=1&ps=docs&partnerid=30&hd=1&autoplay=0&cc_load_policy=1&showinfo=0&docid=" + id
        itemlist = servertools.find_video_items(data = item.url)
    elif "links" in item.url:
        data = httptools.downloadpage(item.url).data
        scrapedurl = scrapertools.find_single_match(data, '<a href="(http[^"]+)')
        if scrapedurl == "":
            scrapedurl = scrapertools.find_single_match(data, '(?i)<frame src="(http[^"]+)')
        if "goo.gl" in scrapedurl:
            scrapedurl = httptools.downloadpage(scrapedurl, follow_redirects=False, only_headers=True).headers.get("location", "")
        item.url = scrapedurl
        itemlist = servertools.find_video_items(data = item.url)
    else:
        return [item]
    return itemlist
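
The api.cinetux branch evaluates a JW Player-style sources literal lifted from the page. eval() accepts it because it is plain list/dict syntax; json.loads would be the stricter alternative for the same payload. A sketch with an invented sources array:

import json

bloque = '[{"file": "http://example.com/v.mp4", "label": "720", "type": "video/mp4"}]'  # invented sample
for b in json.loads(bloque):
    ext = b["type"].replace("video/", "")           # -> "mp4"
    print(".%s %sp [directo]" % (ext, b["label"]))  # -> ".mp4 720p [directo]"
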

View File

@@ -0,0 +1,30 @@
{
    "id": "vernovelasonline",
    "name": "Ver Novelas Online",
    "active": true,
    "adult": false,
    "language": "es",
    "thumbnail": "https://s16.postimg.org/g4lzydrmd/vernovelasonline1.png",
    "bannermenu": "https://s16.postimg.org/w44nhxno5/vernovelasonline2.png",
    "version": 1,
    "changes": [
        {
            "date": "28/06/2017",
            "description": "Primera version"
        }
    ],
    "categories": [
        "tvshow",
        "latino"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}

View File

@@ -0,0 +1,216 @@
# -*- coding: utf-8 -*-

from core import httptools
from core import logger
from core import scrapertools
from core import servertools
from core.item import Item

host = "http://ver-novelas-online.com/"


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(Item(channel = item.channel, title = "Ultimos capitulos subidos", action = "capitulos_ultimos", url = host))
    itemlist.append(Item(channel = item.channel, title = "Novelas por letra", action = "novelas_letra", url = host + "video/category/letra-"))
    itemlist.append(Item(channel = item.channel, title = "Novelas en emision (Sin caratulas)", action = "novelas_emision", url = host))
    itemlist.append(Item(channel = item.channel, title = ""))
    itemlist.append(Item(channel = item.channel, title = "Buscar novela", action = "search", url = host + "?s="))
    return itemlist


def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        item.channel = "vernovelasonline"
        item.extra = "newest"
        item.url = "http://www.ver-novelas-online.com/"
        item.action = "capitulos_ultimos"
        itemlist = capitulos_ultimos(item)
        # Se captura la excepcion, para no interrumpir al canal novedades si un canal falla
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist


def novelas_emision(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = data.replace("\n","")
    block = scrapertools.find_single_match(data, '<aside id="text-2.*?</aside>')
    match = scrapertools.find_multiple_matches(block, 'a href="([^"]+)">([^<]+)')
    for url, titulo in match:
        itemlist.append(Item(channel = item.channel,
                             action = "capitulos_de_una_novela",
                             title = titulo,
                             url = url,
                             extra1 = titulo
                             ))
    return itemlist


def novelas_letra(item):
    logger.info()
    itemlist = []
    for letra in "abcdefghijklmnopqrstuvwxyz":
        itemlist.append(item.clone(title = letra.upper(), url = item.url + letra, action = "lista"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ","+")
    item.url = "http://ver-novelas-online.com/?s=" + texto
    item.extra = "busca"
    if texto != '':
        return lista(item)
    else:
        return []


def lista(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = data.replace("\n","")
    accion = "capitulos_de_una_novela"
    patron = """itemprop="url" href="([^"]+)".*?mark">([^<]*)</a>.*?href="([^"]+)"""
    if item.extra == "busca":
        patron = """itemprop="url" href="([^"]+)".*?mark">([^<]*)</a>.*?href='([^']+)"""
        accion = "findvideos"
    matches = scrapertools.find_multiple_matches(data, patron)
    for url, title, thumbnail in matches:
        mtitle = title.replace("CAPITULOS COMPLETOS","").title()
        mextra1 = scrapertools.find_single_match(mtitle, "(?i)(.*?) Capitulo")
        mextra2 = scrapertools.find_single_match(mtitle, "(?i)(cap.*?[0-9]+)").title()
        if mextra1 == "":
            mextra1 = mextra2 = mtitle
        itemlist.append(Item(channel = item.channel,
                             action = accion,
                             title = mtitle,
                             url = url,
                             thumbnail = thumbnail,
                             fanart = thumbnail,
                             plot = "prueba de plot",
                             extra1 = mextra1,
                             extra2 = mextra2
                             ))
    mpagina = scrapertools.find_single_match(data, 'page-numbers" href="([^"]+)')
    pagina = scrapertools.find_single_match(mpagina, "page/([0-9]+)")
    if len(pagina) > 0 and "busca" not in item.extra:
        itemlist.append(
            Item(channel = item.channel,
                 action = "lista",
                 title = "Pagina: " + pagina,
                 url = mpagina,
                 extra = item.extra
                 ))
    return itemlist
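
The extra1/extra2 pair produced here drives later navigation (extra1 names the novela, extra2 the episode). Given a typical listing title, the two regexes split it like this (sample title invented):

mtitle = "La Dona Capitulo 12"  # invented sample, already passed through .title()
mextra1 = scrapertools.find_single_match(mtitle, "(?i)(.*?) Capitulo")          # -> "La Dona"
mextra2 = scrapertools.find_single_match(mtitle, "(?i)(cap.*?[0-9]+)").title()  # -> "Capitulo 12"
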


def capitulos_ultimos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = data.replace("\n","")
    patron = "<div class='item'>.*?<a href='([^']+)"
    patron += ".*?title='([^']+)"
    patron += ".*?img src='([^']+)"
    matches = scrapertools.find_multiple_matches(data, patron)
    for url, title, thumbnail in matches:
        mextra1 = scrapertools.find_single_match(title, "(?i)(.*?) Capitulo")
        mextra2 = scrapertools.find_single_match(title, "(?i)(cap.*?[0-9]+)").title()
        itemlist.append(
            Item(channel = item.channel,
                 action = "findvideos",
                 title = title.title(),
                 url = url,
                 thumbnail = thumbnail,
                 extra1 = mextra1,
                 extra2 = mextra2
                 ))
    mpagina = scrapertools.find_single_match(data, 'next" href="([^"]+)')
    pagina = scrapertools.find_single_match(mpagina, "page/([0-9]+)")
    if "newest" not in item.extra:
        itemlist.append(
            Item(channel = item.channel,
                 action = "capitulos_ultimos",
                 title = "Pagina: " + pagina,
                 url = mpagina
                 ))
    return itemlist


def capitulos_de_una_novela(item):
    logger.info()
    itemlist = []
    url = item.url
    data = httptools.downloadpage(url).data
    if len(item.thumbnail) == 0:
        item.thumbnail = scrapertools.find_single_match(data, 'og:image" content="([^"]+)')
    matches = scrapertools.find_multiple_matches(data, '<a target="_blank" href="([^"]+)">([^<]+)')
    for url, titulo in matches:
        mextra2 = scrapertools.find_single_match(titulo, "(?i)(cap.*?[0-9]+)")
        itemlist.append(
            Item(channel = item.channel,
                 action = "findvideos",
                 title = titulo,
                 thumbnail = item.thumbnail,
                 url = url,
                 extra1 = item.extra1,
                 extra2 = mextra2
                 ))
    itemlist.append(Item(channel = item.channel, title = "Novela: [COLOR=blue]" + item.extra1 + "[/COLOR]"))
    # PARA INVERTIR EL ORDEN DE LA LISTA
    itemlist = itemlist[::-1]
    return itemlist


def findvideos(item):
    data = httptools.downloadpage(item.url).data
    data = data.replace("&quot;","").replace("\n","").replace("\\","")
    itemlist = servertools.find_video_items(data = data)
    for video in itemlist:
        video.channel = item.channel
        video.action = "play"
        video.thumbnail = item.thumbnail
        video.fulltitle = item.extra1 + " / " + item.extra2
        video.title = "Ver en: " + video.server
    itemlist.append(Item(channel = item.channel))
    block = scrapertools.find_single_match(data, '<div class="btn-group-justified">.*?</div>')
    if len(block) > 0:
        matches = scrapertools.find_multiple_matches(block, 'href="([^"]+).*?hidden-xs">([^<]+)')
        for url, xs in matches:
            accion = "findvideos"
            capitulo = scrapertools.find_single_match(url, "capitulo-([^/]+)")
            if "DE CAPITULOS" in xs:
                xs = "LISTA" + xs + ": " + item.extra1
                accion = "capitulos_de_una_novela"
            else:
                xs += ": " + capitulo
                capitulo = "Capitulo " + capitulo
            itemlist.append(
                Item(channel = item.channel,
                     title = "[COLOR=yellow]" + xs.title() + "[/COLOR]",
                     action = accion,
                     url = url,
                     thumbnail = item.thumbnail,
                     extra1 = item.extra1,
                     extra2 = capitulo
                     ))
    else:
        url = scrapertools.find_single_match(data, "<p><a href='(.*?)'\s+style='float:right")
        capitulo = scrapertools.find_single_match(item.extra2, "(?i)capitulo ([^/]+)")
        itemlist.append(
            Item(channel = item.channel,
                 title = "[COLOR=yellow]Listado de Capitulos: " + item.extra1.title() + "[/COLOR]",
                 action = "capitulos_de_una_novela",
                 url = url,
                 thumbnail = item.thumbnail,
                 extra1 = item.extra1,
                 extra2 = capitulo
                 ))
    return itemlist
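
findvideos leaves server detection to servertools.find_video_items, which roughly scans arbitrary text for URLs of known video hosts and returns ready-to-play Items. A minimal sketch (the embed markup is invented, and assumes a matching server connector is installed):

data = '<iframe src="http://ok.ru/videoembed/123456"></iframe>'  # invented sample
for video in servertools.find_video_items(data = data):
    print(video.server + " -> " + video.url)
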

View File

@@ -0,0 +1,49 @@
{
    "active": true,
    "changes": [
        {
            "date": "12/07/2017",
            "description": "Versión inicial"
        }
    ],
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "downace.com/embed/(.*?)",
                "url": "https://downace.com/embed/\\1"
            }
        ]
    },
    "free": true,
    "id": "downace",
    "name": "downace",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "Incluir en lista negra",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "Incluir en lista de favoritos",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "https://s12.postimg.org/4n9fmdy7x/logo-downace.png",
    "version": 1
}

View File

@@ -0,0 +1,24 @@
# -*- coding: utf-8 -*-

from core import httptools
from core import logger
from core import scrapertools


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if "no longer exists" in data:
        return False, "[Downace] El fichero ha sido borrado"
    return True, ""


def get_video_url(page_url, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    video_urls = []
    videourl = scrapertools.find_single_match(data, 'controls preload.*?src="([^"]+)')
    video_urls.append([".MP4 [downace]", videourl])
    return video_urls
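
Like every server connector, downace exposes the test_video_exists/get_video_url pair; the framework calls them roughly like this (the embed id is invented):

page_url = "https://downace.com/embed/abc123"  # invented id
exists, message = test_video_exists(page_url)
if exists:
    for title, url in get_video_url(page_url):
        print(title + " -> " + url)
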

View File

@@ -0,0 +1,48 @@
{
    "active": true,
    "changes": [
        {
            "date": "18/07/2017",
            "description": "Versión inicial"
        }
    ],
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "(?s)https://youtube.googleapis.com.*?docid=([^(?:&|\")]+)",
                "url": "http://docs.google.com/get_video_info?docid=\\1"
            }
        ]
    },
    "free": true,
    "id": "gvideo",
    "name": "gvideo",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "Incluir en lista negra",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "Incluir en lista de favoritos",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "version": 1
}

View File

@@ -0,0 +1,40 @@
# -*- coding: utf-8 -*-

import urllib

from core import httptools
from core import logger
from core import scrapertools


def test_video_exists(page_url):
    response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url})
    if "no+existe" in response.data:
        return False, "[gvideo] El video no existe o ha sido borrado"
    if "Se+ha+excedido+el" in response.data:
        return False, "[gvideo] Se ha excedido el número de reproducciones permitidas"
    return True, ""


def get_video_url(page_url, user="", password="", video_password=""):
    video_urls = []
    urls = []
    response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url})
    cookies = ""
    cookie = response.headers["set-cookie"].split("HttpOnly, ")
    for c in cookie:
        cookies += c.split(";", 1)[0] + "; "
    data = response.data.decode('unicode-escape')
    data = urllib.unquote_plus(urllib.unquote_plus(data))
    headers_string = "|Cookie=" + cookies
    url_streams = scrapertools.find_single_match(data, 'url_encoded_fmt_stream_map=(.*)')
    streams = scrapertools.find_multiple_matches(url_streams,
                                                 'itag=(\d+)&url=(.*?)(?:;.*?quality=.*?(?:,|&)|&quality=.*?(?:,|&))')
    itags = {'18':'360p', '22':'720p', '34':'360p', '35':'480p', '37':'1080p', '43':'360p', '59':'480p'}
    for itag, video_url in streams:
        if not video_url in urls:
            video_url += headers_string
            video_urls.append([itags[itag], video_url])
            urls.append(video_url)
    video_urls.sort(key=lambda video_urls: int(video_urls[0].replace("p", "")))
    return video_urls
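
The trailing |Cookie=... appended to each stream relies on the player convention that anything after "|" in a media URL is sent as HTTP request headers during playback, so the googlevideo host receives the session cookies captured above. Roughly (values invented):

video_url = "https://redirector.googlevideo.com/videoplayback?itag=22"  # invented sample
cookies = "SESSION=abc123; "                                            # invented cookie value
video_url += "|Cookie=" + cookies  # left of "|" is the stream, right is attached as headers
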