deleted
@@ -1,67 +0,0 @@
{
    "id": "yaske",
    "name": "Yaske",
    "active": true,
    "adult": false,
    "language": "es",
    "banner": "yaske.png",
    "fanart": "https://github.com/master-1970/resources/raw/master/images/fanart/yaske.png",
    "thumbnail": "yaske.png",
    "version": 1,
    "changes": [
        {
            "date": "27/06/17",
            "description": "Desactivar por falta de contenidos"
        },
        {
            "date": "04/06/17",
            "description": "Desactivar por falta de contenidos"
        },
        {
            "date": "15/03/17",
            "description": "limpieza código"
        },
        {
            "date": "01/02/17",
            "description": "Añadir imagenes, sinopsis, etc..."
        },
        {
            "date": "18/01/17",
            "description": "Uso de httptools"
        },
        {
            "date": "12/12/16",
            "description": "Cambios en la web"
        },
        {
            "date": "01/07/16",
            "description": "Eliminado código innecesario."
        },
        {
            "date": "29/04/16",
            "description": "Adaptar a Novedades Peliculas e Infantiles"
        }
    ],
    "categories": [
        "latino",
        "movie"
    ],
    "settings": [
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Incluir en Novedades - Peliculas",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_infantiles",
            "type": "bool",
            "label": "Incluir en Novedades - Infantiles",
            "default": false,
            "enabled": true,
            "visible": true
        }
    ]
}
@@ -1,263 +0,0 @@
# -*- coding: utf-8 -*-

import re
import base64

from core import channeltools
from core import config
from core import httptools
from core import logger
from core import scrapertoolsV2
from core import servertools
from core import tmdb
from core.item import Item


HOST = 'http://www.yaske.ro'
parameters = channeltools.get_channel_parameters('yaske')
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
color1, color2, color3 = ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E']

def mainlist(item):
    logger.info()
    itemlist = []
    item.url = HOST
    item.text_color = color2
    item.fanart = fanart_host
    thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/verdes/%s.png"

    itemlist.append(item.clone(title="Novedades", action="peliculas", text_bold=True, viewcontent='movies',
                               url=HOST + "/ultimas-y-actualizadas",
                               thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
    itemlist.append(item.clone(title="Estrenos", action="peliculas", text_bold=True,
                               url=HOST + "/genre/premieres", thumbnail=thumbnail % 'estrenos'))
    itemlist.append(item.clone(title="", folder=False))

    itemlist.append(Item(channel=item.channel, title="Filtrar por:", fanart=fanart_host, folder=False,
                         text_color=color3, text_bold=True, thumbnail=thumbnail_host))
    itemlist.append(item.clone(title=" Género", action="menu_buscar_contenido", text_color=color1, text_italic=True,
                               extra="genre", thumbnail=thumbnail % 'generos', viewmode="thumbnails"))
    itemlist.append(item.clone(title=" Idioma", action="menu_buscar_contenido", text_color=color1, text_italic=True,
                               extra="audio", thumbnail=thumbnail % 'idiomas'))
    itemlist.append(item.clone(title=" Calidad", action="menu_buscar_contenido", text_color=color1, text_italic=True,
                               extra="quality", thumbnail=thumbnail % 'calidad'))
    itemlist.append(item.clone(title=" Año", action="menu_buscar_contenido", text_color=color1, text_italic=True,
                               extra="year", thumbnail=thumbnail % 'year'))

    itemlist.append(item.clone(title="", folder=False))
    itemlist.append(item.clone(title="Buscar por título", action="search", thumbnail=thumbnail % 'buscar'))

    return itemlist


def search(item, texto):
    logger.info()
    itemlist = []

    try:
        # http://www.yaske.ro/search/?q=los+pitufos
        item.url = HOST + "/search/?q=" + texto.replace(' ', '+')
        item.extra = ""
        itemlist.extend(peliculas(item))
        if itemlist[-1].title == ">> Página siguiente":
            item_pag = itemlist[-1]
            itemlist = sorted(itemlist[:-1], key=lambda i: i.contentTitle)
            itemlist.append(item_pag)
        else:
            itemlist = sorted(itemlist, key=lambda i: i.contentTitle)

        return itemlist

    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def newest(categoria):
    logger.info()
    item = Item()
    try:
        if categoria == 'peliculas':
            item.url = HOST + "/ultimas-y-actualizadas"
        elif categoria == 'infantiles':
            item.url = HOST + "/search/?q=&genre%5B%5D=animation"
        else:
            return []

        itemlist = peliculas(item)
        if itemlist[-1].title == ">> Página siguiente":
            itemlist.pop()

    # Catch the exception so the "newest" section is not interrupted when one channel fails
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist


def peliculas(item):
    logger.info()
    itemlist = []
    url_next_page = ""

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)

    patron = '<article class.*?'
    patron += '<a href="([^"]+)">.*?'
    patron += '<img src="([^"]+)".*?'
    patron += '<aside class="item-control down">(.*?)</aside>.*?'
    patron += '<small class="pull-right text-muted">([^<]+)</small>.*?'
    patron += '<h2 class.*?>([^<]+)</h2>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    # Pagination
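    # The site returns up to 60 results per page; the channel splits each page into two
    # lists of 30, tracking which half is pending in the 'next_page' flag ('a'/'b').
    # The real "Siguiente" link is only scraped once the second half has been served.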
    if item.next_page != 'b':
        if len(matches) > 30:
            url_next_page = item.url
            matches = matches[:30]
            next_page = 'b'
    else:
        matches = matches[30:]
        next_page = 'a'
        patron_next_page = 'Anteriores</a> <a href="([^"]+)" class="btn btn-default ".*?Siguiente'
        matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
        if len(matches_next_page) > 0:
            url_next_page = matches_next_page[0]

    for scrapedurl, scrapedthumbnail, idiomas, year, scrapedtitle in matches:
        patronidiomas = "<img src='([^']+)'"
        matchesidiomas = re.compile(patronidiomas, re.DOTALL).findall(idiomas)

        idiomas_disponibles = []
        for idioma in matchesidiomas:
            if idioma.endswith("la_la.png"):
                idiomas_disponibles.append("LAT")
            elif idioma.endswith("en_en.png"):
                idiomas_disponibles.append("VO")
            elif idioma.endswith("en_es.png"):
                idiomas_disponibles.append("VOSE")
            elif idioma.endswith("es_es.png"):
                idiomas_disponibles.append("ESP")

        if idiomas_disponibles:
            idiomas_disponibles = "[" + "/".join(idiomas_disponibles) + "]"
        else:
            idiomas_disponibles = ""

        contentTitle = scrapertoolsV2.decodeHtmlentities(scrapedtitle.strip())
        title = ("%s %s" % (contentTitle, idiomas_disponibles)).strip()

        itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
                             thumbnail=scrapedthumbnail, contentTitle=contentTitle,
                             infoLabels={"year": year}, text_color=color1))

    # Fetch the basic metadata for every movie using multiple threads
    tmdb.set_infoLabels(itemlist)

    # Append the pagination item when needed
    if url_next_page:
        itemlist.append(
            Item(channel=item.channel, action="peliculas", title=">> Página siguiente", thumbnail=thumbnail_host,
                 url=url_next_page, next_page=next_page, folder=True, text_color=color3, text_bold=True))

    return itemlist


def menu_buscar_contenido(item):
    logger.info(item)

    data = httptools.downloadpage(item.url).data
    patron = '<select name="' + item.extra + '(.*?)</select>'
    data = scrapertoolsV2.get_match(data, patron)

    # Extract the entries
    patron = "<option value='([^']+)'>([^<]+)</option>"
    matches = re.compile(patron, re.DOTALL).findall(data)

    itemlist = []
    for scrapedvalue, scrapedtitle in matches:
        thumbnail = ""

        if item.extra == 'genre':
            if scrapedtitle.strip() in ['Documental', 'Short', 'News']:
                continue

            url = HOST + "/search/?q=&genre%5B%5D=" + scrapedvalue
            filename = scrapedtitle.lower().replace(' ', '%20')
            if filename == "ciencia%20ficción":
                filename = "ciencia%20ficcion"
            thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/verdes/%s.png" \
                        % filename

        elif item.extra == 'year':
            url = HOST + "/search/?q=&year=" + scrapedvalue
            thumbnail = item.thumbnail
        else:
            # http://www.yaske.ro/search/?q=&quality%5B%5D=c9
            # http://www.yaske.ro/search/?q=&audio%5B%5D=es
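            # "%5B%5D" is the URL-encoded "[]", i.e. a PHP-style array parameter
            # (quality[]=c9, audio[]=es).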
url = HOST + "/search/?q=&" + item.extra + "%5B%5D=" + scrapedvalue
|
||||
thumbnail = item.thumbnail
|
||||
|
||||
itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=url, text_color=color1,
|
||||
thumbnail=thumbnail, contentType='movie', folder=True, viewmode="movie_with_plot"))
|
||||
|
||||
if item.extra in ['genre', 'audio', 'year']:
|
||||
return sorted(itemlist, key=lambda i: i.title.lower(), reverse=item.extra == 'year')
|
||||
else:
|
||||
return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
    logger.info()
    itemlist = list()
    sublist = list()

    # Download the page
    data = httptools.downloadpage(item.url).data

    if not item.plot:
        item.plot = scrapertoolsV2.find_single_match(data, '>Sinopsis</dt> <dd>([^<]+)</dd>')
        item.plot = scrapertoolsV2.decodeHtmlentities(item.plot)

    patron = '<option value="([^"]+)"[^>]+'
    patron += '>([^<]+).*?</i>([^<]+)'
    matches = re.compile(patron, re.DOTALL).findall(data)

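    # yaske wraps some links in its own player page: an eval()'d base64 payload whose
    # decoded HTML contains the real <iframe src="...">. Those URLs are fetched and
    # decoded below before being handed to servertools.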
    for url, idioma, calidad in matches:
        if 'yaske' in url:
            data = httptools.downloadpage(url).data
            url_enc = scrapertoolsV2.find_single_match(data, "eval.*?'(.*?)'")
            url_dec = base64.b64decode(url_enc)
            url = scrapertoolsV2.find_single_match(url_dec, 'iframe src="(.*?)"')
        sublist.append(item.clone(action="play", url=url, folder=False, text_color=color1, quality=calidad.strip(),
                                  language=idioma.strip()))

    sublist = servertools.get_servers_itemlist(sublist, lambda i: "Ver en %s %s" % (i.server, i.quality), True)

    # Add the servers found, grouping them by language
    for k in ["Español", "Latino", "Subtitulado", "Ingles"]:
        lista_idioma = filter(lambda i: i.language == k, sublist)
        if lista_idioma:
            itemlist.append(Item(channel=item.channel, title=k, fanart=item.fanart, folder=False,
                                 text_color=color2, text_bold=True, thumbnail=thumbnail_host))
            itemlist.extend(lista_idioma)

    # Insert the "Buscar trailer" and "Añadir a la videoteca" items
    if itemlist and item.extra != "library":
        title = "%s [Buscar trailer]" % (item.contentTitle)
        itemlist.insert(0, item.clone(channel="trailertools", action="buscartrailer",
                                      text_color=color3, title=title, viewmode="list"))

        if config.get_videolibrary_support():
            itemlist.append(Item(channel=item.channel, title="Añadir película a la videoteca",
                                 action="add_pelicula_to_library", url=item.url, text_color="green",
                                 contentTitle=item.contentTitle, extra="library", thumbnail=thumbnail_host))

    return itemlist
@@ -1,53 +0,0 @@
{
    "active": true,
    "changes": [
        {
            "date": "18/07/2017",
            "description": "Versión inicial"
        }
    ],
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "(?s)https://youtube.googleapis.com.*?docid=([^(?:&|\")]+)",
                "url": "http://docs.google.com/get_video_info?docid=\\1"
            },
            {
                "pattern": "(?s)https://drive.google.com/file/d/(.*?)/preview",
                "url": "http://docs.google.com/get_video_info?docid=\\1"
            }
        ]
    },
    "free": true,
    "id": "gvideo",
    "name": "gvideo",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "Incluir en lista negra",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "Incluir en lista de favoritos",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "version": 1
}
@@ -1,40 +0,0 @@
# -*- coding: utf-8 -*-

import urllib

from core import httptools
from core import logger
from core import scrapertools


def test_video_exists(page_url):
    response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url})
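    # get_video_info answers with URL-encoded text (see the double unquote_plus below),
    # hence the "+"-encoded phrases being matched against the raw body.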
if "no+existe" in response.data:
|
||||
return False, "[gvideo] El video no existe o ha sido borrado"
|
||||
if "Se+ha+excedido+el" in response.data:
|
||||
return False, "[gvideo] Se ha excedido el número de reproducciones permitidas"
|
||||
return True, ""
|
||||
|
||||
|
||||
def get_video_url(page_url, user="", password="", video_password=""):
    video_urls = []
    urls = []
    response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url})
    cookies = ""
    cookie = response.headers["set-cookie"].split("HttpOnly, ")
    for c in cookie:
        cookies += c.split(";", 1)[0] + "; "
    data = response.data.decode('unicode-escape')
    data = urllib.unquote_plus(urllib.unquote_plus(data))
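    # Kodi's "URL|Header=value" convention: everything after the pipe is sent as HTTP
    # headers when the player requests the stream, so the session cookies collected
    # above travel with the video URL.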
    headers_string = "|Cookie=" + cookies
    url_streams = scrapertools.find_single_match(data, 'url_encoded_fmt_stream_map=(.*)')
    streams = scrapertools.find_multiple_matches(url_streams,
                                                 'itag=(\d+)&url=(.*?)(?:;.*?quality=.*?(?:,|&)|&quality=.*?(?:,|&))')
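    # Google/YouTube "itag" format codes mapped to their nominal resolutions.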
    itags = {'18': '360p', '22': '720p', '34': '360p', '35': '480p', '37': '1080p', '43': '360p', '59': '480p'}
    for itag, video_url in streams:
        if video_url not in urls:
            video_url += headers_string
            video_urls.append([itags[itag], video_url])
            urls.append(video_url)
    video_urls.sort(key=lambda v: int(v[0].replace("p", "")))
    return video_urls