Channels removed

seriesverde
alltorrent
yaske
Intel1
2018-08-15 09:52:57 -05:00
parent 5e828f73b1
commit d7b7006709
6 changed files with 0 additions and 1108 deletions

alltorrent.json

@@ -1,39 +0,0 @@
{
"id": "alltorrent",
"name": "Alltorrent",
"active": false,
"adult": false,
"language": ["cast"],
"thumbnail": "altorrent.png",
"fanart": "altorrent.jpg",
"categories": [
"torrent",
"movie"
],
"settings": [
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_torrent",
"type": "bool",
"label": "Incluir en Novedades - Torrent",
"default": true,
"enabled": true,
"visible": true
}
]
}

alltorrent.py

@@ -1,314 +0,0 @@
# -*- coding: utf-8 -*-
import re
import sys
import urllib
import urlparse
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import tmdb
from lib import generictools
host = 'http://alltorrent.net/'
__modo_grafico__ = config.get_setting('modo_grafico', 'alltorrent')
def mainlist(item):
logger.info()
itemlist = []
thumb_pelis = get_thumb("channels_movie.png")
thumb_pelis_hd = get_thumb("channels_movie_hd.png")
thumb_series = get_thumb("channels_tvshow.png")
thumb_series_hd = get_thumb("channels_tvshow_hd.png")
thumb_buscar = get_thumb("search.png")
itemlist.append(item.clone(title="[COLOR springgreen][B]Todas Las Películas[/B][/COLOR]", action="listado",
url=host, thumbnail=thumb_pelis, extra="pelicula"))
itemlist.append(item.clone(title="[COLOR springgreen] Incluyen 1080p[/COLOR]", action="listado",
url=host + "rezolucia/1080p/", thumbnail=thumb_pelis_hd, extra="pelicula"))
itemlist.append(item.clone(title="[COLOR springgreen] Incluyen 720p[/COLOR]", action="listado",
url=host + "rezolucia/720p/", thumbnail=thumb_pelis_hd, extra="pelicula"))
itemlist.append(item.clone(title="[COLOR springgreen] Incluyen Hdrip[/COLOR]", action="listado",
url=host + "rezolucia/hdrip/", thumbnail=thumb_pelis, extra="pelicula"))
itemlist.append(item.clone(title="[COLOR springgreen] Incluyen 3D[/COLOR]", action="listado",
url=host + "rezolucia/3d/", thumbnail=thumb_pelis_hd, extra="pelicula"))
itemlist.append(item.clone(title="[COLOR floralwhite][B]Buscar[/B][/COLOR]", action="search", thumbnail=thumb_buscar,
extra="titulo"))
itemlist.append(item.clone(title="[COLOR oldlace] Por Título[/COLOR]", action="search", thumbnail=thumb_buscar,
extra="titulo"))
itemlist.append(item.clone(title="[COLOR oldlace] Por Año[/COLOR]", action="search", thumbnail=thumb_buscar,
extra="año"))
itemlist.append(item.clone(title="[COLOR oldlace] Por Rating Imdb[/COLOR]", action="search", thumbnail=thumb_buscar,
extra="rating"))
return itemlist
def listado(item):
logger.info()
itemlist = []
# Download the page
data = ''
try:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
except:
pass
if not data and item.extra != "año": #If the site is down, exit without raising an error
logger.error("ERROR 01: LISTADO: La Web no responde o ha cambiado de URL: " + item.url + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: LISTADO: La Web no responde o ha cambiado de URL. Si la Web está activa, reportar el error con el log'))
return itemlist #no more data; something is wrong, render what we have
elif not data and item.extra == "año": #a year with no results would raise an error; avoid it
return itemlist
patron = '<div class="browse-movie-wrap col-xs-10 col-sm-4 col-md-5 col-lg-4"><a href="([^"]+)".*?src="([^"]+)".*?alt="([^"]+)".*?rel="tag">([^"]+)<\/a>\s?<\/div><div class="[^"]+">(.*?)<\/div><\/div><\/div>'
#data = scrapertools.find_single_match(data, patron)
matches = re.compile(patron, re.DOTALL).findall(data)
if not matches and not '<ul class="tsc_pagination tsc_pagination' in data: #error
item = generictools.web_intervenida(item, data) #Check whether the site has been taken down
if item.intervencion: #It has been shut down by court order
item, itemlist = generictools.post_tmdb_episodios(item, itemlist) #Call the method that renders the error
return itemlist #Exit
logger.error("ERROR 02: LISTADO: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: LISTADO: Ha cambiado la estructura de la Web. Reportar el error con el log'))
return itemlist #no more data; something is wrong, render what we have
#logger.debug("PATRON: " + patron)
#logger.debug(matches)
#logger.debug(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedqualities in matches:
item_local = item.clone() #Clone the Item to work on
title_subs = []
title = re.sub('\r\n', '', scrapedtitle).decode('utf8').strip()
item_local.url = scrapedurl
item_local.thumbnail = scrapedthumbnail
scrapedtorrent = ''
if scrapedqualities:
patron_quality = '<a href="([^"]+)"\s?rel="[^"]+"\s?title="[^"]+">(.*?)<\/a>'
matches_quality = re.compile(patron_quality, re.DOTALL).findall(scrapedqualities)
quality = ''
for scrapedtorrent, scrapedquality in matches_quality:
quality_inter = scrapedquality
quality_inter = re.sub('HDr$', 'HDrip', quality_inter)
quality_inter = re.sub('720$', '720p', quality_inter)
quality_inter = re.sub('1080$', '1080p', quality_inter)
if quality:
quality += ', %s' % quality_inter
else:
quality = quality_inter
if quality:
item_local.quality = quality
item_local.language = [] #Detect the language in case we find something
if "latino" in scrapedtorrent.lower() or "latino" in item.url or "latino" in title.lower():
item_local.language += ["LAT"]
if "ingles" in scrapedtorrent.lower() or "ingles" in item.url or "vose" in scrapedurl or "vose" in item.url:
if "VOSE" in scrapedtorrent.lower() or "sub" in title.lower() or "vose" in scrapedurl or "vose" in item.url:
item_local.language += ["VOS"]
else:
item_local.language += ["VO"]
if "dual" in scrapedtorrent.lower() or "dual" in title.lower():
item_local.language[0:0] = ["DUAL"]
#Strip unnecessary junk from the title
title = title.replace("Dual", "").replace("dual", "").replace("Subtitulada", "").replace("subtitulada", "").replace("Subt", "").replace("subt", "").replace("Sub", "").replace("sub", "").replace("(Proper)", "").replace("(proper)", "").replace("Proper", "").replace("proper", "").replace("#", "").replace("(Latino)", "").replace("Latino", "")
title = title.replace("- HDRip", "").replace("(HDRip)", "").replace("- Hdrip", "").replace("(microHD)", "").replace("(DVDRip)", "").replace("(HDRip)", "").replace("(BR-LINE)", "").replace("(HDTS-SCREENER)", "").replace("(BDRip)", "").replace("(BR-Screener)", "").replace("(DVDScreener)", "").replace("TS-Screener", "").replace(" TS", "").replace(" Ts", "")
title = re.sub(r'\??\s?\d*?\&.*', '', title).title().strip()
item_local.from_title = title #Keep this tag for possible title disambiguation
item_local.contentType = "movie"
item_local.contentTitle = title
item_local.extra = "peliculas"
item_local.action = "findvideos"
item_local.title = title.strip()
item_local.infoLabels['year'] = "-"
if scrapedyear >= "1900" and scrapedyear <= "2040":
title_subs += [scrapedyear]
itemlist.append(item_local.clone()) #Render on screen
#if not item.category: #If this field is missing, the item comes from the first pass of a global search
# return itemlist #Return without the title-polishing phase to save time
#Send the complete itemlist to TMDB
tmdb.set_infoLabels(itemlist, True)
#Call the method that polishes the titles returned by TMDB
item, itemlist = generictools.post_tmdb_listado(item, itemlist)
# Extract the pager
patron = '<li><a href="[^"]+">(\d+)<\/a><\/li>' #total number of pages
patron += '<li><a href="([^"]+\/page\/(\d+)\/)"\s?rel="[^"]+">Página siguiente[^<]+<\/a><\/li><\/ul><\/div><\/ul>' #next-page url
url_next = ''
if scrapertools.find_single_match(data, patron):
last_page, url_next, next_num = scrapertools.find_single_match(data, patron)
if url_next:
if last_page:
title = '[COLOR gold]Página siguiente >>[/COLOR] %s de %s' % (int(next_num) - 1, last_page)
else:
title = '[COLOR gold]Página siguiente >>[/COLOR] %s' % (int(next_num) - 1)
itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url_next, extra=item.extra))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
#Download the page data
data = ''
try:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
except:
pass
if not data:
logger.error("ERROR 01: FINDVIDEOS: La Web no responde o la URL es errónea: " + item.url + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: FINDVIDEOS: La Web no responde o la URL es errónea. Si la Web está activa, reportar el error con el log'))
return itemlist #no more data; something is wrong, render what we have
patron = 'id="modal-quality-\w+"><span>(.*?)</span>.*?class="quality-size">(.*?)</p>.*?href="([^"]+)"' #grab the .torrent files
matches = re.compile(patron, re.DOTALL).findall(data)
if not matches: #error
logger.error("ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web. Verificar en la Web y reportar el error con el log'))
return itemlist #no more data; something is wrong, render what we have
#Build the video's master title with all the information obtained from TMDB
item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)
#logger.debug("PATRON: " + patron)
#logger.debug(matches)
for scrapedquality, scrapedsize, scrapedtorrent in matches: #iterate over the torrents in their different qualities
#Make a copy of Item to work on
item_local = item.clone()
item_local.quality = scrapedquality
if item.infoLabels['duration']:
item_local.quality += scrapertools.find_single_match(item.quality, '(\s\[.*?\])') #Copy the duration
#Append the size for all of them
item_local.quality = '%s [%s]' % (item_local.quality, scrapedsize) #Add size at the end of quality
item_local.quality = item_local.quality.replace("G", "G ").replace("M", "M ") #Avoid the word reserved by Unify
#Now render the torrent link
item_local.url = scrapedtorrent
item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language)) #Build the torrent title
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title) #Remove empty tags
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title) #Remove empty colors
item_local.alive = "??" #Link quality not verified
item_local.action = "play" #Play the video
item_local.server = "torrent" #Torrent server
itemlist.append(item_local.clone()) #Render on screen
#logger.debug("TORRENT: " + scrapedtorrent + " / title gen/torr: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / tamaño: " + scrapedsize + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName)
#logger.debug(item_local)
#Now handle the direct-streaming server
item_local = item.clone()
servidor = 'openload'
item_local.quality = ''
if item.infoLabels['duration']:
item_local.quality = scrapertools.find_single_match(item.quality, '(\s\[.*?\])') #Copy the duration
enlace = scrapertools.find_single_match(data, 'button-green-download-big".*?href="([^"]+)"><span class="icon-play">')
if enlace:
try:
devuelve = servertools.findvideosbyserver(enlace, servidor) #does the link exist?
if devuelve:
enlace = devuelve[0][1] #Save the link
item_local.alive = "??" #Assume by default that the link is doubtful
#Calls the check_list_links(itemlist) helper for each server link
item_local.alive = servertools.check_video_link(enlace, servidor, timeout=5) #is the link alive?
#If the link is not alive it is ignored
if item_local.alive == "??": #doubtful
item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][%s][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (servidor.capitalize(), item_local.quality, str(item_local.language))
elif item_local.alive.lower() == "no": #Not alive. Build the title but do not render it
item_local.title = '[COLOR red][%s][/COLOR] [COLOR yellow][%s][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.alive, servidor.capitalize(), item_local.quality, str(item_local.language))
logger.debug(item_local.alive + ": ALIVE / " + item_local.title + " / " + servidor + " / " + enlace)
raise #jump to the except clause so the dead link is skipped
else: #It is alive
item_local.title = '[COLOR yellow][%s][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (servidor.capitalize(), item_local.quality, str(item_local.language))
#Prepare the remaining Item fields to play the video directly
item_local.action = "play"
item_local.server = servidor
item_local.url = enlace
item_local.title = item_local.title.replace("[]", "").strip()
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip()
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title).strip()
itemlist.append(item_local.clone())
#logger.debug(item_local)
except:
pass
return itemlist
def actualizar_titulos(item):
logger.info()
item = generictools.update_title(item) #Update the title via tmdb.find_and_set_infoLabels
#Return to the next action in the channel
return item
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
if item.extra == "titulo":
item.url = host + "?s=" + texto
elif item.extra == "año":
item.url = host + "weli/" + texto + "/"
else:
item.extra = "imdb"
item.url = host + "imdb/" + texto + "/"
if texto != '':
return listado(item)
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'torrent':
item.url = host
item.extra = "peliculas"
item.channel = "alltorrent"
itemlist = listado(item)
if itemlist and "Página siguiente >>" in itemlist[-1].title:
itemlist.pop()
# Catch the exception so a failing channel does not break the "newest" feed
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

seriesverde.json

@@ -1,37 +0,0 @@
{
"id": "seriesverde",
"name": "SeriesVerde",
"active": false,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "https://s33.postimg.cc/96dhv4trj/seriesverde.png",
"banner": "",
"categories": [
"tvshow"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Cast",
"Lat",
"VOSE",
"VO"
]
}
]
}

seriesverde.py

@@ -1,321 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel SeriesVerde -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = 'http://seriesverde.com/'
IDIOMAS = {'es': 'Cast', 'la': 'Lat', 'vos': 'VOSE', 'vo': 'VO'}
list_language = IDIOMAS.values()
list_quality = ['SD', 'Micro-HD-720p', '720p', 'HDitunes', 'Micro-HD-1080p']
list_servers = ['powvideo','yourupload', 'openload', 'gamovideo', 'flashx', 'clipwatching', 'streamango', 'streamcloud']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel,
title="Todas",
action="list_all",
thumbnail=get_thumb('all', auto=True),
url=host + 'listado/',
))
itemlist.append(Item(channel=item.channel,
title="Generos",
action="section",
thumbnail=get_thumb('genres', auto=True),
url=host,
))
itemlist.append(Item(channel=item.channel,
title="A - Z",
action="section",
thumbnail=get_thumb('alphabet', auto=True),
url=host+'listado/', ))
itemlist.append(Item(channel=item.channel,
title="Buscar",
action="search",
thumbnail=get_thumb('search', auto=True)))
itemlist = filtertools.show_option(itemlist, item.channel, list_language, list_quality)
autoplay.show_option(item.channel, itemlist)
return itemlist
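# Download a page and strip quotes, line breaks and extra whitespace from the HTML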
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
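# One item per series found on the listing page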
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
contentSerieName = ''
patron = "<div style='float.*?<a href='(.*?)'>.*?src='(.*?)' title='(.*?)'"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = host + scrapedurl
thumbnail = scrapedthumbnail
title = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(Item(channel=item.channel,
action='seasons',
title=title,
url=url,
thumbnail=thumbnail,
contentTitle=scrapedtitle,
contentSerieName=contentSerieName,
context=filtertools.context(item, list_language, list_quality),
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pagination
if itemlist != []:
base_page = scrapertools.find_single_match(item.url, '([^?]+)') #url without the query string
next_page = scrapertools.find_single_match(data, '</span><a href=(\?pagina=\d+)>>></a>')
if next_page != '':
itemlist.append(Item(channel=item.channel,
action="list_all",
title='Siguiente >>>',
url=base_page+next_page,
thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
))
return itemlist
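# The "Generos" and "A - Z" submenus share this handler; the scraping pattern depends on item.title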
def section(item):
logger.info()
itemlist = []
data = get_source(item.url)
if item.title == 'Generos':
patron = '<li><a href=([^ ]+) rel=nofollow><i class=fa fa-bookmark-o></i> (.*?)</a></li>'
elif item.title == 'A - Z':
patron = "<a dir='ltr' href=(.*?) class='label label-success'>(.*?)</a>"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
if item.title == 'Generos':
url = host + scrapedurl
else:
url = scrapedurl
title = scrapedtitle
itemlist.append(Item(channel=item.channel,
action='list_all',
title=title,
url=url
))
return itemlist
def seasons(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<span itemprop=seasonNumber class=fa fa-arrow-down>.*?Temporada (\d+) '
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels=item.infoLabels
for scrapedseason in matches:
url = item.url
title = 'Temporada %s' % scrapedseason
contentSeasonNumber = scrapedseason
infoLabels['season'] = contentSeasonNumber
thumbnail = item.thumbnail
itemlist.append(Item(channel=item.channel,
action="episodesxseason",
title=title,
url=url,
thumbnail=thumbnail,
contentSeasonNumber=contentSeasonNumber,
infoLabels=infoLabels
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
url=item.url,
action="add_serie_to_library",
extra="episodios",
contentSerieName=item.contentSerieName,
))
return itemlist
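# Flatten every season into a single episode list (used when adding the series to the videolibrary)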
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseason(tempitem)
return itemlist
def episodesxseason(item):
logger.info()
itemlist = []
data = get_source(item.url)
season = item.contentSeasonNumber
season_data = scrapertools.find_single_match(data, '<div id=collapse%s.*?panel-success' % season)
patron = "<td><a href='([^ ]+)'.*?itemprop='episodeNumber'>%s+x(\d+)</span> - (.*?) </a>.*?(/banderas.*?)</td>" % season
matches = re.compile(patron, re.DOTALL).findall(season_data)
infoLabels = item.infoLabels
for scrapedurl, scraped_episode, scrapedtitle, lang_data in matches:
url = host + scrapedurl
title = '%sx%s - %s' % (season, scraped_episode, scrapedtitle.strip())
infoLabels['episode'] = scraped_episode
thumbnail = item.thumbnail
title, language = add_language(title, lang_data)
itemlist.append(Item(channel=item.channel,
action="findvideos",
title=title,
url=url,
thumbnail=thumbnail,
language=language,
infoLabels=infoLabels
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
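# Map the /banderas/ flag icons to language tags and append them to the title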
def add_language(title, string):
logger.info()
languages = scrapertools.find_multiple_matches(string, '/banderas/(.*?).png')
language = []
for lang in languages:
if 'jap' in lang or lang not in IDIOMAS:
lang = 'vos'
if len(languages) == 1:
language = IDIOMAS[lang]
title = '%s [%s]' % (title, language)
else:
language.append(IDIOMAS[lang])
title = '%s [%s]' % (title, IDIOMAS[lang])
return title, language
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = "<a href=([^ ]+) target=_blank><img src='/servidores/(.*?).(?:png|jpg)'.*?sno.*?"
patron += "<span>(.*?)<.*?(/banderas.*?)td"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, server, quality, lang_data in matches:
title = server.capitalize()
if quality == '':
quality = 'SD'
title = '%s [%s]' % (title, quality)
title, language = add_language(title, lang_data)
thumbnail = item.thumbnail
enlace_id, serie_id, se, ep = scrapertools.find_single_match(scrapedurl,'enlace(\d+)/(\d+)/(\d+)/(\d+)/')
url = host + 'ajax/load_enlace.php?serie=%s&temp=%s&cap=%s&id=%s' % (serie_id, se, ep, enlace_id)
itemlist.append(Item(channel=item.channel,
title=title,
url=url,
action="play",
thumbnail=thumbnail,
server=server,
quality=quality,
language=language,
infoLabels=item.infoLabels
))
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required by AutoPlay
autoplay.start(itemlist, item)
return sorted(itemlist, key=lambda it: it.language)
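# The ajax/load_enlace.php endpoint returns embed HTML; servertools extracts the playable links from it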
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url, follow_redirects=False).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.infoLabels = item.infoLabels
return itemlist
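# POST the search query to finder.php and parse the returned result cards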
def search_results(item):
logger.info()
itemlist = []
data = httptools.downloadpage(host + 'finder.php', post=item.post).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = "<a href='(.*?)'>.*?src=(.*?) style.*?value=(.*?)>"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumb, scrapedtitle in matches:
itemlist.append(Item(channel=item.channel,
title=scrapedtitle,
url=host+scrapedurl,
action="seasons",
thumbnail=scrapedthumb,
contentSerieName=scrapedtitle,
context=filtertools.context(item, list_language, list_quality)
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def search(item, texto):
logger.info()
import urllib
if texto != '':
post = {'query':texto}
post = urllib.urlencode(post)
item.post = post
return search_results(item)

yaske.json

@@ -1,48 +0,0 @@
{
"id": "yaske",
"name": "Yaske",
"active": false,
"adult": false,
"language": ["cast", "lat"],
"banner": "yaske.png",
"fanart": "https://github.com/master-1970/resources/raw/master/images/fanart/yaske.png",
"thumbnail": "yaske.png",
"categories": [
"direct",
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
}
]
}

yaske.py

@@ -1,349 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urllib
import unicodedata
from core import channeltools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
idiomas1 = {"/es.png":"CAST","/en_es.png":"VOSE","/la.png":"LAT","/en.png":"ENG"}
domain = "yaske.ro"
HOST = "http://www." + domain
HOST_MOVIES = "http://peliculas." + domain + "/now_playing/"
HOST_TVSHOWS = "http://series." + domain + "/popular/"
HOST_TVSHOWS_TPL = "http://series." + domain + "/tpl"
parameters = channeltools.get_channel_parameters('yaske')
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
color1, color2, color3 = ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E']
def mainlist(item):
logger.info()
itemlist = []
item.url = HOST
item.text_color = color2
item.fanart = fanart_host
thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/verdes/%s.png"
itemlist.append(item.clone(title="Peliculas", text_bold=True, viewcontent='movies',
thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
itemlist.append(item.clone(title=" Novedades", action="peliculas", viewcontent='movies',
url=HOST_MOVIES,
thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
itemlist.append(item.clone(title=" Estrenos", action="peliculas",
url=HOST + "/premiere", thumbnail=thumbnail % 'estrenos'))
itemlist.append(item.clone(title=" Género", action="menu_buscar_contenido", thumbnail=thumbnail % 'generos', viewmode="thumbnails",
url=HOST
))
itemlist.append(item.clone(title=" Buscar película", action="search", thumbnail=thumbnail % 'buscar',
type = "movie" ))
itemlist.append(item.clone(title="Series", text_bold=True, viewcontent='movies',
thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
itemlist.append(item.clone(title=" Novedades", action="series", viewcontent='movies',
url=HOST_TVSHOWS,
thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
itemlist.append(item.clone(title=" Buscar serie", action="search", thumbnail=thumbnail % 'buscar',
type = "tvshow" ))
return itemlist
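# Paginated listing of TV shows; appends ?page=N or &page=N to the incoming url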
def series(item):
logger.info()
itemlist = []
url_p = scrapertools.find_single_match(item.url, '(.*?)\?page=')
page = scrapertools.find_single_match(item.url, 'page=([0-9]+)')
if not page:
page = 1
url_p = item.url
else:
page = int(page) + 1
if "search" in item.url:
url_p += "&page=%s" %page
else:
url_p += "?page=%s" %page
data = httptools.downloadpage(url_p).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '(?s)class="post-item-image btn-play-item".*?'
patron += 'href="(http://series[^"]+)">.*?'
patron += '<img data-original="([^"]+)".*?'
patron += 'glyphicon-play-circle"></i>([^<]+).*?'
patron += 'glyphicon-calendar"></i>([^<]+).*?'
patron += 'text-muted f-14">(.*?)</h3'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedepisodes, year, scrapedtitle in matches:
scrapedepisodes = scrapedepisodes.strip()
year = year.strip()
contentSerieName = scrapertools.htmlclean(scrapedtitle.strip())
title = "%s (%s)" %(contentSerieName, scrapedepisodes)
if "series" in scrapedurl:
itemlist.append(Item(channel=item.channel, action="temporadas", title=title, url=scrapedurl,
thumbnail=scrapedthumbnail, contentSerieName=contentSerieName,
infoLabels={"year": year}, text_color=color1))
# Fetch the basic metadata for all the shows using multiple threads
tmdb.set_infoLabels(itemlist, True)
# Add pagination if needed
patron_next_page = 'href="([^"]+)">\s*&raquo;'
matches_next_page = scrapertools.find_single_match(data, patron_next_page)
if matches_next_page and len(itemlist)>0:
itemlist.append(
Item(channel=item.channel, action="series", title=">> Página siguiente", thumbnail=thumbnail_host,
url=url_p, folder=True, text_color=color3, text_bold=True))
return itemlist
def temporadas(item):
logger.info()
itemlist = []
post = []
data = httptools.downloadpage(item.url).data
patron = 'media-object" src="([^"]+).*?'
patron += 'media-heading">([^<]+).*?'
patron += '<code>(.*?)</div>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, scrapedtitle, scrapedcapitulos in matches:
id = scrapertools.find_single_match(item.url, "yaske.ro/([0-9]+)")
season = scrapertools.find_single_match(scrapedtitle, "[0-9]+")
title = scrapedtitle + " (%s)" %scrapedcapitulos.replace("</code>","").replace("\n","")
post = {"data[season]" : season, "data[id]" : id, "name" : "list_episodes" , "both" : "0", "type" : "template"}
post = urllib.urlencode(post)
item.infoLabels["season"] = season
itemlist.append(item.clone(action = "capitulos",
post = post,
title = title,
url = HOST_TVSHOWS_TPL
))
tmdb.set_infoLabels(itemlist)
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title =""))
itemlist.append(item.clone(action = "add_serie_to_library",
channel = item.channel,
extra = "episodios",
title = '[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
url = item.url
))
return itemlist
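# Flatten all seasons into one episode list (used by the videolibrary)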
def episodios(item):
logger.info()
itemlist = []
templist = temporadas(item)
for tempitem in templist:
itemlist += capitulos(tempitem)
return itemlist
def capitulos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url, post=item.post).data
data = data.replace("<wbr>","")
patron = 'href=."([^"]+).*?'
patron += 'media-heading.">([^<]+).*?'
patron += 'fecha de emisi.*?: ([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapeddate in matches:
scrapedtitle = scrapedtitle + " (%s)" %scrapeddate
episode = scrapertools.find_single_match(scrapedurl, "capitulo-([0-9]+)")
query = item.contentSerieName + " " + scrapertools.find_single_match(scrapedtitle, "\w+")
item.infoLabels["episode"] = episode
itemlist.append(item.clone(action = "findvideos",
title = scrapedtitle.decode("unicode-escape"),
query = query.replace(" ","+"),
url = scrapedurl.replace("\\","")
))
tmdb.set_infoLabels(itemlist)
return itemlist
def search(item, texto):
logger.info()
itemlist = []
try:
item.url = HOST + "/search/?query=" + texto.replace(' ', '+')
item.extra = ""
if item.type == "movie":
itemlist.extend(peliculas(item))
else:
itemlist.extend(series(item))
if itemlist[-1].title == ">> Página siguiente":
item_pag = itemlist[-1]
itemlist = sorted(itemlist[:-1], key=lambda Item: Item.contentTitle)
itemlist.append(item_pag)
else:
itemlist = sorted(itemlist, key=lambda Item: Item.contentTitle)
return itemlist
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def newest(categoria):
logger.info()
item = Item()
try:
if categoria == 'peliculas':
item.url = HOST
elif categoria == 'infantiles':
item.url = HOST + "/genre/16/"
elif categoria == 'terror':
item.url = HOST + "/genre/27/"
else:
return []
itemlist = peliculas(item)
if itemlist[-1].title == ">> Página siguiente":
itemlist.pop()
# Catch the exception so a failing channel does not break the "newest" feed
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def peliculas(item):
logger.info()
itemlist = []
url_p = scrapertools.find_single_match(item.url, '(.*?)\?page=')
page = scrapertools.find_single_match(item.url, 'page=([0-9]+)')
if not page:
page = 1
url_p = item.url
else:
page = int(page) + 1
if "search" in item.url:
url_p += "&page=%s" %page
else:
url_p += "?page=%s" %page
data = httptools.downloadpage(url_p).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '(?s)class="post-item-image btn-play-item".*?'
patron += 'href="([^"]+)">.*?'
patron += '<img data-original="([^"]+)".*?'
patron += 'glyphicon-calendar"></i>([^<]+).*?'
patron += 'post(.*?)</div.*?'
patron += 'text-muted f-14">(.*?)</h3'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, year, idiomas, scrapedtitle in matches:
query = scrapertools.find_single_match(scrapedurl, 'yaske.ro/[0-9]+/(.*?)/').replace("-","+")
year = year.strip()
patronidiomas = '<img src="([^"]+)"'
matchesidiomas = scrapertools.find_multiple_matches(idiomas, patronidiomas)
idiomas_disponibles = []
for idioma in matchesidiomas:
for lang in idiomas1.keys():
if idioma.endswith(lang):
idiomas_disponibles.append(idiomas1[lang])
if idiomas_disponibles:
idiomas_disponibles = "[" + "/".join(idiomas_disponibles) + "]"
contentTitle = scrapertools.htmlclean(scrapedtitle.strip())
title = "%s %s" % (contentTitle, idiomas_disponibles)
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
thumbnail=scrapedthumbnail, contentTitle=contentTitle, query = query,
infoLabels={"year": year}, text_color=color1))
# Fetch the basic metadata for all the movies using multiple threads
tmdb.set_infoLabels(itemlist)
# Add pagination if needed
patron_next_page = 'href="([^"]+)">\s*&raquo;'
matches_next_page = scrapertools.find_single_match(data, patron_next_page)
if matches_next_page and len(itemlist)>0:
itemlist.append(
Item(channel=item.channel, action="peliculas", title=">> Página siguiente", thumbnail=thumbnail_host,
url=url_p, folder=True, text_color=color3, text_bold=True))
return itemlist
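# Build the genre submenu from the "Generos" block of the home page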
def menu_buscar_contenido(item):
logger.info(item)
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'Generos.*?</ul>'
data = scrapertools.find_single_match(data, patron)
patron = 'href="([^"]+)">([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
url = HOST + scrapedurl
itemlist.append(Item(channel = item.channel,
action = "peliculas",
title = scrapedtitle,
url = url,
text_color = color1,
contentType = 'movie',
folder = True,
viewmode = "movie_with_plot"
))
return itemlist
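# Video links come from the external olimpo.link aggregator; walk its result pages and group the links by language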
def findvideos(item):
logger.info()
itemlist = []
sublist = []
data = httptools.downloadpage(item.url).data
patron = '(?s)id="online".*?server="([^"]+)"'
mserver = scrapertools.find_single_match(data, patron)
if not item.query:
item.query = scrapertools.find_single_match(item.url, "peliculas.*?/[0-9]+/([^/]+)").replace("-","+")
url_m = "http://olimpo.link/?q=%s&server=%s" %(item.query, mserver)
patron = 'class="favicon.*?domain=(?:www\.|)([^\.]+).*?text-overflow.*?href="([^"]+).*?'
patron += '\[([^\]]+)\].*?\[([^\]]+)\]'
data = httptools.downloadpage(url_m).data
matches = scrapertools.find_multiple_matches(data, patron)
page = 2
while len(matches)>0:
for server, url, idioma, calidad in matches:
if "drive" in server:
server = "gvideo"
sublist.append(item.clone(action="play", url=url, folder=False, text_color=color1, quality=calidad.strip(),
language=idioma.strip(),
server = server,
title="Ver en %s %s" %(server, calidad)
))
data = httptools.downloadpage(url_m + "&page=%s" %page).data
matches = scrapertools.find_multiple_matches(data, patron)
page +=1
sublist = sorted(sublist, key=lambda Item: Item.quality + Item.server)
for k in ["Español", "Latino", "Ingles - Sub Español", "Ingles"]:
lista_idioma = filter(lambda i: i.language == k, sublist)
if lista_idioma:
itemlist.append(item.clone(title=k, folder=False, infoLabels = "",
text_color=color2, text_bold=True, thumbnail=thumbnail_host))
itemlist.extend(lista_idioma)
tmdb.set_infoLabels(itemlist, True)
# Insert the "Buscar trailer" and "Añadir a la videoteca" items
if itemlist and item.extra != "library":
title = "%s [Buscar trailer]" % (item.contentTitle)
itemlist.insert(0, item.clone(channel="trailertools", action="buscartrailer",
text_color=color3, title=title, viewmode="list"))
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir película a la videoteca",
action="add_pelicula_to_library", url=item.url, text_color="green",
contentTitle=item.contentTitle, extra="library", thumbnail=thumbnail_host))
return itemlist
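# Follow the olimpo.link iframe and its redirect to obtain the final hosted URL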
def play(item):
logger.info()
itemlist = []
ddd = httptools.downloadpage(item.url).data
url = "http://olimpo.link" + scrapertools.find_single_match(ddd, '<iframe src="([^"]+)')
item.url = httptools.downloadpage(url + "&ge=1", follow_redirects=False, only_headers=True).headers.get("location", "")
itemlist.append(item.clone(server = ""))
itemlist = servertools.get_servers_itemlist(itemlist)
return itemlist