@@ -1,54 +0,0 @@
{
    "id": "goodpelis",
    "name": "GoodPelis",
    "active": false,
    "adult": false,
    "language": ["lat"],
    "thumbnail": "http://goodpelis.net/wp-content/uploads/2017/11/Logo-GP.png",
    "banner": "",
    "categories": [
        "movie"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_latino",
            "type": "bool",
            "label": "Incluir en Novedades - Latino",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Incluir en Novedades - Peliculas",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_infantiles",
            "type": "bool",
            "label": "Incluir en Novedades - Infantiles",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_terror",
            "type": "bool",
            "label": "Incluir en Novedades - Terror",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}

@@ -1,354 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel GoodPelis -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import re

from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb

host = 'https://goodpelis.net/'


def mainlist(item):
    logger.info()

    itemlist = []

    itemlist.append(item.clone(title="Peliculas",
                               action="menu_peliculas",
                               thumbnail=get_thumb('movies', auto=True),
                               ))

    # itemlist.append(item.clone(title="Series",
    #                            action="menu_series",
    #                            thumbnail=get_thumb('tvshows', auto=True),
    #                            ))

    itemlist.append(item.clone(title="Buscar", action="search",
                               thumbnail=get_thumb('search', auto=True),
                               url=host + '?s='
                               ))

    return itemlist


def menu_peliculas(item):
    logger.info()

    itemlist = []

    itemlist.append(item.clone(title="Todas",
                               action="list_all",
                               thumbnail=get_thumb('all', auto=True),
                               url=host + 'page/1/?s'
                               ))

    itemlist.append(item.clone(title="Generos",
                               action="seccion",
                               url=host + 'page/1/?s',
                               thumbnail=get_thumb('genres', auto=True),
                               seccion='generos-pelicula'
                               ))

    itemlist.append(item.clone(title="Por Año",
                               action="seccion",
                               url=host + 'page/1/?s',
                               thumbnail=get_thumb('year', auto=True),
                               seccion='fecha-estreno'
                               ))

    itemlist.append(item.clone(title="Calidad",
                               action="seccion",
                               url=host + 'page/1/?s',
                               thumbnail=get_thumb('quality', auto=True),
                               seccion='calidad'
                               ))

    return itemlist


def menu_series(item):
    logger.info()

    itemlist = []

    itemlist.append(item.clone(title="Todas",
                               action="list_all", thumbnail=get_thumb('all', auto=True),
                               url=host + 'series/page/1/',
                               ))

    itemlist.append(item.clone(title="Generos",
                               action="seccion",
                               url=host + 'series/page/1/',
                               thumbnail=get_thumb('genres', auto=True),
                               seccion='generos-serie'
                               ))

    itemlist.append(item.clone(title="Por Año",
                               action="seccion",
                               url=host + 'series/page/1/',
                               thumbnail=get_thumb('year', auto=True),
                               seccion='series-lanzamiento'
                               ))

    return itemlist


def list_all(item):
    logger.info()

    itemlist = []
    data = get_source(item.url)
    patron = 'class=item.*?<a href=(.*?)><div class=image.*?<img src=(.*?) alt=(.*?) (?:\(\d{4}|width).*?'
    patron += 'fixyear><h2>.*?<\/h2>.*?<span class=year>(.*?)<\/span><\/div>(.*?)<\/div>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedquality in matches:
        url = scrapedurl
        action = 'findvideos'
        thumbnail = scrapedthumbnail
        plot = ''
        contentSerieName = ''
        contentTitle = scrapedtitle
        title = contentTitle
        quality = 'Full HD'
        if scrapedquality != '':
            quality = scrapertools.find_single_match(scrapedquality, 'calidad2>(.*?)<')
            title = contentTitle + ' (%s)' % quality

        year = scrapedyear

        if 'series' in item.url or 'series' in url:
            action = 'seasons'
            contentSerieName = contentTitle
            quality = ''
        new_item = Item(channel=item.channel,
                        action=action,
                        title=title,
                        url=url,
                        thumbnail=thumbnail,
                        plot=plot,
                        quality=quality,
                        infoLabels={'year': year}
                        )
        if 'series' not in item.url:
            new_item.contentTitle = contentTitle
        else:
            new_item.contentSerieName = contentSerieName
        if 'temporada' not in url:
            itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Paginacion

    if itemlist != []:
        next_page = scrapertools.find_single_match(data,
                                                   '<div class=pag_b><a href=(.*?)>Siguiente</a>')
        if next_page != '':
            itemlist.append(Item(channel=item.channel,
                                 action="list_all",
                                 title='Siguiente >>>',
                                 url=next_page,
                                 ))
    return itemlist


def seccion(item):
    logger.info()
    itemlist = []
    data = get_source(item.url)
    if item.seccion == 'generos-pelicula':
        patron = '<li class=cat-item cat-item-.*?><a href=(.*?)>(.*?<\/a> <span>.*?)<\/span><\/li>'
    elif item.seccion == 'generos-serie':
        patron = '<li class=cat-item cat-item-.*?><a href=(.*?\/series-genero\/.*?)>(.*?<\/a> <span>.*?)<\/span><\/li>'
    elif item.seccion in ['fecha-estreno', 'series-lanzamiento']:
        patron = '<li><a href=%sfecha-estreno(.*?)>(.*?)<\/a>' % host
    elif item.seccion == 'calidad':
        patron = '<li><a href=%scalidad(.*?)>(.*?)<\/a>' % host
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        thumbnail = ''
        if 'generos' in item.seccion:
            cantidad = scrapertools.find_single_match(scrapedtitle, '<span>(\d+)')
            title = scrapertools.find_single_match(scrapedtitle, '(.*?)<')
            url = scrapedurl
            title = scrapertools.decodeHtmlentities(title)
            title = title + ' (%s)' % cantidad
        elif item.seccion in ['series-lanzamiento', 'fecha-estreno', 'calidad']:
            title = scrapedtitle
            url = '%s%s%s' % (host, item.seccion, scrapedurl)

        itemlist.append(item.clone(action='list_all',
                                   title=title,
                                   url=url,
                                   thumbnail=thumbnail
                                   ))
    return itemlist


def seasons(item):
    logger.info()

    itemlist = []

    data = get_source(item.url)
    patron = '<span class=title>.*?- Temporada (.*?)<\/span>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for temporada in matches:
        title = 'Temporada %s' % temporada
        contentSeasonNumber = temporada
        item.infoLabels['season'] = contentSeasonNumber
        itemlist.append(item.clone(action='episodiosxtemp',
                                   title=title,
                                   contentSeasonNumber=contentSeasonNumber
                                   ))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel,
                             title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                             url=item.url,
                             action="add_serie_to_library",
                             extra="episodios",
                             contentSerieName=item.contentSerieName,
                             contentSeasonNumber=contentSeasonNumber
                             ))

    return itemlist


def episodios(item):
    logger.info()

    itemlist = []
    data = get_source(item.url)

    patron = '<li><div class=numerando>(\d+).*?x.*?(\d+)<\/div>.*?<a href=(.*?)> (.*?)<\/a>.*?<\/i>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedtemp, scrapedep, scrapedurl, scrapedtitle in matches:
        temporada = scrapedtemp
        title = temporada + 'x%s %s' % (scrapedep, scrapedtitle)
        url = scrapedurl
        contentEpisodeNumber = scrapedep
        item.infoLabels['episode'] = contentEpisodeNumber
        itemlist.append(item.clone(action='findvideos',
                                   title=title,
                                   url=url,
                                   contentEpisodeNumber=contentEpisodeNumber,
                                   ))
    return itemlist


def episodiosxtemp(item):
    logger.info()

    itemlist = []
    data = get_source(item.url)
    temporada = item.contentSeasonNumber
    patron = '<li><div class=numerando>%s.*?x.*?(\d+)<\/div>.*?<a href=(.*?)> (.*?)<\/a>.*?<\/i>' % temporada
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedep, scrapedurl, scrapedtitle in matches:
        title = temporada + 'x%s %s' % (scrapedep, scrapedtitle)
        url = scrapedurl
        contentEpisodeNumber = scrapedep
        item.infoLabels['episode'] = contentEpisodeNumber
        itemlist.append(item.clone(action='findvideos',
                                   title=title,
                                   url=url,
                                   contentEpisodeNumber=contentEpisodeNumber,
                                   ))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist


def findvideos(item):
    logger.info()
    url_list = []
    itemlist = []
    duplicados = []
    data = get_source(item.url)
    src = data
    patron = '<(?:iframe|IFRAME).*?(?:src|SRC)=(.*?) (?:scrolling|frameborder|FRAMEBORDER)'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for url in matches:
        lang = 'LAT'
        quality = item.quality
        title = '[%s] [%s]'
        if url != '':
            itemlist.append(item.clone(title=title, url=url, action='play', language=lang))

    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % (i.server, i.language))

    if item.infoLabels['mediatype'] == 'movie':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(Item(channel=item.channel,
                                 title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                                 url=item.url,
                                 action="add_pelicula_to_library",
                                 extra="findvideos",
                                 contentTitle=item.contentTitle
                                 ))

    return itemlist


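# Editor's sketch (assumption, not part of the original channel file): findvideos()
# above leaves each title as the raw template '[%s] [%s]' and relies on
# servertools.get_servers_itemlist() to detect the server for each URL and then
# apply the lambda "i.title % (i.server, i.language)". A self-contained
# illustration of that deferred-formatting pattern, using a hypothetical
# stand-in item class:
class _SketchItem(object):
    def __init__(self, title, server, language):
        self.title, self.server, self.language = title, server, language


def _apply_title_format(items, formatter):
    # mirrors what the lambda passed to get_servers_itemlist() does per item
    for i in items:
        i.title = formatter(i)
    return items


_sketch = _apply_title_format([_SketchItem('[%s] [%s]', 'openload', 'LAT')],
                              lambda i: i.title % (i.server, i.language))
# _sketch[0].title -> '[openload] [LAT]'

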
def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    try:
        if texto != '':
            return list_all(item)
        else:
            return []
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria in ['peliculas', 'latino']:
            item.url = host + 'page/1/?s'

        elif categoria == 'infantiles':
            item.url = host + 'category/animacion/'

        elif categoria == 'terror':
            item.url = host + 'category/terror/'

        itemlist = list_all(item)
        if itemlist[-1].title == 'Siguiente >>>':
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist


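# Editor's note (assumption): the 'peliculas'/'latino', 'infantiles' and 'terror'
# categories handled in newest() correspond to the "include_in_newest_*" settings
# declared in the channel JSON earlier in this diff, which is presumably how the
# Novedades section decides whether to call this channel for each category.

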
def get_source(url):
    logger.info()
    data = httptools.downloadpage(url).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data

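# Editor's note (inference from the regexes above): get_source() strips every
# double quote from the downloaded page, which is why the patterns in this channel
# match unquoted attributes (e.g. 'src=(.*?) ' rather than 'src="([^"]+)"').
# A minimal standalone illustration of that normalisation step:
import re as _re


def _normalize_page(html):
    # same substitution applied in get_source(): drop quotes, line breaks, tabs,
    # &nbsp; entities, <br> tags and runs of two or more whitespace characters
    return _re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", html)

# _normalize_page('<img src="http://x/y.png" alt="Foo">') -> '<img src=http://x/y.png alt=Foo>'
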
@@ -12,7 +12,7 @@ from core import tmdb
from core.item import Item
from platformcode import config, logger

host = "http://www.pelismundo.com"
host = "https://www.pelisvips.com"
idiomas = {"Castellano":"CAST","Subtitulad":"VOSE","Latino":"LAT"}

def mainlist(item):

@@ -209,17 +209,14 @@ def findvideos(item):
                                     ))
    tmdb.set_infoLabels(itemlist)
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    if itemlist:
    if itemlist and item.contentChannel != "videolibrary":
        itemlist.append(Item(channel = item.channel))
        itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                                   text_color="magenta"))
        # Opción "Añadir esta película a la biblioteca de KODI"
        if item.extra != "library":
            if config.get_videolibrary_support():
                itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
                                     action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
                                     fulltitle = item.contentTitle
                                     ))
        if config.get_videolibrary_support():
            itemlist.append(item.clone(title="Añadir a la videoteca", text_color="green",
                                       action="add_pelicula_to_library", url=item.url, fulltitle = item.contentTitle
                                       ))
    return itemlist

@@ -25,7 +25,7 @@ list_servers = ['rapidvideo', 'streamango', 'fastplay', 'openload', 'netu', 'vid
__channel__='repelis'

host = "https://repelisgo.io"
host = "https://repelisgo.com"

try:
    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)

@@ -207,23 +207,23 @@ def findvideos(item):
    autoplay.start(itemlist, item)

    if itemlist:
    if itemlist and item.contentChannel != "videolibrary":
        itemlist.append(Item(channel = item.channel))
        itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                                   text_color="magenta"))
        # Opción "Añadir esta película a la biblioteca de KODI"
        if item.extra != "library":
            if config.get_videolibrary_support():
                itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
                                     action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
                                     contentTitle = item.contentTitle
                                     ))
        if config.get_videolibrary_support():
            itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
                                 action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
                                 contentTitle = item.contentTitle
                                 ))
    return itemlist


def play(item):
    logger.info()
    itemlist = []
    #headers = {"}
    url1 = httptools.downloadpage(host + item.url, follow_redirects=False, only_headers=True).headers.get("location", "")
    itemlist.append(item.clone(url=url1))
    itemlist = servertools.get_servers_itemlist(itemlist)

@@ -481,19 +481,16 @@ def do_search(item, categories=None):
    time.sleep(0.5)
    progreso = platformtools.dialog_progress(config.get_localized_string(30993) % tecleado, "")
    channel_files = sorted(glob.glob(channels_path), key=lambda x: os.path.basename(x))

    import math
    # fix float porque la division se hace mal en python 2.x
    number_of_channels = float(100) / len(channel_files)

    threads = []
    search_results = {}
    start_time = time.time()
    list_channels_search = []

    # Extrae solo los canales a buscar
    for index, infile in enumerate(channel_files):
        try:
            percentage = int(math.ceil((index + 1) * number_of_channels))

            basename = os.path.basename(infile)
            basename_without_extension = basename[:-5]
            logger.info("%s..." % basename_without_extension)

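# Editor's note (sketch of the Python 2 behaviour the "fix float" comment above
# refers to): without the float() cast, 100 / len(channel_files) is integer
# division on Python 2, so with more than 100 channel files the per-channel step
# would truncate to 0 and the progress percentage would never advance. For example:
#   100 / 150        -> 0 on Python 2 (integer division)
#   float(100) / 150 -> 0.666...  on both Python 2 and Python 3
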
@@ -539,29 +536,40 @@ def do_search(item, categories=None):
            if not include_in_global_search:
                logger.info("%s -no incluido en lista a buscar-" % basename_without_extension)
                continue
            list_channels_search.append(infile)
        except:
            logger.error("No se puede buscar en: %s" % channel_parameters["title"])
            import traceback
            logger.error(traceback.format_exc())
            continue


    for index, infile in enumerate(list_channels_search):
        try:
            # fix float porque la division se hace mal en python 2.x
            percentage = int(float((index+1))/len(list_channels_search)*float(100))
            basename = os.path.basename(infile)
            basename_without_extension = basename[:-5]
            logger.info("%s..." % basename_without_extension)
            channel_parameters = channeltools.get_channel_parameters(basename_without_extension)
            # Movido aqui el progreso, para que muestre el canal exacto que está buscando
            progreso.update(percentage,
                            config.get_localized_string(60520) % (channel_parameters["title"]))
            # Modo Multi Thread
            if progreso.iscanceled():
                progreso.close()
                logger.info("Búsqueda cancelada")
                return itemlist

            # Modo Multi Thread
            if multithread:
                t = Thread(target=channel_search, args=[search_results, channel_parameters, tecleado],
                           name=channel_parameters["title"])
                t.setDaemon(True)
                t.start()
                threads.append(t)

            # Modo single Thread
            else:
                logger.info("Intentado búsqueda en %s de %s " % (basename_without_extension, tecleado))
                channel_search(search_results, channel_parameters, tecleado)

            logger.info("%s incluido en la búsqueda" % basename_without_extension)
            progreso.update(percentage,
                            config.get_localized_string(60520) % channel_parameters["title"])

        except:
            logger.error("No se puede buscar en: %s" % channel_parameters["title"])
            import traceback

@@ -581,24 +589,18 @@ def do_search(item, categories=None):
            list_pendent_names = [a.getName() for a in pendent]
            mensaje = config.get_localized_string(70282) % (", ".join(list_pendent_names))
            progreso.update(percentage, config.get_localized_string(60521) % (len(threads) - len(pendent), len(threads)),
            progreso.update(percentage, config.get_localized_string(60521) % (len(threads) - len(pendent) + 1, len(threads)),
                            mensaje)
            logger.debug(mensaje)

            if progreso.iscanceled():
                logger.info("Búsqueda cancelada")
                break

            time.sleep(0.5)
            pendent = [a for a in threads if a.isAlive()]

    total = 0

    for channel in sorted(search_results.keys()):
        for element in search_results[channel]:
            total += len(element["itemlist"])
            title = channel

            # resultados agrupados por canales
            if item.contextual == True or item.action == 'search_tmdb':
                result_mode = 1

@@ -606,10 +608,8 @@ def do_search(item, categories=None):
                if len(search_results[channel]) > 1:
                    title += " -%s" % element["item"].title.strip()
                title += " (%s)" % len(element["itemlist"])

                title = re.sub("\[COLOR [^\]]+\]", "", title)
                title = re.sub("\[/COLOR]", "", title)

                itemlist.append(Item(title=title, channel="search", action="show_result", url=element["item"].url,
                                     extra=element["item"].extra, folder=True, adult=element["adult"],
                                     from_action="search", from_channel=element["item"].channel, tecleado=tecleado))

@@ -625,13 +625,10 @@ def do_search(item, categories=None):
                    i.infoPlus = True
                    itemlist.append(i.clone(title=title, from_action=i.action, from_channel=i.channel,
                                            channel="search", action="show_result", adult=element["adult"]))

    title = config.get_localized_string(59972) % (
        tecleado, total, time.time() - start_time)
    itemlist.insert(0, Item(title=title, text_color='yellow'))

    progreso.close()

    #Para opcion Buscar en otros canales
    if item.contextual == True:
        return exact_results(itemlist, tecleado)

@@ -1,40 +0,0 @@
{
    "id": "wopelis",
    "name": "WoPelis",
    "active": true,
    "adult": false,
    "language": ["cast", "lat"],
    "banner": "https://github.com/master-1970/resources/raw/master/images/bannermenu/wopelis.png",
    "fanart": "https://github.com/master-1970/resources/raw/master/images/fanart/wopelis.png",
    "thumbnail": "https://github.com/master-1970/resources/raw/master/images/squares/wopelis.png",
    "categories": [
        "movie",
        "tvshow"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Incluir en Novedades - Películas",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_series",
            "type": "bool",
            "label": "Incluir en Novedades - Series",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}

@@ -1,366 +0,0 @@
# -*- coding: utf-8 -*-

import re

from core import channeltools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger

HOST = 'http://www.wopelis.com'
__channel__ = 'wopelis'
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
color1, color2, color3 = ['0xFF58D3F7', '0xFF2E64FE', '0xFF0404B4']


def mainlist(item):
    logger.info()
    itemlist = []
    item.url = HOST
    item.text_color = color2
    item.fanart = fanart_host

    item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/Directors%20Chair.png"
    url = HOST + "/galep.php?solo=cenlaces&empen=0"
    itemlist.append(item.clone(title="Películas:", folder=False, text_color=color3, text_bold=True))
    itemlist.append(item.clone(title="    Recientes", action="listado", url=url))
    itemlist.append(item.clone(title="    Mas populares de la semana", action="listado", url=url + "&ord=popu"))
    itemlist.append(item.clone(title="    Por géneros", action="generos", url=HOST + "/index.php"))
    itemlist.append(item.clone(title="    Buscar película", action="search", url=url))

    itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))

    item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/TV%20Series.png"
    url = HOST + "/gales.php?empen=0"
    itemlist.append(item.clone(title="Series:", folder=False, text_color=color3, text_bold=True))
    itemlist.append(item.clone(title="    Nuevos episodios", action="listado", url=url + "&ord=reci"))
    itemlist.append(item.clone(title="    Mas populares de la semana", action="listado", url=url + "&ord=popu"))
    itemlist.append(item.clone(title="    Por géneros", action="generos", url=HOST + "/series.php"))
    itemlist.append(item.clone(title="    Buscar serie", action="search", url=url + "&ord=popu"))

    return itemlist


def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == 'peliculas':
            item.url = HOST + "/galep.php?solo=cenlaces&empen=0"

        elif categoria == 'series':
            item.url = HOST + "/gales.php?empen=0&ord=reci"

        else:
            return []

        itemlist = listado(item)
        if itemlist[-1].title == ">> Página siguiente":
            itemlist.pop()

    # Se captura la excepción, para no interrumpir al canal novedades si un canal falla
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist


def search(item, texto):
    logger.info("search:" + texto)
    try:
        if texto:
            item.url = "%s&busqueda=%s" % (item.url, texto.replace(" ", "+"))
            return listado(item)
        else:
            return []
    # Se captura la excepción, para no interrumpir al buscador global si un canal falla
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def generos(item):
    logger.info()
    itemlist = []
    dict_gender = {"acción": "accion", "animación": "animacion", "ciencia ficción": "ciencia%20ficcion",
                   "fantasía": "fantasia", "música": "musica", "película de la televisión": "pelicula%20de%20tv"}

    data = downloadpage(item.url)
    data = scrapertools.find_single_match(data, '<select name="gener">(.*?)</select>')

    for genero in scrapertools.find_multiple_matches(data, '<option value="([^"]+)'):
        if genero != 'Todos':
            if 'series' in item.url:
                url = HOST + "/gales.php?empen=0&gener=%s" % genero
            else:
                url = HOST + "/galep.php?solo=cenlaces&empen=0&gener=%s" % genero

            thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/azul/%s.png"
            thumbnail = thumbnail % dict_gender.get(genero.lower(), genero.lower())

            itemlist.append(Item(channel=item.channel, action="listado", title=genero, url=url, text_color=color1,
                                 contentType='movie', folder=True,
                                 thumbnail=thumbnail))  # ,viewmode="movie_with_plot"))

    return sorted(itemlist, key=lambda i: i.title.lower())


def listado(item):
    logger.info(item)
    itemlist = []

    data = downloadpage(item.url)

    patron = '<a class="extended" href=".([^"]+).*?'
    patron += '<img class="centeredPicFalse"([^>]+).*?'
    patron += '<span class="year">(\d{4})</span>.*?'
    patron += '<span class="title">(.*?)</span>'

    for url, pic, year, title in scrapertools.find_multiple_matches(data, patron):
        thumbnail = scrapertools.find_single_match(pic, 'src="([^"]+)')
        if not thumbnail:
            thumbnail = HOST + "/images/cover-notfound.png"

        new_item = Item(channel=__channel__, thumbnail=thumbnail, text_color=color2, infoLabels={"year": year})

        if "galep.php" in item.url:
            # movie
            new_item.contentTitle = title
            new_item.action = "findvideos"
            new_item.url = HOST + url.replace('peli.php?id=', 'venlaces.php?npl=')


        elif "gales.php" in item.url:
            # tvshow
            title = title.replace(' - 0x0', '')
            new_item.contentSerieName = title
            new_item.action = "temporadas"
            new_item.url = HOST + url
            if "ord=reci" in item.url:
                # episode
                season_episode = scrapertools.get_season_and_episode(title)
                if season_episode:
                    new_item.contentSeason, new_item.contentEpisodeNumber = season_episode.split('x')
                    new_item.action = "get_episodio"
                    new_item.contentSerieName = title.split('-', 1)[1].strip()

            elif "gener=" in item.url and scrapertools.get_season_and_episode(title):
                # Las series filtrada por genero devuelven capitulos y series completas
                title = title.split('-', 1)[1].strip()
                new_item.contentSerieName = title

        else:
            return []

        new_item.title = "%s (%s)" % (title, year)

        itemlist.append(new_item)

    if itemlist:
        # Obtenemos los datos basicos mediante multihilos
        tmdb.set_infoLabels(itemlist)

        # Si es necesario añadir paginacion
        if len(itemlist) == 35:
            empen = scrapertools.find_single_match(item.url, 'empen=(\d+)')
            url_next_page = item.url.replace('empen=%s' % empen, 'empen=%s' % (int(empen) + 35))
            itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente",
                                 thumbnail=thumbnail_host, url=url_next_page, folder=True,
                                 text_color=color3, text_bold=True))

    return itemlist


def temporadas(item):
    logger.info(item)
    itemlist = []

    data = downloadpage(item.url)
    patron = '<div class="checkSeason" data-num="([^"]+)[^>]+>([^<]+)'

    for num_season, title in scrapertools.find_multiple_matches(data, patron):
        itemlist.append(item.clone(contentSeason=num_season, title="%s - %s" % (item.contentSerieName, title),
                                   action="episodios"))

    if itemlist:
        # Obtenemos los datos de las temporadas mediante multihilos
        tmdb.set_infoLabels(itemlist)

        if config.get_videolibrary_support():
            itemlist.append(item.clone(title="Añadir esta serie a la videoteca",
                                       action="add_serie_to_library", extra="episodios",
                                       text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))

    return itemlist


def episodios(item):
    logger.info()
    itemlist = []

    data = downloadpage(item.url)
    patron = '<div class="checkSeason" data-num="([^"]+)(.*?)</div></div></div>'
    for num_season, data in scrapertools.find_multiple_matches(data, patron):
        if item.contentSeason and item.contentSeason != int(num_season):
            # Si buscamos los episodios de una temporada concreta y no es esta (num_season)...
            continue

        patron = '<div class="info"><a href="..([^"]+).*?class="number">([^<]+)'
        for url, num_episode in scrapertools.find_multiple_matches(data, patron):
            if item.contentEpisodeNumber and item.contentEpisodeNumber != int(num_episode):
                # Si buscamos un episodio concreto y no es este (num_episode)...
                continue

            title = "%sx%s - %s" % (num_season, num_episode.strip().zfill(2), item.contentSerieName)
            itemlist.append(item.clone(title=title, url=HOST + url, action="findvideos",
                                       contentSeason=num_season, contentEpisodeNumber=num_episode))

    if itemlist and hasattr(item, 'contentSeason'):
        # Obtenemos los datos de los episodios de esta temporada mediante multihilos
        tmdb.set_infoLabels(itemlist)

        for i in itemlist:
            if i.infoLabels['title']:
                # Si el capitulo tiene nombre propio añadirselo al titulo del item
                i.title = "%sx%s %s" % (
                    i.infoLabels['season'], str(i.infoLabels['episode']).zfill(2), i.infoLabels['title'])

    return itemlist


def get_episodio(item):
    logger.info()
    itemlist = episodios(item)
    if itemlist:
        itemlist = findvideos(itemlist[0])

    return itemlist


def findvideos(item):
    logger.info()
    itemlist = []
    dic_langs = {'esp': 'Español', 'english': 'Ingles', 'japo': 'Japones', 'argentina': 'Latino', 'ntfof': ''}
    dic_servers = {'ntfof': 'Servidor Desconocido', 'stramango': 'streamango', 'flasht': 'flashx'}

    data1 = downloadpage(item.url)
    patron = '(?s)onclick="redir\(([^\)]+).*?'
    patron += '<img style="float:left" src="./[^/]+/([^\.]+).+?'
    patron += '<span[^>]+>([^<]+).*?'
    patron += '<img(.*?)on'

    if "Descarga:</h1>" in data1:
        list_showlinks = [('Online:', 'Online:</h1>(.*?)Descarga:</h1>'),
                          ('Download:', 'Descarga:</h1>(.*?)</section>')]
    else:
        list_showlinks = [('Online:', 'Online:</h1>(.*?)</section>')]

    for t in list_showlinks:
        data = scrapertools.find_single_match(data1, t[1])

        if data:
            itemlist.append(Item(title=t[0], text_color=color3, text_bold=True,
                                 folder=False, thumbnail=thumbnail_host))

        for redir, server, quality, langs in scrapertools.find_multiple_matches(data,
                                                                                patron):  # , server, quality, langs
            redir = redir.split(",")
            url = redir[0][1:-1]
            id = redir[1][1:-1]
            # type = redir[2][1:-1]
            # url = url.split("','")[0]  # [2] = 0 movies, [2] = 1 tvshows

            langs = scrapertools.find_multiple_matches(langs, 'src="./images/([^\.]+)')
            idioma = dic_langs.get(langs[0], langs[0])
            subtitulos = dic_langs.get(langs[1], langs[1])
            if subtitulos:
                idioma = "%s (Sub: %s)" % (idioma, subtitulos)

            if server in dic_servers: server = dic_servers[server]

            itemlist.append(
                item.clone(url=url, action="play", language=idioma, contentQuality=quality, server=server,
                           title="    %s: %s [%s]" % (server.capitalize(), idioma, quality)))

    if itemlist and config.get_videolibrary_support() and not "library" in item.extra:
        if item.contentType == 'movie':
            itemlist.append(item.clone(title="Añadir película a la videoteca",
                                       action="add_pelicula_to_library", text_color=color1,
                                       contentTitle=item.contentTitle, extra="library", thumbnail=thumbnail_host))
        else:
            # http://www.wopelis.com/serie.php?id=275641
            item.url = "http://www.wopelis.com/serie.php?id=" + id
            item.contentSeason = 0
            item.contentEpisodeNumber = 0
            # logger.error(item)
            itemlist.append(item.clone(title="Añadir esta serie a la videoteca",
                                       action="add_serie_to_library", extra="episodios###library",
                                       text_color=color1, thumbnail=thumbnail_host))

    return itemlist


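# Editor's sketch (hypothetical onclick value, for illustration only): the group
# captured from onclick="redir(...)" above would look roughly like
# "'venlaces.php?npl=123','275641','1'"; after redir.split(","), the [1:-1] slices
# strip the surrounding quotes, so url -> venlaces.php?npl=123 and id -> 275641,
# the id that is later reused to build the serie.php URL for the videolibrary option.

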
def play(item):
    logger.info()
    itemlist = []

    # Buscamos video por servidor ...
    devuelve = servertools.findvideosbyserver(item.url, item.server)
    if not devuelve:
        # ...sino lo encontramos buscamos en todos los servidores disponibles
        devuelve = servertools.findvideos(item.url, skip=True)

    if devuelve:
        # logger.debug(devuelve)
        itemlist.append(Item(channel=item.channel, title=item.contentTitle, action="play", server=devuelve[0][2],
                             url=devuelve[0][1], thumbnail=item.thumbnail, folder=False))

    return itemlist


def downloadpage(url):
    cookievalue = config.get_setting("cookie", "wopelis")
    if not cookievalue:
        data = httptools.downloadpage(url).data
        cookievalue = get_cookie(data)

    headers = {'Cookie': '%s' % cookievalue}
    data = httptools.downloadpage(url, headers=headers).data
    if "Hola bienvenido" in data:
        cookievalue = get_cookie(data)
        headers = {'Cookie': '%s' % cookievalue}
        data = httptools.downloadpage(url, headers=headers).data

    return re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)


def get_cookie(data):
    import random
    cookievalue = ""
    cookiename = scrapertools.find_single_match(data, 'document.cookie\s*=\s*"([^"]+)"')
    cookiename = cookiename.replace("=", "")
    posible = scrapertools.find_single_match(data, 'var possible\s*=\s*"([^"]+)"')
    bloque = scrapertools.find_single_match(data, 'function cok(.*?);')
    lengths = scrapertools.find_multiple_matches(bloque, '([\S]{1}\d+)')
    for numero in lengths:
        if numero.startswith("("):
            for i in range(0, int(numero[1:])):
                cookievalue += posible[int(random.random() * len(posible))]
        else:
            cookievalue += numero[1:]

    cookievalue = "%s=%s" % (cookiename, cookievalue)
    config.set_setting("cookie", cookievalue, "wopelis")

    return cookievalue

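# Editor's sketch (assumption about the page's challenge, for illustration only):
# get_cookie() rebuilds the value that the site's cok() JavaScript would set,
# mixing literal digit runs with runs of random characters drawn from "possible".
# A compact standalone equivalent of that loop:
import random as _random


def _build_cookie_value(possible, tokens):
    # tokens such as "(4" mean "append 4 random chars from possible";
    # any other token, e.g. "=123", appends its digits literally
    value = ""
    for token in tokens:
        if token.startswith("("):
            value += "".join(_random.choice(possible) for _ in range(int(token[1:])))
        else:
            value += token[1:]
    return value

# _build_cookie_value("abcdef0123456789", ["(4", "=123"]) -> e.g. "9c2f123"
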
File diff suppressed because one or more lines are too long