fix language labels

Alfa-Addon
2017-09-29 21:00:42 -04:00
parent 3a62974ba3
commit ce8c4580ef


@@ -15,7 +15,6 @@ from core.item import Item
from core import channeltools
from core import tmdb
from platformcode import config, logger
from channelselector import get_thumb
__channel__ = "pedropolis"
@@ -44,49 +43,41 @@ parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
thumbnail = "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/%s.png"
def mainlist(item):
logger.info()
itemlist = [item.clone(title="Peliculas", action="menumovies", text_blod=True,
viewcontent='movies', viewmode="movie_with_plot", thumbnail=get_thumb("channels_movie.png")),
item.clone(title="Series", action="menuseries", text_blod=True, extra='serie', mediatype="tvshow",
viewcontent='tvshows', url=host + 'tvshows/', viewmode="movie_with_plot",
thumbnail=get_thumb("channels_tvshow.png")),
item.clone(title="Buscar", action="search", text_blod=True, extra='buscar',
thumbnail=get_thumb('search.png'), url=host)]
viewcontent='movies', viewmode="movie_with_plot"),
item.clone(title="Series", action="menuseries", text_blod=True, extra='serie', mediatype= "tvshow",
viewcontent='tvshows', url=host + 'tvshows/', viewmode="movie_with_plot")]
return itemlist
def menumovies(item):
logger.info()
itemlist = [item.clone(title="Todas", action="peliculas", text_blod=True, url=host + 'movies/',
viewcontent='movies', viewmode="movie_with_plot"),
itemlist = [item.clone(title="Todas", action="peliculas", text_blod=True,
viewcontent='movies', url=host + 'movies/', viewmode="movie_with_plot"),
item.clone(title="Más Vistas", action="peliculas", text_blod=True,
viewcontent='movies', url=host + 'tendencias/?get=movies', viewmode="movie_with_plot"),
item.clone(title="Más Valoradas", action="peliculas", text_blod=True, viewcontent='movies',
url=host + 'calificaciones/?get=movies', viewmode="movie_with_plot"),
item.clone(title="Géneros", action="generos", text_blod=True, viewmode="movie_with_plot",
viewcontent='movies', url=host)]
item.clone(title="Más Valoradas", action="peliculas", text_blod=True,
viewcontent='movies', url=host + 'calificaciones/?get=movies',
viewmode="movie_with_plot"), item.clone(title="Géneros", action="generos", text_blod=True,
viewcontent='movies', url=host,
viewmode="movie_with_plot")]
return itemlist
def menuseries(item):
logger.info()
itemlist = [item.clone(title="Todas", action="series", text_blod=True, extra='serie', mediatype="tvshow",
itemlist = [item.clone(title="Todas", action="series", text_blod=True, extra='serie', mediatype= "tvshow",
viewcontent='tvshows', url=host + 'tvshows/', viewmode="movie_with_plot"),
item.clone(title="Más Vistas", action="series", text_blod=True, extra='serie', mediatype="tvshow",
item.clone(title="Más Vistas", action="series", text_blod=True, extra='serie', mediatype= "tvshow",
viewcontent='tvshows', url=host + 'tendencias/?get=tv', viewmode="movie_with_plot"),
item.clone(title="Mejor Valoradas", action="series", text_blod=True, extra='serie', mediatype="tvshow",
item.clone(title="Mejor Valoradas", action="series", text_blod=True, extra='serie', mediatype= "tvshow",
viewcontent='tvshows', url=host + 'calificaciones/?get=tv', viewmode="movie_with_plot")]
return itemlist
@@ -97,14 +88,14 @@ def peliculas(item):
itemlist = []
url_next_page = ''
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", data)
datas = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", data)
patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?' # img, title
patron += '<div class="rating"><span class="[^"]+"></span>([^<]+).*?' # rating
patron += '<span class="quality">([^<]+)</span><a href="([^"]+)">.*?' # calidad, url
patron += '<span>([^<]+)</span>' # year
matches = scrapertools.find_multiple_matches(data, patron)
matches = scrapertools.find_multiple_matches(datas, patron)
# Pagination
if item.next_page != 'b':
@@ -124,6 +115,8 @@ def peliculas(item):
if 'Proximamente' not in calidad:
scrapedtitle = scrapedtitle.replace('Ver ', '').partition(' /')[0].partition(':')[0].replace(
'Español Latino', '').strip()
item.infoLabels['year'] = year
item.infoLabels['rating'] = rating
title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (scrapedtitle, year, calidad)
new_item = Item(channel=__channel__, action="findvideos", contentTitle=scrapedtitle,
@@ -134,9 +127,8 @@ def peliculas(item):
itemlist.append(new_item)
if url_next_page:
itemlist.append(Item(channel=__channel__, action="peliculas", title="» Siguiente »",
url=url_next_page, next_page=next_page, folder=True, text_blod=True,
thumbnail=get_thumb("next.png")))
itemlist.append(Item(channel=__channel__, action="peliculas", title=">> Página siguiente",
url=url_next_page, next_page=next_page, folder=True, text_blod=True))
for item in itemlist:
if item.infoLabels['plot'] == '':
@@ -158,92 +150,11 @@ def peliculas(item):
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = urlparse.urljoin(item.url, "?s={0}".format(texto))
try:
return sub_search(item)
# Catch the exception so a failing channel does not interrupt the global search
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
def sub_search(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)" />' # url, img, title
patron += '<span class="[^"]+">([^<]+)</span>.*?' # tipo
patron += '<span class="year">([^"]+)</span>.*?<div class="contenido"><p>([^<]+)</p>' # year, plot
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, tipo, year, plot in matches:
title = scrapedtitle
if tipo == 'Serie':
contentType = 'tvshow'
action = 'temporadas'
title += ' [COLOR red](' + tipo + ')[/COLOR]'
else:
contentType = 'movie'
action = 'findvideos'
title += ' [COLOR green](' + tipo + ')[/COLOR]'
itemlist.append(item.clone(title=title, url=scrapedurl, contentTitle=scrapedtitle, extra='buscar',
action=action, infoLabels={"year": year}, contentType=contentType,
thumbnail=scrapedthumbnail, text_color=color1, contentSerieName=scrapedtitle))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
paginacion = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
if paginacion:
itemlist.append(Item(channel=item.channel, action="sub_search",
title="» Siguiente »", url=paginacion, thumbnail=get_thumb("next.png")))
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = host + 'movies/'
elif categoria == 'infantiles':
item.url = host + "genre/animacion/"
else:
return []
itemlist = peliculas(item)
if itemlist[-1].title == "» Siguiente »":
itemlist.pop()
# Catch the exception so a failing channel does not interrupt the Novedades channel
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def generos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.cache_page(item.url)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
# logger.info(data)
data = scrapertools.find_single_match(data, 'Genero</a><ulclass="sub-menu">(.*?)</ul></li><li id')
@@ -269,7 +180,7 @@ def series(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
# logger.info(data)
# logger.info(datas)
patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?<a href="([^"]+)">'
@@ -289,29 +200,21 @@ def series(item):
url_next_page = urlparse.urljoin(item.url, matches_next_page[0])
for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
scrapedtitle = scrapedtitle.replace('&#8217;', "'")
itemlist.append(Item(channel=__channel__, title=scrapedtitle, extra='serie',
url=scrapedurl, thumbnail=scrapedthumbnail,
contentSerieName=scrapedtitle, show=scrapedtitle,
next_page=next_page, action="temporadas", contentType='tvshow'))
scrapedtitle = scrapedtitle.replace('Ver ',
'').replace(' Online HD',
'').replace('ver ', '').replace(' Online',
'').replace('&#8217;', "'")
itemlist.append(Item(channel=__channel__, title=scrapedtitle,
url=scrapedurl, thumbnail=scrapedthumbnail,
contentSerieName=scrapedtitle, show=scrapedtitle,
next_page=next_page, action="temporadas", contentType='tvshow'))
tmdb.set_infoLabels(itemlist, __modo_grafico__)
tmdb.set_infoLabels(itemlist, __modo_grafico__)
if url_next_page:
itemlist.append(Item(channel=__channel__, action="series", title="» Siguiente »", url=url_next_page,
next_page=next_page, thumbnail=get_thumb("next.png")))
for item in itemlist:
if item.infoLabels['plot'] == '':
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
# logger.info(data)
item.fanart = scrapertools.find_single_match(data,
"<meta property='og:image' content='([^']+)' />").replace(
'w780', 'original')
item.plot = scrapertools.find_single_match(data, '<h2>Sinopsis</h2><div class="wp-content"><p>([^<]+)</p>')
item.plot = scrapertools.htmlclean(item.plot)
itemlist.append(Item(channel=__channel__, action="series", title=">> Página Siguiente", url=url_next_page,
next_page=next_page))
return itemlist
@@ -321,17 +224,17 @@ def temporadas(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
# logger.info(data)
datas = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
logger.info(datas)
patron = '<span class="title">([^<]+)<i>.*?' # season numbers
patron += '<img src="([^"]+)"></a></div>' # chapters
matches = scrapertools.find_multiple_matches(data, patron)
matches = scrapertools.find_multiple_matches(datas, patron)
if len(matches) > 1:
for scrapedseason, scrapedthumbnail in matches:
scrapedseason = " ".join(scrapedseason.split())
temporada = scrapertools.find_single_match(scrapedseason, '(\d+)')
new_item = item.clone(action="episodios", season=temporada, thumbnail=scrapedthumbnail, extra='serie')
new_item = item.clone(action="episodios", season=temporada, thumbnail=scrapedthumbnail)
new_item.infoLabels['season'] = temporada
new_item.extra = ""
itemlist.append(new_item)
@@ -348,11 +251,6 @@ def temporadas(item):
itemlist.sort(key=lambda it: it.title)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))
return itemlist
else:
return episodios(item)
@@ -363,13 +261,13 @@ def episodios(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
# logger.info(data)
datas = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
# logger.info(datas)
patron = '<div class="imagen"><a href="([^"]+)">.*?' # episode url, img
patron += '<div class="numerando">(.*?)</div>.*?' # episode numbering
patron += '<a href="[^"]+">([^<]+)</a>' # episode titles
matches = scrapertools.find_multiple_matches(data, patron)
matches = scrapertools.find_multiple_matches(datas, patron)
for scrapedurl, scrapedtitle, scrapedname in matches:
scrapedtitle = scrapedtitle.replace('--', '0')
@@ -382,7 +280,7 @@ def episodios(item):
title = "%sx%s: %s" % (season, episode.zfill(2), scrapertools.unescape(scrapedname))
new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title,
contentType="episode", extra='serie')
contentType="episode")
if 'infoLabels' not in new_item:
new_item.infoLabels = {}
@@ -390,7 +288,6 @@ def episodios(item):
new_item.infoLabels['episode'] = episode.zfill(2)
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
# TODO: do not do this when adding to the video library
if not item.extra:
@@ -399,7 +296,7 @@ def episodios(item):
for i in itemlist:
if i.infoLabels['title']:
# If the episode has its own title, append it to the item title
i.title = "%sx%s: %s" % (i.infoLabels['season'], i.infoLabels['episode'], i.infoLabels['title'])
i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels['episode'], i.infoLabels['title'])
if i.infoLabels.has_key('poster_path'):
# If the episode has its own image, replace the poster with it
i.thumbnail = i.infoLabels['poster_path']
@@ -411,7 +308,7 @@ def episodios(item):
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))
text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))
return itemlist
@@ -423,42 +320,25 @@ def findvideos(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
# logger.info(data)
patron = '<div id="option-(\d+)" class="[^"]+"><iframe.*?src="([^"]+)".*?</iframe>' # lang, url
patron = '<div id="option-(\d+)" class="[^"]+"><iframe.*?src="([^"]+)".*?</iframe>' #
matches = re.compile(patron, re.DOTALL).findall(data)
for option, url in matches:
lang = scrapertools.find_single_match(data, '<li><a class="options" href="#option-%s">.*?<img '
'src="http://pedropolis.com/wp-content/themes/dooplay/assets/img'
'/flags/(\w+)' % option)
idioma = {'mx': '[COLOR cornflowerblue](LAT)[/COLOR]', 'pe': '[COLOR cornflowerblue](LAT)[/COLOR]',
'co': '[COLOR cornflowerblue](LAT)[/COLOR]', 'es': '[COLOR green](CAST)[/COLOR]',
'en': '[COLOR red](VOS)[/COLOR]', 'jp': '[COLOR green](VOS)[/COLOR]'}
if lang in idioma:
lang = idioma[lang]
# resolve shorturl redirects when one is found
if "bit.ly" in url:
url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
itemlist.append(item.clone(channel=__channel__, url=url, title=item.contentTitle,
action='play', language=lang))
itemlist.append(item.clone(channel=__channel__, url=url, title=item.contentTitle, action='play', language=lang))
itemlist = servertools.get_servers_itemlist(itemlist)
itemlist.sort(key=lambda it: it.language, reverse=False)
for x in itemlist:
if x.extra != 'directo':
x.thumbnail = item.thumbnail
x.title = "Ver en: [COLOR yellow](%s)[/COLOR] %s" % (x.server.title(), x.language)
if item.extra != 'serie' and item.extra != 'buscar':
x.title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % (
x.server.title(), x.quality, x.language)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'serie':
itemlist.append(Item(channel=__channel__,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url, action="add_pelicula_to_library",
thumbnail=get_thumb("videolibrary_movie.png"),
extra="findvideos", contentTitle=item.contentTitle))
x.title = "%s %s [COLOR yellow](%s)[/COLOR] [COLOR yellow](%s)[/COLOR]" % (
x.language, x.title, x.server.title(), x.quality)
return itemlist
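
For reference, a minimal standalone sketch of the flag-to-label mapping that findvideos() applies in the hunk above. The country codes and COLOR tags are taken from the idioma dict in the diff; the helper name label_for_option and the sample HTML snippet are illustrative assumptions, not part of the channel code.

import re

# Flag code -> formatted language label, mirroring the idioma dict above.
IDIOMA = {'mx': '[COLOR cornflowerblue](LAT)[/COLOR]',
          'pe': '[COLOR cornflowerblue](LAT)[/COLOR]',
          'co': '[COLOR cornflowerblue](LAT)[/COLOR]',
          'es': '[COLOR green](CAST)[/COLOR]',
          'en': '[COLOR red](VOS)[/COLOR]',
          'jp': '[COLOR green](VOS)[/COLOR]'}


def label_for_option(data, option):
    # Pull the flag code tied to a given player option and map it to a label.
    # Unknown codes fall through unchanged, as in the channel code.
    match = re.search(r'href="#option-%s">.*?/flags/(\w+)' % option, data)
    lang = match.group(1) if match else ''
    return IDIOMA.get(lang, lang)


if __name__ == "__main__":
    sample = '<li><a class="options" href="#option-1"><img src="/assets/img/flags/es.png"></a></li>'
    print(label_for_option(sample, 1))  # [COLOR green](CAST)[/COLOR]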