From ce8c4580efb79761f9b6ecd889fb5957e9fbf4f7 Mon Sep 17 00:00:00 2001
From: Alfa-Addon
Date: Fri, 29 Sep 2017 21:00:42 -0400
Subject: [PATCH] fix language labels

---
 plugin.video.alfa/channels/pedropolis.py | 214 +++++------------------
 1 file changed, 47 insertions(+), 167 deletions(-)

diff --git a/plugin.video.alfa/channels/pedropolis.py b/plugin.video.alfa/channels/pedropolis.py
index c5a01871..aa4df645 100644
--- a/plugin.video.alfa/channels/pedropolis.py
+++ b/plugin.video.alfa/channels/pedropolis.py
@@ -15,7 +15,6 @@
 from core.item import Item
 from core import channeltools
 from core import tmdb
 from platformcode import config, logger
-from channelselector import get_thumb
 
 __channel__ = "pedropolis"
@@ -44,49 +43,41 @@
 parameters = channeltools.get_channel_parameters(__channel__)
 fanart_host = parameters['fanart']
 thumbnail_host = parameters['thumbnail']
+thumbnail = "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/%s.png"
+
 
 def mainlist(item):
     logger.info()
-    itemlist = [item.clone(title="Peliculas", action="menumovies", text_blod=True,
-                           viewcontent='movies', viewmode="movie_with_plot", thumbnail=get_thumb("channels_movie.png")),
-
-                item.clone(title="Series", action="menuseries", text_blod=True, extra='serie', mediatype="tvshow",
-                           viewcontent='tvshows', url=host + 'tvshows/', viewmode="movie_with_plot",
-                           thumbnail=get_thumb("channels_tvshow.png")),
-
-                item.clone(title="Buscar", action="search", text_blod=True, extra='buscar',
-                           thumbnail=get_thumb('search.png'), url=host)]
+                           viewcontent='movies', viewmode="movie_with_plot"),
+                item.clone(title="Series", action="menuseries", text_blod=True, extra='serie', mediatype= "tvshow",
+                           viewcontent='tvshows', url=host + 'tvshows/', viewmode="movie_with_plot")]
 
     return itemlist
 
 
 def menumovies(item):
     logger.info()
-    itemlist = [item.clone(title="Todas", action="peliculas", text_blod=True, url=host + 'movies/',
-                           viewcontent='movies', viewmode="movie_with_plot"),
-
+    itemlist = [item.clone(title="Todas", action="peliculas", text_blod=True,
+                           viewcontent='movies', url=host + 'movies/', viewmode="movie_with_plot"),
                 item.clone(title="Más Vistas", action="peliculas", text_blod=True, viewcontent='movies',
                            url=host + 'tendencias/?get=movies', viewmode="movie_with_plot"),
-
-                item.clone(title="Más Valoradas", action="peliculas", text_blod=True, viewcontent='movies',
-                           url=host + 'calificaciones/?get=movies', viewmode="movie_with_plot"),
-
-                item.clone(title="Géneros", action="generos", text_blod=True, viewmode="movie_with_plot",
-                           viewcontent='movies', url=host)]
+                item.clone(title="Más Valoradas", action="peliculas", text_blod=True,
+                           viewcontent='movies', url=host + 'calificaciones/?get=movies',
+                           viewmode="movie_with_plot"), item.clone(title="Géneros", action="generos", text_blod=True,
+                                                                   viewcontent='movies', url=host,
+                                                                   viewmode="movie_with_plot")]
 
     return itemlist
 
 
 def menuseries(item):
     logger.info()
-    itemlist = [item.clone(title="Todas", action="series", text_blod=True, extra='serie', mediatype="tvshow",
+    itemlist = [item.clone(title="Todas", action="series", text_blod=True, extra='serie', mediatype= "tvshow",
                            viewcontent='tvshows', url=host + 'tvshows/', viewmode="movie_with_plot"),
-
-                item.clone(title="Más Vistas", action="series", text_blod=True, extra='serie', mediatype="tvshow",
+                item.clone(title="Más Vistas", action="series", text_blod=True, extra='serie', mediatype= "tvshow",
                            viewcontent='tvshows', url=host + 'tendencias/?get=tv', viewmode="movie_with_plot"),
-
-                item.clone(title="Mejor Valoradas", action="series", text_blod=True, extra='serie', mediatype="tvshow",
+                item.clone(title="Mejor Valoradas", action="series", text_blod=True, extra='serie', mediatype= "tvshow",
                            viewcontent='tvshows', url=host + 'calificaciones/?get=tv', viewmode="movie_with_plot")]
 
     return itemlist
@@ -97,14 +88,14 @@ def peliculas(item):
     itemlist = []
     url_next_page = ''
     data = httptools.downloadpage(item.url).data
-    data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
+    datas = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
 
     patron = '([^.*?'  # img, title
     patron += '([^<]+).*?'  # rating
     patron += '([^<]+).*?'  # calidad, url
     patron += '([^<]+)'  # year
-    matches = scrapertools.find_multiple_matches(data, patron)
+    matches = scrapertools.find_multiple_matches(datas, patron)
 
     # Paginación
     if item.next_page != 'b':
@@ -124,6 +115,8 @@ def peliculas(item):
         if 'Proximamente' not in calidad:
             scrapedtitle = scrapedtitle.replace('Ver ', '').partition(' /')[0].partition(':')[0].replace(
                 'Español Latino', '').strip()
+            item.infoLabels['year'] = year
+            item.infoLabels['rating'] = rating
             title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (scrapedtitle, year, calidad)
 
             new_item = Item(channel=__channel__, action="findvideos", contentTitle=scrapedtitle,
@@ -134,9 +127,8 @@ def peliculas(item):
             itemlist.append(new_item)
 
     if url_next_page:
-        itemlist.append(Item(channel=__channel__, action="peliculas", title="» Siguiente »",
-                             url=url_next_page, next_page=next_page, folder=True, text_blod=True,
-                             thumbnail=get_thumb("next.png")))
+        itemlist.append(Item(channel=__channel__, action="peliculas", title=">> Página siguiente",
+                             url=url_next_page, next_page=next_page, folder=True, text_blod=True))
 
     for item in itemlist:
         if item.infoLabels['plot'] == '':
@@ -158,92 +150,11 @@
     return itemlist
 
 
-def search(item, texto):
-    logger.info()
-
-    texto = texto.replace(" ", "+")
-    item.url = urlparse.urljoin(item.url, "?s={0}".format(texto))
-
-    try:
-        return sub_search(item)
-
-    # Se captura la excepción, para no interrumpir al buscador global si un canal falla
-    except:
-        import sys
-        for line in sys.exc_info():
-            logger.error("{0}".format(line))
-        return []
-
-
-def sub_search(item):
-    logger.info()
-
-    itemlist = []
-    data = httptools.downloadpage(item.url).data
-    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
-
-    patron = '([^'  # url, img, title
-    patron += '([^<]+).*?'  # tipo
-    patron += '([^"]+).*?([^<]+)'  # year, plot
-
-    matches = re.compile(patron, re.DOTALL).findall(data)
-
-    for scrapedurl, scrapedthumbnail, scrapedtitle, tipo, year, plot in matches:
-        title = scrapedtitle
-        if tipo == 'Serie':
-            contentType = 'tvshow'
-            action = 'temporadas'
-            title += ' [COLOR red](' + tipo + ')[/COLOR]'
-        else:
-            contentType = 'movie'
-            action = 'findvideos'
-            title += ' [COLOR green](' + tipo + ')[/COLOR]'
-
-        itemlist.append(item.clone(title=title, url=scrapedurl, contentTitle=scrapedtitle, extra='buscar',
-                                   action=action, infoLabels={"year": year}, contentType=contentType,
-                                   thumbnail=scrapedthumbnail, text_color=color1, contentSerieName=scrapedtitle))
-
-    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
-    paginacion = scrapertools.find_single_match(data, '')
-
-    if paginacion:
-        itemlist.append(Item(channel=item.channel, action="sub_search",
-                             title="» Siguiente »", url=paginacion, thumbnail=get_thumb("next.png")))
-
-    return itemlist
-
-
-def newest(categoria):
-    logger.info()
-    itemlist = []
-    item = Item()
-    try:
-        if categoria == 'peliculas':
-            item.url = host + 'movies/'
-        elif categoria == 'infantiles':
-            item.url = host + "genre/animacion/"
-        else:
-            return []
-
-        itemlist = peliculas(item)
-        if itemlist[-1].title == "» Siguiente »":
-            itemlist.pop()
-
-    # Se captura la excepción, para no interrumpir al canal novedades si un canal falla
-    except:
-        import sys
-        for line in sys.exc_info():
-            logger.error("{0}".format(line))
-        return []
-
-    return itemlist
-
-
 def generos(item):
     logger.info()
     itemlist = []
-    data = httptools.downloadpage(item.url).data
+    data = scrapertools.cache_page(item.url)
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
     # logger.info(data)
     data = scrapertools.find_single_match(data, 'Genero(.*?)
 ", "", data)
-    # logger.info(datas)
     patron = '([^.*?'
@@ -289,29 +200,21 @@ def series(item):
             url_next_page = urlparse.urljoin(item.url, matches_next_page[0])
 
     for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
-        scrapedtitle = scrapedtitle.replace('’', "'")
-        itemlist.append(Item(channel=__channel__, title=scrapedtitle, extra='serie',
-                             url=scrapedurl, thumbnail=scrapedthumbnail,
-                             contentSerieName=scrapedtitle, show=scrapedtitle,
-                             next_page=next_page, action="temporadas", contentType='tvshow'))
+        scrapedtitle = scrapedtitle.replace('Ver ', '').replace(' Online HD', '').replace('ver ', '').replace(' Online', '').replace('’', "'")
+        itemlist.append(Item(channel=__channel__, title=scrapedtitle,
+                             url=scrapedurl, thumbnail=scrapedthumbnail,
+                             contentSerieName=scrapedtitle, show=scrapedtitle,
+                             next_page=next_page, action="temporadas", contentType='tvshow'))
 
     tmdb.set_infoLabels(itemlist, __modo_grafico__)
     tmdb.set_infoLabels(itemlist, __modo_grafico__)
 
     if url_next_page:
-        itemlist.append(Item(channel=__channel__, action="series", title="» Siguiente »", url=url_next_page,
-                             next_page=next_page, thumbnail=get_thumb("next.png")))
-
-    for item in itemlist:
-        if item.infoLabels['plot'] == '':
-            data = httptools.downloadpage(item.url).data
-            data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
-            # logger.info(data)
-            item.fanart = scrapertools.find_single_match(data,
-                                                         "").replace(
-                'w780', 'original')
-            item.plot = scrapertools.find_single_match(data, 'Sinopsis
 
     '  # capítulos
-    matches = scrapertools.find_multiple_matches(data, patron)
+    matches = scrapertools.find_multiple_matches(datas, patron)
 
     if len(matches) > 1:
         for scrapedseason, scrapedthumbnail in matches:
             scrapedseason = " ".join(scrapedseason.split())
             temporada = scrapertools.find_single_match(scrapedseason, '(\d+)')
-            new_item = item.clone(action="episodios", season=temporada, thumbnail=scrapedthumbnail, extra='serie')
+            new_item = item.clone(action="episodios", season=temporada, thumbnail=scrapedthumbnail)
             new_item.infoLabels['season'] = temporada
             new_item.extra = ""
             itemlist.append(new_item)
@@ -348,11 +251,6 @@
 
         itemlist.sort(key=lambda it: it.title)
 
-        if config.get_videolibrary_support() and len(itemlist) > 0:
-            itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
-                                 action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
-                                 text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))
-
         return itemlist
     else:
         return episodios(item)
@@ -363,13 +261,13 @@ def episodios(item):
 
     itemlist = []
     data = httptools.downloadpage(item.url).data
-    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
-    # logger.info(data)
+    datas = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
+    # logger.info(datas)
 
     patron = '.*?'  # url cap, img
     patron += '(.*?).*?'  # numerando cap
     patron += '([^<]+)'  # title de episodios
-    matches = scrapertools.find_multiple_matches(data, patron)
+    matches = scrapertools.find_multiple_matches(datas, patron)
 
     for scrapedurl, scrapedtitle, scrapedname in matches:
         scrapedtitle = scrapedtitle.replace('--', '0')
@@ -382,7 +280,7 @@
 
         title = "%sx%s: %s" % (season, episode.zfill(2), scrapertools.unescape(scrapedname))
         new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title,
-                              contentType="episode", extra='serie')
+                              contentType="episode")
 
         if 'infoLabels' not in new_item:
             new_item.infoLabels = {}
@@ -390,7 +288,6 @@
         new_item.infoLabels['episode'] = episode.zfill(2)
         itemlist.append(new_item)
 
-    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
 
     # TODO no hacer esto si estamos añadiendo a la videoteca
     if not item.extra:
@@ -399,7 +296,7 @@
         for i in itemlist:
             if i.infoLabels['title']:
                 # Si el capitulo tiene nombre propio añadírselo al titulo del item
-                i.title = "%sx%s: %s" % (i.infoLabels['season'], i.infoLabels['episode'], i.infoLabels['title'])
+                i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels['episode'], i.infoLabels['title'])
             if i.infoLabels.has_key('poster_path'):
                 # Si el capitulo tiene imagen propia remplazar al poster
                 i.thumbnail = i.infoLabels['poster_path']
@@ -411,7 +308,7 @@
 
     if config.get_videolibrary_support() and len(itemlist) > 0:
         itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
                              action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
-                             text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))
+                             text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))
 
     return itemlist
@@ -423,42 +320,25 @@ def findvideos(item):
 
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
     # logger.info(data)
-    patron = ''  # lang, url
+    patron = ''  #
     matches = re.compile(patron, re.DOTALL).findall(data)
 
     for option, url in matches:
         lang = scrapertools.find_single_match(data, '.*?
 0 and item.extra != 'serie':
-        itemlist.append(Item(channel=__channel__,
-                             title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
-                             url=item.url, action="add_pelicula_to_library",
-                             thumbnail=get_thumb("videolibrary_movie.png"),
-                             extra="findvideos", contentTitle=item.contentTitle))
+        x.title = "%s %s [COLOR yellow](%s)[/COLOR] [COLOR yellow](%s)[/COLOR]" % (
+            x.language, x.title, x.server.title(), x.quality)
 
     return itemlist
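
Note: the snippet below is only an illustrative sketch of the language-label formatting this patch leaves in findvideos(); the stand-in Item class and the sample language/server/quality values are assumptions for the example, not part of the channel code.

    # Illustrative only: a minimal stand-in Item exposing the attributes findvideos() reads.
    class Item(object):
        def __init__(self, **kwargs):
            self.__dict__.update(kwargs)

    itemlist = [Item(title="Pelicula de prueba", language="Latino", server="openload", quality="HD")]

    for x in itemlist:
        # Same format string the patched findvideos() uses to build the visible label.
        x.title = "%s %s [COLOR yellow](%s)[/COLOR] [COLOR yellow](%s)[/COLOR]" % (
            x.language, x.title, x.server.title(), x.quality)

    print(itemlist[0].title)
    # Latino Pelicula de prueba [COLOR yellow](Openload)[/COLOR] [COLOR yellow](HD)[/COLOR]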