# NOTE(review): statements arrived collapsed onto long physical lines; code kept byte-identical, comment lines only added.
# -*- coding: utf-8 -*- # -*- Channel PedroPolis -*- # -*- Created for Alfa-addon -*- # -*- By the Alfa Develop Group -*- import re import sys import urllib import urlparse from channels import autoplay from channels import filtertools from core import httptools from core import scrapertools from core import servertools from core.item import Item from core import channeltools from core import tmdb from platformcode import config, logger from channelselector import get_thumb __channel__ = "pedropolis" host = "https://pedropolis.tv/" try: __modo_grafico__ = config.get_setting('modo_grafico', __channel__) __perfil__ = int(config.get_setting('perfil', __channel__)) except: __modo_grafico__ = True __perfil__ = 0 # Fijar perfil de color perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'], ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'], ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']] if __perfil__ < 3: color1, color2, color3, color4, color5 = perfil[__perfil__] else: color1 = color2 = color3 = color4 = color5 = "" headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'], ['Referer', host]] parameters = channeltools.get_channel_parameters(__channel__) fanart_host = parameters['fanart'] thumbnail_host = parameters['thumbnail'] IDIOMAS = {'Latino': 'LAT'} list_language = IDIOMAS.values() list_quality = [] list_servers = ['rapidvideo', 'streamango', 'fastplay', 'openload'] def mainlist(item): logger.info() autoplay.init(item.channel, list_servers, list_quality) itemlist = [item.clone(title="Peliculas", action="menumovies", text_blod=True, viewcontent='movies', viewmode="movie_with_plot", thumbnail=get_thumb("channels_movie.png")), item.clone(title="Series", action="menuseries", text_blod=True, extra='serie', mediatype="tvshow", viewcontent='tvshows', url=host + 'tvshows/', viewmode="movie_with_plot", thumbnail=get_thumb("channels_tvshow.png")), 
item.clone(title="Buscar", action="search", text_blod=True, extra='buscar', thumbnail=get_thumb('search.png'), url=host)] autoplay.show_option(item.channel, itemlist) return itemlist def menumovies(item): logger.info() itemlist = [item.clone(title="Todas", action="peliculas", text_blod=True, url=host + 'pelicula/', viewcontent='movies', viewmode="movie_with_plot"), item.clone(title="Más Vistas", action="peliculas", text_blod=True, viewcontent='movies', url=host + 'tendencias/?get=movies', viewmode="movie_with_plot"), item.clone(title="Mejor Valoradas", action="peliculas", text_blod=True, viewcontent='movies', url=host + 'tendencias/?get=movies', viewmode="movie_with_plot"), item.clone(title="Por año", action="p_portipo", text_blod=True, extra="Películas Por año", viewcontent='movies', url=host, viewmode="movie_with_plot"), item.clone(title="Por género", action="p_portipo", text_blod=True, extra="Categorías", viewcontent='movies', url=host, viewmode="movie_with_plot")] return itemlist def menuseries(item): logger.info() itemlist = [item.clone(title="Todas", action="series", text_blod=True, extra='serie', mediatype="tvshow", viewcontent='tvshows', url=host + 'serie/', viewmode="movie_with_plot"), item.clone(title="Más Vistas", action="series", text_blod=True, extra='serie', mediatype="tvshow", viewcontent='tvshows', url=host + 'tendencias/?get=tv', viewmode="movie_with_plot"), item.clone(title="Mejor Valoradas", action="series", text_blod=True, extra='serie', mediatype="tvshow", viewcontent='tvshows', url=host + 'calificaciones/?get=tv', viewmode="movie_with_plot")] return itemlist def p_portipo(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data bloque = scrapertools.find_single_match(data, '(?is)%s.*?' %item.extra) patron = 'href="([^"]+).*?' 
# ^ Above: Python-2 Kodi/Alfa channel prelude (imports, per-user colour profiles read
#   from config with a broad except fallback, host/header constants), followed by the
#   menu builders mainlist()/menumovies()/menuseries() and the head of p_portipo(),
#   which scrapes the section named by item.extra for href links ("Por año"/"Por género").
#   NOTE(review): "Mejor Valoradas" in menumovies() points at 'tendencias/?get=movies',
#   same URL as "Más Vistas" — menuseries() uses 'calificaciones/?get=tv' for the
#   equivalent entry, so this looks like a copy/paste slip; confirm against the site.
# NOTE(review): tail of p_portipo() (link extraction -> "peliculas" items), then
# peliculas() (movie lister: parses thumb/title/rating/quality/url/year, cleans the
# title, tags it with year+quality colours, enriches via tmdb and appends a
# "» Siguiente »" pagination item), search() (builds ?s= query, delegates to
# sub_search(), swallows and logs any exception so the global search keeps working),
# and the head of sub_search(). The 'patron' scraping strings of peliculas() and
# sub_search() are visibly truncated — their HTML fragments were stripped in transit,
# leaving unterminated literals at the line breaks below. Code kept byte-identical;
# the patterns must be restored from the original channel source before this can run.
patron += '>([^"<]+)' matches = scrapertools.find_multiple_matches(bloque, patron) for scrapedurl, scrapedtitle in matches: itemlist.append(item.clone(action = "peliculas", title = scrapedtitle, url = scrapedurl )) return itemlist def peliculas(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data patron = '
([^.*?' # img, title patron += '
([^<]+).*?' # rating patron += '([^<]+)
.*?' # calidad, url patron += '([^<]+).*?' # year matches = scrapertools.find_multiple_matches(data, patron) for scrapedthumbnail, scrapedtitle, rating, quality, scrapedurl, year in matches: scrapedtitle = scrapedtitle.replace('Ver ', '').partition(' /')[0].partition(':')[0].replace( 'Español Latino', '').strip() title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (scrapedtitle, year, quality) itemlist.append(Item(channel=item.channel, action="findvideos", contentTitle=scrapedtitle, infoLabels={"year":year, "rating":rating}, thumbnail=scrapedthumbnail, url=scrapedurl, quality=quality, title=title)) tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) pagination = scrapertools.find_single_match(data, '') if pagination: itemlist.append(Item(channel=__channel__, action="peliculas", title="» Siguiente »", url=pagination, folder=True, text_blod=True, thumbnail=get_thumb("next.png"))) return itemlist def search(item, texto): logger.info() texto = texto.replace(" ", "+") item.url = urlparse.urljoin(item.url, "?s={0}".format(texto)) try: return sub_search(item) # Se captura la excepción, para no interrumpir al buscador global si un canal falla except: import sys for line in sys.exc_info(): logger.error("{0}".format(line)) return [] def sub_search(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data bloque = scrapertools.find_single_match(data, 'Resultados encontrados.*?class="widget widget_fbw_id') patron = '(?is).*?' 
# NOTE(review): tail of sub_search() (its pagination item for action "sub_search"),
# then newest(categoria) — the "novedades" entry point: maps 'peliculas'/'infantiles'/
# 'terror' to site URLs, reuses peliculas(), pops the trailing "» Siguiente »" item and
# swallows+logs any exception so the aggregated news view never breaks — and the head
# of series(). The series() 'patron' literal is truncated at the end of this line
# (stripped HTML, string left unterminated). Code kept byte-identical.
patron += '') if pagination: itemlist.append(Item(channel=item.channel, action="sub_search", title="» Siguiente »", url=pagination, thumbnail=get_thumb("next.png"))) return itemlist def newest(categoria): logger.info() itemlist = [] item = Item() try: if categoria == 'peliculas': item.url = host + 'movies/' elif categoria == 'infantiles': item.url = host + "genre/animacion/" elif categoria == 'terror': item.url = host + "genre/terror/" else: return [] itemlist = peliculas(item) if itemlist[-1].title == "» Siguiente »": itemlist.pop() # Se captura la excepción, para no interrumpir al canal novedades si un canal falla except: import sys for line in sys.exc_info(): logger.error("{0}".format(line)) return [] return itemlist def series(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data patron = '
# NOTE(review): corrupted seam — the previous line ends inside an unterminated string
# literal (stripped HTML), and this span opens with its orphaned closing quote. It
# holds the series() pagination tail, temporadas() — lists a show's seasons when the
# page has more than one (extracts season number + poster, fills infoLabels['season'],
# enriches via tmdb, uses Python-2 dict.has_key, sorts by title and appends the
# "Añadir esta serie a la videoteca" entry; with a single season it falls through to
# episodios(item)) — and the head of episodios(), whose pattern is also truncated.
# Code kept byte-identical; restore the patterns from the original channel source.
') if pagination: itemlist.append(Item(channel=__channel__, action="series", title="» Siguiente »", url=pagination, thumbnail=get_thumb("next.png"))) return itemlist def temporadas(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data patron = '([^<]+).*?' # season patron += '
' # img matches = scrapertools.find_multiple_matches(data, patron) if len(matches) > 1: for scrapedseason, scrapedthumbnail in matches: scrapedseason = " ".join(scrapedseason.split()) temporada = scrapertools.find_single_match(scrapedseason, '(\d+)') new_item = item.clone(action="episodios", season=temporada, thumbnail=scrapedthumbnail, extra='serie') new_item.infoLabels['season'] = temporada new_item.extra = "" itemlist.append(new_item) tmdb.set_infoLabels(itemlist, __modo_grafico__) for i in itemlist: i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle']) if i.infoLabels['title']: # Si la temporada tiene nombre propio añadírselo al titulo del item i.title += " - %s" % (i.infoLabels['title']) if i.infoLabels.has_key('poster_path'): # Si la temporada tiene poster propio remplazar al de la serie i.thumbnail = i.infoLabels['poster_path'] itemlist.sort(key=lambda it: it.title) if config.get_videolibrary_support() and len(itemlist) > 0: itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url, action="add_serie_to_library", extra="episodios", show=item.show, category="Series", text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host)) return itemlist else: return episodios(item) def episodios(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data patron = '
# NOTE(review): corrupted seam — continuation of the episodios() 'patron' string whose
# HTML fragments were stripped (url / numbering / episode-title groups survive only as
# trailing comments). episodios() then: normalises '--' to '0', splits "S - E" with a
# regex, skips rows not matching item.infoLabels['season'], builds "SxEE: name" items
# for findvideos, enriches via tmdb when not adding to the videolibrary (Python-2
# has_key), sorts by episode number per the 'orden_episodios' setting, and appends the
# "Añadir esta serie a la videoteca" entry. findvideos() begins at the end of this
# span and is cut off mid-statement ('data =') — the remainder lies outside this
# chunk. Code kept byte-identical; restore the patterns from the original source.
.*?' # url patron += '
(.*?)
.*?' # numerando cap patron += '
([^<]+)' # title de episodios matches = scrapertools.find_multiple_matches(data, patron) for scrapedurl, scrapedtitle, scrapedname in matches: scrapedtitle = scrapedtitle.replace('--', '0') patron = '(\d+) - (\d+)' match = re.compile(patron, re.DOTALL).findall(scrapedtitle) season, episode = match[0] if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season): continue title = "%sx%s: %s" % (season, episode.zfill(2), scrapertools.unescape(scrapedname)) new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title, contentType="episode", extra='serie') if 'infoLabels' not in new_item: new_item.infoLabels = {} new_item.infoLabels['season'] = season new_item.infoLabels['episode'] = episode.zfill(2) itemlist.append(new_item) # TODO no hacer esto si estamos añadiendo a la videoteca if not item.extra: # Obtenemos los datos de todos los capítulos de la temporada mediante multihilos tmdb.set_infoLabels(itemlist, __modo_grafico__) for i in itemlist: if i.infoLabels['title']: # Si el capitulo tiene nombre propio añadírselo al titulo del item i.title = "%sx%s: %s" % (i.infoLabels['season'], i.infoLabels['episode'], i.infoLabels['title']) if i.infoLabels.has_key('poster_path'): # Si el capitulo tiene imagen propia remplazar al poster i.thumbnail = i.infoLabels['poster_path'] itemlist.sort(key=lambda it: int(it.infoLabels['episode']), reverse=config.get_setting('orden_episodios', __channel__)) tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) # Opción "Añadir esta serie a la videoteca" if config.get_videolibrary_support() and len(itemlist) > 0: itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url, action="add_serie_to_library", extra="episodios", show=item.show, category="Series", text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host)) return itemlist def findvideos(item): logger.info() itemlist = [] data = 
httptools.downloadpage(item.url).data patron = '