# -*- coding: utf-8 -*-
# -*- Channel PelisHD24 -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import re
import sys
import urlparse

from channels import autoplay
from lib import generictools
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from core import channeltools
from core import tmdb
from platformcode import config, logger
from channelselector import get_thumb

__channel__ = "pelishd24"
host = "https://pelishd24.com/"

try:
    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
    __perfil__ = int(config.get_setting('perfil', __channel__))
except:
    __modo_grafico__ = True
    __perfil__ = 0

# Set the colour profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
          ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
          ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]

if __perfil__ < 3:
    color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
    color1 = color2 = color3 = color4 = color5 = ""

headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
           ['Referer', host]]

parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']

IDIOMAS = {'Latino': 'LAT', 'Castellano': 'CAST', 'English': 'VOS'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'openload', 'streamcherry']


def mainlist(item):
    logger.info()

    autoplay.init(item.channel, list_servers, list_quality)

    itemlist = [item.clone(title="Peliculas", action="menumovies", text_blod=True, viewcontent='movies',
                           viewmode="movie_with_plot", thumbnail=get_thumb('movies', auto=True)),

                item.clone(title="Series", action="series", extra='serie', url=host + 'series/',
                           viewmode="movie_with_plot", text_blod=True, viewcontent='movies',
                           thumbnail=get_thumb('tvshows', auto=True), page=0),

                item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
                           text_blod=True, url=host, page=0)]

    autoplay.show_option(item.channel, itemlist)

    return itemlist


def menumovies(item):
    logger.info()

    itemlist = [item.clone(title="Todas", action="peliculas", thumbnail=get_thumb('all', auto=True),
                           text_blod=True, page=0, viewcontent='movies',
                           url=host + 'peliculas/', viewmode="movie_with_plot"),

                item.clone(title="Estrenos", action="peliculas", thumbnail=get_thumb('estrenos', auto=True),
                           text_blod=True, page=0, viewcontent='movies',
                           url=host + '?s=trfilter&trfilter=1&years=2018', viewmode="movie_with_plot"),

                item.clone(title="Más Vistas", action="peliculas", thumbnail=get_thumb('more watched', auto=True),
                           text_blod=True, page=0, viewcontent='movies',
                           url=host + 'mas-vistas/', viewmode="movie_with_plot"),

                item.clone(title="Más Votadas", action="peliculas", thumbnail=get_thumb('more voted', auto=True),
                           text_blod=True, page=0, viewcontent='movies',
                           url=host + 'peliculas-mas-votadas/', viewmode="movie_with_plot"),

                item.clone(title="Géneros", action="genres_atoz", thumbnail=get_thumb('genres', auto=True),
                           text_blod=True, page=0, viewcontent='movies',
                           url=host, viewmode="movie_with_plot"),

                item.clone(title="A-Z", action="genres_atoz", thumbnail=get_thumb('year', auto=True),
                           text_blod=True, page=0, viewcontent='movies',
                           url=host, viewmode="movie_with_plot"),

                item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
                           text_blod=True, url=host, page=0, extra='buscarP')]

    return itemlist

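# search() builds the query URL from the site's "?s=" search parameter and
# reuses peliculas() to parse the result listing; errors are logged instead
# of breaking the global search.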
def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = urlparse.urljoin(item.url, "?s={0}".format(texto))

    try:
        return peliculas(item)
    # Catch the exception so that one failing channel does not break the global search
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []


def peliculas(item):
    logger.info()
    itemlist = []
    action = ''
    contentType = ''

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
    data = scrapertools.decodeHtmlentities(data)

    patron = '.*?'  # url
    patron += ''

    next_page = scrapertools.find_single_match(data, '')  # "next page" link
    if next_page:
        itemlist.append(item.clone(url=next_page, page=0, title="» Siguiente »", text_color=color3))

    return itemlist

", "", data) data = scrapertools.decodeHtmlentities(data) if item.title == "A-Z": patron_todas = '' action = 'atoz' else: patron_todas = 'GENERO(.*?)' action = 'peliculas' data = scrapertools.find_single_match(data, patron_todas) patron = '([^<]+)' # url, title matches = scrapertools.find_multiple_matches(data, patron) for scrapedurl, scrapedtitle in matches: itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action=action)) return itemlist def atoz(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t|\(.*?\)| |
", "", data) data = scrapertools.decodeHtmlentities(data) patron = '
.*?' # url patron += '') if next_page: itemlist.append(item.clone(url=next_page, page=0, title="» Siguiente »", text_color=color3)) return itemlist def temporadas(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t| |
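# temporadas() lists the seasons of a series; when the page does not expose
# more than one season block it falls straight through to episodios().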
", "", data) patron = '
0: itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url, action="add_serie_to_library", extra="episodios", show=item.show, category="Series", text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host)) return itemlist else: return episodios(item) def episodios(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t| |
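# episodios() builds one "NxMM: name" item per episode, keeps only the season
# requested in item.infoLabels when one is set, and completes the metadata
# through TMDB before offering the video-library shortcut.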
", "", data) patron = '
([^<]+)' # title de episodios matches = scrapertools.find_multiple_matches(data, patron) for scrapedurl, scrapedtitle, scrapedname in matches: scrapedtitle = scrapedtitle.replace('--', '0') patron = '(\d+)x(\d+)' match = re.compile(patron, re.DOTALL).findall(scrapedtitle) season, episode = match[0] if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season): continue title = "%sx%s: %s" % (season, episode.zfill(2), scrapedname) new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title, contentType="episode", extra='episodios') if 'infoLabels' not in new_item: new_item.infoLabels = {} new_item.infoLabels['season'] = season new_item.infoLabels['episode'] = episode.zfill(2) itemlist.append(new_item) # TODO no hacer esto si estamos añadiendo a la videoteca if not item.extra: # Obtenemos los datos de todos los capitulos de la temporada mediante multihilos tmdb.set_infoLabels(itemlist, __modo_grafico__) for i in itemlist: if i.infoLabels['title']: # Si el capitulo tiene nombre propio añadirselo al titulo del item i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels[ 'episode'], i.infoLabels['title']) if i.infoLabels.has_key('poster_path'): # Si el capitulo tiene imagen propia remplazar al poster i.thumbnail = i.infoLabels['poster_path'] itemlist.sort(key=lambda it: int(it.infoLabels['episode']), reverse=config.get_setting('orden_episodios', __channel__)) tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) # Opción "Añadir esta serie a la videoteca" if config.get_videolibrary_support() and len(itemlist) > 0: itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url, action="add_serie_to_library", extra="episodios", show=item.show, category="Series", text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host)) return itemlist def findvideos(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t|amp;|#038;|\(.*?\)|\s{2}| ", "", data) data = scrapertools.decodeHtmlentities(data) patron = 'data-tplayernv="Opt(.*?)">[^"<]+(.*?)' # option, servername, lang - quality matches = re.compile(patron, re.DOTALL).findall(data) for option, quote in matches: patron = '(.*?) -([^<]+)