# -*- coding: utf-8 -*-
# -*- Channel CanalPelis -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
#
# NOTE(review): this copy of the channel was recovered from a paste that went
# through an HTML-stripping / entity-decoding pass: every string literal that
# contained '<...>' markup lost those characters (several scraper regexes are
# truncated mid-string), '&nbsp;' was decoded to a plain space, and the file
# is cut off inside findvideos().  Every span that could not be recovered is
# marked "TODO(review)" below and MUST be restored from a pristine copy of
# the channel before this file is shipped.  Everything NOT so marked is
# reproduced from the recovered source.

import re
import sys
import urllib     # noqa: F401 - kept from the original import block
import urlparse   # Python 2 only (Alfa ran on Kodi's py2 interpreter)

from channels import autoplay
from channels import filtertools   # noqa: F401 - kept from the original import block
from core import httptools
from core import scrapertools
from core import servertools       # noqa: F401 - kept from the original import block
from core.item import Item
from core import channeltools
from core import tmdb              # noqa: F401 - kept from the original import block
from platformcode import config, logger
from channelselector import get_thumb

__channel__ = "pelis24"
host = "https://www.pelis24.in/"

# Read user settings; fall back to safe defaults if the settings store is
# unavailable (e.g. first run).
try:
    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
    __perfil__ = int(config.get_setting('perfil', __channel__))
except:
    __modo_grafico__ = True
    __perfil__ = 0

# Colour profiles: [title, highlight, plot, error, gold] per profile index.
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
          ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
          ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]

if __perfil__ < 3:
    color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
    color1 = color2 = color3 = color4 = color5 = ""

headers = [['User-Agent',
            'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
           ['Referer', host]]

parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']

IDIOMAS = {'Latino': 'LAT', 'Castellano': 'CAST'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'openload', 'streamcherry']


def mainlist(item):
    """Build the channel's root menu (fully recovered from the source)."""
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = [item.clone(title="Novedades", action="peliculas",
                           thumbnail=get_thumb('newest', auto=True),
                           text_blod=True, page=0, viewcontent='movies',
                           url=host + 'movies/', viewmode="movie_with_plot"),
                item.clone(title="Tendencias", action="peliculas",
                           thumbnail=get_thumb('newest', auto=True),
                           text_blod=True, page=0, viewcontent='movies',
                           url=host + 'tendencias/?get=movies',
                           viewmode="movie_with_plot"),
                item.clone(title="Estrenos", action="peliculas",
                           thumbnail=get_thumb('estrenos', auto=True),
                           text_blod=True, page=0, viewcontent='movies',
                           url=host + 'genre/estrenos/',
                           viewmode="movie_with_plot"),
                item.clone(title="Géneros", action="genresYears",
                           thumbnail=get_thumb('genres', auto=True),
                           text_blod=True, page=0, viewcontent='movies',
                           url=host, viewmode="movie_with_plot"),
                item.clone(title="Buscar", action="search",
                           thumbnail=get_thumb('search', auto=True),
                           text_blod=True, url=host, page=0)]
    autoplay.show_option(item.channel, itemlist)
    return itemlist


def search(item, texto):
    """Entry point for the global search (fully recovered from the source)."""
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = urlparse.urljoin(item.url, "?s={0}".format(texto))
    try:
        return sub_search(item)
    # The exception is swallowed so one failing channel does not break the
    # global search aggregator; errors are still logged.
    except:
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []


def sub_search(item):
    """Scrape the search-results page into movie items.

    Mostly recovered; the whitespace-strip pattern and the pagination tail
    are reconstructed / flagged below.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Reconstructed: the recovered text showed r"\n|\r|\t| |" + stripped
    # markup, which is the standard Alfa cleanup pattern below — TODO confirm.
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    # TODO(review): the delimiter pattern contained markup that was stripped;
    # only 'Archivos (.*?)resppages' survived — restore the full pattern.
    data = scrapertools.find_single_match(data, 'Archivos (.*?)resppages')
    patron = 'img alt="([^"]+)".*?'       # title
    patron += 'src="([^"]+)".*?'          # thumbnail
    patron += 'href="([^"]+)".*?'         # url
    patron += 'fechaestreno">([^<]+)'     # release year
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedtitle, scrapedthumbnail, scrapedurl, year in matches:
        # Series results are excluded from the movie search listing.
        if 'tvshows' not in scrapedurl:
            itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl,
                                       contentTitle=scrapedtitle,
                                       action="findvideos",
                                       infoLabels={"year": year},
                                       thumbnail=scrapedthumbnail,
                                       text_color=color3))
    # TODO(review): the pagination pattern and the "next page" item that was
    # appended here were lost in the corrupted copy; restore before use.
    paginacion = scrapertools.find_single_match(data, "\d+(.*?)")
    return itemlist


def genresYears(item):
    """List the genre menu.

    TODO(review): reconstructed skeleton — the function header, the 'Géneros'
    branch pattern and the per-genre pattern were lost; only the CATEGORIAS
    branch pattern survived.  Restore from a pristine copy.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    if item.title == 'Géneros':  # TODO(review): condition reconstructed - verify
        patron_todas = ''        # TODO(review): pattern lost in corruption
    else:
        patron_todas = '(?is)data-label="CATEGORIAS">(.*?)show-bigmenu'
    data = scrapertools.find_single_match(data, patron_todas)
    patron = ''  # TODO(review): per-genre 'url / name' pattern lost
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:  # TODO(review): tuple order unverified
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl,
                                   action="peliculas", page=0,
                                   text_color=color3))
    return itemlist


def peliculas(item):
    """List movies for a section/genre URL.

    TODO(review): the listing pattern and the per-movie loop were lost in the
    corrupted copy; only the cleanup call and the next-page handling survive.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Reconstructed standard Alfa cleanup — TODO confirm against pristine copy.
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = ''  # TODO(review): listing pattern (url/thumb/title/year) lost
    matches = re.compile(patron, re.DOTALL).findall(data)
    # TODO(review): per-movie item loop lost; rebuild from pristine copy.
    # TODO(review): next-page pattern lost; only the append survived.
    next_page = scrapertools.find_single_match(data, '')
    if next_page:
        itemlist.append(item.clone(url=next_page, page=0,
                                   title="» Siguiente »", text_color=color3))
    return itemlist


def temporadas(item):
    """List a show's seasons, falling back to episodios() when none parse.

    Partially recovered: header and videolibrary tail are intact; the season
    pattern and loop were lost.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Reconstructed standard Alfa cleanup — TODO confirm against pristine copy.
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = ''  # TODO(review): season pattern lost
    matches = re.compile(patron, re.DOTALL).findall(data)
    # TODO(review): per-season item loop lost; rebuild from pristine copy.
    # TODO(review): only '> 0:' of the condition survived; it may also have
    # included a config.get_videolibrary_support() check — verify.
    if len(itemlist) > 0:
        itemlist.append(Item(channel=__channel__,
                             title="Añadir esta serie a la videoteca",
                             url=item.url, action="add_serie_to_library",
                             extra="episodios", show=item.show,
                             category="Series", text_color=color1,
                             thumbnail=thumbnail_host, fanart=fanart_host))
        return itemlist
    else:
        return episodios(item)


def episodios(item):
    """List a season's episodes.

    Partially recovered: header and videolibrary tail are intact; the episode
    pattern and loop were lost.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Reconstructed standard Alfa cleanup — TODO confirm against pristine copy.
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = ''  # TODO(review): episode pattern lost
    matches = re.compile(patron, re.DOTALL).findall(data)
    # TODO(review): per-episode item loop lost; rebuild from pristine copy.
    # TODO(review): only '> 0:' of the condition survived — verify the full
    # condition (likely config.get_videolibrary_support() and len(...) > 0).
    if len(itemlist) > 0:
        itemlist.append(Item(channel=__channel__,
                             title="Añadir esta serie a la videoteca",
                             url=item.url, action="add_serie_to_library",
                             extra="episodios", show=item.show,
                             category="Series", text_color=color1,
                             thumbnail=thumbnail_host, fanart=fanart_host))
    return itemlist


def findvideos(item):
    """Resolve playable options for a title.

    TODO(review): the source file is TRUNCATED inside this function — the
    per-option URL extraction, server identification, autoplay hook and
    return value are missing.  Only the recovered prefix is kept below; the
    function is non-functional until restored from a pristine copy.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|amp;|#038;|\(.*?\)|\s{2}|&nbsp;", "", data)
    data = scrapertools.decodeHtmlentities(data)
    # patron1 = 'data-tplayernv="Opt(.*?)">(.*?)(.*?)'  # option, server, lang - quality
    patron = 'href="#option-(.*?)">([^<]+)'  # option id, language label
    matches = re.compile(patron, re.DOTALL).findall(data)
    for option, lang in matches:
        # TODO(review): pattern truncated in the source at this exact point.
        url = scrapertools.find_single_match(data, '')
    return itemlist