# -*- coding: utf-8 -*-
# Kodi video-addon channel scraper for http://peliculas.nu/ (pelisalacarta /
# alfa style: each public function below is an "action" dispatched by the
# addon framework against an Item).
#
# NOTE(review): this module was recovered from a whitespace-mangled copy in
# which several regex string literals had their HTML-tag text stripped out.
# Every spot marked "reconstructed" below is a best-effort rebuild from the
# surviving fragments and MUST be verified against the live site's markup
# before being trusted.

import urllib

from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger

__modo_grafico__ = config.get_setting("modo_grafico", "peliculasnu")
__perfil__ = config.get_setting("perfil", "peliculasnu")

# Fijar perfil de color: three selectable colour palettes for menu entries,
# chosen via the channel's "perfil" setting (index into the list).
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'],
          ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'],
          ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']]
color1, color2, color3 = perfil[__perfil__]

host = "http://peliculas.nu/"


def mainlist(item):
    """Build the channel's root menu."""
    logger.info()
    itemlist = []
    item.text_color = color1
    itemlist.append(item.clone(title="Novedades", action="entradas", url=host,
                               fanart="http://i.imgur.com/c3HS8kj.png"))
    itemlist.append(item.clone(title="Más Vistas", action="entradas",
                               url=host + "mas-vistas",
                               fanart="http://i.imgur.com/c3HS8kj.png"))
    itemlist.append(item.clone(title="Mejor Valoradas", action="entradas",
                               url=host + "mejor-valoradas",
                               fanart="http://i.imgur.com/c3HS8kj.png"))
    item.text_color = color2
    itemlist.append(item.clone(title="En Español", action="entradas",
                               url=host + "?s=Español",
                               fanart="http://i.imgur.com/c3HS8kj.png"))
    itemlist.append(item.clone(title="En Latino", action="entradas",
                               url=host + "?s=Latino",
                               fanart="http://i.imgur.com/c3HS8kj.png"))
    itemlist.append(item.clone(title="En VOSE", action="entradas",
                               url=host + "?s=VOSE",
                               fanart="http://i.imgur.com/c3HS8kj.png"))
    item.text_color = color3
    # Both index entries share action="indices"; that function tells them
    # apart by inspecting item.title ("género" vs "letra").
    itemlist.append(item.clone(title="Por género", action="indices",
                               fanart="http://i.imgur.com/c3HS8kj.png"))
    itemlist.append(item.clone(title="Por letra", action="indices",
                               fanart="http://i.imgur.com/c3HS8kj.png"))
    itemlist.append(item.clone(title="", action=""))  # visual separator
    itemlist.append(item.clone(title="Buscar...", action="search"))
    itemlist.append(item.clone(action="configuracion", title="Configurar canal...",
                               text_color="gold", folder=False))
    return itemlist


def configuracion(item):
    """Open the channel settings dialog, then refresh the current listing."""
    from platformcode import platformtools
    ret = platformtools.show_channel_settings()
    platformtools.itemlist_refresh()
    return ret


def search(item, texto):
    """Global-search entry point: run a site search for *texto*.

    Returns the scraped result items, or [] on any failure so a broken
    channel never interrupts the addon-wide search.
    """
    logger.info()
    texto = texto.replace(" ", "+")
    try:
        item.url = "%s?s=%s" % (host, texto)
        item.action = "entradas"
        return entradas(item)
    # Se captura la excepción, para no interrumpir al buscador global si un
    # canal falla.
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def newest(categoria):
    """Feed the addon-wide "novedades" screen for the given category."""
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == "peliculas":
            item.url = host
        elif categoria == "terror":
            item.url = host + "terror/"
        item.from_newest = True
        item.action = "entradas"
        itemlist = entradas(item)
        # Drop a trailing pagination entry, if any (real entries use
        # action="findvideos").  Bug fix: guard against an empty result
        # before indexing itemlist[-1].
        if itemlist and itemlist[-1].action == "entradas":
            itemlist.pop()
    # Se captura la excepción, para no interrumpir al canal novedades si un
    # canal falla.
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
    return itemlist


def entradas(item):
    """Scrape a listing page into movie Items (with pagination).

    NOTE(review): the scraping pattern and the loop body were destroyed in
    the corrupted source; only the capture-group fragments survived.  The
    rebuild below (5 groups: url, thumbnail, title, year, quality) must be
    verified against the site HTML.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Reconstructed pattern — leading tag text of the original literal lost.
    patron = 'href="([^"]+)".*?src="([^"]+)".*?class="Title">([^<]+)<.*?' \
             '"Date AAIco-date_range">(\d+).*?class="Qlty">([^<]+)<'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, scrapedtitle, year, calidad in matches:
        # Reconstructed title format — original display string was lost.
        title = "%s  [%s]" % (scrapedtitle, calidad)
        itemlist.append(item.clone(action="findvideos", url=scrapedurl,
                                   thumbnail=scrapedthumbnail, title=title,
                                   contentTitle=scrapedtitle,
                                   contentType="movie",
                                   infoLabels={'year': year}))
    # Skip the (slow) TMDB lookup when feeding the global "novedades" screen.
    if not item.from_newest:
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    # Pagination (reconstructed): fragments show a 15-entries-per-page check,
    # an extra="next" marker and a next-page link lookup.
    if len(matches) == 15:
        next_page = scrapertools.find_single_match(
            data, 'rel="next" href="([^"]+)"')  # NOTE(review): pattern rebuilt
        if next_page:
            itemlist.append(item.clone(title=">> Página Siguiente",
                                       url=next_page, extra="next",
                                       text_color=color3))
    return itemlist


def indices(item):
    """List index links — genres or letters — scraped from the home page.

    NOTE(review): the whole function header and both patterns were lost in
    the corrupted source; everything except the final loop is a best-effort
    rebuild.  Verify the block delimiters against the site HTML.
    """
    logger.info()
    itemlist = []
    action = "entradas"
    data = httptools.downloadpage(host).data
    if "letra" in item.title:
        bloque = scrapertools.find_single_match(data, '"AZList"(.*?)</ul>')
    else:
        bloque = scrapertools.find_single_match(data, '"genres"(.*?)</ul>')
    matches = scrapertools.find_multiple_matches(
        bloque, 'href="([^"]+)"[^>]*>([^<]+)')
    for scrapedurl, scrapedtitle in matches:
        itemlist.append(item.clone(action=action, url=scrapedurl,
                                   title=scrapedtitle))
    return itemlist


def findvideos(item):
    """List the available video servers for one movie page.

    NOTE(review): the plot/fanart patterns and the server-extraction section
    were destroyed in the corrupted source; the servertools fallback below is
    a best-effort rebuild of the lost logic.
    """
    logger.info()
    itemlist = []
    tmdb.set_infoLabels_item(item, __modo_grafico__)
    data = httptools.downloadpage(item.url).data
    if not item.infoLabels["plot"]:
        # Reconstructed pattern — original literal lost.
        item.infoLabels["plot"] = scrapertools.find_single_match(
            data, '"Description">.*?<p>(.*?)</p>')
    # Reconstructed pattern — original literal lost.
    fanart = scrapertools.find_single_match(data, '"TPost-Bg".*?src="([^"]+)"')
    itemlist.extend(servertools.find_video_items(data=data))
    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.infoLabels = item.infoLabels
        if fanart:
            videoitem.fanart = fanart
    return itemlist


def play(item):
    """Resolve a directly-hosted video by unpacking its p.a.c.k.e.d JS.

    Non-direct items are handed back unchanged for the normal server
    connectors.  Returns a list of [title, url, 0, subtitle] entries,
    best quality first.

    NOTE(review): the guarding condition and the "packed" extraction pattern
    were partially destroyed in the corrupted source; only the regex tail
    ``\\}\\)\\))`` and the jsunpack section survived intact.
    """
    logger.info()
    itemlist = []
    if item.server == "directo":  # NOTE(review): condition rebuilt — confirm
        from core import jsunpack
        data = httptools.downloadpage(item.url).data
        packed = scrapertools.find_single_match(
            data, "(eval\(function\(p,a,c,k.*?\}\)\))")
        if not packed:
            packed = data
        data_js = jsunpack.unpack(packed)
        subtitle = scrapertools.find_single_match(
            data_js, 'tracks:\[\{"file":"([^"]+)"')
        patron = '{"file":\s*"([^"]+)","label":\s*"([^"]+)","type":\s*"video/([^"]+)"'
        matches = scrapertools.find_multiple_matches(data_js, patron)
        for url, calidad, extension in matches:
            url = url.replace(",", "%2C")  # commas break Kodi's url lists
            title = ".%s %s [directo]" % (extension, calidad)
            itemlist.insert(0, [title, url, 0, subtitle])
    else:
        return [item]
    return itemlist