# -*- coding: utf-8 -*-
# Channel scraper for www.peliculasmx.net (pelisalacarta/alfa style).
#
# NOTE(review): this file was recovered from a corrupted copy in which all
# source lines were collapsed together and several regex literals were
# destroyed (the HTML they matched had been rendered into the source text).
# Every pattern marked TODO(review) below is a best-effort reconstruction
# and MUST be validated against the live site before shipping.

import re
import sys
import urlparse

from core import httptools
from core import scrapertools
from core import tmdb
from core.item import Item
from platformcode import logger

HOST = "http://www.peliculasmx.net/"


def mainlist(item):
    """Build the channel's root menu: latest additions, genres, search."""
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Últimas añadidas",
                         action="peliculas", url=HOST))
    itemlist.append(Item(channel=item.channel, title="Últimas por género",
                         action="generos", url=HOST))
    itemlist.append(Item(channel=item.channel, title="Buscar...",
                         action="search", url=HOST))
    return itemlist


def generos(item):
    """List the genre menu scraped from the portal's sidebar.

    Each genre becomes an Item whose action is "peliculas" so selecting it
    lists the movies for that genre.
    """
    logger.info()
    itemlist = []
    # Download the page
    data = httptools.downloadpage(item.url).data
    logger.debug(data)
    # TODO(review): the original genre regex was lost to corruption.  The
    # sidebar renders entries such as "Accion 246" (name + movie count);
    # this generic anchor pattern captures the link and visible genre name —
    # confirm it against the current page markup.
    patron = '<li[^>]*><a href="([^"]+)"[^>]*>([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title=scrapedtitle,
                             url=urlparse.urljoin(item.url, scrapedurl),
                             folder=True))
    return itemlist


def peliculas(item):
    """List the movies found at item.url, plus a "next page" entry.

    NOTE(review): the `def` line and listing regex of this function were
    destroyed by the corruption; only its pagination tail survived.  The
    function is reconstructed here because `search` calls it and the
    pagination code creates Items with action="peliculas".
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # TODO(review): the movie-entry regex was lost to corruption; this is a
    # generic anchor + thumbnail pattern to be validated against the site.
    patron = '<a href="([^"]+)"[^>]*title="([^"]+)"[^>]*>.*?<img[^>]*src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        itemlist.append(Item(channel=item.channel, action="findvideos",
                             title=scrapedtitle,
                             url=urlparse.urljoin(item.url, scrapedurl),
                             thumbnail=scrapedthumbnail,
                             contentTitle=scrapedtitle, folder=True))
    tmdb.set_infoLabels(itemlist)
    # Pagination (this part survived the corruption): the "next page" URL is
    # the first href inside the paginator block whose class ends in "lateral".
    paginador = scrapertools.find_single_match(
        data, '(?s)class="[^"]*lateral"(.*?)</div>')
    patron = ".*?href='([^']+)"
    scrapedurl = scrapertools.find_single_match(paginador, patron)
    if scrapedurl:
        scrapedtitle = "!Pagina Siguiente ->"
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title=scrapedtitle, url=scrapedurl, folder=True))
    return itemlist


def search(item, texto):
    """Global-search entry point.

    Returns the matching movie Items sorted by title, or [] on any scrape
    failure so a single broken channel does not interrupt the global search
    dialog.  The exception is logged, never propagated.
    """
    logger.info()
    itemlist = []
    texto = texto.replace(" ", "+")
    try:
        item.url = "http://www.peliculasmx.net/?s=%s" % texto
        itemlist.extend(peliculas(item))
        itemlist = sorted(itemlist, key=lambda Item: Item.title)
        return itemlist
    # Catch (and log) everything except SystemExit/KeyboardInterrupt so the
    # global search keeps working when this channel fails.  The original used
    # a bare `except:`, which also swallowed interpreter-exit exceptions.
    except Exception:
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []