# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Alfa
# Channel scraper for gnula.mobi: listing menus, search, and
# movie-page scraping enriched with TMDb metadata.
# ------------------------------------------------------------
# NOTE(review): this copy of the file was corrupted in transit — the module was
# collapsed onto a handful of physical lines and markup-like fragments inside
# the scraper regex literals were stripped. The regex patterns below marked
# with TODO are best-effort reconstructions from the surviving capture groups;
# verify them against the live site HTML before shipping.
import re

from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import logger

host = 'http://www.gnula.mobi/'


def mainlist(item):
    """Build the channel root menu: listing sections plus a search entry."""
    logger.info()
    itemlist = []
    itemlist.append(item.clone(title="Novedades", action="peliculas", url=host))
    itemlist.append(item.clone(title="Castellano", action="peliculas",
                               url="http://www.gnula.mobi/tag/espanol/"))
    itemlist.append(item.clone(title="Latino", action="peliculas",
                               url="http://gnula.mobi/tag/latino/"))
    itemlist.append(item.clone(title="VOSE", action="peliculas",
                               url="http://gnula.mobi/tag/subtitulada/"))
    itemlist.append(item.clone(title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    """Global-search entry point.

    Never propagates exceptions: a failure in this channel must not abort
    the add-on's cross-channel search, so errors are logged and an empty
    result list is returned instead.
    """
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = "http://gnula.mobi/?s=%s" % texto
    try:
        return sub_search(item)
    # Narrowed from a bare `except:` so Ctrl-C / SystemExit still propagate.
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def sub_search(item):
    """Scrape one search-results page into playable items, with pagination."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Flatten the page so the non-greedy patterns below can span "lines".
    # TODO(review): the tail of this substitution pattern was lost in the
    # corrupted copy; reconstructed as the usual whitespace/entity cleanup.
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    # TODO(review): result pattern reconstructed from the sibling peliculas()
    # scraper (url, title, thumbnail groups) — the original literal was lost.
    patron = 'href="(.*?)" title="(.*?)".*?src="(.*?)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        itemlist.append(Item(channel=item.channel, action="findvideos",
                             title=scrapedtitle, fulltitle=scrapedtitle,
                             url=scrapedurl, thumbnail=scrapedthumbnail))
    # TODO(review): pagination pattern reconstructed — original literal lost.
    paginacion = scrapertools.find_single_match(data, 'rel="next" href="(.*?)"')
    if paginacion:
        itemlist.append(Item(channel=item.channel, action="sub_search",
                             title="Next page >>", url=paginacion))
    return itemlist


def peliculas(item):
    """Scrape a listing page into movie items, enrich via TMDb, paginate."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # TODO(review): tail of this substitution pattern lost; see sub_search().
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    # TODO(review): the leading part of this pattern was lost in the corrupted
    # copy; the capture groups (url, title, thumbnail) are from the original.
    patron = 'href="(.*?)" title="(.*?)".*?<img src="(.*?)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        # Strip the TMDb CDN prefix so the bare poster path can be handed to
        # tmdb.set_infoLabels as a filter — it matches the exact movie by
        # artwork instead of by (ambiguous) title.
        filter_thumb = scrapedthumbnail.replace("http://image.tmdb.org/t/p/w300", "")
        filter_list = {"poster_path": filter_thumb}
        filter_list = filter_list.items()
        itemlist.append(Item(channel=item.channel, action="findvideos",
                             title=scrapedtitle, fulltitle=scrapedtitle,
                             url=scrapedurl, thumbnail=scrapedthumbnail,
                             infoLabels={'filtro': filter_list}))
    tmdb.set_infoLabels(itemlist, True)
    # TODO(review): the original function was truncated in this copy right at
    # this call; the pagination handling below mirrors sub_search() — confirm
    # against an intact copy of the channel.
    next_page_url = scrapertools.find_single_match(data, 'rel="next" href="(.*?)"')
    if next_page_url:
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title="Next page >>", url=next_page_url))
    return itemlist