# -*- coding: utf-8 -*-
# Channel scraper for torrentrapid.com (torrent movie/series listings) in the
# pelisalacarta/Alfa Kodi-addon style.
#
# NOTE(review): this file was recovered from a mangled (collapsed) source.
# The HTML-bearing regex pattern strings were destroyed during extraction and
# are marked with TODOs below — they must be restored from upstream history
# before this channel can actually scrape anything.

import re
import urllib
import urlparse

import requests

from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger

Host = 'http://torrentrapid.com'


def mainlist(item):
    """Build the channel's root menu: movies, series and a search entry.

    item -- the incoming channel Item; only ``item.channel`` is read.
    Returns a list of menu Items.
    """
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, action="submenu",
                         title="Películas", url=Host + "/peliculas/"))
    itemlist.append(Item(channel=item.channel, action="submenu",
                         title="Series", url=Host + "/series/"))
    itemlist.append(Item(channel=item.channel, action="search",
                         title="Buscar"))
    return itemlist


def search(item, texto):
    """POST the site's search form and scrape the result categories.

    item  -- the search Item (cloned per result).
    texto -- user-entered query string.
    Returns a list of Items whose action is "listado".
    """
    logger.info()
    itemlist = []
    # The original built {'q': 'data'} and immediately overwrote the value;
    # the dead placeholder assignment is dropped here.
    payload = {'q': texto}
    data = requests.post(Host + "/buscar", data=payload)
    # Collapse whitespace/entities so the patterns can match across lines.
    # NOTE(review): the last alternative was a non-breaking space in the
    # recovered source — presumably '&nbsp;'; confirm against upstream.
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data.text)
    patron_data = ''  # TODO(review): pattern lost in extraction — isolates the search-results block
    data_listado = scrapertools.find_single_match(data, patron_data)
    # Strip boilerplate link texts so only the bare titles remain.
    data_listado = re.sub("Descargar Todas ", "", data_listado)
    data_listado = re.sub("Descargar Pel\xedculas ", "", data_listado)
    data_listado = re.sub("Descargar ", "", data_listado)
    patron_listado = ''  # TODO(review): pattern lost — '(.+?)</ul>'-style span, filtered by url
    # FIX: the original referenced an undefined name `patron` here; the
    # surrounding code shows `patron_listado` is the intended pattern.
    data_cat = scrapertools.find_single_match(data, patron_listado)
    patron_cat = ''  # TODO(review): pattern lost — must capture (url, title) per <li>
    matches = scrapertools.find_multiple_matches(data_cat, patron_cat)
    for scrapedurl, scrapedtitle in matches:
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl,
                                   action="listado"))
    return itemlist


def listado(item):
    """Scrape a listing page at ``item.url``.

    NOTE(review): the tail of this function (its listing pattern and match
    loop) was truncated in the recovered source; only the visible prefix is
    reproduced, so this currently returns an empty list. Restore the rest
    from upstream before use.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron_data = ''  # TODO(review): pattern lost — isolates the listing block
    data_listado = scrapertools.find_single_match(data, patron_data)
    patron_listado = ''  # TODO(review): remainder of function lost in source
    return itemlist