# -*- coding: utf-8 -*-
# Kodi/Alfa video-addon channel scraper for tvsinpagar.com (a newpct-family site).
#
# NOTE(review): this source arrived with its newlines stripped and the HTML tags
# inside several regex string literals rendered away by an extraction step. The
# literals flagged NOTE(review) below are garbled and must be restored from the
# upstream channel file before this module can run.

import re
import urllib
import urlparse

from core import servertools
from core import scrapertools
from core.item import Item
from platformcode import logger
from core import httptools
from platformcode import config

# Link-checking settings shared with the 'newpct' channel family:
# whether to verify hoster links and how many of them to verify.
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'newpct')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'newpct')

# Base URL of the scraped site.
Host='http://www.tvsinpagar.com'


def mainlist(item):
    """Build the channel's root menu: one entry per enabled content section."""
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, action="submenu", title="Películas",url=Host+"/peliculas/"))
    itemlist.append(Item(channel=item.channel, action="submenu", title="Series",url=Host+"/series/"))
    # Sections below are disabled; kept for reference.
    #itemlist.append(Item(channel=item.channel, action="listado", title="Anime", url=Host+"/anime/",
    #                     viewmode="movie_with_plot"))
    #itemlist.append(
    #    Item(channel=item.channel, action="listado", title="Documentales", url=Host+"/documentales/",
    #         viewmode="movie_with_plot"))
    #itemlist.append(Item(channel=item.channel, action="search", title="Buscar"))
    return itemlist


def submenu(item):
    """List the category links found on a section page (filtered by url)."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Collapse newlines/tabs/double spaces so the regexes can match across lines.
    # NOTE(review): the last alternative renders as a bare space here — it looks
    # like a rendered &nbsp; entity; confirm against the upstream source.
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    # NOTE(review): pattern literal garbled by extraction (HTML tags were lost;
    # the leading bullet is a rendered <li>). Restore from upstream before use.
    patron = '
  • (.+?)<\/ul>' # Filtered by url
    data_cat = scrapertools.find_single_match(data, patron)
    # NOTE(review): same garbling as above — the href/title capture groups that
    # feed (scrapedurl, scrapedtitle) below are missing from the visible text.
    patron_cat='
  • <\/li>'
    matches = scrapertools.find_multiple_matches(data_cat, patron_cat)
    for scrapedurl, scrapedtitle in matches:
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl,action="listado"))
    return itemlist


def listado(item):
    # Scrapes a listing page into items. NOTE(review): this definition is
    # truncated in the visible source — it continues beyond this view.
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    # NOTE(review): literal content lost to extraction; displayed empty here.
    patron_data=''
    data_listado = scrapertools.find_single_match(data, patron_data)
    patron_listado='