# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse, urllib2, urllib, re
import os, sys

from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools

host = 'http://es.foxtube.com'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Peliculas", action="peliculas", url=host))
    itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/buscador/%s" % texto
    try:
        return peliculas(item)
    except:
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    # The original pattern lost its HTML tags in extraction; this is a plausible
    # reconstruction that must capture (url, title) from the category list markup.
    patron = '<a href="([^"]+)"[^>]*>&bull; ([^"]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl, scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        scrapedurl = host + scrapedurl
        itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
                             thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
    return itemlist


def peliculas(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    # The original pattern lost its opening anchor and duration wrapper in extraction;
    # both are assumptions here. It must capture (url, thumbnail, title, duration).
    patron = '<a href="([^"]+)"[^>]*>.*?src="([^"]+)".*?alt="([^"]+)".*?<span class="time">(.*?)</span>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, duracion in matches:
        url = host + scrapedurl
        title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
        contentTitle = title
        thumbnail = scrapedthumbnail + "|Referer=%s" % host
        plot = ""
        year = ""
        itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
                             plot=plot, contentTitle=contentTitle, infoLabels={'year': year}))
    # Pagination: the original pattern was empty in the extracted source; the
    # '<a ... class="next">' selector below is an assumption.
    next_page_url = scrapertools.find_single_match(data, '<a href="([^"]+)"[^>]*class="next"')
    if next_page_url != "":
        next_page_url = urlparse.urljoin(item.url, next_page_url)
        itemlist.append(Item(channel=item.channel, action="peliculas", title="Página Siguiente >>",
                             text_color="blue", url=next_page_url, folder=True))
    return itemlist


def play(item):
    logger.info()
    itemlist = []
    # The original extraction pattern was truncated in the source; the '<source src=...'
    # selector and the returned item below are an assumed, minimal completion.
    data = scrapertools.cachePage(item.url)
    url = scrapertools.find_single_match(data, '<source src="([^"]+)"')
    itemlist.append(item.clone(action="play", title=item.title, url=url))
    return itemlist