# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys

from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools

host = 'https://hotmovs.com'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append( Item(channel=item.channel, title="Nuevas", action="lista", url=host + "/latest-updates/"))
    itemlist.append( Item(channel=item.channel, title="Mas Vistas", action="lista", url=host + "/most-popular/?sort_by=video_viewed_week"))
    itemlist.append( Item(channel=item.channel, title="Mejor valorada", action="lista", url=host + "/top-rated/?sort_by=rating_week"))
    itemlist.append( Item(channel=item.channel, title="Canal", action="catalogo", url=host + "/channels/?sort_by=cs_viewed"))
    itemlist.append( Item(channel=item.channel, title="Pornstars", action="categorias", url=host + "/models/"))
    itemlist.append( Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/categories/?sort_by=title"))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search/?q=%s" % texto
    try:
        return lista(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def catalogo(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    # NOTE: the HTML fragments inside the original patterns were lost when this
    # file was mangled; the selectors and the loop below are an assumed
    # reconstruction of the channel listing and may need adjusting to the live page.
    patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?'
    patron += '<img class="thumb" src="([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
        url = urlparse.urljoin(item.url, scrapedurl)
        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=url,
                              thumbnail=scrapedthumbnail, plot=""))
    return itemlist


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    # Same caveat as catalogo(): the original pattern markup was lost, so this is
    # an assumed reconstruction of the category/model listing.
    patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?'
    patron += '<img class="thumb" src="([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
        url = urlparse.urljoin(item.url, scrapedurl)
        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=url,
                              thumbnail=scrapedthumbnail, plot=""))
    return itemlist


def lista(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    # Only the tail of the original pattern (the src/alt groups and the duration
    # group) survived; the leading markup and the href group are assumed.
    patron = '<div class="item">.*?'
    patron += '<a href="([^"]+)".*?'
    patron += 'src="([^"]+)" alt="([^"]+)".*?'
    patron += '<div class="duration">(.*?)</div>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedthumbnail,scrapedtitle,scrapedtime in matches:
        url = urlparse.urljoin(item.url, "/embed/" + scrapedurl)
        title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
        thumbnail = scrapedthumbnail
        plot = ""
        itemlist.append( Item(channel=item.channel, action="play", title=title, url=url,
                              thumbnail=thumbnail, plot=plot, contentTitle=title))
    # Pagination: the selector inside find_single_match was cut off in the original
    # text, so the pattern and the block that follows are assumed.
    next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)"')
    if next_page:
        next_page = urlparse.urljoin(item.url, next_page)
        itemlist.append( item.clone(action="lista", title="Página Siguiente >>", url=next_page))
    return itemlist
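

# The listing items above dispatch to action="play", but the tail of the original
# file was truncated and no play() function survived. The sketch below is an
# assumption, not the author's original code: it follows the common Alfa channel
# pattern of handing the item to servertools.get_servers_itemlist() so the shared
# server resolvers can extract a playable stream from item.url.
def play(item):
    logger.info()
    itemlist = []
    itemlist.append(item.clone())
    # Let the shared resolver identify known video hosts in the embed URL.
    itemlist = servertools.get_servers_itemlist(itemlist)
    return itemlist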