# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel scraper for qwertty.net.
#
# NOTE(review): this file was recovered from a mangled extraction.
# Every scraping regex below originally contained literal HTML
# markup that the extraction tool stripped out, leaving the string
# literals empty/unterminated.  Each such site is marked with a
# "TODO(review): restore pattern" comment and MUST be restored from
# version control before this channel can work again.
# ------------------------------------------------------------

import re
import sys
import urlparse

import urllib
import urllib2

from channels import pornhub, xvideos, youporn
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger

host = 'http://qwertty.net'


def mainlist(item):
    """Build the channel's root menu: listing filters plus search.

    Returns a list of Item objects; each listing entry routes to
    lista() with the matching site filter URL.
    """
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Recientes",
                         action="lista", url=host))
    itemlist.append(Item(channel=item.channel, title="Mas Vistas",
                         action="lista", url=host + "/?filter=most-viewed"))
    itemlist.append(Item(channel=item.channel, title="Mas popular",
                         action="lista", url=host + "/?filter=popular"))
    itemlist.append(Item(channel=item.channel, title="Mejor valoradas",
                         action="lista", url=host + "/?filter=random"))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    """Run a site search for `texto` and delegate to lista().

    Never raises: on any scraping failure the traceback is logged and
    an empty list is returned so the UI keeps working.
    """
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/?s=%s" % texto
    try:
        return lista(item)
    except Exception:
        # Fixed: original used a bare `except:` which also swallowed
        # SystemExit/KeyboardInterrupt.  Keep the original best-effort
        # contract (log and return []) but catch only Exception.
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categorias(item):
    """Scrape the site's category index into menu items.

    Each matched (url, title) pair becomes an Item routed to lista().
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # TODO(review): restore pattern from VCS -- the original
    # '<a href="...">title</a>'-style regex was destroyed by the
    # extraction that produced this file.
    patron = ''
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        # Category hrefs are site-relative; prefix the host.
        scrapedurl = host + scrapedurl
        itemlist.append(Item(channel=item.channel, action="lista",
                             title=scrapedtitle, url=scrapedurl,
                             thumbnail=scrapedthumbnail, plot=scrapedplot))
    return itemlist


def lista(item):
    """Scrape one listing page of videos plus a 'next page' entry."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Collapse newlines/tabs (and, originally, some HTML entities --
    # those alternatives were lost in extraction, TODO(review): restore)
    # so that single-line regexes match across the whole page.
    data = re.sub(r"\n|\r|\t", "", data)
    # TODO(review): restore pattern from VCS -- the per-video regex and
    # the exact fields it captured were destroyed by the extraction.
    # The loop below assumes (url, title) pairs; verify against VCS.
    patron = ''
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        itemlist.append(item.clone(action="play", title=scrapedtitle,
                                   url=scrapedurl))
    # Pagination: primary pattern anchored on a 'Next' link, with a
    # secondary fallback pattern when the first finds nothing.
    # TODO(review): both patterns were destroyed by the extraction;
    # only the trailing 'Next' anchor of the first one survived.
    next_page = scrapertools.find_single_match(data, 'Next')
    if next_page == "":
        next_page = scrapertools.find_single_match(data, '')
    if next_page != "":
        # Pagination hrefs may be relative; resolve against the
        # current page URL.
        next_page = urlparse.urljoin(item.url, next_page)
        itemlist.append(item.clone(action="lista",
                                   title="Página Siguiente >>",
                                   text_color="blue", url=next_page))
    return itemlist


def play(item):
    """Resolve the playable video URL(s) for a video page.

    NOTE(review): the source was truncated mid-statement inside this
    function; everything after the first find_single_match() call is a
    hedged reconstruction and must be restored from version control.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # TODO(review): restore pattern from VCS (truncated in extraction).
    url1 = scrapertools.find_single_match(data, '')
    # Presumably the embedded player URL is handed to the generic
    # server resolver -- verify against the original implementation.
    itemlist.extend(servertools.find_video_items(data=url1))
    return itemlist