# -*- coding: utf-8 -*-
# ------------------------------------------------------------

import re
import urlparse

from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
from platformcode import config

host = 'http://www.likuoo.video'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append( Item(channel=item.channel, title="Ultimos", action="lista", url=host))
    itemlist.append( Item(channel=item.channel, title="Pornstar", action="categorias", url=host + "/pornstars/"))
    itemlist.append( Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/all-channels/"))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search/?s=%s" % texto
    try:
        return lista(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
", "", data) patron = '
    # Pagination link; the anchor markup around the "»" is assumed.
    next_page = scrapertools.find_single_match(data,'<a href="([^"]+)">.*?»')
    if next_page!="":
        next_page = urlparse.urljoin(item.url,next_page)
        itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) )
    return itemlist


def lista(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
", "", data) patron = '
.*?' patron += '.*?' patron += 'src="(.*?)".*?' patron += '
(.*?)
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches:
        url = urlparse.urljoin(item.url,scrapedurl)
        scrapedtime = scrapedtime.replace("m", ":").replace("s", " ")
        title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
        contentTitle = title
        thumbnail = "https:" + scrapedthumbnail
        plot = ""
        itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
                              fanart=thumbnail, plot=plot, contentTitle=contentTitle))
    # Pagination link; the anchor markup after the literal '...' is assumed.
    next_page = scrapertools.find_single_match(data,'...<a href="([^"]+)">')
    if next_page!="":
        next_page = urlparse.urljoin(item.url,next_page)
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
    return itemlist


def play(item):
    logger.info()
    data = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = item.fulltitle
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel
    return itemlist
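

# ------------------------------------------------------------
# Minimal local smoke test (an illustrative sketch, not part of the channel
# API).  It assumes the Alfa "core" and "platformcode" packages can be
# imported outside Kodi, which may not hold in every setup; remove or adapt
# as needed.
if __name__ == "__main__":
    # Print the top-level menu entries and the action each one dispatches to.
    for entry in mainlist(Item(channel="likuoo", url=host)):
        print entry.title, "->", entry.action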