# -*- coding: utf-8 -*-
#------------------------------------------------------------

import urlparse,urllib2,urllib,re
import os, sys

from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools

host = 'https://www.cliphunter.com'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append( Item(channel=item.channel, title="Nuevas", action="peliculas", url=host + "/categories/All"))
    itemlist.append( Item(channel=item.channel, title="Popular", action="peliculas", url=host + "/popular/ratings/yesterday"))
    itemlist.append( Item(channel=item.channel, title="Pornstars", action="catalogo", url=host + "/pornstars/"))
    itemlist.append( Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/categories/"))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search/%s" % texto
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []

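
# Pornstar index ("Pornstars" menu entry): every matched performer becomes a
# folder whose URL points at the performer's "/movies" page and is rendered
# through peliculas().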
", "", data) patron = '\s*.*?([^"]+)' matches = re.compile(patron,re.DOTALL).findall(data) for scrapedurl,scrapedthumbnail,scrapedtitle in matches: scrapedplot = "" scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/movies" itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) next_page_url = scrapertools.find_single_match(data,'
    next_page_url = scrapertools.find_single_match(data,'  • ')
    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append( Item(channel=item.channel, action="catalogo", title="Página Siguiente >>", text_color="blue", url=next_page_url) )
    return itemlist

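
# Category index ("Categorias" menu entry): each category is listed as a folder
# and rendered through peliculas().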
    ", "", data) patron = '.*?' matches = re.compile(patron,re.DOTALL).findall(data) for scrapedurl,scrapedtitle,scrapedthumbnail in matches: scrapedplot = "" scrapedtitle = scrapedtitle scrapedurl = urlparse.urljoin(item.url,scrapedurl) itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot) ) return itemlist def peliculas(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t| |
    ", "", data) patron = '(.*?).*?
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedthumbnail,scrapedtime,scrapedurl,scrapedtitle in matches:
        url = urlparse.urljoin(item.url,scrapedurl)
        title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
        thumbnail = scrapedthumbnail
        plot = ""
        year = ""
        itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot, contentTitle=title, infoLabels={'year':year}) )
    next_page_url = scrapertools.find_single_match(data,'  • ')
    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue", url=next_page_url) )
    return itemlist

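
# The video page embeds its stream URLs as escaped JSON values ("url":"...");
# each match is unescaped and returned as a directly playable item.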
def play(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '"url"\:"(.*?)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl in matches:
        # the URLs come JSON-escaped ("\/"), so restore the plain slashes
        scrapedurl = scrapedurl.replace("\/", "/")
        itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl, thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo"))
    return itemlist