# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel scraper for hclips.com (Kodi / pelisalacarta-style plugin).
# Python 2 code base (urlparse/urllib2), as required by the plugin runtime.
# ------------------------------------------------------------
import urlparse, urllib2, urllib, re
import os, sys

from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools

host = 'http://www.hclips.com'


def mainlist(item):
    """Build the channel root menu: listing modes, categories and search."""
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Nuevos", action="peliculas",
                         url=host + "/latest-updates/"))
    itemlist.append(Item(channel=item.channel, title="Popular", action="peliculas",
                         url=host + "/most-popular/?"))
    itemlist.append(Item(channel=item.channel, title="Longitud", action="peliculas",
                         url=host + "/longest/?"))
    itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias",
                         url=host + "/categories/"))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    """Run a site search for *texto*; return [] on any scrape failure.

    The broad except is deliberate: a failure here must never crash the
    plugin's global search aggregator, so the exception info is logged
    and an empty result list is returned instead.
    """
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search/?q=%s" % texto
    try:
        return peliculas(item)
    except:
        # sys is already imported at module level; the original re-imported
        # it here redundantly.
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categorias(item):
    """Scrape the category index page into 'peliculas' menu items."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    # NOTE(review): the HTML-tag portions of this pattern were stripped from
    # the source by markup-eating extraction; the tag fragments below are a
    # reconstruction of the usual KVS-engine category grid -- confirm against
    # the live /categories/ markup before shipping.
    patron = '<a href="([^"]+)"[^>]*>.*?'   # category URL
    patron += 'src="([^"]+)".*?'            # thumbnail
    patron += '>([^<]+)<.*?'                # title  -- TODO confirm
    patron += '<span[^>]*>(.*?)</span>'     # video count -- TODO confirm
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, vidnum in matches:
        scrapedplot = ""
        # Fix: the original built this label with literal backslashes
        # (" \(" ... "\)") and then never used it; show "Title (N videos)".
        title = "%s (%s)" % (scrapedtitle, vidnum)
        itemlist.append(Item(channel=item.channel, action="peliculas", title=title,
                             url=scrapedurl, thumbnail=scrapedthumbnail,
                             plot=scrapedplot))
    return itemlist


def peliculas(item):
    """Scrape one listing page into playable items plus a 'next page' entry."""
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    # NOTE(review): most of this pattern and the match loop were destroyed by
    # tag-stripping in the source (only '.*?' and '([^' fragments survived);
    # reconstructed to the usual KVS-engine listing layout -- verify the
    # group order against the live markup.
    patron = '<a href="([^"]+)"[^>]*title="([^"]+)".*?'
    patron += 'src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        itemlist.append(item.clone(action="play", title=scrapedtitle,
                                   url=scrapedurl, thumbnail=scrapedthumbnail,
                                   fanart=scrapedthumbnail))
    # Pager: the 'Next' anchor fragment survived in the source as "([^...Next".
    next_page_url = scrapertools.find_single_match(data, '<a href="([^"]+)"[^>]*>Next')
    if next_page_url != "":
        next_page_url = urlparse.urljoin(item.url, next_page_url)
        itemlist.append(item.clone(action="peliculas", title="Página Siguiente >>",
                                   text_color="blue", url=next_page_url))
    return itemlist


def play(item):
    """Resolve the obfuscated player-page video_url into a direct media URL.

    The page builds the URL in two JS chunks (``var video_url = "...";``
    then ``video_url += "...";``). The concatenated blob is '||'-separated:
    [0] encoded URL, [1] replacement for the '/get_file/<id>/<hash>/' path
    segment, [2] 'lip' (client IP) value, [3] 'lt' (timestamp) value.
    """
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    video_url = scrapertools.find_single_match(data, 'var video_url = "([^"]*)"')
    video_url += scrapertools.find_single_match(data, 'video_url \+= "([^"]*)"')
    partes = video_url.split('||')
    video_url = decode_url(partes[0])
    video_url = re.sub('/get_file/\d+/[0-9a-z]{32}/', partes[1], video_url)
    # Append with '&' if a query string already exists, else start one.
    video_url += '&' if '?' in video_url else '?'
    # NOTE(review): the source showed '<=' here, which is the HTML-mangled
    # form of '&lt=' (the KVS 'lt' query parameter) -- restored.
    video_url += 'lip=' + partes[2] + '&lt=' + partes[3]
    itemlist.append(item.clone(action="play", title=item.title, url=video_url))
    return itemlist


def decode_url(txt):
    """Decode the site's base64 variant and return the unquoted URL.

    Uses a custom 65-char alphabet ('.', ',' instead of '+', '/', and
    '~' at index 64 as padding). The alphabet embedded in the page
    sometimes carries Cyrillic look-alike letters (А В С Е М -- 2 bytes
    each in UTF-8); they are normalized to their 1-byte ASCII twins
    before decoding.
    """
    alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,~'
    # Drop anything outside the alphabet (plus the Cyrillic look-alikes),
    # then map the look-alikes onto their ASCII counterparts.
    txt = re.sub('[^АВСЕМA-Za-z0-9\.\,\~]', '', txt)
    txt = txt.replace('А', 'A').replace('В', 'B').replace('С', 'C')
    txt = txt.replace('Е', 'E').replace('М', 'M')
    reto = ''
    n = 0
    # Standard base64 bit-shuffle: 4 input symbols -> up to 3 output bytes;
    # index 64 ('~') marks padding and suppresses the trailing byte(s).
    while n < len(txt):
        a = alphabet.index(txt[n]); n += 1
        b = alphabet.index(txt[n]); n += 1
        c = alphabet.index(txt[n]); n += 1
        d = alphabet.index(txt[n]); n += 1
        a = a << 2 | b >> 4
        b = (b & 15) << 4 | c >> 2
        e = (c & 3) << 6 | d
        reto += chr(a)
        if c != 64:
            reto += chr(b)
        if d != 64:
            reto += chr(e)
    return urllib.unquote(reto)