KoD 0.5 -riscritti molti canali per cambiamenti nella struttura stessa di kod -altre robe carine
148 lines
6.3 KiB
Python
148 lines
6.3 KiB
Python
# -*- coding: utf-8 -*-
|
||
#------------------------------------------------------------
|
||
import urlparse,urllib2,urllib,re
|
||
import os, sys
|
||
from platformcode import config, logger
|
||
from core import scrapertools
|
||
from core.item import Item
|
||
from core import servertools
|
||
from core import httptools
|
||
|
||
host = 'https://hotmovs.com'
|
||
|
||
def mainlist(item):
    """Build the channel's root menu: listing sections, catalogue, categories and search."""
    logger.info()
    # (title, action, url path) for every section that points at a page on the site.
    sections = [
        ("Nuevas", "lista", "/latest-updates/"),
        ("Mas Vistas", "lista", "/most-popular/?sort_by=video_viewed_week"),
        ("Mejor valorada", "lista", "/top-rated/?sort_by=rating_week"),
        ("Canal", "catalogo", "/channels/?sort_by=cs_viewed"),
        ("Pornstars", "categorias", "/models/"),
        ("Categorias", "categorias", "/categories/?sort_by=title"),
    ]
    itemlist = [Item(channel=item.channel, title=title, action=action, url=host + path)
                for title, action, path in sections]
    # The search entry carries no URL; the frontend routes it to search().
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
|
||
|
||
|
||
def search(item, texto):
    """Run a site search for *texto* and return the scraped result items.

    Spaces in the query are encoded as '+' for the site's query string.
    Returns an empty list on any scraping error so the GUI never crashes.
    """
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search/?q=%s" % texto
    try:
        return lista(item)
    # Bugfix: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; narrow to Exception and keep the best-effort logging.
    except Exception:
        # `sys` is already imported at module level; the redundant local
        # `import sys` was removed.
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||
|
||
|
||
def catalogo(item):
    """Scrape the site's channel catalogue page into browsable items (one per channel)."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # NOTE(review): the lone-space alternative below looks like an
    # HTML-unescaped "&nbsp;" from the original source -- confirm upstream.
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = ('<a class="thumbnail" href="([^"]+)">.*?'
              '<img src="([^"]+)".*?'
              '<span class="thumbnail__info__right">\s+([^"]+)\s+</span>.*?'
              '<h5>([^"]+)</h5>')
    for url, thumb, count, name in re.findall(patron, data, re.DOTALL):
        count = count.replace(" ", "")
        itemlist.append(Item(channel=item.channel, action="lista",
                             title=name + " (" + count + ")",
                             url=urlparse.urljoin(item.url, url),
                             thumbnail=thumb, plot=""))
    # Pagination: follow the "next" link when the site provides one.
    next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)"')
    if next_page != "":
        itemlist.append(item.clone(action="catalogo", title="Página Siguiente >>",
                                   text_color="blue",
                                   url=urlparse.urljoin(item.url, next_page)))
    return itemlist
|
||
|
||
|
||
def categorias(item):
    """Scrape a category/model index page into browsable items, one per entry."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # NOTE(review): the lone-space alternative below looks like an
    # HTML-unescaped "&nbsp;" from the original source -- confirm upstream.
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = ('<a class="thumbnail" href="([^"]+)" title="([^"]+)">.*?'
              '<img src="([^"]+)".*?'
              '<i class="mdi mdi-video"></i>([^"]+)</div>')
    for url, name, thumb, count in re.findall(patron, data, re.DOTALL):
        count = count.replace(" ", "")
        itemlist.append(Item(channel=item.channel, action="lista",
                             title=name + " (" + count + ")",
                             url=urlparse.urljoin(item.url, url),
                             thumbnail=thumb, plot=""))
    # Pagination: follow the "next" link when the site provides one.
    next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)"')
    if next_page != "":
        itemlist.append(item.clone(action="categorias", title="Página Siguiente >>",
                                   text_color="blue",
                                   url=urlparse.urljoin(item.url, next_page)))
    return itemlist
|
||
|
||
|
||
def lista(item):
    """Scrape a video-listing page into playable items (duration prefixed to the title)."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # NOTE(review): the lone-space alternative below looks like an
    # HTML-unescaped "&nbsp;" from the original source -- confirm upstream.
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = ('<article class="item" data-video-id="([^"]+)">'
              '.*?src="([^"]+)" alt="([^"]+)"'
              '.*?<div class="thumbnail__info__right">(.*?)</div>')
    for video_id, thumb, name, duration in re.findall(patron, data, re.DOTALL):
        title = "[COLOR yellow]" + duration + "[/COLOR] " + name
        itemlist.append(Item(channel=item.channel, action="play", title=title,
                             url=urlparse.urljoin(item.url, "/embed/" + video_id),
                             thumbnail=thumb, plot="", contentTitle=title))
    # Pagination: follow the "next" link when the site provides one.
    next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)"')
    if next_page != "":
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>",
                                   text_color="blue",
                                   url=urlparse.urljoin(item.url, next_page)))
    return itemlist
|
||
|
||
|
||
def play(item):
    """Resolve the embed page into a direct, token-signed video URL.

    The page builds the URL in JS as two concatenated chunks
    (`var video_url="…"` plus `video_url+='…'`).  The combined string is
    '||'-separated: [0] the obfuscated URL (see decode_url), [1] the real
    /get_file/ path to substitute in, [2] the `lip` token, [3] the `lt` token.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    video_url = scrapertools.find_single_match(data, 'var video_url="([^"]*)"')
    video_url += scrapertools.find_single_match(data, 'video_url\+=\'([^\']+)\'')
    partes = video_url.split('||')
    video_url = decode_url(partes[0])
    # Swap the placeholder /get_file/<n>/<md5>/ segment for the real path.
    video_url = re.sub('/get_file/\d+/[0-9a-z]{32}/', partes[1], video_url)
    video_url += '&' if '?' in video_url else '?'
    # Bugfix: the separator before the `lt` parameter read "'<='", an
    # HTML-unescaped "&lt=" ("&lt" decodes to "<"), which produced an
    # invalid query string; restore the literal "&lt=" separator.
    video_url += 'lip=' + partes[2] + '&lt=' + partes[3]
    itemlist.append(item.clone(action="play", title=item.title, url=video_url))
    return itemlist
|
||
|
||
|
||
def decode_url(txt):
    """Decode the site's obfuscated URL string (a custom base64 variant).

    The 65-char alphabet ends in '.', ',' and uses '~' (index 64) as the
    padding symbol.  The obfuscator emits a few 2-byte Cyrillic look-alike
    letters in place of ASCII A/B/C/E/M, so those are normalised first.
    The decoded bytes are finally percent-unquoted.
    """
    alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,~'
    # Drop everything outside the alphabet (keeping the Cyrillic stand-ins),
    # then map each 2-byte Cyrillic А/В/С/Е/М onto its 1-byte ASCII twin.
    txt = re.sub('[^АВСЕМA-Za-z0-9\.\,\~]', '', txt)
    for cyrillic, latin in (('А', 'A'), ('В', 'B'), ('С', 'C'), ('Е', 'E'), ('М', 'M')):
        txt = txt.replace(cyrillic, latin)

    decoded = ''
    # Standard base64 regrouping: four 6-bit symbols -> three 8-bit chars.
    for i in range(0, len(txt), 4):
        a = alphabet.index(txt[i])
        b = alphabet.index(txt[i + 1])
        c = alphabet.index(txt[i + 2])
        d = alphabet.index(txt[i + 3])
        decoded += chr(a << 2 | b >> 4)
        if c != 64:  # index 64 is the '~' padding symbol
            decoded += chr((b & 15) << 4 | c >> 2)
        if d != 64:
            decoded += chr((c & 3) << 6 | d)
    return urllib.unquote(decoded)
|
||
|