addon/channels/hentaiid.py
mac12m99 a3c90d90d7 KoD 0.5
- rewrote many channels to follow changes in the structure of kod itself
- other nice things
2019-08-30 20:47:43 +02:00


# -*- coding: utf-8 -*-
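# Channel scraper for hentai-id.tv. Note that urlparse below is the Python 2
# standard-library module (urllib.parse on Python 3), so this module appears to
# target the Python 2 based KoD/Kodi runtime of its era.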
import re
import urlparse
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import logger
CHANNEL_HOST = "http://hentai-id.tv/"
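
# Builds the channel's main menu: latest releases ("Novedades"), a genre browser,
# and the uncensored / high-definition / ranking archive listings.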
def mainlist(item):
    logger.info()
    itemlist = list()
    itemlist.append(Item(channel=item.channel, action="series", title="Novedades",
                         url=urlparse.urljoin(CHANNEL_HOST, "archivos/h2/"), extra="novedades"))
    itemlist.append(Item(channel=item.channel, action="generos", title="Por géneros", url=CHANNEL_HOST))
    itemlist.append(Item(channel=item.channel, action="series", title="Sin Censura",
                         url=urlparse.urljoin(CHANNEL_HOST, "archivos/sin-censura/")))
    itemlist.append(Item(channel=item.channel, action="series", title="High Definition",
                         url=urlparse.urljoin(CHANNEL_HOST, "archivos/high-definition/")))
    itemlist.append(Item(channel=item.channel, action="series", title="Mejores Hentais",
                         url=urlparse.urljoin(CHANNEL_HOST, "archivos/ranking-hentai/")))
    return itemlist
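
# Scrapes the genre list from the "hentai2" block on the home page and returns
# one "series" entry per genre link.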
def generos(item):
    logger.info()
    itemlist = []
    data = re.sub(r"\n|\r|\t|\s{2}", "", httptools.downloadpage(item.url).data)
    pattern = 'id="hentai2"><div[^>]+>(.*?)</div></div>'
    data = scrapertools.find_single_match(data, pattern)
    patron = 'href="([^"]+)"[^>]+>(.*?)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for url, title in matches:
        # logger.debug("title=[{0}], url=[{1}]".format(title, url))
        itemlist.append(Item(channel=item.channel, action="series", title=title, url=url))
    return itemlist
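
# Lists the series shown on an archive page. Entries reached from the "Novedades"
# menu go straight to findvideos; everything else opens the episode list. Also
# reads the wp-pagenavi block to build a "next page" entry.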
def series(item):
    logger.info()
    data = re.sub(r"\n|\r|\t|\s{2}", "", httptools.downloadpage(item.url).data)
    pattern = "<div class='wp-pagenavi'>(.*?)</div>"
    pagination = scrapertools.find_single_match(data, pattern)
    pattern = '<div class="col-xs-12 col-md-12 col-lg-9px-3"><ul>(.*?)</ul><div class="clearfix">'
    data = scrapertools.find_single_match(data, pattern)
    pattern = '<a href="([^"]+)".*?<img src="([^"]+)" title="([^"]+)"'
    matches = re.compile(pattern, re.DOTALL).findall(data)
    itemlist = []
    if item.extra == "novedades":
        action = "findvideos"
    else:
        action = "episodios"
    for url, thumbnail, title in matches:
        contentTitle = title
        show = title
        # logger.debug("title=[{0}], url=[{1}], thumbnail=[{2}]".format(title, url, thumbnail))
        itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
                             show=show, fanart=thumbnail, folder=True))
    if pagination:
        page = scrapertools.find_single_match(pagination, '>(?:Page|Página)\s*(\d+)\s*(?:of|de)\s*\d+<')
        pattern = 'href="([^"]+)">%s<' % (int(page) + 1)
        url_page = scrapertools.find_single_match(pagination, pattern)
        if url_page:
            itemlist.append(Item(channel=item.channel, action="series", title=">> Página Siguiente", url=url_page))
    return itemlist
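
# Extracts the chapter list ("Lista de Capítulos") from a series page and returns
# one findvideos entry per chapter, reusing the parent item's thumbnail and plot.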
def episodios(item):
    logger.info()
    itemlist = []
    data = re.sub(r"\n|\r|\t|\s{2}", "", httptools.downloadpage(item.url).data)
    pattern = '<div class="box-entry-title text-center">Lista de Capítulos</div>(.*?)</div></div>'
    data = scrapertools.find_single_match(data, pattern)
    patron = '<a href="([^"]+)"[^>]+>([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        title = scrapertools.unescape(scrapedtitle)
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = item.thumbnail
        plot = item.plot
        # logger.debug("title=[{0}], url=[{1}], thumbnail=[{2}]".format(title, url, thumbnail))
        itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url,
                             thumbnail=thumbnail, plot=plot,
                             fanart=thumbnail))
    return itemlist
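
# Collects the embedded player URLs from an episode page (resolving goo.gl/tinyurl
# shorteners through their Location header), plus any download links published on
# gpaste.us, and hands the combined list to servertools to detect playable servers.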
def findvideos(item):
    logger.info()
    data = httptools.downloadpage(item.url).data
    video_urls = []
    down_urls = []
    patron = '<(?:iframe)?(?:IFRAME)?\s*(?:src)?(?:SRC)?="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for url in matches:
        if 'goo.gl' in url or 'tinyurl' in url:
            video = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers["location"]
            video_urls.append(video)
        else:
            video_urls.append(url)
    paste = scrapertools.find_single_match(data, 'https://gpaste.us/([a-zA-Z0-9]+)')
    if paste:
        try:
            new_data = httptools.downloadpage('https://gpaste.us/' + paste).data
            bloq = scrapertools.find_single_match(new_data, 'id="input_text">(.*?)</div>')
            matches = bloq.split('<br>')
            for url in matches:
                down_urls.append(url)
        except:
            pass
    video_urls.extend(down_urls)
    from core import servertools
    itemlist = servertools.find_video_items(data=",".join(video_urls))
    for videoitem in itemlist:
        videoitem.contentTitle = item.contentTitle
        videoitem.channel = item.channel
        videoitem.thumbnail = item.thumbnail
    return itemlist