# -*- coding: utf-8 -*-

from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import logger

host = "https://jkanime.net"


def mainlist(item):
    logger.info()
    itemlist = list()
    itemlist.append(Item(channel=item.channel, action="ultimas_series", title="Últimas Series", url=host))
    itemlist.append(Item(channel=item.channel, action="ultimos_episodios", title="Últimos Episodios", url=host))
    itemlist.append(Item(channel=item.channel, action="p_tipo", title="Listado Alfabetico", url=host, extra="Animes por letra"))
    itemlist.append(Item(channel=item.channel, action="p_tipo", title="Listado por Genero", url=host, extra="Animes por Genero"))
    itemlist.append(Item(channel=item.channel, action="search", title="Buscar"))
    return itemlist


def ultimas_series(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Narrow the scrape to the "Últimos capitulos agregados" block
    data = scrapertools.find_single_match(data, 'Últimos capitulos agregados.*?</div>')
    # Generic link/title pattern for the entries inside that block
    patron = 'href="([^"]+)".*?'
    patron += 'title="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedtitle in matches:
        itemlist.append(Item(channel=item.channel, action="episodios", title=scrapedtitle, url=scrapedurl, show=scrapedtitle))
    return itemlist


def p_tipo(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Narrow the listing to the block whose header matches item.extra ("Animes por letra" / "Animes por Genero")
    data = scrapertools.find_single_match(data, '%s(.*?)' % item.extra)
    patron = 'href="([^"]+)".*?'
    patron += 'title.*?>([^<]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedtitle in matches:
        if "Por Genero" not in scrapedtitle:
            itemlist.append(
                Item(channel=item.channel, action="series", title=scrapedtitle, url=host + scrapedurl,
                     viewmode="movie_with_plot"))
    return itemlist


def series(item):
    logger.info()
    # Download the page
    data = httptools.downloadpage(item.url).data
    # Extract the entries: thumbnail, title, url, plot and episode count
    patron = '(?is)let-post.*?src="([^"]+).*?'
    patron += 'alt="([^"]+).*?'
    patron += 'href="([^"]+).*?'
    patron += '([^\<]+).*?'
    patron += 'eps-num">([^<]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    itemlist = []
    for scrapedthumbnail, scrapedtitle, scrapedurl, scrapedplot, scrapedepisode in matches:
        title = scrapedtitle + " (" + scrapedepisode + ")"
        scrapedthumbnail = scrapedthumbnail.replace("thumbnail", "image")
        plot = scrapertools.htmlclean(scrapedplot)
        itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=scrapedurl,
                             thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=plot, show=scrapedtitle))
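    # Fill in TMDb metadata for the listed series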
    tmdb.set_infoLabels(itemlist)
    # Pagination: follow the "Siguiente" link when the directory has more pages
    try:
        siguiente = scrapertools.find_single_match(data, 'href="([^"]+)"[^>]*>Siguiente')
        if len(siguiente) > 0:
            itemlist.append(
                Item(channel=item.channel, action="series", title=">> Pagina Siguiente", url=siguiente,
                     thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True, viewmode="movie_with_plot"))
    except:
        pass
    return itemlist

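# Read the episode paginator: returns (total pages, highest listed episode), or (1, 0) if absent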
def get_pages_and_episodes(data):
    results = scrapertools.find_multiple_matches(data, 'href="#pag([0-9]+)".*?>[0-9]+ - ([0-9]+)')
    if results:
        return int(results[-1][0]), int(results[-1][1])
    return 1, 0

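# Build the episode list of a series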
def episodios(item):
    logger.info()
    itemlist = []
    # Download the page
    data = httptools.downloadpage(item.url).data
    scrapedplot = scrapertools.find_single_match(data, '')
    scrapedthumbnail = scrapertools.find_single_match(data, '