# -*- coding: utf-8 -*-

from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import logger

host = "https://jkanime.net"


def mainlist(item):
    logger.info()

    itemlist = list()
    itemlist.append(Item(channel=item.channel, action="ultimas_series", title="Últimas Series", url=host))
    itemlist.append(Item(channel=item.channel, action="ultimos_episodios", title="Últimos Episodios", url=host))
    itemlist.append(Item(channel=item.channel, action="p_tipo", title="Listado Alfabetico", url=host,
                         extra="Animes por letra"))
    itemlist.append(Item(channel=item.channel, action="p_tipo", title="Listado por Genero", url=host,
                         extra="Animes por Genero"))
    itemlist.append(Item(channel=item.channel, action="search", title="Buscar"))

    return itemlist
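

# Menu entries dispatch by name: the platform calls the function in this
# module whose name matches the Item's "action", handing the Item back as the
# argument. "Últimas Series" therefore lands in ultimas_series() below, and
# both "Listado" entries share p_tipo(), differing only in item.extra.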


def ultimas_series(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data, 'Últimos capitulos agregados.*?/div>')
    # NOTE: the markup inside the original pattern and the loop below did not
    # survive extraction; the capture groups (title, url, thumbnail) are an
    # assumed reconstruction.
    patron = '<a title="([^"]+)" href="([^"]+)">.*?<img src="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedtitle, scrapedurl, scrapedthumbnail in matches:
        itemlist.append(Item(channel=item.channel, action="episodios", title=scrapedtitle, url=scrapedurl,
                             thumbnail=scrapedthumbnail, show=scrapedtitle, viewmode="movie_with_plot"))

    return itemlist


def p_tipo(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    # Keep only the menu block whose heading matches item.extra ("Animes por
    # letra" / "Animes por Genero"); the markup around the heading was lost,
    # so the '</div>' closing anchor is an assumption.
    data = scrapertools.find_single_match(data, '%s(.*?)</div>' % item.extra)
    patron = 'href="([^"]+)".*?'
    patron += 'title.*?>([^<]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedtitle in matches:
        if "Por Genero" not in scrapedtitle:
            itemlist.append(
                Item(channel=item.channel, action="series", title=scrapedtitle, url=host + scrapedurl,
                     viewmode="movie_with_plot"))

    return itemlist


def series(item):
    logger.info()

    # Download the page
    data = httptools.downloadpage(item.url).data
    # Extract the entries
    patron = '(?is)let-post.*?src="([^"]+).*?'
    patron += 'alt="([^"]+).*?'
    patron += 'href="([^"]+).*?'
    patron += '<p>([^\<]+).*?'  # the tag that opens the synopsis was lost; '<p>' is an assumption
    patron += 'eps-num">([^<]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    itemlist = []
    for scrapedthumbnail, scrapedtitle, scrapedurl, scrapedplot, scrapedepisode in matches:
        title = scrapedtitle + " (" + scrapedepisode + ")"
        scrapedthumbnail = scrapedthumbnail.replace("thumbnail", "image")
        plot = scrapertools.htmlclean(scrapedplot)
        itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=scrapedurl,
                             thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=plot,
                             show=scrapedtitle))

    tmdb.set_infoLabels(itemlist)

    try:
        # Next page of the listing; this block was partially lost, so the href
        # pattern and the link title are an assumed reconstruction.
        siguiente = scrapertools.find_single_match(data, '<a class="nav-next" href="([^"]+)"')
        if len(siguiente) > 0:
            scrapedurl = siguiente
            scrapedtitle = "Pagina siguiente"
            scrapedthumbnail = ""
            scrapedplot = ""
            itemlist.append(
                Item(channel=item.channel, action="series", title=scrapedtitle, url=scrapedurl,
                     thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True, viewmode="movie_with_plot"))
    except:
        pass

    return itemlist
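

# On a series page the episode pager is a row of tab links whose text shows
# the range each tab covers, e.g. (shape inferred from the pattern below):
#   <a href="#pag3">21 - 30</a>
# so the last link tells us both how many pages exist and the highest
# episode number.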
def get_pages_and_episodes(data):
    results = scrapertools.find_multiple_matches(data, 'href="#pag([0-9]+)".*?>[0-9]+ - ([0-9]+)')
    if results:
        return int(results[-1][0]), int(results[-1][1])
    return 1, 0
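

# Episode lists are not embedded in the series page itself: the site serves
# them in blocks of ten through /ajax/pagination_episodes/<serie_id>/<page>/,
# so episodios() first works out how many pages exist and then fetches each one.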
def episodios(item):
    logger.info()
    itemlist = []

    # Download the page
    data = httptools.downloadpage(item.url).data
    # Both patterns below were partially lost in extraction: the plot is
    # assumed to come from the description meta tag, and the opening anchor of
    # the thumbnail pattern is likewise an assumption.
    scrapedplot = scrapertools.find_single_match(data, '<meta name="description" content="([^"]+)"')
    scrapedthumbnail = scrapertools.find_single_match(data, '(?s)<div class="anime_info">.*?src="([^"]+)"')
    idserie = scrapertools.find_single_match(data, r"ajax/pagination_episodes/(\d+)/")
    logger.info("idserie=" + idserie)

    if " Eps" in item.extra and "Desc" not in item.extra:
        # item.extra carries the episode count, e.g. "25 Eps"
        caps_x = item.extra.replace(" Eps", "")
        capitulos = int(caps_x)
        # Ceiling division: 25 episodes -> 2 full pages of ten plus a partial one -> 3
        paginas = capitulos // 10 + (capitulos % 10 > 0)
    else:
        paginas, capitulos = get_pages_and_episodes(data)
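
    # Each page of the endpoint answers with JSON whose entries look like
    # {"number":"1","title":"Episodio 1"} (shape inferred from the pattern
    # below); the Referer header mimics a browser request from the series page.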
    for num_pag in range(1, paginas + 1):
        numero_pagina = str(num_pag)
        headers = {"Referer": item.url}
        data2 = httptools.downloadpage(host + "/ajax/pagination_episodes/%s/%s/" % (idserie, numero_pagina),
                                       headers=headers).data
        patron = r'"number":"(\d+)","title":"([^"]+)"'
        matches = scrapertools.find_multiple_matches(data2, patron)
        for numero, scrapedtitle in matches:
            title = scrapedtitle.strip()
            url = item.url + numero
            plot = scrapedplot
            itemlist.append(item.clone(action="findvideos", title=title, url=url, plot=plot))

    if len(itemlist) == 0:
        # The series has an id but nothing published yet
        try:
            itemlist.append(Item(channel=item.channel, action="findvideos", title="Serie por estrenar", url="",
                                 thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot,
                                 server="directo", folder=False))
        except:
            pass

    return itemlist


def findvideos(item):
    logger.info()
    itemlist = []
    aux_url = []

    data = httptools.downloadpage(item.url).data
    # The pattern that pulls the embedded players out of the page was lost in
    # extraction; the one below assumes the players are published as a JS
    # array of iframes (video[n] = '<iframe ... src="...">').
    list_videos = scrapertools.find_multiple_matches(data, r'video\[\d+\] = \'<iframe[^>]+src="([^"]+)"')
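
    # A minimal sketch of the wrap-up, assuming the matched URLs belong to
    # hosts the core resolver recognises: deduplicate them through aux_url and
    # let servertools.find_video_items identify a server for each one.
    for video_url in list_videos:
        if video_url not in aux_url:
            aux_url.append(video_url)
    itemlist.extend(servertools.find_video_items(data=",".join(aux_url)))
    for videoitem in itemlist:
        videoitem.channel = item.channel

    return itemlist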