# -*- coding: utf-8 -*-

import re
import urlparse

from channels import renumbertools
from core import httptools
from core import jsontools
from core import servertools
from core import scrapertools
from core.item import Item
from platformcode import config, logger

HOST = "https://animeflv.net/"


def mainlist(item):
    logger.info()

    itemlist = list()

    itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Últimos episodios", url=HOST))
    itemlist.append(Item(channel=item.channel, action="novedades_anime", title="Últimos animes", url=HOST))
    itemlist.append(Item(channel=item.channel, action="listado", title="Animes", url=HOST + "browse?order=title"))

    itemlist.append(Item(channel=item.channel, title="Buscar por:"))
    itemlist.append(Item(channel=item.channel, action="search", title="    Título"))
    itemlist.append(Item(channel=item.channel, action="search_section", title="    Género", url=HOST + "browse",
                         extra="genre"))
    itemlist.append(Item(channel=item.channel, action="search_section", title="    Tipo", url=HOST + "browse",
                         extra="type"))
    itemlist.append(Item(channel=item.channel, action="search_section", title="    Año", url=HOST + "browse",
                         extra="year"))
    itemlist.append(Item(channel=item.channel, action="search_section", title="    Estado", url=HOST + "browse",
                         extra="status"))

    itemlist = renumbertools.show_option(item.channel, itemlist)

    return itemlist


def search(item, texto):
    logger.info()
    itemlist = []

    # The site exposes a JSON search endpoint; the query is sent in the POST body.
    item.url = urlparse.urljoin(HOST, "api/animes/search")
    texto = texto.replace(" ", "+")
    post = "value=%s" % texto
    data = httptools.downloadpage(item.url, post=post).data

    try:
        dict_data = jsontools.load(data)

        for e in dict_data:
            # Use last_id when it differs from id to build the anime URL.
            if e["id"] != e["last_id"]:
                _id = e["last_id"]
            else:
                _id = e["id"]

            url = "%sanime/%s/%s" % (HOST, _id, e["slug"])
            title = e["title"]
            thumbnail = "%suploads/animes/covers/%s.jpg" % (HOST, e["id"])
            new_item = item.clone(action="episodios", title=title, url=url, thumbnail=thumbnail)

            if e["type"] != "movie":
                new_item.show = title
                new_item.context = renumbertools.context(item)
            else:
                new_item.contentType = "movie"
                new_item.contentTitle = title

            itemlist.append(new_item)

    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []

    return itemlist


def search_section(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)

    # Isolate the <select> block for the requested filter (genre, type, year or status)
    # and list every <option> it contains as a browseable entry.
    patron = 'id="%s_select"[^>]+>(.*?)</select>' % item.extra
    data = scrapertools.find_single_match(data, patron)
    matches = re.compile('<option value="([^"]+)">(.*?)</option>', re.DOTALL).findall(data)

    for _id, title in matches:
        url = "%s?%s=%s&order=title" % (item.url, item.extra, _id)
        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url,
                             context=renumbertools.context(item)))

    return itemlist


def newest(categoria):
    # Entry point for the global "newest content" feature; only the anime category applies.
    itemlist = []
    if categoria == 'anime':
        itemlist = novedades_episodios(Item(url=HOST))

    return itemlist


def novedades_episodios(item):
    logger.info()

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    data = scrapertools.find_single_match(data, 'Últimos episodios.+?