# -*- coding: utf-8 -*-
"""AnimeFLV channel scraper (Python 2, Kodi/Alfa plugin style).

NOTE(review): this module was recovered from a whitespace-mangled copy in
which several HTML-matching regular expressions lost their markup and the
tail of findvideos() was truncated.  Every pattern marked "reconstructed"
below is a best-effort rebuild and must be verified against the live site
markup before the scrape results are trusted.
"""

import re
import urlparse

from channels import renumbertools
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger

HOST = "https://animeflv.net/"


def mainlist(item):
    """Build the channel's root menu (latest episodes/animes, catalogue, search filters)."""
    logger.info()
    itemlist = list()
    itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Últimos episodios", url=HOST))
    # NOTE(review): action "novedades_anime" is not defined in the visible
    # portion of this module — confirm it exists elsewhere.
    itemlist.append(Item(channel=item.channel, action="novedades_anime", title="Últimos animes", url=HOST))
    itemlist.append(Item(channel=item.channel, action="listado", title="Animes", url=HOST + "browse?order=title"))
    itemlist.append(Item(channel=item.channel, title="Buscar por:"))
    itemlist.append(Item(channel=item.channel, action="search", title=" Título"))
    itemlist.append(Item(channel=item.channel, action="search_section", title=" Género", url=HOST + "browse", extra="genre"))
    itemlist.append(Item(channel=item.channel, action="search_section", title=" Tipo", url=HOST + "browse", extra="type"))
    itemlist.append(Item(channel=item.channel, action="search_section", title=" Año", url=HOST + "browse", extra="year"))
    itemlist.append(Item(channel=item.channel, action="search_section", title=" Estado", url=HOST + "browse", extra="status"))
    # Let the renumbering helper append its own configuration entry.
    itemlist = renumbertools.show_option(item.channel, itemlist)
    return itemlist


def search(item, texto):
    """POST *texto* to the site's JSON search API and map each hit to an Item.

    Returns an empty list (after logging) on any scraping/JSON error.
    """
    logger.info()
    itemlist = []
    item.url = urlparse.urljoin(HOST, "api/animes/search")
    texto = texto.replace(" ", "+")
    post = "value=%s" % texto
    data = httptools.downloadpage(item.url, post=post).data
    try:
        dict_data = jsontools.load(data)
        for e in dict_data:
            # Prefer the most recent id when the API reports a renamed entry.
            if e["id"] != e["last_id"]:
                _id = e["last_id"]
            else:
                _id = e["id"]
            url = "%sanime/%s/%s" % (HOST, _id, e["slug"])
            title = e["title"]
            thumbnail = "%suploads/animes/covers/%s.jpg" % (HOST, e["id"])
            new_item = item.clone(action="episodios", title=title, url=url, thumbnail=thumbnail)
            if e["type"] != "movie":
                new_item.show = title
                new_item.context = renumbertools.context(item)
            else:
                new_item.contentType = "movie"
                new_item.contentTitle = title
            itemlist.append(new_item)
    except Exception:
        # Was a bare "except:" — narrowed so SystemExit/KeyboardInterrupt propagate.
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
    return itemlist


def search_section(item):
    """List the values of one of the browse page's <select> filters.

    item.extra names the select element to scrape ("genre", "type",
    "year" or "status"); each option becomes a "listado" Item.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    # NOTE(review): reconstructed — the closing tag of this pattern was lost
    # in the mangled copy; verify against the live page markup.
    patron = 'id="%s_select"[^>]+>(.*?)</select>' % item.extra
    data = scrapertools.find_single_match(data, patron)
    # NOTE(review): reconstructed option pattern; groups are (value, label).
    matches = re.compile('<option value="([^"]+)"[^>]*>([^<]+)</option>', re.DOTALL).findall(data)
    for _id, title in matches:
        url = "%s?%s=%s&order=title" % (item.url, item.extra, _id)
        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url,
                             context=renumbertools.context(item)))
    return itemlist


def newest(categoria):
    """Global "newest content" entry point; only the 'anime' category is handled."""
    itemlist = []
    if categoria == 'anime':
        itemlist = novedades_episodios(Item(url=HOST))
    return itemlist


def novedades_episodios(item):
    """List the latest released episodes from the home page."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    # NOTE(review): reconstructed — the original list/item patterns lost their
    # HTML in the mangled copy.  Group order must stay
    # (url, thumbnail, type, title, plot) to match the unpacking below;
    # the trailing optional plot group matches the surviving '(?:....+?)?' tail.
    data = scrapertools.find_single_match(data, '<ul class="ListEpisodios[^"]*">(.*?)</ul>')
    matches = re.compile('<a href="([^"]+)"[^>]*>.*?<img src="([^"]+)".*?'
                         '<span class="Type[^"]*">([^<]+)</span>.*?'
                         '<strong>([^<]+)</strong>(?:.*?<p>(.+?)</p>)?', re.DOTALL).findall(data)
    itemlist = []
    for url, thumbnail, _type, title, plot in matches:
        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)
        new_item = Item(channel=item.channel, action="episodios", title=title, url=url,
                        thumbnail=thumbnail, fulltitle=title, plot=plot)
        if _type != "Película":
            # Series get the episode-renumbering context menu.
            new_item.show = title
            new_item.context = renumbertools.context(item)
        else:
            new_item.contentType = "movie"
            new_item.contentTitle = title
        itemlist.append(new_item)
    return itemlist


def listado(item):
    """List the anime catalogue for the current browse URL, with pagination."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    # NOTE(review): reconstructed pagination pattern — verify markup.
    url_pagination = scrapertools.find_single_match(data, '<a class="[^"]*" href="([^"]+)">&raquo;</a>')
    # NOTE(review): reconstructed item pattern; group order must stay
    # (url, thumbnail, type, title, plot) to match the unpacking below.
    matches = re.compile('<a href="([^"]+)"[^>]*>.*?<img src="([^"]+)".*?'
                         '<span class="Type[^"]*">([^<]+)</span>.*?'
                         '<h3[^>]*>([^<]+)</h3>(?:.*?<p>(.+?)</p>)?', re.DOTALL).findall(data)
    itemlist = []
    for url, thumbnail, _type, title, plot in matches:
        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)
        new_item = Item(channel=item.channel, action="episodios", title=title, url=url,
                        thumbnail=thumbnail, fulltitle=title, plot=plot)
        if _type == "Anime":
            new_item.show = title
            new_item.context = renumbertools.context(item)
        else:
            new_item.contentType = "movie"
            new_item.contentTitle = title
        itemlist.append(new_item)
    if url_pagination:
        url = urlparse.urljoin(HOST, url_pagination)
        title = ">> Pagina Siguiente"
        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url))
    return itemlist


def episodios(item):
    """Scrape the episode list embedded as JavaScript variables on an anime page."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    # SECURITY NOTE(review): eval() runs text scraped from a remote page.
    # The payloads are JS array literals; consider jsontools.load() instead.
    info = eval(scrapertools.find_single_match(data, 'anime_info = (.*?);'))
    episodes = eval(scrapertools.find_single_match(data, 'var episodes = (.*?);'))
    for episode in episodes:
        # NOTE(review): HOST already ends with "/" so this yields ".net//ver/";
        # kept byte-for-byte as the original — confirm the site tolerates it.
        url = '%s/ver/%s/%s-%s' % (HOST, episode[1], info[2], episode[0])
        title = '1x%s Episodio %s' % (episode[0], episode[0])
        itemlist.append(item.clone(title=title, url=url, action='findvideos', show=info[1]))
    itemlist = itemlist[::-1]  # site lists newest first; present oldest first
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca",
                             action="add_serie_to_library", extra="episodios"))
    return itemlist


def findvideos(item):
    """Resolve the playable video sources of an episode page.

    NOTE(review): the source copy was truncated in the middle of this
    function; everything after the video[...] extraction is a minimal
    reconstruction (feed each embed snippet to servertools and tag the
    results with this channel) and must be checked against the original.
    """
    logger.info()
    itemlist = []
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", httptools.downloadpage(item.url).data)
    list_videos = scrapertools.find_multiple_matches(data, "video\[\d\]\s=\s'([^']+)'")
    for video in list_videos:
        itemlist.extend(servertools.find_video_items(data=video))
    for videoitem in itemlist:
        videoitem.channel = item.channel
    return itemlist