# -*- coding: utf-8 -*-
# Kodi video-addon channel for animeflv.ru (Python 2 codebase: `urlparse`).
# NOTE(review): the original file arrived whitespace-mangled on a few physical
# lines; this is a re-formatted reconstruction of the visible code.

import re
import urlparse

from channels import autoplay
from channels import renumbertools
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger

IDIOMAS = {'VOSE': 'VOSE'}
list_language = IDIOMAS.values()
list_servers = ['directo']
list_quality = ['default']

HOST = "https://animeflv.ru/"


def mainlist(item):
    """Build the channel's root menu.

    Entries: latest episodes, latest animes, the full anime list, and the
    search-by-title / search-by-genre actions. Also registers the
    renumbering and autoplay context options.
    """
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = list()
    itemlist.append(Item(channel=item.channel, action="novedades_episodios",
                         title="Últimos episodios", url=HOST))
    itemlist.append(Item(channel=item.channel, action="novedades_anime",
                         title="Últimos animes", url=HOST))
    itemlist.append(Item(channel=item.channel, action="listado",
                         title="Animes", url=HOST + "animes/nombre/lista"))
    itemlist.append(Item(channel=item.channel, title="Buscar por:"))
    itemlist.append(Item(channel=item.channel, action="search",
                         title=" Título"))
    itemlist.append(Item(channel=item.channel, action="search_section",
                         title=" Género", url=HOST + "animes", extra="genre"))
    itemlist = renumbertools.show_option(item.channel, itemlist)
    autoplay.show_option(item.channel, itemlist)
    return itemlist


def clean_title(title):
    """Return *title* with any '(digits/spaces/dashes)' year tag removed."""
    year_pattern = r'\([\d -]+?\)'
    return re.sub(year_pattern, '', title).strip()


def search(item, texto):
    """Query the site's ``search_suggest`` endpoint and map hits to Items.

    Entries whose genre contains "Pelicula" become movie items; everything
    else is treated as a series and gets the renumbering context attached.
    Returns an empty list on any scrape/parse failure (best-effort search).
    """
    logger.info()
    itemlist = []
    item.url = urlparse.urljoin(HOST, "search_suggest")
    texto = texto.replace(" ", "+")
    post = "value=%s" % texto
    data = httptools.downloadpage(item.url, post=post).data
    try:
        dict_data = jsontools.load(data)
        for e in dict_data:
            title = clean_title(scrapertools.htmlclean(e["name"]))
            url = e["url"]
            plot = e["description"]
            thumbnail = e["thumb"]
            new_item = item.clone(action="episodios", title=title, url=url,
                                  plot=plot, thumbnail=thumbnail)
            if "Pelicula" in e["genre"]:
                new_item.contentType = "movie"
                new_item.contentTitle = title
            else:
                new_item.show = title
                new_item.context = renumbertools.context(item)
            itemlist.append(new_item)
    except Exception:
        # Was a bare `except:`; narrowed to Exception so SystemExit and
        # KeyboardInterrupt are no longer swallowed. Behavior otherwise
        # preserved: log the failure and return no results instead of
        # crashing the UI.
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
    return itemlist


def search_section(item):
    """List the entries of a search filter section (``item.extra``, e.g. "genre").

    NOTE(review): the tail of this function was truncated and its regex
    literals garbled in the source dump; the statements below reproduce only
    the visible fragments. Confirm the scrape patterns against the live page
    markup before relying on this action.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    # Capture the filter block whose id is "<extra>_filter".
    # TODO(review): closing delimiter of this pattern was lost in the dump.
    patron = 'id="%s_filter"[^>]+>(.*?)' % item.extra
    data = scrapertools.find_single_match(data, patron)
    # TODO(review): the per-entry pattern and Item construction that followed
    # were cut off in the dump; returning the (empty) list keeps the action
    # safe to invoke.
    return itemlist