# -*- coding: utf-8 -*-

import re

from channels import renumbertools
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay

IDIOMAS = {'latino': 'Latino'}
list_language = IDIOMAS.values()
list_servers = ['openload', 'okru', 'netutv', 'rapidvideo']
list_quality = ['default']

host = "http://www.anitoonstv.com"


def mainlist(item):
    logger.info()

    thumb_series = get_thumb("tvshows", auto=True)

    autoplay.init(item.channel, list_servers, list_quality)

    itemlist = list()
    itemlist.append(Item(channel=item.channel, action="lista", title="Series",
                         url=host + "/lista-de-anime.php", thumbnail=thumb_series, range=[0, 19]))
    itemlist.append(Item(channel=item.channel, action="lista", title="Películas",
                         url=host + "/catalogo.php?g=&t=peliculas&o=0", thumbnail=thumb_series, range=[0, 19]))
    itemlist.append(Item(channel=item.channel, action="lista", title="Especiales",
                         url=host + "/catalogo.php?g=&t=especiales&o=0", thumbnail=thumb_series, range=[0, 19]))

    itemlist = renumbertools.show_option(item.channel, itemlist)
    autoplay.show_option(item.channel, itemlist)

    return itemlist


def lista(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    # logger.info("Pagina para regex " + data)
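    # The listing is scraped with one regex built in pieces, one piece per field;
    # the markup stripped from each piece is assumed, only the literals are original.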
    patron = '<li>'  # regex header (tag assumed)
    patron += "<a href='(.+?)'>"  # scrapedurl (tag assumed)
    patron += "Titulo.+?<\/span>(.+?)<\/a>"  # scrapedtitle (closing tag assumed)
    matches = scrapertools.find_multiple_matches(data, patron)
    # The original loop that built these entries was lost in the source; minimal
    # reconstruction (assumed): series open "episodios", the rest goes straight
    # to "findvideos".
    for scrapedurl, scrapedtitle in matches:
        url = host + "/" + scrapedurl
        if "Series" in item.title:
            itemlist.append(Item(channel=item.channel, action="episodios", title=scrapedtitle, url=url,
                                 show=scrapedtitle, contentSerieName=scrapedtitle))
        else:
            itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=url,
                                 contentTitle=scrapedtitle))
    tmdb.set_infoLabels(itemlist)

    return itemlist


def episodios(item):
    # The start of this function was lost in the source; the download and the
    # episode pattern are reconstructed (assumed) from what the loop below expects.
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron = "<a href='(.+?)'>Capitulo ([0-9]+) ?-? ?(.*?)<\/a>"  # markup assumed
    matches = scrapertools.find_multiple_matches(data, patron)
    scrapedthumbnail = scrapertools.find_single_match(data, "<img src='(.+?)'>")  # tag assumed
    scrapedplot = scrapertools.find_single_match(data, 'Descripcion.+?<\/span>(.+?)<br>')  # closing tag assumed
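    # temp counts seasons: every time the chapter counter restarts at 1 a new
    # season begins, and renumbertools remaps (season, episode) to the numbering
    # the user configured for Trakt.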
    i = 0
    temp = 0
    infoLabels = item.infoLabels
    for link, cap, name in matches:
        if int(cap) == 1:
            temp = temp + 1
        if int(cap) < 10:
            cap = "0" + cap
        season = temp
        episode = int(cap)
        season, episode = renumbertools.numbered_for_tratk(
            item.channel, item.show, season, episode)
        infoLabels['season'] = season
        infoLabels['episode'] = episode
        date = name
        title = "%sx%s %s (%s)" % (season, str(episode).zfill(2), "Episodio %s" % episode, date)
        # title = str(temp)+"x"+cap+" "+name
        url = host + "/" + link
        if "NO DISPONIBLE" not in name:
            itemlist.append(Item(channel=item.channel, action="findvideos", title=title,
                                 thumbnail=scrapedthumbnail, plot=scrapedplot, url=url,
                                 contentSeasonNumber=season, contentEpisodeNumber=episode,
                                 contentSerieName=item.contentSerieName, infoLabels=infoLabels))

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel,
                             title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]",
                             url=item.url, action="add_serie_to_library", extra="episodios",
                             contentSerieName=item.contentSerieName))

    return itemlist


def googl(url):
    logger.info()
    a = url.split("/")
    link = a[3]
    link = "http://www.trueurl.net/?q=http%3A%2F%2Fgoo.gl%2F" + link + "&lucky=on&Uncloak=Find+True+URL"
    data_other = httptools.downloadpage(link).data
    data_other = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data_other)
    # trueurl.net shows the expanded link in the cell after "Destination URL";
    # the tail of this pattern was lost in the source and is assumed.
    patron = 'Destination URL<\/td><td[^>]*>.*?href="([^"]+)"'
    url = scrapertools.find_single_match(data_other, patron)
    return url


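# findvideos scrapes one episode or movie page: it pulls the title, plot and
# thumbnail, collects the embedded player URLs and emits one playable item per link.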
def findvideos(item):
    # The start of this function was lost in the source; the download and the
    # markup around the patterns below are reconstructed (assumed).
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    scrapedtitle = scrapertools.find_single_match(data, 'Titulo.+?<\/span>([^<]+)')  # surrounding tags lost
    scrapedplot = scrapertools.find_single_match(data, 'Descrip.+?<\/span>([^<]+)')  # surrounding tags lost
    scrapedthumbnail = scrapertools.find_single_match(data, '<img src="(.+?)"')  # tag assumed
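    # Every embedded player URL appears as a quoted string inside the player
    # block, so each "..." occurrence in data_vid is treated as a candidate link.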
    data_vid = scrapertools.find_single_match(data, '<div class="videos">(.+?)<\/div>')  # pattern assumed; the original expression was lost
    itemla = scrapertools.find_multiple_matches(data_vid, '"(.+?)"')
    for url in itemla:
        url = url.replace('\/', '/')
        # The server name is derived from the host part of the embed URL.
        server1 = url.split('/')
        server = server1[2]
        if "." in server:
            server1 = server.split('.')
            if len(server1) == 3:
                server = server1[1]
            else:
                server = server1[0]
        if "goo" in url:
            url = googl(url)
            server = 'netutv'
        if "ok" in url:
            url = "https:" + url
            server = 'okru'
        quality = "360p"
        itemlist.append(item.clone(url=url, action="play", thumbnail=scrapedthumbnail, server=server,
                                   plot=scrapedplot,
                                   title="Enlace encontrado en: %s [%s]" % (server.capitalize(), quality)))

    if item.contentTitle != "" and config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel,
                             title="[COLOR yellow]Añadir esta película a la videoteca[/COLOR]",
                             url=item.url, action="add_pelicula_to_library", extra="episodios",
                             show=item.contentTitle))

    autoplay.start(itemlist, item)

    return itemlist