# -*- coding: utf-8 -*-

import re
import base64

from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay

host = "https://www.danimados.com/"

list_servers = ['openload', 'okru', 'rapidvideo']
list_quality = ['default']


def mainlist(item):
    logger.info()
    thumb_series = get_thumb("channels_tvshow.png")
    autoplay.init(item.channel, list_servers, list_quality)

    itemlist = list()
    itemlist.append(Item(channel=item.channel, action="mainpage", title="Categorías",
                         url=host, thumbnail=thumb_series))
    itemlist.append(Item(channel=item.channel, action="lista", title="Peliculas Animadas",
                         url=host + "peliculas/", thumbnail=thumb_series))
    itemlist.append(Item(channel=item.channel, action="search", title="Buscar",
                         url=host + "?s=", thumbnail=thumb_series))

    autoplay.show_option(item.channel, itemlist)
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "?s=" + texto
    if texto != '':
        return sub_search(item)


def sub_search(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data

    # Each search result: url, thumbnail, title and the "meta" block that holds the year
    patron = '(?s)class="thumbnail animation-.*?href="([^"]+).*?'
    patron += 'img src="([^"]+).*?'
    patron += 'alt="([^"]+).*?'
    patron += 'class="meta"(.*?)class="contenido"'
    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
        scrapedyear = scrapertools.find_single_match(scrapedyear, 'class="year">(\d{4})')
        item.action = "findvideos"
        item.contentTitle = scrapedtitle
        item.contentSerieName = ""
        # Series results point to the episode list instead of directly to the videos
        if "serie" in scrapedurl:
            item.action = "episodios"
            item.contentTitle = ""
            item.contentSerieName = scrapedtitle
        title = scrapedtitle
        if scrapedyear:
            item.infoLabels['year'] = int(scrapedyear)
            title += " (%s)" % item.infoLabels['year']
        itemlist.append(item.clone(thumbnail=scrapedthumbnail, title=title, url=scrapedurl))

    tmdb.set_infoLabels(itemlist)
    return itemlist


def mainpage(item):
    logger.info()
    itemlist = []
    data1 = httptools.downloadpage(item.url).data
    data1 = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data1)

    # Menu block up to the "peliculas" link, then every category link inside it
    patron_sec = '(.+?)peliculas\/>'
    patron = 'href="([^"]+)">([^"]+)<\/a>'  # scrapedurl, scrapedtitle
    data = scrapertools.find_single_match(data1, patron_sec)
    matches = scrapertools.find_multiple_matches(data, patron)

    if item.title == "Géneros" or item.title == "Categorías":
        for scrapedurl, scrapedtitle in matches:
            if scrapedtitle != "Películas Animadas":
                itemlist.append(Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="lista"))
        return itemlist
    else:
        for scraped1, scraped2, scrapedtitle in matches:
            scrapedthumbnail = scraped1
            scrapedurl = scraped2
            itemlist.append(Item(channel=item.channel, title=scrapedtitle, url=scrapedurl,
                                 thumbnail=scrapedthumbnail, action="episodios", show=scrapedtitle))
        tmdb.set_infoLabels(itemlist)
        return itemlist


def lista(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

    data_lista = data
    if item.title == "Peliculas Animadas":
        data_lista = scrapertools.find_single_match(data, '(.*)<\/article><\/div>')
    # thumbnail, title, url and plot of each entry
    patron = 'src="([^"]+)".+?alt="([^"]+)".+?'
    patron += 'href="([^"]+)".+?>(.+?)<\/div>'
    matches = scrapertools.find_multiple_matches(data_lista, patron)

    for scrapedthumbnail, scrapedtitle, scrapedurl, scrapedplot in matches:
        if item.title == "Peliculas Animadas":
            itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
                                       contentType="movie", plot=scrapedplot, action="findvideos",
                                       show=scrapedtitle))
        else:
            itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
                                       context=autoplay.context, plot=scrapedplot, action="episodios",
                                       show=scrapedtitle))

    if item.title != "Peliculas Animadas":
        tmdb.set_infoLabels(itemlist)
    return itemlist


def episodios(item):
    logger.info()
    itemlist = []
    infoLabels = {}
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    logger.info(data)

    # Block of the page that holds the episode list
    patron = '(.+?)<\/div><\/div>'
    data_lista = scrapertools.find_single_match(data, patron)
    contentSerieName = item.title

    # Episode links inside that block
    patron_caps = 'href=(.+?)>'
    matches = scrapertools.find_multiple_matches(data_lista, patron_caps)
    for scrapedurl in matches:
        itemlist.append(item.clone(url=scrapedurl, action="findvideos",
                                   contentSerieName=contentSerieName))
    tmdb.set_infoLabels(itemlist)

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel,
                             title="[COLOR yellow]Añadir " + contentSerieName + " a la videoteca[/COLOR]",
                             url=item.url, action="add_serie_to_library", extra="episodios",
                             contentSerieName=contentSerieName))
    return itemlist


def findvideos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data

    # Each player option carries the server (data-sv) and the id (data-user) used by the backend
    patron = 'player-option-\d+.*?'
    patron += 'data-sv="([^"]+).*?'
    patron += 'data-user="([^"]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    headers = {"X-Requested-With": "XMLHttpRequest"}

    for scrapedserver, scrapeduser in matches:
        data1 = httptools.downloadpage("https://space.danimados.space/gilberto.php?id=%s&sv=mp4" % scrapeduser).data
        data1 = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data1)
        url = base64.b64decode(scrapertools.find_single_match(data1, '