# -*- coding: utf-8 -*-

import re
import urllib

from channelselector import get_thumb
from core.item import Item
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from platformcode import config, logger
from channels import autoplay

host = "http://www.cuevana2.com/"

list_quality = []
list_servers = ['rapidvideo', 'streamango', 'directo', 'yourupload', 'openload', 'dostream']


### MENUS ###

def mainlist(item):
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = []

    # PELICULAS
    itemlist.append(Item(channel=item.channel, title="Peliculas", folder=False,
                         thumbnail=get_thumb("movies", auto=True), text_bold=True))
    itemlist.append(Item(channel=item.channel, title="Novedades", action="movies", url=host + "pelicula",
                         thumbnail=get_thumb("newest", auto=True)))
    itemlist.append(Item(channel=item.channel, title="Por género", action="genre", url=host + "pelicula",
                         thumbnail=get_thumb("genres", auto=True)))
    itemlist.append(Item(channel=item.channel, title="Por año", action="age", url=host + "pelicula",
                         thumbnail=get_thumb("year", auto=True)))
    itemlist.append(Item(channel=item.channel, title="Favoritas", action="movies", url=host + "peliculas-destacadas",
                         thumbnail=get_thumb("favorites", auto=True)))
    itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", url=host + "search/",
                         thumbnail=get_thumb("search", auto=True)))

    # SERIES
    itemlist.append(Item(channel=item.channel, title="Series", folder=False,
                         thumbnail=get_thumb("tvshows", auto=True), text_bold=True))
    itemlist.append(Item(channel=item.channel, title="Todas las Series", action="shows", url=host + "listar-series",
                         thumbnail=get_thumb("tvshows", auto=True)))
    itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", extra='1',
                         url=host + "listar-series", thumbnail=get_thumb("search", auto=True)))

    autoplay.show_option(item.channel, itemlist)

    return itemlist

### END MENUS ###


def inArray(arr, arr2):
    # True only when every word of arr also appears in arr2
    for word in arr:
        if word not in arr2:
            return False
    return True


def load_data(url):
    data = httptools.downloadpage(url).data
    # collapse whitespace and line-break noise ("&nbsp;"/"<br>" assumed; those literals were garbled in the source)
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    return data


def put_movies(itemlist, item, data, pattern):
    matches = scrapertools.find_multiple_matches(data, pattern)
    for link, img, title, rating, plot in matches:
        if 'pelicula' in link:
            itemTitle = "%s [COLOR yellow](%s/100)[/COLOR]" % (title, rating)
            itemlist.append(Item(channel=item.channel, title=itemTitle, fulltitle=title, thumbnail=img,
                                 url=link, plot=plot, action="findvideos"))
            logger.info(link)
    return itemlist


def put_episodes(itemlist, item, text):
    # (HTML tags in this pattern were lost in the source)
    pattern = '.*?ref="([^"]+).*?"tit">(.*?)'
    matches = scrapertools.find_multiple_matches(text, pattern)
    for link, title in matches:
        itemlist.append(item.clone(title=title, fulltitle=item.title, url=link, action='findvideos', extra=1))


def episodes(item):
    logger.info()
    itemlist = []
    data = load_data(item.url)
    # (HTML tags in these two patterns were lost in the source)
    seasonsPattern = '"#episodios(\d+)".*?>(.*?)'
    episodesPattern = 'id="episodios%s">(.*?)'
    matches = scrapertools.find_multiple_matches(data, seasonsPattern)
    for season, title in matches:
        itemlist.append(Item(channel=item.channel, title="[COLOR blue]%s[/COLOR]" % title,
                             folder=False, text_bold=True))
        episodeMatches = scrapertools.find_single_match(data, episodesPattern % season)
        put_episodes(itemlist, item, episodeMatches)
    return itemlist


def shows(item):
    logger.info()
    itemlist = []
    data = load_data(item.url)
    # (the HTML part of this pattern, including the link capture group, was lost in the source)
    pattern = '"in">(.*?)'
    matches = scrapertools.find_multiple_matches(data, pattern)
    for link, title in matches:
        itemlist.append(Item(channel=item.channel, title=title, url=host + link, action="episodes"))
    return itemlist
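
# Usage sketch for put_movies(), which movies() and searchMovies() below rely on: the
# pattern they pass in must capture exactly five groups, in this order: link, img, title,
# rating, plot (see the unpacking in put_movies()). The pattern shown here is purely
# hypothetical, for illustration only; the real patterns were lost when this file was
# extracted.
#
#   pattern = 'href="([^"]+)".*?src="([^"]+)".*?alt="([^"]+)".*?"rating">(\d+).*?"plot">(.*?)<'
#   put_movies(itemlist, item, load_data(host + "pelicula"), pattern)
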

def movies(item):
    logger.info()
    itemlist = []
    # download the html page
    data = load_data(item.url)
    # pattern to find the movies
    # (the HTML fragments of these patterns, and part of this block, were lost in the
    # source; the put_movies() call and the next_page lookup are reconstructed)
    pattern = ''   # link
    pattern += ''
    # place the movies found into the list
    put_movies(itemlist, item, data, pattern)
    next_page = scrapertools.find_single_match(data, '')
    if next_page:
        itemlist.append(Item(channel=item.channel, title='Siguiente Pagina', url=next_page, action="movies"))
    return itemlist


def searchShows(itemlist, item, texto):
    texto = texto.lower().split()
    data = load_data(item.url)
    # (the HTML part of this pattern, including the link capture group, was lost in the source)
    pattern = '"in">(.*?)'
    matches = scrapertools.find_multiple_matches(data, pattern)
    for link, title in matches:
        keywords = title.lower().split()
        logger.info(keywords)
        logger.info(texto)
        if inArray(texto, keywords):
            itemlist.append(Item(channel=item.channel, title=title, url=host + link, action="episodes"))
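
# Matching sketch: searchShows() splits both the query and every show title into lowercase
# words and keeps a title only when inArray() finds each query word in it, e.g.
#   inArray(['los', 'simpson'], ['los', 'simpson', '(1989)'])  -> True
#   inArray(['los', 'simpson'], ['los', 'soprano'])            -> False
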

def searchMovies(itemlist, item, texto):
    texto = texto.replace(' ', '+').lower()
    data = load_data(item.url + texto)
    # pattern to find the movies
    # (the HTML fragments of these patterns, and part of this block, were lost in the
    # source; the put_movies() call and the next_page lookup are reconstructed)
    pattern = ''   # link
    pattern += ''
    put_movies(itemlist, item, data, pattern)
    next_page = scrapertools.find_single_match(data, '')
    if next_page:
        data = load_data(next_page)


def search(item, texto):
    itemlist = []
    if item.extra:
        searchShows(itemlist, item, texto)
    else:
        searchMovies(itemlist, item, texto)
    return itemlist


def by(item, pattern):
    logger.info()
    itemlist = []
    # download the html page
    data = load_data(item.url)
    # pattern to search the page: '&&' is replaced by the caller's capture group
    # (the HTML part of the pattern, including the link capture group, was lost in the source)
    pattern = '&&'.replace('&&', pattern)
    matches = scrapertools.find_multiple_matches(data, pattern)
    for link, genre in matches:
        itemlist.append(Item(channel=item.channel, title=genre, url=link, action="movies"))
    return itemlist


def genre(item):
    return by(item, '(\D+)')


def age(item):
    return by(item, '(\d+)')


def GKPluginLink(hash):
    hashdata = urllib.urlencode({r'link': hash})
    json = httptools.downloadpage('https://player4.cuevana2.com/plugins/gkpluginsphp.php', post=hashdata).data
    return jsontools.load(json)['link'] if json else ''
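
# Usage sketch for GKPluginLink(): it POSTs {'link': hash} to the gkpluginsphp endpoint and
# returns the 'link' field of the JSON reply, or '' when the response is empty. How
# findvideos() feeds it the player hashes is an assumption here (that part of the file is
# truncated below), e.g.:
#
#   url = GKPluginLink(player_hash)
#   if url:
#       itemlist.append(item.clone(url=url, action='play', server='directo'))
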

# the pattern looks odd in order to remove duplicates; in any case, this is how a
# programming language verifies its syntax
def getContentMovie(data, item):
    item.infoLabels["year"] = scrapertools.find_single_match(data, 'rel="tag">(\d+)')
    # (the HTML tags inside the lookaheads of the three patterns below were lost in the source)
    genre = ''
    for found_genre in scrapertools.find_multiple_matches(data, 'genero/.*?">(.*?)(?=.*?)'):
        genre += found_genre + ', '
    item.infoLabels["genre"] = genre.strip(', ')
    director = ''
    for found_director in scrapertools.find_multiple_matches(data, 'director/.*?">(.*?)(?=.*?)'):
        director += found_director + ', '
    item.infoLabels["director"] = director.strip(', ')
    item.infoLabels["cast"] = tuple(found_cast for found_cast in scrapertools.find_multiple_matches(
        data, 'reparto/.*?">(.*?)(?=.*?)'))


def getContentShow(data, item):
    item.thumbnail = scrapertools.find_single_match(data, 'width="120" height="160" src="([^"]+)"')
    item.infoLabels['genre'] = scrapertools.find_single_match(data, '-4px;">(.*?)')


def findvideos(item):
    logger.info()
    itemlist = []
    data = load_data(item.url)
    if item.extra:
        getContentShow(data, item)
    else:
        getContentMovie(data, item)
    pattern = '