# -*- coding: UTF-8 -*- import re import urlparse from core import httptools from core import scrapertools from core import servertools from core import tmdb from core.item import Item from platformcode import config, logger host = "http://www.asialiveaction.com" def mainlist(item): logger.info() itemlist = list() itemlist.append(Item(channel=item.channel, action="lista", title="Peliculas", url=urlparse.urljoin(host, "p/peliculas.html"), type='pl', first=0)) itemlist.append(Item(channel=item.channel, action="lista", title="Series", url=urlparse.urljoin(host, "p/series.html"), type='sr', first=0)) itemlist.append(Item(channel=item.channel, action="category", title="Géneros", url=host, cat='genre')) itemlist.append(Item(channel=item.channel, action="category", title="Calidad", url=host, cat='quality')) itemlist.append(Item(channel=item.channel, action="category", title="Orden Alfabético", url=host, cat='abc')) itemlist.append(Item(channel=item.channel, action="category", title="Año de Estreno", url=host, cat='year')) itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+"/search?q=")) return itemlist def category(item): logger.info() itemlist = list() data = httptools.downloadpage(host).data data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) if item.cat == 'abc': data = scrapertools.find_single_match(data, 'Orden Alfabético.*?') elif item.cat == 'genre': data = scrapertools.find_single_match(data, 'Géneros.*?') elif item.cat == 'year': data = scrapertools.find_single_match(data, 'Año.*?') elif item.cat == 'quality': data = scrapertools.find_single_match(data, 'Calidad.*?') patron = "
# NOTE(review): everything below this banner was damaged during extraction.
# HTML tags that lived inside the regex string literals were stripped, and
# several statements (including a whole episodios()/videolibrary branch)
# were fused onto shared lines.  The code is preserved byte-for-byte; the
# comments only label which function each fragment belonged to.  This
# region does not parse and must be restored from upstream before use.
#
# Fragment below: tail of category()'s `patron` literal, the loop that turns
# its (title, url) matches into Items (skipping 'Próximas Películas'), then
# the head of search_results() up to its (damaged) result-card pattern.
  • ([^<]+)" matches = re.compile(patron, re.DOTALL).findall(data) for scrapedtitle, scrapedurl in matches: if scrapedtitle != 'Próximas Películas': itemlist.append(item.clone(action='lista', title=scrapedtitle, url=host+scrapedurl, type='cat', first=0)) return itemlist def search_results(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) patron = '([^<]+).*?class="poster-bg" src="([^"]+)"/>.*?

    .*?' patron +=">(\d{4}).*?

# NOTE(review): the line below fuses several lost statements -- the end of
# search_results()' pattern, what looks like an episodios()-style
# "len(itemlist) > 0" videolibrary check ('... 0:' is all that survives of
# the condition), and the start of lista() with its (truncated) headline
# pattern.
    ([^<]+) 0: itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]", url=item.url, action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName)) return itemlist def lista(item): logger.info() next = True itemlist = [] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) data = scrapertools.find_single_match(data, "itemprop='headline'>.*?
# NOTE(review): fragments of lista()'s result-row regex (HTML stripped).
# Judging by the consuming loop it captured, in order: type ('pl'/'sr'),
# year, thumbnail, quality markup, title, url -- the surviving groups below
# do not cover all six, so most of the pattern is lost.
    .*?") patron = '.*?
    (\d{4})
    ' patron += '
    (.*?)
    ([^<]+)
# NOTE(review): the line below fuses the remainder of lista() -- 20-item
# pagination window, per-item quality tags, tmdb enrichment, "Siguiente >>"
# paging item -- with the head of findvideos().  Inside the findvideos part,
# "replace('&', '&')" was visibly "replace('&amp;', '&')" before entity
# decoding, and the '### obtiene los gvideo' comment swallowed the rest of
# its original line.
    ' matches = scrapertools.find_multiple_matches(data, patron) first = int(item.first) last = first + 19 if last > len(matches): last = len(matches) next = False for scrapedtype, scrapedyear, scrapedthumbnail, scrapedquality, scrapedtitle ,scrapedurl in matches[first:last]: patron_quality="(.+?)" quality = scrapertools.find_multiple_matches(scrapedquality, patron_quality) qual="" for calidad in quality: qual=qual+"["+calidad+"] " title="%s [%s] %s" % (scrapedtitle,scrapedyear,qual) new_item= Item(channel=item.channel, title=title, url=host+scrapedurl, thumbnail=scrapedthumbnail, type=scrapedtype, infoLabels={'year':scrapedyear}) if scrapedtype.strip() == 'sr': new_item.contentSerieName = scrapedtitle new_item.action = 'episodios' else: new_item.contentTitle = scrapedtitle new_item.action = 'findvideos' if scrapedtype == item.type or item.type == 'cat': itemlist.append(new_item) tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) #pagination url_next_page = item.url first = last if next: itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='lista', first=first)) return itemlist def findvideos(item): logger.info() itemlist = [] dl_links = [] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) ### obtiene los gvideo patron = 'class="Button Sm fa fa-download mg">') g_url = '%s%s' % ('https://drive.google.com', video_id) g_url = g_url.replace('&', '&') g_data = httptools.downloadpage(g_url, follow_redirects=False, only_headers=True).headers url = g_data['location'] dl_links.append(Item(channel=item.channel, title='%s', url=url, action='play', infoLabels=item.infoLabels)) if item.type == 'pl': new_url = scrapertools.find_single_match(data, '
# NOTE(review): findvideos() is cut off here -- the remainder of the
# function lies outside this chunk and is not reconstructed.
    .*?