# -*- coding: utf-8 -*- import re from core import httptools from core import scrapertools from core import servertools from core.item import Item from channelselector import get_thumb from platformcode import logger HOST = "http://documentales-online.com/" def mainlist(item): logger.info() itemlist = list() itemlist.append(Item(channel=item.channel, title="Novedades", action="videos", url=HOST, thumbnail=get_thumb('newest', auto=True))) itemlist.append(Item(channel=item.channel, title="Destacados", action="seccion", url=HOST, extra="destacados", thumbnail=get_thumb('hot', auto=True))) itemlist.append(Item(channel=item.channel, title="Series destacadas", action="seccion", url=HOST, extra="series", thumbnail=get_thumb('tvshows', auto=True))) itemlist.append(Item(channel=item.channel, title="Categorías", action="categorias", url=HOST, thumbnail=get_thumb('categories', auto=True))) itemlist.append(Item(channel=item.channel, title="Top 100", action="listado", url=HOST + "top/", thumbnail=get_thumb('more voted', auto=True))) itemlist.append(Item(channel=item.channel, title="Populares", action="listado", url=HOST + "populares/", thumbnail=get_thumb('more watched', auto=True))) itemlist.append(Item(channel=item.channel, title="Series y Temas", action="listado", url=HOST + "series-temas/", thumbnail=get_thumb('tvshows', auto=True))) itemlist.append(Item(channel=item.channel, title="Buscar", action="search", thumbnail=get_thumb('search', auto=True))) return itemlist def listado(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data data = data.replace('', '') bloque = scrapertools.find_single_match(data, 'class="post-entry(.*?)class="post-share') if "series-temas" not in item.url: patron = '([^<]+)<.*?""" matches = scrapertools.find_multiple_matches(bloque, patron) for scrapedurl, scrapedtitle in matches: itemlist.append(Item(action = "videos", channel = item.channel, title = scrapedtitle, url = HOST + scrapedurl )) return itemlist def 
# NOTE(review): Video-addon channel scraper for documentales-online.com built
# on a project framework (core.httptools / core.scrapertools / core.item.Item /
# platformcode.logger / channelselector.get_thumb are project modules, not
# stdlib). The single physical line above contains: the module imports, the
# HOST constant, mainlist() — which builds the channel menu as a list of Item
# objects whose `action` names the handler function to dispatch to — and
# listado() — which downloads item.url and scrapes (url, title) pairs out of
# the "post-entry".."post-share" block into Items with action "videos".
# WARNING: this file has been mangled by an extraction step: physical lines
# were collapsed and the HTML markup inside the regex string literals was
# stripped, e.g. listado()'s `patron = '([^<]+)<.*?"""` is an unterminated
# string, and the `def ` ending this line belongs to seccion() on the next
# line. The file is NOT valid Python as-is, and the lost pattern strings
# cannot be reconstructed from this text — restore the original string
# literals from version control before attempting any functional change.
# NOTE(review): body of seccion(item) — its `def` keyword sits at the end of
# the previous physical line (file is corrupted; see header note placement).
# Intended flow, as far as this residue shows: download item.url, collapse
# whitespace with re.sub, then pick a section-extraction regex based on
# item.extra: "destacados" selects the "Destacados" block and action
# "findvideos"; anything else selects the "Series destacadas" block and
# action "videos". find_single_match() narrows `data` to that section and
# find_multiple_matches() then pulls (url, title) pairs from it.
# WARNING: both `patron_seccion = '` literals below are unterminated — the
# HTML that made up the patterns was stripped out, leaving only the loose
# fragments "Destacados", "Series destacadas" and "    (.*?)" on bare lines.
# Do not edit these lines; recover the original patterns from VCS.
seccion(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data) if item.extra == "destacados": patron_seccion = '

Destacados

    (.*?)
' action = "findvideos" else: patron_seccion = '

Series destacadas

    (.*?)
' action = "videos" data = scrapertools.find_single_match(data, patron_seccion) matches = scrapertools.find_multiple_matches(data, '
# NOTE(review): two fragments share these corrupted lines.
# 1) Tail of seccion(item): iterates the (url, title) matches and clones the
#    incoming item per entry. Special case: outside the "destacados" section,
#    any title containing "Cosmos (Carl Sagan)" is forced to action
#    "findvideos" (presumably that series page links streams directly —
#    confirm against the site); all other entries keep the section's action
#    (saved in aux_action before the loop).
# 2) Head of videos(item): downloads item.url, collapses whitespace, and reads
#    the next-page link from a `rel='next' href='…'` attribute, with a
#    fallback find_single_match whose pattern (`'\d' ''`) looks truncated by
#    the same corruption. The `patron = '` literal at the end of the first
#    line below is unterminated — its HTML content was stripped.
# Do not edit; restore the original string literals from VCS.
(.*?)') aux_action = action for url, title in matches: if item.extra != "destacados" and "Cosmos (Carl Sagan)" in title: action = "findvideos" else: action = aux_action itemlist.append(item.clone(title=title, url=url, action=action, fulltitle=title)) return itemlist def videos(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data) pagination = scrapertools.find_single_match(data, "rel='next' href='([^']+)'") if not pagination: pagination = scrapertools.find_single_match(data, '\d' '') patron = '
    (.*?)
' data = scrapertools.find_single_match(data, patron) matches = re.compile('
# NOTE(review): three fragments are fused onto the single physical line below.
# 1) Tail of a listing scraper (the "Categorías" text suggests the remains of
#    a categorias()-style block — TODO confirm, its `def` is not visible in
#    this residue): loops over (url, title) matches and clones the item with
#    action "videos".
# 2) search(item, texto) — intact: replaces spaces in the query with "+",
#    builds HOST + "?s=<texto>" and delegates to videos(). The Spanish inline
#    comment translates to: "the exception is caught so that a failing channel
#    does not interrupt the global search" — hence the deliberately broad bare
#    `except:` that logs every element of sys.exc_info() and returns [].
#    (Reviewer note: `except Exception:` would be safer — a bare except also
#    swallows KeyboardInterrupt/SystemExit — but it cannot be changed here
#    without touching this corrupted line.)
# 3) Head of findvideos(item), which continues past this chunk (its
#    `patron = '(?s)` literal is cut off by the corruption).
(.*?).*?Categorías') matches = scrapertools.find_multiple_matches(data, '(.*?)') for url, title in matches: itemlist.append(item.clone(title=title, url=url, action="videos", fulltitle=title)) return itemlist def search(item, texto): logger.info() texto = texto.replace(" ", "+") try: item.url = HOST + "?s=%s" % texto return videos(item) # Se captura la excepción, para no interrumpir al buscador global si un canal falla except: import sys for line in sys.exc_info(): logger.error("%s" % line) return [] def findvideos(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data) if "Cosmos (Carl Sagan)" in item.title: patron = '(?s)

([^<]+)<.*?' patron += '