# -*- coding: utf-8 -*-

import re
import urlparse

from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import config, logger

host = "http://www.freecambay.com"


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(item.clone(action="lista", title="Nuevos Vídeos", url=host + "/latest-updates/"))
    itemlist.append(item.clone(action="lista", title="Mejor Valorados", url=host + "/top-rated/"))
    itemlist.append(item.clone(action="lista", title="Más Vistos", url=host + "/most-popular/"))
    itemlist.append(item.clone(action="categorias", title="Categorías", url=host + "/categories/"))
    itemlist.append(item.clone(action="categorias", title="Modelos",
                               url=host + "/models/?mode=async&function=get_block&block_id=list_models_models"
                                          "_list&sort_by=total_videos"))
    itemlist.append(item.clone(action="playlists", title="Listas", url=host + "/playlists/"))
    itemlist.append(item.clone(action="tags", title="Tags", url=host + "/tags/"))
    itemlist.append(item.clone(title="Buscar...", action="search"))
    itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold",
                               folder=False))
    return itemlist


def configuracion(item):
    from platformcode import platformtools
    ret = platformtools.show_channel_settings()
    platformtools.itemlist_refresh()
    return ret


def search(item, texto):
    logger.info()
    item.url = "%s/search/%s/" % (host, texto.replace("+", "-"))
    item.extra = texto
    try:
        return lista(item)
    # The exception is caught so that a failing channel does not break the global search
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def lista(item):
    logger.info()
    itemlist = []

    # Download the page
    data = httptools.downloadpage(item.url).data
    action = "play"
    if config.get_setting("menu_info", "freecambay"):
        action = "menu_info"

    # Extract the entries.
    # NOTE: the markup part of this pattern was lost in this copy of the file; the
    # regex below is a reconstruction (assumed KVS-style listing: url, title,
    # thumbnail, quality block, duration) and may need adjusting to the live HTML.
    patron = '<a href="([^"]+)" title="([^"]+)".*?data-original="([^"]+)"' \
             '(.*?)<div class="duration">\s*([^<]+)<'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedtitle, scrapedthumbnail, quality, duration in matches:
        if duration:
            scrapedtitle = "%s - %s" % (duration, scrapedtitle)
        if '>HD<' in quality:
            scrapedtitle += " [COLOR red][HD][/COLOR]"
        itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl,
                                   thumbnail=scrapedthumbnail, fanart=scrapedthumbnail))

    # Extract the next-page marker.
    # NOTE: the original pagination code (which handled search listings, flagged by
    # item.extra, separately from the rest) was lost in this copy. The block below
    # is an assumed, simplified reconstruction based on a conventional "next" link.
    if item.extra:
        next_page = scrapertools.find_single_match(data, '<li class="next">\s*<a href="([^"]+)"')
    else:
        next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"[^>]*class="[^"]*next')
    if next_page:
        itemlist.append(item.clone(action="lista", title=">> Página Siguiente",
                                   url=urlparse.urljoin(item.url, next_page)))
    return itemlist
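

# ---------------------------------------------------------------------------
# NOTE: the original categorias() and playlists() bodies were lost in this copy
# of the file (everything between lista() and tags() is missing). The sketches
# below are only assumptions modelled on the listing pattern already used in
# lista(); the function names come from the actions registered in mainlist(),
# but the regexes are illustrative and may not match the live markup.
# ---------------------------------------------------------------------------
def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Assumed markup: each category/model block links a thumbnail and a title
    patron = '<a href="([^"]+)"[^>]*>.*?data-original="([^"]+)".*?<div class="title">\s*([^<]+)<'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        itemlist.append(item.clone(action="lista", title=scrapedtitle.strip(), url=scrapedurl,
                                   thumbnail=scrapedthumbnail, fanart=scrapedthumbnail))
    return itemlist


def playlists(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Assumed markup: playlists are laid out like the category blocks above
    patron = '<a href="([^"]+)"[^>]*>.*?data-original="([^"]+)".*?<div class="title">\s*([^<]+)<'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        itemlist.append(item.clone(action="lista", title=scrapedtitle.strip(), url=scrapedurl,
                                   thumbnail=scrapedthumbnail, fanart=scrapedthumbnail))
    return itemlist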


def tags(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data

    # NOTE: the head of this function and most of its regexes were lost in this
    # copy of the file; the lines marked as assumed are reconstructions and may
    # need adjusting to the live HTML. First level: list the letter headings.
    # Second level (item.extra set): list the tags grouped under that letter.
    if not item.extra:
        letras = []
        bloque = scrapertools.find_single_match(data, '<div class="tags-index">(.*?)</div>')  # assumed
        matches = scrapertools.find_multiple_matches(bloque, '<a[^>]*>\s*(.*?)</a>')  # assumed around the surviving '\s*(.*?)'
        for title in matches:
            title = title.strip()
            if title not in letras:
                letras.append(title)
                itemlist.append(Item(channel=item.channel, action="tags", url=item.url, title=title,
                                     extra=title))
    else:
        if not item.length:
            item.length = 0
        # Block that starts at the selected letter and runs until the next letter
        # heading; the tail of this pattern was cut off in this copy and is
        # completed here as an assumption.
        bloque = scrapertools.find_single_match(data, '>%s(.*?)(?:(?!%s)(?!#)[A-Z#]{1}|</div>)'
                                                % (item.extra, item.extra))
        # Assumed completion of the lost remainder: list the tags of the selected
        # letter, with item.length kept as the offset the original used for paging.
        matches = scrapertools.find_multiple_matches(bloque, '<a href="([^"]+)"[^>]*>([^<]+)<')
        for scrapedurl, scrapedtitle in matches[item.length:]:
            itemlist.append(item.clone(action="lista", title=scrapedtitle.strip(), url=scrapedurl))
    return itemlist
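

# ---------------------------------------------------------------------------
# NOTE: the "play"/"menu_info" actions that lista() assigns are not present in
# this copy of the file. The function below is only a minimal, assumed sketch:
# it hands the downloaded page to the generic server detection in
# core.servertools instead of reproducing the original extraction logic.
# ---------------------------------------------------------------------------
def play(item):
    logger.info()
    from core import servertools
    data = httptools.downloadpage(item.url).data
    # Let the core resolve any embedded/known video hosts found in the page
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = item.title
        videoitem.channel = item.channel
    return itemlist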