# -*- coding: utf-8 -*-
"""Kodi/Alfa channel scraper for porntrex.com (Python 2 — uses urllib/urlparse).

NOTE(review): this file was recovered from a mangled copy in which all string
literals beginning with '<' were stripped (the whole module had also been
collapsed onto a single line).  The scraping pattern inside ``lista`` and the
fallback pagination pattern were lost; they are marked with TODO(review)
below and must be restored from the upstream addon before shipping.
"""

import re
import sys
import urllib
import urlparse

from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import config, logger

host = "https://www.porntrex.com"
perpage = 20


def mainlist(item):
    """Build the channel's root menu.

    Returns a list of Item clones, one per section; also resets the channel's
    persisted ``url_error`` flag.
    """
    logger.info()
    itemlist = []
    config.set_setting("url_error", False, "porntrex")
    itemlist.append(item.clone(action="lista", title="Nuevos Vídeos",
                               url=host + "/latest-updates/"))
    itemlist.append(item.clone(action="lista", title="Mejor Valorados",
                               url=host + "/top-rated/"))
    itemlist.append(item.clone(action="lista", title="Más Vistos",
                               url=host + "/most-popular/"))
    itemlist.append(item.clone(action="categorias", title="Categorías",
                               url=host + "/categories/"))
    itemlist.append(item.clone(action="categorias", title="Modelos",
                               url=host + "/models/?mode=async&function=get_block&block_id=list_models_models"
                                          "_list&sort_by=total_videos"))
    itemlist.append(item.clone(action="playlists", title="Listas",
                               url=host + "/playlists/"))
    itemlist.append(item.clone(action="tags", title="Tags",
                               url=host + "/tags/"))
    itemlist.append(item.clone(title="Buscar...", action="search"))
    itemlist.append(item.clone(action="configuracion", title="Configurar canal...",
                               text_color="gold", folder=False))
    return itemlist


def configuracion(item):
    """Open the channel settings dialog and refresh the listing afterwards."""
    from platformcode import platformtools
    ret = platformtools.show_channel_settings()
    platformtools.itemlist_refresh()
    return ret


def search(item, texto):
    """Search the site for ``texto`` and return the matching video items.

    The query is embedded in the URL path with '+' replaced by '-', and the
    raw query is kept in ``item.extra`` for later pagination.  Any failure is
    logged and an empty list is returned so a broken channel does not abort
    the global (multi-channel) search.
    """
    logger.info()
    item.url = "%s/search/%s/" % (host, texto.replace("+", "-"))
    item.extra = texto
    try:
        return lista(item)
    # Se captura la excepción, para no interrumpir al buscador global si un canal falla.
    # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit still propagate.
    except Exception:
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def lista(item):
    """List the videos found at ``item.url``, plus a next-page entry.

    Chooses the per-video action ("play" vs "menu_info") from the channel's
    ``menu_info`` setting.
    """
    logger.info()
    itemlist = []

    # Descarga la pagina
    data = get_data(item.url)
    action = "play"
    if config.get_setting("menu_info", "porntrex"):
        action = "menu_info"

    # TODO(review): the scraping regex ("patron") that extracted the non-private
    # video entries from ``data`` — and its matching loop building ``itemlist``
    # with ``action`` — was lost in the mangled source.  Restore it from the
    # upstream addon; only the pagination logic below survived.

    # Pagination: the surviving tail of the original pattern is kept verbatim;
    # its lost prefix must be restored upstream (TODO(review)).
    next_page = scrapertools.find_single_match(
        data, r'.*?from_videos\+from_albums:(\d+)')
    if next_page:
        if "from_videos=" in item.url:
            # Already an AJAX listing URL: just bump the offset in place.
            next_page = re.sub(r'&from_videos=(\d+)',
                               '&from_videos=%s' % next_page, item.url)
        else:
            # First pagination step: switch to the site's async block endpoint.
            next_page = "%s?mode=async&function=get_block&block_id=list_videos_videos" \
                        "&q=%s&category_ids=&sort_by=post_date&from_videos=%s" \
                        % (item.url, item.extra, next_page)
        itemlist.append(item.clone(action="lista", title=">> Página Siguiente",
                                   url=next_page))
    else:
        # TODO(review): the fallback ``find_single_match`` pattern (and whatever
        # followed it) was also truncated in the mangled source; restore it
        # from the upstream addon.
        pass
    return itemlist