# -*- coding: utf-8 -*-

import re
import urllib
import urlparse

from core import jsontools
from core import scrapertools
from core.item import Item
from platformcode import config, logger

host = "http://www.documaniatv.com/"
account = config.get_setting("documaniatvaccount", "documaniatv")
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
           ['Referer', host]]


def login():
    logger.info()
    user = config.get_setting("documaniatvuser", "documaniatv")
    password = config.get_setting("documaniatvpassword", "documaniatv")
    if user == "" or password == "":
        return True, ""
    data = scrapertools.cachePage(host, headers=headers)
    if "http://www.documaniatv.com/user/" + user in data:
        return False, user
    post = "username=%s&pass=%s&Login=Iniciar Sesión" % (user, password)
    data = scrapertools.cachePage("http://www.documaniatv.com/login.php", headers=headers, post=post)
    if "Nombre de usuario o contraseña incorrectas" in data:
        logger.error("login erróneo")
        return True, ""

    return False, user


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(item.clone(action="novedades", title="Novedades",
                               url="http://www.documaniatv.com/newvideos.html"))
    itemlist.append(item.clone(action="categorias", title="Categorías y Canales",
                               url="http://www.documaniatv.com/browse.html"))
    itemlist.append(item.clone(action="novedades", title="Top",
                               url="http://www.documaniatv.com/topvideos.html"))
    itemlist.append(item.clone(action="categorias", title="Series Documentales",
                               url="http://www.documaniatv.com/top-series-documentales-html"))
    itemlist.append(item.clone(action="viendo", title="Viendo ahora",
                               url="http://www.documaniatv.com"))
    itemlist.append(item.clone(action="", title=""))
    itemlist.append(item.clone(action="search", title="Buscar"))

    folder = False
    action = ""
    if account:
        error, user = login()
        if error:
            title = "Playlists Personales (Error en usuario y/o contraseña)"
        else:
            title = "Playlists Personales (Logueado)"
            action = "usuario"
            folder = True
    else:
        title = "Playlists Personales (Sin cuenta configurada)"
        user = ""
    url = "http://www.documaniatv.com/user/%s" % user
    itemlist.append(item.clone(title=title, action=action, url=url, folder=folder))
    itemlist.append(item.clone(title="Configurar canal...", text_color="gold",
                               action="configuracion", folder=False))

    return itemlist


def configuracion(item):
    from platformcode import platformtools
    platformtools.show_channel_settings()
    if config.is_xbmc():
        import xbmc
        xbmc.executebuiltin("Container.Refresh")


def newest(categoria):
    itemlist = []
    item = Item()
    try:
        if categoria == 'documentales':
            item.url = "http://www.documaniatv.com/newvideos.html"
            itemlist = novedades(item)
            if itemlist[-1].action == "novedades":
                itemlist.pop()
    # The exception is caught so that the global "newest" feed is not interrupted if one channel fails
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist


def search(item, texto):
    logger.info()
    data = scrapertools.cachePage(host, headers=headers)
    item.url = scrapertools.find_single_match(data, 'form action="([^"]+)"') + "?keywords=%s&video-id="
    texto = texto.replace(" ", "+")
    item.url = item.url % texto
    try:
        return novedades(item)
    # The exception is caught so that the global search is not interrupted if one channel fails
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def novedades(item):
    logger.info()
    itemlist = []

    # Download the page
    data = scrapertools.cachePage(item.url, headers=headers)
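    # newvideos.html, topvideos.html and the search results share the same card layout,
    # so "Novedades", "Top" and search() are all routed through this one parser.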
    # Grab the plot if the page has one.
    # NOTE: the HTML-tag parts of the patterns in this function did not survive in the
    # source; the tag fragments below are approximations and may need adjusting to the
    # live markup of documaniatv.com.
    scrapedplot = scrapertools.find_single_match(data, '<div class="video-description">(.*?)</div>')
    # One block per video card
    bloque = scrapertools.find_multiple_matches(data, '<div class="video-item">(.*?)</div>')
    if "Registrarse" in data or not account:
        # Anonymous layout; a logged-in session renders the same cards with extra playlist controls
        for match in bloque:
            patron = '<a href="([^"]+)"[^>]*title="([^"]+)".*?src="([^"]+)"'
            scrapedurl, scrapedtitle, scrapedthumbnail = scrapertools.find_single_match(match, patron)
            scrapedthumbnail += "|" + headers[0][0] + "=" + headers[0][1]
            itemlist.append(item.clone(action="play_", title=scrapedtitle, url=scrapedurl,
                                       thumbnail=scrapedthumbnail, fanart=scrapedthumbnail,
                                       plot=scrapedplot, fulltitle=scrapedtitle, folder=False))

    # Look for next-page links...
    try:
        next_page_url = scrapertools.find_single_match(data, '<a href="([^"]+)"[^>]*>.*?»')
        next_page_url = urlparse.urljoin(host, next_page_url)
        itemlist.append(item.clone(action="novedades", title=">> Página siguiente", url=next_page_url))
    except:
        logger.error("Siguiente pagina no encontrada")

    return itemlist


def categorias(item):
    logger.info()
    itemlist = []

    data = scrapertools.cachePage(item.url, headers=headers)
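    # browse.html shows categories and user channels as a single tile grid; every tile
    # is listed with action="novedades" so opening it reuses the generic video listing.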
    # One match per category/channel tile: url, thumbnail and title.
    # NOTE: the tag parts of this pattern are approximations of the damaged original.
    patron = '<li class="pm-li-category">.*?<a href="([^"]+)".*?<img src="([^"]+)"' \
             '.*?(?:<h3>|<h2>)(.*?)<'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        if not scrapedthumbnail.startswith("data:image"):
            scrapedthumbnail += "|" + headers[0][0] + "=" + headers[0][1]
        else:
            scrapedthumbnail = item.thumbnail
        itemlist.append(item.clone(action="novedades", title=scrapedtitle, url=scrapedurl,
                                   thumbnail=scrapedthumbnail, fanart=scrapedthumbnail))

    # Look for next-page links...
    next_page_url = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)">')
    if next_page_url != "":
        itemlist.append(item.clone(action="categorias", title=">> Página siguiente", url=next_page_url))

    return itemlist


def viendo(item):
    logger.info()
    itemlist = []

    # Download the page
    data = scrapertools.cachePage(item.url, headers=headers)
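    # The home page mixes several carousels; isolating the "Viendo ahora" block first
    # keeps the per-item pattern below from matching cards in unrelated sections.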
    # NOTE: the tag parts of these patterns are approximations of the damaged original.
    bloque = scrapertools.find_single_match(data, '<h3>Viendo ahora</h3>(.*?)</ul>')
    patron = '<img src="([^"]+)"' \
             '.*?<a href="([^"]+)"[^>]*>(.*?)</a>'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        scrapedthumbnail += "|" + headers[0][0] + "=" + headers[0][1]
        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(item.clone(action="play_", title=scrapedtitle, url=scrapedurl,
                                   thumbnail=scrapedthumbnail, fanart=scrapedthumbnail,
                                   fulltitle=scrapedtitle, folder=False))

    return itemlist
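

# A minimal usage sketch (assuming the pelisalacarta/Alfa channel framework, which
# dispatches on Item.action): each function returns a list of Item objects whose
# "action" names the next function to call with that Item, e.g. an entry with
# action="novedades" re-enters novedades(item) using the cloned url.
#
#     from core.item import Item
#     item = Item(channel="documaniatv", action="mainlist")
#     for entry in mainlist(item):
#         print entry.title, entry.action, entry.url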