# -*- coding: utf-8 -*- import os import re import sys import urlparse from core import httptools from core import jsontools from core import scrapertools from core import servertools from core.item import Item from platformcode import config, logger from platformcode import platformtools def login(): url_origen = "http://www.pordede.com" data = httptools.downloadpage(url_origen).data if config.get_setting("pordedeuser", "pordede") in data: return True url = "http://www.pordede.com/api/login/auth?response_type=code&client_id=appclient&redirect_uri=http%3A%2F%2Fwww.pordede.com%2Fapi%2Flogin%2Freturn&state=none" post = "username=%s&password=%s&authorized=autorizar" % ( config.get_setting("pordedeuser", "pordede"), config.get_setting("pordedepassword", "pordede")) data = httptools.downloadpage(url, post).data if '"ok":true' in data: return True else: return False def mainlist(item): logger.info() itemlist = [] if config.get_setting("pordedeuser", "pordede") == "": itemlist.append( Item(channel=item.channel, title="Habilita tu cuenta en la configuración...", action="settingCanal", url="")) else: result = login() if not result: itemlist.append(Item(channel=item.channel, action="mainlist", title="Login fallido. Volver a intentar...")) return itemlist itemlist.append(Item(channel=item.channel, action="menuseries", title="Series", url="")) itemlist.append(Item(channel=item.channel, action="menupeliculas", title="Películas y documentales", url="")) itemlist.append(Item(channel=item.channel, action="listas_sigues", title="Listas que sigues", url="http://www.pordede.com/lists/following")) itemlist.append(Item(channel=item.channel, action="tus_listas", title="Tus listas", url="http://www.pordede.com/lists/yours")) itemlist.append( Item(channel=item.channel, action="listas_sigues", title="Top listas", url="http://www.pordede.com/lists")) itemlist.append(Item(channel=item.channel, action="settingCanal", title="Configuración...", url="")) return itemlist def settingCanal(item): return platformtools.show_channel_settings() def menuseries(item): logger.info() itemlist = [] itemlist.append(Item(channel=item.channel, action="peliculas", title="Novedades", url="http://www.pordede.com/series/loadmedia/offset/0/showlist/hot")) itemlist.append( Item(channel=item.channel, action="generos", title="Por géneros", url="http://www.pordede.com/series")) itemlist.append(Item(channel=item.channel, action="peliculas", title="Siguiendo", url="http://www.pordede.com/series/following")) itemlist.append(Item(channel=item.channel, action="siguientes", title="Siguientes Capítulos", url="http://www.pordede.com/main/index", viewmode="movie")) itemlist.append( Item(channel=item.channel, action="peliculas", title="Favoritas", url="http://www.pordede.com/series/favorite")) itemlist.append( Item(channel=item.channel, action="peliculas", title="Pendientes", url="http://www.pordede.com/series/pending")) itemlist.append( Item(channel=item.channel, action="peliculas", title="Terminadas", url="http://www.pordede.com/series/seen")) itemlist.append(Item(channel=item.channel, action="peliculas", title="Recomendadas", url="http://www.pordede.com/series/recommended")) itemlist.append(Item(channel=item.channel, action="search", title="Buscar...", url="http://www.pordede.com/series")) return itemlist def menupeliculas(item): logger.info() itemlist = [] itemlist.append(Item(channel=item.channel, action="peliculas", title="Novedades", url="http://www.pordede.com/pelis/loadmedia/offset/0/showlist/hot")) itemlist.append( Item(channel=item.channel, action="generos", title="Por géneros", url="http://www.pordede.com/pelis")) itemlist.append( Item(channel=item.channel, action="peliculas", title="Favoritas", url="http://www.pordede.com/pelis/favorite")) itemlist.append( Item(channel=item.channel, action="peliculas", title="Pendientes", url="http://www.pordede.com/pelis/pending")) itemlist.append( Item(channel=item.channel, action="peliculas", title="Vistas", url="http://www.pordede.com/pelis/seen")) itemlist.append(Item(channel=item.channel, action="peliculas", title="Recomendadas", url="http://www.pordede.com/pelis/recommended")) itemlist.append(Item(channel=item.channel, action="search", title="Buscar...", url="http://www.pordede.com/pelis")) return itemlist def generos(item): logger.info() # Descarga la pagina data = httptools.downloadpage(item.url).data logger.debug("data=" + data) # Extrae las entradas (carpetas) data = scrapertools.find_single_match(data, '
(.*?)
') patron = '([^<]+)\((\d+)\)' matches = re.compile(patron, re.DOTALL).findall(data) itemlist = [] for textid, scrapedurl, scrapedtitle, cuantos in matches: title = scrapedtitle.strip() + " (" + cuantos + ")" thumbnail = "" plot = "" # http://www.pordede.com/pelis/loadmedia/offset/30/genre/science%20fiction/showlist/all?popup=1 if "/pelis" in item.url: url = "http://www.pordede.com/pelis/loadmedia/offset/0/genre/" + textid.replace(" ", "%20") + "/showlist/all" else: url = "http://www.pordede.com/series/loadmedia/offset/0/genre/" + textid.replace(" ", "%20") + "/showlist/all" logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") itemlist.append( Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot, fulltitle=title)) return itemlist def search(item, texto): logger.info() if item.url == "": item.url = "http://www.pordede.com/pelis" texto = texto.replace(" ", "-") # Mete el referer en item.extra item.extra = item.url item.url = item.url + "/loadmedia/offset/0/query/" + texto + "/years/1950/on/undefined/showlist/all" try: return buscar(item) # Se captura la excepción, para no interrumpir al buscador global si un canal falla except: import sys for line in sys.exc_info(): logger.error("%s" % line) return [] def buscar(item): logger.info() # Descarga la pagina headers = {"X-Requested-With": "XMLHttpRequest"} data = httptools.downloadpage(item.url, headers=headers).data logger.debug("data=" + data) # Extrae las entradas (carpetas) json_object = jsontools.load(data) logger.debug("html=" + json_object["html"]) data = json_object["html"] return parse_mixed_results(item, data) def parse_mixed_results(item, data): patron = '') patron = '
[^<]+' patron += '' + episode + ' ([^<]+)(\s*
\s*]*>]*>[^<]*]*>[^<]*]*>]*>]*>[^<]*]*>[^<]*
)?' matches = re.compile(patron, re.DOTALL).findall(bloque_episodios) for scrapedurl, scrapedtitle, info, visto in matches: # visto_string = "[visto] " if visto.strip()=="active" else "" if visto.strip() == "active": visto_string = "[visto] " else: visto_string = "" numero = episode title = visto_string + session + "x" + numero + " " + scrapertools.htmlclean(scrapedtitle) thumbnail = "" plot = "" # http://www.pordede.com/peli/the-lego-movie # http://www.pordede.com/links/view/slug/the-lego-movie/what/peli?popup=1 # http://www.pordede.com/links/viewepisode/id/475011?popup=1 epid = scrapertools.find_single_match(scrapedurl, "id/(\d+)") url = "http://www.pordede.com/links/viewepisode/id/" + epid itemlist.append( Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot, fulltitle=title, fanart=item.fanart, show=item.show)) logger.debug("Abrimos title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") itemlist2 = [] for capitulo in itemlist: itemlist2 = findvideos(capitulo) return itemlist2 def peliculas(item): logger.info() # Descarga la pagina headers = {"X-Requested-With": "XMLHttpRequest"} data = httptools.downloadpage(item.url, headers=headers).data logger.debug("data=" + data) # Extrae las entradas (carpetas) json_object = jsontools.load(data) logger.debug("html=" + json_object["html"]) data = json_object["html"] return parse_mixed_results(item, data) def episodios(item): logger.info() itemlist = [] # Descarga la pagina idserie = '' data = httptools.downloadpage(item.url).data logger.debug("data=" + data) patrontemporada = '
]+>([^<]+)
([^<]+)([^<]+)(\s*
\s*]*>]*>[^<]*]*>[^<]*]*>]*>]*>[^<]*]*>[^<]*
)?' matches = re.compile(patron, re.DOTALL).findall(bloque_episodios) for scrapedurl, numero, scrapedtitle, info, visto in matches: # visto_string = "[visto] " if visto.strip()=="active" else "" if visto.strip() == "active": visto_string = "[visto] " else: visto_string = "" title = visto_string + nombre_temporada.replace("Temporada ", "").replace("Extras", "Extras 0") + "x" + numero + " " + scrapertools.htmlclean( scrapedtitle) thumbnail = item.thumbnail fanart = item.fanart plot = "" # http://www.pordede.com/peli/the-lego-movie # http://www.pordede.com/links/view/slug/the-lego-movie/what/peli?popup=1 # http://www.pordede.com/links/viewepisode/id/475011?popup=1 epid = scrapertools.find_single_match(scrapedurl, "id/(\d+)") url = "http://www.pordede.com/links/viewepisode/id/" + epid itemlist.append( Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot, fulltitle=title, fanart=fanart, show=item.show)) logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") if config.get_videolibrary_support(): # con año y valoracion la serie no se puede actualizar correctamente, si ademas cambia la valoracion, creara otra carpeta # Sin año y sin valoración: show = re.sub(r"\s\(\d+\)\s\(\d+\.\d+\)", "", item.show) # Sin año: # show = re.sub(r"\s\(\d+\)", "", item.show) # Sin valoración: # show = re.sub(r"\s\(\d+\.\d+\)", "", item.show) itemlist.append(Item(channel='pordede', title="Añadir esta serie a la videoteca", url=item.url, action="add_serie_to_library", extra="episodios###", show=show)) itemlist.append(Item(channel='pordede', title="Descargar todos los episodios de la serie", url=item.url, action="download_all_episodes", extra="episodios", show=show)) itemlist.append(Item(channel='pordede', title="Marcar como Pendiente", tipo="serie", idtemp=idserie, valor="1", action="pordede_check", show=show)) itemlist.append(Item(channel='pordede', title="Marcar como Siguiendo", tipo="serie", idtemp=idserie, valor="2", action="pordede_check", show=show)) itemlist.append(Item(channel='pordede', title="Marcar como Finalizada", tipo="serie", idtemp=idserie, valor="3", action="pordede_check", show=show)) itemlist.append(Item(channel='pordede', title="Marcar como Favorita", tipo="serie", idtemp=idserie, valor="4", action="pordede_check", show=show)) itemlist.append(Item(channel='pordede', title="Quitar marca", tipo="serie", idtemp=idserie, valor="0", action="pordede_check", show=show)) return itemlist def parse_listas(item, patron): logger.info() # Descarga la pagina headers = {"X-Requested-With": "XMLHttpRequest"} data = httptools.downloadpage(item.url, headers=headers).data logger.debug("data=" + data) # Extrae las entradas (carpetas) json_object = jsontools.load(data) logger.debug("html=" + json_object["html"]) data = json_object["html"] matches = re.compile(patron, re.DOTALL).findall(data) itemlist = [] for scrapedurl, scrapedtitle, scrapeduser, scrapedfichas in matches: title = scrapertools.htmlclean(scrapedtitle + ' (' + scrapedfichas + ' fichas, por ' + scrapeduser + ')') url = urlparse.urljoin(item.url, scrapedurl) + "/offset/0/loadmedia" thumbnail = "" itemlist.append(Item(channel=item.channel, action="lista", title=title, url=url)) logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") nextpage = scrapertools.find_single_match(data, 'data-url="(/lists/loadlists/offset/[^"]+)"') if nextpage != '': url = urlparse.urljoin(item.url, nextpage) itemlist.append( Item(channel=item.channel, action="listas_sigues", title=">> Página siguiente", extra=item.extra, url=url)) try: import xbmcplugin xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_UNSORTED) xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_TITLE) except: pass return itemlist def listas_sigues(item): logger.info() patron = '