# -*- coding: utf-8 -*- import re import urllib from base64 import b64decode as bdec import xbmcaddon from channelselector import get_thumb from core import filetools from core import httptools from core import jsontools from core import scrapertools from core.item import Item from core.support import typo from core.tmdb import Tmdb from core import trakt_tools from platformcode import config, logger from platformcode import platformtools info_language = ["de", "en", "es", "fr", "it", "pt"] # from videolibrary.json def_lang = info_language[config.get_setting("info_language", "videolibrary")] langs = ['auto', 'de', 'fr', 'pt', 'it', 'es-MX', 'ca', 'en', 'es'] langt = langs[config.get_setting('tmdb', "tvmoviedb")] if langt == 'auto': langt = def_lang langt_alt = langs[config.get_setting('tmdb_alternativo', "tvmoviedb")] langs = ['auto', 'co', 'cl', 'ar', 'mx', 'en', 'es'] langf = langs[config.get_setting('filmaff', "tvmoviedb")] if langf == 'auto': langf = def_lang langs = ['auto', 'de-de', 'fr-fr', 'pt-pt', 'it-it', 'es-MX', 'ca-es', 'en', 'es'] langi = langs[config.get_setting('imdb', "tvmoviedb")] if langi == 'auto': langi = def_lang adult_mal = config.get_setting('adult_mal', "tvmoviedb") mal_ck = "MzE1MDQ2cGQ5N2llYTY4Z2xwbGVzZjFzbTY=" images_predef = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/" default_fan = filetools.join(config.get_runtime_path(), "fanart.jpg") logger.info('FANART= '+default_fan) def mainlist(item): logger.info() itemlist = [] # TMDB itemlist.append(item.clone(title=typo(config.get_localized_string(70021), 'bold'), action="")) itemlist.append(item.clone(title=config.get_localized_string(70022), action="tmdb", extra="movie", thumbnail=get_thumb('search_movie.png'))) itemlist.append(item.clone(title=config.get_localized_string(70023), action="tmdb", extra="tv", thumbnail=get_thumb('search_tvshow.png'))) # Filmaffinity itemlist.append(item.clone(title=typo(config.get_localized_string(70024), 'bold'), action="")) itemlist.append(item.clone(title=config.get_localized_string(70022), action="filmaf", extra="movie", thumbnail=get_thumb('search_movie.png'))) itemlist.append(item.clone(title=config.get_localized_string(70023), action="filmaf", extra="tv", thumbnail=get_thumb('search_tvshow.png'))) # IMDB itemlist.append(item.clone(title=typo(config.get_localized_string(70025), 'bold'), action="")) itemlist.append(item.clone(title=config.get_localized_string(70022), action="imdb", extra="movie", url='&title_type=feature,tv_movie', thumbnail=get_thumb('search_movie.png'))) itemlist.append(item.clone(title=config.get_localized_string(70023), action="imdb", extra="tv", url='&title_type=tv_series,tv_special,mini_series', thumbnail=get_thumb('search_tvshow.png'))) itemlist.append( item.clone(title=typo(config.get_localized_string(70415), 'bold'), action="trakt", thumbnail="http://i.imgur.com/5sQjjuk.png")) itemlist.append( item.clone(title=typo(config.get_localized_string(70026), 'bold'), action="mal", thumbnail="http://i.imgur.com/RhsYWmd.png",)) itemlist.append( item.clone(title=typo(config.get_localized_string(70027), 'color kod'), action="configuracion", folder=False)) return itemlist def configuracion(item): ret = platformtools.show_channel_settings() platformtools.itemlist_refresh() return ret def search_star(item): logger.info() itemlist = [] item.type='movie' itemlist.extend(search_(item)) item.type='tvshow' itemlist.extend(search_(item)) return itemlist def search_(item): texto = platformtools.dialog_input(heading=item.title) if texto: if "imdb" in item.url: item.url += texto.replace(" ", "+") item.action = "listado_imdb" return listado_imdb(item) if "filmaffinity" in item.url: item.url += texto.replace(" ", "+") item.action = "listado_fa" return listado_fa(item) if "myanimelist" in item.url: item.url += texto.replace(" ", "%20") item.url += "&type=0&score=0&status=0&p=0&r=0&sm=0&sd=0&sy=0&em=0&ed=0&ey=0&c[0]=a" \ "&c[1]=b&c[2]=c&c[3]=d&c[4]=f&gx=0" item.action = "busqueda_mal" return busqueda_mal(item) item.search['query'] = texto item.action = "listado_tmdb" if item.star == True: types = ['movie','tv'] itemlist = [] for type in types: item.contentType = type item.search['type']=type itemlist.extend(listado_tmdb(item)) return itemlist else: return listado_tmdb(item) def busqueda(item): logger.info() new_item = Item(title=item.contentTitle, text=item.contentTitle.replace("+", " "), mode=item.contentType, infoLabels=item.infoLabels) from specials import search return search.channel_search(new_item) def tmdb(item): item.contentType = item.extra.replace("tv", "tvshow") itemlist = [] itemlist.append(item.clone(title=config.get_localized_string(70028), action="listado_tmdb", search={'url': item.extra + "/popular", 'language': langt, 'page': 1})) itemlist.append(item.clone(title=config.get_localized_string(70029), action="listado_tmdb", search={'url': item.extra + "/top_rated", 'language': langt, 'page': 1})) if item.extra == "movie": itemlist.append(item.clone(title=config.get_localized_string(70030), action="listado_tmdb", search={'url': item.extra + "/now_playing", 'language': langt, 'page': 1})) else: itemlist.append(item.clone(title=config.get_localized_string(70031), action="listado_tmdb", search={'url': item.extra + "/on_the_air", 'language': langt, 'page': 1})) itemlist.append(item.clone(title=config.get_localized_string(70032), action="indices_tmdb")) itemlist.append(item.clone(title=config.get_localized_string(70042), action="indices_tmdb")) if item.extra == "movie": itemlist.append(item.clone(title=config.get_localized_string(70033), action="listado_tmdb", search={'url': 'person/popular', 'language': langt, 'page': 1})) itemlist.append(item.clone(title=config.get_localized_string(70034), action="listado_tmdb", search={'url': item.extra + "/upcoming", 'language': langt, 'page': 1})) if config.get_platform() != "plex": title = item.contentType.replace("movie", config.get_localized_string(70283)).replace("tvshow", "serie") itemlist.append(item.clone(title=config.get_localized_string(70035) % title, action="search_", search={'url': 'search/%s' % item.extra, 'language': langt, 'page': 1})) itemlist.append(item.clone(title=config.get_localized_string(70036), action="search_", search={'url': 'search/person', 'language': langt, 'page': 1})) if item.extra == "movie": itemlist.append(item.clone(title=config.get_localized_string(70037), action="search_", search={'url': "search/person", 'language': langt, 'page': 1}, crew=True)) itemlist.append(item.clone(title=config.get_localized_string(70038), action="filtro", )) itemlist.append(item.clone(title=config.get_localized_string(70039), action="filtro", )) return itemlist def imdb(item): item.contentType = item.extra.replace("tv", "tvshow") itemlist = [] itemlist.append(item.clone(title=config.get_localized_string(70028), action="listado_imdb")) itemlist.append(item.clone(title=config.get_localized_string(70029), action="listado_imdb", url=item.url + "&num_votes=25000,&sort=user_rating,desc")) if item.extra == "movie": itemlist.append(item.clone(title=config.get_localized_string(70030), action="listado_imdb", url="http://www.imdb.com/showtimes/location?ref_=inth_ov_sh_sm")) itemlist.append(item.clone(title=config.get_localized_string(70032), action="indices_imdb")) itemlist.append(item.clone(title=config.get_localized_string(70042), action="indices_imdb")) if item.extra == "movie": itemlist.append(item.clone(title=config.get_localized_string(70033), action="listado_imdb", url="http://www.imdb.com/search/name?gender=male,female&ref_=nv_cel_m_3")) itemlist.append(item.clone(title=config.get_localized_string(70034), action="listado_imdb", url="http://www.imdb.com/movies-coming-soon/?ref_=shlc_cs")) if config.get_platform() != "plex": title = item.contentType.replace("movie", config.get_localized_string(70283)).replace("tvshow", "serie") itemlist.append(item.clone(title=config.get_localized_string(70035) % title, action="search_", url="http://www.imdb.com/search/title?title=" + item.url)) itemlist.append(item.clone(title=config.get_localized_string(70036), action="search_", url="http://www.imdb.com/search/name?name=")) itemlist.append(item.clone(title=config.get_localized_string(70038), action="filtro_imdb", )) return itemlist def filmaf(item): item.contentType = item.extra.replace("tv", "tvshow") login, message = login_fa() itemlist = [] if item.extra == "movie": itemlist.append(item.clone(title=config.get_localized_string(70040), action="listado_fa", extra="top", url="http://m.filmaffinity.com/%s/topgen.php?genre=&country=&" "fromyear=&toyear=¬vse=1&nodoc=1" % langf)) itemlist.append(item.clone(title=config.get_localized_string(70030), action="listado_fa", url="http://m.filmaffinity.com/%s/rdcat.php?id=new_th_%s" % (langf, langf))) itemlist.append(item.clone(title=config.get_localized_string(70032), action="indices_fa", url="http://m.filmaffinity.com/%s/topgen.php" % langf, thumbnail="%s0/Genres.png" % images_predef)) else: itemlist.append(item.clone(title=config.get_localized_string(70040), action="listado_fa", extra="top", url="http://m.filmaffinity.com/%s/topgen.php?genre=TV_SE&country=&" "fromyear=&toyear=&nodoc" % langf)) itemlist.append(item.clone(title="Series de actualidad", action="listado_fa", url="http://m.filmaffinity.com/%s/category.php?id=current_tv" % langf)) itemlist.append(item.clone(title=config.get_localized_string(70042), action="indices_fa", thumbnail="%s0/Year.png" % images_predef)) if item.extra == "movie": itemlist.append(item.clone(title=config.get_localized_string(70043), action="listado_fa", extra="estrenos", url="http://m.filmaffinity.com/%s/rdcat.php?id=upc_th_%s" % (langf, langf))) itemlist.append(item.clone(title=config.get_localized_string(70044), action="indices_fa", extra="sagas", url="http://www.filmaffinity.com/%s/movie-groups-all.php" % langf)) itemlist.append(item.clone(title=config.get_localized_string(70045), action="indices_fa", url='http://m.filmaffinity.com/%s/topics.php' % langf, )) if config.get_platform() != "plex": itemlist.append(item.clone(title=config.get_localized_string(70046), action="search_", url="http://m.filmaffinity.com/%s/search.php?stype=title&stext=" % langf)) itemlist.append(item.clone(title=config.get_localized_string(70036), action="search_", url="http://m.filmaffinity.com/%s/search.php?stype=cast&stext=" % langf)) itemlist.append(item.clone(title=config.get_localized_string(70047), action="search_", url="http://m.filmaffinity.com/%s/search.php?stype=director&stext=" % langf)) itemlist.append(item.clone(title=config.get_localized_string(70038), action="filtro_fa", extra="top")) itemlist.append(item.clone(title=config.get_localized_string(70048), action="cuenta_fa", )) return itemlist def trakt(item): itemlist = [] token_auth = config.get_setting("token_trakt", "trakt") page = "?page=1&limit=20&extended=full" if not item.extra: item.extra = "movie" itemlist.append(item.clone(title=typo(config.get_localized_string(70416), 'bold'), action="")) itemlist.append(item.clone(title=config.get_localized_string(70049), action="acciones_trakt", url="movies/popular%s" % page)) itemlist.append( item.clone(title=config.get_localized_string(70050), action="acciones_trakt", url="movies/trending%s" % page)) itemlist.append(item.clone(title=config.get_localized_string(70053), action="acciones_trakt", url="movies/watched/all%s" % page)) itemlist.append( item.clone(title=config.get_localized_string(70051), action="acciones_trakt", url="movies/anticipated%s" % page)) if token_auth: itemlist.append(item.clone(title=config.get_localized_string(70052), action="acciones_trakt", url="recommendations/movies?limit=100&extended=full", pagina=0)) itemlist.append(item.clone(title=typo(config.get_localized_string(70417), 'bold'), action="",)) item.extra = "show" itemlist.append(item.clone(title=config.get_localized_string(70049), action="acciones_trakt", url="shows/popular%s" % page)) itemlist.append(item.clone(title=config.get_localized_string(70050), action="acciones_trakt", url="shows/trending%s" % page)) itemlist.append(item.clone(title=config.get_localized_string(70053), action="acciones_trakt", url="shows/watched/all%s" % page)) itemlist.append( item.clone(title=config.get_localized_string(70051), action="acciones_trakt", url="shows/anticipated%s" % page)) if token_auth: itemlist.append(item.clone(title=config.get_localized_string(70052), action="acciones_trakt", url="recommendations/shows?limit=100&extended=full", pagina=0)) itemlist.append(item.clone(title=typo(config.get_localized_string(70048), 'color kod bold'), extra="cuenta")) else: item.extra = "movie" # Se comprueba si existe un token guardado y sino se ejecuta el proceso de autentificación if not token_auth: #folder = (config.get_platform() == "plex") itemlist.append(item.clone(title=config.get_localized_string(70054), action="auth_trakt", folder=folder)) else: itemlist.append(item.clone(title=config.get_localized_string(70055), action="", )) itemlist.append( item.clone(title=config.get_localized_string(60651), action="acciones_trakt", url="users/me/watchlist/movies%s" % page, order="added", how="desc")) itemlist.append( item.clone(title=config.get_localized_string(60652), action="acciones_trakt", url="users/me/watchlist/shows%s" % page, extra="show", order="added", how="desc")) itemlist.append(item.clone(title=config.get_localized_string(70056), action="", )) itemlist.append( item.clone(title=config.get_localized_string(60651), action="acciones_trakt", url="users/me/watched/movies%s" % page, order="added", how="desc")) itemlist.append( item.clone(title=config.get_localized_string(60652), action="acciones_trakt", url="users/me/watched/shows%s" % page, extra="show", order="added", how="desc")) itemlist.append(item.clone(title=config.get_localized_string(70068), action="", )) itemlist.append( item.clone(title=config.get_localized_string(60651), action="acciones_trakt", url="users/me/collection/movies%s" % page, order="added", how="desc")) itemlist.append( item.clone(title=config.get_localized_string(60652), action="acciones_trakt", url="users/me/collection/shows%s" % page, extra="show", order="added", how="desc")) itemlist.append( item.clone(title=config.get_localized_string(70057), action="acciones_trakt", url="users/me/lists", )) return itemlist def mal(item): itemlist = [] login, message, user = login_mal() if login: item.login = True itemlist.append( item.clone(title=config.get_localized_string(70058), url="https://myanimelist.net/topanime.php?type=tv&limit=0", action="top_mal", contentType="tvshow", extra="tv")) itemlist.append(item.clone(title=config.get_localized_string(70059), url="https://myanimelist.net/topanime.php?type=movie&limit=0", action="top_mal", contentType="movie", extra="movie")) itemlist.append( item.clone(title=config.get_localized_string(70061), url="https://myanimelist.net/topanime.php?type=ova&limit=0", action="top_mal", contentType="tvshow", extra="tv", tipo="ova")) itemlist.append( item.clone(title=config.get_localized_string(70028), url="https://myanimelist.net/topanime.php?type=bypopularity&limit=0", action="top_mal")) itemlist.append(item.clone(title=config.get_localized_string(70060), url="https://myanimelist.net/topanime.php?type=upcoming&limit=0", action="top_mal")) itemlist.append(item.clone(title=config.get_localized_string(70062), url="", action="indices_mal")) itemlist.append(item.clone(title=config.get_localized_string(70063), url="", action="indices_mal")) if config.get_platform() != "plex": itemlist.append(item.clone(title=config.get_localized_string(70064), url="https://myanimelist.net/anime.php?q=", action="search_")) itemlist.append(item.clone(title=typo(config.get_localized_string(70038), 'bold submenu'), action="filtro_mal")) itemlist.append(item.clone(title=typo(config.get_localized_string(70057), 'bold submenu'), action="cuenta_mal")) return itemlist ##-------------------- SECCION TMDB ------------------------## def listado_tmdb(item): # Listados principales de la categoría Tmdb (Más populares, más vistas, etc...) itemlist = [] item.fanart = default_fan if not item.pagina: item.pagina = 1 # Listado de actores if 'nm' in item.infoLabels['imdb_id']: try: ob_tmdb = Tmdb(discover=item.search, tipo=item.extra, idioma_busqueda=langt) id_cast = ob_tmdb.result["person_results"][0]["id"] if item.contentType == "movie": item.search = {'url': 'discover/movie', 'with_cast': id_cast, 'page': item.pagina, 'sort_by': 'primary_release_date.desc', 'language': langt} else: item.search = {'url': 'person/%s/tv_credits' % id_cast, 'language': langt} ob_tmdb = Tmdb(discover=item.search, tipo=item.extra, idioma_busqueda=langt) except: pass else: ob_tmdb = Tmdb(discover=item.search, tipo=item.extra, idioma_busqueda=langt) # Sagas y colecciones if "collection" in item.search["url"]: try: new_item = item.clone(action="", url='') new_item.infoLabels["plot"] = ob_tmdb.result["overview"] itemlist.append(new_item) for parte in ob_tmdb.result["parts"]: new_item = item.clone(action="detalles") new_item.infoLabels = ob_tmdb.get_infoLabels(new_item.infoLabels, origen=parte) if new_item.infoLabels['thumbnail']: new_item.thumbnail = new_item.infoLabels['thumbnail'] if new_item.infoLabels['fanart']: new_item.fanart = new_item.infoLabels['fanart'] if new_item.infoLabels['year']: new_item.title = typo(new_item.contentTitle, 'bold') + ' (%s)' % new_item.infoLabels['year'] + typo(str(new_item.infoLabels['rating']).replace("0.0", ""), '_ color kod') else: new_item.title = typo(new_item.contentTitle, 'bold') + ' (%s)' % new_item.infoLabels['year'] + typo(str(new_item.infoLabels['rating']).replace("0.0", ""), '_ color kod') itemlist.append(new_item) except: pass else: try: orden = False # Si se hace una búsqueda por actores o directores, se extraen esos resultados if "cast" in ob_tmdb.result and not item.crew: ob_tmdb.results = ob_tmdb.result["cast"] orden = True elif "crew" in ob_tmdb.result and item.crew: ob_tmdb.results = ob_tmdb.result["crew"] orden = True for i in range(0, len(ob_tmdb.results)): new_item = item.clone(action="detalles", url='', infoLabels={'mediatype': item.contentType}) new_item.infoLabels = ob_tmdb.get_infoLabels(new_item.infoLabels, origen=ob_tmdb.results[i]) # Si no hay sinopsis en idioma elegido, buscar en el alternativo if not new_item.infoLabels["plot"] and not 'person' in item.search["url"]: ob_tmdb2 = Tmdb(id_Tmdb=new_item.infoLabels["tmdb_id"], tipo=item.extra, idioma_busqueda=langt_alt) new_item.infoLabels["plot"] = ob_tmdb2.get_sinopsis() if new_item.infoLabels['thumbnail']: new_item.thumbnail = new_item.infoLabels['thumbnail'] elif new_item.infoLabels['profile_path']: new_item.thumbnail = 'http://image.tmdb.org/t/p/original' + new_item.infoLabels['profile_path'] new_item.infoLabels['profile_path'] = '' new_item.plot = new_item.infoLabels["biography"] if not item.search.get('with_cast', '') and not item.search.get('with_crew', ''): if item.contentType == "movie": new_item.action = "listado_tmdb" cast = 'with_cast' if item.crew: cast = 'with_crew' new_item.search = {'url': 'discover/movie', cast: new_item.infoLabels['tmdb_id'], 'sort_by': 'primary_release_date.desc', 'language': langt, 'page': item.pagina} else: new_item.action = "listado_tmdb" new_item.search = {'url': 'person/%s/tv_credits' % new_item.infoLabels['tmdb_id'], 'language': langt} elif not new_item.infoLabels['thumbnail'] and not new_item.infoLabels['profile_path']: new_item.thumbnail = '' if new_item.infoLabels['fanart']: new_item.fanart = new_item.infoLabels['fanart'] if not 'person' in item.search["url"] or 'tv_credits' in item.search["url"]: if new_item.infoLabels['year']: new_item.title = "%s %s %s" \ % (typo(new_item.contentTitle,'bold'), typo(new_item.infoLabels['year'],'() bold'), typo(str(new_item.infoLabels['rating']).replace("0.0", ""),'color kod bold')) else: new_item.title = "%s %s" \ % (typo(new_item.contentTitle,'bold'), typo(new_item.infoLabels['rating'].replace("0.0", ""),'color kod bold')) else: # Si es una búsqueda de personas se incluye en el título y fanart una película por la que es conocido known_for = ob_tmdb.results[i].get("known_for") type=item.type if known_for: from random import randint random = randint(0, len(known_for) - 1) new_item.title = typo(new_item.contentTitle, 'bold') + typo(known_for[random].get("title", known_for[random].get("name")), '_ () color kod') if known_for[random]["backdrop_path"]: new_item.fanart = 'http://image.tmdb.org/t/p/original' + known_for[random]["backdrop_path"] else: new_item.title = new_item.contentTitle itemlist.append(new_item) except: import traceback logger.error(traceback.format_exc()) if orden: itemlist.sort(key=lambda item: item.infoLabels["year"], reverse=True) if "page" in item.search and ob_tmdb.total_pages > item.search["page"]: item.search["page"] += 1 itemlist.append(Item(channel=item.channel, action=item.action, title=config.get_localized_string(70065), search=item.search, extra=item.extra, pagina=item.pagina + 1, contentType=item.contentType)) return itemlist def detalles(item): itemlist = [] images = {} data = "" # Si viene de seccion imdb if not item.infoLabels["tmdb_id"]: headers = [['Accept-Language', langi]] #data = httptools.downloadpage("http://www.imdb.com/title/" + item.infoLabels['imdb_id'], headers=headers, # replace_headers=True).data data = httptools.downloadpage("http://www.imdb.com/title/" + item.infoLabels['imdb_id'], headers=headers).data pics = scrapertools.find_single_match(data, 'showAllVidsAndPics.*?href=".*?(tt\d+)') # Imágenes imdb if pics: images["imdb"] = {'url': 'http://www.imdb.com/_json/title/%s/mediaviewer' % pics} ob_tmdb = Tmdb(external_id=item.infoLabels["imdb_id"], external_source="imdb_id", tipo=item.extra, idioma_busqueda=langt) item.infoLabels["tmdb_id"] = ob_tmdb.get_id() ob_tmdb = Tmdb(id_Tmdb=item.infoLabels["tmdb_id"], tipo=item.extra, idioma_busqueda=langt) try: item.infoLabels = ob_tmdb.get_infoLabels(item.infoLabels) # Si no hay sinopsis en idioma elegido, buscar en el alternativo if not item.infoLabels["plot"]: item.infoLabels["plot"] = ob_tmdb.get_sinopsis(idioma_alternativo=langt_alt) except: pass if not item.fanart and item.infoLabels['fanart']: item.fanart = item.infoLabels['fanart'] if item.infoLabels['thumbnail']: item.thumbnail = item.infoLabels['thumbnail'] # Sinopsis, votos de imdb if data: plot = scrapertools.find_single_match(data, 'class="inline canwrap" itemprop="description">(.*?)') plot = scrapertools.htmlclean(plot) plot = re.sub(r'(?i)]+>|\n|\s{2}', ' ', plot).strip() if plot and (item.infoLabels['plot'] and item.infoLabels['plot'] != plot): item.infoLabels['plot'] += " (TMDB)\n" + plot + " (IMDB)" elif plot and not item.infoLabels['plot']: item.infoLabels['plot'] = plot rating = scrapertools.find_single_match(data, 'itemprop="ratingValue">([^<]+)<') if rating: item.infoLabels['rating'] = float(rating.replace(",", ".")) votos = scrapertools.find_single_match(data, 'itemprop="ratingCount">([^<]+)<') if votos: item.infoLabels['votes'] = votos if item.infoLabels['tagline']: itemlist.append(item.clone(title="--- %s ---" % item.infoLabels['tagline'], action="")) title = item.contentType.replace("movie", config.get_localized_string(70283)).replace("tvshow", "serie") # Búsqueda por títulos idioma elegido y/o versión original y español itemlist.append(item.clone(action="busqueda", title=config.get_localized_string(70069) % (title, item.contentTitle))) if item.infoLabels['originaltitle'] and item.contentTitle != item.infoLabels['originaltitle']: itemlist.append(item.clone(action="busqueda", contentTitle=item.infoLabels['originaltitle'], title=config.get_localized_string(70070) % item.infoLabels['originaltitle'])) if langt != "es" and langt != "en" and item.infoLabels["tmdb_id"]: tmdb_lang = Tmdb(id_Tmdb=item.infoLabels["tmdb_id"], tipo=item.extra, idioma_busqueda=def_lang) if tmdb_lang.result.get("title") and tmdb_lang.result["title"] != item.contentTitle \ and tmdb_lang.result["title"] != item.infoLabels['originaltitle']: tmdb_lang = tmdb_lang.result["title"] itemlist.append(item.clone(action="busqueda", title=config.get_localized_string(70066) % tmdb_lang, contentTitle=tmdb_lang)) # En caso de serie, opción de info por temporadas if item.contentType == "tvshow" and item.infoLabels['tmdb_id']: itemlist.append(item.clone(action="info_seasons", title=config.get_localized_string(70067) % item.infoLabels["number_of_seasons"])) # Opción de ver el reparto y navegar por sus películas/series if item.infoLabels['tmdb_id']: itemlist.append(item.clone(action="reparto", title=config.get_localized_string(70071), infoLabels={'tmdb_id': item.infoLabels['tmdb_id'], 'mediatype': item.contentType})) if config.is_xbmc(): item.contextual = True itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title=config.get_localized_string(60359), )) try: images['tmdb'] = ob_tmdb.result["images"] itemlist.append(item.clone(action="imagenes", title=config.get_localized_string(70316), images=images, extra="menu")) except: pass try: if item.contentType == "movie" and item.infoLabels["year"] < 2014: post_url = "https://theost.com/search/custom/?key=%s&year=%s&country=0&genre=0" % ( item.infoLabels['originaltitle'].replace(" ", "+"), item.infoLabels["year"]) url = "https://nl.hideproxy.me/includes/process.php?action=update" post = "u=%s&proxy_formdata_server=nl&allowCookies=1&encodeURL=1&encodePage=0&stripObjects=0&stripJS=0&go=" % urllib.quote( post_url) while True: response = httptools.downloadpage(url, post=post, follow_redirects=False) if response.headers.get("location"): url = response.headers["location"] post = "" else: data_music = response.data break url_album = scrapertools.find_single_match(data_music, 'album(?:|s) on request.*?href="([^"]+)"') if url_album: url_album = "https://nl.hideproxy.me" + url_album itemlist.append( item.clone(action="musica_movie", title=config.get_localized_string(70317), url=url_album, )) except: pass token_auth = config.get_setting("token_trakt", "trakt") if token_auth: itemlist.append(item.clone(title=config.get_localized_string(70318), action="menu_trakt")) itemlist.append(item.clone(title="", action="")) # Es parte de una colección try: if ob_tmdb.result.get("belongs_to_collection"): new_item = item.clone(search='', infoLabels={'mediatype': item.contentType}) saga = ob_tmdb.result["belongs_to_collection"] new_item.infoLabels["tmdb_id"] = saga["id"] if saga["poster_path"]: new_item.thumbnail = 'http://image.tmdb.org/t/p/original' + saga["poster_path"] if saga["backdrop_path"]: new_item.fanart = 'http://image.tmdb.org/t/p/original' + saga["backdrop_path"] new_item.search = {'url': 'collection/%s' % saga['id'], 'language': langt} itemlist.append(new_item.clone(title=config.get_localized_string(70327) % saga["name"], action="listado_tmdb", )) except: pass # Películas/Series similares y recomendaciones if item.infoLabels['tmdb_id']: item.extra = item.contentType.replace('tvshow', 'tv') title = title.replace("película", config.get_localized_string(70137)).replace("serie", config.get_localized_string(30123)) itemlist.append(item.clone(title=config.get_localized_string(70328) % title, action="listado_tmdb", search={'url': '%s/%s/similar' % (item.extra, item.infoLabels['tmdb_id']), 'language': langt, 'page': 1}, infoLabels={'mediatype': item.contentType}, )) itemlist.append( item.clone(title=config.get_localized_string(70315), action="listado_tmdb", infoLabels={'mediatype': item.contentType}, search={'url': '%s/%s/recommendations' % (item.extra, item.infoLabels['tmdb_id']), 'language': langt, 'page': 1}, )) return itemlist def reparto(item): # Actores y equipo de rodaje de una película/serie itemlist = [] item.extra=item.contentType.replace('tvshow','tv') item.search = {'url': '%s/%s/credits' % (item.extra, item.infoLabels['tmdb_id'])} ob_tmdb = Tmdb(discover=item.search, tipo=item.extra, idioma_busqueda=langt) try: cast = ob_tmdb.result["cast"] if cast: itemlist.append(item.clone(title=config.get_localized_string(70314), action="", )) for actor in cast: new_item = item.clone(action="listado_tmdb", fanart=default_fan) new_item.title = " " + actor["name"] + " as " + actor["character"] if actor["profile_path"]: new_item.thumbnail = 'http://image.tmdb.org/t/p/original' + actor["profile_path"] if item.contentType == "movie": new_item.search = {'url': 'discover/movie', 'with_cast': actor['id'], 'language': langt, 'page': 1, 'sort_by': 'primary_release_date.desc'} else: new_item.search = {'url': 'person/%s/tv_credits' % actor['id'], 'language': langt} itemlist.append(new_item) except: pass try: crew = ob_tmdb.result["crew"] if crew: itemlist.append(item.clone(title=config.get_localized_string(70319), action="", )) for c in crew: new_item = item.clone(action="listado_tmdb", fanart=default_fan) new_item.title = " " + c["job"] + ": " + c["name"] if c["profile_path"]: new_item.thumbnail = 'http://image.tmdb.org/t/p/original' + c["profile_path"] if item.contentType == "movie": new_item.search = {'url': 'discover/movie', 'with_crew': c['id'], 'page': 1, 'sort_by': 'primary_release_date.desc'} else: new_item.search = {'url': 'person/%s/tv_credits' % c['id'], 'language': langt} new_item.crew = True itemlist.append(new_item) except: pass return itemlist def info_seasons(item): # Info de temporadas y episodios itemlist = [] ob_tmdb = Tmdb(id_Tmdb=item.infoLabels["tmdb_id"], tipo="tv", idioma_busqueda=langt) for temp in range(item.infoLabels["number_of_seasons"], 0, -1): temporada = ob_tmdb.get_temporada(temp) if temporada: new_item = item.clone(action="", mediatype="season") new_item.infoLabels['title'] = temporada['name'] new_item.infoLabels['season'] = temp if temporada['overview']: new_item.infoLabels['plot'] = temporada['overview'] if temporada['air_date']: date = temporada['air_date'].split('-') new_item.infoLabels['aired'] = date[2] + "/" + date[1] + "/" + date[0] new_item.infoLabels['year'] = date[0] if temporada['poster_path']: new_item.infoLabels['poster_path'] = 'http://image.tmdb.org/t/p/original' + temporada['poster_path'] new_item.thumbnail = new_item.infoLabels['poster_path'] new_item.title = config.get_localized_string(60027) % temp itemlist.append(new_item) for epi in range(1, len(temporada["episodes"])): episodio = ob_tmdb.get_episodio(temp, epi) if episodio: new_item = item.clone(action="", mediatype="episode") new_item.infoLabels['season'] = temp new_item.infoLabels['episode'] = epi new_item.infoLabels['title'] = episodio['episodio_titulo'] if episodio['episodio_sinopsis']: new_item.infoLabels['plot'] = episodio['episodio_sinopsis'] if episodio['episodio_imagen']: new_item.infoLabels['poster_path'] = episodio['episodio_imagen'] new_item.thumbnail = new_item.infoLabels['poster_path'] if episodio['episodio_air_date']: new_item.infoLabels['aired'] = episodio['episodio_air_date'] new_item.infoLabels['year'] = episodio['episodio_air_date'].rsplit("/", 1)[1] if episodio['episodio_vote_average']: new_item.infoLabels['rating'] = episodio['episodio_vote_average'] new_item.infoLabels['votes'] = episodio['episodio_vote_count'] new_item.title = " %sx%s - %s" % (temp, epi, new_item.infoLabels['title']) itemlist.append(new_item) return itemlist def indices_tmdb(item): # Indices por genero y año itemlist = [] from datetime import datetime if config.get_localized_string(70032) in item.title: thumbnail = {} url = ('http://api.themoviedb.org/3/genre/%s/list?api_key=a1ab8b8669da03637a4b98fa39c39228&language=%s' % (item.extra, langt)) lista_generos = {} try: lista = jsontools.load(httptools.downloadpage(url, cookies=False).data)["genres"] for l in lista: lista_generos[str(l["id"])] = l["name"] if "es" in langt: thumbnail[str(l["id"])] = "%s1/%s.jpg" % (images_predef, l["name"].lower() \ .replace("ó", "o").replace("í", "i") \ .replace(" ", "%20").replace("Aventuras", "Aventura") .replace("ú", "u")) else: thumbnail[str(l["id"])] = "%s2/%s.jpg" % (images_predef, l["name"]) except: pass fecha = datetime.now().strftime('%Y-%m-%d') sort_by = 'release_date.desc' param_year = 'release_date.lte' if item.contentType == 'tvshow': sort_by = 'first_air_date.desc' param_year = 'air_date.lte' for key, value in lista_generos.items(): search = {'url': 'discover/%s' % item.extra, 'with_genres': key, 'sort_by': sort_by, param_year: fecha, 'language': langt, 'page': 1} new_item = item.clone(title=value, thumbnail=thumbnail[key], action="listado_tmdb", search=search) itemlist.append(new_item) itemlist.sort(key=lambda item: item.title) else: year = datetime.now().year + 3 for i in range(year, 1899, -1): if item.contentType == 'tvshow': param_year = 'first_air_date_year' else: param_year = 'primary_release_year' search = {'url': 'discover/%s' % item.extra, param_year: i, 'language': langt, 'page': 1} itemlist.append(item.clone(title=str(i), action='listado_tmdb', search=search)) return itemlist def filtro(item): logger.info() from datetime import datetime list_controls = [] valores = {} dict_values = None list_controls.append({'id': 'years', 'label': config.get_localized_string(60232), 'enabled': True, 'color': '0xFFCC2EFA', 'type': 'list', 'default': -1, 'visible': True}) list_controls[0]['lvalues'] = [] valores['years'] = [] year = datetime.now().year + 1 for i in range(1900, year + 1): list_controls[0]['lvalues'].append(str(i)) valores['years'].append(str(i)) list_controls[0]['lvalues'].append(config.get_localized_string(70450)) valores['years'].append('') if "Personalizado" in item.title: # Se utilizan los valores por defecto/guardados valores_guardados = config.get_setting("filtro_defecto_" + item.extra, item.channel) if valores_guardados: dict_values = valores_guardados url = ('http://api.themoviedb.org/3/genre/%s/list?api_key=f7f51775877e0bb6703520952b3c7840&language=%s' % (item.extra, langt)) try: lista = jsontools.load(httptools.downloadpage(url, cookies=False).data)["genres"] if lista: list_controls.append({'id': 'labelgenre', 'enabled': True, 'type': 'label', 'default': None, 'label': config.get_localized_string(70451), 'visible': True, 'color': '0xFFC52020'}) for l in lista: list_controls.append({'id': 'genre' + str(l["id"]), 'label': l["name"], 'enabled': True, 'type': 'bool', 'default': False, 'visible': True}) except: pass list_controls.append({'id': 'orden', 'label': config.get_localized_string(70455), 'enabled': True, 'color': '0xFF25AA48', 'type': 'list', 'default': -1, 'visible': True}) orden = [config.get_localized_string(70456), config.get_localized_string(70457), config.get_localized_string(70458), config.get_localized_string(70459), config.get_localized_string(70460), config.get_localized_string(70461)] if item.extra == "movie": orden.extend([config.get_localized_string(70462), config.get_localized_string(70463)]) orden_tmdb = ['popularity.desc', 'popularity.asc', 'release_date.desc', 'release_date.asc', 'vote_average.desc', 'vote_average.asc', 'original_title.asc', 'original_title.desc'] valores['orden'] = [] list_controls[-1]['lvalues'] = [] for i, tipo_orden in enumerate(orden): list_controls[-1]['lvalues'].insert(0, tipo_orden) valores['orden'].insert(0, orden_tmdb[i]) list_controls.append({'id': 'espacio', 'label': '', 'enabled': False, 'type': 'label', 'default': None, 'visible': True}) list_controls.append({'id': 'save', 'label': config.get_localized_string(70464), 'enabled': True, 'type': 'bool', 'default': False, 'visible': True}) else: list_controls.append({'id': 'keyword', 'label': config.get_localized_string(70465), 'enabled': True, 'type': 'text', 'default': '', 'visible': True}) item.valores = valores return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values, caption=config.get_localized_string(70320), item=item, callback='filtrado') def filtrado(item, values): values_copy = values.copy() # Guarda el filtro para que sea el que se cargue por defecto if "save" in values and values["save"]: values_copy.pop("save") config.set_setting("filtro_defecto_" + item.extra, values_copy, item.channel) year = item.valores["years"][values["years"]] if "Personalizado" in item.title: orden = item.valores["orden"][values["orden"]] if item.extra == "tv": orden = orden.replace('release_date', 'first_air_date') genero_ids = [] for v in values: if "genre" in v: if values[v]: genero_ids.append(v.replace('genre', '')) genero_ids = ",".join(genero_ids) if "clave" in item.title: item.search = {'url': 'search/%s' % item.extra, 'year': year, 'query': values["keyword"], 'language': langt, 'page': 1} elif item.extra == "movie": item.search = {'url': 'discover/%s' % item.extra, 'sort_by': orden, 'primary_release_year': year, 'with_genres': genero_ids, 'vote_count.gte': '10', 'language': langt, 'page': 1} else: item.search = {'url': 'discover/%s' % item.extra, 'sort_by': orden, 'first_air_date_year': year, 'with_genres': genero_ids, 'vote_count.gte': '10', 'language': langt, 'page': 1} item.action = "listado_tmdb" return listado_tmdb(item) def musica_movie(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data patron = '([^<]+)
([^<]+).*?(\d+:\d+).*?

(.*?)' matches = scrapertools.find_multiple_matches(data, patron) for thumbnail, imdb_id, title, movie, datos in matches: new_item = item.clone(action='listado_tmdb') try: if "@" in thumbnail: thumbnail = thumbnail.rsplit('@', 1)[0] thumbnail += "@._UX482.jpg" elif "._V1_" in thumbnail: thumbnail = thumbnail.rsplit('._V1_', 1)[0] thumbnail += "._V1_UX482.jpg" except: pass new_item.thumbnail = thumbnail datos = datos.strip() if datos: new_item.infoLabels['plot'] = scrapertools.htmlclean(datos) new_item.title = title.strip() + ' (%s)' % (movie.strip()) new_item.infoLabels['imdb_id'] = imdb_id new_item.search = {'url': 'find/%s' % imdb_id, 'external_source': 'imdb_id', 'language': langt} itemlist.append(new_item) else: patron = '(?:

|
([^<]+)|"description">([^<]+)<)' matches = scrapertools.find_multiple_matches(data, patron) for thumbnail, thumb2, imdb_id, title, datos, plot, plot2 in matches: new_item = item.clone(action='detalles') new_item.title = title.strip() if not thumbnail: thumbnail = thumb2 try: if "@" in thumbnail: thumbnail = thumbnail.rsplit('@', 1)[0] thumbnail += "@._UX482.jpg" elif "._V1_" in thumbnail: thumbnail = thumbnail.rsplit('._V1_', 1)[0] thumbnail += "._V1_UX482.jpg" except: pass new_item.thumbnail = thumbnail if not plot: plot = plot2 new_item.infoLabels['plot'] = scrapertools.htmlclean(plot.strip()) generos = scrapertools.find_multiple_matches(datos, 'genre">([^<]+)<') if generos: new_item.infoLabels["genre"] = ", ".join(generos) duracion = scrapertools.find_single_match(datos, '(\d+) min') if duracion: new_item.infoLabels['duration'] = int(duracion) * 60 new_item.infoLabels['year'] = scrapertools.find_single_match(new_item.title, '\((\d{4})') if not new_item.infoLabels['year']: new_item.infoLabels['year'] = scrapertools.find_single_match(datos, 'year.*?\((\d{4})') if new_item.infoLabels['year']: new_item.title += ' (%s)' % new_item.infoLabels['year'] rating = scrapertools.find_single_match(datos, '(?:rating|Metascore).*?([^<]*)') rating = rating.replace(",", ".") if rating: if not "." in rating: try: rating = float(rating) / 10 except: rating = None if rating: new_item.title += typo(str(rating), '_ color kod') new_item.infoLabels['rating'] = float(rating) new_item.infoLabels['imdb_id'] = imdb_id itemlist.append(new_item) next_page = scrapertools.find_single_match(data, ']*>Next') if next_page: next_page = 'http://www.imdb.com' + next_page # if not "title_type" in item.url: # next_page = 'http://www.imdb.com' + next_page # else: # next_page = 'http://www.imdb.com/search/title' + next_page itemlist.append(item.clone(title=config.get_localized_string(70065), url=next_page, )) return itemlist def filtro_imdb(item): logger.info() from datetime import datetime list_controls = [] valores = {} dict_values = None # Se utilizan los valores por defecto/guardados valores_guardados = config.get_setting("filtro_defecto_imdb_" + item.extra, item.channel) if valores_guardados: dict_values = valores_guardados list_controls.append({'id': 'title', 'label': config.get_localized_string(60320), 'enabled': True, 'type': 'text', 'default': '', 'visible': True}) list_controls.append({'id': 'yearsdesde', 'label': config.get_localized_string(70452), 'enabled': True, 'color': '0xFFCC2EFA', 'type': 'list', 'default': -1, 'visible': True}) list_controls.append({'id': 'yearshasta', 'label': config.get_localized_string(70453), 'enabled': True, 'color': '0xFF2ECCFA', 'type': 'list', 'default': -1, 'visible': True}) list_controls[1]['lvalues'] = [] list_controls[2]['lvalues'] = [] valores['years'] = [] year = datetime.now().year + 1 for i in range(1900, year + 1): list_controls[1]['lvalues'].append(str(i)) list_controls[2]['lvalues'].append(str(i)) valores['years'].append(str(i)) list_controls[1]['lvalues'].append(config.get_localized_string(70450)) list_controls[2]['lvalues'].append(config.get_localized_string(70450)) valores['years'].append('') try: generos_spa = {'Action': config.get_localized_string(70394), 'Adventure': config.get_localized_string(60267), 'Animation': config.get_localized_string(60268), 'Biography': config.get_localized_string(70403), 'Comedy': config.get_localized_string(60270), 'Crime': config.get_localized_string(60271), 'Documentary': config.get_localized_string(70396), 'Family': config.get_localized_string(70399), 'Fantasy': config.get_localized_string(60274), 'Film-Noir': config.get_localized_string(70400), 'Game-Show': config.get_localized_string(70401), 'History': config.get_localized_string(70405), 'Horror': config.get_localized_string(70013), 'Music': config.get_localized_string(70404), 'Mistery': config.get_localized_string(70402), 'News': config.get_localized_string(60279), 'Reality-TV': config.get_localized_string(70406), 'Sci-Fi': config.get_localized_string(70397), 'Sport': config.get_localized_string(70395), 'Talk-Show': config.get_localized_string(70398), 'War': config.get_localized_string(70407)} data = httptools.downloadpage("http://www.imdb.com/search/title", cookies=False).data bloque = scrapertools.find_single_match(data, '

Genres

(.*?)') matches = scrapertools.find_multiple_matches(bloque, ' value="([^"]+)"\s*>\s*([^<]+)<') if matches: list_controls.append({'id': 'labelgenre', 'enabled': True, 'type': 'label', 'visible': True, 'label': config.get_localized_string(70451), 'color': '0xFFC52020'}) lista = [] for valor, titulo in matches: titulo = generos_spa.get(titulo, titulo) lista.append([valor, titulo]) lista.sort(key=lambda lista: lista[1]) for valor, titulo in lista: list_controls.append({'id': 'genre' + valor, 'label': titulo, 'enabled': True, 'type': 'bool', 'default': False, 'visible': True}) except: pass list_controls.append({'id': 'espacio', 'label': '', 'enabled': False, 'type': 'label', 'default': None, 'visible': True}) try: bloque = scrapertools.find_single_match(data, '

Countries

(.*?)Less-Common') matches = scrapertools.find_multiple_matches(bloque, ' value="([^"]+)"\s*>([^<]+)<') if matches: list_controls.append({'id': 'pais', 'label': config.get_localized_string(70466), 'enabled': True, 'color': '0xFFFF8000', 'type': 'list', 'default': -1, 'visible': True}) list_controls[-1]['lvalues'] = [] list_controls[-1]['lvalues'].append(config.get_localized_string(70450)) valores['pais'] = [] valores['pais'].append('') for valor, titulo in matches: list_controls[-1]['lvalues'].insert(0, titulo) valores['pais'].insert(0, valor) except: pass list_controls.append({'id': 'orden', 'label': config.get_localized_string(70455), 'enabled': True, 'color': '0xFF25AA48', 'type': 'list', 'default': -1, 'visible': True}) orden = [config.get_localized_string(70456), config.get_localized_string(70457), config.get_localized_string(70458), config.get_localized_string(70459), config.get_localized_string(70460), config.get_localized_string(70461), config.get_localized_string(70462), config.get_localized_string(70463)] orden_imdb = ['moviemeter,asc', 'moviemeter,desc', 'year,desc', 'year,asc', 'user_rating,desc', 'user_rating,asc', 'alpha,asc', 'alpha,desc'] valores['orden'] = [] list_controls[-1]['lvalues'] = [] for i, tipo_orden in enumerate(orden): list_controls[-1]['lvalues'].insert(0, tipo_orden) valores['orden'].insert(0, orden_imdb[i]) list_controls.append({'id': 'save', 'label': config.get_localized_string(70464), 'enabled': True, 'type': 'bool', 'default': False, 'visible': True}) item.valores = valores return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values, caption=config.get_localized_string(70320), item=item, callback='filtrado_imdb') def filtrado_imdb(item, values): values_copy = values.copy() # Guarda el filtro para que sea el que se cargue por defecto if "save" in values and values["save"]: values_copy.pop("save") config.set_setting("filtro_defecto_imdb_" + item.extra, values_copy, item.channel) yeard = item.valores["years"][values["yearsdesde"]] yearh = item.valores["years"][values["yearshasta"]] orden = item.valores["orden"][values["orden"]] pais = item.valores["pais"][values["pais"]] genero_ids = [] for v in values: if "genre" in v: if values[v]: genero_ids.append(v.replace('genre', '')) genero_ids = ",".join(genero_ids) try: votos = int(values["votos"]) except: votos = "" item.url = 'http://www.imdb.com/search/title?countries=%s&num_votes=%s,&genres=%s&release_date=%s,%s&sort=%s&' \ 'title=%s&title_type=' % (pais, str(votos), genero_ids, yeard, yearh, orden, values["title"]) if item.contentType == "movie": item.url += "feature,tv_movie" else: item.url += "tv_series,tv_special,mini_series" item.action = "listado_imdb" return listado_imdb(item) def indices_imdb(item): # Índices imdb por año y genero itemlist = [] from datetime import datetime if config.get_localized_string(70032) in item.title: generos_spa = {'Action': config.get_localized_string(70394), 'Adventure': config.get_localized_string(60267), 'Animation': config.get_localized_string(60268), 'Biography': config.get_localized_string(70403), 'Thriller': config.get_localized_string(70410), 'Comedy': config.get_localized_string(60270), 'Crime': config.get_localized_string(60271), 'Documentary': config.get_localized_string(70396), 'Family': config.get_localized_string(70399), 'Romance': config.get_localized_string(70409), 'Fantasy': config.get_localized_string(60274), 'Film-Noir': config.get_localized_string(70400), 'Game-Show': config.get_localized_string(70401), 'Drama': config.get_localized_string(70412), 'Western': config.get_localized_string(70411), 'History': config.get_localized_string(70405), 'Horror': config.get_localized_string(70013), 'Music': config.get_localized_string(70404), 'Musical': config.get_localized_string(70408),'Mystery': config.get_localized_string(70402), 'News': config.get_localized_string(60279), 'Reality-TV': config.get_localized_string(70406), 'Sci-Fi': config.get_localized_string(70397), 'Sport': config.get_localized_string(70395), 'Talk-Show': config.get_localized_string(70398), 'War': config.get_localized_string(70407)} data = httptools.downloadpage("http://www.imdb.com/search/title", cookies=False).data bloque = scrapertools.find_single_match(data, '

Genres

(.*?)') matches = scrapertools.find_multiple_matches(bloque, ' value="([^"]+)"\s*>\s*([^<]+)<') if matches: for valor, titulo in matches: title = generos_spa.get(titulo, titulo) thumbnail = "%s2/%s.jpg" % (images_predef, titulo) itemlist.append(item.clone(title=title, action='listado_imdb', thumbnail=thumbnail, url='http://www.imdb.com/search/title?genres=%s%s' % (valor, item.url))) itemlist = thumb(itemlist) itemlist.sort(key=lambda item: item.title) else: year = datetime.now().year + 3 for i in range(year, 1899, -1): itemlist.append(item.clone(title=str(i), action='listado_imdb', url='http://www.imdb.com/search/title?release_date=%s,%s%s' % (i, i, item.url))) return itemlist ##-------------------- SECCION FILMAFFINITY ------------------------## def listado_fa(item): # Método para listados principales de filmaffinity itemlist = [] # Listados con paginación por post if item.extra == "top": if item.page_fa: post = "from=%s" % item.page_fa data = httptools.downloadpage(item.url, post=post).data if item.total > item.page_fa: item.page_fa += 30 else: item.page_fa = "" else: item.page_fa = 30 data = httptools.downloadpage(item.url).data item.total = int(scrapertools.find_single_match(data, 'tmResCount\s*=\s*(\d+)')) if item.total <= item.page_fa: item.page_fa = "" else: data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) votaciones = [] # Si es la sección de estrenos cambia la estructura del scraper if item.extra == "estrenos": patron = '\s*(\d+[^<]+)<(.*?)(?:
|' \ '
([^<]+)
' \ '\s*(.*?)\s*<|)' matches = scrapertools.find_multiple_matches(bloque, patron) for url, thumb, thumb2, title, year, rating, votos in matches: title = title.strip() new_item = item.clone(action="detalles_fa", contentType="movie", extra="movie", contentTitle=title) if not url.startswith("http://m.filmaffinity"): new_item.url = "http://m.filmaffinity.com" + url else: new_item.url = url if not thumb: thumb = thumb2 new_item.thumbnail = thumb.replace("msmall", "large") if not new_item.thumbnail.startswith("http"): new_item.thumbnail = "http://m.filmaffinity.com" + new_item.thumbnail new_item.title = " " + title + " (%s) %s" % (year, rating) new_item.infoLabels['year'] = year votaciones.append([rating, votos]) if rating: new_item.infoLabels['rating'] = float(rating.replace(",", ".")) new_item.infoLabels['votes'] = votos itemlist.append(new_item) else: patron = '(?:
|)' \ '.*?(?:data-src="([^"]+)"|src="((?!/images/empty.gif)[^"]+)").*?' \ '
' \ '([^<]+)
\s*(.*?)\s*<|)' matches = scrapertools.find_multiple_matches(data, patron) for url, url2, thumb, thumb2, title, year, rating, votos in matches: title = title.strip() new_item = item.clone(action="detalles_fa", extra="movie") if not url: url = url2 if not url.startswith("http://m.filmaffinity"): new_item.url = "http://m.filmaffinity.com" + url else: new_item.url = url if not thumb: thumb = thumb2 new_item.thumbnail = thumb.replace("msmall", "large") if not new_item.thumbnail.startswith("http"): new_item.thumbnail = "http://m.filmaffinity.com" + new_item.thumbnail new_item.title = title.replace("(Serie de TV)", "").replace("(TV)", "") + " (%s) %s" \ % (year, rating) new_item.contentTitle = re.sub(r'(?i)\(serie de tv\)|\(tv\)|\(c\)', '', title) if re.search(r'(?i)serie de tv|\(tv\)', title): new_item.contentType = "tvshow" new_item.extra = "tv" new_item.infoLabels["tvshowtitle"] = new_item.contentTitle new_item.infoLabels['year'] = year votaciones.append([rating, votos]) if rating: new_item.infoLabels['rating'] = float(rating.replace(",", ".")) new_item.infoLabels['votes'] = votos itemlist.append(new_item) if len(itemlist) < 31: from core import tmdb tmdb.set_infoLabels_itemlist(itemlist, True) for i, it in enumerate(itemlist): try: it.infoLabels['votes'] = votaciones[i][1] it.infoLabels['rating'] = float(votaciones[i][0].replace(",", ".")) except: pass next_page = scrapertools.find_single_match(data, 'aria-label="Next" href="([^"]+)"') if next_page: if not next_page.startswith("http://m.filmaffinity"): next_page = "http://m.filmaffinity.com" + next_page itemlist.append(Item(channel=item.channel, action=item.action, title=config.get_localized_string(70065), url=next_page, extra=item.extra)) elif item.page_fa: itemlist.append(item.clone(title=config.get_localized_string(70065), )) return itemlist def indices_fa(item): # Índices por genero, año, temas y sagas/colecciones itemlist = [] if item.url: data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) if "sagas" in item.extra: patron = '
  • .*?href="([^"]+)".*?group-name">([^<]+)<.*?src="([^"]+)".*?' \ '"count-movies">([^<]+)<' matches = scrapertools.find_multiple_matches(data, patron) for url, title, thumbnail, info in matches: new_item = item.clone(action="listado_fa") if not url.startswith("http://www.filmaffinity"): new_item.url = "http://m.filmaffinity.com" + url else: new_item.url = url.replace("www.filmaffinity.com", "m.filmaffinity.com") new_item.thumbnail = thumbnail.replace("mmed", "large") new_item.title = title.strip() + " (%s)" % (info) itemlist.append(new_item) next_page = scrapertools.find_single_match(data, '>>') if next_page: if not next_page.startswith("http://www.filmaffinity.com"): next_page = "http://www.filmaffinity.com" + next_page itemlist.append(Item(channel=item.channel, action=item.action, title=config.get_localized_string(70065), url=next_page, extra=item.extra)) elif config.get_localized_string(70032) in item.title: bloque = scrapertools.find_single_match(data, 'name="genre">.*?(.*?)') matches = scrapertools.find_multiple_matches(bloque, '') for valor, titulo in matches: if valor == "TV_SE": continue new_item = item.clone(title=titulo, action="listado_fa", extra="top") new_item.url = "http://m.filmaffinity.com/%s/topgen.php?genre=%s&country=&fromyear=&toyear=&nodoc=1" \ % (langf, valor) if item.contentType == "movie": new_item.url += "¬vse=1" generos = ['1/accion.jpg', '1/animacion.jpg', '1/aventura.jpg', '1/guerra.jpg', '1/ciencia%20ficcion.jpg', '2/Film-Noir.jpg', '1/comedia.jpg', '0/Unknown.png', '1/documental.jpg', '1/drama.jpg', '1/fantasia.jpg', '2/Kids.jpg', '2/Suspense.jpg', '1/musical.jpg', '1/romance.jpg', '1/terror.jpg', '1/thriler.jpg', '1/western.jpg'] if langf != "en": try: new_item.thumbnail = "%s/%s" % (images_predef, generos[len(itemlist)]) except: new_item.thumbnail = "%s1/%s.jpg" % (images_predef, titulo.lower()) else: new_item.thumbnail = "%s2/%s.jpg" % (images_predef, titulo) itemlist.append(new_item) elif "Temas" in item.title: bloques = scrapertools.find_multiple_matches(data, '
    (.*?)
    ') for letra, bloque in bloques: patron = 'href="([^"]+)">([^<]+)<.*?"badge">(\d+)' matches = scrapertools.find_multiple_matches(bloque, patron) extra = len(matches) + 1 action = "" folder = True if config.is_xbmc(): action = "move" folder = False itemlist.append(item.clone(title=letra, action=action, extra=extra, folder=folder)) for url, titulo, numero in matches: new_item = item.clone(action="temas_fa") topic_id = scrapertools.find_single_match(url, "topic=(\d+)") new_item.url = "http://www.filmaffinity.com/%s/%s&attr=all" % ( langf, url.replace("&nodoc", "").replace("¬vse", "")) new_item.title = titulo + " (%s)" % numero itemlist.append(new_item) else: from datetime import datetime year = datetime.now().year for i in range(year, 1899, -1): new_item = item.clone(title=str(i), action="listado_fa", extra="top") genre = '' if item.contentType == "tvshow": genre = 'TV_SE' new_item.url = "http://m.filmaffinity.com/%s/topgen.php?genre=%s&country=&fromyear=%s&toyear=%s&nodoc=1" \ % (langf, genre, i, i) if item.contentType == "movie": new_item.url += "¬vse=1" itemlist.append(new_item) return itemlist def temas_fa(item): # Películas y series por temas itemlist = [] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) item.infoLabels['plot'] = scrapertools.find_single_match(data, '

    ([^<]+)

    ') patron = '
    \s*([^<]+)\s*\((\d+)\)' matches = scrapertools.find_multiple_matches(data, patron) for url, thumb, title, year in matches: title = title.strip() new_item = item.clone(action="detalles_fa", contentType="movie", extra="movie", ) new_item.url = "http://m.filmaffinity.com/%s/movie.php?id=%s" % (langf, url) new_item.thumbnail = thumb.replace("msmall", "large") if not new_item.thumbnail.startswith("http"): new_item.thumbnail = "http://www.filmaffinity.com" + new_item.thumbnail new_item.infoLabels["year"] = year new_item.title = title + " (%s)" % year if re.search(r'(?i)serie de tv|\(tv\)', title): new_item.contentType = "tvshow" new_item.extra = "tv" new_item.contentTitle = re.sub(r'(?i)\(serie de tv\)|\(tv\)|\(c\)', '', title) itemlist.append(new_item) next_page = scrapertools.find_single_match(data, '>>') if next_page: if not next_page.startswith("http://www.filmaffinity.com"): next_page = "http://www.filmaffinity.com/%s/%s" % (langf, next_page) itemlist.append(Item(channel=item.channel, action=item.action, title=config.get_localized_string(70065), url=next_page)) return itemlist def detalles_fa(item): itemlist = [] item.plot = "" rating = item.infoLabels['rating'] votos = item.infoLabels['votes'] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) # Se extrae el título original para posibles búsquedas en tmdb posteriores orig_title = scrapertools.find_single_match(data, 'itemprop="datePublished">.*?
    ([^<]+)
    ').strip() if item.contentType == "movie": item.infoLabels['originaltitle'] = re.sub(r"(?i)\(TV Series\)|\(S\)|\(TV\)", "", orig_title) else: item.infoLabels['tvshowtitle'] = re.sub(r"(?i)\(TV Series\)|\(S\)|\(TV\)", "", orig_title) item_tmdb = item.clone() if item.contentType == "movie": ob_tmdb = Tmdb(texto_buscado=item_tmdb.contentTitle, year=item_tmdb.infoLabels['year'], tipo=item_tmdb.extra, idioma_busqueda=langt) if not ob_tmdb.result: ob_tmdb = Tmdb(texto_buscado=item_tmdb.infoLabels['originaltitle'], year=item_tmdb.infoLabels['year'], tipo=item_tmdb.extra, idioma_busqueda=langt) else: ob_tmdb = Tmdb(texto_buscado=item_tmdb.contentTitle, tipo=item_tmdb.extra, idioma_busqueda=langt) if not ob_tmdb.result: ob_tmdb = Tmdb(texto_buscado=item_tmdb.infoLabels['tvshowtitle'], tipo=item_tmdb.extra, idioma_busqueda=langt) if ob_tmdb.result: ob_tmdb = Tmdb(id_Tmdb=ob_tmdb.get_id(), tipo=item_tmdb.extra, idioma_busqueda=langt) item.infoLabels = ob_tmdb.get_infoLabels(item.infoLabels) # Si no hay sinopsis en idioma elegido, buscar en el alternativo if not item.infoLabels["plot"]: item.infoLabels["plot"] = ob_tmdb.get_sinopsis(idioma_alternativo=langt_alt) # Se concatena el plot de filmaffinity al de tmdb si lo hay plot = scrapertools.find_single_match(data, '
    (.*?)
    ') plot = plot.replace("

    ", "\n") plot = scrapertools.decodeHtmlentities(plot).replace(" (FILMAFFINITY)", "") if plot and (item.infoLabels['plot'] and item.infoLabels['plot'] != plot): item.infoLabels['plot'] += " (TMDB)\n" + plot + " (FILMAFFINITY)" elif plot and not item.infoLabels['plot']: item.infoLabels['plot'] = plot # Se busca y rellena con la info de filmaffinity para diferenciarla de tmdb if not item.infoLabels['duration']: duration = scrapertools.find_single_match(data, '
    (\d+)') if duration: item.infoLabels['duration'] = int(duration) * 60 if not item.infoLabels['genre']: generos = scrapertools.find_multiple_matches(data, 'class="g-t-item">(.*?)') genres = [] for g in generos: genres.append(scrapertools.htmlclean(g.strip())) item.infoLabels['genre'] = ", ".join(genres) if not rating: rating = scrapertools.find_single_match(data, 'itemprop="ratingValue".*?>([^<]+)<') if rating: rating = float(rating.replace(",", ".")) elif ob_tmdb.result: rating = float(ob_tmdb.result.get('vote_average', 0)) item.infoLabels['rating'] = rating if not votos: votos = scrapertools.find_single_match(data, 'itemprop="ratingCount".*?>([^<]+)<') if votos == "0" and ob_tmdb.result: votos = ob_tmdb.result.get('vote_count', '') item.infoLabels['votes'] = votos if item.infoLabels['fanart']: item.fanart = item.infoLabels['fanart'] else: item.fanart = scrapertools.find_single_match(data, 'Imagen Principal.*?src: "([^"]+)"') if item.infoLabels['thumbnail']: item.thumbnail = item.infoLabels['thumbnail'] if item.infoLabels['tagline']: itemlist.append(item.clone(title="--- %s ---" % item.infoLabels['tagline'], action="")) title = item.contentType.replace("movie", config.get_localized_string(70283)).replace("tvshow", "serie") itemlist.append(item.clone(action="busqueda", title=config.get_localized_string(70069) % (title, item.contentTitle))) if item.infoLabels['originaltitle'] and item.contentTitle != item.infoLabels['originaltitle']: itemlist.append(item.clone(action="busqueda", contentTitle=item.infoLabels['originaltitle'], title=config.get_localized_string(70070) % item.infoLabels['originaltitle'])) if langt != "es" and langt != "en" and item.infoLabels["tmdb_id"]: tmdb_lang = Tmdb(id_Tmdb=item.infoLabels["tmdb_id"], tipo=item.extra, idioma_busqueda=def_lang) if tmdb_lang.result.get("title") and tmdb_lang.result["title"] != item.contentTitle: tmdb_lang = tmdb_lang.result["title"] itemlist.append(item.clone(action="busqueda", title=config.get_localized_string(70066) % tmdb_lang, contentTitle=tmdb_lang)) if item.contentType == "tvshow" and ob_tmdb.result: itemlist.append(item.clone(action="info_seasons", title=config.get_localized_string(70067) % item.infoLabels["number_of_seasons"])) if ob_tmdb.result: itemlist.append(item.clone(action="reparto", title=config.get_localized_string(70071), infoLabels={'tmdb_id': item.infoLabels['tmdb_id'], 'mediatype': item.contentType})) if config.is_xbmc(): item.contextual = True trailer_url = scrapertools.find_single_match(data, '' % langf) images = {} if ob_tmdb.result and ob_tmdb.result.get("images"): images['tmdb'] = ob_tmdb.result["images"] if url_img: images['filmaffinity'] = {} if images: itemlist.append(item.clone(action="imagenes", title=config.get_localized_string(70316), images=images, url=url_img, extra="menu")) try: if item.contentType == "movie" and item.infoLabels["year"] < 2014: post_url = "https://theost.com/search/custom/?key=%s&year=%s&country=0&genre=0" % ( item.infoLabels['originaltitle'].replace(" ", "+"), item.infoLabels["year"]) url = "https://nl.hideproxy.me/includes/process.php?action=update" post = "u=%s&proxy_formdata_server=nl&allowCookies=1&encodeURL=1&encodePage=0&stripObjects=0&stripJS=0&go=" % urllib.quote( post_url) while True: response = httptools.downloadpage(url, post=post, follow_redirects=False) if response.headers.get("location"): url = response.headers["location"] post = "" else: data_music = response.data break url_album = scrapertools.find_single_match(data_music, 'album(?:s|) on request.*?href="([^"]+)"') if url_album: url_album = "https://nl.hideproxy.me" + url_album itemlist.append( item.clone(action="musica_movie", title=config.get_localized_string(70317), url=url_album, )) except: pass token_auth = config.get_setting("token_trakt", "trakt") if token_auth and ob_tmdb.result: itemlist.append(item.clone(title=config.get_localized_string(70323), action="menu_trakt")) # Acciones si se configura cuenta en FA (Votar y añadir/quitar en listas) mivoto = scrapertools.find_single_match(data, 'bg-my-rating.*?>\s*(\d+)') itk = scrapertools.find_single_match(data, 'data-itk="([^"]+)"') folder = not config.is_xbmc() if mivoto: item.infoLabels["userrating"] = int(mivoto) new_item = item.clone(action="votar_fa", title=config.get_localized_string(70324) % mivoto, itk=itk, voto=int(mivoto), folder=folder) new_item.infoLabels["duration"] = "" itemlist.append(new_item) else: if itk: new_item = item.clone(action="votar_fa", title=config.get_localized_string(70325) % title, itk=itk, accion="votar", folder=folder) new_item.infoLabels["duration"] = "" itemlist.append(new_item) if itk: itk = scrapertools.find_single_match(data, 'var itk="([^"]+)"') new_item = item.clone(action="acciones_fa", accion="lista_movie", itk=itk, title=config.get_localized_string(70326)) new_item.infoLabels["duration"] = "" itemlist.append(new_item) # Si pertenece a una saga/colección if ob_tmdb.result: itemlist.append(item.clone(title="", action="", infoLabels={})) if ob_tmdb.result.get("belongs_to_collection"): new_item = item.clone(infoLabels={'mediatype': item.contentType}, action="listado_tmdb", ) saga = ob_tmdb.result["belongs_to_collection"] new_item.infoLabels["tmdb_id"] = saga["id"] if saga["poster_path"]: new_item.thumbnail = 'http://image.tmdb.org/t/p/original' + saga["poster_path"] if saga["backdrop_path"]: new_item.fanart = 'http://image.tmdb.org/t/p/original' + saga["backdrop_path"] new_item.search = {'url': 'collection/%s' % saga['id'], 'language': langt} new_item.title = config.get_localized_string(70327) % saga["name"] itemlist.append(new_item) itemlist.append(item.clone(title=config.get_localized_string(70328) % title.capitalize(), action="listado_tmdb", search={'url': '%s/%s/similar' % (item.extra, item.infoLabels['tmdb_id']), 'language': langt, 'page': 1}, infoLabels={'mediatype': item.contentType}, )) itemlist.append( item.clone(title=config.get_localized_string(70315), action="listado_tmdb", infoLabels={'mediatype': item.contentType}, search={'url': '%s/%s/recommendations' % (item.extra, item.infoLabels['tmdb_id']), 'language': langt, 'page': 1}, )) return itemlist def filtro_fa(item): logger.info() from datetime import datetime list_controls = [] valores = {} dict_values = None # Se utilizan los valores por defecto/guardados valores_guardados = config.get_setting("filtro_defecto_filmaf_" + item.extra, item.channel) if valores_guardados: dict_values = valores_guardados list_controls.append({'id': 'yearsdesde', 'label': config.get_localized_string(70452), 'enabled': True, 'type': 'list', 'default': -1, 'visible': True}) list_controls.append({'id': 'yearshasta', 'label': config.get_localized_string(70453), 'enabled': True, 'type': 'list', 'default': -1, 'visible': True}) list_controls[0]['lvalues'] = [] list_controls[1]['lvalues'] = [] valores['years'] = [] year = datetime.now().year for i in range(1900, year + 1): list_controls[0]['lvalues'].append(str(i)) list_controls[1]['lvalues'].append(str(i)) valores['years'].append(str(i)) list_controls[0]['lvalues'].append(config.get_localized_string(70450)) list_controls[1]['lvalues'].append(config.get_localized_string(70450)) valores['years'].append('') data = httptools.downloadpage("http://m.filmaffinity.com/%s/topgen.php" % langf).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) if item.contentType == "movie": try: bloque = scrapertools.find_single_match(data, 'name="genre">.*?(.*?)') matches = scrapertools.find_multiple_matches(bloque, '') if matches: list_controls.append({'id': 'genero', 'label': config.get_localized_string(70467), 'enabled': True, 'type': 'list', 'default': -1, 'visible': True}) list_controls[2]['lvalues'] = [] list_controls[2]['lvalues'].append("Todos") valores['genero'] = [] valores['genero'].append('') for valor, titulo in matches: if valor == "TV_SE": continue list_controls[2]['lvalues'].insert(0, titulo) valores['genero'].insert(0, valor) except: pass try: bloque = scrapertools.find_single_match(data, 'name="country">.*?(.*?)') matches = scrapertools.find_multiple_matches(bloque, '') if matches: list_controls.append({'id': 'pais', 'label': config.get_localized_string(70466), 'enabled': True, 'type': 'list', 'default': -1, 'visible': True}) list_controls[-1]['lvalues'] = [] list_controls[-1]['lvalues'].append('Todos') valores['pais'] = [] valores['pais'].append('') for valor, titulo in matches: list_controls[-1]['lvalues'].insert(0, titulo) valores['pais'].insert(0, valor) except: pass list_controls.append({'id': 'espacio', 'label': '', 'enabled': False, 'type': 'label', 'default': None, 'visible': True}) list_controls.append({'id': 'save', 'label': config.get_localized_string(70464), 'enabled': True, 'type': 'bool', 'default': False, 'visible': True}) item.valores = valores return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values, caption=config.get_localized_string(70320), item=item, callback='filtrado_fa') def filtrado_fa(item, values): values_copy = values.copy() # Guarda el filtro para que sea el que se cargue por defecto if "save" in values and values["save"]: values_copy.pop("save") config.set_setting("filtro_defecto_filmaf_" + item.extra, values_copy, item.channel) yeard = item.valores["years"][values["yearsdesde"]] yearh = item.valores["years"][values["yearshasta"]] pais = item.valores["pais"][values["pais"]] if item.contentType == "movie": genero = item.valores["genero"][values["genero"]] else: genero = "TV_SE" item.url = 'http://m.filmaffinity.com/%s/topgen.php?genre=%s&country=%s&fromyear=%s&toyear=%s&nodoc=1' \ % (langf, genero, pais, yeard, yearh) if item.contentType == "movie": item.url += "¬vse=1" item.action = "listado_fa" return listado_fa(item) def login_fa(): logger.info() try: user = config.get_setting("usuariofa", "tvmoviedb") password = config.get_setting("passfa", "tvmoviedb") userid = config.get_setting("userid", "tvmoviedb") if user == "" or password == "": return False, config.get_localized_string(70329) data = httptools.downloadpage("http://m.filmaffinity.com/%s" % langf).data if "modal-menu-user" in data and userid: return True, "" post = "postback=1&rp=&username=%s&password=%s&rememberme=on" % (user, password) data = httptools.downloadpage("https://m.filmaffinity.com/%s/account.ajax.php?action=login" % langf, post=post).data if "Invalid username" in data: logger.error("Error en el login") return False, config.get_localized_string(70330) else: post = "name=user-menu&url=http://m.filmaffinity.com/%s/main.php" % langf data = httptools.downloadpage("http://m.filmaffinity.com/%s/tpl.ajax.php?action=getTemplate" % langf, post=post).data userid = scrapertools.find_single_match(data, 'id-user=(\d+)') if userid: config.set_setting("userid", userid, "tvmoviedb") logger.info("Login correcto") return True, "" except: import traceback logger.error(traceback.format_exc()) return False, config.get_localized_string(70331) def cuenta_fa(item): # Menú de cuenta filmaffinity itemlist = [] login, message = login_fa() if not login: itemlist.append(item.clone(action="", title=message, )) else: userid = config.get_setting("userid", "tvmoviedb") itemlist.append(item.clone(action="acciones_fa", title=config.get_localized_string(70332), accion="votos", url="http://m.filmaffinity.com/%s/user_ratings.php?id-user=%s" % (langf, userid))) itemlist.append(item.clone(action="acciones_fa", title=config.get_localized_string(70057), accion="listas", url="http://m.filmaffinity.com/%s/mylists.php" % langf)) return itemlist def acciones_fa(item): # Acciones cuenta filmaffinity, votar, ver listas o añadir/quitar de lista itemlist = [] if item.accion == "votos" or item.accion == "lista": data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) bloques = scrapertools.find_multiple_matches(data, 'list-group-item(?:[^>]+>\s*)\s*
    \s*\((\d+)\)
    .*?(?:
    ' \ '([^<]+)
    \s*(.*?)\s*<|
  • ).*?' matches = scrapertools.find_multiple_matches(bloque, patron) mivoto = scrapertools.find_single_match(bloque, 'bg-my-rating[^>]+>(?:\s*|)([^<]+)<') for url, thumb, title, year, rating, votos in matches: new_item = item.clone(action="detalles_fa", ) if not url.startswith("http://m.filmaffinity"): new_item.url = "http://m.filmaffinity.com" + url else: new_item.url = url new_item.infoLabels["year"] = year rating = rating.replace(",", ".") new_item.infoLabels["rating"] = float(rating) new_item.infoLabels["votes"] = votos.replace(".", "") if mivoto.isdigit(): new_item.infoLabels["userrating"] = int(mivoto) new_item.thumbnail = thumb.replace("msmall", "large") if not new_item.thumbnail.startswith("http"): new_item.thumbnail = "http://m.filmaffinity.com" + new_item.thumbnail if re.search(r'(?i)serie de tv|\(tv\)', title): new_item.contentType = "tvshow" new_item.extra = "tv" new_item.title = title.strip() + " (%s) %s/]%s" % ( year, rating, mivoto) new_item.contentTitle = title.strip() itemlist.append(new_item) elif item.accion == "listas": orderby = config.get_setting("orderfa", "tvmoviedb") data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) patron = 'list-group-item rip.*?href="([^"]+)".*?([^<]+).*?([^<]+)' \ '.*?(?:
    (.*?)|)' matches = scrapertools.find_multiple_matches(data, patron) for url, title, content, imgs in matches: new_item = item.clone(accion="lista", ) if not url.startswith("http://m.filmaffinity.com"): new_item.url = "http://m.filmaffinity.com%s&orderby=%s" % (url, orderby) else: new_item.url = "%s&orderby=%s" % (url, orderby) new_item.title = title + " (%s)" % (content) if imgs: imagenes = scrapertools.find_multiple_matches(imgs, 'data-src="([^"]+)"') from random import randint random = randint(0, len(imagenes) - 1) new_item.thumbnail = imagenes[random].replace("msmall", "large") itemlist.append(new_item) elif item.accion == "lista_movie": movieid = item.url.rsplit("=", 1)[1] url = "http://m.filmaffinity.com/%s/edtmovielists.php?movie_id=%s" % (langf, movieid) data = httptools.downloadpage(url).data patron = 'data-list-id="([^"]+)"(.*?)
    ([^<]+)<' matches = scrapertools.find_multiple_matches(data, patron) for listid, chequeo, title in matches: new_item = item.clone(folder=not config.is_xbmc()) new_item.infoLabels["duration"] = "" new_item.listid = listid if "checked" in chequeo: new_item.title = "[COLOR %s]%s[/COLOR] %s" % ("green", u"\u0474".encode('utf-8'), title) new_item.accion = "removeMovieFromList" else: new_item.title = "%s %s" % ( u"\u04FE".encode('utf-8'), title) new_item.accion = "addMovieToList" itemlist.append(new_item) new_item = item.clone(action="newlist", title=config.get_localized_string(70333), ) new_item.infoLabels["duration"] = "" itemlist.append(new_item) else: url = "http://filmaffinity.com/%s/movieslist.ajax.php" % langf movieid = item.url.rsplit("=", 1)[1] post = "action=%s&listId=%s&movieId=%s&itk=%s" % (item.accion, item.listid, movieid, item.itk) data = jsontools.load(httptools.downloadpage(url, post=post).data) if not item.folder: import xbmc return xbmc.executebuiltin("Container.Refresh") else: if data["result"] == 0: title = config.get_localized_string(70334) else: title = config.get_localized_string(70335) itemlist.append(item.clone(action="", title=title)) return itemlist def votar_fa(item): # Ventana para seleccionar el voto logger.info() list_controls = [] valores = {} dict_values = None if item.voto: dict_values = {'voto': item.voto} list_controls.append({'id': 'voto', 'label': config.get_localized_string(70468), 'enabled': True, 'type': 'list', 'default': 0, 'visible': True}) list_controls[0]['lvalues'] = ['No vista'] valores['voto'] = ["-1"] for i in range(1, 11): list_controls[0]['lvalues'].append(str(i)) valores['voto'].append(i) item.valores = valores return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values, caption=config.get_localized_string(70336) % item.contentTitle, item=item, callback='callback_voto') def callback_voto(item, values): item.voto = item.valores["voto"][values["voto"]] item.action = "acciones_fa" movieid = item.url.rsplit("=", 1)[1] post = "id=%s&rating=%s&itk=%s&action=rate" % (movieid, item.voto, item.itk) data = jsontools.load(httptools.downloadpage("http://filmaffinity.com/%s/ratingajax.php" % langf, post=post).data) if not item.folder: import xbmc return xbmc.executebuiltin("Container.Refresh") else: if data["result"] == 0: title = config.get_localized_string(70337) else: title = config.get_localized_string(70335) itemlist.append(item.clone(action="", title=title)) return itemlist def newlist(item): # Creación de nueva lista en filmaffinity itemlist = [] if item.accion == "lista": location = httptools.downloadpage(item.url, only_headers=True).headers["location"] data = httptools.downloadpage("http://m.filmaffinity.com" + location).data itemlist.append(item.clone(action="", title=config.get_localized_string(70338))) else: url = "http://m.filmaffinity.com/%s/addlist.php?rp=%s" % (langf, item.url) data = httptools.downloadpage(url).data data = re.sub(r"\n|\r|\t| |\s{2}", "", data) patron = 'data-list-id="[^"]+" href="([^"]+)"><[^>]+>
    ' \ '([^<]+)
    ]+>
    ' matches = scrapertools.find_multiple_matches(data, patron) for scrapedurl, title in matches: scrapedurl = "http://m.filmaffinity.com" + scrapedurl itemlist.append(item.clone(title=title, url=scrapedurl, accion="lista")) return itemlist ##-------------------- LISTADOS DE IMAGENES ------------------------## def imagenes(item): itemlist = [] if item.extra == "menu": item.folder = not config.is_xbmc() if "tmdb" in item.images: itemlist.append(item.clone(title="Tmdb", extra="")) itemlist.append(item.clone(title="Fanart.Tv", extra="")) if "imdb" in item.images: itemlist.append(item.clone(title="Imdb", extra="")) if "filmaffinity" in item.images: itemlist.append(item.clone(title="Filmaffinity", extra="")) if "myanimelist" in item.images: data = httptools.downloadpage(item.url + "/pics", cookies=False).data images = scrapertools.find_multiple_matches(data, '
    .*?href.*?>([^<]+)<.*?').strip() item_tmdb = item.clone() if item.contentType == "movie": ob_tmdb = Tmdb(texto_buscado=item_tmdb.contentTitle, year=item_tmdb.infoLabels['year'], tipo=item_tmdb.extra, idioma_busqueda=langt) if not ob_tmdb.result and eng_title: ob_tmdb = Tmdb(texto_buscado=eng_title, year=item_tmdb.infoLabels['year'], tipo=item_tmdb.extra, idioma_busqueda=langt) if not ob_tmdb.result and ("Special (" in item.title or item.tipo == "special"): item_tmdb.extra = "tv" search = {'url': 'search/tv', 'language': langt, 'query': item_tmdb.contentTitle, 'first_air_date': item_tmdb.infoLabels["year"]} ob_tmdb = Tmdb(discover=search, tipo=item_tmdb.extra, idioma_busqueda=langt) else: search = {'url': 'search/tv', 'language': langt, 'query': eng_title, 'first_air_date': item_tmdb.infoLabels["year"]} ob_tmdb = Tmdb(discover=search, tipo=item_tmdb.extra, idioma_busqueda=langt) if not ob_tmdb.result and eng_title: search['query'] = eng_title ob_tmdb = Tmdb(discover=search, tipo=item_tmdb.extra, idioma_busqueda=langt) if not ob_tmdb.result and ("OVA (" in item.title or item.tipo == "ova"): item_tmdb.extra = "movie" ob_tmdb = Tmdb(texto_buscado=item_tmdb.contentTitle, tipo=item_tmdb.extra, idioma_busqueda=langt, year=item_tmdb.infoLabels['year']) if ob_tmdb.result: ob_tmdb = Tmdb(id_Tmdb=ob_tmdb.get_id(), tipo=item_tmdb.extra, idioma_busqueda=langt) item.infoLabels = ob_tmdb.get_infoLabels(item.infoLabels) # Se concatena sinopsis myanimelist con la de tmdb si la hubiese plot = scrapertools.find_single_match(data, '(.*?)') plot = plot.replace("
    ", "\n").replace("", "[I]").replace("", "[/I]") plot = scrapertools.decodeHtmlentities(plot) if plot and (item.infoLabels['plot'] and item.infoLabels['plot'] != plot): item.infoLabels['plot'] += " (TMDB)\n\n" + plot + " (MYANIMELIST)" elif plot and not item.infoLabels['plot']: item.infoLabels['plot'] = plot if not item.infoLabels['duration']: try: horas, min1, min2 = scrapertools.find_single_match(data, 'Duration:\s*(?:(\d+) hr\. (\d+) min|(\d+) min)') if horas: horas = int(horas) * 360 else: horas = 0 if not min1: min1 = min2 item.infoLabels['duration'] = horas + (int(min1) * 60) except: pass # Se sobreescribe la info de myanimelist sobre la de tmdb generos = scrapertools.find_single_match(data, 'Genres:(.*?)
    ') if generos: item.infoLabels['genre'] = scrapertools.htmlclean(generos) item.infoLabels['rating'] = float(rating) votos = scrapertools.find_single_match(data, '([^<]+)<') item.infoLabels['votes'] = votos.replace(",", "") if item.infoLabels['fanart']: item.fanart = item.infoLabels['fanart'] if item.infoLabels['thumbnail']: item.thumbnail = item.infoLabels['thumbnail'] if not item.thumbnail: item.thumbnail = scrapertools.find_single_match(data, '/pics">.*?" in data and config.is_xbmc(): itemlist.append( item.clone(title=config.get_localized_string(70321) % score, action="menu_mal", contentTitle=title_mal)) elif item.login and config.is_xbmc(): status = {'1': config.get_localized_string(70479), '2': config.get_localized_string(70480), '3': config.get_localized_string(70384), '4': config.get_localized_string(70385), '6': config.get_localized_string(70481)} estado = scrapertools.find_single_match(data, 'myinfo_updateInfo".*?option selected="selected" value="(\d+)"') try: estado = status[estado] itemlist.append( item.clone(title=config.get_localized_string(70322) % (estado, score), action="menu_mal", contentTitle=title_mal)) except: pass token_auth = config.get_setting("token_trakt", "trakt") if token_auth and ob_tmdb.result: itemlist.append(item.clone(title=config.get_localized_string(70323), action="menu_trakt")) # Se listan precuelas, secuelas y series alternativas prequel = scrapertools.find_single_match(data, 'Prequel:(.*?)') if prequel: matches = scrapertools.find_multiple_matches(prequel, 'href="([^"]+)">(.*?)') for url, title in matches: new_item = item.clone(infoLabels={'mediatype': item.contentType}, extra="", fanart=default_fan, thumbnail="") new_item.title = config.get_localized_string(70355) % title new_item.contentTitle = title new_item.url = "https://myanimelist.net%s" % url itemlist.append(new_item) sequel = scrapertools.find_single_match(data, 'Sequel:(.*?)') if sequel: matches = scrapertools.find_multiple_matches(sequel, 'href="([^"]+)">(.*?)') for url, title in matches: new_item = item.clone(infoLabels={'mediatype': item.contentType}, extra="", fanart=default_fan, thumbnail="") new_item.title = config.get_localized_string(70356) % title new_item.contentTitle = title new_item.url = "https://myanimelist.net%s" % url itemlist.append(new_item) alt_version = scrapertools.find_single_match(data, 'Alternative version:(.*?)') if alt_version: matches = scrapertools.find_multiple_matches(alt_version, 'href="([^"]+)">(.*?)') for url, title in matches: new_item = item.clone(infoLabels={'mediatype': item.contentType}, extra="", fanart=default_fan, thumbnail="") new_item.title = config.get_localized_string(70357) % title new_item.contentTitle = title new_item.url = "https://myanimelist.net%s" % url itemlist.append(new_item) if ob_tmdb.result: itemlist.append(item.clone(title="", action="", infoLabels={})) if ob_tmdb.result.get("belongs_to_collection"): new_item = item.clone(infoLabels={'mediatype': item.contentType}, action="listado_tmdb", ) saga = ob_tmdb.result["belongs_to_collection"] new_item.infoLabels["tmdb_id"] = saga["id"] if saga["poster_path"]: new_item.thumbnail = 'http://image.tmdb.org/t/p/original' + saga["poster_path"] if saga["backdrop_path"]: new_item.fanart = 'http://image.tmdb.org/t/p/original' + saga["backdrop_path"] new_item.search = {'url': 'collection/%s' % saga['id'], 'language': langt} new_item.title = "Es parte de: %s" % saga["name"] itemlist.append(new_item) itemlist.append( item.clone(title=config.get_localized_string(70358), action="listado_tmdb", infoLabels={'mediatype': item.contentType}, search={'url': '%s/%s/recommendations' % (item.extra, item.infoLabels['tmdb_id']), 'language': langt, 'page': 1}, )) # Recomendaciones myanimelist y búsqueda de info en anidb (fansubs en español) itemlist.append(item.clone(title=config.get_localized_string(70359), action="reco_mal")) anidb_link = scrapertools.find_single_match(data, '([^<]+)<(.*?)More') if next_page: itemlist.append(item.clone(title=config.get_localized_string(70361), url=next_page, )) if itemlist: itemlist.insert(0, item.clone(title=config.get_localized_string(70362), action="", )) patron = '([^<]+)<' matches = scrapertools.find_multiple_matches(data, patron) if matches: itemlist.append(item.clone(title=config.get_localized_string(70363), action="", )) for url, thumb, title in matches: url = url.replace("embed/", "watch?v=") itemlist.append( item.clone(title=title, url=url, server="youtube", action="play", thumbnail=thumb, )) return itemlist def reco_mal(item): # Recomendaciones de myanimelist itemlist = [] data = httptools.downloadpage(item.url + "/userrecs", cookies=False).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) bloque = scrapertools.find_single_match(data, '
    ') patron = '
    (.*?).*?' \ '
    (.*?)
    ' matches = scrapertools.find_multiple_matches(data, patron) for url, thumb, title, plot in matches: new_item = item.clone(infoLabels={'mediatype': item.contentType}, action="detalles_mal", fanart=default_fan, title=title, contentType="", extra="", contentTitle=title) new_item.infoLabels["plot"] = scrapertools.htmlclean(plot) new_item.url = "https://myanimelist.net%s" % url new_item.thumbnail = thumb.replace("r/50x70/", "").replace(".jpg", "l.jpg") itemlist.append(new_item) return itemlist def indices_mal(item): # Índices por temporadas y generos itemlist = [] url_base = "" if "Temporadas" in item.title: data = httptools.downloadpage("https://myanimelist.net/anime/season/archive", cookies=False).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) matches = scrapertools.find_multiple_matches(data, '\s*
    \s*(.*?)\s*') for url, title in matches: year = title.rsplit(" ", 1)[1] thumbnail = item.thumbnail if int(year) >= 1968: thumbnail = url_base % year title = title.replace("Winter", config.get_localized_string(70364)).replace("Spring", config.get_localized_string(70365)) \ .replace("Summer", config.get_localized_string(70366)).replace("Fall", config.get_localized_string(70367)) itemlist.append(Item(channel=item.channel, action="season_mal", title=title, url=url, thumbnail=thumbnail, info=True, fanart=thumbnail)) else: data = httptools.downloadpage("https://myanimelist.net/anime.php", cookies=False).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) bloque = scrapertools.find_single_match(data, 'Genres
    (.*?)View More') matches = scrapertools.find_multiple_matches(bloque, '(.*?)') for url, title in matches: genero = title.split(" (", 1)[0] thumbnail = url_base % genero.lower().replace(" ", "%20") if genero in ["Hentai", "Yaoi", "Yuri"] and not adult_mal: continue url = "https://myanimelist.net%s" % url itemlist.append(Item(channel=item.channel, action="season_mal", title=title, url=url, thumbnail=thumbnail, fanart=thumbnail, )) return itemlist def season_mal(item): # Scraper para temporadas de anime itemlist = [] cookie_session = get_cookie_value() header_mal = {'Cookie': '%s search_sort_anime=score; search_view=tile; is_logged_in=1' % cookie_session} data = httptools.downloadpage(item.url, headers=header_mal, cookies=False).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) if item.info: patron = '
    ([^<]+)
    (.*?)
    \s*
    ' bloques = scrapertools.find_multiple_matches(data, patron) for head_title, bloque in bloques: head_title = head_title.replace("(New)", config.get_localized_string(70369)).replace("(Continuing)", config.get_localized_string(70369)) patron = '(.*?).*?(\? ep|\d+ ep).*?' \ '
    (.*?)
    .*?
    (.*?).*?
    \s*(.*?)\s*-.*?(\d{4}).*?' \ 'title="Score">\s*(N/A|\d\.\d+)' matches = scrapertools.find_multiple_matches(bloque, patron) if matches: itemlist.append(Item(channel=item.channel, action="", title=head_title, )) for url, scrapedtitle, epis, generos, thumb, plot, tipo, year, score in matches: if ("Hentai" in generos or "Yaoi" in generos or "Yuri" in generos) and adult_mal: continue scrapedtitle = scrapedtitle.replace("(TV)", "").replace("(Movie)", "") if tipo == "Movie": title = scrapedtitle + " (%s)" % year else: title = scrapedtitle + " %ss (%s)" % (epis, year) infoLabels = {} if score != "N/A": title += " %s" % (score) infoLabels["rating"] = float(score) infoLabels["plot"] = scrapertools.htmlclean(plot) infoLabels["year"] = year genres = scrapertools.find_multiple_matches(generos, 'title="([^"]+)"') infoLabels["genre"] = ", ".join(genres) tipo = tipo.lower() if tipo == "movie" or tipo == "special": extra = "movie" contentType = "movie" else: extra = "tv" contentType = "tvshow" thumb = thumb.replace("r/167x242/", "") + "l.jpg" itemlist.append(Item(channel=item.channel, action="detalles_mal", url=url, title=title, thumbnail=thumb, infoLabels=infoLabels, extra=extra, tipo=tipo, contentTitle=scrapedtitle, contentType=contentType, fanart=default_fan)) else: patron = '(.*?).*?(\? ep|\d+ ep).*?' \ '
    (.*?)
    .*?
    (.*?).*?
    \s*(.*?)\s*-.*?(\d{4}).*?' \ 'title="Score">\s*(N/A|\d\.\d+)' matches = scrapertools.find_multiple_matches(data, patron) for url, scrapedtitle, epis, generos, thumb, plot, tipo, year, score in matches: if ("Hentai" in generos or "Yaoi" in generos or "Yuri" in generos) and not adult_mal: continue scrapedtitle = scrapedtitle.replace("(TV)", "").replace("(Movie)", "") if tipo == "Movie": title = scrapedtitle + " (%s)" % year else: title = scrapedtitle + " %ss (%s)" % (epis, year) infoLabels = {} if score != "N/A": title += " %s" % (score) infoLabels["rating"] = float(score) infoLabels["plot"] = scrapertools.htmlclean(plot) infoLabels["year"] = year genres = scrapertools.find_multiple_matches(generos, 'title="([^"]+)"') infoLabels["genre"] = ", ".join(genres) tipo = tipo.lower() if tipo == "movie" or tipo == "special": extra = "movie" contentType = "movie" else: extra = "tv" contentType = "tvshow" thumb = thumb.replace("r/167x242/", "") + "l.jpg" itemlist.append(Item(channel=item.channel, action="detalles_mal", url=url, title=title, thumbnail=thumb, infoLabels=infoLabels, extra=extra, tipo=tipo, contentTitle=scrapedtitle, contentType=contentType, fanart=default_fan)) next_page = scrapertools.find_single_match(data, '' matches = scrapertools.find_multiple_matches(data, patron) if matches: itemlist.append(item.clone(title=config.get_localized_string(70370), action="", )) for url, thumb, nombre, rol, voces in matches: url = "https://myanimelist.net%s" % url rol = rol.replace("Main", "Principal").replace("Supporting", "Secundario") nombre = " %s [%s]" % (nombre, rol) thumb = thumb.replace("r/46x64/", "") itemlist.append(Item(channel=item.channel, action="detail_staff", url=url, thumbnail=thumb, fanart=default_fan, title=nombre, extra="character")) patron_voces = '([^<]+)<.*?([^<]+).*?data-src="([^"]+)"' voces_match = scrapertools.find_multiple_matches(voces, patron_voces) for vurl, vnombre, vidioma, vthumb in voces_match: vurl = "https://myanimelist.net%s" % vurl vnombre = " %s [%s]" % (vnombre, vidioma) vthumb = vthumb.replace("r/46x64/", "") itemlist.append(Item(channel=item.channel, action="detail_staff", url=vurl, thumbnail=vthumb, fanart=default_fan, title=vnombre)) bloque = scrapertools.find_single_match(data, '(.*?)') patron = '([^<]+)<.*?([^<]+)' matches = scrapertools.find_multiple_matches(bloque, patron) if matches: itemlist.append(item.clone(title="Staff", action="", )) for url, thumb, nombre, rol in matches: url = "https://myanimelist.net%s" % url nombre = " %s [%s]" % (nombre, rol) thumb = thumb.replace("r/46x64/", "") itemlist.append(Item(channel=item.channel, action="detail_staff", url=url, thumbnail=thumb, fanart=default_fan, title=nombre)) return itemlist def detail_staff(item): itemlist = [] data = httptools.downloadpage(item.url, cookies=False).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) if item.extra == "character" and not "No biography written" in data: bio = scrapertools.find_single_match(data, 'itemprop="name">.*?
    (.*?)
    ", "\n") bio = scrapertools.htmlclean(bio) if not "questionmark" in item.thumbnail: data_img = httptools.downloadpage(item.url + "/pictures", cookies=False).data matches = scrapertools.find_multiple_matches(data_img, 'rel="gallery-character">(.*?)') if matches: itemlist.append(Item(channel=item.channel, title="Animes donde aparece:", action="", )) for url, thumb, title in matches: url = "https://myanimelist.net%s" % url thumb = thumb.replace("r/23x32/", "") itemlist.append(Item(channel=item.channel, action="detalles_mal", url=url, thumbnail=thumb, fanart=default_fan, title=title, contentTitle=title)) else: patron_bio = '
    .*?
    (.*?)' bio = scrapertools.find_single_match(data, patron_bio) bio = scrapertools.htmlclean(bio.replace("
    ", "\n")) infoLabels = {'plot': bio} if not "No voice acting roles" in data: itemlist.append(Item(channel=item.channel, title="Da voz a/en:", action="", thumbnail=item.thumbnail, infoLabels=infoLabels)) bloque = scrapertools.find_single_match(data, 'Voice Acting Roles
    (.*?)') patron = '(.*?).*?href="(/character[^"]+)".*?' \ '>(.*?).*?data-src="([^"]+)"' matches = scrapertools.find_multiple_matches(bloque, patron) for url, thumb, title, url_p, personaje, thumb_p in matches: url = "https://myanimelist.net%s" % url url_p = "https://myanimelist.net%s" % url_p thumb = thumb.replace("r/46x64/", "") thumb_p = thumb_p.replace("r/46x64/", "") itemlist.append(Item(channel=item.channel, action="detalles_mal", url=url, thumbnail=thumb, fanart=default_fan, title=title, contentTitle=title)) itemlist.append(Item(channel=item.channel, action="detail_staff", url=url_p, thumbnail=thumb_p, fanart=default_fan, title=" %s" % personaje, extra="character")) if not "No staff positions" in data: itemlist.append(Item(channel=item.channel, title="Staff en animes:", action="", thumbnail=item.thumbnail, infoLabels=infoLabels)) bloque = scrapertools.find_single_match(data, 'Anime Staff Positions
    (.*?)') patron = '(.*?).*?(.*?)
    ' matches = scrapertools.find_multiple_matches(bloque, patron) for url, thumb, title, rol in matches: url = "https://myanimelist.net%s" % url thumb = thumb.replace("r/46x64/", "") rol = scrapertools.htmlclean(rol) titulo = "%s [%s]" % (title, rol) itemlist.append(Item(channel=item.channel, action="detalles_mal", url=url, thumbnail=thumb, fanart=default_fan, title=titulo, contentTitle=title)) return itemlist def busqueda_mal(item): # Scraper para búsquedas en myanimelist itemlist = [] cookie_session = get_cookie_value() header_mal = {'Cookie': '%s search_sort_anime=score; search_view=tile; is_logged_in=1' % cookie_session} data = httptools.downloadpage(item.url, headers=header_mal, cookies=False).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) patron = '([^<]+)<.*?
    (.*?)<' \ '.*?(.*?).*?(.*?).*?(.*?).*?(.*?)' matches = scrapertools.find_multiple_matches(data, patron) for url, thumb, titulo, plot, tipo, epis, rating, date in matches: infolabels = {"mediatype": "tvshow"} contentType = "tvshow" extra = "tv" titulo = titulo.strip() tipo = tipo.strip() rating = rating.strip() epis = epis.strip() infolabels["plot"] = scrapertools.htmlclean(plot.strip()) thumb = thumb.replace("r/50x70/", "").replace(".jpg", "l.jpg") show = titulo contentitle = titulo title = titulo try: year = date.strip().rsplit("-", 1)[1] if year.isdigit(): if int(year) < 30: year = "20%s" % year else: year = "19%s" % year infolabels["year"] = year if not year in title: title += " (%s)" % year except: import traceback logger.error(traceback.format_exc()) if tipo == "Movie" or tipo == "OVA": infolabels["mediatype"] = "movie" contentType = "movie" extra = "movie" show = "" if epis and tipo != "Movie": title += " %s eps" % epis if rating != "0.00" and rating != "N/A": infolabels["rating"] = float(rating) title += " %s" % (rating) itemlist.append(Item(channel=item.channel, title=title, action="detalles_mal", url=url, show=show, thumbnail=thumb, infoLabels=infolabels, contentTitle=contentitle, contentType=contentType, tipo=tipo.lower(), extra=extra)) if not "&show=" in item.url: next_page = item.url + "&show=50" else: pagina = int(item.url.rsplit("=", 1)[1]) next_page = item.url.replace("&show=%s" % str(pagina), "&show=%s" % str(pagina + 50)) check_page = next_page.replace("https://myanimelist.net/anime.php", "") if check_page in data: itemlist.append(item.clone(title=config.get_localized_string(70065), url=next_page, )) else: check_page = check_page.replace("[", "%5B").replace("]", "%5D") if check_page in data: itemlist.append(item.clone(title=config.get_localized_string(70065), url=next_page, )) return itemlist def info_anidb(item, itemlist, url): # Extrae info, puntuación y fansubs en anidb data = httptools.downloadpage(url).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) infoLabels = {'mediatype': item.contentType} plot = scrapertools.find_single_match(data, 'itemprop="description">(.*?)
    ') infoLabels["plot"] = scrapertools.htmlclean(plot) generos = scrapertools.find_multiple_matches(data, '
    (.*?)') for i, genero in enumerate(generos): generos[i] = genero.capitalize() infoLabels["genre"] = ", ".join(generos) rating = scrapertools.find_single_match(data, 'itemprop="ratingValue">(.*?)') try: infoLabels["rating"] = float(rating) except: pass infoLabels["votes"] = scrapertools.find_single_match(data, 'itemprop="ratingCount">(.*?)') thumbnail = scrapertools.find_single_match(data, '
    .*?src="([^"]+)"') if infoLabels: title = config.get_localized_string(70376) % (rating) if re.search(r'(?:subtitle|audio) | language: spanish"', data): title += config.get_localized_string(70377) itemlist.append(Item(channel=item.channel, title=title, infoLabels=infoLabels, action="", thumbnail=thumbnail, )) if re.search(r'(?:subtitle|audio) | language: spanish"', data): epi_total = scrapertools.find_single_match(data, 'itemprop="numberOfEpisodes">([^<]+)') patron = '.*?title="([^"]+)">(.*?).*?>([^<]+).*?' \ '([^<]+).*?title="audio(.*?).*?' \ 'class="source" title="([^"]+)"' matches = scrapertools.find_multiple_matches(data, patron) for fansub, abrev, estado, epis, lang, source in matches: if not "spanish" in lang: continue title = " " + fansub if abrev != title: title += " [%s]" % abrev estado = estado.replace("complete", config.get_localized_string(70378)).replace("finished", config.get_localized_string(70379)) \ .replace("stalled", config.get_localized_string(70380)).replace("dropped", config.get_localized_string(70381)) title += " (%s) %s/%s [%s]" % (estado, epis, epi_total, source) itemlist.append(Item(channel=item.channel, title=title, infoLabels=infoLabels, action="", thumbnail=thumbnail, )) def filtro_mal(item): logger.info() list_controls = [] valores = {} dict_values = None # Se utilizan los valores por defecto/guardados valores_guardados = config.get_setting("filtro_defecto_mal", item.channel) if valores_guardados: dict_values = valores_guardados list_controls.append({'id': 'keyword', 'label': config.get_localized_string(70465), 'enabled': True, 'type': 'text', 'default': '', 'visible': True}) list_controls.append({'id': 'tipo', 'label': config.get_localized_string(70482), 'enabled': True, 'type': 'list', 'default': -1, 'visible': True}) list_controls[1]['lvalues'] = [config.get_localized_string(70483), config.get_localized_string(70484), config.get_localized_string(60244), config.get_localized_string(70136), config.get_localized_string(70450)] valores["tipo"] = ['4', '2', '3', '1', '0'] list_controls.append({'id': 'valoracion', 'label': config.get_localized_string(70473), 'enabled': True, 'type': 'list', 'default': -1, 'visible': True}) list_controls[2]['lvalues'] = [config.get_localized_string(70486), config.get_localized_string(70487), config.get_localized_string(70488), config.get_localized_string(70489), config.get_localized_string(70490), config.get_localized_string(70491), config.get_localized_string(70492), config.get_localized_string(70493), config.get_localized_string(70494), config.get_localized_string(70495), config.get_localized_string(70450)] valores["valoracion"] = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '0'] list_controls.append({'id': 'estado', 'label': config.get_localized_string(70485), 'enabled': True, 'type': 'list', 'default': -1, 'visible': True}) list_controls[3]['lvalues'] = ['Por estrenar', config.get_localized_string(60264), config.get_localized_string(70379), config.get_localized_string(70450)] valores["estado"] = ['3', '1', '2', '0'] try: data = httptools.downloadpage('https://myanimelist.net/anime.php', cookies=False).data patron = 'name="genre\[\]" type="checkbox" value="([^"]+)">.*?>([^<]+)<' generos = scrapertools.find_multiple_matches(data, patron) if generos: list_controls.append({'id': 'labelgenre', 'enabled': True, 'type': 'label', 'default': None, 'label': config.get_localized_string(70451), 'visible': True, 'color': '0xFFC52020'}) for value, genre in generos: list_controls.append({'id': 'genre' + value, 'label': genre, 'enabled': True, 'type': 'bool', 'default': False, 'visible': True}) except: pass list_controls.append({'id': 'espacio', 'label': '', 'enabled': False, 'type': 'label', 'default': None, 'visible': True}) list_controls.append({'id': 'save', 'label': config.get_localized_string(70464), 'enabled': True, 'type': 'bool', 'default': False, 'visible': True}) item.valores = valores return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values, caption=config.get_localized_string(70320), item=item, callback='callback_mal') def callback_mal(item, values): values_copy = values.copy() # Guarda el filtro para que sea el que se cargue por defecto if "save" in values and values["save"]: values_copy.pop("save") config.set_setting("filtro_defecto_mal", values_copy, item.channel) genero_ids = [] for v in values: if "genre" in v: if values[v]: genero_ids.append("genre[%s]=%s" % (len(genero_ids), v.replace('genre', ''))) genero_ids = "&".join(genero_ids) query = values["keyword"].replace(" ", "%20") tipo = item.valores["tipo"][values["tipo"]] valoracion = item.valores["valoracion"][values["valoracion"]] estado = item.valores["estado"][values["estado"]] item.url = "https://myanimelist.net/anime.php?q=%s&type=%s&score=%s&status=%s" \ "&p=0&r=0&sm=0&sd=0&sy=0&em=0&ed=0&ey=0&c[0]=a&c[1]=b&c[2]=c&c[3]=d&c[4]=f&gx=0" \ % (query, tipo, valoracion, estado) if genero_ids: item.url += "&" + genero_ids item.action = "busqueda_mal" return busqueda_mal(item) def musica_anime(item): # Lista los animes y canciones disponibles similares al título del anime logger.info() itemlist = [] data = httptools.downloadpage("http://www.freeanimemusic.org/song_search.php", post=item.post).data patron = "(\d+).*?([^<]+)<.*?([^<]+)<" \ ".*?href='http://www.freeanimemusic.org/anime/([^/]+)/index.php\?var=(\d+)" matches = scrapertools.find_multiple_matches(data, patron) animes = {} action = "" if config.is_xbmc(): action = "move" for number, song, anime, id_anime, id_song in matches: if not animes.get(anime): animes[anime] = [] animes[anime].append( Item(channel=item.channel, action=action, title="[%s]" % (anime.capitalize()), url="", number="0", thumbnail=item.thumbnail, fanart=item.fanart)) title = "%s - %s" % (number, song) animes[anime].append( Item(channel=item.channel, action="play", title=title, server="directo", url=id_anime, song=id_song, number=number, thumbnail=item.thumbnail, fanart=item.fanart, )) for k, v in sorted(animes.items()): v.sort(key=lambda x: (x.url, int(x.number))) for lt in v: if lt.action == "move": lt.extra = len(v) lt.folder = False itemlist.append(lt) return itemlist def login_mal(from_list=False): logger.info() try: user = config.get_setting("usuariomal", "tvmoviedb") password = config.get_setting("passmal", "tvmoviedb") generic = False if not user or not password: if not from_list: user = bdec("Y3VlbnRhdHZtb3ZpZWRi") password = bdec("dFlTakE3ekYzbng1") generic = True else: return False, config.get_localized_string(70381), user data = httptools.downloadpage("https://myanimelist.net/login.php?from=%2F").data if re.search(r'(?i)' + user, data) and not generic: return True, "", user token = scrapertools.find_single_match(data, "name='csrf_token' content='([^']+)'") response = httptools.downloadpage("https://myanimelist.net/logout.php", post="csrf_token=%s" % token) post = "user_name=%s&password=%s&cookie=1&sublogin=Login&submit=1&csrf_token=%s" % (user, password, token) response = httptools.downloadpage("https://myanimelist.net/login.php?from=%2F", post=post) if not re.search(r'(?i)' + user, response.data): logger.error("Error en el login") return False, config.get_localized_string(70330), user else: if generic: return False, config.get_localized_string(70381), user logger.info("Login correcto") return True, "", user except: import traceback logger.error(traceback.format_exc()) return False, config.get_localized_string(70331) def cuenta_mal(item): # Menú de cuenta myanimelist itemlist = [] login, message, user = login_mal(True) if not login: itemlist.append(item.clone(action="configuracion", title=message, )) else: itemlist.append( item.clone(action="items_mal", title=config.get_localized_string(70382), accion="lista_mal", url="https://myanimelist.net/animelist/%s?status=1" % user, login=True)) itemlist.append(item.clone(action="items_mal", title=config.get_localized_string(70383), accion="lista_mal", url="https://myanimelist.net/animelist/%s?status=2" % user, login=True)) itemlist.append(item.clone(action="items_mal", title=config.get_localized_string(70384), accion="lista_mal", url="https://myanimelist.net/animelist/%s?status=3" % user, login=True)) itemlist.append(item.clone(action="items_mal", title=config.get_localized_string(70385), accion="lista_mal", url="https://myanimelist.net/animelist/%s?status=4" % user, login=True)) itemlist.append(item.clone(action="items_mal", title=config.get_localized_string(70386), accion="lista_mal", url="https://myanimelist.net/animelist/%s?status=6" % user, login=True)) return itemlist def items_mal(item): # Scraper para las listas personales logger.info() itemlist = [] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) data_items = scrapertools.find_single_match(data, 'data-items="([^"]+)"') data_items = data_items.replace(""", "'").replace("null", "None") \ .replace("false", "False").replace("true", "True") data_items = eval(data_items) for d in data_items: if d["anime_airing_status"] == 1: title = "[E]" if d["anime_airing_status"] == 2: title = "[F]" else: title = "[P]" title += typo(" %s [%s/%s] (%s)" % ( d["anime_title"], d["num_watched_episodes"], d["anime_num_episodes"], d["anime_media_type_string"]),'bold') title = title.replace("\\", "") contentTitle = d["anime_title"].replace("\\", "") thumbnail = d["anime_image_path"].replace("\\", "").replace("r/96x136/", "").replace(".jpg", "l.jpg") url = "https://myanimelist.net" + d["anime_url"].replace("\\", "") if d["score"] != 0: title += typo(" Punt:%s" % (d["score"]),'color kod bold') if title.count("(TV)") == 2: title = title.replace("] (TV)", "]") elif title.count("(Movie)") == 2: title = title.replace("] (Movie)", "]") tipo = "tvshow" extra = "tv" if "Movie" in d["anime_media_type_string"]: tipo = "movie" extra = "movie" itemlist.append(Item(channel=item.channel, action="detalles_mal", url=url, title=title, thumbnail=thumbnail, contentTitle=contentTitle, contentType=tipo, extra=extra, login=True)) if itemlist: itemlist.insert(0, Item(channel=item.channel, action="", title=config.get_localized_string(70387))) return itemlist def menu_mal(item): # Opciones cuenta MAL, añadir a lista/votar itemlist = [] data = httptools.downloadpage(item.url).data try: status = {'1': config.get_localized_string(70479), '2': config.get_localized_string(70480), '3': config.get_localized_string(70384), '4': config.get_localized_string(70385), '6': config.get_localized_string(70481)} button, estado = scrapertools.find_single_match(data, 'myinfo_updateInfo"(.*?)>.*?option selected="selected" value="(\d+)"') if "disabled" in button: title_estado = config.get_localized_string(70388) estado = "1" else: title_estado = typo(config.get_localized_string(70389) % (status[estado]), 'bold') except: title_estado = config.get_localized_string(70388) score = scrapertools.find_single_match(data, 'id="myinfo_score".*?selected" value="(\d+)"') if score != "0": title_estado += " (Punt:%s)" % score if "lista" in title_estado: item.lista = True itemlist.append(item.clone(title="Anime: %s%s" % (item.contentTitle, title_estado), action="")) status = {'1': config.get_localized_string(70479), '2': config.get_localized_string(70480), '3': config.get_localized_string(70384), '4': config.get_localized_string(70385), '6': config.get_localized_string(70481)} for key, value in status.items(): if not value in title_estado: itemlist.append( item.clone(title=typo(config.get_localized_string(70391) % value, 'bold'), action="addlist_mal", value=key, estado=value)) for i in range(10, 0, -1): if i != int(score): itemlist.append(item.clone(title=typo(config.get_localized_string(70392) % (i), 'bold'), action="addlist_mal", value=estado, estado=status[estado], score=i)) return itemlist def addlist_mal(item): data = httptools.downloadpage(item.url).data anime_id = scrapertools.find_single_match(data, 'id="myinfo_anime_id" value="([^"]+)"') if item.value == "2": vistos = scrapertools.find_single_match(data, 'id="myinfo_watchedeps".*?(\d+)') else: vistos = scrapertools.find_single_match(data, 'id="myinfo_watchedeps".*?value="(\d+)"') if not item.score: item.score = scrapertools.find_single_match(data, 'id="myinfo_score".*?selected" value="(\d+)"') token = scrapertools.find_single_match(data, "name='csrf_token' content='([^']+)'") post = {'anime_id': int(anime_id), 'status': int(item.value), 'score': int(item.score), 'num_watched_episodes': int(vistos), 'csrf_token': token} headers_mal = {'User-Agent': 'Mozilla/5.0', 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8', 'Referer': item.url, 'X-Requested-With': 'XMLHttpRequest'} url = "https://myanimelist.net/ownlist/anime/add.json" if item.lista: url = "https://myanimelist.net/ownlist/anime/edit.json" #data = httptools.downloadpage(url, post=jsontools.dump(post), headers=headers_mal, replace_headers=True).data data = httptools.downloadpage(url, post=jsontools.dump(post), headers=headers_mal).data item.title = "En tu lista" if config.is_xbmc(): import xbmc xbmc.executebuiltin("Container.Refresh") def move(item): import xbmcgui, xbmc item_focus = str(item.extra) wnd = xbmcgui.Window(xbmcgui.getCurrentWindowId()) id = wnd.getFocusId() return xbmc.executebuiltin('Control.Move(' + str(id) + ',' + item_focus + ')') def play(item): itemlist = [] if not item.server: data = httptools.downloadpage(item.url).data if "Sorry, this video is not available to be embedded" in data: id_video = scrapertools.find_single_match(data, '