# -*- coding: utf-8 -*- import re import urllib from base64 import b64decode as bdec from core import filetools from core import httptools from core import jsontools from core import scrapertools from core.item import Item from core.tmdb import Tmdb from platformcode import config, logger from platformcode import platformtools __perfil__ = config.get_setting('perfil', "tvmoviedb") # Fijar perfil de color perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFF088A08', '0xFFFFD700'], ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFF088A08', '0xFFFFD700'], ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFF088A08', '0xFFFFD700']] if __perfil__ < 3: color1, color2, color3, color4, color5, color6 = perfil[__perfil__] else: color1 = color2 = color3 = color4 = color5 = color6 = "" langs = ['de', 'fr', 'pt', 'it', 'es-MX', 'ca', 'en', 'es'] langt = langs[config.get_setting('tmdb', "tvmoviedb")] langt_alt = langs[config.get_setting('tmdb_alternativo', "tvmoviedb")] langs = ['co', 'cl', 'ar', 'mx', 'en', 'es'] langf = langs[config.get_setting('filmaff', "tvmoviedb")] langs = ['de-de', 'fr-fr', 'pt-pt', 'it-it', 'es-MX', 'ca-es', 'en', 'es'] langi = langs[config.get_setting('imdb', "tvmoviedb")] adult_mal = config.get_setting('adult_mal', "tvmoviedb") mal_ck = "MzE1MDQ2cGQ5N2llYTY4Z2xwbGVzZjFzbTY=" images_predef = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/" default_fan = filetools.join(config.get_runtime_path(), "fanart.jpg") def mainlist(item): logger.info() item.text_color = color1 itemlist = [] itemlist.append(item.clone(title="Búsqueda en TMDB", action="", text_color=color2)) itemlist.append(item.clone(title=" - Películas", action="tmdb", extra="movie", thumbnail="%s0/Movies.png" % images_predef)) itemlist.append(item.clone(title=" - Series", action="tmdb", extra="tv", thumbnail=images_predef + "0/TV%20Series.png")) itemlist.append(item.clone(title="Búsqueda en Filmaffinity", action="", 
text_color=color2)) itemlist.append(item.clone(title=" - Películas", action="filmaf", extra="movie", thumbnail="%s0/Movies.png" % images_predef)) itemlist.append(item.clone(title=" - Series", action="filmaf", extra="tv", thumbnail=images_predef + "0/TV%20Series.png")) itemlist.append(item.clone(title="Búsqueda en IMDB", action="", text_color=color2)) itemlist.append(item.clone(title=" - Películas", action="imdb", extra="movie", url='&title_type=feature,tv_movie', thumbnail="%s0/Movies.png" % images_predef)) itemlist.append(item.clone(title=" - Series", action="imdb", extra="tv", url='&title_type=tv_series,tv_special,mini_series', thumbnail=images_predef + "0/TV%20Series.png")) itemlist.append( item.clone(title="Trakt.tv", action="trakt", text_color=color2, thumbnail="http://i.imgur.com/5sQjjuk.png")) itemlist.append( item.clone(title="MyAnimeList", action="mal", text_color=color2, thumbnail="http://i.imgur.com/RhsYWmd.png")) itemlist.append(item.clone(title="", action="")) itemlist.append( item.clone(title="Ajustes motores de búsqueda", action="configuracion", text_color=color6, folder=False)) return itemlist def configuracion(item): ret = platformtools.show_channel_settings() platformtools.itemlist_refresh() return ret def search_star(item): logger.info() itemlist = [] item.type='movie' itemlist.extend(search_(item)) item.type='tvshow' itemlist.extend(search_(item)) return itemlist def search_(item): texto = platformtools.dialog_input(heading=item.title) if texto: if "imdb" in item.url: item.url += texto.replace(" ", "+") item.action = "listado_imdb" return listado_imdb(item) if "filmaffinity" in item.url: item.url += texto.replace(" ", "+") item.action = "listado_fa" return listado_fa(item) if "myanimelist" in item.url: item.url += texto.replace(" ", "%20") item.url += "&type=0&score=0&status=0&p=0&r=0&sm=0&sd=0&sy=0&em=0&ed=0&ey=0&c[0]=a" \ "&c[1]=b&c[2]=c&c[3]=d&c[4]=f&gx=0" item.action = "busqueda_mal" return busqueda_mal(item) item.search['query'] = texto 
item.action = "listado_tmdb" if item.star == True: types = ['movie','tv'] itemlist = [] for type in types: item.contentType = type item.search['type']=type itemlist.extend(listado_tmdb(item)) return itemlist else: return listado_tmdb(item) def busqueda(item): logger.info() cat = [item.extra.replace("tv", "serie")] new_item = Item() new_item.extra = item.contentTitle.replace("+", " ") new_item.category = item.extra from channels import search return search.do_search(new_item, cat) def tmdb(item): item.contentType = item.extra.replace("tv", "tvshow") itemlist = [] itemlist.append(item.clone(title="Más Populares", action="listado_tmdb", search={'url': item.extra + "/popular", 'language': langt, 'page': 1})) itemlist.append(item.clone(title="Más Valoradas", action="listado_tmdb", search={'url': item.extra + "/top_rated", 'language': langt, 'page': 1})) if item.extra == "movie": itemlist.append(item.clone(title="En Cartelera", action="listado_tmdb", search={'url': item.extra + "/now_playing", 'language': langt, 'page': 1})) else: itemlist.append(item.clone(title="En Emisión", action="listado_tmdb", search={'url': item.extra + "/on_the_air", 'language': langt, 'page': 1})) itemlist.append(item.clone(title="Géneros", action="indices_tmdb", thumbnail="%s0/Genres.png" % images_predef)) itemlist.append(item.clone(title="Año", action="indices_tmdb", thumbnail="%s0/Year.png" % images_predef)) if item.extra == "movie": itemlist.append(item.clone(title="Actores/Actrices por popularidad", action="listado_tmdb", search={'url': 'person/popular', 'language': langt, 'page': 1})) itemlist.append(item.clone(title="Próximamente", action="listado_tmdb", search={'url': item.extra + "/upcoming", 'language': langt, 'page': 1})) if config.get_platform() != "plex": title = item.contentType.replace("movie", "película").replace("tvshow", "serie") itemlist.append(item.clone(title="Buscar %s" % title, action="search_", search={'url': 'search/%s' % item.extra, 'language': langt, 'page': 1})) 
itemlist.append(item.clone(title=" Buscar actor/actriz", action="search_", search={'url': 'search/person', 'language': langt, 'page': 1})) if item.extra == "movie": itemlist.append(item.clone(title=" Buscar director, guionista...", action="search_", search={'url': "search/person", 'language': langt, 'page': 1}, crew=True)) itemlist.append(item.clone(title="Filtro Personalizado", action="filtro", text_color=color4)) itemlist.append(item.clone(title="Filtro por palabra clave", action="filtro", text_color=color4)) return itemlist def imdb(item): item.contentType = item.extra.replace("tv", "tvshow") itemlist = [] itemlist.append(item.clone(title="Más Populares", action="listado_imdb")) itemlist.append(item.clone(title="Más Valoradas", action="listado_imdb", url=item.url + "&num_votes=25000,&sort=user_rating,desc")) if item.extra == "movie": itemlist.append(item.clone(title="En Cartelera", action="listado_imdb", url="http://www.imdb.com/showtimes/location?ref_=inth_ov_sh_sm")) itemlist.append(item.clone(title="Géneros", action="indices_imdb", thumbnail="%s0/Genres.png" % images_predef)) itemlist.append(item.clone(title="Año", action="indices_imdb", thumbnail="%s0/Year.png" % images_predef)) if item.extra == "movie": itemlist.append(item.clone(title="Actores/Actrices por popularidad", action="listado_imdb", url="http://www.imdb.com/search/name?gender=male,female&ref_=nv_cel_m_3")) itemlist.append(item.clone(title="Próximamente", action="listado_imdb", url="http://www.imdb.com/movies-coming-soon/?ref_=shlc_cs")) if config.get_platform() != "plex": title = item.contentType.replace("movie", "película").replace("tvshow", "serie") itemlist.append(item.clone(title="Buscar %s" % title, action="search_", url="http://www.imdb.com/search/title?title=" + item.url)) itemlist.append(item.clone(title=" Buscar actor/actriz", action="search_", url="http://www.imdb.com/search/name?name=")) itemlist.append(item.clone(title="Filtro Personalizado", action="filtro_imdb", text_color=color4)) 
return itemlist def filmaf(item): item.contentType = item.extra.replace("tv", "tvshow") login, message = login_fa() itemlist = [] if item.extra == "movie": itemlist.append(item.clone(title="Top Filmaffinity", action="listado_fa", extra="top", url="http://m.filmaffinity.com/%s/topgen.php?genre=&country=&" "fromyear=&toyear=¬vse=1&nodoc=1" % langf)) itemlist.append(item.clone(title="En Cartelera", action="listado_fa", url="http://m.filmaffinity.com/%s/rdcat.php?id=new_th_%s" % (langf, langf))) itemlist.append(item.clone(title="Géneros", action="indices_fa", url="http://m.filmaffinity.com/%s/topgen.php" % langf, thumbnail="%s0/Genres.png" % images_predef)) else: itemlist.append(item.clone(title="Top Filmaffinity", action="listado_fa", extra="top", url="http://m.filmaffinity.com/%s/topgen.php?genre=TV_SE&country=&" "fromyear=&toyear=&nodoc" % langf)) itemlist.append(item.clone(title="Series de actualidad", action="listado_fa", url="http://m.filmaffinity.com/%s/category.php?id=current_tv" % langf)) itemlist.append(item.clone(title="Año", action="indices_fa", thumbnail="%s0/Year.png" % images_predef)) if item.extra == "movie": itemlist.append(item.clone(title="Próximos Estrenos", action="listado_fa", extra="estrenos", url="http://m.filmaffinity.com/%s/rdcat.php?id=upc_th_%s" % (langf, langf))) itemlist.append(item.clone(title="Sagas y Colecciones", action="indices_fa", extra="sagas", url="http://www.filmaffinity.com/%s/movie-groups-all.php" % langf)) itemlist.append(item.clone(title="Películas/Series/Documentales por Temas", action="indices_fa", url='http://m.filmaffinity.com/%s/topics.php' % langf, text_color=color3)) if config.get_platform() != "plex": itemlist.append(item.clone(title="Buscar Películas/Series", action="search_", text_color=color4, url="http://m.filmaffinity.com/%s/search.php?stype=title&stext=" % langf)) itemlist.append(item.clone(title=" Buscar por actor/actriz", action="search_", text_color=color4, 
url="http://m.filmaffinity.com/%s/search.php?stype=cast&stext=" % langf)) itemlist.append(item.clone(title=" Buscar por director", action="search_", text_color=color4, url="http://m.filmaffinity.com/%s/search.php?stype=director&stext=" % langf)) itemlist.append(item.clone(title="Filtro Personalizado", action="filtro_fa", text_color=color4, extra="top")) itemlist.append(item.clone(title="Mi cuenta", action="cuenta_fa", text_color=color3)) return itemlist def trakt(item): itemlist = [] item.text_color = color1 token_auth = config.get_setting("token_trakt", "tvmoviedb") page = "?page=1&limit=20&extended=full" if not item.extra: item.extra = "movie" itemlist.append(item.clone(title="Películas", action="", text_color=color2)) itemlist.append(item.clone(title=" Más Populares", action="acciones_trakt", url="movies/popular%s" % page)) itemlist.append( item.clone(title=" Viéndose Ahora", action="acciones_trakt", url="movies/trending%s" % page)) itemlist.append(item.clone(title=" Más Vistas", action="acciones_trakt", url="movies/watched/all%s" % page)) itemlist.append( item.clone(title=" Más Esperadas", action="acciones_trakt", url="movies/anticipated%s" % page)) if token_auth: itemlist.append(item.clone(title=" Recomendaciones personalizadas", action="acciones_trakt", url="recommendations/movies?limit=100&extended=full", pagina=0)) itemlist.append(item.clone(title="Series", action="", text_color=color2)) item.extra = "show" itemlist.append(item.clone(title=" Más Populares", action="acciones_trakt", url="shows/popular%s" % page)) itemlist.append(item.clone(title=" Viéndose Ahora", action="acciones_trakt", url="shows/trending%s" % page)) itemlist.append(item.clone(title=" Más Vistas", action="acciones_trakt", url="shows/watched/all%s" % page)) itemlist.append( item.clone(title=" Más Esperadas", action="acciones_trakt", url="shows/anticipated%s" % page)) if token_auth: itemlist.append(item.clone(title=" Recomendaciones personalizadas", action="acciones_trakt", 
url="recommendations/shows?limit=100&extended=full", pagina=0)) itemlist.append(item.clone(title=" Mi Cuenta", text_color=color2, extra="cuenta")) else: item.extra = "movie" # Se comprueba si existe un token guardado y sino se ejecuta el proceso de autentificación if not token_auth: folder = (config.get_platform() == "plex") itemlist.append(item.clone(title="Vincula tu cuenta trakt", action="auth_trakt", folder=folder)) else: itemlist.append(item.clone(title="Watchlists", action="", text_color=color2)) itemlist.append( item.clone(title=" Películas", action="acciones_trakt", url="users/me/watchlist/movies%s" % page, order="added", how="desc")) itemlist.append( item.clone(title=" Series", action="acciones_trakt", url="users/me/watchlist/shows%s" % page, extra="show", order="added", how="desc")) itemlist.append(item.clone(title="Vistas", action="", text_color=color2)) itemlist.append( item.clone(title=" Películas", action="acciones_trakt", url="users/me/watched/movies%s" % page, order="added", how="desc")) itemlist.append( item.clone(title=" Series", action="acciones_trakt", url="users/me/watched/shows%s" % page, extra="show", order="added", how="desc")) itemlist.append(item.clone(title="En mi Colección", action="", text_color=color2)) itemlist.append( item.clone(title=" Películas", action="acciones_trakt", url="users/me/collection/movies%s" % page, order="added", how="desc")) itemlist.append( item.clone(title=" Series", action="acciones_trakt", url="users/me/collection/shows%s" % page, extra="show", order="added", how="desc")) itemlist.append( item.clone(title="Mis listas", action="acciones_trakt", url="users/me/lists", text_color=color2)) return itemlist def mal(item): itemlist = [] item.text_color = color1 login, message, user = login_mal() if login: item.login = True itemlist.append( item.clone(title="Top Series", url="https://myanimelist.net/topanime.php?type=tv&limit=0", action="top_mal", contentType="tvshow", extra="tv")) itemlist.append(item.clone(title="Top 
Películas", url="https://myanimelist.net/topanime.php?type=movie&limit=0", action="top_mal", contentType="movie", extra="movie")) itemlist.append( item.clone(title="Top Ovas", url="https://myanimelist.net/topanime.php?type=ova&limit=0", action="top_mal", contentType="tvshow", extra="tv", tipo="ova")) itemlist.append( item.clone(title="Más Populares", url="https://myanimelist.net/topanime.php?type=bypopularity&limit=0", action="top_mal")) itemlist.append(item.clone(title="Más Esperados", url="https://myanimelist.net/topanime.php?type=upcoming&limit=0", action="top_mal")) itemlist.append(item.clone(title="Anime por Temporadas", url="", action="indices_mal")) itemlist.append(item.clone(title="Anime por Géneros", url="", action="indices_mal")) if config.get_platform() != "plex": itemlist.append(item.clone(title="Buscar Series/Películas/Ovas", url="https://myanimelist.net/anime.php?q=", action="search_")) itemlist.append(item.clone(title="Filtro Personalizado", action="filtro_mal", text_color=color4)) itemlist.append(item.clone(title="Mis listas", action="cuenta_mal", text_color=color3)) return itemlist ##-------------------- SECCION TMDB ------------------------## def listado_tmdb(item): # Listados principales de la categoría Tmdb (Más populares, más vistas, etc...) 
itemlist = [] item.text_color = color1 item.fanart = default_fan if not item.pagina: item.pagina = 1 # Listado de actores if 'nm' in item.infoLabels['imdb_id']: try: ob_tmdb = Tmdb(discover=item.search, tipo=item.extra, idioma_busqueda=langt) id_cast = ob_tmdb.result["person_results"][0]["id"] if item.contentType == "movie": item.search = {'url': 'discover/movie', 'with_cast': id_cast, 'page': item.pagina, 'sort_by': 'primary_release_date.desc', 'language': langt} else: item.search = {'url': 'person/%s/tv_credits' % id_cast, 'language': langt} ob_tmdb = Tmdb(discover=item.search, tipo=item.extra, idioma_busqueda=langt) except: pass else: ob_tmdb = Tmdb(discover=item.search, tipo=item.extra, idioma_busqueda=langt) # Sagas y colecciones if "collection" in item.search["url"]: try: new_item = item.clone(action="", url='') new_item.infoLabels["plot"] = ob_tmdb.result["overview"] itemlist.append(new_item) for parte in ob_tmdb.result["parts"]: new_item = item.clone(action="detalles") new_item.infoLabels = ob_tmdb.get_infoLabels(new_item.infoLabels, origen=parte) if new_item.infoLabels['thumbnail']: new_item.thumbnail = new_item.infoLabels['thumbnail'] if new_item.infoLabels['fanart']: new_item.fanart = new_item.infoLabels['fanart'] if new_item.infoLabels['year']: new_item.title = "%s (%s) [COLOR %s]%s[/COLOR]" \ % (new_item.contentTitle, new_item.infoLabels['year'], color6, str(new_item.infoLabels['rating']).replace("0.0", "")) else: new_item.title = "%s [COLOR %s]%s[/COLOR]" \ % (new_item.contentTitle, color6, new_item.infoLabels['rating'].replace("0.0", "")) itemlist.append(new_item) except: pass else: try: orden = False # Si se hace una búsqueda por actores o directores, se extraen esos resultados if "cast" in ob_tmdb.result and not item.crew: ob_tmdb.results = ob_tmdb.result["cast"] orden = True elif "crew" in ob_tmdb.result and item.crew: ob_tmdb.results = ob_tmdb.result["crew"] orden = True for i in range(0, len(ob_tmdb.results)): new_item = 
item.clone(action="detalles", url='', infoLabels={'mediatype': item.contentType}) new_item.infoLabels = ob_tmdb.get_infoLabels(new_item.infoLabels, origen=ob_tmdb.results[i]) # Si no hay sinopsis en idioma elegido, buscar en el alternativo if not new_item.infoLabels["plot"] and not 'person' in item.search["url"]: ob_tmdb2 = Tmdb(id_Tmdb=new_item.infoLabels["tmdb_id"], tipo=item.extra, idioma_busqueda=langt_alt) new_item.infoLabels["plot"] = ob_tmdb2.get_sinopsis() if new_item.infoLabels['thumbnail']: new_item.thumbnail = new_item.infoLabels['thumbnail'] elif new_item.infoLabels['profile_path']: new_item.thumbnail = 'http://image.tmdb.org/t/p/original' + new_item.infoLabels['profile_path'] new_item.infoLabels['profile_path'] = '' new_item.plot = new_item.infoLabels["biography"] if not item.search.get('with_cast', '') and not item.search.get('with_crew', ''): if item.contentType == "movie": new_item.action = "listado_tmdb" cast = 'with_cast' if item.crew: cast = 'with_crew' new_item.search = {'url': 'discover/movie', cast: new_item.infoLabels['tmdb_id'], 'sort_by': 'primary_release_date.desc', 'language': langt, 'page': item.pagina} else: new_item.action = "listado_tmdb" new_item.search = {'url': 'person/%s/tv_credits' % new_item.infoLabels['tmdb_id'], 'language': langt} elif not new_item.infoLabels['thumbnail'] and not new_item.infoLabels['profile_path']: new_item.thumbnail = '' if new_item.infoLabels['fanart']: new_item.fanart = new_item.infoLabels['fanart'] if not 'person' in item.search["url"] or 'tv_credits' in item.search["url"]: if new_item.infoLabels['year']: new_item.title = "%s (%s) [COLOR %s]%s[/COLOR]" \ % (new_item.contentTitle, new_item.infoLabels['year'], color6, str(new_item.infoLabels['rating']).replace("0.0", "")) else: new_item.title = "%s [COLOR %s]%s[/COLOR]" \ % (new_item.contentTitle, color6, new_item.infoLabels['rating'].replace("0.0", "")) else: # Si es una búsqueda de personas se incluye en el título y fanart una película por la que es 
conocido known_for = ob_tmdb.results[i].get("known_for") type = item.search['type'] if known_for: from random import randint random = randint(0, len(known_for) - 1) new_item.title = "%s [COLOR %s](%s)[/COLOR] (%s)" \ % (new_item.contentTitle, color6, known_for[random].get("title", known_for[random].get("name")), type) if known_for[random]["backdrop_path"]: new_item.fanart = 'http://image.tmdb.org/t/p/original' + known_for[random]["backdrop_path"] else: new_item.title = new_item.contentTitle itemlist.append(new_item) except: import traceback logger.error(traceback.format_exc()) if orden: itemlist.sort(key=lambda item: item.infoLabels["year"], reverse=True) if "page" in item.search and ob_tmdb.total_pages > item.search["page"]: item.search["page"] += 1 itemlist.append(Item(channel=item.channel, action=item.action, title=">> Página siguiente", search=item.search, extra=item.extra, pagina=item.pagina + 1, contentType=item.contentType)) return itemlist def detalles(item): itemlist = [] images = {} data = "" # Si viene de seccion imdb if not item.infoLabels["tmdb_id"]: headers = [['Accept-Language', langi]] data = httptools.downloadpage("http://www.imdb.com/title/" + item.infoLabels['imdb_id'], headers=headers, replace_headers=True).data pics = scrapertools.find_single_match(data, 'showAllVidsAndPics.*?href=".*?(tt\d+)') # Imágenes imdb if pics: images["imdb"] = {'url': 'http://www.imdb.com/_json/title/%s/mediaviewer' % pics} ob_tmdb = Tmdb(external_id=item.infoLabels["imdb_id"], external_source="imdb_id", tipo=item.extra, idioma_busqueda=langt) item.infoLabels["tmdb_id"] = ob_tmdb.get_id() ob_tmdb = Tmdb(id_Tmdb=item.infoLabels["tmdb_id"], tipo=item.extra, idioma_busqueda=langt) try: item.infoLabels = ob_tmdb.get_infoLabels(item.infoLabels) # Si no hay sinopsis en idioma elegido, buscar en el alternativo if not item.infoLabels["plot"]: item.infoLabels["plot"] = ob_tmdb.get_sinopsis(idioma_alternativo=langt_alt) except: pass if not item.fanart and 
item.infoLabels['fanart']: item.fanart = item.infoLabels['fanart'] if item.infoLabels['thumbnail']: item.thumbnail = item.infoLabels['thumbnail'] # Sinopsis, votos de imdb if data: plot = scrapertools.find_single_match(data, 'class="inline canwrap" itemprop="description">(.*?)') plot = scrapertools.htmlclean(plot) plot = re.sub(r'(?i)]+>|\n|\s{2}', ' ', plot).strip() if plot and (item.infoLabels['plot'] and item.infoLabels['plot'] != plot): item.infoLabels['plot'] += " (TMDB)\n" + plot + " (IMDB)" elif plot and not item.infoLabels['plot']: item.infoLabels['plot'] = plot rating = scrapertools.find_single_match(data, 'itemprop="ratingValue">([^<]+)<') if rating: item.infoLabels['rating'] = float(rating.replace(",", ".")) votos = scrapertools.find_single_match(data, 'itemprop="ratingCount">([^<]+)<') if votos: item.infoLabels['votes'] = votos if item.infoLabels['tagline']: itemlist.append(item.clone(title="--- %s ---" % item.infoLabels['tagline'], text_color="0xFFFF8C00", action="")) title = item.contentType.replace("movie", "película").replace("tvshow", "serie") # Búsqueda por títulos idioma elegido y/o versión original y español itemlist.append(item.clone(action="busqueda", title="Buscar %s en alfa: %s" % (title, item.contentTitle))) if item.infoLabels['originaltitle'] and item.contentTitle != item.infoLabels['originaltitle']: itemlist.append(item.clone(action="busqueda", contentTitle=item.infoLabels['originaltitle'], title=" Buscar por su nombre original: %s" % item.infoLabels['originaltitle'])) if langt != "es" and langt != "en" and item.infoLabels["tmdb_id"]: tmdb_lang = Tmdb(id_Tmdb=item.infoLabels["tmdb_id"], tipo=item.extra, idioma_busqueda='es') if tmdb_lang.result.get("title") and tmdb_lang.result["title"] != item.contentTitle \ and tmdb_lang.result["title"] != item.infoLabels['originaltitle']: tmdb_lang = tmdb_lang.result["title"] itemlist.append(item.clone(action="busqueda", title=" Buscar por su título en español: %s" % tmdb_lang, contentTitle=tmdb_lang)) 
# En caso de serie, opción de info por temporadas if item.contentType == "tvshow" and item.infoLabels['tmdb_id']: itemlist.append(item.clone(action="info_seasons", text_color=color4, title="Info de temporadas [%s]" % item.infoLabels["number_of_seasons"])) # Opción de ver el reparto y navegar por sus películas/series if item.infoLabels['tmdb_id']: itemlist.append(item.clone(action="reparto", title="Ver Reparto", text_color=color4, infoLabels={'tmdb_id': item.infoLabels['tmdb_id'], 'mediatype': item.contentType})) if config.is_xbmc(): item.contextual = True itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler", text_color=color5)) # try: # images['tmdb'] = ob_tmdb.result["images"] # itemlist.append(item.clone(action="imagenes", title="Lista de Imágenes", text_color=color5, images=images, # extra="menu")) # except: # pass try: if item.contentType == "movie" and item.infoLabels["year"] < 2014: post_url = "https://theost.com/search/custom/?key=%s&year=%s&country=0&genre=0" % ( item.infoLabels['originaltitle'].replace(" ", "+"), item.infoLabels["year"]) url = "https://nl.hideproxy.me/includes/process.php?action=update" post = "u=%s&proxy_formdata_server=nl&allowCookies=1&encodeURL=1&encodePage=0&stripObjects=0&stripJS=0&go=" % urllib.quote( post_url) while True: response = httptools.downloadpage(url, post, follow_redirects=False) if response.headers.get("location"): url = response.headers["location"] post = "" else: data_music = response.data break url_album = scrapertools.find_single_match(data_music, 'album(?:|s) on request.*?href="([^"]+)"') if url_album: url_album = "https://nl.hideproxy.me" + url_album itemlist.append( item.clone(action="musica_movie", title="Escuchar BSO - Lista de canciones", url=url_album, text_color=color5)) except: pass token_auth = config.get_setting("token_trakt", "tvmoviedb") if token_auth: itemlist.append(item.clone(title="Gestionar con tu cuenta Trakt", action="menu_trakt")) 
itemlist.append(item.clone(title="", action="")) # Es parte de una colección try: if ob_tmdb.result.get("belongs_to_collection"): new_item = item.clone(search='', infoLabels={'mediatype': item.contentType}) saga = ob_tmdb.result["belongs_to_collection"] new_item.infoLabels["tmdb_id"] = saga["id"] if saga["poster_path"]: new_item.thumbnail = 'http://image.tmdb.org/t/p/original' + saga["poster_path"] if saga["backdrop_path"]: new_item.fanart = 'http://image.tmdb.org/t/p/original' + saga["backdrop_path"] new_item.search = {'url': 'collection/%s' % saga['id'], 'language': langt} itemlist.append(new_item.clone(title="Es parte de: %s" % saga["name"], action="listado_tmdb", text_color=color5)) except: pass # Películas/Series similares y recomendaciones if item.infoLabels['tmdb_id']: item.extra = item.contentType.replace('tvshow', 'tv') title = title.replace("película", "Películas").replace("serie", "Series") itemlist.append(item.clone(title="%s similares" % title, action="listado_tmdb", search={'url': '%s/%s/similar' % (item.extra, item.infoLabels['tmdb_id']), 'language': langt, 'page': 1}, infoLabels={'mediatype': item.contentType}, text_color=color2)) itemlist.append( item.clone(title="Recomendaciones", action="listado_tmdb", infoLabels={'mediatype': item.contentType}, search={'url': '%s/%s/recommendations' % (item.extra, item.infoLabels['tmdb_id']), 'language': langt, 'page': 1}, text_color=color2)) return itemlist def reparto(item): # Actores y equipo de rodaje de una película/serie itemlist = [] item.text_color = color1 item.extra=item.contentType.replace('tvshow','tv') item.search = {'url': '%s/%s/credits' % (item.extra, item.infoLabels['tmdb_id'])} ob_tmdb = Tmdb(discover=item.search, tipo=item.extra, idioma_busqueda=langt) try: cast = ob_tmdb.result["cast"] if cast: itemlist.append(item.clone(title="Actores/Actrices", action="", text_color=color2)) for actor in cast: new_item = item.clone(action="listado_tmdb", fanart=default_fan) new_item.title = " " + 
actor["name"] + " as " + actor["character"] if actor["profile_path"]: new_item.thumbnail = 'http://image.tmdb.org/t/p/original' + actor["profile_path"] if item.contentType == "movie": new_item.search = {'url': 'discover/movie', 'with_cast': actor['id'], 'language': langt, 'page': 1, 'sort_by': 'primary_release_date.desc'} else: new_item.search = {'url': 'person/%s/tv_credits' % actor['id'], 'language': langt} itemlist.append(new_item) except: pass try: crew = ob_tmdb.result["crew"] if crew: itemlist.append(item.clone(title="Equipo de rodaje", action="", text_color=color2)) for c in crew: new_item = item.clone(action="listado_tmdb", fanart=default_fan) new_item.title = " " + c["job"] + ": " + c["name"] if c["profile_path"]: new_item.thumbnail = 'http://image.tmdb.org/t/p/original' + c["profile_path"] if item.contentType == "movie": new_item.search = {'url': 'discover/movie', 'with_crew': c['id'], 'page': 1, 'sort_by': 'primary_release_date.desc'} else: new_item.search = {'url': 'person/%s/tv_credits' % c['id'], 'language': langt} new_item.crew = True itemlist.append(new_item) except: pass return itemlist def info_seasons(item): # Info de temporadas y episodios itemlist = [] item.text_color = color4 ob_tmdb = Tmdb(id_Tmdb=item.infoLabels["tmdb_id"], tipo="tv", idioma_busqueda=langt) for temp in range(item.infoLabels["number_of_seasons"], 0, -1): temporada = ob_tmdb.get_temporada(temp) if temporada: new_item = item.clone(action="", mediatype="season") new_item.infoLabels['title'] = temporada['name'] new_item.infoLabels['season'] = temp if temporada['overview']: new_item.infoLabels['plot'] = temporada['overview'] if temporada['air_date']: date = temporada['air_date'].split('-') new_item.infoLabels['aired'] = date[2] + "/" + date[1] + "/" + date[0] new_item.infoLabels['year'] = date[0] if temporada['poster_path']: new_item.infoLabels['poster_path'] = 'http://image.tmdb.org/t/p/original' + temporada['poster_path'] new_item.thumbnail = new_item.infoLabels['poster_path'] 
new_item.title = "Temporada %s" % temp itemlist.append(new_item) for epi in range(1, len(temporada["episodes"])): episodio = ob_tmdb.get_episodio(temp, epi) if episodio: new_item = item.clone(action="", text_color=color1, mediatype="episode") new_item.infoLabels['season'] = temp new_item.infoLabels['episode'] = epi new_item.infoLabels['title'] = episodio['episodio_titulo'] if episodio['episodio_sinopsis']: new_item.infoLabels['plot'] = episodio['episodio_sinopsis'] if episodio['episodio_imagen']: new_item.infoLabels['poster_path'] = episodio['episodio_imagen'] new_item.thumbnail = new_item.infoLabels['poster_path'] if episodio['episodio_air_date']: new_item.infoLabels['aired'] = episodio['episodio_air_date'] new_item.infoLabels['year'] = episodio['episodio_air_date'].rsplit("/", 1)[1] if episodio['episodio_vote_average']: new_item.infoLabels['rating'] = episodio['episodio_vote_average'] new_item.infoLabels['votes'] = episodio['episodio_vote_count'] new_item.title = " %sx%s - %s" % (temp, epi, new_item.infoLabels['title']) itemlist.append(new_item) return itemlist def indices_tmdb(item): # Indices por genero y año itemlist = [] from datetime import datetime if "Géneros" in item.title: thumbnail = {} url = ('http://api.themoviedb.org/3/genre/%s/list?api_key=f7f51775877e0bb6703520952b3c7840&language=%s' % (item.extra, langt)) try: lista = jsontools.load(httptools.downloadpage(url, cookies=False).data)["genres"] lista_generos = {} for l in lista: lista_generos[str(l["id"])] = l["name"] if "es" in langt: thumbnail[str(l["id"])] = "%s1/%s.jpg" % (images_predef, l["name"].lower() \ .replace("ó", "o").replace("í", "i") \ .replace(" ", "%20").replace("Aventuras", "Aventura") .replace("ú", "u")) else: thumbnail[str(l["id"])] = "%s2/%s.jpg" % (images_predef, l["name"]) except: pass fecha = datetime.now().strftime('%Y-%m-%d') sort_by = 'release_date.desc' param_year = 'release_date.lte' if item.contentType == 'tvshow': sort_by = 'first_air_date.desc' param_year = 
'air_date.lte' for key, value in lista_generos.items(): new_item = item.clone() new_item.title = value new_item.thumbnail = thumbnail[key] new_item.search = {'url': 'discover/%s' % item.extra, 'with_genres': key, 'sort_by': sort_by, param_year: fecha, 'language': langt, 'page': 1} itemlist.append(new_item) itemlist.sort(key=lambda item: item.title) else: year = datetime.now().year + 3 for i in range(year, 1899, -1): if item.contentType == 'tvshow': param_year = 'first_air_date_year' else: param_year = 'primary_release_year' search = {'url': 'discover/%s' % item.extra, param_year: i, 'language': langt, 'page': 1} itemlist.append(item.clone(title=str(i), action='listado_tmdb', search=search)) return itemlist def filtro(item): logger.info() from datetime import datetime list_controls = [] valores = {} dict_values = None list_controls.append({'id': 'years', 'label': 'Año', 'enabled': True, 'color': '0xFFCC2EFA', 'type': 'list', 'default': -1, 'visible': True}) list_controls[0]['lvalues'] = [] valores['years'] = [] year = datetime.now().year + 1 for i in range(1900, year + 1): list_controls[0]['lvalues'].append(str(i)) valores['years'].append(str(i)) list_controls[0]['lvalues'].append('Cualquiera') valores['years'].append('') if "Personalizado" in item.title: # Se utilizan los valores por defecto/guardados valores_guardados = config.get_setting("filtro_defecto_" + item.extra, item.channel) if valores_guardados: dict_values = valores_guardados url = ('http://api.themoviedb.org/3/genre/%s/list?api_key=f7f51775877e0bb6703520952b3c7840&language=%s' % (item.extra, langt)) try: lista = jsontools.load(httptools.downloadpage(url, cookies=False).data)["genres"] if lista: list_controls.append({'id': 'labelgenre', 'enabled': True, 'type': 'label', 'default': None, 'label': 'Selecciona uno, ninguno o más de un género', 'visible': True, 'color': '0xFFC52020'}) for l in lista: list_controls.append({'id': 'genre' + str(l["id"]), 'label': l["name"], 'enabled': True, 'type': 'bool', 
'default': False, 'visible': True}) except: pass list_controls.append({'id': 'orden', 'label': 'Ordenar por', 'enabled': True, 'color': '0xFF25AA48', 'type': 'list', 'default': -1, 'visible': True}) orden = ['Popularidad Desc', 'Popularidad Asc', 'Año Desc', 'Año Asc', 'Valoración Desc', 'Valoración Asc'] if item.extra == "movie": orden.extend(['Título [A-Z]', 'Título [Z-A]']) orden_tmdb = ['popularity.desc', 'popularity.asc', 'release_date.desc', 'release_date.asc', 'vote_average.desc', 'vote_average.asc', 'original_title.asc', 'original_title.desc'] valores['orden'] = [] list_controls[-1]['lvalues'] = [] for i, tipo_orden in enumerate(orden): list_controls[-1]['lvalues'].insert(0, tipo_orden) valores['orden'].insert(0, orden_tmdb[i]) list_controls.append({'id': 'espacio', 'label': '', 'enabled': False, 'type': 'label', 'default': None, 'visible': True}) list_controls.append({'id': 'save', 'label': 'Establecer como filtro por defecto', 'enabled': True, 'type': 'bool', 'default': False, 'visible': True}) else: list_controls.append({'id': 'keyword', 'label': 'Palabra Clave', 'enabled': True, 'type': 'text', 'default': '', 'visible': True}) item.valores = valores return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values, caption="Filtra la búsqueda", item=item, callback='filtrado') def filtrado(item, values): values_copy = values.copy() # Guarda el filtro para que sea el que se cargue por defecto if "save" in values and values["save"]: values_copy.pop("save") config.set_setting("filtro_defecto_" + item.extra, values_copy, item.channel) year = item.valores["years"][values["years"]] if "Personalizado" in item.title: orden = item.valores["orden"][values["orden"]] if item.extra == "tv": orden = orden.replace('release_date', 'first_air_date') genero_ids = [] for v in values: if "genre" in v: if values[v]: genero_ids.append(v.replace('genre', '')) genero_ids = ",".join(genero_ids) if "clave" in item.title: item.search = {'url': 
'search/%s' % item.extra, 'year': year, 'query': values["keyword"], 'language': langt, 'page': 1} elif item.extra == "movie": item.search = {'url': 'discover/%s' % item.extra, 'sort_by': orden, 'primary_release_year': year, 'with_genres': genero_ids, 'vote_count.gte': '10', 'language': langt, 'page': 1} else: item.search = {'url': 'discover/%s' % item.extra, 'sort_by': orden, 'first_air_date_year': year, 'with_genres': genero_ids, 'vote_count.gte': '10', 'language': langt, 'page': 1} item.action = "listado_tmdb" return listado_tmdb(item) def musica_movie(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data patron = '([^<]+)
([^<]+).*?(\d+:\d+).*?

(.*?)' matches = scrapertools.find_multiple_matches(data, patron) for thumbnail, imdb_id, title, movie, datos in matches: new_item = item.clone(action='listado_tmdb') try: if "@" in thumbnail: thumbnail = thumbnail.rsplit('@', 1)[0] thumbnail += "@._UX482.jpg" elif "._V1_" in thumbnail: thumbnail = thumbnail.rsplit('._V1_', 1)[0] thumbnail += "._V1_UX482.jpg" except: pass new_item.thumbnail = thumbnail datos = datos.strip() if datos: new_item.infoLabels['plot'] = scrapertools.htmlclean(datos) new_item.title = title.strip() + ' [COLOR %s](%s)[/COLOR]' % (color6, movie.strip()) new_item.infoLabels['imdb_id'] = imdb_id new_item.search = {'url': 'find/%s' % imdb_id, 'external_source': 'imdb_id', 'language': langt} itemlist.append(new_item) else: patron = '(?:

|
([^<]+)|"description">([^<]+)<)' matches = scrapertools.find_multiple_matches(data, patron) for thumbnail, thumb2, imdb_id, title, datos, plot, plot2 in matches: new_item = item.clone(action='detalles') new_item.title = title.strip() if not thumbnail: thumbnail = thumb2 try: if "@" in thumbnail: thumbnail = thumbnail.rsplit('@', 1)[0] thumbnail += "@._UX482.jpg" elif "._V1_" in thumbnail: thumbnail = thumbnail.rsplit('._V1_', 1)[0] thumbnail += "._V1_UX482.jpg" except: pass new_item.thumbnail = thumbnail if not plot: plot = plot2 new_item.infoLabels['plot'] = scrapertools.htmlclean(plot.strip()) generos = scrapertools.find_multiple_matches(datos, 'genre">([^<]+)<') if generos: new_item.infoLabels["genre"] = ", ".join(generos) duracion = scrapertools.find_single_match(datos, '(\d+) min') if duracion: new_item.infoLabels['duration'] = int(duracion) * 60 new_item.infoLabels['year'] = scrapertools.find_single_match(new_item.title, '\((\d{4})') if not new_item.infoLabels['year']: new_item.infoLabels['year'] = scrapertools.find_single_match(datos, 'year.*?\((\d{4})') if new_item.infoLabels['year']: new_item.title += ' (%s)' % new_item.infoLabels['year'] rating = scrapertools.find_single_match(datos, '(?:rating|Metascore).*?([^<]*)') rating = rating.replace(",", ".") if rating: if not "." 
in rating: try: rating = float(rating) / 10 except: rating = None if rating: new_item.title += " [COLOR %s]%s[/COLOR]" % (color6, str(rating)) new_item.infoLabels['rating'] = float(rating) new_item.infoLabels['imdb_id'] = imdb_id itemlist.append(new_item) next_page = scrapertools.find_single_match(data, ']*>Next') if next_page: if not "title_type" in item.url: next_page = 'http://www.imdb.com' + next_page else: next_page = 'http://www.imdb.com/search/title' + next_page itemlist.append(item.clone(title=">> Página Siguiente", url=next_page, text_color='')) return itemlist def filtro_imdb(item): logger.info() from datetime import datetime list_controls = [] valores = {} dict_values = None # Se utilizan los valores por defecto/guardados valores_guardados = config.get_setting("filtro_defecto_imdb_" + item.extra, item.channel) if valores_guardados: dict_values = valores_guardados list_controls.append({'id': 'title', 'label': 'Título', 'enabled': True, 'type': 'text', 'default': '', 'visible': True}) list_controls.append({'id': 'yearsdesde', 'label': 'Año desde:', 'enabled': True, 'color': '0xFFCC2EFA', 'type': 'list', 'default': -1, 'visible': True}) list_controls.append({'id': 'yearshasta', 'label': 'Año hasta:', 'enabled': True, 'color': '0xFF2ECCFA', 'type': 'list', 'default': -1, 'visible': True}) list_controls[1]['lvalues'] = [] list_controls[2]['lvalues'] = [] valores['years'] = [] year = datetime.now().year + 1 for i in range(1900, year + 1): list_controls[1]['lvalues'].append(str(i)) list_controls[2]['lvalues'].append(str(i)) valores['years'].append(str(i)) list_controls[1]['lvalues'].append('Cualquiera') list_controls[2]['lvalues'].append('Cualquiera') valores['years'].append('') try: generos_spa = {'Action': 'Acción', 'Adventure': 'Aventura', 'Animation': 'Animación', 'Biography': 'Biografía', 'Comedy': 'Comedia', 'Crime': 'Crimen', 'Documentary': 'Documental', 'Family': 'Familia', 'Fantasy': 'Fantástico', 'Film-Noir': 'Cine Negro', 'Game-Show': 'Concursos', 
'History': 'Historia', 'Horror': 'Terror', 'Music': 'Música', 'Mistery': 'Intriga', 'News': 'Noticias', 'Reality-TV': 'Reality', 'Sci-Fi': 'Ciencia Ficción', 'Sport': 'Deportes', 'Talk-Show': 'Entrevistas', 'War': 'Cine Bélico'} data = httptools.downloadpage("http://www.imdb.com/search/title", cookies=False).data bloque = scrapertools.find_single_match(data, '

Genres

(.*?)') matches = scrapertools.find_multiple_matches(bloque, ' value="([^"]+)"\s*>\s*([^<]+)<') if matches: list_controls.append({'id': 'labelgenre', 'enabled': True, 'type': 'label', 'visible': True, 'label': 'Selecciona uno, ninguno o más de un género', 'color': '0xFFC52020'}) lista = [] for valor, titulo in matches: titulo = generos_spa.get(titulo, titulo) lista.append([valor, titulo]) lista.sort(key=lambda lista: lista[1]) for valor, titulo in lista: list_controls.append({'id': 'genre' + valor, 'label': titulo, 'enabled': True, 'type': 'bool', 'default': False, 'visible': True}) except: pass list_controls.append({'id': 'espacio', 'label': '', 'enabled': False, 'type': 'label', 'default': None, 'visible': True}) try: bloque = scrapertools.find_single_match(data, '

Countries

(.*?)Less-Common') matches = scrapertools.find_multiple_matches(bloque, ' value="([^"]+)"\s*>([^<]+)<') if matches: list_controls.append({'id': 'pais', 'label': 'País', 'enabled': True, 'color': '0xFFFF8000', 'type': 'list', 'default': -1, 'visible': True}) list_controls[-1]['lvalues'] = [] list_controls[-1]['lvalues'].append('Cualquiera') valores['pais'] = [] valores['pais'].append('') for valor, titulo in matches: list_controls[-1]['lvalues'].insert(0, titulo) valores['pais'].insert(0, valor) except: pass list_controls.append({'id': 'votos', 'label': 'Número mínimo de votos', 'enabled': True, 'type': 'text', 'default': '10000', 'visible': True, 'color': '0xFFF4FA58'}) list_controls.append({'id': 'orden', 'label': 'Ordenar por', 'enabled': True, 'color': '0xFF25AA48', 'type': 'list', 'default': -1, 'visible': True}) orden = ['Popularidad Desc', 'Popularidad Asc', 'Año Desc', 'Año Asc', 'Valoración Desc', 'Valoración Asc', 'Título [A-Z]', 'Título [Z-A]'] orden_imdb = ['moviemeter,asc', 'moviemeter,desc', 'year,desc', 'year,asc', 'user_rating,desc', 'user_rating,asc', 'alpha,asc', 'alpha,desc'] valores['orden'] = [] list_controls[-1]['lvalues'] = [] for i, tipo_orden in enumerate(orden): list_controls[-1]['lvalues'].insert(0, tipo_orden) valores['orden'].insert(0, orden_imdb[i]) list_controls.append({'id': 'save', 'label': 'Establecer como filtro por defecto', 'enabled': True, 'type': 'bool', 'default': False, 'visible': True}) item.valores = valores return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values, caption="Filtra la búsqueda", item=item, callback='filtrado_imdb') def filtrado_imdb(item, values): values_copy = values.copy() # Guarda el filtro para que sea el que se cargue por defecto if "save" in values and values["save"]: values_copy.pop("save") config.set_setting("filtro_defecto_imdb_" + item.extra, values_copy, item.channel) yeard = item.valores["years"][values["yearsdesde"]] yearh = 
item.valores["years"][values["yearshasta"]] orden = item.valores["orden"][values["orden"]] pais = item.valores["pais"][values["pais"]] genero_ids = [] for v in values: if "genre" in v: if values[v]: genero_ids.append(v.replace('genre', '')) genero_ids = ",".join(genero_ids) try: votos = int(values["votos"]) except: votos = "" item.url = 'http://www.imdb.com/search/title?countries=%s&num_votes=%s,&genres=%s&release_date=%s,%s&sort=%s&' \ 'title=%s&title_type=' % (pais, str(votos), genero_ids, yeard, yearh, orden, values["title"]) if item.contentType == "movie": item.url += "feature,tv_movie" else: item.url += "tv_series,tv_special,mini_series" item.action = "listado_imdb" return listado_imdb(item) def indices_imdb(item): # Índices imdb por año y genero itemlist = [] from datetime import datetime if "Géneros" in item.title: generos_spa = {'Action': 'Accion', 'Adventure': 'Aventura', 'Animation': 'Animacion', 'Biography': 'Biografía', 'Comedy': 'Comedia', 'Crime': 'Crimen', 'Documentary': 'Documental', 'Family': 'Familia', 'Fantasy': 'Fantasia', 'Film-Noir': 'Cine Negro', 'Game-Show': 'Concursos', 'History': 'Historia', 'Horror': 'Terror', 'Music': 'Música', 'Mistery': 'Intriga', 'News': 'Noticias', 'Reality-TV': 'Reality', 'Sci-Fi': 'Ciencia Ficcion', 'Sport': 'Deportes', 'Talk-Show': 'Entrevistas', 'War': 'Cine Bélico'} data = httptools.downloadpage("http://www.imdb.com/search/title", cookies=False).data bloque = scrapertools.find_single_match(data, '

Genres

(.*?)') matches = scrapertools.find_multiple_matches(bloque, ' value="([^"]+)"\s*>\s*([^<]+)<') if matches: for valor, titulo in matches: title = generos_spa.get(titulo, titulo) thumbnail = "%s2/%s.jpg" % (images_predef, titulo) itemlist.append(item.clone(title=title, action='listado_imdb', thumbnail=thumbnail, url='http://www.imdb.com/search/title?genres=%s%s' % (valor, item.url))) itemlist.sort(key=lambda item: item.title) else: year = datetime.now().year + 3 for i in range(year, 1899, -1): itemlist.append(item.clone(title=str(i), action='listado_imdb', url='http://www.imdb.com/search/title?release_date=%s,%s%s' % (i, i, item.url))) return itemlist ##-------------------- SECCION FILMAFFINITY ------------------------## def listado_fa(item): # Método para listados principales de filmaffinity itemlist = [] item.text_color = color1 # Listados con paginación por post if item.extra == "top": if item.page_fa: post = "from=%s" % item.page_fa data = httptools.downloadpage(item.url, post).data if item.total > item.page_fa: item.page_fa += 30 else: item.page_fa = "" else: item.page_fa = 30 data = httptools.downloadpage(item.url).data item.total = int(scrapertools.find_single_match(data, 'tmResCount\s*=\s*(\d+)')) if item.total <= item.page_fa: item.page_fa = "" else: data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) votaciones = [] # Si es la sección de estrenos cambia la estructura del scraper if item.extra == "estrenos": patron = '\s*(\d+[^<]+)<(.*?)(?:
|' \ '
([^<]+)
' \ '\s*(.*?)\s*<|)' matches = scrapertools.find_multiple_matches(bloque, patron) for url, thumb, thumb2, title, year, rating, votos in matches: title = title.strip() new_item = item.clone(action="detalles_fa", contentType="movie", extra="movie", contentTitle=title) if not url.startswith("http://m.filmaffinity"): new_item.url = "http://m.filmaffinity.com" + url else: new_item.url = url if not thumb: thumb = thumb2 new_item.thumbnail = thumb.replace("msmall", "large") if not new_item.thumbnail.startswith("http"): new_item.thumbnail = "http://m.filmaffinity.com" + new_item.thumbnail new_item.title = " " + title + " (%s) [COLOR %s]%s[/COLOR]" % (year, color6, rating) new_item.infoLabels['year'] = year votaciones.append([rating, votos]) if rating: new_item.infoLabels['rating'] = float(rating.replace(",", ".")) new_item.infoLabels['votes'] = votos itemlist.append(new_item) else: patron = '(?:
|)' \ '.*?(?:data-src="([^"]+)"|src="((?!/images/empty.gif)[^"]+)").*?' \ '
' \ '([^<]+)
\s*(.*?)\s*<|)' matches = scrapertools.find_multiple_matches(data, patron) for url, url2, thumb, thumb2, title, year, rating, votos in matches: title = title.strip() new_item = item.clone(action="detalles_fa", extra="movie") if not url: url = url2 if not url.startswith("http://m.filmaffinity"): new_item.url = "http://m.filmaffinity.com" + url else: new_item.url = url if not thumb: thumb = thumb2 new_item.thumbnail = thumb.replace("msmall", "large") if not new_item.thumbnail.startswith("http"): new_item.thumbnail = "http://m.filmaffinity.com" + new_item.thumbnail new_item.title = title.replace("(Serie de TV)", "").replace("(TV)", "") + " (%s) [COLOR %s]%s[/COLOR]" \ % (year, color6, rating) new_item.contentTitle = re.sub(r'(?i)\(serie de tv\)|\(tv\)|\(c\)', '', title) if re.search(r'(?i)serie de tv|\(tv\)', title): new_item.contentType = "tvshow" new_item.extra = "tv" new_item.infoLabels["tvshowtitle"] = new_item.contentTitle new_item.infoLabels['year'] = year votaciones.append([rating, votos]) if rating: new_item.infoLabels['rating'] = float(rating.replace(",", ".")) new_item.infoLabels['votes'] = votos itemlist.append(new_item) if len(itemlist) < 31: from core import tmdb tmdb.set_infoLabels_itemlist(itemlist, True) for i, it in enumerate(itemlist): try: it.infoLabels['votes'] = votaciones[i][1] it.infoLabels['rating'] = float(votaciones[i][0].replace(",", ".")) except: pass next_page = scrapertools.find_single_match(data, 'aria-label="Next" href="([^"]+)"') if next_page: if not next_page.startswith("http://m.filmaffinity"): next_page = "http://m.filmaffinity.com" + next_page itemlist.append(Item(channel=item.channel, action=item.action, title=">> Página Siguiente", url=next_page, extra=item.extra)) elif item.page_fa: itemlist.append(item.clone(title=">> Página Siguiente", text_color="")) return itemlist def indices_fa(item): # Índices por genero, año, temas y sagas/colecciones itemlist = [] item.text_color = color1 if item.url: data = 
httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) if "sagas" in item.extra: patron = '
  • .*?href="([^"]+)".*?group-name">([^<]+)<.*?src="([^"]+)".*?' \ '"count-movies">([^<]+)<' matches = scrapertools.find_multiple_matches(data, patron) for url, title, thumbnail, info in matches: new_item = item.clone(action="listado_fa") if not url.startswith("http://www.filmaffinity"): new_item.url = "http://m.filmaffinity.com" + url else: new_item.url = url.replace("www.filmaffinity.com", "m.filmaffinity.com") new_item.thumbnail = thumbnail.replace("mmed", "large") new_item.title = title.strip() + " [COLOR %s](%s)[/COLOR]" % (color6, info) itemlist.append(new_item) next_page = scrapertools.find_single_match(data, '>>') if next_page: if not next_page.startswith("http://www.filmaffinity.com"): next_page = "http://www.filmaffinity.com" + next_page itemlist.append(Item(channel=item.channel, action=item.action, title=">> Página Siguiente", url=next_page, extra=item.extra)) elif "Géneros" in item.title: bloque = scrapertools.find_single_match(data, 'name="genre">.*?(.*?)') matches = scrapertools.find_multiple_matches(bloque, '') for valor, titulo in matches: if valor == "TV_SE": continue new_item = item.clone(title=titulo, action="listado_fa", extra="top") new_item.url = "http://m.filmaffinity.com/%s/topgen.php?genre=%s&country=&fromyear=&toyear=&nodoc=1" \ % (langf, valor) if item.contentType == "movie": new_item.url += "¬vse=1" generos = ['1/accion.jpg', '1/animacion.jpg', '1/aventura.jpg', '1/guerra.jpg', '1/ciencia%20ficcion.jpg', '2/Film-Noir.jpg', '1/comedia.jpg', '0/Unknown.png', '1/documental.jpg', '1/drama.jpg', '1/fantasia.jpg', '2/Kids.jpg', '2/Suspense.jpg', '1/musical.jpg', '1/romance.jpg', '1/terror.jpg', '1/thriler.jpg', '1/western.jpg'] if langf != "en": try: new_item.thumbnail = "%s/%s" % (images_predef, generos[len(itemlist)]) except: new_item.thumbnail = "%s1/%s.jpg" % (images_predef, titulo.lower()) else: new_item.thumbnail = "%s2/%s.jpg" % (images_predef, titulo) itemlist.append(new_item) elif "Temas" in item.title: bloques = 
scrapertools.find_multiple_matches(data, '
    (.*?)
    ') for letra, bloque in bloques: patron = 'href="([^"]+)">([^<]+)<.*?"badge">(\d+)' matches = scrapertools.find_multiple_matches(bloque, patron) extra = len(matches) + 1 action = "" folder = True if config.is_xbmc(): action = "move" folder = False itemlist.append(item.clone(title=letra, text_color=color2, action=action, extra=extra, folder=folder)) for url, titulo, numero in matches: new_item = item.clone(action="temas_fa") topic_id = scrapertools.find_single_match(url, "topic=(\d+)") new_item.url = "http://www.filmaffinity.com/%s/%s&attr=all" % ( langf, url.replace("&nodoc", "").replace("¬vse", "")) new_item.title = titulo + " (%s)" % numero itemlist.append(new_item) else: from datetime import datetime year = datetime.now().year for i in range(year, 1899, -1): new_item = item.clone(title=str(i), action="listado_fa", extra="top") genre = '' if item.contentType == "tvshow": genre = 'TV_SE' new_item.url = "http://m.filmaffinity.com/%s/topgen.php?genre=%s&country=&fromyear=%s&toyear=%s&nodoc=1" \ % (langf, genre, i, i) if item.contentType == "movie": new_item.url += "¬vse=1" itemlist.append(new_item) return itemlist def temas_fa(item): # Películas y series por temas itemlist = [] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) item.infoLabels['plot'] = scrapertools.find_single_match(data, '

    ([^<]+)

    ') patron = '
    \s*([^<]+)\s*\((\d+)\)' matches = scrapertools.find_multiple_matches(data, patron) for url, thumb, title, year in matches: title = title.strip() new_item = item.clone(action="detalles_fa", contentType="movie", extra="movie", text_color=color2) new_item.url = "http://m.filmaffinity.com/%s/movie.php?id=%s" % (langf, url) new_item.thumbnail = thumb.replace("msmall", "large") if not new_item.thumbnail.startswith("http"): new_item.thumbnail = "http://www.filmaffinity.com" + new_item.thumbnail new_item.infoLabels["year"] = year new_item.title = title + " (%s)" % year if re.search(r'(?i)serie de tv|\(tv\)', title): new_item.contentType = "tvshow" new_item.extra = "tv" new_item.contentTitle = re.sub(r'(?i)\(serie de tv\)|\(tv\)|\(c\)', '', title) itemlist.append(new_item) next_page = scrapertools.find_single_match(data, '>>') if next_page: if not next_page.startswith("http://www.filmaffinity.com"): next_page = "http://www.filmaffinity.com/%s/%s" % (langf, next_page) itemlist.append(Item(channel=item.channel, action=item.action, title=">> Página Siguiente", url=next_page)) return itemlist def detalles_fa(item): itemlist = [] item.plot = "" rating = item.infoLabels['rating'] votos = item.infoLabels['votes'] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) # Se extrae el título original para posibles búsquedas en tmdb posteriores orig_title = scrapertools.find_single_match(data, 'itemprop="datePublished">.*?
    ([^<]+)
    ').strip() if item.contentType == "movie": item.infoLabels['originaltitle'] = re.sub(r"(?i)\(TV Series\)|\(S\)|\(TV\)", "", orig_title) else: item.infoLabels['tvshowtitle'] = re.sub(r"(?i)\(TV Series\)|\(S\)|\(TV\)", "", orig_title) item_tmdb = item.clone() if item.contentType == "movie": ob_tmdb = Tmdb(texto_buscado=item_tmdb.contentTitle, year=item_tmdb.infoLabels['year'], tipo=item_tmdb.extra, idioma_busqueda=langt) if not ob_tmdb.result: ob_tmdb = Tmdb(texto_buscado=item_tmdb.infoLabels['originaltitle'], year=item_tmdb.infoLabels['year'], tipo=item_tmdb.extra, idioma_busqueda=langt) else: ob_tmdb = Tmdb(texto_buscado=item_tmdb.contentTitle, tipo=item_tmdb.extra, idioma_busqueda=langt) if not ob_tmdb.result: ob_tmdb = Tmdb(texto_buscado=item_tmdb.infoLabels['tvshowtitle'], tipo=item_tmdb.extra, idioma_busqueda=langt) if ob_tmdb.result: ob_tmdb = Tmdb(id_Tmdb=ob_tmdb.get_id(), tipo=item_tmdb.extra, idioma_busqueda=langt) item.infoLabels = ob_tmdb.get_infoLabels(item.infoLabels) # Si no hay sinopsis en idioma elegido, buscar en el alternativo if not item.infoLabels["plot"]: item.infoLabels["plot"] = ob_tmdb.get_sinopsis(idioma_alternativo=langt_alt) # Se concatena el plot de filmaffinity al de tmdb si lo hay plot = scrapertools.find_single_match(data, '
    (.*?)
    ') plot = plot.replace("

    ", "\n") plot = scrapertools.decodeHtmlentities(plot).replace(" (FILMAFFINITY)", "") if plot and (item.infoLabels['plot'] and item.infoLabels['plot'] != plot): item.infoLabels['plot'] += " (TMDB)\n" + plot + " (FILMAFFINITY)" elif plot and not item.infoLabels['plot']: item.infoLabels['plot'] = plot # Se busca y rellena con la info de filmaffinity para diferenciarla de tmdb if not item.infoLabels['duration']: duration = scrapertools.find_single_match(data, '
    (\d+)') if duration: item.infoLabels['duration'] = int(duration) * 60 if not item.infoLabels['genre']: generos = scrapertools.find_multiple_matches(data, 'class="g-t-item">(.*?)') genres = [] for g in generos: genres.append(scrapertools.htmlclean(g.strip())) item.infoLabels['genre'] = ", ".join(genres) if not rating: rating = scrapertools.find_single_match(data, 'itemprop="ratingValue".*?>([^<]+)<') if rating: rating = float(rating.replace(",", ".")) elif ob_tmdb.result: rating = float(ob_tmdb.result.get('vote_average', 0)) item.infoLabels['rating'] = rating if not votos: votos = scrapertools.find_single_match(data, 'itemprop="ratingCount".*?>([^<]+)<') if votos == "0" and ob_tmdb.result: votos = ob_tmdb.result.get('vote_count', '') item.infoLabels['votes'] = votos if item.infoLabels['fanart']: item.fanart = item.infoLabels['fanart'] else: item.fanart = scrapertools.find_single_match(data, 'Imagen Principal.*?src: "([^"]+)"') if item.infoLabels['thumbnail']: item.thumbnail = item.infoLabels['thumbnail'] if item.infoLabels['tagline']: itemlist.append(item.clone(title="--- %s ---" % item.infoLabels['tagline'], text_color="0xFFFF8C00", action="")) title = item.contentType.replace("movie", "película").replace("tvshow", "serie") itemlist.append(item.clone(action="busqueda", title="Buscar %s en alfa: %s" % (title, item.contentTitle))) if item.infoLabels['originaltitle'] and item.contentTitle != item.infoLabels['originaltitle']: itemlist.append(item.clone(action="busqueda", contentTitle=item.infoLabels['originaltitle'], title=" Buscar por su nombre original: %s" % item.infoLabels['originaltitle'])) if langt != "es" and langt != "en" and item.infoLabels["tmdb_id"]: tmdb_lang = Tmdb(id_Tmdb=item.infoLabels["tmdb_id"], tipo=item.extra, idioma_busqueda='es') if tmdb_lang.result.get("title") and tmdb_lang.result["title"] != item.contentTitle: tmdb_lang = tmdb_lang.result["title"] itemlist.append(item.clone(action="busqueda", title=" Buscar por su título en español: %s" % 
tmdb_lang, contentTitle=tmdb_lang)) if item.contentType == "tvshow" and ob_tmdb.result: itemlist.append(item.clone(action="info_seasons", text_color=color4, title="Info de temporadas [%s]" % item.infoLabels["number_of_seasons"])) if ob_tmdb.result: itemlist.append(item.clone(action="reparto", title="Ver Reparto", text_color=color4, infoLabels={'tmdb_id': item.infoLabels['tmdb_id'], 'mediatype': item.contentType})) if config.is_xbmc(): item.contextual = True trailer_url = scrapertools.find_single_match(data, '' % langf) images = {} if ob_tmdb.result and ob_tmdb.result.get("images"): images['tmdb'] = ob_tmdb.result["images"] if url_img: images['filmaffinity'] = {} if images: itemlist.append(item.clone(action="imagenes", title="Lista de Imágenes", text_color=color5, images=images, url=url_img, extra="menu")) try: if item.contentType == "movie" and item.infoLabels["year"] < 2014: post_url = "https://theost.com/search/custom/?key=%s&year=%s&country=0&genre=0" % ( item.infoLabels['originaltitle'].replace(" ", "+"), item.infoLabels["year"]) url = "https://nl.hideproxy.me/includes/process.php?action=update" post = "u=%s&proxy_formdata_server=nl&allowCookies=1&encodeURL=1&encodePage=0&stripObjects=0&stripJS=0&go=" % urllib.quote( post_url) while True: response = httptools.downloadpage(url, post, follow_redirects=False) if response.headers.get("location"): url = response.headers["location"] post = "" else: data_music = response.data break url_album = scrapertools.find_single_match(data_music, 'album(?:s|) on request.*?href="([^"]+)"') if url_album: url_album = "https://nl.hideproxy.me" + url_album itemlist.append( item.clone(action="musica_movie", title="Escuchar BSO - Lista de canciones", url=url_album, text_color=color5)) except: pass token_auth = config.get_setting("token_trakt", "tvmoviedb") if token_auth and ob_tmdb.result: itemlist.append(item.clone(title="[Trakt] Gestionar con tu cuenta", action="menu_trakt")) # Acciones si se configura cuenta en FA (Votar y 
añadir/quitar en listas) mivoto = scrapertools.find_single_match(data, 'bg-my-rating.*?>\s*(\d+)') itk = scrapertools.find_single_match(data, 'data-itk="([^"]+)"') folder = not config.is_xbmc() if mivoto: item.infoLabels["userrating"] = int(mivoto) new_item = item.clone(action="votar_fa", title="[FA] Mi voto: %s ---> ¿Cambiar?" % mivoto, itk=itk, voto=int(mivoto), folder=folder) new_item.infoLabels["duration"] = "" itemlist.append(new_item) else: if itk: new_item = item.clone(action="votar_fa", title="[FA] Votar %s" % title, itk=itk, accion="votar", folder=folder) new_item.infoLabels["duration"] = "" itemlist.append(new_item) if itk: itk = scrapertools.find_single_match(data, 'var itk="([^"]+)"') new_item = item.clone(action="acciones_fa", accion="lista_movie", itk=itk, title="[FA] Añadir o quitar de una lista de usuario") new_item.infoLabels["duration"] = "" itemlist.append(new_item) # Si pertenece a una saga/colección if ob_tmdb.result: itemlist.append(item.clone(title="", action="", infoLabels={})) if ob_tmdb.result.get("belongs_to_collection"): new_item = item.clone(infoLabels={'mediatype': item.contentType}, action="listado_tmdb", text_color=color5) saga = ob_tmdb.result["belongs_to_collection"] new_item.infoLabels["tmdb_id"] = saga["id"] if saga["poster_path"]: new_item.thumbnail = 'http://image.tmdb.org/t/p/original' + saga["poster_path"] if saga["backdrop_path"]: new_item.fanart = 'http://image.tmdb.org/t/p/original' + saga["backdrop_path"] new_item.search = {'url': 'collection/%s' % saga['id'], 'language': langt} new_item.title = "Es parte de: %s" % saga["name"] itemlist.append(new_item) itemlist.append(item.clone(title="%ss similares" % title.capitalize(), action="listado_tmdb", search={'url': '%s/%s/similar' % (item.extra, item.infoLabels['tmdb_id']), 'language': langt, 'page': 1}, infoLabels={'mediatype': item.contentType}, text_color=color2)) itemlist.append( item.clone(title="Recomendaciones", action="listado_tmdb", infoLabels={'mediatype': 
item.contentType}, search={'url': '%s/%s/recommendations' % (item.extra, item.infoLabels['tmdb_id']), 'language': langt, 'page': 1}, text_color=color2)) return itemlist def filtro_fa(item): logger.info() from datetime import datetime list_controls = [] valores = {} dict_values = None # Se utilizan los valores por defecto/guardados valores_guardados = config.get_setting("filtro_defecto_filmaf_" + item.extra, item.channel) if valores_guardados: dict_values = valores_guardados list_controls.append({'id': 'yearsdesde', 'label': 'Año desde:', 'enabled': True, 'type': 'list', 'default': -1, 'visible': True}) list_controls.append({'id': 'yearshasta', 'label': 'Año hasta:', 'enabled': True, 'type': 'list', 'default': -1, 'visible': True}) list_controls[0]['lvalues'] = [] list_controls[1]['lvalues'] = [] valores['years'] = [] year = datetime.now().year for i in range(1900, year + 1): list_controls[0]['lvalues'].append(str(i)) list_controls[1]['lvalues'].append(str(i)) valores['years'].append(str(i)) list_controls[0]['lvalues'].append('Cualquiera') list_controls[1]['lvalues'].append('Cualquiera') valores['years'].append('') data = httptools.downloadpage("http://m.filmaffinity.com/%s/topgen.php" % langf).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) if item.contentType == "movie": try: bloque = scrapertools.find_single_match(data, 'name="genre">.*?(.*?)') matches = scrapertools.find_multiple_matches(bloque, '') if matches: list_controls.append({'id': 'genero', 'label': 'Selecciona un género', 'enabled': True, 'type': 'list', 'default': -1, 'visible': True}) list_controls[2]['lvalues'] = [] list_controls[2]['lvalues'].append("Todos") valores['genero'] = [] valores['genero'].append('') for valor, titulo in matches: if valor == "TV_SE": continue list_controls[2]['lvalues'].insert(0, titulo) valores['genero'].insert(0, valor) except: pass try: bloque = scrapertools.find_single_match(data, 'name="country">.*?(.*?)') matches = 
scrapertools.find_multiple_matches(bloque, '') if matches: list_controls.append({'id': 'pais', 'label': 'País', 'enabled': True, 'type': 'list', 'default': -1, 'visible': True}) list_controls[-1]['lvalues'] = [] list_controls[-1]['lvalues'].append('Todos') valores['pais'] = [] valores['pais'].append('') for valor, titulo in matches: list_controls[-1]['lvalues'].insert(0, titulo) valores['pais'].insert(0, valor) except: pass list_controls.append({'id': 'espacio', 'label': '', 'enabled': False, 'type': 'label', 'default': None, 'visible': True}) list_controls.append({'id': 'save', 'label': 'Establecer como filtro por defecto', 'enabled': True, 'type': 'bool', 'default': False, 'visible': True}) item.valores = valores return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values, caption="Filtra la búsqueda", item=item, callback='filtrado_fa') def filtrado_fa(item, values): values_copy = values.copy() # Guarda el filtro para que sea el que se cargue por defecto if "save" in values and values["save"]: values_copy.pop("save") config.set_setting("filtro_defecto_filmaf_" + item.extra, values_copy, item.channel) yeard = item.valores["years"][values["yearsdesde"]] yearh = item.valores["years"][values["yearshasta"]] pais = item.valores["pais"][values["pais"]] if item.contentType == "movie": genero = item.valores["genero"][values["genero"]] else: genero = "TV_SE" item.url = 'http://m.filmaffinity.com/%s/topgen.php?genre=%s&country=%s&fromyear=%s&toyear=%s&nodoc=1' \ % (langf, genero, pais, yeard, yearh) if item.contentType == "movie": item.url += "¬vse=1" item.action = "listado_fa" return listado_fa(item) def login_fa(): logger.info() try: user = config.get_setting("usuariofa", "tvmoviedb") password = config.get_setting("passfa", "tvmoviedb") userid = config.get_setting("userid", "tvmoviedb") if user == "" or password == "": return False, "Usuario y/o contraseñas no configurados" data = httptools.downloadpage("http://m.filmaffinity.com/%s" % 
def login_fa():
    """Log into Filmaffinity with the credentials stored in settings.

    Returns a (success, message) tuple; on success the numeric user id is
    cached in the "userid" setting for later account requests.
    """
    logger.info()
    try:
        user = config.get_setting("usuariofa", "tvmoviedb")
        password = config.get_setting("passfa", "tvmoviedb")
        userid = config.get_setting("userid", "tvmoviedb")
        if user == "" or password == "":
            return False, "Usuario y/o contraseñas no configurados"
        # An existing session cookie may already be valid — probe the portal first
        portal = httptools.downloadpage("http://m.filmaffinity.com/%s" % langf).data
        if "modal-menu-user" in portal and userid:
            return True, ""
        login_post = "postback=1&rp=&username=%s&password=%s&rememberme=on" % (user, password)
        respuesta = httptools.downloadpage(
            "https://m.filmaffinity.com/%s/account.ajax.php?action=login" % langf, login_post).data
        if "Invalid username" in respuesta:
            logger.error("Error en el login")
            return False, "Error en el usuario y/o contraseña. Comprueba tus credenciales"
        # Fetch the user menu template to scrape our numeric user id
        menu_post = "name=user-menu&url=http://m.filmaffinity.com/%s/main.php" % langf
        menu = httptools.downloadpage(
            "http://m.filmaffinity.com/%s/tpl.ajax.php?action=getTemplate" % langf, menu_post).data
        userid = scrapertools.find_single_match(menu, 'id-user=(\d+)')
        if userid:
            config.set_setting("userid", userid, "tvmoviedb")
        logger.info("Login correcto")
        return True, ""
    except:
        import traceback
        logger.error(traceback.format_exc())
        return False, "Error durante el login. Comprueba tus credenciales"


def cuenta_fa(item):
    """Filmaffinity account menu: own ratings and own lists."""
    itemlist = []
    login, message = login_fa()
    if not login:
        # Surface the login failure as a (non-clickable) red entry
        itemlist.append(item.clone(action="", title=message, text_color=color4))
        return itemlist
    userid = config.get_setting("userid", "tvmoviedb")
    itemlist.append(item.clone(
        action="acciones_fa", title="Mis votaciones", text_color=color5, accion="votos",
        url="http://m.filmaffinity.com/%s/user_ratings.php?id-user=%s" % (langf, userid)))
    itemlist.append(item.clone(
        action="acciones_fa", title="Mis listas", text_color=color5, accion="listas",
        url="http://m.filmaffinity.com/%s/mylists.php" % langf))
    return itemlist
    \s*\((\d+)\)
    .*?(?:
    ' \ '([^<]+)
    \s*(.*?)\s*<|
  • ).*?' matches = scrapertools.find_multiple_matches(bloque, patron) mivoto = scrapertools.find_single_match(bloque, 'bg-my-rating[^>]+>(?:\s*|)([^<]+)<') for url, thumb, title, year, rating, votos in matches: new_item = item.clone(action="detalles_fa", text_color=color1) if not url.startswith("http://m.filmaffinity"): new_item.url = "http://m.filmaffinity.com" + url else: new_item.url = url new_item.infoLabels["year"] = year rating = rating.replace(",", ".") new_item.infoLabels["rating"] = float(rating) new_item.infoLabels["votes"] = votos.replace(".", "") if mivoto.isdigit(): new_item.infoLabels["userrating"] = int(mivoto) new_item.thumbnail = thumb.replace("msmall", "large") if not new_item.thumbnail.startswith("http"): new_item.thumbnail = "http://m.filmaffinity.com" + new_item.thumbnail if re.search(r'(?i)serie de tv|\(tv\)', title): new_item.contentType = "tvshow" new_item.extra = "tv" new_item.title = title.strip() + " (%s) [COLOR %s]%s[/COLOR]/[COLOR %s]%s[/COLOR]" % ( year, color6, rating, color4, mivoto) new_item.contentTitle = title.strip() itemlist.append(new_item) elif item.accion == "listas": orderby = config.get_setting("orderfa", "tvmoviedb") data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) patron = 'list-group-item rip.*?href="([^"]+)".*?([^<]+).*?([^<]+)' \ '.*?(?:
    (.*?)|)' matches = scrapertools.find_multiple_matches(data, patron) for url, title, content, imgs in matches: new_item = item.clone(accion="lista", text_color=color1) if not url.startswith("http://m.filmaffinity.com"): new_item.url = "http://m.filmaffinity.com%s&orderby=%s" % (url, orderby) else: new_item.url = "%s&orderby=%s" % (url, orderby) new_item.title = title + " [COLOR %s](%s)[/COLOR]" % (color6, content) if imgs: imagenes = scrapertools.find_multiple_matches(imgs, 'data-src="([^"]+)"') from random import randint random = randint(0, len(imagenes) - 1) new_item.thumbnail = imagenes[random].replace("msmall", "large") itemlist.append(new_item) elif item.accion == "lista_movie": movieid = item.url.rsplit("=", 1)[1] url = "http://m.filmaffinity.com/%s/edtmovielists.php?movie_id=%s" % (langf, movieid) data = httptools.downloadpage(url).data patron = 'data-list-id="([^"]+)"(.*?)
    ([^<]+)<' matches = scrapertools.find_multiple_matches(data, patron) for listid, chequeo, title in matches: new_item = item.clone(folder=not config.is_xbmc()) new_item.infoLabels["duration"] = "" new_item.listid = listid if "checked" in chequeo: new_item.title = "[COLOR %s]%s[/COLOR] %s" % ("green", u"\u0474".encode('utf-8'), title) new_item.accion = "removeMovieFromList" else: new_item.title = "[COLOR %s]%s[/COLOR] %s" % (color4, u"\u04FE".encode('utf-8'), title) new_item.accion = "addMovieToList" itemlist.append(new_item) new_item = item.clone(action="newlist", title="Añadir una nueva lista", text_color=color6) new_item.infoLabels["duration"] = "" itemlist.append(new_item) else: url = "http://filmaffinity.com/%s/movieslist.ajax.php" % langf movieid = item.url.rsplit("=", 1)[1] post = "action=%s&listId=%s&movieId=%s&itk=%s" % (item.accion, item.listid, movieid, item.itk) data = jsontools.load(httptools.downloadpage(url, post).data) if not item.folder: import xbmc return xbmc.executebuiltin("Container.Refresh") else: if data["result"] == 0: title = "Acción completada con éxito" else: title = "Error, algo ha fallado durante el proceso" itemlist.append(item.clone(action="", title=title)) return itemlist def votar_fa(item): # Ventana para seleccionar el voto logger.info() list_controls = [] valores = {} dict_values = None if item.voto: dict_values = {'voto': item.voto} list_controls.append({'id': 'voto', 'label': 'Indica tu voto:', 'enabled': True, 'type': 'list', 'default': 0, 'visible': True}) list_controls[0]['lvalues'] = ['No vista'] valores['voto'] = ["-1"] for i in range(1, 11): list_controls[0]['lvalues'].append(str(i)) valores['voto'].append(i) item.valores = valores return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values, caption="Votar %s" % item.contentTitle, item=item, callback='callback_voto') def callback_voto(item, values): item.voto = item.valores["voto"][values["voto"]] item.action = "acciones_fa" movieid = 
def callback_voto(item, values):
    """Callback of the rating dialog: submit the chosen vote to Filmaffinity.

    On Kodi (item.folder False) it just refreshes the container; otherwise it
    returns a single status item describing the result.
    """
    # FIX: itemlist was used below without ever being initialized, which
    # raised NameError on the item.folder branch.
    itemlist = []
    item.voto = item.valores["voto"][values["voto"]]
    item.action = "acciones_fa"
    # The movie id is the last "=" separated token of the item url
    movieid = item.url.rsplit("=", 1)[1]
    post = "id=%s&rating=%s&itk=%s&action=rate" % (movieid, item.voto, item.itk)
    data = jsontools.load(httptools.downloadpage(
        "http://filmaffinity.com/%s/ratingajax.php" % langf, post).data)
    if not item.folder:
        import xbmc
        return xbmc.executebuiltin("Container.Refresh")
    else:
        if data["result"] == 0:
            title = "Voto contabilizado con éxito"
        else:
            title = "Error, algo ha fallado durante el proceso"
        itemlist.append(item.clone(action="", title=title))
    return itemlist
    ' \ '([^<]+)
    ]+>
    ' matches = scrapertools.find_multiple_matches(data, patron) for scrapedurl, title in matches: scrapedurl = "http://m.filmaffinity.com" + scrapedurl itemlist.append(item.clone(title=title, url=scrapedurl, accion="lista")) return itemlist ##-------------------- LISTADOS DE IMAGENES ------------------------## def imagenes(item): itemlist = [] if item.extra == "menu": item.folder = not config.is_xbmc() if "tmdb" in item.images: itemlist.append(item.clone(title="Tmdb", text_color=color2, extra="")) itemlist.append(item.clone(title="Fanart.Tv", text_color=color2, extra="")) if "imdb" in item.images: itemlist.append(item.clone(title="Imdb", text_color=color2, extra="")) if "filmaffinity" in item.images: itemlist.append(item.clone(title="Filmaffinity", text_color=color2, extra="")) if "myanimelist" in item.images: data = httptools.downloadpage(item.url + "/pics", cookies=False).data images = scrapertools.find_multiple_matches(data, '
    > Página Siguiente", text_color="", url=url)) else: data = jsontools.load(data) for entry in data: new_item = item.clone() new_item.title = entry["name"] + " [COLOR %s](%s)[/COLOR]" % (color6, entry["item_count"]) new_item.infoLabels["plot"] = entry.get("description") new_item.url = "users/me/lists/%s/items/?page=1&limit=20&extended=full" % entry["ids"]["trakt"] new_item.order = entry.get("sort_by") new_item.how = entry.get("sort_how") itemlist.append(new_item) return itemlist def order_list(item): logger.info() list_controls = [] valores1 = ['rank', 'added', 'title', 'released', 'runtime', 'popularity', 'percentage', 'votes'] valores2 = ['asc', 'desc'] dict_values = {'orderby': valores1.index(item.order), 'orderhow': valores2.index(item.how)} list_controls.append({'id': 'orderby', 'label': 'Ordenar por:', 'enabled': True, 'type': 'list', 'default': 0, 'visible': True}) list_controls.append({'id': 'orderhow', 'label': 'De forma:', 'enabled': True, 'type': 'list', 'default': 0, 'visible': True}) list_controls[0]['lvalues'] = ['Por defecto', 'Añadido', 'Título', 'Estreno', 'Duración', 'Popularidad', 'Valoración', 'Votos'] list_controls[1]['lvalues'] = ['Ascendente', 'Descendente'] return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values, caption="Filtra la búsqueda", item=item, callback='order_trakt') def order_trakt(item, values): valores1 = ['rank', 'added', 'title', 'released', 'runtime', 'popularity', 'percentage', 'votes'] valores2 = ['asc', 'desc'] orderby = valores1[values["orderby"]] item.order = orderby orderhow = valores2[values["orderhow"]] item.how = orderhow item.action = "acciones_trakt" return acciones_trakt(item) ##-------------------- SECCION MYANIMELIST ------------------------## def top_mal(item): # Para los menús principales de tops pelícuas/series/ovas itemlist = [] item.text_color = color1 data = httptools.downloadpage(item.url, cookies=False).data data = re.sub(r"\n|\r|\t| ", "", data) data = 
re.sub(r"\s{2}", " ", data) patron = '.*?href="([^"]+)".*?src="(.*?).jpg.*?' \ '
    .*?href.*?>([^<]+)<.*?').strip() item_tmdb = item.clone() if item.contentType == "movie": ob_tmdb = Tmdb(texto_buscado=item_tmdb.contentTitle, year=item_tmdb.infoLabels['year'], tipo=item_tmdb.extra, idioma_busqueda=langt) if not ob_tmdb.result and eng_title: ob_tmdb = Tmdb(texto_buscado=eng_title, year=item_tmdb.infoLabels['year'], tipo=item_tmdb.extra, idioma_busqueda=langt) if not ob_tmdb.result and ("Special (" in item.title or item.tipo == "special"): item_tmdb.extra = "tv" search = {'url': 'search/tv', 'language': langt, 'query': item_tmdb.contentTitle, 'first_air_date': item_tmdb.infoLabels["year"]} ob_tmdb = Tmdb(discover=search, tipo=item_tmdb.extra, idioma_busqueda=langt) else: search = {'url': 'search/tv', 'language': langt, 'query': eng_title, 'first_air_date': item_tmdb.infoLabels["year"]} ob_tmdb = Tmdb(discover=search, tipo=item_tmdb.extra, idioma_busqueda=langt) if not ob_tmdb.result and eng_title: search['query'] = eng_title ob_tmdb = Tmdb(discover=search, tipo=item_tmdb.extra, idioma_busqueda=langt) if not ob_tmdb.result and ("OVA (" in item.title or item.tipo == "ova"): item_tmdb.extra = "movie" ob_tmdb = Tmdb(texto_buscado=item_tmdb.contentTitle, tipo=item_tmdb.extra, idioma_busqueda=langt, year=item_tmdb.infoLabels['year']) if ob_tmdb.result: ob_tmdb = Tmdb(id_Tmdb=ob_tmdb.get_id(), tipo=item_tmdb.extra, idioma_busqueda=langt) item.infoLabels = ob_tmdb.get_infoLabels(item.infoLabels) # Se concatena sinopsis myanimelist con la de tmdb si la hubiese plot = scrapertools.find_single_match(data, '(.*?)') plot = plot.replace("
    ", "\n").replace("", "[I]").replace("", "[/I]") plot = scrapertools.decodeHtmlentities(plot) if plot and (item.infoLabels['plot'] and item.infoLabels['plot'] != plot): item.infoLabels['plot'] += " (TMDB)\n\n" + plot + " (MYANIMELIST)" elif plot and not item.infoLabels['plot']: item.infoLabels['plot'] = plot if not item.infoLabels['duration']: try: horas, min1, min2 = scrapertools.find_single_match(data, 'Duration:\s*(?:(\d+) hr\. (\d+) min|(\d+) min)') if horas: horas = int(horas) * 360 else: horas = 0 if not min1: min1 = min2 item.infoLabels['duration'] = horas + (int(min1) * 60) except: pass # Se sobreescribe la info de myanimelist sobre la de tmdb generos = scrapertools.find_single_match(data, 'Genres:(.*?)
    ') if generos: item.infoLabels['genre'] = scrapertools.htmlclean(generos) item.infoLabels['rating'] = float(rating) votos = scrapertools.find_single_match(data, '([^<]+)<') item.infoLabels['votes'] = votos.replace(",", "") if item.infoLabels['fanart']: item.fanart = item.infoLabels['fanart'] if item.infoLabels['thumbnail']: item.thumbnail = item.infoLabels['thumbnail'] if not item.thumbnail: item.thumbnail = scrapertools.find_single_match(data, '/pics">.*?" in data and config.is_xbmc(): itemlist.append( item.clone(title="[MAL] Añadir a tus listas/%s" % score, action="menu_mal", contentTitle=title_mal)) elif item.login and config.is_xbmc(): status = {'1': 'Viendo Actualmente', '2': 'Completados', '3': 'En pausa', '4': 'Descartados', '6': 'Previstos para ver'} estado = scrapertools.find_single_match(data, 'myinfo_updateInfo".*?option selected="selected" value="(\d+)"') try: estado = status[estado] itemlist.append( item.clone(title="[MAL] En tu lista de [COLOR %s]%s[/COLOR]. ¿Cambiar?/%s" % (color6, estado, score), action="menu_mal", contentTitle=title_mal)) except: pass token_auth = config.get_setting("token_trakt", "tvmoviedb") if token_auth and ob_tmdb.result: itemlist.append(item.clone(title="[Trakt] Gestionar con tu cuenta", action="menu_trakt")) # Se listan precuelas, secuelas y series alternativas prequel = scrapertools.find_single_match(data, 'Prequel:(.*?)') if prequel: matches = scrapertools.find_multiple_matches(prequel, 'href="([^"]+)">(.*?)') for url, title in matches: new_item = item.clone(infoLabels={'mediatype': item.contentType}, extra="", fanart=default_fan, thumbnail="") new_item.title = "Precuela: %s" % title new_item.contentTitle = title new_item.url = "https://myanimelist.net%s" % url itemlist.append(new_item) sequel = scrapertools.find_single_match(data, 'Sequel:(.*?)') if sequel: matches = scrapertools.find_multiple_matches(sequel, 'href="([^"]+)">(.*?)') for url, title in matches: new_item = item.clone(infoLabels={'mediatype': 
item.contentType}, extra="", fanart=default_fan, thumbnail="") new_item.title = "Secuela: %s" % title new_item.contentTitle = title new_item.url = "https://myanimelist.net%s" % url itemlist.append(new_item) alt_version = scrapertools.find_single_match(data, 'Alternative version:(.*?)') if alt_version: matches = scrapertools.find_multiple_matches(alt_version, 'href="([^"]+)">(.*?)') for url, title in matches: new_item = item.clone(infoLabels={'mediatype': item.contentType}, extra="", fanart=default_fan, thumbnail="") new_item.title = "Versión alternativa: %s" % title new_item.contentTitle = title new_item.url = "https://myanimelist.net%s" % url itemlist.append(new_item) if ob_tmdb.result: itemlist.append(item.clone(title="", action="", infoLabels={})) if ob_tmdb.result.get("belongs_to_collection"): new_item = item.clone(infoLabels={'mediatype': item.contentType}, action="listado_tmdb", text_color=color5) saga = ob_tmdb.result["belongs_to_collection"] new_item.infoLabels["tmdb_id"] = saga["id"] if saga["poster_path"]: new_item.thumbnail = 'http://image.tmdb.org/t/p/original' + saga["poster_path"] if saga["backdrop_path"]: new_item.fanart = 'http://image.tmdb.org/t/p/original' + saga["backdrop_path"] new_item.search = {'url': 'collection/%s' % saga['id'], 'language': langt} new_item.title = "Es parte de: %s" % saga["name"] itemlist.append(new_item) itemlist.append( item.clone(title="Recomendaciones TMDB", action="listado_tmdb", infoLabels={'mediatype': item.contentType}, search={'url': '%s/%s/recommendations' % (item.extra, item.infoLabels['tmdb_id']), 'language': langt, 'page': 1}, text_color=color2)) # Recomendaciones myanimelist y búsqueda de info en anidb (fansubs en español) itemlist.append(item.clone(title="Recomendaciones MyAnimeList", action="reco_mal")) anidb_link = scrapertools.find_single_match(data, '([^<]+)<(.*?)More') if next_page: itemlist.append(item.clone(title=">> Más Episodios", url=next_page, text_color="")) if itemlist: itemlist.insert(0, 
item.clone(title="Episodios", action="", text_color=color3)) patron = '([^<]+)<' matches = scrapertools.find_multiple_matches(data, patron) if matches: itemlist.append(item.clone(title="Tráilers/Promocionales", action="", text_color=color3)) for url, thumb, title in matches: url = url.replace("embed/", "watch?v=") itemlist.append( item.clone(title=title, url=url, server="youtube", action="play", thumbnail=thumb, text_color=color1)) return itemlist def reco_mal(item): # Recomendaciones de myanimelist itemlist = [] data = httptools.downloadpage(item.url + "/userrecs", cookies=False).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) bloque = scrapertools.find_single_match(data, '
    ') patron = '
    (.*?).*?' \ '
    (.*?)
    ' matches = scrapertools.find_multiple_matches(data, patron) for url, thumb, title, plot in matches: new_item = item.clone(infoLabels={'mediatype': item.contentType}, action="detalles_mal", fanart=default_fan, title=title, contentType="", extra="", contentTitle=title) new_item.infoLabels["plot"] = scrapertools.htmlclean(plot) new_item.url = "https://myanimelist.net%s" % url new_item.thumbnail = thumb.replace("r/50x70/", "").replace(".jpg", "l.jpg") itemlist.append(new_item) return itemlist def indices_mal(item): # Índices por temporadas y generos itemlist = [] url_base = "" if "Temporadas" in item.title: data = httptools.downloadpage("https://myanimelist.net/anime/season/archive", cookies=False).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) matches = scrapertools.find_multiple_matches(data, '\s*
    \s*(.*?)\s*') for url, title in matches: year = title.rsplit(" ", 1)[1] thumbnail = item.thumbnail if int(year) >= 1968: thumbnail = url_base % year title = title.replace("Winter", "Invierno").replace("Spring", "Primavera") \ .replace("Summer", "Verano").replace("Fall", "Otoño") itemlist.append(Item(channel=item.channel, action="season_mal", title=title, url=url, thumbnail=thumbnail, text_color=color1, info=True, fanart=thumbnail)) else: data = httptools.downloadpage("https://myanimelist.net/anime.php", cookies=False).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) bloque = scrapertools.find_single_match(data, 'Genres
    (.*?)View More') matches = scrapertools.find_multiple_matches(bloque, '(.*?)') for url, title in matches: genero = title.split(" (", 1)[0] thumbnail = url_base % genero.lower().replace(" ", "%20") if genero in ["Hentai", "Yaoi", "Yuri"] and not adult_mal: continue url = "https://myanimelist.net%s" % url itemlist.append(Item(channel=item.channel, action="season_mal", title=title, url=url, thumbnail=thumbnail, fanart=thumbnail, text_color=color1)) return itemlist def season_mal(item): # Scraper para temporadas de anime itemlist = [] cookie_session = get_cookie_value() header_mal = {'Cookie': '%s search_sort_anime=score; search_view=tile; is_logged_in=1' % cookie_session} data = httptools.downloadpage(item.url, headers=header_mal, cookies=False).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) if item.info: patron = '
    ([^<]+)
    (.*?)
    \s*
    ' bloques = scrapertools.find_multiple_matches(data, patron) for head_title, bloque in bloques: head_title = head_title.replace("(New)", "(Nuevos)").replace("(Continuing)", "(Continuación)") patron = '(.*?).*?(\? ep|\d+ ep).*?' \ '
    (.*?)
    .*?
    (.*?).*?
    \s*(.*?)\s*-.*?(\d{4}).*?' \ 'title="Score">\s*(N/A|\d\.\d+)' matches = scrapertools.find_multiple_matches(bloque, patron) if matches: itemlist.append(Item(channel=item.channel, action="", title=head_title, text_color=color3)) for url, scrapedtitle, epis, generos, thumb, plot, tipo, year, score in matches: if ("Hentai" in generos or "Yaoi" in generos or "Yuri" in generos) and adult_mal: continue scrapedtitle = scrapedtitle.replace("(TV)", "").replace("(Movie)", "") if tipo == "Movie": title = scrapedtitle + " (%s)" % year else: title = scrapedtitle + " %ss (%s)" % (epis, year) infoLabels = {} if score != "N/A": title += " [COLOR %s]%s[COLOR]" % (color6, score) infoLabels["rating"] = float(score) infoLabels["plot"] = scrapertools.htmlclean(plot) infoLabels["year"] = year genres = scrapertools.find_multiple_matches(generos, 'title="([^"]+)"') infoLabels["genre"] = ", ".join(genres) tipo = tipo.lower() if tipo == "movie" or tipo == "special": extra = "movie" contentType = "movie" else: extra = "tv" contentType = "tvshow" thumb = thumb.replace("r/167x242/", "") + "l.jpg" itemlist.append(Item(channel=item.channel, action="detalles_mal", url=url, title=title, thumbnail=thumb, infoLabels=infoLabels, extra=extra, tipo=tipo, contentTitle=scrapedtitle, contentType=contentType, text_color=color1, fanart=default_fan)) else: patron = '(.*?).*?(\? ep|\d+ ep).*?' \ '
    (.*?)
    .*?
    (.*?).*?
    \s*(.*?)\s*-.*?(\d{4}).*?' \ 'title="Score">\s*(N/A|\d\.\d+)' matches = scrapertools.find_multiple_matches(data, patron) for url, scrapedtitle, epis, generos, thumb, plot, tipo, year, score in matches: if ("Hentai" in generos or "Yaoi" in generos or "Yuri" in generos) and not adult_mal: continue scrapedtitle = scrapedtitle.replace("(TV)", "").replace("(Movie)", "") if tipo == "Movie": title = scrapedtitle + " (%s)" % year else: title = scrapedtitle + " %ss (%s)" % (epis, year) infoLabels = {} if score != "N/A": title += " [COLOR %s]%s[COLOR]" % (color6, score) infoLabels["rating"] = float(score) infoLabels["plot"] = scrapertools.htmlclean(plot) infoLabels["year"] = year genres = scrapertools.find_multiple_matches(generos, 'title="([^"]+)"') infoLabels["genre"] = ", ".join(genres) tipo = tipo.lower() if tipo == "movie" or tipo == "special": extra = "movie" contentType = "movie" else: extra = "tv" contentType = "tvshow" thumb = thumb.replace("r/167x242/", "") + "l.jpg" itemlist.append(Item(channel=item.channel, action="detalles_mal", url=url, title=title, thumbnail=thumb, infoLabels=infoLabels, extra=extra, tipo=tipo, contentTitle=scrapedtitle, contentType=contentType, text_color=color1, fanart=default_fan)) next_page = scrapertools.find_single_match(data, '' matches = scrapertools.find_multiple_matches(data, patron) if matches: itemlist.append(item.clone(title="Personajes/Dobladores", action="", text_color=color3)) for url, thumb, nombre, rol, voces in matches: url = "https://myanimelist.net%s" % url rol = rol.replace("Main", "Principal").replace("Supporting", "Secundario") nombre = " %s [%s]" % (nombre, rol) thumb = thumb.replace("r/46x64/", "") itemlist.append(Item(channel=item.channel, action="detail_staff", url=url, text_color=color2, thumbnail=thumb, fanart=default_fan, title=nombre, extra="character")) patron_voces = '([^<]+)<.*?([^<]+).*?data-src="([^"]+)"' voces_match = scrapertools.find_multiple_matches(voces, patron_voces) for vurl, vnombre, vidioma, 
vthumb in voces_match: vurl = "https://myanimelist.net%s" % vurl vnombre = " %s [%s]" % (vnombre, vidioma) vthumb = vthumb.replace("r/46x64/", "") itemlist.append(Item(channel=item.channel, action="detail_staff", url=vurl, text_color=color1, thumbnail=vthumb, fanart=default_fan, title=vnombre)) bloque = scrapertools.find_single_match(data, '(.*?)') patron = '([^<]+)<.*?([^<]+)' matches = scrapertools.find_multiple_matches(bloque, patron) if matches: itemlist.append(item.clone(title="Staff", action="", text_color=color3)) for url, thumb, nombre, rol in matches: url = "https://myanimelist.net%s" % url nombre = " %s [%s]" % (nombre, rol) thumb = thumb.replace("r/46x64/", "") itemlist.append(Item(channel=item.channel, action="detail_staff", url=url, text_color=color1, thumbnail=thumb, fanart=default_fan, title=nombre)) return itemlist def detail_staff(item): itemlist = [] data = httptools.downloadpage(item.url, cookies=False).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) if item.extra == "character" and not "No biography written" in data: bio = scrapertools.find_single_match(data, 'itemprop="name">.*?
    (.*?)
    ", "\n") bio = scrapertools.htmlclean(bio) if not "questionmark" in item.thumbnail: data_img = httptools.downloadpage(item.url + "/pictures", cookies=False).data matches = scrapertools.find_multiple_matches(data_img, 'rel="gallery-character">(.*?)') if matches: itemlist.append(Item(channel=item.channel, title="Animes donde aparece:", action="", text_color=color3)) for url, thumb, title in matches: url = "https://myanimelist.net%s" % url thumb = thumb.replace("r/23x32/", "") itemlist.append(Item(channel=item.channel, action="detalles_mal", url=url, text_color=color1, thumbnail=thumb, fanart=default_fan, title=title, contentTitle=title)) else: patron_bio = '
    .*?
    (.*?)' bio = scrapertools.find_single_match(data, patron_bio) bio = scrapertools.htmlclean(bio.replace("
    ", "\n")) infoLabels = {'plot': bio} if not "No voice acting roles" in data: itemlist.append(Item(channel=item.channel, title="Da voz a/en:", action="", text_color=color3, thumbnail=item.thumbnail, infoLabels=infoLabels)) bloque = scrapertools.find_single_match(data, 'Voice Acting Roles
    (.*?)') patron = '(.*?).*?href="(/character[^"]+)".*?' \ '>(.*?).*?data-src="([^"]+)"' matches = scrapertools.find_multiple_matches(bloque, patron) for url, thumb, title, url_p, personaje, thumb_p in matches: url = "https://myanimelist.net%s" % url url_p = "https://myanimelist.net%s" % url_p thumb = thumb.replace("r/46x64/", "") thumb_p = thumb_p.replace("r/46x64/", "") itemlist.append(Item(channel=item.channel, action="detalles_mal", url=url, text_color=color2, thumbnail=thumb, fanart=default_fan, title=title, contentTitle=title)) itemlist.append(Item(channel=item.channel, action="detail_staff", url=url_p, text_color=color1, thumbnail=thumb_p, fanart=default_fan, title=" %s" % personaje, extra="character")) if not "No staff positions" in data: itemlist.append(Item(channel=item.channel, title="Staff en animes:", action="", text_color=color3, thumbnail=item.thumbnail, infoLabels=infoLabels)) bloque = scrapertools.find_single_match(data, 'Anime Staff Positions
    (.*?)') patron = '(.*?).*?(.*?)
    ' matches = scrapertools.find_multiple_matches(bloque, patron) for url, thumb, title, rol in matches: url = "https://myanimelist.net%s" % url thumb = thumb.replace("r/46x64/", "") rol = scrapertools.htmlclean(rol) titulo = "%s [COLOR %s][%s][/COLOR]" % (title, color6, rol) itemlist.append(Item(channel=item.channel, action="detalles_mal", url=url, text_color=color2, thumbnail=thumb, fanart=default_fan, title=titulo, contentTitle=title)) return itemlist def busqueda_mal(item): # Scraper para búsquedas en myanimelist itemlist = [] cookie_session = get_cookie_value() header_mal = {'Cookie': '%s search_sort_anime=score; search_view=tile; is_logged_in=1' % cookie_session} data = httptools.downloadpage(item.url, headers=header_mal, cookies=False).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) patron = '([^<]+)<.*?
    (.*?)<' \ '.*?(.*?).*?(.*?).*?(.*?).*?(.*?)' matches = scrapertools.find_multiple_matches(data, patron) for url, thumb, titulo, plot, tipo, epis, rating, date in matches: infolabels = {"mediatype": "tvshow"} contentType = "tvshow" extra = "tv" titulo = titulo.strip() tipo = tipo.strip() rating = rating.strip() epis = epis.strip() infolabels["plot"] = scrapertools.htmlclean(plot.strip()) thumb = thumb.replace("r/50x70/", "").replace(".jpg", "l.jpg") show = titulo contentitle = titulo title = titulo try: year = date.strip().rsplit("-", 1)[1] if year.isdigit(): if int(year) < 30: year = "20%s" % year else: year = "19%s" % year infolabels["year"] = year if not year in title: title += " (%s)" % year except: import traceback logger.error(traceback.format_exc()) if tipo == "Movie" or tipo == "OVA": infolabels["mediatype"] = "movie" contentType = "movie" extra = "movie" show = "" if epis and tipo != "Movie": title += " %s eps" % epis if rating != "0.00" and rating != "N/A": infolabels["rating"] = float(rating) title += " [COLOR %s]%s[/COLOR]" % (color6, rating) itemlist.append(Item(channel=item.channel, title=title, action="detalles_mal", url=url, show=show, thumbnail=thumb, infoLabels=infolabels, contentTitle=contentitle, text_color=color1, contentType=contentType, tipo=tipo.lower(), extra=extra)) if not "&show=" in item.url: next_page = item.url + "&show=50" else: pagina = int(item.url.rsplit("=", 1)[1]) next_page = item.url.replace("&show=%s" % str(pagina), "&show=%s" % str(pagina + 50)) check_page = next_page.replace("https://myanimelist.net/anime.php", "") if check_page in data: itemlist.append(item.clone(title=">> Página Siguiente", url=next_page, text_color="")) else: check_page = check_page.replace("[", "%5B").replace("]", "%5D") if check_page in data: itemlist.append(item.clone(title=">> Página Siguiente", url=next_page, text_color="")) return itemlist def info_anidb(item, itemlist, url): # Extrae info, puntuación y fansubs en anidb data = 
httptools.downloadpage(url).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) infoLabels = {'mediatype': item.contentType} plot = scrapertools.find_single_match(data, 'itemprop="description">(.*?)
    ') infoLabels["plot"] = scrapertools.htmlclean(plot) generos = scrapertools.find_multiple_matches(data, '
    (.*?)') for i, genero in enumerate(generos): generos[i] = genero.capitalize() infoLabels["genre"] = ", ".join(generos) rating = scrapertools.find_single_match(data, 'itemprop="ratingValue">(.*?)') try: infoLabels["rating"] = float(rating) except: pass infoLabels["votes"] = scrapertools.find_single_match(data, 'itemprop="ratingCount">(.*?)') thumbnail = scrapertools.find_single_match(data, '
    .*?src="([^"]+)"') if infoLabels: title = "Info en AniDB [COLOR %s]%s[/COLOR]" % (color6, rating) if re.search(r'(?:subtitle|audio) | language: spanish"', data): title += " - [COLOR %s]Fansubs en español:[/COLOR]" % color3 itemlist.append(Item(channel=item.channel, title=title, infoLabels=infoLabels, action="", thumbnail=thumbnail, text_color=color4)) if re.search(r'(?:subtitle|audio) | language: spanish"', data): epi_total = scrapertools.find_single_match(data, 'itemprop="numberOfEpisodes">([^<]+)') patron = '.*?title="([^"]+)">(.*?).*?>([^<]+).*?' \ '([^<]+).*?title="audio(.*?).*?' \ 'class="source" title="([^"]+)"' matches = scrapertools.find_multiple_matches(data, patron) for fansub, abrev, estado, epis, lang, source in matches: if not "spanish" in lang: continue title = " " + fansub if abrev != title: title += " [%s]" % abrev estado = estado.replace("complete", "Completa").replace("finished", "Terminada") \ .replace("stalled", "Pausa").replace("dropped", "Abandonada") title += " [COLOR %s](%s)[/COLOR] %s/%s [%s]" % (color6, estado, epis, epi_total, source) itemlist.append(Item(channel=item.channel, title=title, infoLabels=infoLabels, action="", thumbnail=thumbnail, text_color=color4)) def filtro_mal(item): logger.info() list_controls = [] valores = {} dict_values = None # Se utilizan los valores por defecto/guardados valores_guardados = config.get_setting("filtro_defecto_mal", item.channel) if valores_guardados: dict_values = valores_guardados list_controls.append({'id': 'keyword', 'label': 'Palabra Clave', 'enabled': True, 'type': 'text', 'default': '', 'visible': True}) list_controls.append({'id': 'tipo', 'label': 'Tipo', 'enabled': True, 'type': 'list', 'default': -1, 'visible': True}) list_controls[1]['lvalues'] = ['Especial', 'OVA', 'Película', 'Serie', 'Cualquiera'] valores["tipo"] = ['4', '2', '3', '1', '0'] list_controls.append({'id': 'valoracion', 'label': 'Valoración', 'enabled': True, 'type': 'list', 'default': -1, 'visible': True}) 
def callback_mal(item, values):
    """Translate the filter-dialog selections into a MAL search URL and run it."""
    values_copy = values.copy()
    # Persist the filter so it is preloaded the next time the dialog opens.
    if values.get("save"):
        values_copy.pop("save")
        config.set_setting("filtro_defecto_mal", values_copy, item.channel)
    # Collect the ticked genre checkboxes as genre[N]=id query fragments.
    genero_ids = []
    for key in values:
        if "genre" in key and values[key]:
            genero_ids.append("genre[%s]=%s" % (len(genero_ids), key.replace('genre', '')))
    genero_ids = "&".join(genero_ids)
    query = values["keyword"].replace(" ", "%20")
    # The list controls return an index; map it back to the MAL query code
    # stored by filtro_mal in item.valores.
    tipo = item.valores["tipo"][values["tipo"]]
    valoracion = item.valores["valoracion"][values["valoracion"]]
    estado = item.valores["estado"][values["estado"]]
    item.url = "https://myanimelist.net/anime.php?q=%s&type=%s&score=%s&status=%s" \
               "&p=0&r=0&sm=0&sd=0&sy=0&em=0&ed=0&ey=0&c[0]=a&c[1]=b&c[2]=c&c[3]=d&c[4]=f&gx=0" \
               % (query, tipo, valoracion, estado)
    if genero_ids:
        item.url += "&" + genero_ids
    item.action = "busqueda_mal"
    return busqueda_mal(item)
def musica_anime(item):
    """List the animes and songs matching the searched title (freeanimemusic.org)."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage("http://www.freeanimemusic.org/song_search.php",
                                  post=item.post).data
    patron = "(\d+).*?([^<]+)<.*?([^<]+)<" \
             ".*?href='http://www.freeanimemusic.org/anime/([^/]+)/index.php\?var=(\d+)"
    matches = scrapertools.find_multiple_matches(data, patron)
    # On Kodi the per-anime header rows get the "move" action (focus jump).
    action = "move" if config.is_xbmc() else ""
    animes = {}
    for number, song, anime, id_anime, id_song in matches:
        songs = animes.setdefault(anime, [])
        if not songs:
            # First song of this anime: insert a header entry. number="0" and
            # url="" keep it on top after the sort below.
            songs.append(Item(channel=item.channel, action=action,
                              title="[COLOR %s][%s][/COLOR]" % (color6, anime.capitalize()),
                              url="", number="0", thumbnail=item.thumbnail,
                              fanart=item.fanart))
        songs.append(Item(channel=item.channel, action="play",
                          title="%s - %s" % (number, song), server="directo",
                          url=id_anime, song=id_song, number=number,
                          thumbnail=item.thumbnail, fanart=item.fanart,
                          text_color=color5))
    for anime, entries in sorted(animes.items()):
        entries.sort(key=lambda it: (it.url, int(it.number)))
        for entry in entries:
            if entry.action == "move":
                # The header's "move" shortcut needs the group size as offset.
                entry.extra = len(entries)
                entry.folder = False
            itemlist.append(entry)
    return itemlist
def login_mal(from_list=False):
    """Log into MyAnimeList.

    from_list -- when True, do not fall back to the bundled generic account.

    Returns a (logged_in, error_message, user) 3-tuple. Callers always
    unpack three values, e.g. ``login, message, user = login_mal(True)``.
    """
    logger.info()
    try:
        user = config.get_setting("usuariomal", "tvmoviedb")
        password = config.get_setting("passmal", "tvmoviedb")
        generic = False
        if not user or not password:
            if not from_list:
                # Fall back to the bundled generic account.
                user = bdec("Y3VlbnRhdHZtb3ZpZWRi")
                password = bdec("dFlTakE3ekYzbng1")
                generic = True
            else:
                return False, "Usuario y/o contraseña de Myanimelist en blanco", user
        data = httptools.downloadpage("https://myanimelist.net/login.php?from=%2F").data
        # Escape the user name: it is interpolated into a regex pattern and
        # could otherwise contain metacharacters.
        user_pattern = r'(?i)' + re.escape(user)
        if re.search(user_pattern, data) and not generic:
            # The existing session cookie already belongs to this user.
            return True, "", user
        token = scrapertools.find_single_match(data, "name='csrf_token' content='([^']+)'")
        # Drop any previous session before logging in again.
        httptools.downloadpage("https://myanimelist.net/logout.php", post="csrf_token=%s" % token)
        post = "user_name=%s&password=%s&cookie=1&sublogin=Login&submit=1&csrf_token=%s" % (
            user, password, token)
        response = httptools.downloadpage("https://myanimelist.net/login.php?from=%2F", post=post)
        if not re.search(user_pattern, response.data):
            logger.error("Error en el login")
            return False, "Error en el usuario y/o contraseña. Comprueba tus credenciales", user
        else:
            if generic:
                # Generic account works, but report it so the user sets their own.
                return False, "Usuario y/o contraseña de Myanimelist en blanco", user
            logger.info("Login correcto")
            return True, "", user
    except Exception:
        import traceback
        logger.error(traceback.format_exc())
        # Bug fix: the original returned a 2-tuple here, which made every
        # caller's 3-way unpack raise ValueError on any login error.
        return False, "Error durante el login. Comprueba tus credenciales", ""
def cuenta_mal(item):
    """MyAnimeList account menu: one entry per personal list status."""
    itemlist = []
    login, message, user = login_mal(True)
    if not login:
        itemlist.append(item.clone(action="configuracion", title=message, text_color=color4))
    else:
        # (MAL status code, menu label) for each personal list.
        listas = [("1", "Viendo actualmente"), ("2", "Completados"), ("3", "En pausa"),
                  ("4", "Descartados"), ("6", "Ver más adelante")]
        for status, titulo in listas:
            itemlist.append(item.clone(action="items_mal", title=titulo, text_color=color5,
                                       accion="lista_mal",
                                       url="https://myanimelist.net/animelist/%s?status=%s"
                                           % (user, status),
                                       login=True))
    return itemlist


def items_mal(item):
    """Scraper for the user's personal MAL lists.

    The list page embeds its entries as a JSON-like blob in a
    data-items attribute; each entry is rendered as one menu item.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # NOTE(review): the HTML entities below were reconstructed from a garbled
    # source dump — confirm against the original file.
    data = re.sub(r"\n|\r|\t|&nbsp;", "", data)
    data = re.sub(r"\s{2}", " ", data)
    data_items = scrapertools.find_single_match(data, 'data-items="([^"]+)"')
    data_items = data_items.replace("&quot;", "'").replace("null", "None") \
        .replace("false", "False").replace("true", "True")
    # SECURITY: eval() runs page-controlled text; a crafted page could execute
    # arbitrary code. Consider ast.literal_eval instead.
    data_items = eval(data_items)
    for d in data_items:
        # Bug fix: the original used two independent "if"s, so status 1 was
        # always overwritten by the final "else" and shown as "[P]".
        if d["anime_airing_status"] == 1:
            title = "[E]"  # airing
        elif d["anime_airing_status"] == 2:
            title = "[F]"  # finished
        else:
            title = "[P]"  # upcoming
        title += " %s [COLOR %s][%s/%s][/COLOR] (%s)" % (
            d["anime_title"], color6, d["num_watched_episodes"], d["anime_num_episodes"],
            d["anime_media_type_string"])
        title = title.replace("\\", "")
        contentTitle = d["anime_title"].replace("\\", "")
        # Swap the 96x136 thumb path for the large poster variant.
        thumbnail = d["anime_image_path"].replace("\\", "").replace("r/96x136/", "") \
            .replace(".jpg", "l.jpg")
        url = "https://myanimelist.net" + d["anime_url"].replace("\\", "")
        if d["score"] != 0:
            title += " [COLOR %s]Punt:%s[/COLOR]" % (color4, d["score"])
        # Drop a duplicated media tag when the title itself already carries it.
        if title.count("(TV)") == 2:
            title = title.replace("] (TV)", "]")
        elif title.count("(Movie)") == 2:
            title = title.replace("] (Movie)", "]")
        tipo = "tvshow"
        extra = "tv"
        if "Movie" in d["anime_media_type_string"]:
            tipo = "movie"
            extra = "movie"
        itemlist.append(Item(channel=item.channel, action="detalles_mal", url=url, title=title,
                             thumbnail=thumbnail, text_color=color3, contentTitle=contentTitle,
                             contentType=tipo, extra=extra, login=True))
    if itemlist:
        itemlist.insert(0, Item(channel=item.channel, action="",
                                title="E=En emisión | F=Finalizado | P=Próximamente"))
    return itemlist
def menu_mal(item):
    """MAL account options for one anime: add it to a personal list / rate it."""
    itemlist = []
    data = httptools.downloadpage(item.url).data
    status = {'1': 'Viendo Actualmente', '2': 'Completados', '3': 'En pausa',
              '4': 'Descartados', '6': 'Previstos para ver'}
    # Bug fix: pre-set a default so the rating loop below cannot hit a
    # NameError when the scrape in the try-block fails.
    estado = "1"
    try:
        button, estado = scrapertools.find_single_match(
            data, 'myinfo_updateInfo"(.*?)>.*?option selected="selected" value="(\d+)"')
        if "disabled" in button:
            # Anime not in any list yet; only generic actions are offered.
            title_estado = ". Acciones disponibles:"
            estado = "1"
        else:
            title_estado = ". En tu lista [COLOR %s]%s[/COLOR]" % (color6, status[estado])
    except Exception:
        title_estado = ". Acciones disponibles:"
        estado = "1"
    score = scrapertools.find_single_match(data, 'id="myinfo_score".*?selected" value="(\d+)"')
    # Bug fix: guard against a missing match — int("") would raise below.
    if not score:
        score = "0"
    if score != "0":
        title_estado += " (Punt:%s)" % score
    if "lista" in title_estado:
        # Marks the item so addlist_mal uses edit.json instead of add.json.
        item.lista = True
    itemlist.append(item.clone(title="Anime: %s%s" % (item.contentTitle, title_estado), action=""))
    # Offer every list the anime is not already in.
    for key, value in status.items():
        if not value in title_estado:
            itemlist.append(item.clone(title="Añadir a lista %s" % value, action="addlist_mal",
                                       text_color=color5, value=key, estado=value))
    # Offer every score except the current one, highest first.
    for i in range(10, 0, -1):
        if i != int(score):
            itemlist.append(item.clone(title="Puntuar con un [COLOR %s]%s[/COLOR]" % (color6, i),
                                       action="addlist_mal", value=estado, estado=status[estado],
                                       score=i))
    return itemlist
def addlist_mal(item):
    """Add or update the current anime in the user's MAL list and/or set a score.

    item.value holds the target status code, item.score the rating (0 = keep
    the one scraped from the page), item.lista whether the entry already exists.
    """
    data = httptools.downloadpage(item.url).data
    anime_id = scrapertools.find_single_match(data, 'id="myinfo_anime_id" value="([^"]+)"')
    if item.value == "2":
        # Status "2" (completed): take the first number after the counter —
        # presumably the episode total; confirm against the page markup.
        vistos = scrapertools.find_single_match(data, 'id="myinfo_watchedeps".*?(\d+)')
    else:
        vistos = scrapertools.find_single_match(data, 'id="myinfo_watchedeps".*?value="(\d+)"')
    if not item.score:
        item.score = scrapertools.find_single_match(data,
                                                    'id="myinfo_score".*?selected" value="(\d+)"')
    token = scrapertools.find_single_match(data, "name='csrf_token' content='([^']+)'")
    payload = {'anime_id': int(anime_id), 'status': int(item.value), 'score': int(item.score),
               'num_watched_episodes': int(vistos), 'csrf_token': token}
    headers_mal = {'User-Agent': 'Mozilla/5.0',
                   'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
                   'Referer': item.url, 'X-Requested-With': 'XMLHttpRequest'}
    # edit.json updates an existing list entry; add.json creates a new one.
    if item.lista:
        url = "https://myanimelist.net/ownlist/anime/edit.json"
    else:
        url = "https://myanimelist.net/ownlist/anime/add.json"
    httptools.downloadpage(url, post=jsontools.dump(payload), headers=headers_mal,
                           replace_headers=True)
    item.title = "En tu lista"
    if config.is_xbmc():
        import xbmc
        xbmc.executebuiltin("Container.Refresh")
def move(item):
    """Jump the Kodi list focus by item.extra entries (used by the song headers)."""
    import xbmcgui, xbmc
    offset = str(item.extra)
    window = xbmcgui.Window(xbmcgui.getCurrentWindowId())
    # Renamed from "id" to avoid shadowing the builtin.
    control_id = window.getFocusId()
    return xbmc.executebuiltin('Control.Move(' + str(control_id) + ',' + offset + ')')