From c942f9b4ec8f49aaa6b6a39697e429301bc890ad Mon Sep 17 00:00:00 2001 From: Alhaziel01 Date: Tue, 11 May 2021 18:56:11 +0200 Subject: [PATCH] Videoteca DB --- core/support.py | 14 +- core/tmdb.py | 435 +++--- core/videolibrarydb.py | 20 + core/videolibrarytools.py | 1255 ++++++----------- lib/generictools.py | 2 +- libraryscraper.py | 36 + platformcode/autorenumber.py | 23 +- platformcode/infoplus.py | 4 +- platformcode/launcher.py | 2 +- platformcode/platformtools.py | 4 +- platformcode/xbmc_info_window.py | 1 + platformcode/xbmc_videolibrary.py | 141 +- .../resource.language.en_gb/strings.po | 12 + .../resource.language.it_it/strings.po | 12 + resources/settings.xml | 1 + specials/downloads.py | 2 +- specials/globalsearch.py | 2 +- specials/videolibrary.py | 1111 ++++++++------- 18 files changed, 1514 insertions(+), 1563 deletions(-) create mode 100644 core/videolibrarydb.py create mode 100644 libraryscraper.py diff --git a/core/support.py b/core/support.py index e5af37cc..fd3d2e82 100755 --- a/core/support.py +++ b/core/support.py @@ -384,7 +384,7 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t args=item.args, contentSerieName= title if 'movie' not in [contentType] and function != 'episodios' else item.contentSerieName, contentTitle= title if 'movie' in [contentType] and function == 'peliculas' else item.contentTitle, - contentLanguage = lang1, + contentLanguage = lang1 if lang1 else item.contentLanguage, contentSeason= infolabels.get('season', ''), contentEpisodeNumber=infolabels.get('episode', ''), news= item.news if item.news else '', @@ -529,14 +529,14 @@ def scrape(func): nextArgs['groupExplode'] = False nextArgs['item'] = item itemlist = newFunc() - itemlist = [i for i in itemlist if i.action not in ['add_pelicula_to_library', 'add_serie_to_library']] + itemlist = [i for i in itemlist if i.action not in ['add_movie_to_library', 'add_serie_to_library']] if anime and inspect.stack()[1][3] not in ['find_episodes']: from platformcode import autorenumber if function == 'episodios': autorenumber.start(itemlist, item) else: autorenumber.start(itemlist) - if action != 'play' and 'patronMenu' not in args and not disabletmdb: # and function != 'episodios' and item.contentType in ['movie', 'tvshow', 'episode', 'undefined'] + if action != 'play' and 'patronMenu' not in args and not disabletmdb and inspect.stack()[1][3] not in ['add_tvshow'] or (function in ['episodios'] and config.get_setting('episode_info')): # and function != 'episodios' and item.contentType in ['movie', 'tvshow', 'episode', 'undefined'] tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) if not group and not args.get('groupExplode') and ((pagination and len(matches) <= pag * pagination) or not pagination): # next page with pagination @@ -568,7 +568,7 @@ def scrape(func): prevthumb=item.prevthumb if item.prevthumb else item.thumbnail)) - if inspect.stack()[1][3] not in ['find_episodes']: + if inspect.stack()[1][3] not in ['find_episodes', 'add_tvshow']: if addVideolibrary and (item.infoLabels["title"] or item.fulltitle): # item.fulltitle = item.infoLabels["title"] videolibrary(itemlist, item, function=function) @@ -1078,7 +1078,7 @@ def videolibrary(itemlist, item, typography='', function_level=1, function=''): logger.debug() if item.contentType == 'movie': - action = 'add_pelicula_to_library' + action = 'add_movie_to_library' extra = 'findvideos' contentType = 'movie' else: @@ -1203,9 +1203,11 @@ def server(item, data='', itemlist=[], headers='', AutoPlay=True, CheckLinks=Tru 
srv_param = servertools.get_server_parameters(videoitem.server.lower()) logger.debug(videoitem) if videoitem.video_urls or srv_param.get('active', False): - item.title = typo(item.contentTitle.strip(), 'bold') if item.contentType == 'movie' or (config.get_localized_string(30161) in item.title) else item.title + # dbg() + item.title = typo(item.contentTitle.strip(), 'bold') if item.contentType == 'movie' and item.contentTitle or (config.get_localized_string(30161) in item.title) else item.title quality = videoitem.quality if videoitem.quality else item.quality if item.quality else '' + videoitem.contentLanguage = videoitem.contentLanguage if videoitem.contentLanguage else item.contentLanguage videoitem.title = (item.title if item.channel not in ['url'] else '')\ + (typo(videoitem.title, '_ color kod [] bold') if videoitem.title else "")\ + (typo(videoitem.quality, '_ color kod []') if videoitem.quality else "")\ diff --git a/core/tmdb.py b/core/tmdb.py index 4d3f14a0..6b7d3d3d 100644 --- a/core/tmdb.py +++ b/core/tmdb.py @@ -7,22 +7,23 @@ if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int if PY3: import urllib.parse as urllib # It is very slow in PY2. In PY3 it is native + from concurrent import futures else: import urllib # We use the native of PY2 which is faster + from concurrent_py2 import futures from future.builtins import range from future.builtins import object import ast, copy, re, time -from core import filetools, httptools, jsontools, scrapertools +from core import filetools, httptools, jsontools, scrapertools, support from core.item import InfoLabels from platformcode import config, logger, platformtools import threading info_language = ["de", "en", "es", "fr", "it", "pt"] # from videolibrary.json def_lang = info_language[config.get_setting("info_language", "videolibrary")] -lock = threading.Lock() host = 'https://api.themoviedb.org/3' api = 'a1ab8b8669da03637a4b98fa39c39228' @@ -201,45 +202,35 @@ def set_infoLabels_itemlist(item_list, seekTmdb=False, search_language=def_lang, if not config.get_setting('tmdb_active') and not forced: return - # threads_num = config.get_setting("tmdb_threads", default=20) - # semaphore = threading.Semaphore(threads_num) + r_list = list() - i = 0 - l_thread = list() def sub_thread(_item, _i, _seekTmdb): - # semaphore.acquire() ret = 0 try: - ret = set_infoLabels_item(_item, _seekTmdb, search_language, lock) + ret = set_infoLabels_item(_item, _seekTmdb, search_language) except: import traceback logger.error(traceback.format_exc(1)) - if lock and lock.locked(): - lock.release() - # logger.debug(str(ret) + "item: " + _item.tostring()) - # semaphore.release() - r_list.append((_i, _item, ret)) - for item in item_list: - sub_thread(item, i, seekTmdb) - # t = threading.Thread(target=sub_thread, args=(item, i, seekTmdb)) - # t.start() - # i += 1 - # l_thread.append(t) + return (_i, _item, ret) + + # for i, item in enumerate(item_list): + # r_list.append(sub_thread(item, i, seekTmdb)) + with futures.ThreadPoolExecutor() as executor: + searchList = [executor.submit(sub_thread, item, i, seekTmdb) for i, item in enumerate(item_list)] + for res in futures.as_completed(searchList): + r_list.append(res.result()) - # wait for all the threads to end - for x in l_thread: - x.join() # Sort results list by call order to keep the same order q item_list r_list.sort(key=lambda i: i[0]) # Rebuild and return list only with results of individual calls - return [ii[2] for ii in r_list] + return [it[2] for it in r_list] -def 
set_infoLabels_item(item, seekTmdb=True, search_language=def_lang, lock=None): +def set_infoLabels_item(item, seekTmdb=True, search_language=def_lang): """ Gets and sets (item.infoLabels) the extra data of a series, chapter or movie. @@ -249,7 +240,6 @@ def set_infoLabels_item(item, seekTmdb=True, search_language=def_lang, lock=None @type seekTmdb: bool @param search_language: Language code according to ISO 639-1, in case of search at www.themoviedb.org. @type search_language: str - @param lock: For use of threads when calling the 'set_infoLabels_itemlist' method @return: A number whose absolute value represents the number of elements included in the item.infoLabels attribute. This number will be positive if the data has been obtained from www.themoviedb.org and negative otherwise. @rtype: int """ @@ -257,7 +247,7 @@ def set_infoLabels_item(item, seekTmdb=True, search_language=def_lang, lock=None def read_data(otmdb_aux): infoLabels = otmdb_aux.get_infoLabels(item.infoLabels) - if not infoLabels['plot']: infoLabels['plot'] = otmdb_aux.get_plot('en-US') + if not infoLabels['plot']: infoLabels['plot'] = otmdb_aux.get_plot('en') item.infoLabels = infoLabels if item.infoLabels.get('thumbnail'): item.thumbnail = item.infoLabels['thumbnail'] @@ -273,9 +263,6 @@ def set_infoLabels_item(item, seekTmdb=True, search_language=def_lang, lock=None logger.debug("The season number is not valid.") return -1 * len(item.infoLabels) - if lock: - lock.acquire() - if not otmdb_global or (item.infoLabels['tmdb_id'] and str(otmdb_global.result.get("id")) != item.infoLabels['tmdb_id']) \ or (otmdb_global.searched_text and otmdb_global.searched_text != item.infoLabels['tvshowtitle']): if item.infoLabels['tmdb_id']: @@ -287,10 +274,6 @@ def set_infoLabels_item(item, seekTmdb=True, search_language=def_lang, lock=None read_data(otmdb_global) - # 4l3x87 - fix for overlap infoLabels if there is episode or season - # if lock and lock.locked(): - # lock.release() - if item.infoLabels['episode']: try: ep = int(item.infoLabels['episode']) @@ -319,10 +302,12 @@ def set_infoLabels_item(item, seekTmdb=True, search_language=def_lang, lock=None item.infoLabels['rating'] = episode['episode_vote_average'] if episode.get('episode_vote_count'): item.infoLabels['votes'] = episode['episode_vote_count'] - - # 4l3x87 - fix for overlap infoLabels if there is episode or season - if lock and lock.locked(): - lock.release() + if episode.get('episode_id'): + item.infoLabels['episode_id'] = episode['episode_id'] + if episode.get('episode_imdb_id'): + item.infoLabels['episode_imdb_id'] = episode['episode_imdb_id'] + if episode.get('episode_tvdb_id'): + item.infoLabels['episode_tvdb_id'] = episode['episode_tvdb_id'] return len(item.infoLabels) @@ -331,11 +316,11 @@ def set_infoLabels_item(item, seekTmdb=True, search_language=def_lang, lock=None # ... 
search season data item.infoLabels['mediatype'] = 'season' season = otmdb_global.get_season(seasonNumber) - enseason = otmdb_global.get_season(seasonNumber, language='en-US') + # enseason = otmdb_global.get_season(seasonNumber, language='en') if not isinstance(season, dict): season = ast.literal_eval(season.decode('utf-8')) - if not isinstance(enseason, dict): - enseason = ast.literal_eval(enseason.decode('utf-8')) + # if not isinstance(enseason, dict): + # enseason = ast.literal_eval(enseason.decode('utf-8')) if season: # Update data @@ -344,33 +329,26 @@ def set_infoLabels_item(item, seekTmdb=True, search_language=def_lang, lock=None seasonPlot = season.get("overview" , '') seasonDate = season.get("air_date", '') seasonPoster = season.get('poster_path', '') + seasonPosters = [] + for image in season['images']['posters']: + seasonPosters.append('https://image.tmdb.org/t/p/original' + image['file_path']) - seasonTitleEN = enseason.get("name", '') - seasonPlotEN = enseason.get("overview" , '') - seasonDateEN = enseason.get("air_date", '') - seasonPosterEN = enseason.get('poster_path', '') - - item.infoLabels['title'] = seasonTitle if seasonTitle else seasonTitleEN if seasonTitleEN else config.get_localized_string(60027) % seasonNumber - item.infoLabels['plot'] = seasonPlot if seasonPlot else seasonPlotEN if seasonPlotEN else '' - date = seasonDate if seasonDate else seasonDateEN if seasonDateEN else '' + item.infoLabels['title'] = seasonTitle + item.infoLabels['plot'] = seasonPlot + date = seasonDate if date: date.split('-') item.infoLabels['aired'] = date[2] + "/" + date[1] + "/" + date[0] - poster = seasonPoster if seasonPoster else seasonPosterEN if seasonPosterEN else '' - if poster: - item.infoLabels['poster_path'] = 'https://image.tmdb.org/t/p/original' + poster - item.thumbnail = item.infoLabels['poster_path'] - # 4l3x87 - fix for overlap infoLabels if there is episode or season - if lock and lock.locked(): - lock.release() + if seasonPoster: + item.infoLabels['poster_path'] = 'https://image.tmdb.org/t/p/original' + seasonPoster + item.thumbnail = item.infoLabels['poster_path'] + if seasonPosters: + if seasonPoster: seasonPosters.insert(0, seasonPoster) + item.infoLabels['posters'] = seasonPosters return len(item.infoLabels) - # 4l3x87 - fix for overlap infoLabels if there is episode or season - if lock and lock.locked(): - lock.release() - # Search... 
else: otmdb = copy.copy(otmdb_global) @@ -427,9 +405,6 @@ def set_infoLabels_item(item, seekTmdb=True, search_language=def_lang, lock=None otmdb = Tmdb(id_Tmdb=otmdb.result.get("id"), search_type=search_type, search_language=search_language) - if lock and lock.locked(): - lock.release() - if otmdb is not None and otmdb.get_id(): # The search has found a valid result read_data(otmdb) @@ -524,7 +499,6 @@ def get_nfo(item, search_groups=False): @rtype: str @return: """ - # from core.support import dbg;dbg() if search_groups: from platformcode.autorenumber import RENUMBER, GROUP @@ -859,7 +833,7 @@ class Tmdb(object): self.search_text = re.sub('\[\\\?(B|I|COLOR)\s?[^\]]*\]', '', self.searched_text).strip() self.search_type = kwargs.get('search_type', '') self.search_language = kwargs.get('search_language', def_lang) - self.fallback_language = kwargs.get('search_language', 'en-US') + self.fallback_language = kwargs.get('search_language', 'en') # self.search_include_adult = kwargs.get('include_adult', False) self.search_year = kwargs.get('year', '') self.search_filter = kwargs.get('filtro', {}) @@ -958,14 +932,9 @@ class Tmdb(object): if self.search_id: if source == "tmdb": - # http://api.themoviedb.org/3/movie/1924?api_key=a1ab8b8669da03637a4b98fa39c39228&language=es - # &append_to_response=images,videos,external_ids,credits&include_image_language=es,null - # http://api.themoviedb.org/3/tv/1407?api_key=a1ab8b8669da03637a4b98fa39c39228&language=es - # &append_to_response=images,videos,external_ids,credits&include_image_language=es,null - url = ('{}/{}/{}?api_key={}&language={}&append_to_response=images,videos,external_ids,credits&include_image_language={},null'.format(host, self.search_type, self.search_id, api, self.search_language, self.search_language)) + url = ('{}/{}/{}?api_key={}&language={}&append_to_response=images,videos,external_ids,credits&include_image_language={},en,null'.format(host, self.search_type, self.search_id, api, self.search_language, self.search_language)) searching = "id_Tmdb: " + self.search_id else: - # http://api.themoviedb.org/3/find/%s?external_source=imdb_id&api_key=a1ab8b8669da03637a4b98fa39c39228 url = ('{}/find/{}?external_source={}&api_key={}8&language={}'.format(host, self.search_id, source, api, self.search_language)) searching = "{}: {}".format(source.capitalize(), self.search_id) @@ -1005,8 +974,6 @@ class Tmdb(object): searching = "" if self.search_text: - # http://api.themoviedb.org/3/search/movie?api_key=a1ab8b8669da03637a4b98fa39c39228&query=superman&language=es - # &include_adult=false&page=1 url = ('{}/search/{}?api_key={}&query={}&language={}&include_adult={}&page={}'.format(host, self.search_type, api, text_quote, self.search_language, True, page)) if self.search_year: @@ -1023,8 +990,6 @@ class Tmdb(object): if total_results > 0: results = [r for r in result["results"] if r.get('first_air_date', r.get('release_date', ''))] - # results = result["results"] - # logger.debug('RISULTATI', results) if self.search_filter and total_results > 1: for key, value in list(dict(self.search_filter).items()): @@ -1067,7 +1032,7 @@ class Tmdb(object): total_results = 0 total_pages = 0 - # Ejemplo self.discover: {'url': 'discover/movie', 'with_cast': '1'} + # Exampleself.discover: {'url': 'discover/movie', 'with_cast': '1'} # url: API method to run # rest of keys: Search parameters concatenated to the url type_search = self.discover.get('url', '') @@ -1076,7 +1041,7 @@ class Tmdb(object): for key, value in list(self.discover.items()): if key != "url": 
params.append(key + "=" + str(value)) - # http://api.themoviedb.org/3/discover/movie?api_key=a1ab8b8669da03637a4b98fa39c39228&query=superman&language=es + url = ('{}/{}?api_key={}&{}'.format(host, type_search, api, "&".join(params))) logger.debug("[Tmdb.py] Searching %s:\n%s" % (type_search, url)) @@ -1247,11 +1212,11 @@ class Tmdb(object): return ret - def get_poster(self, tipo_respuesta="str", size="original"): + def get_poster(self, response_type="str", size="original"): """ - @param tipo_respuesta: Data type returned by this method. Default "str" - @type tipo_respuesta: list, str + @param response_type: Data type returned by this method. Default "str" + @type response_type: list, str @param size: ("w45", "w92", "w154", "w185", "w300", "w342", "w500", "w600", "h632", "w780", "w1280", "original") Indicates the width (w) or height (h) of the image to download. Default "original" @return: If the response_type is "list" it returns a list with all the urls of the poster images of the specified size. @@ -1268,7 +1233,7 @@ class Tmdb(object): else: poster_path = 'https://image.tmdb.org/t/p/' + size + self.result["poster_path"] - if tipo_respuesta == 'str': + if response_type == 'str': return poster_path elif not self.result["id"]: return [] @@ -1293,11 +1258,11 @@ class Tmdb(object): return ret - def get_backdrop(self, tipo_respuesta="str", size="original"): + def get_backdrop(self, response_type="str", size="original"): """ Returns the images of type backdrop - @param tipo_respuesta: Data type returned by this method. Default "str" - @type tipo_respuesta: list, str + @param response_type: Data type returned by this method. Default "str" + @type response_type: list, str @param size: ("w45", "w92", "w154", "w185", "w300", "w342", "w500", "w600", "h632", "w780", "w1280", "original") Indicates the width (w) or height (h) of the image to download. 
Default "original" @type size: str @@ -1313,9 +1278,9 @@ class Tmdb(object): if self.result["backdrop_path"] is None or self.result["backdrop_path"] == "": backdrop_path = "" else: - backdrop_path = 'https://image.tmdb.org/t/p/' + size + self.result["backdrop_path"] + backdrop_path = 'get_posterget_poster' + size + self.result["backdrop_path"] - if tipo_respuesta == 'str': + if response_type == 'str': return backdrop_path elif self.result["id"] == "": return [] @@ -1363,14 +1328,20 @@ class Tmdb(object): # http://api.themoviedb.org/3/tv/1407/season/1?api_key=a1ab8b8669da03637a4b98fa39c39228&language=es& # append_to_response=credits - url = "{}/tv/{}/season/{}?api_key={}&language={}&append_to_response=credits".format(host, self.result["id"], seasonNumber, api, search_language) + url = "{}/tv/{}/season/{}?api_key={}&language={}&append_to_response=videos,images,credits,external_ids&include_image_language={},en,null".format(host, self.result["id"], seasonNumber, api, search_language, search_language) + fallbackUrl = "{}/tv/{}/season/{}?api_key={}&language=en&append_to_response=videos,images,credits&include_image_language={},en,null".format(host, self.result["id"], seasonNumber, api, search_language) logger.debug('TMDB URL', url) searching = "id_Tmdb: " + str(self.result["id"]) + " season: " + str(seasonNumber) + "\nURL: " + url logger.debug("[Tmdb.py] Searching " + searching) - + # from core.support import dbg;dbg() try: - self.season[seasonNumber] = self.get_json(url) + info = self.get_json(url) + if not language: + fallbackInfo = self.get_json(fallbackUrl) + self.season[seasonNumber] = parse_fallback_info(info, fallbackInfo) + else: + self.season[seasonNumber] = self.get_json(url) if not isinstance(self.season[seasonNumber], dict): self.season[seasonNumber] = ast.literal_eval(self.season[seasonNumber].decode('utf-8')) @@ -1389,6 +1360,29 @@ class Tmdb(object): return self.season[seasonNumber] + def get_collection(self, _id=''): + ret = {} + if not _id: + collection = self.result.get('belongs_to_collection', {}) + if collection: + _id = collection.get('id') + if _id: + url = '{}/collection/{}?api_key={}&language={}&append_to_response=images'.format(host, _id, api, self.search_language) + fallbackUrl = '{}/collection/{}?api_key={}&language=en&append_to_response=images'.format(host, _id, api) + info = self.get_json(url) + fallbackInfo = self.get_json(fallbackUrl) + ret['set'] = info.get('name') if info.get('name') else fallbackInfo.get('name') + ret['setoverview'] = info.get('overview') if info.get('overview') else fallbackInfo.get('overview') + posters = ['https://image.tmdb.org/t/p/original' + (info.get('poster_path') if info.get('poster_path') else fallbackInfo.get('poster_path'))] + fanarts = ['https://image.tmdb.org/t/p/original' + (info.get('backdrop_path') if info.get('backdrop_path') else fallbackInfo.get('backdrop_path'))] + for image in info['images']['posters'] + fallbackInfo['images']['posters']: + posters.append('https://image.tmdb.org/t/p/original' + image['file_path']) + for image in info['images']['backdrops'] + fallbackInfo['images']['backdrops']: + fanarts.append('https://image.tmdb.org/t/p/original' + image['file_path']) + ret['setposters'] = posters + ret['setfanarts'] = fanarts + return ret + def get_episode(self, seasonNumber=1, chapter=1): # -------------------------------------------------------------------------------------------------------------------------------------------- # Parameters: @@ -1408,92 +1402,62 @@ class Tmdb(object): try: chapter = int(chapter) - 
seasonNumber = int(seasonNumber) + season = int(seasonNumber) except ValueError: logger.debug("The episode or season number is not valid") return {} - season = self.get_season(seasonNumber) - enseason = self.get_season(seasonNumber, language='en-US') - if not isinstance(season, dict): - season = ast.literal_eval(season.decode('utf-8')) - if not isinstance(enseason, dict): - enseason = ast.literal_eval(enseason.decode('utf-8')) - if not season and not enseason: - # An error has occurred - return {} + # season = self.get_season(seasonNumber) + # # enseason = self.get_season(seasonNumber, language='en') + # # if not isinstance(season, dict): + # # season = ast.literal_eval(season.decode('utf-8')) + # # if not isinstance(enseason, dict): + # # enseason = ast.literal_eval(enseason.decode('utf-8')) + # if not season: + # # An error has occurred + # return {} - if len(season["episodes"]) == 0 and len(enseason["episodes"]) == 0: - # An error has occurred - logger.error("Episode %d of the season %d not found." % (chapter, seasonNumber)) - return {} + # if len(season["episodes"]) == 0: + # # An error has occurred + # logger.error("Episode %d of the season %d not found." % (chapter, seasonNumber)) + # return {} - elif len(season["episodes"]) < chapter and season["episodes"][-1]['episode_number'] >= chapter: - n = None - for i, chapters in enumerate(season["episodes"]): - if chapters['episode_number'] == chapter: - n = i + 1 - break - if n != None: - chapter = n - else: - logger.error("Episode %d of the season %d not found." % (chapter, seasonNumber)) - return {} + # elif len(season["episodes"]) < chapter and season["episodes"][-1]['episode_number'] >= chapter: + # n = None + # for i, chapters in enumerate(season["episodes"]): + # if chapters['episode_number'] == chapter: + # n = i + 1 + # break + # if n != None: + # chapter = n + # else: + # logger.error("Episode %d of the season %d not found." % (chapter, seasonNumber)) + # return {} - elif len(season["episodes"]) < chapter: - logger.error("Episode %d of the season %d not found." % (chapter, seasonNumber)) - return {} + # elif len(season["episodes"]) < chapter: + # logger.error("Episode %d of the season %d not found." 
% (chapter, seasonNumber)) + # return {} - ret_dic = dict() - # Get data for this season - seasonTitle = season.get("name", '') - seasonPlot = season.get("overview" , '') - seasonId = season.get("id", '') - seasonEpisodes = len(season.get("episodes",[])) - seasonDate = season.get("air_date", '') - seasonPoster = season.get('poster_path', '') - seasonCredits = season.get('credits', {}) + # ret_dic = get_season_dic(season) - seasonTitleEN = enseason.get("name", '') - seasonPlotEN = enseason.get("overview" , '') - seasonIdEN = enseason.get("id", '') - seasonEpisodesEN = len(enseason.get("episodes",[])) - seasonDateEN = enseason.get("air_date", '') - seasonPosterEN = enseason.get('poster_path', '') - seasonCreditsEN = enseason.get('credits', {}) + # if chapter == 0: + # # If we only look for season data, include the technical team that has intervened in any chapter + # dic_aux = dict((i['id'], i) for i in ret_dic["season_crew"]) + # for e in season["episodes"]: + # for crew in e['crew']: + # if crew['id'] not in list(dic_aux.keys()): + # dic_aux[crew['id']] = crew + # ret_dic["season_crew"] = list(dic_aux.values()) - ret_dic["season_title"] = seasonTitle if seasonTitle else seasonTitleEN if seasonTitleEN else config.get_localized_string(60027) % seasonNumber - ret_dic["season_plot"] = seasonPlot if seasonPlot else seasonPlotEN if seasonPlotEN else '' - ret_dic["season_id"] = seasonId if seasonId else seasonIdEN if seasonIdEN else '' - ret_dic["season_episodes_number"] = seasonEpisodes if seasonEpisodes else seasonEpisodesEN if seasonEpisodesEN else 0 - date = seasonDate if seasonDate else seasonDateEN if seasonDateEN else '' - if date: - date = date.split("-") - ret_dic["season_air_date"] = date[2] + "/" + date[1] + "/" + date[0] - else: - ret_dic["season_air_date"] = '' - poster = seasonPoster if seasonPoster else seasonPosterEN if seasonPosterEN else '' - if poster: - ret_dic["season_poster"] = 'https://image.tmdb.org/t/p/original' + poster - else: - ret_dic["season_poster"] = '' - dic_aux = seasonCredits if seasonCredits else seasonCreditsEN if seasonCreditsEN else {} - ret_dic["season_cast"] = dic_aux.get('cast', []) - ret_dic["season_crew"] = dic_aux.get('crew', []) - if chapter == 0: - # If we only look for season data, include the technical team that has intervened in any chapter - dic_aux = dict((i['id'], i) for i in ret_dic["season_crew"]) - for e in season["episodes"]: - for crew in e['crew']: - if crew['id'] not in list(dic_aux.keys()): - dic_aux[crew['id']] = crew - ret_dic["season_crew"] = list(dic_aux.values()) # Obtain chapter data if applicable # from core.support import dbg;dbg() + ret_dic = {} if chapter > 0: - episode = season["episodes"][chapter - 1] - enepisode = enseason["episodes"][chapter - 1] + # episode = season["episodes"][chapter - 1] + url = "{}/tv/{}/season/{}/episode/{}?api_key={}&language={}&append_to_response=videos,images,credits,external_ids&include_image_language={},en,null".format(host, self.result["id"], seasonNumber, chapter, api, self.search_language, self.search_language) + episode = self.get_json(url) + # logger.debug('EPISODE', jsontools.dump(episode)) episodeTitle = episode.get("name", '') episodeId = episode.get('id', '') @@ -1504,40 +1468,37 @@ class Tmdb(object): episodeStars = episode.get('guest_stars', []) episodeVoteCount = episode.get('vote_count', 0) episodeVoteAverage = episode.get('vote_average', 0) + externalIds = episode.get('external_ids', {}) + imdb_id = externalIds.get('imdb_id') + tvdb_id = externalIds.get('tvdb_id') - episodeTitleEN 
= enepisode.get("name", '') - episodeIdEN = enepisode.get('id', '') - episodePlotEN = enepisode.get('overview', '') - episodeDateEN = enepisode.get('air_date', '') - episodeImageEN = enepisode.get('still_path', '') - episodeCrewEN = enepisode.get('crew', []) - episodeStarsEN = enepisode.get('guest_stars', []) - episodeVoteCountEN = enepisode.get('vote_count', 0) - episodeVoteAverageEN = enepisode.get('vote_average', 0) + ret_dic["episode_title"] = episodeTitle + ret_dic["episode_plot"] = episodePlot - ret_dic["episode_title"] = episodeTitle if episodeTitle and not episodeTitle.startswith(config.get_localized_string(70677)) else episodeTitleEN if episodeTitleEN and not episodeTitleEN.startswith('Episode') else episodeTitle if episodeTitle else '' - ret_dic["episode_plot"] = episodePlot if episodePlot else episodePlotEN if episodePlotEN else '' - date = episodeDate if episodeDate else episodeDateEN if episodeDateEN else '' - image = episodeImage if episodeImage else episodeImageEN if episodeImageEN else '' - if image: - ret_dic["episode_image"] = 'https://image.tmdb.org/t/p/original' + image + if episodeImage: + ret_dic["episode_image"] = 'https://image.tmdb.org/t/p/original' + episodeImage else: ret_dic["episode_image"] = "" - if date: - date = date.split("-") + if episodeDate: + date = episodeDate.split("-") ret_dic["episode_air_date"] = date[2] + "/" + date[1] + "/" + date[0] else: ret_dic["episode_air_date"] = "" - ret_dic["episode_crew"] = episodeCrew if episodeCrew else episodeCrewEN if episodeCrewEN else [] - ret_dic["episode_guest_stars"] = episodeStars if episodeStars else episodeStarsEN if episodeStarsEN else [] - ret_dic["episode_vote_count"] = episodeVoteCount if episodeVoteCount else episodeVoteCountEN if episodeVoteCountEN else 0 - ret_dic["episode_vote_average"] = episodeVoteAverage if episodeVoteAverage else episodeVoteAverageEN if episodeVoteAverageEN else 0 - ret_dic["episode_id"] = episodeId if episodeId else episodeIdEN if episodeIdEN else '' + + ret_dic["episode_crew"] = episodeCrew + if episodeStars: + ret_dic["episode_actors"] = [[k['name'], k['character'], 'https://image.tmdb.org/t/p/original/' + k['profile_path'] if k['profile_path'] else '', k['order']] for k in episodeStars] + ret_dic["episode_vote_count"] = episodeVoteCount + ret_dic["episode_vote_average"] = episodeVoteAverage + ret_dic["episode_id"] = episodeId + ret_dic["episode_imdb_id"] = imdb_id + ret_dic["episode_tvdb_id"] = tvdb_id + return ret_dic def get_list_episodes(self): - url = '{}/tv/{}?api_key={}&language={}'.format(host=host, id=self.search_id, api=api, lang=self.search_language) + url = '{}/tv/{}?api_key={}&language={}'.format(host, self.search_id, api, self.search_language) results = requests.get(url).json().get('seasons', []) seasons = [] if results and 'Error' not in results: @@ -1622,18 +1583,24 @@ class Tmdb(object): origen['credits_crew'] = dic_origen_credits.get('crew', []) del origen['credits'] + if 'images' in list(origen.keys()): + dic_origen_credits = origen['images'] + origen['posters'] = dic_origen_credits.get('posters', []) + origen['fanarts'] = dic_origen_credits.get('backdrops', []) + del origen['images'] + items = list(origen.items()) # Season / episode information if ret_infoLabels['season'] and self.season.get(ret_infoLabels['season']): # If there is data loaded for the indicated season + episodio = -1 if ret_infoLabels['episode']: episodio = ret_infoLabels['episode'] items.extend(list(self.get_episode(ret_infoLabels['season'], episodio).items())) - # 
logger.debug("ret_infoLabels" % ret_infoLabels) for k, v in items: if not v: @@ -1662,7 +1629,7 @@ class Tmdb(object): elif k == 'release_date': ret_infoLabels['year'] = int(v[:4]) - ret_infoLabels['release_date'] = v.split("-")[2] + "/" + v.split("-")[1] + "/" + v.split("-")[0] + ret_infoLabels['premiered'] = v.split("-")[2] + "/" + v.split("-")[1] + "/" + v.split("-")[0] elif k == 'first_air_date': ret_infoLabels['year'] = int(v[:4]) @@ -1702,22 +1669,36 @@ class Tmdb(object): elif k == 'name' or k == 'title': ret_infoLabels['title'] = v + elif k == 'tagline': + ret_infoLabels['tagline'] = v + elif k == 'production_companies': ret_infoLabels['studio'] = ", ".join(i['name'] for i in v) elif k == 'credits_cast' or k == 'season_cast' or k == 'episode_guest_stars': - dic_aux = dict((name, character) for (name, character) in l_castandrole) - l_castandrole.extend([(p['name'], p.get('character', '') or p.get('character_name', '')) \ + dic_aux = dict((name, [character, thumb, order]) for (name, character, thumb, order) in l_castandrole) + l_castandrole.extend([(p['name'], p.get('character', '') or p.get('character_name', ''), 'https://image.tmdb.org/t/p/original' + p.get('profile_path', '') if p.get('profile_path', '') else '', p.get('order')) \ for p in v if 'name' in p and p['name'] not in list(dic_aux.keys())]) elif k == 'videos': if not isinstance(v, list): - v = v.get('result', []) + v = v.get('results', []) for i in v: if i.get("site", "") == "YouTube": - ret_infoLabels['trailer'] = "https://www.youtube.com/watch?v=" + v[0]["key"] + ret_infoLabels['trailer'] = "plugin://plugin.video.youtube/play/?video_id=" + v[0]["key"] break + elif k == 'posters': + ret_infoLabels['posters'] = ['https://image.tmdb.org/t/p/original' + p["file_path"] for p in v] + + elif k == 'fanarts': + ret_infoLabels['fanarts'] = ['https://image.tmdb.org/t/p/original' + p["file_path"] for p in v] + + elif k == 'belongs_to_collection': + c = Tmdb.get_collection(self, v.get('id','')) + for k, v in c.items(): + ret_infoLabels[k] = v + elif k == 'production_countries' or k == 'origin_country': if isinstance(v, str): l_country = list(set(l_country + v.split(','))) @@ -1744,6 +1725,7 @@ class Tmdb(object): for crew in v: l_writer = list(set(l_writer + [crew['name']])) + elif isinstance(v, str) or isinstance(v, int) or isinstance(v, float): ret_infoLabels[k] = v @@ -1751,6 +1733,13 @@ class Tmdb(object): # logger.debug("Atributos no añadidos: " + k +'= '+ str(v)) pass + Mpaaurl = '{}/{}/{}/content_ratings?api_key={}'.format(host, self.search_type, ret_infoLabels['tmdb_id'], api) + Mpaas = self.get_json(Mpaaurl).get('results',[]) + for m in Mpaas: + if m.get('iso_3166_1','').lower() == 'us': + ret_infoLabels['Mpaa'] = m['rating'] + break + # Sort the lists and convert them to str if necessary if l_castandrole: ret_infoLabels['castandrole'] = sorted(l_castandrole, key=lambda tup: tup[0]) @@ -1762,3 +1751,71 @@ class Tmdb(object): ret_infoLabels['writer'] = ', '.join(sorted(l_writer)) return ret_infoLabels + + +def get_season_dic(season): + ret_dic = dict() + # logger.debug(jsontools.dump(season)) + # Get data for this season + + seasonTitle = season.get("name", '') + seasonPlot = season.get("overview" , '') + seasonId = season.get("id", '') + seasonEpisodes = len(season.get("episodes",[])) + seasonDate = season.get("air_date", '') + seasonPoster = season.get('poster_path', '') + seasonCredits = season.get('credits', {}) + seasonPosters = season.get('images',{}).get('posters',{}) + seasonFanarts = 
season.get('images',{}).get('backdrops',{}) + seasonTrailers = season.get('videos',[]).get('results',[]) + + ret_dic["season_title"] = seasonTitle + ret_dic["season_plot"] = seasonPlot + ret_dic["season_id"] = seasonId + ret_dic["season_episodes_number"] = seasonEpisodes + + if seasonDate: + date = seasonDate.split("-") + ret_dic["season_air_date"] = date[2] + "/" + date[1] + "/" + date[0] + else: + ret_dic["season_air_date"] = '' + if seasonPoster: + ret_dic["season_poster"] = 'https://image.tmdb.org/t/p/original' + seasonPoster + else: + ret_dic["season_poster"] = '' + + if seasonPosters: + ret_dic['season_posters'] = ['https://image.tmdb.org/t/p/original' + p["file_path"] for p in seasonPosters] + if seasonFanarts: + ret_dic['season_fanarts'] = ['https://image.tmdb.org/t/p/original' + p["file_path"] for p in seasonFanarts] + if seasonTrailers: + ret_dic['season_trailer'] = [] + for i in seasonTrailers: + if i.get("site", "") == "YouTube": + ret_dic['season_trailer'] = "plugin://plugin.video.youtube/play/?video_id=" + seasonTrailers[0]["key"] + break + + dic_aux = seasonCredits if seasonCredits else {} + ret_dic["season_cast"] = dic_aux.get('cast', []) + ret_dic["season_crew"] = dic_aux.get('crew', []) + return ret_dic + +def parse_fallback_info(info, fallbackInfo): + info_dict = {} + for key, value in info.items(): + if not value: + value = fallbackInfo[key] + info_dict[key] = value + episodes = info_dict['episodes'] + + episodes_list = [] + for i, episode in enumerate(episodes): + episode_dict = {} + for key, value in episode.items(): + if not value: + value = fallbackInfo['episodes'][i][key] + episode_dict[key] = value + episodes_list.append(episode_dict) + + info_dict['episodes'] = episodes_list + return info_dict \ No newline at end of file diff --git a/core/videolibrarydb.py b/core/videolibrarydb.py new file mode 100644 index 00000000..b8a1f170 --- /dev/null +++ b/core/videolibrarydb.py @@ -0,0 +1,20 @@ +from collections import defaultdict +from lib.sqlitedict import SqliteDict + +from core import filetools +from platformcode import config + +class nested_dict_sqlite(defaultdict): + 'like defaultdict but default_factory receives the key' + + def __missing__(self, key): + self[key] = value = self.default_factory(key) + return value + + def close(self): + for key in self.keys(): + self[key].close() + self.clear() + +db_name = filetools.join(config.get_videolibrary_path(), "videolibrary.sqlite") +videolibrarydb = nested_dict_sqlite(lambda table: SqliteDict(db_name, table, 'c', True)) \ No newline at end of file diff --git a/core/videolibrarytools.py b/core/videolibrarytools.py index 0b0a28b5..687a2e11 100644 --- a/core/videolibrarytools.py +++ b/core/videolibrarytools.py @@ -4,17 +4,24 @@ # ------------------------------------------------------------ #from builtins import str +# from specials import videolibrary import sys PY3 = False if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int +if PY3: + from concurrent import futures +else: + from concurrent_py2 import futures + import errno, math, traceback, re, os -from core import filetools, scraper, scrapertools -from core.item import Item +from core import filetools, jsontools, scraper, scrapertools, support, db, httptools, tmdb +from core.item import InfoLabels, Item from lib import generictools from platformcode import config, logger, platformtools from platformcode.autorenumber import RENUMBER +from core.videolibrarydb import videolibrarydb FOLDER_MOVIES = config.get_setting("folder_movies") 
FOLDER_TVSHOWS = config.get_setting("folder_tvshows") @@ -22,12 +29,20 @@ VIDEOLIBRARY_PATH = config.get_videolibrary_path() MOVIES_PATH = filetools.join(VIDEOLIBRARY_PATH, FOLDER_MOVIES) TVSHOWS_PATH = filetools.join(VIDEOLIBRARY_PATH, FOLDER_TVSHOWS) -if not FOLDER_MOVIES or not FOLDER_TVSHOWS or not VIDEOLIBRARY_PATH \ - or not filetools.exists(MOVIES_PATH) or not filetools.exists(TVSHOWS_PATH): +if not FOLDER_MOVIES or not FOLDER_TVSHOWS or not VIDEOLIBRARY_PATH or not filetools.exists(MOVIES_PATH) or not filetools.exists(TVSHOWS_PATH): config.verify_directories_created() addon_name = "plugin://plugin.video.%s/" % config.PLUGIN_NAME +quality_order = ['4k', '2160p', '2160', '4k2160p', '4k2160', '4k 2160p', '4k 2160', '2k', + 'fullhd', 'fullhd 1080', 'fullhd 1080p', 'full hd', 'full hd 1080', 'full hd 1080p', 'hd1080', 'hd1080p', 'hd 1080', 'hd 1080p', '1080', '1080p', + 'hd', 'hd720', 'hd720p', 'hd 720', 'hd 720p', '720', '720p', 'hdtv', + 'sd', '480p', '480', '360p', '360', '240p', '240'] + +video_extensions = ['3g2', '3gp', '3gp2', 'asf', 'avi', 'divx', 'flv', 'iso', 'm4v', 'mk2', 'mk3d', 'mka', 'mkv', 'mov', 'mp4', 'mp4a', 'mpeg', 'mpg', 'ogg', 'ogm', 'ogv', 'qt', 'ra', 'ram', 'rm', 'ts', 'vob', 'wav', 'webm', 'wma', 'wmv'] +subtitle_extensions = ['srt', 'idx', 'sub', 'ssa', 'ass'] +immage_extensions = ['jpg', 'png'] + def read_nfo(path_nfo, item=None): """ @@ -69,64 +84,23 @@ def read_nfo(path_nfo, item=None): return head_nfo, it -def save_movie(item, silent=False): - """ - saves the item element in the movie library, with the values ​​it contains. - @type item: item - @param item: item to be saved. - @rtype insertados: int - @return: the number of elements inserted - @rtype sobreescritos: int - @return: the number of overwritten elements - @rtype fallidos: int - @return: the number of failed items or -1 if all failed - """ +def set_base_name(item, _id): + # set base_name for videolibrary logger.debug() - # logger.debug(item.tostring('\n')) - insertados = 0 - sobreescritos = 0 - fallidos = 0 - path = "" - - # We try to obtain the correct title: - # 1. contentTitle: This should be the correct site, since the title usually contains "Add to the video library..." - # 2. fulltitle - # 3. 
title - # if item.contentTitle: item.title = item.contentTitle - # elif item.fulltitle: item.title = item.fulltitle - - if not item.contentTitle: - # We put the correct title on your site so that scraper can locate it - if item.fulltitle: - item.contentTitle = item.fulltitle + if item.contentType == 'movie': + if config.get_setting("original_title_folder", "videolibrary") and item.infoLabels['originaltitle']: + base_name = item.infoLabels['originaltitle'] else: - item.contentTitle = item.title - - # If at this point we do not have a title, we leave - if not item.contentTitle or not item.channel: - logger.debug("contentTitle NOT FOUND") - return 0, 0, -1, path # Salimos sin guardar - - scraper_return = scraper.find_and_set_infoLabels(item) - - # At this point we can have: - # scraper_return = True: An item with infoLabels with the updated information of the movie - # scraper_return = False: An item without movie information (it has been canceled in the window) - # item.infoLabels['code'] == "" : The required IMDB identifier was not found to continue, we quit - if not scraper_return or not item.infoLabels['code']: - logger.debug("NOT FOUND IN SCRAPER OR DO NOT HAVE code") - return 0, 0, -1, path - - _id = item.infoLabels['code'][0] - - # progress dialog - if not silent: - p_dialog = platformtools.dialog_progress(config.get_localized_string(20000), config.get_localized_string(60062)) - - if config.get_setting("original_title_folder", "videolibrary") and item.infoLabels['originaltitle']: - base_name = item.infoLabels['originaltitle'] + base_name = item.contentTitle else: - base_name = item.contentTitle + if config.get_setting("original_title_folder", "videolibrary") and item.infoLabels['originaltitle']: + base_name = item.infoLabels['originaltitle'] + elif item.infoLabels['tvshowtitle']: + base_name = item.infoLabels['tvshowtitle'] + elif item.infoLabels['title']: + base_name = item.infoLabels['title'] + else: + base_name = item.contentSerieName if not PY3: base_name = unicode(filetools.validate_path(base_name.replace('/', '-')), "utf8").encode("utf8") @@ -136,90 +110,171 @@ def save_movie(item, silent=False): if config.get_setting("lowerize_title", "videolibrary"): base_name = base_name.lower() - for raiz, subcarpetas, ficheros in filetools.walk(MOVIES_PATH): - for c in subcarpetas: - code = scrapertools.find_single_match(c, r'\[(.*?)\]') - if code and code in item.infoLabels['code']: - path = filetools.join(raiz, c) - _id = code - break + return '{} [{}]'.format(base_name, _id) - if not path: - # Create folder - path = filetools.join(MOVIES_PATH, ("%s [%s]" % (base_name, _id)).strip()) + +def save_movie(item, silent=False): + """ + saves the item element in the movie library, with the values ​​it contains. + @type item: item + @param item: item to be saved. 
+ @rtype inserted: int + @return: the number of elements inserted + @rtype overwritten: int + @return: the number of overwritten elements + @rtype failed: int + @return: the number of failed items or -1 if all failed + """ + + logger.debug() + # logger.debug(item.tostring('\n')) + inserted = 0 + overwritten = 0 + failed = 0 + path = "" + + # Put the correct title on your site so that scraper can locate it + if not item.contentTitle: + if item.fulltitle: item.contentTitle = item.fulltitle + else: item.contentTitle = re.sub(r'\[\s*[^\]]+\]', '', item.title).strip() + + # If at this point we do not have a title, we leave + if not item.contentTitle or not item.channel: + logger.debug("contentTitle NOT FOUND") + return 0, 0, -1, path # Salimos sin guardar + + scraper_return = scraper.find_and_set_infoLabels(item) + # support.dbg() + + # At this point we can have: + # scraper_return = True: An item with infoLabels with the updated information of the movie + # scraper_return = False: An item without movie information (it has been canceled in the window) + # item.infoLabels['code'] == "" : The required IMDB identifier was not found to continue, we quit + if not scraper_return or not item.infoLabels['code']: + logger.debug("NOT FOUND IN SCRAPER OR DO NOT HAVE code") + return 0, 0, -1, path + + # Get ID from infoLabels + _id = get_id(item) + if not _id: + logger.debug("NOT FOUND IN SCRAPER OR DO NOT HAVE code") + return 0, 0, -1, path + + # get parameters from db + try: + moviedb = videolibrarydb['movie'].get(_id, {}) + movie_item = moviedb.get('item', Item()) + head_nfo = movie_item.head_nfo + channels = moviedb.get('channels',{}) + except: + logger.debug("The film cannot be added to the database") + videolibrarydb.close() + return 0, 0, -1, path + + # progress dialog + if not silent: p_dialog = platformtools.dialog_progress_bg(config.get_localized_string(20000), config.get_localized_string(60062)) + + base_name = set_base_name(item, _id) + path = filetools.join(MOVIES_PATH, base_name) + + # check if path already exist + if not filetools.exists(path): logger.debug("Creating movie directory:" + path) if not filetools.mkdir(path): logger.debug("Could not create directory") + videolibrarydb.close() return 0, 0, -1, path + try: + # set nfo and strm paths + nfo_path = filetools.join(base_name, "{}.nfo".format(base_name)) + strm_path = filetools.join(base_name, "{}.strm".format(base_name)) - nfo_path = filetools.join(path, "%s [%s].nfo" % (base_name, _id)) - strm_path = filetools.join(path, "%s.strm" % base_name) - json_path = filetools.join(path, ("%s [%s].json" % (base_name, item.channel.lower()))) + # check if nfo and strm file exist + nfo_exists = filetools.exists(filetools.join(MOVIES_PATH, nfo_path)) + strm_exists = filetools.exists(filetools.join(MOVIES_PATH, strm_path)) - nfo_exists = filetools.exists(nfo_path) - strm_exists = filetools.exists(strm_path) - json_exists = filetools.exists(json_path) + if not head_nfo: + head_nfo = scraper.get_nfo(item) - if not nfo_exists: - # We create .nfo if it doesn't exist - logger.debug("Creating .nfo: " + nfo_path) - head_nfo = scraper.get_nfo(item) + # Make or update Videolibrary Movie Item + movie_item.channel = "videolibrary" + movie_item.action = 'findvideos' + movie_item.infoLabels = item.infoLabels + if not movie_item.head_nfo: movie_item.head_nfo = head_nfo + if not movie_item.title: movie_item.title = item.contentTitle + if not movie_item.videolibrary_id: movie_item.videolibrary_id = _id + if not movie_item.strm_path: movie_item.strm_path = strm_path 
+ if not movie_item.nfo_path: movie_item.nfo_path = nfo_path + if not movie_item.base_name: movie_item.base_name = base_name + if not movie_item.fanart: movie_item.fanart = item.infoLabels['fanart'] + if not movie_item.thumbnail: movie_item.thumbnail = item.infoLabels['thumbnail'] + if not movie_item.playtime: movie_item.playtime = 0, + if not movie_item.playcounts: movie_item.playcounts = 0, + if not movie_item.prefered_lang: movie_item.prefered_lang = '' + if not movie_item.lang_list: movie_item.lang_list = [] + # if not movie_item.info: movie_item.info = extra_info(_id) - item_nfo = Item(title=item.contentTitle, channel="videolibrary", action='findvideos', - library_playcounts={"%s [%s]" % (base_name, _id): 0}, infoLabels=item.infoLabels, - library_urls={}) + if not item.contentLanguage: item.contentLanguage = 'ITA' + if not item.contentLanguage in movie_item.lang_list: movie_item.lang_list.append(item.contentLanguage) - else: - # If .nfo exists, but we are adding a new channel we open it - head_nfo, item_nfo = read_nfo(nfo_path) - - if not strm_exists: - # Create base_name.strm if you do not exist - item_strm = Item(channel='videolibrary', action='play_from_library', - strm_path=strm_path.replace(MOVIES_PATH, ""), contentType='movie', - contentTitle=item.contentTitle) - strm_exists = filetools.write(strm_path, '%s?%s' % (addon_name, item_strm.tourl())) - item_nfo.strm_path = strm_path.replace(MOVIES_PATH, "") - - # Only if item_nfo and .strm exist we continue - if item_nfo and strm_exists: - - if json_exists: - logger.debug("The file exists. Is overwritten") - sobreescritos += 1 + if len(movie_item.lang_list) > 1: + movie_item.prefered_lang = movie_item.lang_list[platformtools.dialog_select(config.get_localized_string(70246), movie_item.lang_list)] else: - insertados += 1 + movie_item.prefered_lang = movie_item.lang_list[0] - # If the emergency url option has been checked, it is added to the movie after running Findvideos from the channel - try: - headers = {} - if item.headers: - headers = item.headers - channel = item.channel - if config.get_setting("emergency_urls", channel) in [1, 3]: - item = emergency_urls(item, None, json_path, headers=headers) - if item_nfo.emergency_urls and not isinstance(item_nfo.emergency_urls, dict): - del item_nfo.emergency_urls - if not item_nfo.emergency_urls: - item_nfo.emergency_urls = dict() - item_nfo.emergency_urls.update({item.channel: True}) - except: - logger.error("Unable to save %s emergency urls in the video library" % item.contentTitle) - logger.error(traceback.format_exc()) + # create nfo file if it does not exist + # support.dbg() + if not nfo_exists: + # data = dicttoxml(movie_item) + filetools.write(filetools.join(MOVIES_PATH, movie_item.nfo_path), head_nfo) - if filetools.write(json_path, item.tojson()): - if not silent: p_dialog.update(100, item.contentTitle) - item_nfo.library_urls[item.channel] = item.url + # create strm file if it does not exist + if not strm_exists: + logger.debug("Creating .strm: " + strm_path) + item_strm = Item(channel='videolibrary', action='play_from_library', strm_path=movie_item.strm_path, contentType='movie', contentTitle=item.contentTitle, videolibrary_id=item.videolibrary_id) + strm_exists = filetools.write(filetools.join(MOVIES_PATH, movie_item.strm_path), '{}?{}'.format(addon_name, item_strm.tourl())) - if filetools.write(nfo_path, head_nfo + item_nfo.tojson()): - #logger.debug("FOLDER_MOVIES : %s" % FOLDER_MOVIES) - # We update the Kodi video library with the movie - if config.is_xbmc() and 
config.get_setting("videolibrary_kodi") and not silent: - from platformcode import xbmc_videolibrary - xbmc_videolibrary.update() + # checks if the content already exists + if videolibrarydb['movie'].get(_id, {}): + logger.debug("The file exists. Is overwritten") + overwritten += 1 + else: + logger.debug("Creating .nfo: " + nfo_path) + inserted += 1 - if not silent: p_dialog.close() - return insertados, sobreescritos, fallidos, path + item = remove_host(item) + # write on db + if item.channel in channels and item.channel != 'download': + channels_url = [u.url for u in channels[item.channel]] + if item.url not in channels_url: + channels[item.channel].append(item) + else: + del channels[item.channel][channels_url.index(item.url)] + channels[item.channel].append(item) + else: + channels[item.channel] = [item] + + moviedb['item'] = movie_item + moviedb['channels'] = channels + + videolibrarydb['movie'][_id] = moviedb + except: + failed += 1 + + videolibrarydb.close() + + # Only if movie_item and .strm exist we continue + if movie_item and strm_exists: + if not silent: + p_dialog.update(100, item.contentTitle) + p_dialog.close() + # Update Kodi Library + if config.is_xbmc() and config.get_setting("videolibrary_kodi") and not silent and inserted: + from platformcode.xbmc_videolibrary import update + update(MOVIES_PATH) + # from platformcode.xbmc_videolibrary import update_moviedb + # update_moviedb(movie_item) + return inserted, overwritten, failed, path # If we get to this point it is because something has gone wrong logger.error("Could not save %s in the video library" % item.contentTitle) @@ -228,23 +283,19 @@ def save_movie(item, silent=False): p_dialog.close() return 0, 0, -1, path -def update_renumber_options(item, head_nfo, path): +def update_renumber_options(item): from core import jsontools - # from core.support import dbg;dbg() - tvshow_path = filetools.join(path, 'tvshow.nfo') - if filetools.isfile(tvshow_path) and item.channel_prefs: - for channel in item.channel_prefs: - filename = filetools.join(config.get_data_path(), "settings_channels", channel + '_data.json') - if filetools.isfile(filename): - json_file = jsontools.load(filetools.read(filename)) - if RENUMBER in json_file: - json = json_file[RENUMBER] - if item.fulltitle in json: - item.channel_prefs[channel][RENUMBER] = json[item.fulltitle] - logger.debug('UPDATED=\n' + str(item.channel_prefs)) - filetools.write(tvshow_path, head_nfo + item.tojson()) -def add_renumber_options(item, head_nfo, path): + filename = filetools.join(config.get_data_path(), "settings_channels", item.channel + '_data.json') + if filetools.isfile(filename): + json_file = jsontools.load(filetools.read(filename)) + json = json_file.get(RENUMBER,{}).get(item.fulltitle,{}) + if json: + logger.debug('UPDATED=\n' + item.fulltitle) + item.renumber = json + return item + +def add_renumber_options(item): from core import jsontools # from core.support import dbg;dbg() ret = None @@ -270,150 +321,6 @@ def check_renumber_options(item): # if tvshow_item['channel_prefs'][item.fullti] -def filter_list(episodelist, action=None, path=None): - # if path: path = path.decode('utf8') - # import xbmc - # if xbmc.getCondVisibility('system.platform.windows') > 0: path = path.replace('smb:','').replace('/','\\') - channel_prefs = {} - lang_sel = quality_sel = show_title = channel ='' - - if action: - tvshow_path = filetools.join(path, "tvshow.nfo") - head_nfo, tvshow_item = read_nfo(tvshow_path) - channel = episodelist[0].channel - show_title = 
tvshow_item.infoLabels['tvshowtitle'] - if not tvshow_item.channel_prefs: - tvshow_item.channel_prefs={channel:{}} - list_item = filetools.listdir(path) - for File in list_item: - if (File.endswith('.strm') or File.endswith('.json') or File.endswith('.nfo')): - filetools.remove(filetools.join(path, File)) - if channel not in tvshow_item.channel_prefs: - tvshow_item.channel_prefs[channel] = {} - - channel_prefs = tvshow_item.channel_prefs[channel] - - renumber = add_renumber_options(episodelist[0], head_nfo, tvshow_path) - if renumber: - channel_prefs[RENUMBER] = renumber - - if action == 'get_seasons': - if 'favourite_language' not in channel_prefs: - channel_prefs['favourite_language'] = '' - if 'favourite_quality' not in channel_prefs: - channel_prefs['favourite_quality'] = '' - if channel_prefs['favourite_language']: - lang_sel = channel_prefs['favourite_language'] - if channel_prefs['favourite_quality']: - quality_sel = channel_prefs['favourite_quality'] - # if Download - if not show_title: show_title = episodelist[0].fulltitle - if not channel: channel= episodelist[0].channel - # SELECT EISODE BY LANG AND QUALITY - quality_dict = {'N/A': ['n/a'], - 'BLURAY': ['br', 'bluray'], - 'FULLHD': ['fullhd', 'fullhd 1080', 'fullhd 1080p', 'full hd', 'full hd 1080', 'full hd 1080p', 'hd1080', 'hd1080p', 'hd 1080', 'hd 1080p', '1080', '1080p'], - 'HD': ['hd', 'hd720', 'hd720p', 'hd 720', 'hd 720p', '720', '720p', 'hdtv'], - '480P': ['sd', '480p', '480'], - '360P': ['360p', '360'], - '240P': ['240p', '240'], - 'MAX':['MAX']} - quality_order = ['N/A', '240P', '360P','480P', 'HD', 'FULLHD', 'BLURAY', 'MAX'] - - - lang_list = [] - sub_list = [] - quality_list = ['MAX'] - - # Make Language List - for episode in episodelist: - if not episode.contentLanguage: episode.contentLanguage = 'ITA' - if type(episode.contentLanguage) == list and episode.contentLanguage not in lang_list: - pass - else: - if episode.contentLanguage and episode.contentLanguage not in lang_list: - # Make list of subtitled languages - if 'sub' in episode.contentLanguage.lower(): - sub = re.sub('Sub-','', episode.contentLanguage) - if sub not in sub_list: sub_list.append(sub) - else: - lang_list.append(episode.contentLanguage) - - # add to Language List subtitled languages - if sub_list: - for sub in sub_list: - if sub in lang_list: - lang_list.insert(lang_list.index(sub) + 1, 'Sub-' + sub) - lang_list.insert(lang_list.index(sub) + 2, sub + ' + Sub-' + sub) - else: - lang_list.append('Sub-' + sub) - - # Make Quality List - for episode in episodelist: - for name, var in quality_dict.items(): - if not episode.quality and 'N/A' not in quality_list: - quality_list.append('N/A') - elif episode.quality and episode.quality.lower() in var and name not in quality_list: - quality_list.append(name) - quality_list = sorted(quality_list, key=lambda x:quality_order.index(x)) - - # if more than one language - if len(lang_list) > 1: - selection = lang_list.index(lang_sel) if lang_sel else platformtools.dialog_select(config.get_localized_string(70725) % (show_title, channel),lang_list) - if action: lang_sel = channel_prefs['favourite_language'] = lang_list[selection] - langs = lang_list[selection].split(' + ') - - ep_list = [] - count = 0 - stop = False - while not stop: - for episode in episodelist: - title = scrapertools.find_single_match(episode.title, r'(\d+x\d+)') - if not any(title in word for word in ep_list) and episode.contentLanguage == langs[count]: - ep_list.append(episode.title) - if count < len(langs)-1: count += 1 - else: stop = True - 
it = [] - for episode in episodelist: - if episode.title in ep_list: - it.append(episode) - episodelist = it - - else: channel_prefs['favourite_language'] = '' - - # if more than one quality - if len(quality_list) > 2: - if config.get_setting('videolibrary_max_quality'): selection = favourite_quality_selection = len(quality_list)-1 - else: selection = favourite_quality_selection = quality_list.index(quality_sel) if quality_sel else platformtools.dialog_select(config.get_localized_string(70726) % (show_title, channel) ,quality_list) - - ep_list = [] - stop = False - while not stop: - for episode in episodelist: - title = scrapertools.find_single_match(episode.title, r'(\d+x\d+)') - if not any(title in word for word in ep_list) and episode.quality.lower() in quality_dict[quality_list[selection]]: - ep_list.append(episode.title) - if selection != 0: selection = selection - 1 - else: stop = True - if quality_list[selection] == 'N/A': - for episode in episodelist: - title = scrapertools.find_single_match(episode.title, r'(\d+x\d+)') - if not any(title in word for word in ep_list): - ep_list.append(episode.title) - - it = [] - for episode in episodelist: - if episode.title in ep_list: - if action: channel_prefs['favourite_quality'] = quality_list[favourite_quality_selection] - it.append(episode) - episodelist = it - - else:channel_prefs['favourite_quality'] = '' - - if action: filetools.write(tvshow_path, head_nfo + tvshow_item.tojson()) - - return episodelist - def save_tvshow(item, episodelist, silent=False): """ stores in the series library the series with all the chapters included in the episodelist @@ -421,19 +328,21 @@ def save_tvshow(item, episodelist, silent=False): @param item: item that represents the series to save @type episodelist: list @param episodelist: list of items that represent the episodes to be saved. 
- @rtype insertados: int + @rtype inserted: int @return: the number of episodes inserted - @rtype sobreescritos: int + @rtype overwritten: int @return: the number of overwritten episodes - @rtype fallidos: int + @rtype failed: int @return: the number of failed episodes or -1 if the entire series has failed @rtype path: str @return: serial directory """ logger.debug() - # logger.debug(item.tostring('\n')) + inserted = 0 + overwritten = 0 + failed = 0 path = "" - + # support.dbg() # If at this point we do not have a title or code, we leave if not (item.contentSerieName or item.infoLabels['code']) or not item.channel: logger.debug("NOT FOUND contentSerieName or code") @@ -445,6 +354,7 @@ def save_tvshow(item, episodelist, silent=False): else: scraper_return = True item.contentType = contentTypeBackup # Fix errors in some channels + # At this point we can have: # scraper_return = True: An item with infoLabels with the updated information of the series # scraper_return = False: An item without movie information (it has been canceled in the window) @@ -453,113 +363,111 @@ def save_tvshow(item, episodelist, silent=False): logger.debug("NOT FOUND IN SCRAPER OR DO NOT HAVE code") return 0, 0, -1, path - _id = item.infoLabels['code'][0] - if not item.infoLabels['code'][0] or item.infoLabels['code'][0] == 'None': - if item.infoLabels['code'][1] and item.infoLabels['code'][1] != 'None': - _id = item.infoLabels['code'][1] - elif item.infoLabels['code'][2] and item.infoLabels['code'][2] != 'None': - _id = item.infoLabels['code'][2] - else: - logger.error("NOT FOUND IN SCRAPER OR HAS NO CODE: " + item.url + ' / ' + item.infoLabels['code']) + # Get ID from infoLabels + _id = get_id(item) + if not _id: + logger.debug("NOT FOUND IN SCRAPER OR DO NOT HAVE code") + return 0, 0, -1, path + + # get parameters from db + try: + tvshowdb = videolibrarydb['tvshow'].get(_id, {}) + tvshow_item = tvshowdb.get('item', Item()) + head_nfo = tvshow_item.head_nfo + channels = tvshowdb.get('channels',{}) + + except: + logger.debug("The tv show cannot be added to the database") + videolibrarydb.close() + return 0, 0, -1, path + + # set base name + + base_name = set_base_name(item, _id) + path = filetools.join(TVSHOWS_PATH, base_name) + + # check if path already exist + if not filetools.exists(path): + logger.debug("Creating tv show directory:" + path) + if not filetools.mkdir(path): + logger.debug("Could not create directory") return 0, 0, -1, path - if config.get_setting("original_title_folder", "videolibrary") and item.infoLabels['originaltitle']: - base_name = item.infoLabels['originaltitle'] - elif item.infoLabels['tvshowtitle']: - base_name = item.infoLabels['tvshowtitle'] - elif item.infoLabels['title']: - base_name = item.infoLabels['title'] - else: - base_name = item.contentSerieName + nfo_path = filetools.join(base_name, "tvshow.nfo") + nfo_exists = filetools.exists(filetools.join(TVSHOWS_PATH, nfo_path)) - if not PY3: - base_name = unicode(filetools.validate_path(base_name.replace('/', '-')), "utf8").encode("utf8") - else: - base_name = filetools.validate_path(base_name.replace('/', '-')) + # get parameters + if not item.head_nfo: + head_nfo = scraper.get_nfo(item) + if not head_nfo: return 0, 0, -1, '' - if config.get_setting("lowerize_title", "videolibrary"): - base_name = base_name.lower() - for raiz, subcarpetas, ficheros in filetools.walk(TVSHOWS_PATH): - for c in subcarpetas: - code = scrapertools.find_single_match(c, r'\[(.*?)\]') - if code and code != 'None' and code in item.infoLabels['code']: - path = 
filetools.join(raiz, c) - _id = code - break + item.infoLabels['mediatype'] = 'tvshow' + item.contentType = 'tvshow' + item.infoLabels['title'] = item.contentSerieName + tvshow_item.infoLabels = item.infoLabels + tvshow_item.channel = 'videolibrary' + tvshow_item.action = 'get_seasons' + tvshow_item.nfo_path = nfo_path + if not tvshow_item.head_nfo: + tvshow_item.head_nfo = head_nfo + if not tvshow_item.title: + tvshow_item.title = item.contentSerieName + if not tvshow_item.videolibrary_id: + tvshow_item.videolibrary_id = _id + if not tvshow_item.fanart: + tvshow_item.fanart = item.infoLabels['fanart'] + if not tvshow_item.thumbnail: + tvshow_item.thumbnail = item.infoLabels['thumbnail'] + if not tvshow_item.base_name: + tvshow_item.base_name = base_name + if tvshow_item.active == '': + tvshow_item.active = True + if not tvshow_item.playcounts: + tvshow_item.playcounts = 0, + if not tvshow_item.prefered_lang: + tvshow_item.prefered_lang = '' + if not tvshow_item.lang_list: + tvshow_item.lang_list = [] - if not path: - path = filetools.join(TVSHOWS_PATH, ("%s [%s]" % (base_name, _id)).strip()) - logger.debug("Creating series directory: " + path) - try: - filetools.mkdir(path) - except OSError as exception: - if exception.errno != errno.EEXIST: - raise - tvshow_path = filetools.join(path, "tvshow.nfo") - if not filetools.exists(tvshow_path): - # We create tvshow.nfo, if it does not exist, with the head_nfo, series info and watched episode marks - logger.debug("Creating tvshow.nfo: " + tvshow_path) - head_nfo = scraper.get_nfo(item, search_groups=True) - if not head_nfo: - return 0, 0, 0, '' - item.infoLabels['mediatype'] = "tvshow" - item.infoLabels['title'] = item.contentSerieName - item_tvshow = Item(title=item.contentSerieName, channel="videolibrary", action="get_seasons", - fanart=item.infoLabels['fanart'], thumbnail=item.infoLabels['thumbnail'], - infoLabels=item.infoLabels, path=path.replace(TVSHOWS_PATH, ""), fulltitle=item.fulltitle) - item_tvshow.library_playcounts = {} - item_tvshow.library_urls = {item.channel: item.url} - - else: - # If tvshow.nfo exists, but we are adding a new channel we update the list of urls - head_nfo, item_tvshow = read_nfo(tvshow_path) - item_tvshow.fulltitle = item.fulltitle - item_tvshow.channel = "videolibrary" - item_tvshow.action = "get_seasons" - item_tvshow.library_urls[item.channel] = item.url - - # FILTERTOOLS - # if the channel has a language filter, we add the channel and the show - if episodelist and "list_language" in episodelist[0]: - # if we have already added a previously filtered channel, we add or update the channel and show - if "library_filter_show" in item_tvshow: - if item.title_from_channel: - item_tvshow.library_filter_show[item.channel] = item.title_from_channel - else: - item_tvshow.library_filter_show[item.channel] = item.show - # there was no filter channel and we generated it for the first time + item = remove_host(item) + item.renumber = add_renumber_options(item) + # write on db + if item.channel in channels and item.channel != 'download': + channels_url = [u.url for u in channels[item.channel]] + if item.url not in channels_url: + channels[item.channel].append(item) else: - if item.title_from_channel: - item_tvshow.library_filter_show = {item.channel: item.title_from_channel} - else: - item_tvshow.library_filter_show = {item.channel: item.show} - - if item.channel != "downloads": - item_tvshow.active = 1 # to be updated daily when service is called - - filetools.write(tvshow_path, head_nfo + item_tvshow.tojson()) + del 
channels[item.channel][channels_url.index(item.url)] + channels[item.channel].append(item) + else: + channels[item.channel] = [item] + tvshowdb['item'] = tvshow_item + tvshowdb['channels'] = channels + videolibrarydb['tvshow'][_id] = tvshowdb + if not nfo_exists: + filetools.write(filetools.join(TVSHOWS_PATH, tvshow_item.nfo_path), head_nfo + ', kod:' + _id) + # support.dbg() if not episodelist: # The episode list is empty - return 0, 0, 0, path + return 0, 0, -1, path # Save the episodes - '''import time - start_time = time.time()''' - insertados, sobreescritos, fallidos = save_episodes(path, episodelist, item, silent=silent) - '''msg = "Insertados: %d | Sobreescritos: %d | Fallidos: %d | Tiempo: %2.2f segundos" % \ - (insertados, sobreescritos, fallidos, time.time() - start_time) - logger.debug(msg)''' + inserted, overwritten, failed = save_episodes(tvshow_item, episodelist, silent=silent) + videolibrarydb.close() + if config.is_xbmc() and config.get_setting("videolibrary_kodi") and not silent and inserted: + from platformcode.xbmc_videolibrary import update + update(TVSHOWS_PATH, tvshow_item.basename) - return insertados, sobreescritos, fallidos, path + return inserted, overwritten, failed, path -def save_episodes(path, episodelist, serie, silent=False, overwrite=True): +def save_episodes(item, episodelist, silent=False, overwrite=True): """ saves in the indicated path all the chapters included in the episodelist - @type path: str + @type Item: str @param path: path to save the episodes @type episodelist: list @param episodelist: list of items that represent the episodes to be saved. @@ -569,334 +477,167 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True): @param silent: sets whether notification is displayed @param overwrite: allows to overwrite existing files @type overwrite: bool - @rtype insertados: int + @rtype inserted: int @return: the number of episodes inserted - @rtype sobreescritos: int + @rtype overwritten: int @return: the number of overwritten episodes - @rtype fallidos: int + @rtype failed: int @return: the number of failed episodes """ + def save_episode(item, seasons, episodes, e, inserted, overwritten, failed): + season_episode = scrapertools.get_season_and_episode(e.title) + + + if season_episode: + # local = True if season_episode in included_files else False + # if local: + # strm_path = filetools.join(item.base_name, included_files[season_episode]) + # else: + strm_path = filetools.join(item.base_name, "{}.strm".format(season_episode)) + nfo_path = filetools.join(item.base_name, "{}.nfo".format(season_episode)) + + e.contentSeason = int(season_episode.split('x')[0]) + e.contentEpisodeNumber = int(season_episode.split('x')[1]) + tmdb.set_infoLabels_item(e) + playcount = db['viewed'].get(e.infoLabels['tmdb_id'], {}).get('{}x{}'.format(e.contentSeason, e.contentEpisodeNumber), 0) + head_nfo = scraper.get_nfo(e) + + episode_item = Item(action='findvideos', + channel='videolibrary', + strm_path=strm_path, + contentSeason = e.contentSeason, + contentEpisodeNumber = e.contentEpisodeNumber, + contentType = e.contentType, + infoLabels = e.infoLabels, + head_nfo = head_nfo, + videolibrary_id = item.videolibrary_id, + thumbnail = e.infoLabels.get('poster_path') if e.infoLabels.get('poster_path') else item.thumbnail, + fanart = e.infoLabels.get('poster_path') if e.infoLabels.get('poster_path') else item.fanart, + title = '{}. 
{}'.format(e.contentEpisodeNumber, e.infoLabels['title'])) + + episode = episodes.get(season_episode, {}) + + try: + # e.infoLabels['episode'] = '' + + if e.contentSeason not in seasons: + # season_item = Item(infoLabels = e.infoLabels) + tmdb_info = tmdb.Tmdb(id_Tmdb = e.infoLabels['tmdb_id'], search_type='tv') + seasoninfo = tmdb.get_season_dic(tmdb_info.get_season(e.contentSeason)) + infoLabels = {} + if seasoninfo.get('season_posters'): infoLabels['posters'] = seasoninfo.get('season_posters') + if seasoninfo.get('season_fanarts'): infoLabels['fanarts'] = seasoninfo.get('season_fanarts') + if seasoninfo.get('season_trailer'): infoLabels['trailer'] = seasoninfo.get('season_trailer') + + season_item = Item(action="get_episodes", + channel='videolibrary', + title=seasoninfo.get('season_title'), + thumbnail = seasoninfo.get('season_poster') if seasoninfo.get('season_poster') else item.thumbnail, + fanart = item.fanart, + plot = seasoninfo.get('season_plot') if seasoninfo.get('season_plot') else item.infoLabels.get('plot'), + contentType = 'season', + infoLabels = infoLabels, + contentSeason = e.contentSeason, + videolibrary_id = item.videolibrary_id, + playcount=0) + + logger.debug(season_item) + + seasons[e.contentSeason] = season_item + + if not episode: + inserted += 1 + episode['item'] = episode_item + + # else: + epchannels = episode.get('channels',{}) + + e = remove_host(e) + e.contentTitle = e.infoLabels['title'] + e.infoLabels = {} + + if e.channel in epchannels and e.channel != 'download': + channels_url = [u.url for u in epchannels[e.channel]] + if e.url not in channels_url: + epchannels[e.channel].append(e) + else: + del epchannels[e.channel][channels_url.index(e.url)] + epchannels[e.channel].append(e) + overwritten += 1 + else: + epchannels[e.channel] = [e] + + episode['channels'] = epchannels + episodes[season_episode] = episode + + if not filetools.exists(filetools.join(TVSHOWS_PATH, strm_path)): + logger.debug("Creating .strm: " + strm_path) + item_strm = Item(channel='videolibrary', action='play_from_library', strm_path=strm_path, contentType='episode', videolibraryd_id=episode_item.videolibrary_id, contentSeason = episode_item.contentSeason, contentEpisodeNumber = episode_item.contentEpisodeNumber,) + filetools.write(filetools.join(TVSHOWS_PATH, strm_path), '{}?{}'.format(addon_name, item_strm.tourl())) + # if not filetools.exists(filetools.join(TVSHOWS_PATH, nfo_path)): + # filetools.write(filetools.join(TVSHOWS_PATH, nfo_path), head_nfo) + except: + failed += 1 + return item, seasons, episodes, e, inserted, overwritten, failed logger.debug() - episodelist = filter_list(episodelist, serie.action, path) + + # from core import tmdb # No episode list, nothing to save if not len(episodelist): logger.debug("There is no episode list, we go out without creating strm") return 0, 0, 0 - # process local episodes - local_episodes_path = '' - local_episodelist = [] - update = False - nfo_path = filetools.join(path, "tvshow.nfo") - head_nfo, item_nfo = read_nfo(nfo_path) - - if item_nfo.update_last: - local_episodes_path = item_nfo.local_episodes_path - elif config.get_setting("local_episodes", "videolibrary"): - done, local_episodes_path = config_local_episodes_path(path, serie) - if done < 0: - logger.debug("An issue has occurred while configuring local episodes, going out without creating strm") - return 0, 0, done - item_nfo.local_episodes_path = local_episodes_path - filetools.write(nfo_path, head_nfo + item_nfo.tojson()) - - if local_episodes_path: - from 
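save_episode keys seasons, episodes, .strm files and playcounts on the "SxNN" string derived from the episode title. A rough standalone approximation of that derivation, built on a variant of the season/episode pattern used elsewhere in this patch; the real scrapertools.get_season_and_episode may normalize titles differently:

    import re

    # Heuristic extraction of a "season x episode" key from a title, based on a
    # pattern variant used elsewhere in this patch; scrapertools'
    # get_season_and_episode may behave differently on edge cases (e.g. bare years).
    SEASON_EPISODE = re.compile(r'[Ss]?(\d+)(?:x|_|\s+)?[Ee]?[Pp]?(\d+)')

    def season_episode_key(title):
        m = SEASON_EPISODE.search(title)
        if not m:
            return ''
        return '{}x{}'.format(int(m.group(1)), m.group(2).zfill(2))

    print(season_episode_key('Show S01E02 ITA'))   # -> 1x02
    print(season_episode_key('Show 3x11 [HD]'))    # -> 3x11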
platformcode.xbmc_videolibrary import check_db, clean - # check if the local episodes are in the Kodi video library - if check_db(local_episodes_path): - local_episodelist += get_local_content(local_episodes_path) - clean_list = [] - for f in filetools.listdir(path): - match = scrapertools.find_single_match(f, r'[Ss]?(\d+)(?:x|_|\s+)?[Ee]?[Pp]?(\d+)') - if match: - ep = '%dx%02d' % (int(match[0]), int(match[1])) - if ep in local_episodelist: - del_file = filetools.join(path, f) - filetools.remove(del_file) - if f.endswith('strm'): - sep = '\\' if '\\' in path else '/' - clean_path = path[:-len(sep)] if path.endswith(sep) else path - clean_path = '%/' + clean_path.split(sep)[-1] + '/' + f - clean_list.append(clean_path) - clean_list.append(clean_path.replace('/','\\')) - - if clean_list: - clean(clean_list) - update = True - - if item_nfo.local_episodes_list: - difference = [x for x in item_nfo.local_episodes_list if (x not in local_episodelist)] - if len(difference) > 0: - clean_list = [] - for f in difference: - sep = '\\' if '\\' in local_episodes_path else '/' - clean_path = local_episodes_path[:-len(sep)] if local_episodes_path.endswith(sep) else local_episodes_path - clean_path = '%/' + clean_path.split(sep)[-1] + '/%' + f.replace('x','%') + '%' - clean_list.append(clean_path) - clean_list.append(clean_path.replace('/','\\')) - clean(clean_list) - update = True - - item_nfo.local_episodes_list = sorted(local_episodelist) - filetools.write(nfo_path, head_nfo + item_nfo.tojson()) - # the local episodes are not in the Kodi video library - else: - process_local_episodes(local_episodes_path, path) - - insertados = 0 - sobreescritos = 0 - fallidos = 0 - news_in_playcounts = {} - # We list all the files in the series, so we avoid having to check if they exist one by one - raiz, carpetas_series, ficheros = next(filetools.walk(path)) - ficheros = [filetools.join(path, f) for f in ficheros] - # Silent is to show no progress (for service) if not silent: # progress dialog - p_dialog = platformtools.dialog_progress(config.get_localized_string(60064) ,'') - # p_dialog.update(0, config.get_localized_string(60065)) + p_dialog = platformtools.dialog_progress_bg(config.get_localized_string(60064) ,'') - channel_alt = serie.channel # We prepare to add the emergency urls - emergency_urls_stat = config.get_setting("emergency_urls", channel_alt) # Does the channel want emergency urls? 
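save_episodes now pulls its per-show bookkeeping from videolibrarydb instead of scanning .nfo/.json files on disk. The backing implementation of videolibrarydb is not shown in this hunk; the sketch below only illustrates the nesting implied by the reads and writes in this patch, and every id, title and path in it is an invented placeholder:

    # Illustrative shape of the persisted video library data, inferred from the
    # videolibrarydb reads/writes in this patch. All concrete values are made up.
    example_db = {
        'tvshow': {
            '12345': {                               # videolibrary_id (scraper code)
                'item': '<tvshow Item written by save_tvshow>',
                'channels': {'examplechannel': ['<one Item per saved url>']},
            },
        },
        'season': {
            '12345': {1: '<season Item built from TMDb season info>'},
        },
        'episode': {
            '12345': {
                '1x01': {'item': '<episode Item>',
                         'channels': {'examplechannel': ['<one Item per link>']}},
            },
        },
        'viewed': {
            '67890': {'1x01': 1},                    # tmdb_id -> "SxE" -> playcount
        },
    }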
- emergency_urls_succ = False - try: channel = __import__('specials.%s' % channel_alt, fromlist=["specials.%s" % channel_alt]) - except: channel = __import__('channels.%s' % channel_alt, fromlist=["channels.%s" % channel_alt]) - if serie.torrent_caching_fail: # If the conversion process has failed, they are not cached - emergency_urls_stat = 0 - del serie.torrent_caching_fail + inserted = 0 + overwritten = 0 + failed = 0 + seasons = videolibrarydb['season'].get(item.videolibrary_id, {}) + episodes = videolibrarydb['episode'].get(item.videolibrary_id, {}) + videolibrarydb.close() - new_episodelist = [] - # We obtain the season and episode number and discard those that are not + try: t = float(100) / len(episodelist) + except: t = 0 - for e in episodelist: - headers = {} - if e.headers: - headers = e.headers - - try: - season_episode = scrapertools.get_season_and_episode(e.title) - - # If the emergency url option has been checked, it is added to each episode after running Findvideos from the channel - if e.emergency_urls and isinstance(e.emergency_urls, dict): del e.emergency_urls # We erase previous traces - json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower()) # Path of the episode .json - if emergency_urls_stat == 1 and not e.emergency_urls and e.contentType == 'episode': # Do we keep emergency urls? + # for i, e in enumerate(episodelist): + # item, seasons, episodes, e, inserted, overwritten, failed = save_episode(item, seasons, episodes, e, inserted, overwritten, failed) + # if not e.contentLanguage: e.contentLanguage = item.contentLanguage if item.contentLanguage else 'ITA' + # if not e.contentLanguage in item.lang_list: item.lang_list.append(e.contentLanguage) + # if not silent: + # p_dialog.update(int(math.ceil((i + 1) * t)), e.title) + i = 0 + with futures.ThreadPoolExecutor() as executor: + itlist = [executor.submit(save_episode, item, seasons, episodes, e, inserted, overwritten, failed) for e in episodelist] + for res in futures.as_completed(itlist): + if res.result(): + item, seasons, episodes, e, inserted, overwritten, failed = res.result() + if not e.contentLanguage: e.contentLanguage = item.contentLanguage if item.contentLanguage else 'ITA' + if not e.contentLanguage in item.lang_list: item.lang_list.append(e.contentLanguage) if not silent: - p_dialog.update(0, 'Caching links and .torren filest...\n' + e.title) # progress dialog - if json_path in ficheros: # If there is the .json we get the urls from there - if overwrite: # but only if .json are overwritten - json_epi = Item().fromjson(filetools.read(json_path)) #We read the .json - if json_epi.emergency_urls: # if there are emergency urls ... - e.emergency_urls = json_epi.emergency_urls # ... we copy them - else: # if not... - e = emergency_urls(e, channel, json_path, headers=headers) # ... we generate them - else: - e = emergency_urls(e, channel, json_path, headers=headers) # If the episode does not exist, we generate the urls - if e.emergency_urls: #If we already have urls... - emergency_urls_succ = True # ... is a success and we are going to mark the .nfo - elif emergency_urls_stat == 2 and e.contentType == 'episode': # Do we delete emergency urls? - if e.emergency_urls: del e.emergency_urls - emergency_urls_succ = True # ... is a success and we are going to mark the .nfo - elif emergency_urls_stat == 3 and e.contentType == 'episode': # Do we update emergency urls? 
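Episode saving is fanned out through concurrent.futures rather than a hand-rolled thread list. A minimal, self-contained sketch of the submit/as_completed pattern used here; the worker and inputs are placeholders:

    from concurrent import futures  # the patch imports the bundled concurrent_py2 on Python 2

    def worker(value):
        # Placeholder task; the real code submits save_episode for each episode.
        return value * 2

    results = []
    with futures.ThreadPoolExecutor() as executor:
        pending = [executor.submit(worker, v) for v in (1, 2, 3, 4)]
        for done in futures.as_completed(pending):   # yields in completion order
            results.append(done.result())            # re-raises worker exceptions here
    print(sorted(results))                           # -> [2, 4, 6, 8]

Since as_completed yields results in completion order rather than submission order, this pattern fits cases like save_episodes where the order in which episodes are written does not matter.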
- if not silent: - p_dialog.update(0, 'Caching links and .torrent files...\n' + e.title) # progress dialog - e = emergency_urls(e, channel, json_path, headers=headers) # we generate the urls - if e.emergency_urls: # If we already have urls... - emergency_urls_succ = True # ... is a success and we are going to mark the .nfo + i += 1 + p_dialog.update(int(math.ceil(i * t)), e.title) - if not e.infoLabels["tmdb_id"] or (serie.infoLabels["tmdb_id"] and e.infoLabels["tmdb_id"] != serie.infoLabels["tmdb_id"]): #en series multicanal, prevalece el infolabels... - e.infoLabels = serie.infoLabels # ... dthe current channel and not the original one - e.contentSeason, e.contentEpisodeNumber = season_episode.split("x") - if e.videolibray_emergency_urls: - del e.videolibray_emergency_urls - if e.channel_redir: - del e.channel_redir # ... and redirect marks are erased - new_episodelist.append(e) - except: - if e.contentType == 'episode': - logger.error("Unable to save %s emergency urls in the video library" % e.contentTitle) - continue - - # No episode list, nothing to save - if not len(new_episodelist): - logger.debug("There is no episode list, we go out without creating strm") - return 0, 0, 0 - - local_episodelist += get_local_content(path) - - # fix float because division is done poorly in python 2.x - try: - t = float(100) / len(new_episodelist) - except: - t = 0 - for i, e in enumerate(scraper.sort_episode_list(new_episodelist)): - if not silent: - p_dialog.update(int(math.ceil((i + 1) * t)), e.title) - - high_sea = e.contentSeason - high_epi = e.contentEpisodeNumber - if scrapertools.find_single_match(e.title, r'[a|A][l|L]\s*(\d+)'): - high_epi = int(scrapertools.find_single_match(e.title, r'al\s*(\d+)')) - max_sea = e.infoLabels["number_of_seasons"] - max_epi = 0 - if e.infoLabels["number_of_seasons"] and (e.infoLabels["temporada_num_episodios"] or e.infoLabels["number_of_seasons"] == 1): - if e.infoLabels["number_of_seasons"] == 1 and e.infoLabels["number_of_episodes"]: - max_epi = e.infoLabels["number_of_episodes"] - else: - max_epi = e.infoLabels["temporada_num_episodios"] - - season_episode = "%sx%s" % (e.contentSeason, str(e.contentEpisodeNumber).zfill(2)) - strm_path = filetools.join(path, "%s.strm" % season_episode) - nfo_path = filetools.join(path, "%s.nfo" % season_episode) - json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower()) - - if season_episode in local_episodelist: - logger.debug('Skipped: Serie ' + serie.contentSerieName + ' ' + season_episode + ' available as local content') - continue - - # check if the episode has been downloaded - if filetools.join(path, "%s [downloads].json" % season_episode) in ficheros: - logger.debug('INFO: "%s" episode %s has been downloaded, skipping it' % (serie.contentSerieName, season_episode)) - continue - - strm_exists = strm_path in ficheros - nfo_exists = nfo_path in ficheros - json_exists = json_path in ficheros - - if not strm_exists: - # If there is no season_episode.strm add it - item_strm = Item(action='play_from_library', channel='videolibrary', strm_path=strm_path.replace(TVSHOWS_PATH, ""), infoLabels={}) - item_strm.contentSeason = e.contentSeason - item_strm.contentEpisodeNumber = e.contentEpisodeNumber - item_strm.contentType = e.contentType - item_strm.contentTitle = season_episode - - # FILTERTOOLS - if item_strm.list_language: - # if tvshow.nfo has a filter it is passed to the item_strm to be generated - if "library_filter_show" in serie: - item_strm.library_filter_show = serie.library_filter_show - 
- if item_strm.library_filter_show == "": - logger.error("There was an error getting the name of the series to filter") - - # logger.debug("item_strm" + item_strm.tostring('\n')) - # logger.debug("serie " + serie.tostring('\n')) - strm_exists = filetools.write(strm_path, '%s?%s' % (addon_name, item_strm.tourl())) - - item_nfo = None - if not nfo_exists and e.infoLabels["code"]: - # If there is no season_episode.nfo add it - if serie.infoLabels["code"]: - e.infoLabels["code"] = serie.infoLabels["code"] - else: - scraper.find_and_set_infoLabels(e) - head_nfo = scraper.get_nfo(e) - - item_nfo = e.clone(channel="videolibrary", url="", action='findvideos', strm_path=strm_path.replace(TVSHOWS_PATH, "")) - if item_nfo.emergency_urls: - del item_nfo.emergency_urls # It only stays in the episode's .json - - nfo_exists = filetools.write(nfo_path, head_nfo + item_nfo.tojson()) - - # Only if there are season_episode.nfo and season_episode.strm we continue - if nfo_exists and strm_exists: - if not json_exists or overwrite: - # We get infoLabel from the episode - if not item_nfo: - head_nfo, item_nfo = read_nfo(nfo_path) - - # In multichannel series, the infolabels of the current channel prevail and not that of the original - if not e.infoLabels["tmdb_id"] or (item_nfo.infoLabels["tmdb_id"] and e.infoLabels["tmdb_id"] != item_nfo.infoLabels["tmdb_id"]): - e.infoLabels = item_nfo.infoLabels - - if filetools.write(json_path, e.tojson()): - if not json_exists: - logger.debug("Inserted: %s" % json_path) - insertados += 1 - # We mark episode as unseen - news_in_playcounts[season_episode] = 0 - # We mark the season as unseen - news_in_playcounts["season %s" % e.contentSeason] = 0 - # We mark the series as unseen - # logger.debug("serie " + serie.tostring('\n')) - news_in_playcounts[serie.contentSerieName] = 0 - - else: - logger.debug("Overwritten: %s" % json_path) - sobreescritos += 1 - else: - logger.debug("Failed: %s" % json_path) - fallidos += 1 - - else: - logger.debug("Failed: %s" % json_path) - fallidos += 1 - - if not silent and p_dialog.iscanceled(): - break - - #logger.debug('high_sea x high_epi: %sx%s' % (str(high_sea), str(high_epi))) - #logger.debug('max_sea x max_epi: %sx%s' % (str(max_sea), str(max_epi))) if not silent: - p_dialog.close() - - if news_in_playcounts or emergency_urls_succ or serie.infoLabels["status"] == "Ended" or serie.infoLabels["status"] == "Canceled": - # If there are new episodes we mark them as unseen on tvshow.nfo ... 
- tvshow_path = filetools.join(path, "tvshow.nfo") - try: - import datetime - head_nfo, tvshow_item = read_nfo(tvshow_path) - tvshow_item.library_playcounts.update(news_in_playcounts) - - # If the emergency url insert / delete operation in the .jsons of the episodes was successful, the .nfo is checked - if emergency_urls_succ: - if tvshow_item.emergency_urls and not isinstance(tvshow_item.emergency_urls, dict): - del tvshow_item.emergency_urls - if emergency_urls_stat in [1, 3]: # Save / update links operation - if not tvshow_item.emergency_urls: - tvshow_item.emergency_urls = dict() - if tvshow_item.library_urls.get(serie.channel, False): - tvshow_item.emergency_urls.update({serie.channel: True}) - elif emergency_urls_stat == 2: # Delete links operation - if tvshow_item.emergency_urls and tvshow_item.emergency_urls.get(serie.channel, False): - tvshow_item.emergency_urls.pop(serie.channel, None) # delete the entry of the .nfo - - if tvshow_item.active == 30: - tvshow_item.active = 1 - if tvshow_item.infoLabels["tmdb_id"] == serie.infoLabels["tmdb_id"]: - tvshow_item.infoLabels = serie.infoLabels - tvshow_item.infoLabels["title"] = tvshow_item.infoLabels["tvshowtitle"] - - if max_sea == high_sea and max_epi == high_epi and (tvshow_item.infoLabels["status"] == "Ended" or tvshow_item.infoLabels["status"] == "Canceled") and insertados == 0 and fallidos == 0 and not tvshow_item.local_episodes_path: - tvshow_item.active = 0 # ... nor we will update it more - logger.debug("%s [%s]: 'Finished' or 'Canceled' series. Periodic update is disabled" % (serie.contentSerieName, serie.channel)) - - update_last = datetime.date.today() - tvshow_item.update_last = update_last.strftime('%Y-%m-%d') - update_next = datetime.date.today() + datetime.timedelta(days=int(tvshow_item.active)) - tvshow_item.update_next = update_next.strftime('%Y-%m-%d') - - filetools.write(tvshow_path, head_nfo + tvshow_item.tojson()) - except: - logger.error("Error updating tvshow.nfo") - logger.error("Unable to save %s emergency urls in the video library" % serie.contentSerieName) - logger.error(traceback.format_exc()) - fallidos = -1 + if len(item.lang_list) > 1: + item.prefered_lang = item.lang_list[platformtools.dialog_select(config.get_localized_string(70246), item.lang_list)] else: - # ... 
if it was correct we update the Kodi video library - if config.is_xbmc() and config.get_setting("videolibrary_kodi") and not silent: - update = True + item.prefered_lang = item.lang_list[0] + tvshowdb = videolibrarydb['tvshow'][item.videolibrary_id] + tvshowdb['item'] = item + videolibrarydb['tvshow'][item.videolibrary_id] = tvshowdb + videolibrarydb.close() - if update: - from platformcode import xbmc_videolibrary - xbmc_videolibrary.update() + videolibrarydb['episode'][item.videolibrary_id] = episodes + videolibrarydb['season'][item.videolibrary_id] = seasons - if fallidos == len(episodelist): - fallidos = -1 + videolibrarydb.close() + p_dialog.close() + + return inserted, overwritten, failed - logger.debug("%s [%s]: inserted= %s, overwritten= %s, failed= %s" % (serie.contentSerieName, serie.channel, insertados, sobreescritos, fallidos)) - return insertados, sobreescritos, fallidos def config_local_episodes_path(path, item, silent=False): @@ -904,7 +645,7 @@ def config_local_episodes_path(path, item, silent=False): from platformcode.xbmc_videolibrary import search_local_path local_episodes_path=search_local_path(item) if not local_episodes_path: - title = item.contentSerieName if item.contentSerieName else item.show + title = item.contentSerieName if item.contentSerieName else item.fulltitle if not silent: silent = platformtools.dialog_yesno(config.get_localized_string(30131), config.get_localized_string(80044) % title) if silent: @@ -1013,10 +754,10 @@ def add_movie(item): # del item.tmdb_stat # We clean the status so that it is not recorded in the Video Library if item: new_item = item.clone(action="findvideos") - insertados, sobreescritos, fallidos, path = save_movie(new_item) + inserted, overwritten, failed, path = save_movie(new_item) - if fallidos == 0: - platformtools.dialog_ok(config.get_localized_string(30131), + if failed == 0: + platformtools.dialog_notification(config.get_localized_string(30131), config.get_localized_string(30135) % new_item.contentTitle) # 'has been added to the video library' else: filetools.rmdirtree(path) @@ -1083,8 +824,8 @@ def add_tvshow(item, channel=None): # del item.tmdb_stat # We clean the status so that it is not recorded in the Video Library # Get the episode list - # from core.support import dbg;dbg() - itemlist = getattr(channel, item.action)(item) + it = item.clone() + itemlist = getattr(channel, it.action)(it) if itemlist and not scrapertools.find_single_match(itemlist[0].title, r'[Ss]?(\d+)(?:x|_|\s+)[Ee]?[Pp]?(\d+)'): from platformcode.autorenumber import start, check if not check(item): @@ -1101,31 +842,31 @@ def add_tvshow(item, channel=None): global magnet_caching magnet_caching = False - insertados, sobreescritos, fallidos, path = save_tvshow(item, itemlist) + inserted, overwritten, failed, path = save_tvshow(item, itemlist) if not path: pass - elif not insertados and not sobreescritos and not fallidos: + elif not inserted and not overwritten and not failed: filetools.rmdirtree(path) platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(60067) % item.show) logger.error("The string %s could not be added to the video library. 
Could not get any episode" % item.show) - elif fallidos == -1: + elif failed == -1: filetools.rmdirtree(path) platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(60068) % item.show) logger.error("The string %s could not be added to the video library" % item.show) - elif fallidos == -2: + elif failed == -2: filetools.rmdirtree(path) - elif fallidos > 0: + elif failed > 0: platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(60069) % item.show) - logger.error("Could not add %s episodes of series %s to the video library" % (fallidos, item.show)) + logger.error("Could not add %s episodes of series %s to the video library" % (failed, item.show)) else: - platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(60070) % item.show) - logger.debug("%s episodes of series %s have been added to the video library" % (insertados, item.show)) + platformtools.dialog_notification(config.get_localized_string(30131), config.get_localized_string(60070) % item.show) + logger.debug("%s episodes of series %s have been added to the video library" % (inserted, item.show)) if config.is_xbmc(): if config.get_setting("sync_trakt_new_tvshow", "videolibrary"): import xbmc @@ -1140,94 +881,36 @@ def add_tvshow(item, channel=None): xbmc_videolibrary.sync_trakt_addon(path) -def emergency_urls(item, channel=None, path=None, headers={}): - logger.debug() - import re - from servers import torrent - try: - magnet_caching_e = magnet_caching +def remove_host(item): + try : + channel = __import__('channels.' + item.channel, None, None, ['channels.' + item.channel]) except: - magnet_caching_e = True + channel = __import__('specials.' + item.channel, None, None, ['specials.' + item.channel]) - """ - We call Findvideos of the channel with the variable "item.videolibray_emergency_urls = True" to get the variable - "item.emergency_urls" with the list of tuple lists of torrent links and direct servers for that episode or movie - Torrents should always go in list [0], if any. If you want to cache the .torrents, the search goes against that list. - List two will include direct server links, but also magnet links (which are not cacheable). - """ - # we launched a "lookup" in the "findvideos" of the channel to obtain the emergency links - try: - if channel == None: # If the caller has not provided the channel structure, it is created - channel = item.channel # It is verified if it is a clone, which returns "newpct1" - #channel = __import__('channels.%s' % channel, fromlist=["channels.%s" % channel]) - channel = __import__('specials.%s' % channel_alt, fromlist=["specials.%s" % channel_alt]) - if hasattr(channel, 'findvideos'): # If the channel has "findvideos" ... - item.videolibray_emergency_urls = True # ... marks itself as "lookup" - channel_save = item.channel # ... save the original channel in case of fail-over in Newpct1 - category_save = item.category # ... save the original category in case of fail-over or redirection in Newpct1 - if item.channel_redir: # ... if there is a redir, the alternate channel is temporarily restored - item.channel = scrapertools.find_single_match(item.url, r'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').lower() - item.category = scrapertools.find_single_match(item.url, r'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').capitalize() - item_res = getattr(channel, 'findvideos')(item) # ... the process of Findvideos - item_res.channel = channel_save # ... 
restore the original channel in case there is a fail-over in Newpct1 - item_res.category = category_save # ... restore the original category in case there is a fail-over or redirection in Newpct1 - item.category = category_save # ... restore the original category in case there is a fail-over or redirection in Newpct1 - del item_res.videolibray_emergency_urls # ... and the lookup mark is erased - if item.videolibray_emergency_urls: - del item.videolibray_emergency_urls # ... and the original lookup mark is erased - except: - logger.error('ERROR when processing the title in Findvideos del Canal: ' + item.channel + ' / ' + item.title) - logger.error(traceback.format_exc()) - item.channel = channel_save # ... restore the original channel in case of fail-over or redirection in Newpct1 - item.category = category_save # ... restore the original category in case there is a fail-over or redirection in Newpct1 - item_res = item.clone() # If there has been an error, the original Item is returned - if item_res.videolibray_emergency_urls: - del item_res.videolibray_emergency_urls # ... and the lookup mark is erased - if item.videolibray_emergency_urls: - del item.videolibray_emergency_urls # ... and the original lookup mark is erased - - # If the user has activated the option "emergency_urls_torrents", the .torrent files of each title will be downloaded - else: # If the links have been successfully cached ... - try: - referer = None - post = None - channel_bis =item.channel - if config.get_setting("emergency_urls_torrents", channel_bis) and item_res.emergency_urls and path != None: - videolibrary_path = config.get_videolibrary_path() # we detect the absolute path of the title - movies = config.get_setting("folder_movies") - series = config.get_setting("folder_tvshows") - if movies in path: - folder = movies - else: - folder = series - videolibrary_path = filetools.join(videolibrary_path, folder) - i = 1 - if item_res.referer: referer = item_res.referer - if item_res.post: post = item_res.post - for url in item_res.emergency_urls[0]: # We go through the emergency urls ... - torrents_path = re.sub(r'(?:\.\w+$)', '_%s.torrent' % str(i).zfill(2), path) - path_real = '' - if magnet_caching_e or not url.startswith('magnet'): - path_real = torrent.caching_torrents(url, referer, post, torrents_path=torrents_path, headers=headers) # ... to download the .torrents - if path_real: # If you have been successful ... 
- item_res.emergency_urls[0][i-1] = path_real.replace(videolibrary_path, '') # if it looks at the relative "path" - i += 1 + host = httptools.downloadpage(channel.host, only_headers=True).url - # We restore original variables - if item.referer: - item_res.referer = item.referer - elif item_res.referer: - del item_res.referer - if item.referer: - item_res.referer = item.referer - elif item_res.referer: - del item_res.referer - item_res.url = item.url + if host.endswith('/'): host = host[:-1] + item.url = item.url.replace(host, '') - except: - logger.error('ERROR when caching the .torrent of: ' + item.channel + ' / ' + item.title) - logger.error(traceback.format_exc()) - item_res = item.clone() # If there has been an error, the original Item is returned + return item - #logger.debug(item_res.emergency_urls) - return item_res # We return the updated Item with the emergency links +def get_id(item): + _id = '' + for i in item.infoLabels['code']: + if i or i != 'None': + _id = i + break + return _id + + +def get_local_files(item): + included_files = {} + # search media files in Videolibrary Folder + for root, folder, files in filetools.walk(filetools.join(TVSHOWS_PATH,item.base_name)): + # for folder in folders: + for f in files: + if f.split('.')[-1] in video_extensions: + s, e = scrapertools.find_single_match(f, r'[Ss]?(\d+)(?:x|_|\s+)[Ee]?[Pp]?(\d+)') + included_files['{}x{}'.format(s,e.zfill(2))] = f + if included_files: + return included_files, 1 diff --git a/lib/generictools.py b/lib/generictools.py index 44789305..382eda66 100644 --- a/lib/generictools.py +++ b/lib/generictools.py @@ -1204,7 +1204,7 @@ def post_tmdb_findvideos(item, itemlist): itemlist.append(item.clone(title="** [COLOR yelow]Actualizar Títulos - vista previa videoteca[/COLOR] **", action="actualizar_titulos", extra="peliculas", tmdb_stat=False, from_action=item.action, from_title_tmdb=item.title, from_update=True)) if item.contentType == 'movie' and item.contentChannel != "videolibrary": - itemlist.append(item.clone(title="**-[COLOR yellow] Añadir a la videoteca [/COLOR]-**", action="add_pelicula_to_library", extra="peliculas", from_action=item.action, from_title_tmdb=item.title)) + itemlist.append(item.clone(title="**-[COLOR yellow] Añadir a la videoteca [/COLOR]-**", action="add_movie_to_library", extra="peliculas", from_action=item.action, from_title_tmdb=item.title)) # We added the option to watch trailers if item.contentChannel != "videolibrary": diff --git a/libraryscraper.py b/libraryscraper.py new file mode 100644 index 00000000..c020e9a1 --- /dev/null +++ b/libraryscraper.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# scraper for Kodi Library based on db +import xbmc, xbmcplugin, sys, os +from platformcode import logger, config + +try: + import xbmcvfs + xbmc.translatePath = xbmcvfs.translatePath + xbmc.validatePath = xbmcvfs.validatePath + xbmc.makeLegalFilename = xbmcvfs.makeLegalFilename +except: + pass + +librerias = xbmc.translatePath(os.path.join(config.get_runtime_path(), 'lib')) +sys.path.insert(0, librerias) + + +from core.videolibrarytools import MOVIES_PATH, TVSHOWS_PATH, videolibrarydb + +try: + from urlparse import parse_qsl +except ImportError: # py2 / py3 + from urllib.parse import parse_qsl + +def get_params(argv): + result = {'handle': int(argv[0])} + if len(argv) < 2 or not argv[1]: + return result + + result.update(parse_qsl(argv[1].lstrip('?'))) + return result + +if __name__ == '__main__': + # params = get_params(sys.argv[1:]) + logger.debug('PARAMS') + # run() \ No newline at end of file 
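For reference, this is how the argv parsing in the new libraryscraper.py behaves; the sketch re-declares get_params so it runs standalone, and the sample argv values are invented:

    # Standalone illustration of libraryscraper.get_params; sample values invented.
    try:
        from urlparse import parse_qsl           # Python 2
    except ImportError:
        from urllib.parse import parse_qsl       # Python 3

    def get_params(argv):
        result = {'handle': int(argv[0])}
        if len(argv) < 2 or not argv[1]:
            return result
        result.update(parse_qsl(argv[1].lstrip('?')))
        return result

    print(get_params(['1']))
    # -> {'handle': 1}
    print(get_params(['1', '?action=find&title=Some%20Show']))
    # -> {'handle': 1, 'action': 'find', 'title': 'Some Show'}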
diff --git a/platformcode/autorenumber.py b/platformcode/autorenumber.py index 9047ad7c..4f3c8e26 100644 --- a/platformcode/autorenumber.py +++ b/platformcode/autorenumber.py @@ -11,6 +11,10 @@ from core.item import Item from core.support import typo, match, dbg, Item from platformcode import config, platformtools, logger PY3 = True if sys.version_info[0] >= 3 else False +if PY3: + from concurrent import futures +else: + from concurrent_py2 import futures # Json Var RENUMBER = 'TVSHOW_AUTORENUMBER' @@ -171,15 +175,18 @@ class autorenumber(): def renumber(self): + def sub_thread(item): + if not match(item.title, patron=r'[Ss]?(\d+)(?:x|_|\s+)[Ee]?[Pp]?(\d+)').match: + number = match(item.title, patron=r'(\d+)').match.lstrip('0') + if number: + if not number in self.episodes: self.makelist() + item.title = '{} - {}'.format(typo(self.episodes[number], 'bold'), item.title) + item.contentSeason = int(self.episodes[number].split('x')[0]) + item.contentEpisodeNumber = int(self.episodes[number].split('x')[1]) if not self.item.renumber and self.itemlist: - for item in self.itemlist: - if not match(item.title, patron=r'[Ss]?(\d+)(?:x|_|\s+)[Ee]?[Pp]?(\d+)').match: - number = match(item.title, patron=r'(\d+)').match.lstrip('0') - if number: - if not number in self.episodes: self.makelist() - item.title = '{} - {}'.format(typo(self.episodes[number], 'bold'), item.title) - item.contentSeason = int(self.episodes[number].split('x')[0]) - item.contentEpisodeNumber = int(self.episodes[number].split('x')[1]) + with futures.ThreadPoolExecutor() as executor: + renumber_list = [executor.submit(sub_thread, item,) for item in self.itemlist] + else: self.makelist() diff --git a/platformcode/infoplus.py b/platformcode/infoplus.py index aed3fb78..51502a79 100644 --- a/platformcode/infoplus.py +++ b/platformcode/infoplus.py @@ -185,7 +185,7 @@ class SearchWindow(xbmcgui.WindowXMLDialog): self.close() modal() for item in itemlist: - if item.action not in ['save_download', 'add_pelicula_to_library', 'add_serie_to_library', ''] and item.infoLabels['title']: + if item.action not in ['save_download', 'add_movie_to_library', 'add_serie_to_library', ''] and item.infoLabels['title']: if item.action == 'findvideos' and item.contentType in ['episode', 'tvshow']: it = xbmcgui.ListItem(re.sub(r'\[[^\]]+\]', '', item.title)) self.getControl(NUMBER).setText(support.typo(config.get_localized_string(70362),'uppercase bold')) @@ -204,7 +204,7 @@ class SearchWindow(xbmcgui.WindowXMLDialog): self.itemlist.append(item) if itemlist[0].contentType == 'movie': if not itemlist[0].server: - self.commands.append(itemlist[0].clone(action='add_pelicula_to_library', thumbnail=support.thumb('add_to_videolibrary'))) + self.commands.append(itemlist[0].clone(action='add_movie_to_library', thumbnail=support.thumb('add_to_videolibrary'))) self.commands.append(itemlist[0].clone(channel='downloads', action='save_download', from_channel=itemlist[0].channel, from_action=itemlist[0].action, thumbnail=support.thumb('downloads'))) else: self.commands.append(Info.clone(channel='downloads', action='save_download', from_channel=Info.channel, from_action=Info.action, thumbnail=support.thumb('downloads'))) diff --git a/platformcode/launcher.py b/platformcode/launcher.py index 6e41f12a..f76af17b 100644 --- a/platformcode/launcher.py +++ b/platformcode/launcher.py @@ -231,7 +231,7 @@ def run(item=None): platformtools.render_items(itemlist, item) # Special action for adding a movie to the library - elif item.action == "add_pelicula_to_library": + elif item.action 
== "add_movie_to_library": from core import videolibrarytools videolibrarytools.add_movie(item) diff --git a/platformcode/platformtools.py b/platformcode/platformtools.py index 32c9dce9..29a8c7ae 100644 --- a/platformcode/platformtools.py +++ b/platformcode/platformtools.py @@ -578,7 +578,7 @@ def set_context_commands(item, item_url, parent_item, **kwargs): if parent_item.channel == 'kodfavorites': return context_commands # Options according to criteria, only if the item is not a tag, nor is it "Add to the video library", etc... - if item.action and item.action not in ["add_pelicula_to_library", "add_serie_to_library", "buscartrailer", "actualizar_titulos"]: + if item.action and item.action not in ["add_movie_to_library", "add_serie_to_library", "buscartrailer", "actualizar_titulos"]: # Show information: if the item has a plot, we assume that it is a series, season, chapter or movie # if item.infoLabels['plot'] and (num_version_xbmc < 17.0 or item.contentType == 'season'): # context_commands.append((config.get_localized_string(60348), "Action(Info)")) @@ -647,7 +647,7 @@ def set_context_commands(item, item_url, parent_item, **kwargs): context_commands.append((config.get_localized_string(60352), "RunPlugin(%s?%s&%s)" % (sys.argv[0], item_url, 'action=add_serie_to_library&from_action=' + item.action))) # Add Movie to Video Library elif item.action in ["detail", "findvideos"] and item.contentType == 'movie' and item.contentTitle: - context_commands.append((config.get_localized_string(60353), "RunPlugin(%s?%s&%s)" % (sys.argv[0], item_url, 'action=add_pelicula_to_library&from_action=' + item.action))) + context_commands.append((config.get_localized_string(60353), "RunPlugin(%s?%s&%s)" % (sys.argv[0], item_url, 'action=add_movie_to_library&from_action=' + item.action))) if not item.local and item.channel not in ["downloads", "filmontv", "search"] and item.server != 'torrent' and parent_item.action != 'mainlist' and config.get_setting('downloadenabled'): # Download movie diff --git a/platformcode/xbmc_info_window.py b/platformcode/xbmc_info_window.py index fd514ee5..dac0d037 100644 --- a/platformcode/xbmc_info_window.py +++ b/platformcode/xbmc_info_window.py @@ -69,6 +69,7 @@ class InfoWindow(xbmcgui.WindowXMLDialog): self.response = self.results[int(self.getControl(SELECT).getSelectedItem().getProperty('position'))] self.close() elif control_id == CLOSE: + self.response = None self.close() def onAction(self, action): diff --git a/platformcode/xbmc_videolibrary.py b/platformcode/xbmc_videolibrary.py index bbb221f8..ad1486c4 100644 --- a/platformcode/xbmc_videolibrary.py +++ b/platformcode/xbmc_videolibrary.py @@ -5,6 +5,7 @@ # from future import standard_library # standard_library.install_aliases() #from builtins import str +from core.item import Item import sys, os, threading, time, re, math, xbmc, xbmcgui PY3 = False if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int @@ -14,7 +15,7 @@ if PY3: else: import urllib2 # Usamos el nativo de PY2 que es más rápido -from core import filetools, jsontools +from core import filetools, jsontools, support from platformcode import config, logger, platformtools from core import scrapertools from xml.dom import minidom @@ -67,8 +68,8 @@ def mark_auto_as_watched(item): platformtools.set_played_time(item) if item.options['strm'] : sync = True show_server = False - from specials import videolibrary - videolibrary.mark_content_as_watched2(item) + # from specials import videolibrary + # videolibrary.mark_content_as_watched(item) if not 
next_episode: break @@ -98,9 +99,13 @@ def mark_auto_as_watched(item): if not show_server and item.play_from != 'window' and not item.no_return: xbmc.sleep(700) xbmc.executebuiltin('Action(ParentDir)') - xbmc.sleep(500) + # xbmc.sleep(500) + + if marked: + from specials import videolibrary + videolibrary.mark_content_as_watched(item) - if next_episode and next_episode.next_ep and config.get_setting('next_ep') < 3: + if next_episode and next_episode.next_ep and config.get_setting('next_ep') == 1: from platformcode.launcher import play_from_library play_from_library(next_episode) @@ -258,8 +263,14 @@ def mark_content_as_watched_on_kodi(item, value=1): @param value: > 0 for seen, 0 for not seen """ logger.debug() + # logger.debug("item:\n" + item.tostring('\n')) payload_f = '' + pos = item.itemlistPosition + winid = xbmcgui.getCurrentWindowId() + win = xbmcgui.Window(winid) + cid = win.getFocusId() + ctl = win.getControl(cid) if item.contentType == "movie": movieid = 0 @@ -300,8 +311,8 @@ def mark_content_as_watched_on_kodi(item, value=1): filename = filetools.basename(item.strm_path) head, tail = filetools.split(filetools.split(item.strm_path)[0]) else: # If Item is from the Series - filename = filetools.basename(item.path) - head, tail = filetools.split(filetools.split(item.path)[0]) + filename = filetools.basename(item.base_name) + head, tail = filetools.split(filetools.split(item.base_name)[0]) path = filetools.join(tail, filename) for d in data['result']['episodes']: @@ -313,6 +324,7 @@ def mark_content_as_watched_on_kodi(item, value=1): if episodeid != 0: payload_f = {"jsonrpc": "2.0", "method": "VideoLibrary.SetEpisodeDetails", "params": {"episodeid": episodeid, "playcount": value}, "id": 1} + if payload_f: # Mark as seen data = get_data(payload_f) @@ -320,6 +332,9 @@ def mark_content_as_watched_on_kodi(item, value=1): if data['result'] != 'OK': logger.error("ERROR putting content as viewed") + xbmc.sleep(700) + ctl.selectItem(pos) + def mark_season_as_watched_on_kodi(item, value=1): """ @@ -330,6 +345,7 @@ def mark_season_as_watched_on_kodi(item, value=1): @param value: > 0 for seen, 0 for not seen """ logger.debug() + # support.dbg() # logger.debug("item:\n" + item.tostring('\n')) # We can only mark the season as seen in the Kodi database if the database is local, in case of sharing database this functionality will not work @@ -344,7 +360,7 @@ def mark_season_as_watched_on_kodi(item, value=1): request_season = ' and c12= %s' % item.contentSeason tvshows_path = filetools.join(config.get_videolibrary_path(), config.get_setting("folder_tvshows")) - item_path1 = "%" + item.path.replace("\\\\", "\\").replace(tvshows_path, "") + item_path1 = "%" + item.base_name.replace("\\\\", "\\").replace(tvshows_path, "") if item_path1[:-1] != "\\": item_path1 += "\\" item_path2 = item_path1.replace("\\", "/") @@ -354,37 +370,64 @@ def mark_season_as_watched_on_kodi(item, value=1): execute_sql_kodi(sql) def set_watched_on_kod(data): + # support.dbg() from specials import videolibrary - from core import videolibrarytools + from core.videolibrarytools import videolibrarydb + data = jsontools.load(data) Type = data.get('item', {}).get('type','') ID = data.get('item', {}).get('id','') if not Type or not ID: return playcount = data.get('playcount',0) - for Type in ['movie', 'episode']: - sql = 'select strFileName, strPath, uniqueid_value from %s_view where (id%s like "%s")' % (Type, Type.capitalize(), ID) + if Type in ['episode']: + sql = 'select c18 from {}_view where (id{} like "{}")'.format(Type, 
Type.capitalize(), ID) n, records = execute_sql_kodi(sql) if records: - for filename, path, uniqueid_value in records: - if Type in ['movie']: - title = filename.replace('.strm', ' [' + uniqueid_value + ']') - filename = title +'.nfo' - else: - title = filename.replace('.strm', '') - filename = 'tvshow.nfo' + _id = scrapertools.find_single_match(records[0][0], r'\[([^\]]+)') + episode = scrapertools.find_single_match(records[0][0], r'(\d+x\d+)') + season = episode.split('x')[0] + episodes = videolibrarydb['episodes'].get(_id, {}) + item = episodes.get(episode, {}).get('item', None) - path = filetools.join(path, filename) - head_nfo, item = videolibrarytools.read_nfo(path) - item.library_playcounts.update({title: playcount}) - filetools.write(path, head_nfo + item.tojson()) + if Type in ['season']: + sql = 'select season, strPath from {}_view where (id{} like "{}")'.format(Type, Type.capitalize(), ID) + n, records = execute_sql_kodi(sql) + if records: + logger.debug('RECORDS' , records) + _id = scrapertools.find_single_match(records[0][1], r'\[([^\]]+)') + season = records[0][0] + seasons = videolibrarydb['seasons'].get(_id, {}) + item = seasons.get(season, None) + item.all_ep - if item.infoLabels['mediatype'] == "tvshow": - for season in item.library_playcounts: - if "season" in season: - season_num = int(scrapertools.find_single_match(season, r'season (\d+)')) - item = videolibrary.check_season_playcount(item, season_num) - filetools.write(path, head_nfo + item.tojson()) + else: + # support.dbg() + sql = 'select strPath from {}_view where (id{} like "{}")'.format(Type, Type.replace('tv','').capitalize(), ID) + n, records = execute_sql_kodi(sql) + if records: + logger.debug('RECORDS' , records) + _id = scrapertools.find_single_match(records[0][0], r'\[([^\]]+)') + contents = videolibrarydb[Type].get(_id, {}) + item = contents.get('item', None) + if item: + item.playcount = playcount + item.not_update = True + videolibrary.mark_content_as_watched(item) + + + videolibrarydb.close() + # path = filetools.join(path, filename) + # head_nfo, item = videolibrarytools.read_nfo(path) + # item.library_playcounts.update({title: playcount}) + # filetools.write(path, head_nfo + item.tojson()) + + # if item.infoLabels['mediatype'] == "tvshow": + # for season in item.library_playcounts: + # if "season" in season: + # season_num = int(scrapertools.find_single_match(season, r'season (\d+)')) + # item = videolibrary.check_season_playcount(item, season_num) + # filetools.write(path, head_nfo + item.tojson()) def mark_content_as_watched_on_kod(path): from specials import videolibrary @@ -1072,6 +1115,47 @@ def clean(path_list=[]): progress.close() +def clean_by_id(item): + logger.debug() + + # imdb_id = item.infoLabels.get('imdb_id', '') + tmdb_id = item.infoLabels.get('tmdb_id', '') + season_id = item.infoLabels.get('temporada_id', '') + episode_id = item.infoLabels.get('episodio_id', '') + # support.dbg() + + # search movie ID + if item.contentType == 'movie': + nun_records, records = execute_sql_kodi('SELECT idMovie FROM movie_view WHERE uniqueid_value LIKE "%s"' % tmdb_id) + # delete movie + if records: + payload = {"jsonrpc": "2.0", "method": "VideoLibrary.RemoveMovie", "id": 1, "params": {"movieid": records[0][0]}} + data = get_data(payload) + return + + # search tvshow ID + elif item.contentType == 'tvshow': + nun_records, records = execute_sql_kodi('SELECT idShow FROM tvshow_view WHERE uniqueid_value LIKE "%s"' % tmdb_id) + # delete TV show + if records: + payload = {"jsonrpc": "2.0", "method": 
"VideoLibrary.RemoveTVShow", "id": 1, "params": {"tvshowid": records[0][0]}} + data = get_data(payload) + + elif item.contentType == 'episode': + nun_records, records = execute_sql_kodi('SELECT idEpisode FROM episode_view WHERE uniqueid_value LIKE "%s"' % episode_id) + # delete TV show + if records: + payload = {"jsonrpc": "2.0", "method": "VideoLibrary.RemoveEpisode", "id": 1, "params": {"episodeid": records[0][0]}} + data = get_data(payload) + + elif item.contentType == 'season': + nun_records, records = execute_sql_kodi('SELECT idSeason FROM season_view WHERE uniqueid_value LIKE "%s"' % season_id) + # delete TV show + if records: + payload = {"jsonrpc": "2.0", "method": "VideoLibrary.RemoveSeason", "id": 1, "params": {"seasonid": records[0][0]}} + data = get_data(payload) + + def check_db(path): if '\\' in path: sep = '\\' else: sep = '/' @@ -1428,3 +1512,4 @@ class NextDialog(xbmcgui.WindowXMLDialog): self.set_exit(True) self.set_continue_watching(False) self.close() + diff --git a/resources/language/resource.language.en_gb/strings.po b/resources/language/resource.language.en_gb/strings.po index ec414ea0..ab89450a 100644 --- a/resources/language/resource.language.en_gb/strings.po +++ b/resources/language/resource.language.en_gb/strings.po @@ -6139,6 +6139,18 @@ msgctxt "#70834" msgid "Playlist" msgstr "" +msgctxt "#70835" +msgid "Episode" +msgstr "" + +msgctxt "#70836" +msgid "Season" +msgstr "" + +msgctxt "#70837" +msgid "Enable/Disable Channels" +msgstr "" + # DNS start [ settings and declaration ] msgctxt "#707401" msgid "Enable DNS check alert" diff --git a/resources/language/resource.language.it_it/strings.po b/resources/language/resource.language.it_it/strings.po index d1198d29..6497826c 100644 --- a/resources/language/resource.language.it_it/strings.po +++ b/resources/language/resource.language.it_it/strings.po @@ -6140,6 +6140,18 @@ msgctxt "#70834" msgid "Playlist" msgstr "Playlist" +msgctxt "#70835" +msgid "Episode" +msgstr "Episodio" + +msgctxt "#70836" +msgid "Season" +msgstr "Stagione" + +msgctxt "#70837" +msgid "Enable/Disable Channels" +msgstr "Abilita/Disabilita Canali" + # DNS start [ settings and declaration ] msgctxt "#707401" msgid "Enable DNS check alert" diff --git a/resources/settings.xml b/resources/settings.xml index 3acf0ec7..b13e0096 100644 --- a/resources/settings.xml +++ b/resources/settings.xml @@ -12,6 +12,7 @@ + diff --git a/specials/downloads.py b/specials/downloads.py index 6ff6b739..ca1d5486 100644 --- a/specials/downloads.py +++ b/specials/downloads.py @@ -918,7 +918,7 @@ def get_episodes(item): info("Omitiendo item no válido:", episode.tostring()) # Any other result is not worth it, we ignore it... - itemlist = videolibrarytools.filter_list(itemlist) + # itemlist = videolibrarytools.filter_list(itemlist) return itemlist diff --git a/specials/globalsearch.py b/specials/globalsearch.py index 8d156f3f..7d63b097 100644 --- a/specials/globalsearch.py +++ b/specials/globalsearch.py @@ -642,7 +642,7 @@ class SearchWindow(xbmcgui.WindowXML): busy(False) return - if item.action in ['add_pelicula_to_library', 'add_serie_to_library','save_download']: # special items (add to videolibrary, download ecc.) + if item.action in ['add_movie_to_library', 'add_serie_to_library','save_download']: # special items (add to videolibrary, download ecc.) xbmc.executebuiltin("RunPlugin(plugin://plugin.video.kod/?" 
+ item_url + ")") busy(False) return diff --git a/specials/videolibrary.py b/specials/videolibrary.py index f3d20e28..bbe12af9 100644 --- a/specials/videolibrary.py +++ b/specials/videolibrary.py @@ -2,7 +2,7 @@ #from builtins import str import sys -from core import support +from core import httptools, support PY3 = False if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int @@ -11,13 +11,17 @@ import xbmc, os, traceback from time import time from core import filetools, scrapertools, videolibrarytools -from core.support import typo, thumb +from core.support import typo, thumb, videolibrary from core.item import Item -from platformcode import config, logger, platformtools +from platformcode import config, launcher, logger, platformtools if PY3: from concurrent import futures + import urllib.parse as urlparse else: from concurrent_py2 import futures + import urlparse + +from core.videolibrarytools import videolibrarydb def mainlist(item): @@ -38,28 +42,48 @@ def channel_config(item): def list_movies(item, silent=False): + from core import jsontools logger.debug() + itemlist = [] movies_path = [] - for root, folders, files in filetools.walk(videolibrarytools.MOVIES_PATH): - for f in folders: - movies_path += [filetools.join(root, f, f + ".nfo")] - local = False - for f in filetools.listdir(filetools.join(root, f)): - if f.split('.')[-1] not in ['nfo','json','strm']: - local= True - break + ids = [] - # from core.support import dbg;dbg() - # for movie_path in movies_path: - # get_results(movie_path, root, 'movie', local) - with futures.ThreadPoolExecutor() as executor: - itlist = [executor.submit(get_results, movie_path, root, 'movie', local) for movie_path in movies_path] - for res in futures.as_completed(itlist): - item_movie, value = res.result() - # verify the existence of the channels - if item_movie.library_urls and len(item_movie.library_urls) > 0: - itemlist += [item_movie] + # for root, folders, files in filetools.walk(videolibrarytools.MOVIES_PATH): + # for f in folders: + # ID = scrapertools.find_single_match(f, r'\[([^\]]+)') + # if ID: + # ids.append(ID) + # if ID not in videolibrarydb['movie']: + # ids.append(ID) + # movies_path += [filetools.join(root, f, f + ".nfo")] + # local = False + # for f in filetools.listdir(filetools.join(root, f)): + # if f.split('.')[-1] not in ['nfo','json','strm']: + # local= True + # break + + # with futures.ThreadPoolExecutor() as executor: + # itlist = [executor.submit(get_results, movie_path, root, 'movie', local) for movie_path in movies_path] + # for res in futures.as_completed(itlist): + # item_movie, value = res.result() + # # verify the existence of the channels + # if item_movie.library_urls and len(item_movie.library_urls) > 0: + # code = scrapertools.find_single_match(item_movie.strm_path, r'\[([^\]]+)') + # videolibrarydb['movie'][code] = {'item':jsontools.load(item_movie.tojson())} + movies = dict(videolibrarydb['movie']) + videolibrarydb.close() + for key, value in movies.items(): + # if key not in ids: + # del videolibrarydb['movie'][key] + # else: + item = value['item'] + item.context = [{'title':config.get_localized_string(70084),'channel':'videolibrary', 'action':'delete'}] + if len(item.lang_list) > 1: + item.context += [{"title": config.get_localized_string(70246), + "action": "prefered_lang", + "channel": "videolibrary"}] + itemlist.append(item) if silent == False: return sorted(itemlist, key=lambda it: it.title.lower()) else: return @@ -68,26 +92,98 @@ def list_movies(item, silent=False): def 
list_tvshows(item): logger.debug() itemlist = [] - lista = [] + tvshows_path = [] + ids = [] + # lista = [] root = videolibrarytools.TVSHOWS_PATH - start = time() - with futures.ThreadPoolExecutor() as executor: - itlist = [executor.submit(get_results, filetools.join(root, folder, "tvshow.nfo"), root, 'tvshow') for folder in filetools.listdir(root)] - for res in futures.as_completed(itlist): - item_tvshow, value = res.result() - # verify the existence of the channels - if item_tvshow.library_urls and len(item_tvshow.library_urls) > 0: - itemlist += [item_tvshow] - lista += [{'title':item_tvshow.contentTitle,'thumbnail':item_tvshow.thumbnail,'fanart':item_tvshow.fanart, 'active': value, 'nfo':item_tvshow.nfo}] - logger.debug('load list',time() - start) + # start = time() + # for root, folders, files in filetools.walk(videolibrarytools.TVSHOWS_PATH): + # for f in folders: + # ID = scrapertools.find_single_match(f, r'\[([^\]]+)') + # if ID: + # ids.append(ID) + # if ID not in videolibrarydb['movie']: + # ids.append(ID) + # tvshows_path += [filetools.join(root, f, f + ".nfo")] + # local = False + # for f in filetools.listdir(filetools.join(root, f)): + # if f.split('.')[-1] not in ['nfo','json','strm']: + # local= True + # break + # with futures.ThreadPoolExecutor() as executor: + # itlist = [executor.submit(get_results, tvshow_path, root, 'tvshow', local) for tvshow_path in tvshows_path] + # itlist = [executor.submit(get_results, filetools.join(root, folder, "tvshow.nfo"), root, 'tvshow') for folder in filetools.listdir(root)] + # for res in futures.as_completed(itlist): + # item_tvshow, value = res.result() + # # verify the existence of the channels + # if item_tvshow.library_urls and len(item_tvshow.library_urls) > 0: + # code = scrapertools.find_single_match(item_tvshow.strm_path, r'\[([^\]]+)') + # db['tvshow'][code] = {'item':jsontools.load(item_tvshow.tojson())} + # # itemlist += [item_tvshow] + # lista += [{'title':item_tvshow.contentTitle,'thumbnail':item_tvshow.thumbnail,'fanart':item_tvshow.fanart, 'active': value, 'nfo':item_tvshow.nfo}] + # logger.debug('load list',time() - start) + + + series = dict(videolibrarydb['tvshow']) + videolibrarydb.close() + + for key, value in series.items(): + + item = value['item'] + item.contentType = 'tvshow' + + item.context = [{'title':config.get_localized_string(70085),'channel':'videolibrary', 'action':'delete'}] + + if len(item.lang_list) > 1: + item.context += [{"title": config.get_localized_string(70246), + "action": "prefered_lang", + "channel": "videolibrary"}] + if len(value['channels'].keys()) > 1: + item.context += [{"title": config.get_localized_string(70837), + "action": "disable_channels", + "channel": "videolibrary"}] + watched = item.infoLabels.get("playcount", 0) + if watched > 0: + title = config.get_localized_string(60020) + value = 0 + else: + title = config.get_localized_string(60021) + value = 1 + + item.context += [{"title": title, + "action": "mark_content_as_watched", + "channel": "videolibrary", + "playcount": value, + "videolibrary_id": item.videolibrary_id}] + if not item.active: + item.title = '{} {}'.format(item.title, support.typo('','bullet bold')) + title = config.get_localized_string(60023) + else: + title = config.get_localized_string(60022) + item.context += [{"title": title, + "action": "set_active", + "channel": "videolibrary", + "playcount": value, + "videolibrary_id": item.videolibrary_id}] + item.context += [{"title": 'Poster', + "action": "change_poster", + "channel": "videolibrary", + "playcount": value, 
+ "videolibrary_id": item.videolibrary_id}] + item.context += [{"title": 'fanart', + "action": "change_fanart", + "channel": "videolibrary", + "playcount": value, + "videolibrary_id": item.videolibrary_id}] + + itemlist.append(item) + if itemlist: itemlist = sorted(itemlist, key=lambda it: it.title.lower()) itemlist += [Item(channel=item.channel, action="update_videolibrary", thumbnail=item.thumbnail, - title=typo(config.get_localized_string(70269), 'bold color kod'), folder=False), - Item(channel=item.channel, action="configure_update_videolibrary", thumbnail=item.thumbnail, - title=typo(config.get_localized_string(60599), 'bold color kod'), lista=lista, folder=False)] + title=typo(config.get_localized_string(70269), 'bold color kod'), folder=False)] return itemlist @@ -220,56 +316,51 @@ def configure_update_videolibrary(item): def get_seasons(item): logger.debug() - # logger.debug("item:\n" + item.tostring('\n')) + itemlist = [] dict_temp = {} - videolibrarytools.check_renumber_options(item) - - raiz, carpetas_series, ficheros = next(filetools.walk(item.path)) - - # Menu contextual: Releer tvshow.nfo - head_nfo, item_nfo = videolibrarytools.read_nfo(item.nfo) if config.get_setting("no_pile_on_seasons", "videolibrary") == 2: # Ever return get_episodes(item) - for f in ficheros: - if f.endswith('.json'): - season = f.split('x')[0] - dict_temp[season] = config.get_localized_string(60027) % season - - if config.get_setting("no_pile_on_seasons", "videolibrary") == 1 and len( - dict_temp) == 1: # Only if there is a season + if config.get_setting("no_pile_on_seasons", "videolibrary") == 1 and len(dict_temp) == 1: # Only if there is a season + item.from_library = True return get_episodes(item) else: + from core import tmdb + seasons = videolibrarydb['season'][item.videolibrary_id] + videolibrarydb.close() # We create one item for each season - for season, title in list(dict_temp.items()): - new_item = item.clone(action="get_episodes", title=title, contentSeason=season, - filtrar_season=True, channel='videolibrary') + for season in seasons.values(): + new_item = season + new_item.contentType = 'season' #Contextual menu: Mark the season as viewed or not - visto = item_nfo.library_playcounts.get("season %s" % season, 0) - new_item.infoLabels["playcount"] = visto - if visto > 0: - texto = config.get_localized_string(60028) + watched = new_item.infoLabels.get("playcount", 0) + if watched > 0: + title = config.get_localized_string(60028) value = 0 else: - texto = config.get_localized_string(60029) + title = config.get_localized_string(60029) value = 1 - new_item.context = [{"title": texto, - "action": "mark_season_as_watched", + + new_item.context = [{"title": title, + "action": "mark_content_as_watched", "channel": "videolibrary", - "playcount": value}] + "playcount": value, + "videolibrary_id": item.videolibrary_id}] # logger.debug("new_item:\n" + new_item.tostring('\n')) itemlist.append(new_item) if len(itemlist) > 1: itemlist = sorted(itemlist, key=lambda it: int(it.contentSeason)) + else: + return get_episodes(itemlist[0]) if config.get_setting("show_all_seasons", "videolibrary"): - new_item = item.clone(action="get_episodes", title=config.get_localized_string(60030)) + new_item = item.clone(action="get_episodes", title=config.get_localized_string(60030), all=True) new_item.infoLabels["playcount"] = 0 itemlist.insert(0, new_item) @@ -279,61 +370,34 @@ def get_seasons(item): def get_episodes(item): logger.debug() - # logger.debug("item:\n" + item.tostring('\n')) itemlist = [] - # We get the 
archives of the episodes - raiz, carpetas_series, ficheros = next(filetools.walk(item.path)) + episodes = videolibrarydb['episode'][item.videolibrary_id] + videolibrarydb.close() - # Menu contextual: Releer tvshow.nfo - head_nfo, item_nfo = videolibrarytools.read_nfo(item.nfo) + for title, ep in episodes.items(): + it = ep['item'] - # Create an item in the list for each strm found - for i in ficheros: - ext = i.split('.')[-1] - if ext not in ['json','nfo']: - season_episode = scrapertools.get_season_and_episode(i) - if not season_episode: - # The file does not include the season and episode number - continue - season, episode = season_episode.split("x") - # If there is a filter by season, we ignore the chapters of other seasons - if item.filtrar_season and int(season) != int(item.contentSeason): - continue - # Get the data from the season_episode.nfo - nfo_path = filetools.join(raiz, '%sx%s.nfo' % (season, episode)) - if filetools.isfile(nfo_path): - head_nfo, epi = videolibrarytools.read_nfo(nfo_path) + if it.contentSeason == item.contentSeason or item.all: + if config.get_setting("no_pile_on_seasons", "videolibrary") == 2 or item.all: + it.title = '{}x{}'.format(it.contentSeason, it.title) + it = get_host(it) + it.from_library = item.from_library + watched = it.infoLabels.get("playcount", 0) + if watched > 0: + title = config.get_localized_string(60032) + value = 0 + else: + title = config.get_localized_string(60033) + value = 1 - # Set the chapter title if possible - if epi.contentTitle and epi.contentTitle != epi.fulltitle: - title_episodie = epi.contentTitle.strip() - else: - title_episodie = config.get_localized_string(60031) % (epi.contentSeason, str(epi.contentEpisodeNumber).zfill(2)) - - epi.contentTitle = "%sx%s" % (epi.contentSeason, str(epi.contentEpisodeNumber).zfill(2)) - epi.title = "%sx%s - %s" % (epi.contentSeason, str(epi.contentEpisodeNumber).zfill(2), title_episodie) - - if item_nfo.library_filter_show: - epi.library_filter_show = item_nfo.library_filter_show - - # Contextual menu: Mark episode as seen or not - visto = item_nfo.library_playcounts.get(season_episode, 0) - epi.infoLabels["playcount"] = visto - if visto > 0: - texto = config.get_localized_string(60032) - value = 0 - else: - texto = config.get_localized_string(60033) - value = 1 - epi.context = [{"title": texto, - "action": "mark_content_as_watched", - "channel": "videolibrary", - "playcount": value, - "nfo": item.nfo}] - if ext != 'strm': - epi.local = True - itemlist.append(epi) + it.context = [{"title": title, + "action": "mark_content_as_watched", + "channel": "videolibrary", + "playcount": value, + 'allep': True, + "videolibrary_id": item.videolibrary_id}] + itemlist.append(it) itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber))) add_download_items(item, itemlist) @@ -345,235 +409,110 @@ def findvideos(item): from platformcode import platformtools logger.debug() - # logger.debug("item:\n" + item.tostring('\n')) videolibrarytools.check_renumber_options(item) itemlist = [] - list_canales = {} - item_local = None - # Disable autoplay - # autoplay.set_status(False) - - if not item.contentTitle or not item.strm_path: + if not item.strm_path: logger.debug("Unable to search for videos due to lack of parameters") return [] - - if item.contentEpisodeNumber: - content_title = str(item.contentSeason) + 'x' + (str(item.contentEpisodeNumber) if item.contentEpisodeNumber > 9 else '0' + str(item.contentEpisodeNumber)) - else: - content_title = 
item.contentTitle.strip().lower() + if not item.videolibrary_id: item.videolibrary_id = scrapertools.find_single_match(item.strm_path , r'\[([^\]]+)') if item.contentType == 'movie': - item.strm_path = filetools.join(videolibrarytools.MOVIES_PATH, item.strm_path) - path_dir = filetools.dirname(item.strm_path) - item.nfo = filetools.join(path_dir, filetools.basename(path_dir) + ".nfo") + videolibrary_items = videolibrarydb['movie'][item.videolibrary_id]['channels'] + prefered_lang = videolibrarydb['movie'].get(item.videolibrary_id, {}).get('item', Item()).prefered_lang + disabled = videolibrarydb['movie'].get(item.videolibrary_id, {}).get('item', Item()).disabled else: - item.strm_path = filetools.join(videolibrarytools.TVSHOWS_PATH, item.strm_path) - path_dir = filetools.dirname(item.strm_path) - item.nfo = filetools.join(path_dir, 'tvshow.nfo') + ep = '{:d}x{:02d}'.format(item.contentSeason, item.contentEpisodeNumber) + videolibrary_items = videolibrarydb['episode'][item.videolibrary_id][ep]['channels'] + prefered_lang = videolibrarydb['tvshow'].get(item.videolibrary_id, {}).get('item', Item()).prefered_lang + disabled = videolibrarydb['tvshow'].get(item.videolibrary_id, {}).get('item', Item()).disabled + videolibrarydb.close() + if 'local' in videolibrary_items: + try: + item.channel = 'local' + item.url = filetools.join(videolibrarytools.TVSHOWS_PATH, videolibrary_items['local']) + return play(item) + except: pass + else: + if prefered_lang: + for key, values in videolibrary_items.items(): + if len(values) > 1: + allowed = [] + for v in values: + v.contentTitle = item.title + if v.contentLanguage == prefered_lang: + allowed.append(v) + if allowed: + videolibrary_items[key] = allowed + else: + videolibrary_items[key] = values + else: + videolibrary_items[key] = values - for fd in filetools.listdir(path_dir): - if fd.endswith('.json'): - contenido, nom_canal = fd[:-6].split('[') - if (contenido.startswith(content_title) or item.contentType == 'movie') and nom_canal not in list(list_canales.keys()): - list_canales[nom_canal] = filetools.join(path_dir, fd) + with futures.ThreadPoolExecutor() as executor: + itlist = [executor.submit(servers, ch, value) for ch, value in videolibrary_items.items() if ch not in disabled] + for res in futures.as_completed(itlist): + itemlist += res.result() - num_canales = len(list_canales) + if len(itlist) > 1: + for it in itemlist: + it.title = '[{}] {}'.format(it.ch_name, it.title) - if 'downloads' in list_canales: - json_path = list_canales['downloads'] - item_json = Item().fromjson(filetools.read(json_path)) - item_json.contentChannel = "local" - # Support for relative paths in downloads - if filetools.is_relative(item_json.url): - item_json.url = filetools.join(videolibrarytools.VIDEOLIBRARY_PATH, item_json.url) - - del list_canales['downloads'] - - # Check that the video has not been deleted - if filetools.exists(item_json.url): - item_local = item_json.clone(action='play') - itemlist.append(item_local) - else: - num_canales -= 1 - - filtro_canal = '' - if num_canales > 1 and config.get_setting("ask_channel", "videolibrary"): - opciones = [config.get_localized_string(70089) % k.capitalize() for k in list(list_canales.keys())] - opciones.insert(0, config.get_localized_string(70083)) - if item_local: - opciones.append(item_local.title) - - index = platformtools.dialog_select(config.get_localized_string(30163), opciones) - if index < 0: + if autoplay.play_multi_channel(item, itemlist): # hideserver return [] - elif item_local and index == len(opciones) - 1: 
- filtro_canal = 'downloads' - platformtools.play_video(item_local) - - elif index > 0: - filtro_canal = opciones[index].replace(config.get_localized_string(70078), "").strip() - itemlist = [] - - all_videolibrary = [] - ch_results = [] - list_servers = [] - - with futures.ThreadPoolExecutor() as executor: - for nom_canal, json_path in list(list_canales.items()): - if filtro_canal and filtro_canal != nom_canal.capitalize(): - continue - - # We import the channel of the selected part - try: - if nom_canal == 'community': - channel = __import__('specials.%s' % nom_canal, fromlist=["channels.%s" % nom_canal]) - else: - channel = __import__('channels.%s' % nom_canal, fromlist=["channels.%s" % nom_canal]) - except ImportError: - exec("import channels." + nom_canal + " as channel") - except: - dead_list = [] - zombie_list = [] - - if nom_canal not in dead_list and nom_canal not in zombie_list: confirm = platformtools.dialog_yesno(config.get_localized_string(30131), config.get_localized_string(30132) % nom_canal.upper() + '\n' + config.get_localized_string(30133)) - elif nom_canal in zombie_list: confirm = False - else: confirm = True - - if confirm: - # delete the channel from all movie and tvshow - from past.utils import old_div - num_enlaces = 0 - dialog = platformtools.dialog_progress(config.get_localized_string(30131), config.get_localized_string(60005) % nom_canal) - if not all_videolibrary: - all_videolibrary = list_movies(Item()) + list_tvshows(Item()) - for n, it in enumerate(all_videolibrary): - if nom_canal in it.library_urls: - dead_item = Item(multichannel=len(it.library_urls) > 1, - contentType=it.contentType, - dead=nom_canal, - path=filetools.split(it.nfo)[0], - nfo=it.nfo, - library_urls=it.library_urls, - infoLabels={'title': it.contentTitle}) - num_enlaces += delete(dead_item) - dialog.update(old_div(100*n, len(all_videolibrary))) - - dialog.close() - msg_txt = config.get_localized_string(70087) % (num_enlaces, nom_canal) - logger.info(msg_txt) - platformtools.dialog_notification(config.get_localized_string(30131), msg_txt) - platformtools.itemlist_refresh() - - if nom_canal not in dead_list: - dead_list.append(nom_canal) - continue - else: - if nom_canal not in zombie_list: - zombie_list.append(nom_canal) - - if len(dead_list) > 0: - for nom_canal in dead_list: - if nom_canal in item.library_urls: - del item.library_urls[nom_canal] - - item_json = Item().fromjson(filetools.read(json_path)) - # support.dbg() - try: from urllib.parse import urlsplit - except ImportError: from urlparse import urlsplit - try: - if urlsplit(item_json.url).netloc.split('.')[0] in channel.host: - item_json.url = channel.host + urlsplit(item_json.url).path - except: pass - - try: - # FILTERTOOLS - # if the channel has a filter, the name it has saved is passed to it so that it filters correctly. 
- if "list_language" in item_json: - # if it comes from the addon video library - if "library_filter_show" in item: - item_json.show = item.library_filter_show.get(nom_canal, "") - - # We run find_videos, from the channel or common - item_json.contentChannel = 'videolibrary' - item_json.play_from = item.play_from - item_json.nfo = item.nfo - item_json.strm_path = item.strm_path - if hasattr(channel, 'findvideos'): - from core import servertools - if item_json.videolibray_emergency_urls: - del item_json.videolibray_emergency_urls - ch_results.append(executor.submit(getattr(channel, 'findvideos'), item_json)) - elif item_json.action == 'play': - from platformcode import platformtools - # autoplay.set_status(True) - item_json.contentChannel = item_json.channel - item_json.channel = "videolibrary" - platformtools.play_video(item_json) - return '' - else: - from core import servertools - ch_results.append(executor.submit(servertools.find_video_items, item_json)) - - except: - import traceback - logger.error("The findvideos function for the channel %s failed" % nom_canal) - logger.error(traceback.format_exc()) - - for ris in futures.as_completed(ch_results): - try: - list_servers.extend(ris.result()) - except: - import traceback - logger.error("The findvideos function for a channel failed") - logger.error(traceback.format_exc()) - - - # Change the title to the servers adding the name of the channel in front and the infoLabels and the images of the item if the server does not have - for server in list_servers: - server.contentChannel = server.channel - server.channel = "videolibrary" - server.nfo = item.nfo - server.strm_path = item.strm_path - server.play_from = item.play_from - - # Kodi 18 Compatibility - Prevents wheel from spinning around in Direct Links - if server.action == 'play': - server.folder = False - - # Channel name is added if desired - if config.get_setting("quit_channel_name", "videolibrary") == 0: - server.title = "%s: %s" % (server.contentChannel.capitalize(), server.title) - - if not server.thumbnail: - server.thumbnail = item.thumbnail - - # logger.debug("server:\n%s" % server.tostring('\n')) - itemlist.append(server) - - if autoplay.play_multi_channel(item, itemlist): # hideserver - return [] - + itemlist.sort(key=lambda it: (videolibrarytools.quality_order.index(it.quality.lower()) if it.quality and it.quality.lower() in videolibrarytools.quality_order else 999, it.server)) add_download_items(item, itemlist) + return itemlist +def servers(ch, items): + serverlist = [] + from core import channeltools + ch_params = channeltools.get_channel_parameters(ch) + ch_name = ch_params.get('title', '') + + if ch_params.get('active', False): + + if os.path.isfile(os.path.join(config.get_runtime_path(), 'channels', ch + ".py")): CHANNELS = 'channels' + else: CHANNELS = 'specials' + try: channel = __import__('%s.%s' % (CHANNELS, ch), None, None, ['%s.%s' % (CHANNELS, ch)]) + except ImportError: exec("import " + CHANNELS + "." 
+ ch + " as channel") + with futures.ThreadPoolExecutor() as executor: + itlist = [executor.submit(channel_servers, it, channel, ch_name) for it in items] + for res in futures.as_completed(itlist): + serverlist += res.result() + return serverlist + +def channel_servers(it, channel, ch_name): + serverlist = [] + it.contentChannel = 'videolibrary' + it = get_host(it, channel) + for item in getattr(channel, it.action)(it): + if item.server and item.channel: + item.ch_name = ch_name + serverlist.append(item) + return serverlist + + def play(item): logger.debug() + + itemlist = [] # logger.debug("item:\n" + item.tostring('\n')) - if not item.contentChannel == "local": - if item.contentChannel == 'community': - channel = __import__('specials.%s' % item.contentChannel, fromlist=["channels.%s" % item.contentChannel]) - else: - channel = __import__('channels.%s' % item.contentChannel, fromlist=["channels.%s" % item.contentChannel]) + if not item.channel == "local": + try: + channel = __import__('specials.%s' % item.channel, fromlist=["channels.%s" % item.channel]) + except: + channel = __import__('channels.%s' % item.channel, fromlist=["channels.%s" % item.channel]) if hasattr(channel, "play"): itemlist = getattr(channel, "play")(item) else: itemlist = [item.clone()] else: - itemlist = [item.clone(url=item.url, server="local")] + itemlist = [item] if not itemlist: return [] @@ -582,20 +521,20 @@ def play(item): item.video_urls = itemlist itemlist = [item] - # This is necessary in case the channel play deletes the data - for v in itemlist: - if isinstance(v, Item): - v.nfo = item.nfo - v.strm_path = item.strm_path - v.infoLabels = item.infoLabels - if item.contentTitle: - v.title = item.contentTitle - else: - if item.contentType == "episode": - v.title = config.get_localized_string(60036) % item.contentEpisodeNumber - v.thumbnail = item.thumbnail - v.contentThumbnail = item.thumbnail - v.contentChannel = item.contentChannel + # # This is necessary in case the channel play deletes the data + # for v in itemlist: + # if isinstance(v, Item): + # v.nfo = item.nfo + # v.strm_path = item.strm_path + # v.infoLabels = item.infoLabels + # if item.contentTitle: + # v.title = item.contentTitle + # else: + # if item.contentType == "episode": + # v.title = config.get_localized_string(60036) % item.contentEpisodeNumber + # v.thumbnail = item.thumbnail + # v.contentThumbnail = item.thumbnail + # v.channel = item.channel return itemlist @@ -604,20 +543,114 @@ def update_videolibrary(item=''): logger.debug() # Update active series by overwriting - import service - service.check_for_update(overwrite=True) + # import service + # service.check_for_update(overwrite=True) + check_for_update(item) # Delete movie folders that do not contain strm file - for raiz, subcarpetas, ficheros in filetools.walk(videolibrarytools.MOVIES_PATH): - strm = False - for f in ficheros: - if f.endswith(".strm"): - strm = True - break + # for raiz, subcarpetas, ficheros in filetools.walk(videolibrarytools.MOVIES_PATH): + # strm = False + # for f in ficheros: + # if f.endswith(".strm"): + # strm = True + # break - if ficheros and not strm: - logger.debug("Deleting deleted movie folder: %s" % raiz) - filetools.rmdirtree(raiz) + # if ficheros and not strm: + # logger.debug("Deleting deleted movie folder: %s" % raiz) + # filetools.rmdirtree(raiz) + + +def check_for_update(ITEM = None): + logger.debug("Update Series...") + + import datetime + p_dialog = None + update_when_finished = False + now = datetime.date.today() + + try: + if 
config.get_setting("update", "videolibrary") != 0: + config.set_setting("updatelibrary_last_check", now.strftime('%Y-%m-%d'), "videolibrary") + + heading = config.get_localized_string(60389) + p_dialog = platformtools.dialog_progress_bg(config.get_localized_string(20000), config.get_localized_string(60037)) + p_dialog.update(0, '') + show_list = [] + + if ITEM: + show = videolibrarydb['tvshow'][ITEM.videolibrary_id] + videolibrarydb.close() + for s in show['channels'].values(): + show_list += s + else: + shows = dict(videolibrarydb['tvshow']).values() + videolibrarydb.close() + + for show in shows: + if show['item'].active: + for s in show['channels'].values(): + show_list += s + + t = float(100) / len(show_list) + i = 0 + for item in show_list: + i += 1 + p_dialog.update(int(i * t), heading % (item.fulltitle, item.channel) ) + item = get_host(item) + try: channel = __import__('channels.%s' % item.channel, fromlist=["channels.%s" % item.channel]) + except: channel = __import__('specials.%s' % item.channel, fromlist=["specials.%s" % item.channel]) + itemlist = getattr(channel, item.action)(item) + videolibrarytools.save_tvshow(item, itemlist, silent=True) + p_dialog.close() + except: + p_dialog.close() + logger.error(traceback.format_exc()) + + if ITEM: + update_when_finished = set_active_tvshow(show) + else: + for show in shows: + update_when_finished = set_active_tvshow(show) + + if update_when_finished: + platformtools.itemlist_refresh() + + + + # if config.get_setting('trakt_sync'): + # from core import trakt_tools + # trakt_tools.update_all() + +def set_active_tvshow(show): + update_when_finished = False + if show['item'].active: + prefered_lang = show['item'].prefered_lang + active = False if show['item'].infoLabels['status'].lower() == 'ended' else True + episodes = videolibrarydb['episode'][show['item'].videolibrary_id] + videolibrarydb.close() + if not active: + total_episodes = show['item'].infoLabels['number_of_episodes'] + episodes_list = [] + for episode in episodes.values(): + for ep in episode['channels'].values(): + ep_list = [e for e in ep if e.contentLanguage == prefered_lang] + if ep_list: episodes_list.append(ep_list) + + if len(episodes_list) == total_episodes: + a = False + update_when_finished = True + for i in range(len(episodes_list) - 1): + if len(episodes_list[i]) == len(episodes_list[i + 1]): + a = False + update_when_finished = True + else: + a = True + break + if not a: + show['item'].active = a + videolibrarydb['tvshow'][show['item'].videolibrary_id] = show + videolibrarydb.close() + return update_when_finished def move_videolibrary(current_path, new_path, current_movies_folder, new_movies_folder, current_tvshows_folder, new_tvshows_folder): @@ -848,135 +881,77 @@ def verify_playcount_series(item, path): return (item, False) -def mark_content_as_watched2(item): - logger.debug() - # logger.debug("item:\n" + item.tostring('\n')) - if filetools.isfile(item.nfo): - head_nfo, it = videolibrarytools.read_nfo(item.nfo) - name_file = "" - if item.contentType == 'movie' or item.contentType == 'tvshow': - name_file = os.path.splitext(filetools.basename(item.nfo))[0] - - if name_file != 'tvshow' : - it.library_playcounts.update({name_file: item.playcount}) - - if item.contentType == 'episode' or item.contentType == 'tvshow' or item.contentType == 'list' or name_file == 'tvshow': - name_file = os.path.splitext(filetools.basename(item.strm_path))[0] - num_season = name_file [0] - item.__setattr__('contentType', 'episode') - item.__setattr__('contentSeason', num_season) - - 
else: - name_file = item.contentTitle - - if not hasattr(it, 'library_playcounts'): - it.library_playcounts = {} - it.library_playcounts.update({name_file: item.playcount}) - - # it is verified that if all the episodes of a season are marked, tb the season is marked - if item.contentType != 'movie': - it = check_season_playcount(it, item.contentSeason) - - # We save the changes to item.nfo - if filetools.write(item.nfo, head_nfo + it.tojson()): - item.infoLabels['playcount'] = item.playcount - - if config.is_xbmc(): - from platformcode import xbmc_videolibrary - xbmc_videolibrary.mark_content_as_watched_on_kodi(item , item.playcount) - - def mark_content_as_watched(item): logger.debug() - #logger.debug("item:\n" + item.tostring('\n')) - if filetools.exists(item.nfo): - head_nfo, it = videolibrarytools.read_nfo(item.nfo) + if not item.videolibrary_id: + for code in item.infoLabels['code']: + if code and code != 'None': + break + item.videolibrary_id=code + if item.contentType == 'episode': + episodes = videolibrarydb['episode'][item.videolibrary_id] + episodes['{}x{}'.format(item.contentSeason, str(item.contentEpisodeNumber).zfill(2))]['item'].infoLabels['playcount'] = item.playcount + videolibrarydb['episode'][item.videolibrary_id] = episodes + videolibrarydb.close() - if item.contentType == 'movie': - name_file = os.path.splitext(filetools.basename(item.nfo))[0] - elif item.contentType == 'episode': - name_file = "%sx%s" % (item.contentSeason, str(item.contentEpisodeNumber).zfill(2)) + season_episodes = [ep for ep in episodes.values() if ep['item'].contentSeason == item.contentSeason] + watched = [ep for ep in season_episodes if ep['item'].infoLabels['playcount'] > 0] + if len(watched) == len(season_episodes): + item.playcount = 1 else: - name_file = item.contentTitle + item.playcount = 0 + mark_season_as_watched(item) - if not hasattr(it, 'library_playcounts'): - it.library_playcounts = {} - it.library_playcounts.update({name_file: item.playcount}) + elif item.contentType == 'season': + mark_season_as_watched(item) - # it is verified that if all the episodes of a season are marked, tb the season is marked - if item.contentType != 'movie': - it = check_season_playcount(it, item.contentSeason) + else: + content = videolibrarydb[item.contentType][item.videolibrary_id] + content['item'].infoLabels['playcount'] = item.playcount + videolibrarydb[item.contentType][item.videolibrary_id] = content + seasons = videolibrarydb['season'][item.videolibrary_id] + videolibrarydb.close() + item.all_ep = True + if item.contentType == 'tvshow': + for season in seasons.keys(): + item.contentSeason = season + mark_season_as_watched(item) - # We save the changes to item.nfo - if filetools.write(item.nfo, head_nfo + it.tojson()): - item.infoLabels['playcount'] = item.playcount - - if item.contentType == 'tvshow' and item.type != 'episode' : - # Update entire series - new_item = item.clone(contentSeason=-1) - mark_season_as_watched(new_item) - - if config.is_xbmc(): - from platformcode import xbmc_videolibrary - xbmc_videolibrary.mark_content_as_watched_on_kodi(item, item.playcount) - - platformtools.itemlist_refresh() + if config.is_xbmc() and not item.not_update: + from platformcode import xbmc_videolibrary + xbmc_videolibrary.mark_content_as_watched_on_kodi(item, item.playcount) def mark_season_as_watched(item): logger.debug() - # logger.debug("item:\n" + item.tostring('\n')) - # Get dictionary of marked episodes - if not item.path: f = item.nfo - else: f = filetools.join(item.path, 'tvshow.nfo') + seasons = 
videolibrarydb['season'][item.videolibrary_id] + seasons[item.contentSeason].infoLabels['playcount'] = item.playcount + videolibrarydb['season'][item.videolibrary_id] = seasons + episodes = videolibrarydb['episode'][item.videolibrary_id] + videolibrarydb.close() - head_nfo, it = videolibrarytools.read_nfo(f) - if not hasattr(it, 'library_playcounts'): - it.library_playcounts = {} + for n, ep in episodes.items(): + if ep['item'].contentSeason == item.contentSeason: + episodes[n]['item'].infoLabels['playcount'] = item.playcount - # We get the archives of the episodes - raiz, carpetas_series, ficheros = next(filetools.walk(item.path)) + videolibrarydb['episode'][item.videolibrary_id] = episodes + videolibrarydb.close() - # We mark each of the episodes found this season - episodios_marcados = 0 - for i in ficheros: - if i.endswith(".strm"): - season_episode = scrapertools.get_season_and_episode(i) - if not season_episode: - # The file does not include the season and episode number - continue - season, episode = season_episode.split("x") + watched = True + for season in seasons.values(): + if season.infoLabels['playcount'] != item.playcount: + watched = False - if int(item.contentSeason) == -1 or int(season) == int(item.contentSeason): - name_file = os.path.splitext(filetools.basename(i))[0] - it.library_playcounts[name_file] = item.playcount - episodios_marcados += 1 + if watched or item.playcount == 0: + tvshow = videolibrarydb['tvshow'][item.videolibrary_id] + it = videolibrarydb['tvshow'][item.videolibrary_id]['item'] + it.infoLabels['playcount'] = item.playcount + tvshow['item'] = it + videolibrarydb['tvshow'][item.videolibrary_id] = tvshow + videolibrarydb.close() - if episodios_marcados: - if int(item.contentSeason) == -1: - # We add all seasons to the dictionary item.library_playcounts - for k in list(it.library_playcounts.keys()): - if k.startswith("season"): - it.library_playcounts[k] = item.playcount - else: - # Add season to dictionary item.library_playcounts - it.library_playcounts["season %s" % item.contentSeason] = item.playcount - - # it is verified that if all the seasons are seen, the series is marked as view - it = check_tvshow_playcount(it, item.contentSeason) - - # We save the changes to tvshow.nfo - filetools.write(f, head_nfo + it.tojson()) - item.infoLabels['playcount'] = item.playcount - - if config.is_xbmc(): - # We update the Kodi database - from platformcode import xbmc_videolibrary - xbmc_videolibrary.mark_season_as_watched_on_kodi(item, item.playcount) - - platformtools.itemlist_refresh() def mark_tvshow_as_updatable(item, silent=False): @@ -990,105 +965,76 @@ def mark_tvshow_as_updatable(item, silent=False): def delete(item): - def delete_all(_item): - for file in filetools.listdir(_item.path): - if file.endswith(".strm") or file.endswith(".nfo") or file.endswith(".json")or file.endswith(".torrent"): - filetools.remove(filetools.join(_item.path, file)) + from platformcode import xbmc_videolibrary + select = None + delete = None + if item.contentType == 'movie': + library_path = videolibrarytools.MOVIES_PATH + head = 70084 + else: + library_path = videolibrarytools.TVSHOWS_PATH + head = 70085 + from core import channeltools - if _item.contentType == 'movie': - heading = config.get_localized_string(70084) - else: - heading = config.get_localized_string(70085) + channels = [c for c in videolibrarydb[item.contentType].get(item.videolibrary_id,{}).get('channels',{}).keys()] + channels.sort() + option_list = [config.get_localized_string(head)] + for channel in channels: 
+ option_list.append(channeltools.get_channel_parameters(channel)['title']) + if len(option_list) > 2: + select = platformtools.dialog_select(config.get_localized_string(70088) % item.infoLabels['title'], option_list) + else: + delete = platformtools.dialog_yesno(config.get_localized_string(head), config.get_localized_string(70088) % item.infoLabels['title']) + if select == 0 or delete: + del videolibrarydb[item.contentType][item.videolibrary_id] + if item.contentType == 'tvshow': + del videolibrarydb['season'][item.videolibrary_id] + del videolibrarydb['episode'][item.videolibrary_id] + path = filetools.join(library_path, item.base_name) + + filetools.rmdirtree(path) if config.is_xbmc() and config.get_setting("videolibrary_kodi"): from platformcode import xbmc_videolibrary - if _item.local_episodes_path: - platformtools.dialog_ok(heading, config.get_localized_string(80047) % _item.infoLabels['title']) - path_list = [_item.extra] - xbmc_videolibrary.clean(path_list) - - raiz, carpeta_serie, ficheros = next(filetools.walk(_item.path)) - if ficheros == []: - filetools.rmdir(_item.path) - elif platformtools.dialog_yesno(heading, config.get_localized_string(70081) % filetools.basename(_item.path)): - filetools.rmdirtree(_item.path) - - logger.info("All links removed") - xbmc.sleep(1000) + xbmc_videolibrary.clean_by_id(item) platformtools.itemlist_refresh() + if select and select > 0: - # logger.debug(item.tostring('\n')) + channel_name = channels[select - 1] - if item.contentType == 'movie': - heading = config.get_localized_string(70084) - else: - heading = config.get_localized_string(70085) - if item.multichannel: - # Get channel list - channels = [] - opciones = [] - for k in list(item.library_urls.keys()): - if k != "downloads": - opciones.append(config.get_localized_string(70086) % k.capitalize()) - channels.append(k) - if item.dead == '': - opciones.insert(0, heading) + if item.contentType != 'movie': + episodes = videolibrarydb['episode'][item.videolibrary_id] + seasons = videolibrarydb['season'][item.videolibrary_id] + episodes_dict = dict(episodes) + seasons_dict = dict(seasons) - index = platformtools.dialog_select(config.get_localized_string(30163), opciones) + for key, episode in episodes_dict.items(): + if len(episode['channels']) > 1: + del episode['channels'][channel_name] + elif channel_name in episode['channels']: + xbmc_videolibrary.clean_by_id(episodes[key]['item']) + del episodes[key] + videolibrarydb['episode'][item.videolibrary_id] = episodes + seasons_list = [] - if index == 0: - # Selected Delete movie / series - delete_all(item) - return + for episode in episodes: + season = int(episode.split('x')[0]) + if season not in seasons_list: + seasons_list.append(season) - elif index > 0: - # Selected Delete channel X - canal = opciones[index].replace(config.get_localized_string(70079), "").lower() - channels.remove(canal) - else: - return - else: - canal = item.dead + for season in seasons_dict.keys(): + if season not in seasons_list: + xbmc_videolibrary.clean_by_id(seasons[season]) + del seasons[season] + videolibrarydb['season'][item.videolibrary_id] = seasons + channel = videolibrarydb[item.contentType][item.videolibrary_id] + channels = channel['channels'] + del channels[channel_name] + channel['channels'] = channels + videolibrarydb[item.contentType][item.videolibrary_id] = channel - num_enlaces = 0 - path_list = [] - for fd in filetools.listdir(item.path): - if fd.endswith(canal + '].json') or scrapertools.find_single_match(fd, r'%s]_\d+.torrent' % canal): - if 
filetools.remove(filetools.join(item.path, fd)): - num_enlaces += 1 - # Remove strm and nfo if no other channel - episode = fd.replace(' [' + canal + '].json', '') - found_ch = False - for ch in channels: - if filetools.exists(filetools.join(item.path, episode + ' [' + ch + '].json')): - found_ch = True - break - if found_ch == False: - filetools.remove(filetools.join(item.path, episode + '.nfo')) - strm_path = filetools.join(item.path, episode + '.strm') - # if it is a local episode, do not delete the strm - if 'plugin://plugin.video.kod/?' in filetools.read(strm_path): - filetools.remove(strm_path) - path_list.append(filetools.join(item.extra, episode + '.strm')) + videolibrarydb.close() - if config.is_xbmc() and config.get_setting("videolibrary_kodi") and path_list: - from platformcode import xbmc_videolibrary - xbmc_videolibrary.clean(path_list) - - if num_enlaces > 0: - # Update .nfo - head_nfo, item_nfo = videolibrarytools.read_nfo(item.nfo) - del item_nfo.library_urls[canal] - if item_nfo.emergency_urls and item_nfo.emergency_urls.get(canal, False): - del item_nfo.emergency_urls[canal] - filetools.write(item.nfo, head_nfo + item_nfo.tojson()) - return num_enlaces - else: - if platformtools.dialog_yesno(heading, config.get_localized_string(70088) % item.infoLabels['title']): - delete_all(item) - return 1 - else: - return 0 def check_season_playcount(item, season): @@ -1171,4 +1117,93 @@ def add_download_items(item, itemlist): itemlist.append(downloadItem) else: # tvshow + not seen itemlist.append(downloadItem) - itemlist.append(downloadItem.clone(title=typo(config.get_localized_string(60003), "color kod bold"), unseen=True)) \ No newline at end of file + itemlist.append(downloadItem.clone(title=typo(config.get_localized_string(60003), "color kod bold"), unseen=True)) + + +def prefered_lang(item): + tempdb = videolibrarydb[item.contentType][item.videolibrary_id] + videolibrarydb.close() + item = tempdb['item'] + lang_list = tempdb['item'].lang_list + prefered = item.lang_list.index(item.prefered_lang) + item.prefered_lang = lang_list[platformtools.dialog_select(config.get_localized_string(70246), lang_list, prefered)] + tempdb['item'] = item + videolibrarydb[item.contentType][item.videolibrary_id] = tempdb + videolibrarydb.close() + +def disable_channels(item): + from core import channeltools + tempdb = videolibrarydb[item.contentType][item.videolibrary_id] + videolibrarydb.close() + item = tempdb['item'] + channels_list = list(tempdb['channels'].keys()) + channels_name = [channeltools.get_channel_parameters(c).get('title', '') for c in channels_list] + disabled = [channels_list.index(c) for c in channels_list if c in item.disabled] + channels_disabled = platformtools.dialog_multiselect(config.get_localized_string(70837), channels_name, preselect=disabled) + if type(channels_disabled) == list: + item.disabled = [channels_list[c] for c in channels_disabled] + videolibrarydb[item.contentType][item.videolibrary_id] = tempdb + videolibrarydb.close() + +def get_host(item , channel=None): + if item.url.startswith('//'): item.url = 'https:' + item.url + if not item.url.startswith('/') and not httptools.downloadpage(item.url, only_headers=True).success: + item.url = urlparse.urlparse(item.url).path + if item.url.startswith('/'): + if not channel: + try : channel = __import__('channels.' + item.channel, None, None, ['channels.' + item.channel]) + except: channel = __import__('specials.' + item.channel, None, None, ['specials.' 
+ item.channel]) + + host = channel.host + if host.endswith('/'): host = host[:-1] + item.url = host + item.url + + return item + +def set_active(item): + show = videolibrarydb['tvshow'][item.videolibrary_id] + videolibrarydb.close() + show['item'].active = False if item.active else True + videolibrarydb['tvshow'][item.videolibrary_id] = show + videolibrarydb.close() + platformtools.itemlist_refresh() + +def change_poster(item): + import xbmcgui + video = videolibrarydb[item.contentType][item.videolibrary_id] + videolibrarydb.close() + options = [] + it = xbmcgui.ListItem('Current') + it.setArt({'thumb':item.thumbnail}) + options.append(it) + posters = video['item'].infoLabels.get('posters',[]) + for n, poster in enumerate(posters): + it = xbmcgui.ListItem(str(n)) + it.setArt({'thumb':poster}) + options.append(it) + selection = platformtools.dialog_select('',options, 0, True) + if selection > 0: + video['item'].thumbnail = video['item'].infoLabels['thumbnail'] = posters[selection - 1] + videolibrarydb[item.contentType][item.videolibrary_id] = video + videolibrarydb.close() + platformtools.itemlist_refresh() + +def change_fanart(item): + import xbmcgui + video = videolibrarydb[item.contentType][item.videolibrary_id] + videolibrarydb.close() + options = [] + it = xbmcgui.ListItem('Current') + it.setArt({'thumb':item.fanart}) + options.append(it) + fanarts = video['item'].infoLabels.get('fanarts',[]) + for n, poster in enumerate(fanarts): + it = xbmcgui.ListItem(str(n)) + it.setArt({'thumb':poster}) + options.append(it) + selection = platformtools.dialog_select('',options, 0, True) + if selection > 0: + video['item'].fanart = video['item'].infoLabels['fanart'] = fanarts[selection - 1] + videolibrarydb[item.contentType][item.videolibrary_id] = video + videolibrarydb.close() + platformtools.itemlist_refresh() \ No newline at end of file
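
The clean_by_id() helper added in xbmc_videolibrary.py resolves an entry to its Kodi database id via execute_sql_kodi() and then removes it through Kodi's JSON-RPC interface. Below is a minimal sketch of that round trip, assuming only Kodi's standard xbmc.executeJSONRPC and the json module; get_data() in this patch presumably wraps the same mechanism, and remove_movie_by_dbid is a hypothetical helper used here only for illustration, not part of the patch.

    import json
    import xbmc

    def remove_movie_by_dbid(movieid):
        # Hypothetical helper: same payload shape clean_by_id() builds for movies
        payload = {"jsonrpc": "2.0", "method": "VideoLibrary.RemoveMovie",
                   "id": 1, "params": {"movieid": movieid}}
        # executeJSONRPC takes a JSON string and returns the JSON response as a string
        response = xbmc.executeJSONRPC(json.dumps(payload))
        return json.loads(response)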