diff --git a/channels.json b/channels.json index 8219d96d..82faaae9 100644 --- a/channels.json +++ b/channels.json @@ -1,7 +1,7 @@ { "altadefinizione01": "https://altadefinizione01.photo", "altadefinizione01_link": "https://altadefinizione01.baby", - "altadefinizioneclick": "https://altadefinizione.family", + "altadefinizioneclick": "https://altadefinizione.productions", "animeforce": "https://ww1.animeforce.org", "animeleggendari": "https://animepertutti.org", "animesaturn": "https://www.animesaturn.com", @@ -32,12 +32,12 @@ "polpotv": "https://polpotv.club", "pufimovies": "https://pufimovies.com", "raiplay": "https://www.raiplay.it", - "seriehd": "https://seriehd.net", - "serietvonline": "https://serietvonline.host", + "seriehd": "https://seriehd.link", + "serietvonline": "https://serietvonline.work", "serietvsubita": "http://serietvsubita.xyz", "serietvu": "https://www.serietvu.link", "streamtime": "https://t.me/s/StreamTime", - "tantifilm": "https://www.tantifilm.pizza", + "tantifilm": "https://www.tantifilm.red", "toonitalia": "https://toonitalia.org", "vedohd": "https://vedohd.uno", "vvvvid": "https://www.vvvvid.it" diff --git a/core/channeltools.py b/core/channeltools.py index 1ab338ef..86d3757c 100644 --- a/core/channeltools.py +++ b/core/channeltools.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # ------------------------------------------------------------ -# channeltools - Herramientas para trabajar con canales +# channeltools - Tools for working with channels # ------------------------------------------------------------ from __future__ import absolute_import @@ -29,11 +29,11 @@ def get_channel_parameters(channel_name): channel_parameters = get_channel_json(channel_name) # logger.debug(channel_parameters) if channel_parameters: - # cambios de nombres y valores por defecto + # name and default changes channel_parameters["title"] = channel_parameters.pop("name") + (' [DEPRECATED]' if 'deprecated' in channel_parameters and channel_parameters['deprecated'] else '') channel_parameters["channel"] = channel_parameters.pop("id") - # si no existe el key se declaran valor por defecto para que no de fallos en las funciones que lo llaman + # if the key does not exist, they are declared a default value so that there are no failures in the functions that call it channel_parameters["update_url"] = channel_parameters.get("update_url", DEFAULT_UPDATE_URL) channel_parameters["language"] = channel_parameters.get("language", ["all"]) channel_parameters["active"] = channel_parameters.get("active", False) @@ -45,7 +45,7 @@ def get_channel_parameters(channel_name): channel_parameters["banner"] = channel_parameters.get("banner", "") channel_parameters["fanart"] = channel_parameters.get("fanart", "") - # Imagenes: se admiten url y archivos locales dentro de "resources/images" + # Images: url and local files are allowed inside "resources / images" if channel_parameters.get("thumbnail") and "://" not in channel_parameters["thumbnail"]: channel_parameters["thumbnail"] = filetools.join(remote_path, "resources", "thumb", channel_parameters["thumbnail"]) if channel_parameters.get("banner") and "://" not in channel_parameters["banner"]: @@ -53,7 +53,7 @@ def get_channel_parameters(channel_name): if channel_parameters.get("fanart") and "://" not in channel_parameters["fanart"]: channel_parameters["fanart"] = filetools.join(remote_path, "resources", channel_parameters["fanart"]) - # Obtenemos si el canal tiene opciones de configuración + # We obtain if the channel has configuration options 
channel_parameters["has_settings"] = False if 'settings' in channel_parameters: channel_parameters['settings'] = get_default_settings(channel_name) @@ -71,8 +71,7 @@ def get_channel_parameters(channel_name): dict_channels_parameters[channel_name] = channel_parameters else: - # para evitar casos donde canales no están definidos como configuración - # lanzamos la excepcion y asi tenemos los valores básicos + # To avoid cases where channels are not defined as configuration, we throw the exception and thus we have the basic values raise Exception except Exception as ex: @@ -123,7 +122,7 @@ def get_channel_controls_settings(channel_name): for c in list_controls: if 'id' not in c or 'type' not in c or 'default' not in c: - # Si algun control de la lista no tiene id, type o default lo ignoramos + # If any control in the list does not have id, type or default, we ignore it continue # new dict with key(id) and value(default) from settings @@ -173,7 +172,7 @@ def get_default_settings(channel_name): default_controls_renumber = default_file['renumber'] channel_json = get_channel_json(channel_name) - # Collects configurations + # Collects configurations channel_language = channel_json['language'] channel_controls = channel_json['settings'] categories = channel_json['categories'] @@ -189,28 +188,22 @@ def get_default_settings(channel_name): label = label[-1] if label == 'peliculas': if 'movie' in categories: - control['label'] = config.get_localized_string(70727) + ' - ' + config.get_localized_string( - 30122) - control['default'] = False if ('include_in_newest' in default_off) or ( - 'include_in_newest_peliculas' in default_off) else True + control['label'] = config.get_localized_string(70727) + ' - ' + config.get_localized_string( 30122) + control['default'] = False if ('include_in_newest' in default_off) or ( ' include_in_newest_peliculas' in default_off) else True channel_controls.append(control) else: pass elif label == 'series': if 'tvshow' in categories: - control['label'] = config.get_localized_string(70727) + ' - ' + config.get_localized_string( - 30123) - control['default'] = False if ('include_in_newest' in default_off) or ( - 'include_in_newest_series' in default_off) else True + control['label'] = config.get_localized_string(70727) + ' - ' + config.get_localized_string( 30123) + control['default'] = False if ('include_in_newest' in default_off) or ( 'include_in_newest_series' in default_off) else True channel_controls.append(control) else: pass elif label == 'anime': if 'anime' in categories: - control['label'] = config.get_localized_string(70727) + ' - ' + config.get_localized_string( - 30124) - control['default'] = False if ('include_in_newest' in default_off) or ( - 'include_in_newest_anime' in default_off) else True + control['label'] = config.get_localized_string(70727) + ' - ' + config.get_localized_string( 30124) + control['default'] = False if ('include_in_newest' in default_off) or ( 'include_in_newest_anime' in default_off) else True channel_controls.append(control) else: pass @@ -239,24 +232,24 @@ def get_default_settings(channel_name): def get_channel_setting(name, channel, default=None): from core import filetools """ - Retorna el valor de configuracion del parametro solicitado. + Returns the configuration value of the requested parameter. - Devuelve el valor del parametro 'name' en la configuracion propia del canal 'channel'. + Returns the value of the parameter 'name' in the own configuration of the channel 'channel'. 
- Busca en la ruta \addon_data\plugin.video.kod\settings_channels el archivo channel_data.json y lee - el valor del parametro 'name'. Si el archivo channel_data.json no existe busca en la carpeta channels el archivo - channel.json y crea un archivo channel_data.json antes de retornar el valor solicitado. Si el parametro 'name' - tampoco existe en el el archivo channel.json se devuelve el parametro default. + Looks in the path \addon_data\plugin.video.kod\settings_channels for the file channel_data.json and reads + the value of the parameter 'name'. If the file channel_data.json does not exist, it looks in the channels folder for the file + channel.json and creates a channel_data.json file before returning the requested value. If the parameter 'name' + does not exist in the channel.json file either, the default parameter is returned. - @param name: nombre del parametro + @param name: parameter name @type name: str - @param channel: nombre del canal + @param channel: channel name @type channel: str - @param default: valor devuelto en caso de que no exista el parametro name + @param default: value returned if the parameter 'name' does not exist @type default: any - @return: El valor del parametro 'name' + @return: The value of the parameter 'name' @rtype: any """ @@ -266,58 +259,58 @@ def get_channel_setting(name, channel, default=None): if channel not in ['trakt']: def_settings = get_default_settings(channel) if filetools.exists(file_settings): - # Obtenemos configuracion guardada de ../settings/channel_data.json + # We get saved configuration from ../settings/channel_data.json try: dict_file = jsontools.load(filetools.read(file_settings)) if isinstance(dict_file, dict) and 'settings' in dict_file: dict_settings = dict_file['settings'] except EnvironmentError: - logger.error("ERROR al leer el archivo: %s" % file_settings) + logger.error("ERROR when reading the file: %s" % file_settings) if not dict_settings or name not in dict_settings: - # Obtenemos controles del archivo ../channels/channel.json + # We get controls from the file ../channels/channel.json try: list_controls, default_settings = get_channel_controls_settings(channel) except: default_settings = {} - if name in default_settings: # Si el parametro existe en el channel.json creamos el channel_data.json + if name in default_settings: # If the parameter exists in the channel.json we create the channel_data.json default_settings.update(dict_settings) dict_settings = default_settings dict_file['settings'] = dict_settings - # Creamos el archivo ../settings/channel_data.json + # We create the file ../settings/channel_data.json json_data = jsontools.dump(dict_file) if not filetools.write(file_settings, json_data, silent=True): - logger.error("ERROR al salvar el archivo: %s" % file_settings) + logger.error("ERROR saving file: %s" % file_settings) - # Devolvemos el valor del parametro local 'name' si existe, si no se devuelve default + # We return the value of the local parameter 'name' if it exists, otherwise default is returned return dict_settings.get(name, default) def set_channel_setting(name, value, channel): from core import filetools """ - Fija el valor de configuracion del parametro indicado. + Sets the configuration value of the indicated parameter. - Establece 'value' como el valor del parametro 'name' en la configuracion propia del canal 'channel'. - Devuelve el valor cambiado o None si la asignacion no se ha podido completar. + Sets 'value' as the value of the parameter 'name' in the own configuration of the channel 'channel'.
+ Returns the changed value or None if the assignment could not be completed. - Si se especifica el nombre del canal busca en la ruta \addon_data\plugin.video.kod\settings_channels el - archivo channel_data.json y establece el parametro 'name' al valor indicado por 'value'. - Si el parametro 'name' no existe lo añade, con su valor, al archivo correspondiente. + If the name of the channel is specified, search in the path \addon_data\plugin.video.kod\settings_channels the + channel_data.json file and set the parameter 'name' to the value indicated by 'value'. + If the parameter 'name' does not exist, it adds it, with its value, to the corresponding file. - @param name: nombre del parametro + @param name: parameter name @type name: str - @param value: valor del parametro + @param value: parameter value @type value: str - @param channel: nombre del canal + @param channel: channel name @type channel: str - @return: 'value' en caso de que se haya podido fijar el valor y None en caso contrario + @return: 'value' if the value could be set and None otherwise @rtype: str, None """ - # Creamos la carpeta si no existe + # We create the folder if it does not exist if not filetools.exists(filetools.join(config.get_data_path(), "settings_channels")): filetools.mkdir(filetools.join(config.get_data_path(), "settings_channels")) @@ -327,16 +320,16 @@ def set_channel_setting(name, value, channel): dict_file = None if filetools.exists(file_settings): - # Obtenemos configuracion guardada de ../settings/channel_data.json + # We get saved settings from ../settings/channel_data.json try: dict_file = jsontools.load(filetools.read(file_settings)) dict_settings = dict_file.get('settings', {}) except EnvironmentError: - logger.error("ERROR al leer el archivo: %s" % file_settings) + logger.error("ERROR when reading the file: %s" % file_settings) dict_settings[name] = value - # comprobamos si existe dict_file y es un diccionario, sino lo creamos + # we check if dict_file exists and it is a dictionary, if not we create it if dict_file is None or not dict_file: dict_file = {} @@ -345,7 +338,7 @@ def set_channel_setting(name, value, channel): # Creamos el archivo ../settings/channel_data.json json_data = jsontools.dump(dict_file) if not filetools.write(file_settings, json_data, silent=True): - logger.error("ERROR al salvar el archivo: %s" % file_settings) + logger.error("ERROR saving file: %s" % file_settings) return None return value diff --git a/core/downloader.py b/core/downloader.py index 39f60d39..3cb52c33 100644 --- a/core/downloader.py +++ b/core/downloader.py @@ -1,20 +1,20 @@ # -*- coding: utf-8 -*- """ -Clase Downloader +Downloader class Downloader(url, path [, filename, headers, resume]) - url : string - url para descargar - path : string - Directorio donde se guarda la descarga - filename : [opt] string - Nombre de archivo para guardar - headers : [opt] dict - Headers para usar en la descarga - resume : [opt] bool - continuar una descarga previa en caso de existir, por defecto True + url : string - url to download + path : string - Directory where the download is saved + filename : [opt] string - File name to save + headers : [opt] dict - Headers to use for download + resume : [opt] bool - continue a previous download if it exists, by default True metodos: - start_dialog() Inicia la descarga mostrando el progreso - start() Inicia la descarga en segundo plano - stop(erase = False) Detiene la descarga, con erase = True elimina los datos descargados + start_dialog() Start the download showing the progress + 
start() Download starts in the background + stop(erase = False) Stop the download, with erase = True it deletes the downloaded data """ from __future__ import division @@ -26,7 +26,7 @@ standard_library.install_aliases() from builtins import range from builtins import object from past.utils import old_div -#from builtins import str +# from builtins import str import sys PY3 = False VFS = True @@ -53,8 +53,7 @@ class Downloader(object): @property def connections(self): - return len([c for c in self._download_info["parts"] if - c["status"] in [self.states.downloading, self.states.connecting]]), self._max_connections + return len([c for c in self._download_info["parts"] if c["status"] in [self.states.downloading, self.states.connecting]]), self._max_connections @property def downloaded(self): @@ -102,7 +101,7 @@ class Downloader(object): def fullpath(self): return os.path.abspath(filetools.join(self._path, self._filename)) - # Funciones + # Features def start_dialog(self, title=config.get_localized_string(60200)): from platformcode import platformtools progreso = platformtools.dialog_progress_bg(title, config.get_localized_string(60201)) @@ -111,9 +110,7 @@ class Downloader(object): while self.state == self.states.downloading: time.sleep(0.2) line1 = "%s" % (self.filename) - line2 = config.get_localized_string(59983) % ( - self.downloaded[1], self.downloaded[2], self.size[1], self.size[2], - self.speed[1], self.speed[2], self.connections[0], self.connections[1]) + line2 = config.get_localized_string(59983) % ( self.downloaded[1], self.downloaded[2], self.size[1], self.size[2], self.speed[1], self.speed[2], self.connections[0], self.connections[1]) line3 = config.get_localized_string(60202) % (self.remaining_time) progreso.update(int(self.progress), line1, line2 + " " + line3) @@ -130,9 +127,7 @@ class Downloader(object): conns.append(self.__open_connection__("0", "")) except: self._max_connections = x - self._threads = [ - Thread(target=self.__start_part__, name="Downloader %s/%s" % (x + 1, self._max_connections)) for x - in range(self._max_connections)] + self._threads = [ Thread(target=self.__start_part__, name="Downloader %s/%s" % (x + 1, self._max_connections)) for x in range(self._max_connections)] break del conns self._start_time = time.time() - 1 @@ -144,7 +139,7 @@ class Downloader(object): def stop(self, erase=False): if self._state == self.states.downloading: - # Detenemos la descarga + # We stop downloading self._state = self.states.stopped for t in self._threads: if t.isAlive(): t.join() @@ -193,10 +188,10 @@ class Downloader(object): time.sleep(0.5) - # Funciones internas + # Internal functions def __init__(self, url, path, filename=None, headers=[], resume=True, max_connections=10, block_size=2 ** 17, part_size=2 ** 24, max_buffer=10, json_path=None): - # Parametros + # Parameters self._resume = resume self._path = path self._filename = filename @@ -214,29 +209,26 @@ class Downloader(object): except: self.tmp_path = os.getenv("TEMP") or os.getenv("TMP") or os.getenv("TMPDIR") - self.states = type('states', (), - {"stopped": 0, "connecting": 1, "downloading": 2, "completed": 3, "error": 4, "saving": 5}) + self.states = type('states', (), {"stopped": 0, "connecting": 1, "downloading": 2, "completed": 3, "error": 4, "saving": 5}) self._state = self.states.stopped self._download_lock = Lock() - self._headers = { - "User-Agent": "Kodi/15.2 (Windows NT 10.0; WOW64) App_Bitness/32 Version/15.2-Git:20151019-02e7013"} + self._headers = {"User-Agent": "Kodi/15.2 (Windows NT 10.0; WOW64) 
App_Bitness/32 Version/15.2-Git:20151019-02e7013"} self._speed = 0 self._buffer = {} self._seekable = True - self._threads = [Thread(target=self.__start_part__, name="Downloader %s/%s" % (x + 1, self._max_connections)) - for x in range(self._max_connections)] + self._threads = [Thread(target=self.__start_part__, name="Downloader %s/%s" % (x + 1, self._max_connections)) for x in range(self._max_connections)] self._speed_thread = Thread(target=self.__speed_metter__, name="Speed Meter") self._save_thread = Thread(target=self.__save_file__, name="File Writer") - # Actualizamos los headers + # We update the headers self._headers.update(dict(headers)) - # Separamos los headers de la url + # We separate the headers from the url self.__url_to_headers__(url) - # Obtenemos la info del servidor + # We get the server info self.__get_download_headers__() self._file_size = int(self.response_headers.get("content-length", "0")) @@ -246,10 +238,10 @@ class Downloader(object): self._part_size = 0 self._resume = False - # Obtenemos el nombre del archivo + # We get the file name self.__get_download_filename__() - # Abrimos en modo "a+" para que cree el archivo si no existe, luego en modo "r+b" para poder hacer seek() + # We open in "a+" mode to create the file if it does not exist, then in "r + b" mode to be able to do seek () self.file = filetools.file_open(filetools.join(self._path, self._filename), "a+", vfs=VFS) if self.file: self.file.close() self.file = filetools.file_open(filetools.join(self._path, self._filename), "r+b", vfs=VFS) @@ -266,20 +258,17 @@ class Downloader(object): self.__get_download_info__() try: - logger.info("Download started: Parts: %s | Path: %s | File: %s | Size: %s" % \ - (str(len(self._download_info["parts"])), self._pathencode('utf-8'), \ - self._filenameencode('utf-8'), str(self._download_info["size"]))) + logger.info("Download started: Parts: %s | Path: %s | File: %s | Size: %s" % (str(len(self._download_info["parts"])), self._pathencode('utf-8'), self._filenameencode('utf-8'), str(self._download_info["size"]))) except: pass def __url_to_headers__(self, url): - # Separamos la url de los headers adicionales + # We separate the url from the additional headers self.url = url.split("|")[0] - # headers adicionales + # additional headers if "|" in url: - self._headers.update(dict([[header.split("=")[0], urllib.parse.unquote_plus(header.split("=")[1])] for header in - url.split("|")[1].split("&")])) + self._headers.update(dict([[header.split("=")[0], urllib.parse.unquote_plus(header.split("=")[1])] for header in url.split("|")[1].split("&")])) def __get_download_headers__(self): if self.url.startswith("https"): @@ -307,29 +296,21 @@ class Downloader(object): break def __get_download_filename__(self): - # Obtenemos nombre de archivo y extension - if "filename" in self.response_headers.get("content-disposition", - "") and "attachment" in self.response_headers.get( - "content-disposition", ""): - cd_filename, cd_ext = os.path.splitext(urllib.parse.unquote_plus( - re.compile("attachment; filename ?= ?[\"|']?([^\"']+)[\"|']?").match( - self.response_headers.get("content-disposition")).group(1))) - elif "filename" in self.response_headers.get("content-disposition", "") and "inline" in self.response_headers.get( - "content-disposition", ""): - cd_filename, cd_ext = os.path.splitext(urllib.parse.unquote_plus( - re.compile("inline; filename ?= ?[\"|']?([^\"']+)[\"|']?").match( - self.response_headers.get("content-disposition")).group(1))) + # We get file name and extension + if "filename" in 
self.response_headers.get("content-disposition", "") and "attachment" in self.response_headers.get("content-disposition", ""): + cd_filename, cd_ext = os.path.splitext(urllib.parse.unquote_plus( re.compile("attachment; filename ?= ?[\"|']?([^\"']+)[\"|']?").match(self.response_headers.get("content-disposition")).group(1))) + elif "filename" in self.response_headers.get("content-disposition", "") and "inline" in self.response_headers.get("content-disposition", ""): + cd_filename, cd_ext = os.path.splitext(urllib.parse.unquote_plus(re.compile("inline; filename ?= ?[\"|']?([^\"']+)[\"|']?").match(self.response_headers.get("content-disposition")).group(1))) else: cd_filename, cd_ext = "", "" - url_filename, url_ext = os.path.splitext( - urllib.parse.unquote_plus(filetools.basename(urllib.parse.urlparse(self.url)[2]))) + url_filename, url_ext = os.path.splitext(urllib.parse.unquote_plus(filetools.basename(urllib.parse.urlparse(self.url)[2]))) if self.response_headers.get("content-type", "application/octet-stream") != "application/octet-stream": mime_ext = mimetypes.guess_extension(self.response_headers.get("content-type")) else: mime_ext = "" - # Seleccionamos el nombre mas adecuado + # We select the most suitable name if cd_filename: self.remote_filename = cd_filename if not self._filename: @@ -340,7 +321,7 @@ class Downloader(object): if not self._filename: self._filename = url_filename - # Seleccionamos la extension mas adecuada + # We select the most suitable extension if cd_ext: if not cd_ext in self._filename: self._filename += cd_ext if self.remote_filename: self.remote_filename += cd_ext @@ -360,7 +341,7 @@ class Downloader(object): return value, old_div(value, 1024.0 ** int(math.log(value, 1024))), units[int(math.log(value, 1024))] def __get_download_info__(self): - # Continuamos con una descarga que contiene la info al final del archivo + # We continue with a download that contains the info at the end of the file self._download_info = {} try: @@ -386,25 +367,21 @@ class Downloader(object): part["current"] == part["start"] self._start_downloaded = sum([c["current"] - c["start"] for c in self._download_info["parts"]]) - self.pending_parts = set( - [x for x, a in enumerate(self._download_info["parts"]) if not a["status"] == self.states.completed]) - self.completed_parts = set( - [x for x, a in enumerate(self._download_info["parts"]) if a["status"] == self.states.completed]) + self.pending_parts = set([x for x, a in enumerate(self._download_info["parts"]) if not a["status"] == self.states.completed]) + self.completed_parts = set([x for x, a in enumerate(self._download_info["parts"]) if a["status"] == self.states.completed]) self.save_parts = set() self.download_parts = set() - # La info no existe o no es correcta, comenzamos de 0 + # The info does not exist or is not correct, we start from 0 except: self._download_info["parts"] = [] if self._file_size and self._part_size: for x in range(0, self._file_size, self._part_size): end = x + self._part_size - 1 if end >= self._file_size: end = self._file_size - 1 - self._download_info["parts"].append( - {"start": x, "end": end, "current": x, "status": self.states.stopped}) + self._download_info["parts"].append({"start": x, "end": end, "current": x, "status": self.states.stopped}) else: - self._download_info["parts"].append( - {"start": 0, "end": self._file_size - 1, "current": 0, "status": self.states.stopped}) + self._download_info["parts"].append({"start": 0, "end": self._file_size - 1, "current": 0, "status": self.states.stopped}) 
self._download_info["size"] = self._file_size self._start_downloaded = 0 @@ -436,7 +413,7 @@ class Downloader(object): logger.info("Thread started: %s" % threading.current_thread().name) while self._state == self.states.downloading: - if not self.pending_parts and not self.download_parts and not self.save_parts: # Descarga finalizada + if not self.pending_parts and not self.download_parts and not self.save_parts: # Download finished self._state = self.states.completed self.file.close() continue @@ -446,8 +423,7 @@ class Downloader(object): save_id = min(self.save_parts) - if not self._seekable and self._download_info["parts"][save_id][ - "start"] >= 2 ** 31 and not self.__check_consecutive__(save_id): + if not self._seekable and self._download_info["parts"][save_id]["start"] >= 2 ** 31 and not self.__check_consecutive__(save_id): continue if self._seekable or self._download_info["parts"][save_id]["start"] < 2 ** 31: @@ -533,8 +509,7 @@ class Downloader(object): self.__set_part_connecting__(id) try: - connection = self.__open_connection__(self._download_info["parts"][id]["current"], - self._download_info["parts"][id]["end"]) + connection = self.__open_connection__(self._download_info["parts"][id]["current"], self._download_info["parts"][id]["end"]) except: self.__set_part__error__(id) time.sleep(5) @@ -559,8 +534,7 @@ class Downloader(object): self.download_parts.remove(id) break else: - if len(buffer) and self._download_info["parts"][id]["current"] < self._download_info["parts"][id][ - "end"]: + if len(buffer) and self._download_info["parts"][id]["current"] < self._download_info["parts"][id]["end"]: # file.write(buffer) self._buffer[id].append(buffer) self._download_info["parts"][id]["current"] += len(buffer) @@ -570,13 +544,9 @@ class Downloader(object): vm = self.__change_units__(velocidad_minima) v = self.__change_units__(velocidad) - if velocidad_minima > speed[-1] and velocidad_minima > speed[-2] and \ - self._download_info["parts"][id]["current"] < \ - self._download_info["parts"][id]["end"]: + if velocidad_minima > speed[-1] and velocidad_minima > speed[-2] and self._download_info["parts"][id]["current"] < self._download_info["parts"][id]["end"]: if connection.fp: connection.fp._sock.close() - logger.info( - "ID: %s Restarting connection! | Minimum Speed: %.2f %s/s | Speed: %.2f %s/s" % \ - (id, vm[1], vm[2], v[1], v[2])) + logger.info("ID: %s Restarting connection! | Minimum Speed: %.2f %s/s | Speed: %.2f %s/s" % (id, vm[1], vm[2], v[1], v[2])) # file.close() break else: diff --git a/core/downloadtools.py b/core/downloadtools.py index 57b020aa..313c1baa 100644 --- a/core/downloadtools.py +++ b/core/downloadtools.py @@ -92,8 +92,8 @@ def limpia_nombre_sin_acentos(s): def limpia_nombre_excepto_1(s): if not s: return '' - # Titulo de entrada - # Convierte a unicode + # Entrance title + # Convert to unicode try: s = unicode(s, "utf-8") except UnicodeError: @@ -103,12 +103,12 @@ def limpia_nombre_excepto_1(s): except UnicodeError: # logger.info("no es iso-8859-1") pass - # Elimina acentos + # Remove accents s = limpia_nombre_sin_acentos(s) - # Elimina caracteres prohibidos + # Remove prohibited characters validchars = " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890!#$%&'()-@[]^_`{}~." 
stripped = ''.join(c for c in s if c in validchars) - # Convierte a iso + # Convert to iso s = stripped.encode("iso-8859-1") if PY3: s = s.decode('utf-8') @@ -124,30 +124,30 @@ def limpia_nombre_excepto_2(s): def getfilefromtitle(url, title): - # Imprime en el log lo que va a descartar + # Print in the log what you will discard logger.info("title=" + title) logger.info("url=" + url) plataforma = config.get_system_platform() logger.info("platform=" + plataforma) - # nombrefichero = xbmc.makeLegalFilename(title + url[-4:]) + # filename = xbmc.makeLegalFilename(title + url[-4:]) from core import scrapertools nombrefichero = title + scrapertools.get_filename_from_url(url)[-4:] - logger.info("filename=%s" % nombrefichero) + logger.info("filename= %s" % nombrefichero) if "videobb" in url or "videozer" in url or "putlocker" in url: nombrefichero = title + ".flv" if "videobam" in url: nombrefichero = title + "." + url.rsplit(".", 1)[1][0:3] - logger.info("filename=%s" % nombrefichero) + logger.info("filename= %s" % nombrefichero) nombrefichero = limpia_nombre_caracteres_especiales(nombrefichero) - logger.info("filename=%s" % nombrefichero) + logger.info("filename= %s" % nombrefichero) fullpath = filetools.join(config.get_setting("downloadpath"), nombrefichero) - logger.info("fullpath=%s" % fullpath) + logger.info("fullpath= %s" % fullpath) if config.is_xbmc() and fullpath.startswith("special://"): import xbmc @@ -164,7 +164,7 @@ def downloadtitle(url, title): def downloadbest(video_urls, title, continuar=False): logger.info() - # Le da la vuelta, para poner el de más calidad primero ( list() es para que haga una copia ) + # Flip it over, to put the highest quality one first (list () is for you to make a copy of) invertida = list(video_urls) invertida.reverse() @@ -176,10 +176,10 @@ def downloadbest(video_urls, title, continuar=False): else: logger.info("Downloading option " + title + " " + url.encode('ascii', 'ignore').decode('utf-8')) - # Calcula el fichero donde debe grabar + # Calculate the file where you should record try: fullpath = getfilefromtitle(url, title.strip()) - # Si falla, es porque la URL no vale para nada + # If it fails, it is because the URL is useless except: import traceback logger.error(traceback.format_exc()) @@ -188,24 +188,24 @@ def downloadbest(video_urls, title, continuar=False): # Descarga try: ret = downloadfile(url, fullpath, continuar=continuar) - # Llegados a este punto, normalmente es un timeout + # At this point, it is usually a timeout. 
except urllib.error.URLError as e: import traceback logger.error(traceback.format_exc()) ret = -2 - # El usuario ha cancelado la descarga + # The user has canceled the download if ret == -1: return -1 else: - # El fichero ni siquiera existe + # The file doesn't even exist if not filetools.exists(fullpath): logger.info("-> You have not downloaded anything, testing with the following option if there is") - # El fichero existe + # The file exists else: tamanyo = filetools.getsize(fullpath) - # Tiene tamaño 0 + # It has size 0 if tamanyo == 0: logger.info("-> Download a file with size 0, testing with the following option if it exists") os.remove(fullpath) @@ -217,8 +217,8 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False, resumir=True): - logger.info("url=" + url) - logger.info("filename=" + nombrefichero) + logger.info("url= " + url) + logger.info("filename= " + nombrefichero) if headers is None: headers = [] @@ -230,36 +230,36 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False nombrefichero = xbmc.translatePath(nombrefichero) try: - # Si no es XBMC, siempre a "Silent" + # If it is not XBMC, always "Silent" from platformcode import platformtools - # antes + # before # f=open(nombrefichero,"wb") try: import xbmc nombrefichero = xbmc.makeLegalFilename(nombrefichero) except: pass - logger.info("filename=" + nombrefichero) + logger.info("filename= " + nombrefichero) - # El fichero existe y se quiere continuar + # The file exists and you want to continue if filetools.exists(nombrefichero) and continuar: f = filetools.file_open(nombrefichero, 'r+b', vfs=VFS) if resumir: exist_size = filetools.getsize(nombrefichero) - logger.info("the file exists, size=%d" % exist_size) + logger.info("the file exists, size= %d" % exist_size) grabado = exist_size f.seek(exist_size) else: exist_size = 0 grabado = 0 - # el fichero ya existe y no se quiere continuar, se aborta + # the file already exists and you don't want to continue, it aborts elif filetools.exists(nombrefichero) and not continuar: logger.info("the file exists, it does not download again") return -3 - # el fichero no existe + # the file does not exist else: exist_size = 0 logger.info("the file does not exist") @@ -267,11 +267,11 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False f = filetools.file_open(nombrefichero, 'wb', vfs=VFS) grabado = 0 - # Crea el diálogo de progreso + # Create the progress dialog if not silent: progreso = platformtools.dialog_progress("plugin", "Downloading...", url, nombrefichero) - # Si la plataforma no devuelve un cuadro de diálogo válido, asume modo silencio + # If the platform does not return a valid dialog box, it assumes silent mode if progreso is None: silent = True @@ -291,29 +291,28 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False url = url.split("|")[0] logger.info("url=" + url) - # Timeout del socket a 60 segundos + # Socket timeout at 60 seconds socket.setdefaulttimeout(60) h = urllib.request.HTTPHandler(debuglevel=0) request = urllib.request.Request(url) for header in headers: - logger.info("Header=" + header[0] + ": " + header[1]) + logger.info("Header= " + header[0] + ": " + header[1]) request.add_header(header[0], header[1]) if exist_size > 0: - request.add_header('Range', 'bytes=%d-' % (exist_size,)) + request.add_header('Range', 'bytes=%d-' % (exist_size,)) opener = urllib.request.build_opener(h)
urllib.request.install_opener(opener) try: connexion = opener.open(request) except urllib.error.HTTPError as e: - logger.error("error %d (%s) al abrir la url %s" % - (e.code, e.msg, url)) + logger.error("error %d (%s) opening url %s" % (e.code, e.msg, url)) f.close() if not silent: progreso.close() - # El error 416 es que el rango pedido es mayor que el fichero => es que ya está completo + # Error 416 means the requested range is beyond the end of the file => the file is already complete if e.code == 416: return 0 else: @@ -327,25 +326,25 @@ if exist_size > 0: totalfichero = totalfichero + exist_size - logger.info("Content-Length=%s" % totalfichero) + logger.info("Content-Length= %s" % totalfichero) blocksize = 100 * 1024 bloqueleido = connexion.read(blocksize) - logger.info("Starting downloading the file, blocked=%s" % len(bloqueleido)) + logger.info("Starting downloading the file, blocked= %s" % len(bloqueleido)) maxreintentos = 10 while len(bloqueleido) > 0: try: - # Escribe el bloque leido + # Write the block read f.write(bloqueleido) grabado += len(bloqueleido) percent = int(float(grabado) * 100 / float(totalfichero)) totalmb = float(float(totalfichero) / (1024 * 1024)) descargadosmb = float(float(grabado) / (1024 * 1024)) - # Lee el siguiente bloque, reintentando para no parar todo al primer timeout + # Read the next block, retrying so as not to stop everything at the first timeout reintentos = 0 while reintentos <= maxreintentos: try: @@ -371,7 +370,7 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False import traceback logger.error(traceback.print_exc()) - # El usuario cancelo la descarga + # The user canceled the download try: if progreso.iscanceled(): logger.info("Download of file canceled") @@ -381,7 +380,7 @@ except: pass - # Ha habido un error en la descarga + # There was an error in the download if reintentos > maxreintentos: logger.info("ERROR in the file download") f.close() @@ -407,7 +406,7 @@ error = downloadfileRTMP(url, nombrefichero, silent) if error and not silent: from platformcode import platformtools - platformtools.dialog_ok("No puedes descargar ese vídeo", "Las descargas en RTMP aún no", "están soportadas") + platformtools.dialog_ok("You cannot download that video", "RTMP downloads are not yet", "supported") else: import traceback from pprint import pprint @@ -433,21 +432,21 @@ def downloadfileRTMP(url, nombrefichero, silent): - ''' No usa librtmp ya que no siempre está disponible. - Lanza un subproceso con rtmpdump. En Windows es necesario instalarlo. - No usa threads así que no muestra ninguna barra de progreso ni tampoco - se marca el final real de la descarga en el log info. + ''' + Does not use librtmp since it is not always available. + Launches a subprocess with rtmpdump. On Windows it must be installed. + It doesn't use threads, so it shows no progress bar and the actual end of the download is not marked in the log info.
''' Programfiles = os.getenv('Programfiles') if Programfiles: # Windows rtmpdump_cmd = Programfiles + "/rtmpdump/rtmpdump.exe" - nombrefichero = '"' + nombrefichero + '"' # Windows necesita las comillas en el nombre + nombrefichero = '"' + nombrefichero + '"' # Windows needs the quotes in the name else: rtmpdump_cmd = "/usr/bin/rtmpdump" if not filetools.isfile(rtmpdump_cmd) and not silent: from platformcode import platformtools - advertencia = platformtools.dialog_ok("Falta " + rtmpdump_cmd, "Comprueba que rtmpdump está instalado") + advertencia = platformtools.dialog_ok("Lack " + rtmpdump_cmd, "Check that rtmpdump is installed") return True valid_rtmpdump_options = ["help", "url", "rtmp", "host", "port", "socks", "protocol", "playpath", "playlist", @@ -475,13 +474,11 @@ def downloadfileRTMP(url, nombrefichero, silent): try: rtmpdump_args = [rtmpdump_cmd] + rtmpdump_args + ["-o", nombrefichero] from os import spawnv, P_NOWAIT - logger.info("Iniciando descarga del fichero: %s" % " ".join(rtmpdump_args)) + logger.info("Initiating file download: %s" % " ".join(rtmpdump_args)) rtmpdump_exit = spawnv(P_NOWAIT, rtmpdump_cmd, rtmpdump_args) if not silent: from platformcode import platformtools - advertencia = platformtools.dialog_ok("La opción de descarga RTMP es experimental", - "y el vídeo se descargará en segundo plano.", - "No se mostrará ninguna barra de progreso.") + advertencia = platformtools.dialog_ok("RTMP download option is experimental", "and the video will download in the background.", "No progress bar will be displayed.") except: return True @@ -489,13 +486,13 @@ def downloadfileRTMP(url, nombrefichero, silent): def downloadfileGzipped(url, pathfichero): - logger.info("url=" + url) + logger.info("url= " + url) nombrefichero = pathfichero - logger.info("filename=" + nombrefichero) + logger.info("filename= " + nombrefichero) import xbmc nombrefichero = xbmc.makeLegalFilename(nombrefichero) - logger.info("filename=" + nombrefichero) + logger.info("filename= " + nombrefichero) patron = "(http://[^/]+)/.+" matches = re.compile(patron, re.DOTALL).findall(url) @@ -519,11 +516,11 @@ def downloadfileGzipped(url, pathfichero): txdata = "" - # Crea el diálogo de progreso + # Create the progress dialog from platformcode import platformtools progreso = platformtools.dialog_progress("addon", config.get_localized_string(60200), url.split("|")[0], nombrefichero) - # Timeout del socket a 60 segundos + # Socket timeout at 60 seconds socket.setdefaulttimeout(10) h = urllib.request.HTTPHandler(debuglevel=0) @@ -536,10 +533,10 @@ def downloadfileGzipped(url, pathfichero): try: connexion = opener.open(request) except urllib.error.HTTPError as e: - logger.error("error %d (%s) al abrir la url %s" % + logger.error("error %d (%s) when opening the url %s" % (e.code, e.msg, url)) progreso.close() - # El error 416 es que el rango pedido es mayor que el fichero => es que ya está completo + # Error 416 is that the requested range is greater than the file => is that it is already complete if e.code == 416: return 0 else: @@ -562,13 +559,13 @@ def downloadfileGzipped(url, pathfichero): nombrefichero = filetools.join(pathfichero, titulo) totalfichero = int(connexion.headers["Content-Length"]) - # despues + # then f = filetools.file_open(nombrefichero, 'w', vfs=VFS) logger.info("new file open") grabado = 0 - logger.info("Content-Length=%s" % totalfichero) + logger.info("Content-Length= %s" % totalfichero) blocksize = 100 * 1024 @@ -581,7 +578,7 @@ def downloadfileGzipped(url, pathfichero): gzipper = 
gzip.GzipFile(fileobj=compressedstream) bloquedata = gzipper.read() gzipper.close() - logger.info("Starting downloading the file, blocked=%s" % len(bloqueleido)) + logger.info("Starting downloading the file, blocked= %s" % len(bloqueleido)) except: logger.error("ERROR: The file to be downloaded is not compressed with Gzip") f.close() @@ -592,14 +589,14 @@ def downloadfileGzipped(url, pathfichero): while len(bloqueleido) > 0: try: - # Escribe el bloque leido + # Write the block read f.write(bloquedata) grabado += len(bloqueleido) percent = int(float(grabado) * 100 / float(totalfichero)) totalmb = float(float(totalfichero) / (1024 * 1024)) descargadosmb = float(float(grabado) / (1024 * 1024)) - # Lee el siguiente bloque, reintentando para no parar todo al primer timeout + # Read the next block, retrying not to stop everything at the first timeout reintentos = 0 while reintentos <= maxreintentos: try: @@ -621,8 +618,7 @@ def downloadfileGzipped(url, pathfichero): else: tiempofalta = 0 logger.info(sec_to_hms(tiempofalta)) - progreso.update(percent, "%.2fMB/%.2fMB (%d%%) %.2f Kb/s %s mancanti " % - (descargadosmb, totalmb, percent, old_div(velocidad, 1024), sec_to_hms(tiempofalta))) + progreso.update(percent, "%.2fMB/%.2fMB (%d%%) %.2f Kb/s %s left " % (descargadosmb, totalmb, percent, old_div(velocidad, 1024), sec_to_hms(tiempofalta))) break except: reintentos += 1 @@ -630,14 +626,14 @@ def downloadfileGzipped(url, pathfichero): for line in sys.exc_info(): logger.error("%s" % line) - # El usuario cancelo la descarga + # The user cancels the download if progreso.iscanceled(): logger.info("Download of file canceled") f.close() progreso.close() return -1 - # Ha habido un error en la descarga + # There was an error in the download if reintentos > maxreintentos: logger.info("ERROR in the file download") f.close() @@ -662,10 +658,10 @@ def downloadfileGzipped(url, pathfichero): def GetTitleFromFile(title): - # Imprime en el log lo que va a descartar - logger.info("title=" + title) + # Print in the log what you will discard + logger.info("title= " + title) plataforma = config.get_system_platform() - logger.info("plataform=" + plataforma) + logger.info("plataform= " + plataforma) # nombrefichero = xbmc.makeLegalFilename(title + url[-4:]) nombrefichero = title @@ -681,16 +677,15 @@ def sec_to_hms(seconds): def downloadIfNotModifiedSince(url, timestamp): logger.info("(" + url + "," + time.ctime(timestamp) + ")") - # Convierte la fecha a GMT + # Convert date to GMT fecha_formateada = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime(timestamp)) - logger.info("fechaFormateada=%s" % fecha_formateada) + logger.info("Formatted date= %s" % fecha_formateada) - # Comprueba si ha cambiado + # Check if it has changed inicio = time.clock() req = urllib.request.Request(url) req.add_header('If-Modified-Since', fecha_formateada) - req.add_header('User-Agent', - 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12') + req.add_header('User-Agent', 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12') updated = False @@ -698,18 +693,18 @@ def downloadIfNotModifiedSince(url, timestamp): response = urllib.request.urlopen(req) data = response.read() - # Si llega hasta aquí, es que ha cambiado + # If it gets this far, it has changed updated = True response.close() except urllib.error.URLError as e: - # Si devuelve 304 es que no ha cambiado + # If it returns 304 it is that it has not changed if hasattr(e, 'code'): 
logger.info("HTTP response code : %d" % e.code) if e.code == 304: logger.info("It has not changed") updated = False - # Agarra los errores con codigo de respuesta del servidor externo solicitado + # Grab errors with response code from requested external server else: for line in sys.exc_info(): logger.error("%s" % line) @@ -722,20 +717,20 @@ def downloadIfNotModifiedSince(url, timestamp): def download_all_episodes(item, channel, first_episode="", preferred_server="vidspot", filter_language=""): - logger.info("show=" + item.show) + logger.info("show= " + item.show) show_title = item.show - # Obtiene el listado desde el que se llamó + # Gets the listing from which it was called action = item.extra - # Esta marca es porque el item tiene algo más aparte en el atributo "extra" + # This mark is because the item has something else apart in the "extra" attribute if "###" in item.extra: action = item.extra.split("###")[0] item.extra = item.extra.split("###")[1] episode_itemlist = getattr(channel, action)(item) - # Ordena los episodios para que funcione el filtro de first_episode + # Sort episodes for the first_episode filter to work episode_itemlist = sorted(episode_itemlist, key=lambda it: it.title) from core import servertools @@ -744,7 +739,7 @@ def download_all_episodes(item, channel, first_episode="", preferred_server="vid best_server = preferred_server # worst_server = "moevideos" - # Para cada episodio + # For each episode if first_episode == "": empezar = True else: @@ -752,9 +747,9 @@ def download_all_episodes(item, channel, first_episode="", preferred_server="vid for episode_item in episode_itemlist: try: - logger.info("episode=" + episode_item.title) - episode_title = scrapertools.find_single_match(episode_item.title, "(\d+x\d+)") - logger.info("episode=" + episode_title) + logger.info("episode= " + episode_item.title) + episode_title = scrapertools.find_single_match(episode_item.title, r"(\d+x\d+)") + logger.info("episode= " + episode_title) except: import traceback logger.error(traceback.format_exc()) @@ -769,7 +764,7 @@ def download_all_episodes(item, channel, first_episode="", preferred_server="vid if not empezar: continue - # Extrae los mirrors + # Extract the mirrors try: mirrors_itemlist = channel.findvideos(episode_item) except: @@ -787,7 +782,7 @@ def download_all_episodes(item, channel, first_episode="", preferred_server="vid for mirror_item in mirrors_itemlist: - # Si está en español va al principio, si no va al final + # If it is in Spanish it goes to the beginning, if it does not go to the end if "(Italiano)" in mirror_item.title: if best_server in mirror_item.title.lower(): new_mirror_itemlist_1.append(mirror_item) @@ -818,7 +813,7 @@ def download_all_episodes(item, channel, first_episode="", preferred_server="vid new_mirror_itemlist_4 + new_mirror_itemlist_5 + new_mirror_itemlist_6) for mirror_item in mirrors_itemlist: - logger.info("mirror=" + mirror_item.title) + logger.info("mirror= " + mirror_item.title) if "(Italiano)" in mirror_item.title: idioma = "(Italiano)" @@ -854,16 +849,13 @@ def download_all_episodes(item, channel, first_episode="", preferred_server="vid if len(video_items) > 0: video_item = video_items[0] - # Comprueba que está disponible - video_urls, puedes, motivo = servertools.resolve_video_urls_for_playing(video_item.server, - video_item.url, - video_password="", - muestra_dialogo=False) + # Check that it is available + video_urls, puedes, motivo = servertools.resolve_video_urls_for_playing(video_item.server, video_item.url, video_password="", 
muestra_dialogo=False) - # Lo añade a la lista de descargas + # Adds it to the download list if puedes: logger.info("downloading mirror started...") - # El vídeo de más calidad es el último + # The highest quality video is the latest # mediaurl = video_urls[len(video_urls) - 1][1] devuelve = downloadbest(video_urls, show_title + " " + episode_title + " " + idioma + " [" + video_item.server + "]", continuar=False) @@ -896,9 +888,8 @@ def episodio_ya_descargado(show_title, episode_title): for fichero in ficheros: # logger.info("fichero="+fichero) - if fichero.lower().startswith(show_title.lower()) and \ - scrapertools.find_single_match(fichero, "(\d+x\d+)") == episode_title: - logger.info("encontrado!") + if fichero.lower().startswith(show_title.lower()) and scrapertools.find_single_match(fichero, "(\d+x\d+)") == episode_title: + logger.info("found!") return True return False diff --git a/core/filetools.py b/core/filetools.py index 3b97063c..ec29993b 100644 --- a/core/filetools.py +++ b/core/filetools.py @@ -1,11 +1,11 @@ # -*- coding: utf-8 -*- # ------------------------------------------------------------ # filetools -# Gestion de archivos con discriminación xbmcvfs/samba/local +# File management with xbmcvfs / samba / local discrimination # ------------------------------------------------------------ from __future__ import division -#from builtins import str +# from builtins import str from future.builtins import range from past.utils import old_div import sys @@ -18,13 +18,13 @@ import traceback from core import scrapertools from platformcode import platformtools, logger -xbmc_vfs = True # False para desactivar XbmcVFS, True para activar +xbmc_vfs = True # False to disable XbmcVFS, True to enable if xbmc_vfs: try: import xbmcvfs if not PY3: - reload(sys) ### Workoround. Revisar en la migración a Python 3 - sys.setdefaultencoding('utf-8') # xbmcvfs degrada el valor de defaultencoding. Se reestablece + reload(sys) # Workoround. Review on migration to Python 3 + sys.setdefaultencoding('utf-8') # xbmcvfs demeans the value of defaultencoding. It is reestablished xbmc_vfs = True except: xbmc_vfs = False @@ -35,9 +35,9 @@ if not xbmc_vfs: from lib.sambatools import libsmb as samba except: samba = None - # Python 2.4 No compatible con modulo samba, hay que revisar + # Python 2.4 Not compatible with samba module, you have to check -# Windows es "mbcs" linux, osx, android es "utf8" +# Windows is "mbcs" linux, osx, android is "utf8" if os.name == "nt": fs_encoding = "" else: @@ -47,15 +47,15 @@ else: def validate_path(path): """ - Elimina cáracteres no permitidos - @param path: cadena a validar + Eliminate illegal characters + @param path: string to validate @type path: str @rtype: str - @return: devuelve la cadena sin los caracteres no permitidos + @return: returns the string without the characters not allowed """ chars = ":*?<>|" - if scrapertools.find_single_match(path, '(^\w+:\/\/)'): - protocolo = scrapertools.find_single_match(path, '(^\w+:\/\/)') + if scrapertools.find_single_match(path, r'(^\w+:\/\/)'): + protocolo = scrapertools.find_single_match(path, r'(^\w+:\/\/)') import re parts = re.split(r'^\w+:\/\/(.+?)/(.+)', path)[1:3] return protocolo + parts[0] + "/" + ''.join([c for c in parts[1] if c not in chars]) @@ -72,19 +72,19 @@ def validate_path(path): def encode(path, _samba=False): """ - Codifica una ruta según el sistema operativo que estemos utilizando. 
- El argumento path tiene que estar codificado en utf-8 - @type path unicode o str con codificación utf-8 - @param path parámetro a codificar + It encodes a path according to the operating system we are using. + The path argument has to be encoded in utf-8 + @type unicode or str path with utf-8 encoding + @param path parameter to encode @type _samba bool - @para _samba si la ruta es samba o no + @para _samba if the path is samba or not @rtype: str - @return ruta codificada en juego de caracteres del sistema o utf-8 si samba + @return path encoded in system character set or utf-8 if samba """ if not isinstance(path, unicode): path = unicode(path, "utf-8", "ignore") - if scrapertools.find_single_match(path, '(^\w+:\/\/)') or _samba: + if scrapertools.find_single_match(path, r'(^\w+:\/\/)') or _samba: path = path.encode("utf-8", "ignore") else: if fs_encoding and not PY3: @@ -95,12 +95,12 @@ def encode(path, _samba=False): def decode(path): """ - Convierte una cadena de texto al juego de caracteres utf-8 - eliminando los caracteres que no estén permitidos en utf-8 - @type: str, unicode, list de str o unicode - @param path: puede ser una ruta o un list() con varias rutas + Converts a text string to the utf-8 character set + removing characters that are not allowed in utf-8 + @type: str, unicode, list of str o unicode + @param path:can be a path or a list () with multiple paths @rtype: str - @return: ruta codificado en UTF-8 + @return: ruta encoded in UTF-8 """ if isinstance(path, list): for x in range(len(path)): @@ -116,16 +116,15 @@ def decode(path): def read(path, linea_inicio=0, total_lineas=None, whence=0, silent=False, vfs=True): """ - Lee el contenido de un archivo y devuelve los datos - @param path: ruta del fichero + Read the contents of a file and return the data + @param path: file path @type path: str - @param linea_inicio: primera linea a leer del fichero - @type linea_inicio: int positivo - @param total_lineas: numero maximo de lineas a leer. Si es None o superior al total de lineas se leera el - fichero hasta el final. - @type total_lineas: int positivo + @param linea_inicio: first line to read from the file + @type linea_inicio: positive int + @param total_lineas: maximum number of lines to read. If it is None or greater than the total lines, the file will be read until the end. 
+ @type total_lineas: positive int @rtype: str - @return: datos que contiene el fichero + @return: data contained in the file """ path = encode(path) try: @@ -182,13 +181,13 @@ def read(path, linea_inicio=0, total_lineas=None, whence=0, silent=False, vfs=Tr def write(path, data, mode="wb", silent=False, vfs=True): """ - Guarda los datos en un archivo - @param path: ruta del archivo a guardar + Save the data to a file + @param path: file path to save @type path: str - @param data: datos a guardar + @param data: data to save @type data: str @rtype: bool - @return: devuelve True si se ha escrito correctamente o False si ha dado un error + @return: returns True if it was written correctly or False if it gave an error """ path = encode(path) try: @@ -205,7 +204,7 @@ def write(path, data, mode="wb", silent=False, vfs=True): f.write(data) f.close() except: - logger.error("ERROR al guardar el archivo: %s" % path) + logger.error("ERROR saving file: %s" % path) if not silent: logger.error(traceback.format_exc()) return False @@ -215,11 +214,11 @@ def write(path, data, mode="wb", silent=False, vfs=True): def file_open(path, mode="r", silent=False, vfs=True): """ - Abre un archivo - @param path: ruta + Open a file + @param path: path @type path: str @rtype: str - @return: objeto file + @return: file object """ path = encode(path) try: @@ -245,11 +244,11 @@ def file_open(path, mode="r", silent=False, vfs=True): def file_stat(path, silent=False, vfs=True): """ - Stat de un archivo - @param path: ruta + Stat of a file + @param path: path @type path: str @rtype: str - @return: objeto file + @return: file object """ path = encode(path) try: @@ -266,13 +265,13 @@ def file_stat(path, silent=False, vfs=True): def rename(path, new_name, silent=False, strict=False, vfs=True): """ - Renombra un archivo o carpeta - @param path: ruta del fichero o carpeta a renombrar + Rename a file or folder + @param path: path of the file or folder to rename @type path: str - @param new_name: nuevo nombre + @param new_name: new name @type new_name: str @rtype: bool - @return: devuelve False en caso de error + @return: returns False on error """ path = encode(path) try: @@ -309,13 +308,13 @@ def rename(path, new_name, silent=False, strict=False, vfs=True): def move(path, dest, silent=False, strict=False, vfs=True): """ - Mueve un archivo - @param path: ruta del fichero a mover + Move a file + @param path: path of the file to move @type path: str - @param dest: ruta donde mover + @param dest: path where to move @type dest: str @rtype: bool - @return: devuelve False en caso de error + @return: returns False on error """ try: if xbmc_vfs and vfs: @@ -343,10 +342,10 @@ def move(path, dest, silent=False, strict=False, vfs=True): dest = encode(dest) path = encode(path) os.rename(path, dest) - # mixto En este caso se copia el archivo y luego se elimina el de origen + # mixed In this case the file is copied and then the source file is deleted else: if not silent: - dialogo = platformtools.dialog_progress("Copiando archivo", "") + dialogo = platformtools.dialog_progress("Copying file", "") return copy(path, dest) == True and remove(path) == True except: logger.error("ERROR when moving file: %s to %s" % (path, dest)) @@ -359,29 +358,29 @@ def move(path, dest, silent=False, strict=False, vfs=True): def copy(path, dest, silent=False, vfs=True): """ - Copia un archivo - @param path: ruta del fichero a copiar + Copy a file + @param path: path of the file to copy @type path: str - @param dest: ruta donde copiar + @param dest: path to copy @type 
dest: str - @param silent: se muestra o no el cuadro de dialogo + @param silent: the dialog box is displayed or not @type silent: bool @rtype: bool - @return: devuelve False en caso de error + @return: returns False on error """ try: if xbmc_vfs and vfs: path = encode(path) dest = encode(dest) if not silent: - dialogo = platformtools.dialog_progress("Copiando archivo", "") + dialogo = platformtools.dialog_progress("Copying file", "") return bool(xbmcvfs.copy(path, dest)) fo = file_open(path, "rb") fd = file_open(dest, "wb") if fo and fd: if not silent: - dialogo = platformtools.dialog_progress("Copiando archivo", "") + dialogo = platformtools.dialog_progress("Copying file", "") size = getsize(path) copiado = 0 while True: @@ -408,11 +407,11 @@ def copy(path, dest, silent=False, vfs=True): def exists(path, silent=False, vfs=True): """ - Comprueba si existe una carpeta o fichero - @param path: ruta + Check if there is a folder or file + @param path: path @type path: str @rtype: bool - @return: Retorna True si la ruta existe, tanto si es una carpeta como un archivo + @return: Returns True if the path exists, whether it is a folder or a file """ path = encode(path) try: @@ -434,16 +433,16 @@ def exists(path, silent=False, vfs=True): def isfile(path, silent=False, vfs=True): """ - Comprueba si la ruta es un fichero - @param path: ruta + Check if the path is a file + @param path: path @type path: str @rtype: bool - @return: Retorna True si la ruta existe y es un archivo + @return: Returns True if the path exists and is a file """ path = encode(path) try: if xbmc_vfs and vfs: - if not scrapertools.find_single_match(path, '(^\w+:\/\/)'): + if not scrapertools.find_single_match(path, r'(^\w+:\/\/)'): return os.path.isfile(path) if path.endswith('/') or path.endswith('\\'): path = path[:-1] @@ -466,16 +465,16 @@ def isfile(path, silent=False, vfs=True): def isdir(path, silent=False, vfs=True): """ - Comprueba si la ruta es un directorio - @param path: ruta + Check if the path is a directory + @param path: path @type path: str @rtype: bool - @return: Retorna True si la ruta existe y es un directorio + @return: Returns True if the path exists and is a directory """ path = encode(path) try: if xbmc_vfs and vfs: - if not scrapertools.find_single_match(path, '(^\w+:\/\/)'): + if not scrapertools.find_single_match(path, r'(^\w+:\/\/)'): return os.path.isdir(path) if path.endswith('/') or path.endswith('\\'): path = path[:-1] @@ -498,11 +497,11 @@ def isdir(path, silent=False, vfs=True): def getsize(path, silent=False, vfs=True): """ - Obtiene el tamaño de un archivo - @param path: ruta del fichero + Gets the size of a file + @param path: file path @type path: str @rtype: str - @return: tamaño del fichero + @return: file size """ path = encode(path) try: @@ -525,11 +524,11 @@ def getsize(path, silent=False, vfs=True): def remove(path, silent=False, vfs=True): """ - Elimina un archivo - @param path: ruta del fichero a eliminar + Delete a file + @param path: path of the file to delete @type path: str @rtype: bool - @return: devuelve False en caso de error + @return: returns False on error """ path = encode(path) try: @@ -551,11 +550,11 @@ def remove(path, silent=False, vfs=True): def rmdirtree(path, silent=False, vfs=True): """ - Elimina un directorio y su contenido - @param path: ruta a eliminar + Delete a directory and its contents + @param path: path to remove @type path: str @rtype: bool - @return: devuelve False en caso de error + @return: returns False on error """ path = encode(path) try: @@ -591,11 
+590,11 @@ def rmdirtree(path, silent=False, vfs=True): def rmdir(path, silent=False, vfs=True): """ - Elimina un directorio - @param path: ruta a eliminar + Delete a directory + @param path: path to remove @type path: str @rtype: bool - @return: devuelve False en caso de error + @return: returns False on error """ path = encode(path) try: @@ -619,11 +618,11 @@ def rmdir(path, silent=False, vfs=True): def mkdir(path, silent=False, vfs=True): """ - Crea un directorio - @param path: ruta a crear + Create a directory + @param path: path to create @type path: str @rtype: bool - @return: devuelve False en caso de error + @return: returns False on error """ path = encode(path) try: @@ -652,37 +651,37 @@ def mkdir(path, silent=False, vfs=True): def walk(top, topdown=True, onerror=None, vfs=True): """ - Lista un directorio de manera recursiva - @param top: Directorio a listar, debe ser un str "UTF-8" + List a directory recursively + @param top: Directory to list, must be a str "UTF-8" @type top: str - @param topdown: se escanea de arriba a abajo + @param topdown: scanned from top to bottom @type topdown: bool - @param onerror: muestra error para continuar con el listado si tiene algo seteado sino levanta una excepción + @param onerror: show error to continue listing if you have something set but raise an exception @type onerror: bool - ***El parametro followlinks que por defecto es True, no se usa aqui, ya que en samba no discrimina los links + ***The followlinks parameter, which by default is True, is not used here, since in samba it does not discriminate links """ top = encode(top) if xbmc_vfs and vfs: for a, b, c in walk_vfs(top, topdown, onerror): - # list(b) es para que haga una copia del listado de directorios - # si no da error cuando tiene que entrar recursivamente en directorios con caracteres especiales + # list (b) is for you to make a copy of the directory listing + # if it doesn't give error when you have to recursively enter directories with special characters yield a, list(b), c elif top.lower().startswith("smb://"): for a, b, c in samba.walk(top, topdown, onerror): - # list(b) es para que haga una copia del listado de directorios - # si no da error cuando tiene que entrar recursivamente en directorios con caracteres especiales + # list (b) is for you to make a copy of the directory listing + # if it doesn't give error when you have to recursively enter directories with special characters yield decode(a), decode(list(b)), decode(c) else: for a, b, c in os.walk(top, topdown, onerror): - # list(b) es para que haga una copia del listado de directorios - # si no da error cuando tiene que entrar recursivamente en directorios con caracteres especiales + # list (b) is for you to make a copy of the directory listing + # if it doesn't give error when you have to recursively enter directories with special characters yield decode(a), decode(list(b)), decode(c) def walk_vfs(top, topdown=True, onerror=None): """ - Lista un directorio de manera recursiva - Como xmbcvfs no tiene esta función, se copia la lógica de libsmb(samba) para realizar la previa al Walk + List a directory recursively + Since xmbcvfs does not have this function, the logic of libsmb (samba) is copied to carry out the pre-Walk """ top = encode(top) dirs, nondirs = xbmcvfs.listdir(top) @@ -707,11 +706,11 @@ def walk_vfs(top, topdown=True, onerror=None): def listdir(path, silent=False, vfs=True): """ - Lista un directorio - @param path: Directorio a listar, debe ser un str "UTF-8" + List a directory + @param path: Directory to 
list, must be a str "UTF-8" @type path: str @rtype: str - @return: contenido de un directorio + @return: content of a directory """ path = encode(path) @@ -732,10 +731,10 @@ def listdir(path, silent=False, vfs=True): def join(*paths): """ - Junta varios directorios - Corrige las barras "/" o "\" segun el sistema operativo y si es o no smaba + Join several directories + Fixes the slashes "/" or "\" according to the operating system and whether or not the path is samba @rytpe: str - @return: la ruta concatenada + @return: the concatenated path """ list_path = [] if paths[0].startswith("/"): @@ -754,14 +753,14 @@ def join(*paths): def split(path, vfs=True): """ - Devuelve una tupla formada por el directorio y el nombre del fichero de una ruta + Returns a tuple consisting of the directory and the file name of a path @param path: ruta @type path: str @return: (dirname, basename) @rtype: tuple """ - if scrapertools.find_single_match(path, '(^\w+:\/\/)'): - protocol = scrapertools.find_single_match(path, '(^\w+:\/\/)') + if scrapertools.find_single_match(path, r'(^\w+:\/\/)'): + protocol = scrapertools.find_single_match(path, r'(^\w+:\/\/)') if '/' not in path[6:]: path = path.replace(protocol, protocol + "/", 1) return path.rsplit('/', 1) @@ -771,10 +770,10 @@ def split(path, vfs=True): def basename(path, vfs=True): """ - Devuelve el nombre del fichero de una ruta - @param path: ruta + Returns the file name of a path + @param path: path @type path: str - @return: fichero de la ruta + @return: file name of the path @rtype: str """ return split(path)[1] @@ -782,10 +781,10 @@ def basename(path, vfs=True): def dirname(path, vfs=True): """ - Devuelve el directorio de una ruta - @param path: ruta + Returns the directory of a path + @param path: path @type path: str - @return: directorio de la ruta + @return: directory of the path @rtype: str """ return split(path)[0] @@ -797,15 +796,15 @@ def dirname(path, vfs=True): def is_relative(path): def remove_tags(title): """ - devuelve el titulo sin tags como color + returns the title without tags such as color @type title: str @param title: title @rtype: str - @return: cadena sin tags + @return: string without tags """ logger.info() - title_without_tags = scrapertools.find_single_match(title, '\[color .+?\](.+)\[\/color\]') + title_without_tags = scrapertools.find_single_match(title, r'\[color .+?\](.+)\[\/color\]') if title_without_tags: return title_without_tags @@ -815,19 +814,19 @@ def remove_tags(title): def remove_smb_credential(path): """ - devuelve el path sin contraseña/usuario para paths de SMB - @param path: ruta + returns the path without the user/password for SMB paths + @param path: path @type path: str - @return: cadena sin credenciales + @return: string without credentials @rtype: str """ logger.info() - if not scrapertools.find_single_match(path, '(^\w+:\/\/)'): + if not scrapertools.find_single_match(path, r'(^\w+:\/\/)'): return path - protocol = scrapertools.find_single_match(path, '(^\w+:\/\/)') - path_without_credentials = scrapertools.find_single_match(path, '^\w+:\/\/(?:[^;\n]+;)?(?:[^:@\n]+[:|@])?(?:[^@\n]+@)?(.*?$)') + protocol = scrapertools.find_single_match(path, r'(^\w+:\/\/)') + path_without_credentials = scrapertools.find_single_match(path, r'^\w+:\/\/(?:[^;\n]+;)?(?:[^:@\n]+[:|@])?(?:[^@\n]+@)?(.*?$)') if path_without_credentials: return (protocol + path_without_credentials) diff --git a/core/httptools.py b/core/httptools.py index 9cf736ed..1b005e0c 100755 --- a/core/httptools.py +++ b/core/httptools.py @@ -384,7 +384,7 @@ def downloadpage(url, **opt): info_dict.append(('Success', 'False'))
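As a quick illustration of how the translated path helpers compose, a minimal sketch assuming the addon's core package is importable; the smb:// share and credentials are made-up values and the commented results are approximate:

    from core import filetools

    # Sketch only: the share name and credentials are hypothetical
    path = filetools.join("smb://user:pass@nas/share", "library", "movies")
    print(filetools.split(path))                  # ("smb://user:pass@nas/share/library", "movies")
    print(filetools.basename(path))               # "movies"
    print(filetools.dirname(path))                # "smb://user:pass@nas/share/library"
    print(filetools.remove_smb_credential(path))  # "smb://nas/share/library/movies"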
response['code'] = str(e) info_dict.append(('Response code', str(e))) - info_dict.append(('Finalizado en', time.time() - inicio)) + info_dict.append(('Finished in', time.time() - inicio)) if not opt.get('alfa_s', False): show_infobox(info_dict) return type('HTTPResponse', (), response) diff --git a/core/item.py b/core/item.py index f3d33b9a..5f17dadf 100644 --- a/core/item.py +++ b/core/item.py @@ -12,9 +12,9 @@ if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int if PY3: #from future import standard_library #standard_library.install_aliases() - import urllib.parse as urllib # Es muy lento en PY2. En PY3 es nativo + import urllib.parse as urllib # It is very slow in PY2. In PY3 it is native else: - import urllib # Usamos el nativo de PY2 que es más rápido + import urllib # We use the native of PY2 which is faster from core.scrapertools import unescape import base64 @@ -29,14 +29,14 @@ class InfoLabels(dict): def __setitem__(self, name, value): if name in ["season", "episode"]: - # forzamos int() en season y episode + # we force int () in season and episode try: super(InfoLabels, self).__setitem__(name, int(value)) except: pass elif name in ['IMDBNumber', 'imdb_id']: - # Por compatibilidad hemos de guardar el valor en los tres campos + # For compatibility we have to save the value in the three fields super(InfoLabels, self).__setitem__('IMDBNumber', str(value)) # super(InfoLabels, self).__setitem__('code', value) super(InfoLabels, self).__setitem__('imdb_id', str(value)) @@ -62,22 +62,22 @@ class InfoLabels(dict): El parametro 'default' en la funcion obj_infoLabels.get(key,default) tiene preferencia sobre los aqui definidos. """ if key in ['rating']: - # Ejemplo de clave q devuelve un str formateado como float por defecto + # Key example q returns a str formatted as float by default return '0.0' elif key == 'code': code = [] - # Añadir imdb_id al listado de codigos + # Add imdb_id to the code list if 'imdb_id' in list(super(InfoLabels, self).keys()) and super(InfoLabels, self).__getitem__('imdb_id'): code.append(super(InfoLabels, self).__getitem__('imdb_id')) - # Completar con el resto de codigos + # Complete with the rest of the codes for scr in ['tmdb_id', 'tvdb_id', 'noscrap_id']: if scr in list(super(InfoLabels, self).keys()) and super(InfoLabels, self).__getitem__(scr): value = "%s%s" % (scr[:-2], super(InfoLabels, self).__getitem__(scr)) code.append(value) - # Opcion añadir un code del tipo aleatorio + # Option to add a code of the random type if not code: import time value = time.strftime("%Y%m%d%H%M%S", time.gmtime()) @@ -109,7 +109,7 @@ class InfoLabels(dict): return 'list' else: - # El resto de claves devuelven cadenas vacias por defecto + # The rest of the keys return empty strings by default return "" def tostring(self, separador=', '): @@ -132,7 +132,7 @@ class InfoLabels(dict): class Item(object): def __init__(self, **kwargs): """ - Inicializacion del item + Item initialization """ # Creamos el atributo infoLabels @@ -159,14 +159,13 @@ class Item(object): def __contains__(self, m): """ - Comprueba si un atributo existe en el item + Check if an attribute exists in the item """ return m in self.__dict__ def __setattr__(self, name, value): """ - Función llamada al modificar cualquier atributo del item, modifica algunos atributos en función de los datos - modificados. + Function called when modifying any attribute of the item, modifies some attributes based on the modified data. 
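A short sketch of the InfoLabels behaviour being documented here (assumed import path, illustrative values):

    from core.item import InfoLabels

    il = InfoLabels()
    il["season"] = "2"            # coerced to int 2
    il["episode"] = "05"          # coerced to int 5
    il["imdb_id"] = "tt0903747"   # also mirrored to il["IMDBNumber"]
    print(il["rating"])           # "0.0", the documented default for a missing rating
    print(il["code"])             # built on demand, here just ["tt0903747"]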
""" if PY3: name = self.toutf8(name) value = self.toutf8(value) @@ -175,14 +174,14 @@ class Item(object): self.__setattr__(key, value[key]) return - # Descodificamos los HTML entities + # We decode the HTML entities if name in ["title", "plot", "fulltitle", "contentPlot", "contentTitle"]: value = self.decode_html(value) - # Al modificar cualquiera de estos atributos content... + # By modifying any of these attributes content... if name in ["contentTitle", "contentPlot", "plot", "contentSerieName", "contentType", "contentEpisodeTitle", "contentSeason", "contentEpisodeNumber", "contentThumbnail", "show", "contentQuality", "quality"]: - # ...y actualizamos infoLables + # ...and update infoLables if name == "contentTitle": self.__dict__["infoLabels"]["title"] = value elif name == "contentPlot" or name == "plot": @@ -203,13 +202,13 @@ class Item(object): self.__dict__["infoLabels"]["quality"] = value elif name == "duration": - # String q representa la duracion del video en segundos + # String q represents the duration of the video in seconds self.__dict__["infoLabels"]["duration"] = str(value) elif name == "viewcontent" and value not in ["files", "movies", "tvshows", "seasons", "episodes"]: super(Item, self).__setattr__("viewcontent", "files") - # Al asignar un valor a infoLables + # When assigning a value to infoLables elif name == "infoLabels": if isinstance(value, dict): value_defaultdict = InfoLabels(value) @@ -220,22 +219,22 @@ class Item(object): def __getattr__(self, name): """ - Devuelve los valores por defecto en caso de que el atributo solicitado no exista en el item + Returns the default values ​​in case the requested attribute does not exist in the item """ if name.startswith("__"): return super(Item, self).__getattribute__(name) - # valor por defecto para folder + # default value for folder if name == "folder": return True - # valor por defecto para contentChannel + # default value for contentChannel elif name == "contentChannel": return "list" - # valor por defecto para viewcontent + # default value for viewcontent elif name == "viewcontent": - # intentamos fijarlo segun el tipo de contenido... + # we try to fix it according to the type of content... 
if self.__dict__["infoLabels"]["mediatype"] == 'movie': viewcontent = 'movies' elif self.__dict__["infoLabels"]["mediatype"] in ["tvshow", "season", "episode"]: @@ -246,7 +245,7 @@ class Item(object): self.__dict__["viewcontent"] = viewcontent return viewcontent - # valores guardados en infoLabels + # values ​​saved in infoLabels elif name in ["contentTitle", "contentPlot", "contentSerieName", "show", "contentType", "contentEpisodeTitle", "contentSeason", "contentEpisodeNumber", "contentThumbnail", "plot", "duration", "contentQuality", "quality"]: @@ -258,7 +257,7 @@ class Item(object): return self.__dict__["infoLabels"]["tvshowtitle"] elif name == "contentType": ret = self.__dict__["infoLabels"]["mediatype"] - if ret == 'list' and self.__dict__.get("fulltitle", None): # retrocompatibilidad + if ret == 'list' and self.__dict__.get("fulltitle", None): # backward compatibility ret = 'movie' self.__dict__["infoLabels"]["mediatype"] = ret return ret @@ -275,7 +274,7 @@ class Item(object): else: return self.__dict__["infoLabels"][name] - # valor por defecto para el resto de atributos + # default value for all other attributes else: return "" @@ -284,28 +283,28 @@ class Item(object): def set_parent_content(self, parentContent): """ - Rellena los campos contentDetails con la informacion del item "padre" - @param parentContent: item padre + Fill the contentDetails fields with the information of the item "parent" + @param parentContent: item father @type parentContent: item """ - # Comprueba que parentContent sea un Item + # Check that parentContent is an Item if not type(parentContent) == type(self): return - # Copia todos los atributos que empiecen por "content" y esten declarados y los infoLabels + # Copy all the attributes that start with "content" and are declared and the infoLabels for attr in parentContent.__dict__: if attr.startswith("content") or attr == "infoLabels": self.__setattr__(attr, parentContent.__dict__[attr]) def tostring(self, separator=", "): """ - Genera una cadena de texto con los datos del item para el log - Uso: logger.info(item.tostring()) - @param separator: cadena que se usará como separador + Generate a text string with the item's data for the log + Use: logger.info(item.tostring()) + @param separator: string to be used as a separator @type separator: str '""" dic = self.__dict__.copy() - # Añadimos los campos content... si tienen algun valor + # We add the content fields... if they have any value for key in ["contentTitle", "contentPlot", "contentSerieName", "contentEpisodeTitle", "contentSeason", "contentEpisodeNumber", "contentThumbnail"]: value = self.__getattr__(key) @@ -337,10 +336,9 @@ class Item(object): def tourl(self): """ - Genera una cadena de texto con los datos del item para crear una url, para volver generar el Item usar - item.fromurl(). + Generate a text string with the item data to create a url, to re-generate the Item use item.fromurl (). - Uso: url = item.tourl() + Use: url = item.tourl() """ dump = json.dump(self.__dict__).encode("utf8") # if empty dict @@ -351,9 +349,9 @@ class Item(object): def fromurl(self, url): """ - Genera un item a partir de una cadena de texto. La cadena puede ser creada por la funcion tourl() o tener - el formato antiguo: plugin://plugin.video.kod/?channel=... (+ otros parametros) - Uso: item.fromurl("cadena") + Generate an item from a text string. The string can be created by the tourl () function or have +        the old format: plugin: //plugin.video.kod/? channel = ... 
(+ other parameters) + Use: item.fromurl("string") @param url: url @type url: str @@ -384,12 +382,12 @@ class Item(object): def tojson(self, path=""): from core import filetools """ - Crea un JSON a partir del item, para guardar archivos de favoritos, lista de descargas, etc... - Si se especifica un path, te lo guarda en la ruta especificada, si no, devuelve la cadena json - Usos: item.tojson(path="ruta\archivo\json.json") - file.write(item.tojson()) + Create a JSON from the item, to save favorite files, download list, etc.... + If a path is specified, it saves it in the specified path, if not, it returns the string json + Applications: item.tojson(path="path\archivo\json.json") + file.write(item.tojson()) - @param path: ruta + @param path: path @type path: str """ if path: @@ -401,14 +399,14 @@ class Item(object): def fromjson(self, json_item=None, path=""): from core import filetools """ - Genera un item a partir de un archivo JSON - Si se especifica un path, lee directamente el archivo, si no, lee la cadena de texto pasada. - Usos: item = Item().fromjson(path="ruta\archivo\json.json") - item = Item().fromjson("Cadena de texto json") + Generate an item from a JSON file + If a path is specified, it directly reads the file, if not, it reads the passed text string. + Applications: item = Item().fromjson(path="path\archivo\json.json") + item = Item().fromjson("Cadena de texto json") @param json_item: item @type json_item: json - @param path: ruta + @param path: path @type path: str """ if path: @@ -431,9 +429,9 @@ class Item(object): def clone(self, **kwargs): """ - Genera un nuevo item clonando el item actual - Usos: NuevoItem = item.clone() - NuevoItem = item.clone(title="Nuevo Titulo", action = "Nueva Accion") + Generate a new item by cloning the current item + Applications: NewItem = item.clone() + NuewItem = item.clone(title="New Title", action = "New Action") """ newitem = copy.deepcopy(self) if "infoLabels" in kwargs: @@ -447,8 +445,8 @@ class Item(object): @staticmethod def decode_html(value): """ - Descodifica las HTML entities - @param value: valor a decodificar + Decode the HTML entities + @param value: value to decode @type value: str """ try: @@ -461,7 +459,7 @@ class Item(object): def toutf8(self, *args): """ - Pasa el item a utf8 + Pass the item to utf8 """ if len(args) > 0: value = args[0] diff --git a/core/jsontools.py b/core/jsontools.py index d12a5213..6ebc799b 100644 --- a/core/jsontools.py +++ b/core/jsontools.py @@ -80,15 +80,14 @@ def to_utf8(dct): def get_node_from_file(name_file, node, path=None): """ - Obtiene el nodo de un fichero JSON + Gets the node of a JSON file - @param name_file: Puede ser el nombre de un canal o server (sin incluir extension) - o bien el nombre de un archivo json (con extension) + @param name_file: It can be the name of a channel or server (not including extension) or the name of a json file (with extension) @type name_file: str - @param node: nombre del nodo a obtener + @param node: name of the node to obtain @type node: str - @param path: Ruta base del archivo json. Por defecto la ruta de settings_channels. - @return: dict con el nodo a devolver + @param path: Base path of the json file. By default the path of settings_channels. 
+ @return: dict with the node to return @rtype: dict """ logger.info() @@ -121,14 +120,13 @@ def get_node_from_file(name_file, node, path=None): def check_to_backup(data, fname, dict_data): """ - Comprueba que si dict_data(conversion del fichero JSON a dict) no es un diccionario, se genere un fichero con - data de nombre fname.bk. + Checks that, if dict_data (the JSON file converted to a dict) is not a dictionary, a backup file named fname.bk is generated with the data. - @param data: contenido del fichero fname + @param data: content of the file fname @type data: str - @param fname: nombre del fichero leido + @param fname: name of the file read @type fname: str - @param dict_data: nombre del diccionario + @param dict_data: dictionary name @type dict_data: dict """ logger.info() @@ -137,7 +135,7 @@ def check_to_backup(data, fname, dict_data): if data != "": - # se crea un nuevo fichero + # a new file is created from core import filetools title = filetools.write("%s.bk" % fname, data) if title != "": @@ -150,16 +148,15 @@ def check_to_backup(data, fname, dict_data): def update_node(dict_node, name_file, node, path=None, silent=False): """ - actualiza el json_data de un fichero con el diccionario pasado + updates the json_data of a file with the passed dictionary - @param dict_node: diccionario con el nodo + @param dict_node: dictionary with the node @type dict_node: dict - @param name_file: Puede ser el nombre de un canal o server (sin incluir extension) - o bien el nombre de un archivo json (con extension) + @param name_file: It can be the name of a channel or server (not including extension) or the name of a json file (with extension) @type name_file: str - @param node: nodo a actualizar - @param path: Ruta base del archivo json. Por defecto la ruta de settings_channels. - @return result: Devuelve True si se ha escrito correctamente o False si ha dado un error + @param node: node to update + @param path: Base path of the json file. By default the path of settings_channels. 
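A small sketch of the read/update cycle these docstrings describe, assuming a settings_channels style file; the channel and key names are hypothetical:

    from core import jsontools

    settings = jsontools.get_node_from_file("example_channel", "settings")  # hypothetical channel
    settings["include_in_newest_peliculas"] = False
    jsontools.update_node(settings, "example_channel", "settings")  # per the docstring, reports success and the updated json_data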
+ @return result: Returns True if it was written correctly or False if it gave an error @rtype: bool @return json_data @rtype: dict @@ -182,7 +179,7 @@ def update_node(dict_node, name_file, node, path=None, silent=False): try: data = filetools.read(fname) dict_data = load(data) - # es un dict + # it's a dict if dict_data: if node in dict_data: if not silent: logger.debug(" the key exists %s" % node) diff --git a/core/scraper.py b/core/scraper.py index 44b945fb..2515a77d 100644 --- a/core/scraper.py +++ b/core/scraper.py @@ -9,8 +9,7 @@ from core.item import InfoLabels from platformcode import config, logger from platformcode import platformtools -# Este modulo es una interface para poder implementar diferentes scrapers -# contendra todos las funciones comunes +# This module is an interface to implement different scrapers, it will contain all the common functions dict_default = None scraper = None @@ -18,36 +17,35 @@ scraper = None def find_and_set_infoLabels(item): """ - función que se llama para buscar y setear los infolabels + function called to search and set infolabels :param item: - :return: boleano que indica si se ha podido encontrar el 'code' + :return: Boolean indicating if the 'code' could be found """ global scraper scraper = None # logger.debug("item:\n" + item.tostring('\n')) list_opciones_cuadro = [config.get_localized_string(60223), config.get_localized_string(60224)] - # Si se añaden más scrapers hay q declararlos aqui-> "modulo_scraper": "Texto_en_cuadro" - scrapers_disponibles = {'tmdb': config.get_localized_string(60225), - 'tvdb': config.get_localized_string(60226)} + # If more scrapers are added, they must be declared here-> "modulo_scraper": "Text_in_box" + scrapers_disponibles = {'tmdb': config.get_localized_string(60225), 'tvdb': config.get_localized_string(60226)} - # Obtener el Scraper por defecto de la configuracion segun el tipo de contenido + # Get the default Scraper of the configuration according to the content type if item.contentType == "movie": scraper_actual = ['tmdb'][config.get_setting("scraper_movies", "videolibrary")] tipo_contenido = config.get_localized_string(70283) title = item.contentTitle - # Completar lista de opciones para este tipo de contenido + # Complete list of options for this type of content list_opciones_cuadro.append(scrapers_disponibles['tmdb']) else: scraper_actual = ['tmdb', 'tvdb'][config.get_setting("scraper_tvshows", "videolibrary")] tipo_contenido = "serie" title = item.contentSerieName - # Completar lista de opciones para este tipo de contenido + # Complete list of options for this type of content list_opciones_cuadro.append(scrapers_disponibles['tmdb']) list_opciones_cuadro.append(scrapers_disponibles['tvdb']) - # Importamos el scraper + # We import the scraper try: scraper = __import__('core.%s' % scraper_actual, fromlist=["core.%s" % scraper_actual]) except ImportError: @@ -57,34 +55,34 @@ def find_and_set_infoLabels(item): logger.error(traceback.format_exc()) while scraper: - # Llamamos a la funcion find_and_set_infoLabels del scraper seleccionado + # We call the find_and_set_infoLabels function of the selected scraper scraper_result = scraper.find_and_set_infoLabels(item) - # Verificar si existe 'code' + # Check if there is a 'code' if scraper_result and item.infoLabels['code']: - # code correcto + # correct code logger.info("Identificador encontrado: %s" % item.infoLabels['code']) scraper.completar_codigos(item) return True elif scraper_result: - # Contenido encontrado pero no hay 'code' + # Content found but no 'code' 
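For context, a hedged sketch of how a channel would normally call this function; the title is a made-up value:

    from core import scraper
    from core.item import Item

    item = Item(contentTitle="Some Movie", contentType="movie")  # made-up title
    if scraper.find_and_set_infoLabels(item):
        print(item.infoLabels["code"])  # filled in once the scraper found an id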
msg = config.get_localized_string(60227) % title else: - # Contenido no encontrado + # Content not found msg = config.get_localized_string(60228) % title logger.info(msg) - # Mostrar cuadro con otras opciones: + # Show box with other options: if scrapers_disponibles[scraper_actual] in list_opciones_cuadro: list_opciones_cuadro.remove(scrapers_disponibles[scraper_actual]) index = platformtools.dialog_select(msg, list_opciones_cuadro) if index < 0: - logger.debug("Se ha pulsado 'cancelar' en la ventana '%s'" % msg) + logger.debug("You have clicked 'cancel' in the window '%s'" % msg) return False elif index == 0: - # Pregunta el titulo + # Ask the title title = platformtools.dialog_input(title, config.get_localized_string(60229) % tipo_contenido) if title: if item.contentType == "movie": @@ -92,25 +90,25 @@ def find_and_set_infoLabels(item): else: item.contentSerieName = title else: - logger.debug("he pulsado 'cancelar' en la ventana 'Introduzca el nombre correcto'") + logger.debug("I clicked 'cancel' in the window 'Enter the correct name'") return False elif index == 1: - # Hay q crear un cuadro de dialogo para introducir los datos - logger.info("Completar información") + # You have to create a dialog box to enter the data + logger.info("Complete information") if cuadro_completar(item): - # code correcto - logger.info("Identificador encontrado: %s" % str(item.infoLabels['code'])) + # correct code + logger.info("Identifier found: %s" % str(item.infoLabels['code'])) return True # raise elif list_opciones_cuadro[index] in list(scrapers_disponibles.values()): - # Obtener el nombre del modulo del scraper + # Get the name of the scraper module for k, v in list(scrapers_disponibles.items()): if list_opciones_cuadro[index] == v: if scrapers_disponibles[scraper_actual] not in list_opciones_cuadro: list_opciones_cuadro.append(scrapers_disponibles[scraper_actual]) - # Importamos el scraper k + # We import the scraper k scraper_actual = k try: scraper = None @@ -119,7 +117,7 @@ def find_and_set_infoLabels(item): exec("import core." 
+ scraper_actual + " as scraper_module") break - logger.error("Error al importar el modulo scraper %s" % scraper_actual) + logger.error("Error importing the scraper module %s" % scraper_actual) def cuadro_completar(item): @@ -129,7 +127,7 @@ def cuadro_completar(item): dict_default = {} COLOR = ["0xFF65B3DA", "0xFFFFFFFF"] - # Creamos la lista de campos del infoLabel + # We create the list of infoLabel fields controls = [("title", "text", config.get_localized_string(60230)), ("originaltitle", "text", config.get_localized_string(60231)), ("year", "text", config.get_localized_string(60232)), @@ -171,7 +169,7 @@ def cuadro_completar(item): if len(c) > 3: enabled += c[3] - # default para casos especiales + # default for special cases if c[0] == "url_tmdb" and item.infoLabels["tmdb_id"] and 'tmdb' in item.infoLabels["url_scraper"]: dict_default[c[0]] = item.infoLabels["url_scraper"] @@ -181,7 +179,7 @@ def cuadro_completar(item): if not dict_default[c[0]] or dict_default[c[0]] == 'None' or dict_default[c[0]] == 0: dict_default[c[0]] = '' elif isinstance(dict_default[c[0]], (int, float)) or (not PY3 and isinstance(dict_default[c[0]], (int, float, long))): - # Si es numerico lo convertimos en str + # If it is numerical we convert it into str dict_default[c[0]] = str(dict_default[c[0]]) listado_controles.append({'id': c[0], @@ -207,7 +205,7 @@ def callback_cuadro_completar(item, dict_values): global dict_default if dict_values.get("title", None): - # Adaptar dict_values a infoLabels validos + # Adapt dict_values ​​to valid infoLabels dict_values['mediatype'] = ['movie', 'tvshow'][dict_values['mediatype']] for k, v in list(dict_values.items()): if k in dict_default and dict_default[k] == dict_values[k]: @@ -229,16 +227,16 @@ def callback_cuadro_completar(item, dict_values): def get_nfo(item): """ - Devuelve la información necesaria para que se scrapee el resultado en la videoteca de kodi, + Returns the information necessary for the result to be scraped into the kodi video library, - @param item: elemento que contiene los datos necesarios para generar la info + @param item: element that contains the data necessary to generate the info @type item: Item @rtype: str @return: """ logger.info() if "infoLabels" in item and "noscrap_id" in item.infoLabels: - # Crea el fichero xml con los datos que se obtiene de item ya que no hay ningún scraper activo + # Create the xml file with the data obtained from the item since there is no active scraper info_nfo = '' if "season" in item.infoLabels and "episode" in item.infoLabels: diff --git a/core/scrapertools.py b/core/scrapertools.py index 0067a9f1..0ab6440c 100644 --- a/core/scrapertools.py +++ b/core/scrapertools.py @@ -56,15 +56,15 @@ def find_multiple_matches_groups(text, pattern): return [m.groupdict() for m in r.finditer(text)] -# Convierte los codigos html "ñ" y lo reemplaza por "ñ" caracter unicode utf-8 +# Convert html codes "ñ" and replace it with "ñ" unicode utf-8 character def decodeHtmlentities(data): - entity_re = re.compile("&(#?)(\d{1,5}|\w{1,8})(;?)") + entity_re = re.compile(r"&(#?)(\d{1,5}|\w{1,8})(;?)") def substitute_entity(match): ent = match.group(2) + match.group(3) res = "" while not ent in html5 and not ent.endswith(";") and match.group(1) != "#": - # Excepción para cuando '&' se usa como argumento en la urls contenidas en los datos + # Exception for when '&' is used as an argument in the urls contained in the data try: res = ent[-1] + res ent = ent[:-1] @@ -85,9 +85,9 @@ def decodeHtmlentities(data): def unescape(text): - """Removes 
HTML or XML character references - and entities from a text string. - keep &, >, < in the source code. + """ + Removes HTML or XML character references and entities from a text string. + keep &, >, < in the source code. from Fredrik Lundh http://effbot.org/zone/re-sub.htm#unescape-html """ @@ -122,7 +122,7 @@ def unescape(text): return re.sub("&#?\w+;", fixup, str(text)) - # Convierte los codigos html "ñ" y lo reemplaza por "ñ" caracter unicode utf-8 + # Convert html codes "ñ" and replace it with "ñ" unicode utf-8 character # def decodeHtmlentities(string): @@ -277,7 +277,7 @@ def htmlclean(cadena): def slugify(title): # print title - # Sustituye acentos y eñes + # Substitutes accents and eñes title = title.replace("Á", "a") title = title.replace("É", "e") title = title.replace("Í", "i") @@ -305,23 +305,23 @@ def slugify(title): title = title.replace("/", "-") title = title.replace("&", "&") - # Pasa a minúsculas + # Lowercase title = title.lower().strip() - # Elimina caracteres no válidos + # Remove invalid characters validchars = "abcdefghijklmnopqrstuvwxyz1234567890- " title = ''.join(c for c in title if c in validchars) - # Sustituye espacios en blanco duplicados y saltos de línea - title = re.compile("\s+", re.DOTALL).sub(" ", title) + # Replace duplicate blanks and line breaks + title = re.compile(r"\s+", re.DOTALL).sub(" ", title) - # Sustituye espacios en blanco por guiones - title = re.compile("\s", re.DOTALL).sub("-", title.strip()) + # Replace blanks with hyphens + title = re.compile(r"\s", re.DOTALL).sub("-", title.strip()) - # Sustituye espacios en blanco duplicados y saltos de línea - title = re.compile("\-+", re.DOTALL).sub("-", title) + # Replace duplicate blanks and line breaks + title = re.compile(r"\-+", re.DOTALL).sub("-", title) - # Arregla casos especiales + # Fix special cases if title.startswith("-"): title = title[1:] @@ -337,10 +337,10 @@ def remove_htmltags(string): def remove_show_from_title(title, show): # print slugify(title)+" == "+slugify(show) - # Quita el nombre del programa del título + # Remove program name from title if slugify(title).startswith(slugify(show)): - # Convierte a unicode primero, o el encoding se pierde + # Convert to unicode first, or encoding is lost title = unicode(title, "utf-8", "replace") show = unicode(show, "utf-8", "replace") title = title[len(show):].strip() @@ -351,7 +351,7 @@ def remove_show_from_title(title, show): if title == "": title = str(time.time()) - # Vuelve a utf-8 + # Return to utf-8 title = title.encode("utf-8", "ignore") show = show.encode("utf-8", "ignore") @@ -360,15 +360,15 @@ def remove_show_from_title(title, show): def get_filename_from_url(url): if PY3: - import urllib.parse as urlparse # Es muy lento en PY2. En PY3 es nativo + import urllib.parse as urlparse # It is very slow in PY2. In PY3 it is native else: - import urlparse # Usamos el nativo de PY2 que es más rápido + import urlparse # We use the native of PY2 which is faster parsed_url = urlparse.urlparse(url) try: filename = parsed_url.path except: - # Si falla es porque la implementación de parsed_url no reconoce los atributos como "path" + # If it fails it is because the implementation of parsed_url does not recognize the attributes as "path" if len(parsed_url) >= 4: filename = parsed_url[2] else: @@ -382,15 +382,15 @@ def get_filename_from_url(url): def get_domain_from_url(url): if PY3: - import urllib.parse as urlparse # Es muy lento en PY2. En PY3 es nativo + import urllib.parse as urlparse # It is very slow in PY2. 
In PY3 it is native else: - import urlparse # Usamos el nativo de PY2 que es más rápido + import urlparse # We use the PY2 native module, which is faster parsed_url = urlparse.urlparse(url) try: filename = parsed_url.netloc except: - # Si falla es porque la implementación de parsed_url no reconoce los atributos como "path" + # If it fails it is because the parsed_url implementation does not expose attributes such as "path" if len(parsed_url) >= 4: filename = parsed_url[1] else: @@ -401,8 +401,8 @@ def get_domain_from_url(url): def get_season_and_episode(title): """ - Retorna el numero de temporada y de episodio en formato "1x01" obtenido del titulo de un episodio - Ejemplos de diferentes valores para title y su valor devuelto: + Returns the season and episode number in "1x01" format obtained from the title of an episode + Examples of different values for title and their return value: "serie 101x1.strm", "s101e1.avi", "t101e1.avi" -> '101x01' "Name TvShow 1x6.avi" -> '1x06' "Temp 3 episodio 2.avi" -> '3x02' @@ -412,9 +412,9 @@ def get_season_and_episode(title): "Episodio 25: titulo episodio" -> '' (no existe el numero de temporada) "Serie X Temporada 1" -> '' (no existe el numero del episodio) @type title: str - @param title: titulo del episodio de una serie + @param title: title of a series episode @rtype: str - @return: Numero de temporada y episodio en formato "1x01" o cadena vacia si no se han encontrado + @return: Season and episode number in "1x01" format or an empty string if not found """ filename = "" diff --git a/core/servertools.py b/core/servertools.py index b370a088..afb846a0 100644 --- a/core/servertools.py +++ b/core/servertools.py @@ -12,9 +12,9 @@ if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int if PY3: #from future import standard_library #standard_library.install_aliases() - import urllib.parse as urlparse # Es muy lento en PY2. En PY3 es nativo + import urllib.parse as urlparse # It is very slow in PY2. In PY3 it is native else: - import urlparse # Usamos el nativo de PY2 que es más rápido + import urlparse # We use the PY2 native module, which is faster from future.builtins import range from past.utils import old_div @@ -35,38 +35,38 @@ server_list = {} def find_video_items(item=None, data=None): """ - Función genérica para buscar vídeos en una página, devolviendo un itemlist con los items listos para usar. - - Si se pasa un Item como argumento, a los items resultantes mantienen los parametros del item pasado - - Si no se pasa un Item, se crea uno nuevo, pero no contendra ningun parametro mas que los propios del servidor. + Generic function to search for videos on a page, returning an itemlist with the ready-to-use items. + - If an Item is passed as an argument, the resulting items keep the parameters of the passed item + - If an Item is not passed, a new one is created, but it will not contain any parameters other than those of the server. 
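A minimal usage sketch for find_video_items, assuming it runs inside the Kodi addon; the page url is a placeholder:

    from core import servertools
    from core.item import Item

    page = Item(url="https://example.com/some-episode")  # placeholder url
    for it in servertools.find_video_items(page):
        print(it.server, it.url)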
- @param item: Item al cual se quieren buscar vídeos, este debe contener la url válida + @param item: Item to which you want to search for videos, this must contain the valid url @type item: Item - @param data: Cadena con el contendio de la página ya descargado (si no se pasa item) + @param data: String with the page content already downloaded (if item is not passed) @type data: str - @return: devuelve el itemlist con los resultados + @return: returns the itemlist with the results @rtype: list """ logger.info() itemlist = [] - # Descarga la página + # Download the page if data is None: data = httptools.downloadpage(item.url).data data = unshortenit.findlinks(data) - # Crea un item si no hay item + # Create an item if there is no item if item is None: item = Item() - # Pasa los campos thumbnail y title a contentThumbnail y contentTitle + # Pass the thumbnail and title fields to contentThumbnail and contentTitle else: if not item.contentThumbnail: item.contentThumbnail = item.thumbnail if not item.contentTitle: item.contentTitle = item.title - # Busca los enlaces a los videos + # Find the links to the videos for label, url, server, thumbnail in findvideos(data): title = config.get_localized_string(70206) % label itemlist.append( @@ -77,29 +77,28 @@ def find_video_items(item=None, data=None): def get_servers_itemlist(itemlist, fnc=None, sort=False): """ - Obtiene el servidor para cada uno de los items, en funcion de su url. - - Asigna el servidor, la url modificada, el thumbnail (si el item no contiene contentThumbnail se asigna el del thumbnail) - - Si se pasa una funcion por el argumento fnc, esta se ejecuta pasando el item como argumento, - el resultado de esa funcion se asigna al titulo del item - - En esta funcion podemos modificar cualquier cosa del item - - Esta funcion siempre tiene que devolver el item.title como resultado - - Si no se encuentra servidor para una url, se asigna "directo" - - @param itemlist: listado de items + Get the server for each of the items, based on their url. 
+ - Assign the server, the modified url, the thumbnail (if the item does not contain contentThumbnail the thumbnail is assigned) + - If a function is passed through the fnc argument, it is executed by passing the item as an argument, the result of that function is assigned to the title of the item + - In this function we can modify anything of the item + - This function always has to return the item.title as a result + - If no server is found for a url, it is assigned "direct" + + @param itemlist: item list @type itemlist: list - @param fnc: función para ejecutar con cada item (para asignar el titulo) + @param fnc: function to execute with each item (to assign the title) @type fnc: function - @param sort: indica si el listado resultante se ha de ordenar en funcion de la lista de servidores favoritos + @param sort: indicates whether the resulting list should be ordered based on the list of favorite servers @type sort: bool """ - # Recorre los servidores + # Roam the servers for serverid in list(get_servers_list().keys()): server_parameters = get_server_parameters(serverid) - # Recorre los patrones + # Walk the patterns for pattern in server_parameters.get("find_videos", {}).get("patterns", []): logger.info(pattern["pattern"]) - # Recorre los resultados + # Scroll through the results for match in re.compile(pattern["pattern"], re.DOTALL).finditer( "\n".join([item.url.split('|')[0] for item in itemlist if not item.server])): url = pattern["url"] @@ -117,13 +116,13 @@ def get_servers_itemlist(itemlist, fnc=None, sort=False): else: item.url = url - # Eliminamos los servidores desactivados - #itemlist = filter(lambda i: not i.server or is_server_enabled(i.server), itemlist) - # Filtrar si es necesario + # We remove the deactivated servers + # itemlist = filter(lambda i: not i.server or is_server_enabled(i.server), itemlist) + # Filter if necessary itemlist = filter_servers(itemlist) for item in itemlist: - # Asignamos "directo" en caso de que el server no se encuentre en Alfa + # We assign "direct" in case the server is not in KoD if not item.server and item.url: item.server = "directo" @@ -131,7 +130,7 @@ def get_servers_itemlist(itemlist, fnc=None, sort=False): item.title = fnc(item) - # Ordenar segun favoriteslist si es necesario + # Sort according to favoriteslist if necessary if sort: itemlist = sort_servers(itemlist) @@ -140,11 +139,11 @@ def get_servers_itemlist(itemlist, fnc=None, sort=False): def findvideos(data, skip=False): """ - Recorre la lista de servidores disponibles y ejecuta la funcion findvideosbyserver para cada uno de ellos - :param data: Texto donde buscar los enlaces - :param skip: Indica un limite para dejar de recorrer la lista de servidores. Puede ser un booleano en cuyo caso - seria False para recorrer toda la lista (valor por defecto) o True para detenerse tras el primer servidor que - retorne algun enlace. Tambien puede ser un entero mayor de 1, que representaria el numero maximo de enlaces a buscar. + Scroll through the list of available servers and run the findvideosbyserver function for each of them + :param data: Text where to look for the links + :param skip: Indicates a limit to stop scrolling through the list of servers. It can be a boolean in which case + It would be False to go through the whole list (default value) or True to stop after the first server that + return some link. It can also be an integer greater than 1, which would represent the maximum number of links to search. 
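A sketch of the skip semantics described above; the embed snippet and server are only examples:

    from core import servertools

    data = '<a href="https://streamtape.com/v/abc123">mirror</a>'  # hypothetical embed
    all_links = servertools.findvideos(data)              # scan every server
    first_hit = servertools.findvideos(data, skip=True)   # stop at the first matching server
    two_links = servertools.findvideos(data, skip=2)      # stop after two links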
:return: """ logger.info() @@ -155,7 +154,7 @@ def findvideos(data, skip=False): is_filter_servers = False - # Ejecuta el findvideos en cada servidor activo + # Run findvideos on each active server for serverid in servers_list: '''if not is_server_enabled(serverid): continue''' @@ -183,16 +182,16 @@ def findvideosbyserver(data, serverid): return [] devuelve = [] if "find_videos" in server_parameters: - # Recorre los patrones + # Walk the patterns for pattern in server_parameters["find_videos"].get("patterns", []): msg = "%s\npattern: %s" % (serverid, pattern["pattern"]) - # Recorre los resultados + # Scroll through the results for match in re.compile(pattern["pattern"], re.DOTALL).finditer(data): url = pattern["url"] - # Crea la url con los datos + # Create the url with the data for x in range(len(match.groups())): url = url.replace("\\%s" % (x + 1), match.groups()[x]) - msg += "\nurl encontrada: %s" % url + msg += "\nfound url: %s" % url value = server_parameters["name"], url, serverid, server_parameters.get("thumbnail", "") if value not in devuelve and url not in server_parameters["find_videos"].get("ignore_urls", []): devuelve.append(value) @@ -211,7 +210,7 @@ def get_server_from_url(url): logger.info() servers_list = list(get_servers_list().keys()) - # Ejecuta el findvideos en cada servidor activo + # Run findvideos on each active server for serverid in servers_list: '''if not is_server_enabled(serverid): continue''' @@ -224,18 +223,18 @@ def get_server_from_url(url): if not server_parameters["active"]: continue if "find_videos" in server_parameters: - # Recorre los patrones + # Walk the patterns for n, pattern in enumerate(server_parameters["find_videos"].get("patterns", [])): msg = "%s\npattern: %s" % (serverid, pattern["pattern"]) if not "pattern_compiled" in pattern: # logger.info('compiled ' + serverid) pattern["pattern_compiled"] = re.compile(pattern["pattern"]) dict_servers_parameters[serverid]["find_videos"]["patterns"][n]["pattern_compiled"] = pattern["pattern_compiled"] - # Recorre los resultados + # Scroll through the results match = re.search(pattern["pattern_compiled"], url) if match: url = pattern["url"] - # Crea la url con los datos + # Create the url with the data for x in range(len(match.groups())): url = url.replace("\\%s" % (x + 1), match.groups()[x]) msg += "\nurl encontrada: %s" % url @@ -249,19 +248,19 @@ def get_server_from_url(url): def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialogo=False, background_dialog=False): """ - Función para obtener la url real del vídeo - @param server: Servidor donde está alojado el vídeo + Function to get the real url of the video + @param server: Server where the video is hosted @type server: str - @param url: url del vídeo + @param url: video url @type url: str - @param video_password: Password para el vídeo + @param video_password: Password for the video @type video_password: str - @param muestra_dialogo: Muestra el diálogo de progreso + @param muestra_dialogo: Show progress dialog @type muestra_dialogo: bool @type background_dialog: bool @param background_dialog: if progress dialog should be in background - @return: devuelve la url del video + @return: returns the url of the video @rtype: list """ logger.info("Server: %s, Url: %s" % (server, url)) @@ -273,14 +272,14 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo error_messages = [] opciones = [] - # Si el vídeo es "directo" o "local", no hay que buscar más + # If the video is "direct" or "local", look no further 
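A hedged sketch of calling the resolver with the return shape documented above; the server id and url are placeholders:

    from core import servertools

    video_urls, ok, error_msg = servertools.resolve_video_urls_for_playing(
        "streamtape", "https://streamtape.com/v/abc123", muestra_dialogo=False)  # placeholder link
    if ok:
        label, playable_url = video_urls[0]  # entries are [label, url] pairs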
if server == "directo" or server == "local": if isinstance(video_password, list): return video_password, len(video_password) > 0, "
".join(error_messages) - logger.info("Server: %s, la url es la buena" % server) + logger.info("Server: %s, url is good" % server) video_urls.append(["%s [%s]" % (urlparse.urlparse(url)[2][-4:], server), url]) - # Averigua la URL del vídeo + # Find out the video URL else: if server: server_parameters = get_server_parameters(server) @@ -288,12 +287,11 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo server_parameters = {} if server_parameters: - # Muestra un diágo de progreso + # Show a progress dialog if muestra_dialogo: - progreso = (platformtools.dialog_progress_bg if background_dialog else platformtools.dialog_progress)(config.get_localized_string(20000), - config.get_localized_string(70180) % server_parameters["name"]) + progreso = (platformtools.dialog_progress_bg if background_dialog else platformtools.dialog_progress)(config.get_localized_string(20000), config.get_localized_string(70180) % server_parameters["name"]) - # Cuenta las opciones disponibles, para calcular el porcentaje + # Count the available options, to calculate the percentage orden = [ ["free"] + [server] + [premium for premium in server_parameters["premium"] if not premium == server], @@ -309,76 +307,76 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo priority = int(config.get_setting("resolve_priority")) opciones = sorted(opciones, key=lambda x: orden[priority].index(x)) - logger.info("Opciones disponibles: %s | %s" % (len(opciones), opciones)) + logger.info("Available options: %s | %s" % (len(opciones), opciones)) else: - logger.error("No existe conector para el servidor %s" % server) + logger.error("There is no connector for the server %s" % server) error_messages.append(config.get_localized_string(60004) % server) muestra_dialogo = False - # Importa el server + # Import the server try: server_module = __import__('servers.%s' % server, None, None, ["servers.%s" % server]) - logger.info("Servidor importado: %s" % server_module) + logger.info("Imported server: %s" % server_module) except: server_module = None if muestra_dialogo: progreso.close() - logger.error("No se ha podido importar el servidor: %s" % server) + logger.error("Could not import server: %s" % server) import traceback logger.error(traceback.format_exc()) - # Si tiene una función para ver si el vídeo existe, lo comprueba ahora + # If it has a function to see if the video exists, check it now if hasattr(server_module, 'test_video_exists'): - logger.info("Invocando a %s.test_video_exists" % server) + logger.info("Invoking a %s.test_video_exists" % server) try: video_exists, message = server_module.test_video_exists(page_url=url) if not video_exists: error_messages.append(message) - logger.info("test_video_exists dice que el video no existe") + logger.info("test_video_exists says video doesn't exist") if muestra_dialogo: progreso.close() else: - logger.info("test_video_exists dice que el video SI existe") + logger.info("test_video_exists says the video DOES exist") except: - logger.error("No se ha podido comprobar si el video existe") + logger.error("Could not verify if the video exists") import traceback logger.error(traceback.format_exc()) - # Si el video existe y el modo free está disponible, obtenemos la url + # If the video exists and the free mode is available, we get the url if video_exists: for opcion in opciones: - # Opcion free y premium propio usa el mismo server + # Own free and premium option uses the same server if opcion == "free" or opcion == server: serverid = 
server_module server_name = server_parameters["name"] - # Resto de opciones premium usa un debrider + # Rest of premium options use a debrider else: serverid = __import__('servers.debriders.%s' % opcion, None, None, ["servers.debriders.%s" % opcion]) server_name = get_server_parameters(opcion)["name"] - # Muestra el progreso + # Show progress if muestra_dialogo: progreso.update((old_div(100, len(opciones))) * opciones.index(opcion), config.get_localized_string(70180) % server_name) - # Modo free + # Free mode if opcion == "free": try: - logger.info("Invocando a %s.get_video_url" % server) + logger.info("Invoking a %s.get_video_url" % server) response = serverid.get_video_url(page_url=url, video_password=video_password) video_urls.extend(response) except: - logger.error("Error al obtener la url en modo free") + logger.error("Error getting url in free mode") error_messages.append(config.get_localized_string(60006) % server_name) import traceback logger.error(traceback.format_exc()) - # Modo premium + # Premium mode else: try: - logger.info("Invocando a %s.get_video_url" % opcion) + logger.info("Invoking a %s.get_video_url" % opcion) response = serverid.get_video_url(page_url=url, premium=True, user=config.get_setting("user", server=opcion), password=config.get_setting("password", server=opcion), @@ -390,27 +388,27 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo else: error_messages.append(config.get_localized_string(60006) % server_name) except: - logger.error("Error en el servidor: %s" % opcion) + logger.error("Server errorr: %s" % opcion) error_messages.append(config.get_localized_string(60006) % server_name) import traceback logger.error(traceback.format_exc()) - # Si ya tenemos URLS, dejamos de buscar + # If we already have URLS, we stop searching if video_urls and config.get_setting("resolve_stop") == True: break - # Cerramos el progreso + # We close progress if muestra_dialogo: progreso.update(100, config.get_localized_string(60008)) progreso.close() - # Si no hay opciones disponibles mostramos el aviso de las cuentas premium + # If there are no options available, we show the notice of premium accounts if video_exists and not opciones and server_parameters.get("premium"): listapremium = [get_server_parameters(premium)["name"] for premium in server_parameters["premium"]] error_messages.append( config.get_localized_string(60009) % (server, " o ".join(listapremium))) - # Si no tenemos urls ni mensaje de error, ponemos uno generico + # If we do not have urls or error messages, we put a generic one elif not video_urls and not error_messages: error_messages.append(config.get_localized_string(60006) % get_server_parameters(server)["name"]) @@ -419,51 +417,51 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo def get_server_name(serverid): """ - Función obtener el nombre del servidor real a partir de una cadena. - @param serverid: Cadena donde mirar + Function get real server name from string. 
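A small sketch of the lookup helpers documented in this hunk; the input string is an example only:

    from core import servertools

    name = servertools.get_server_name("Streamtape.com")  # example input; "" if the server is unknown
    if name and servertools.is_server_enabled(name):
        print(servertools.get_server_parameters(name)["name"])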
+ @param serverid: Chain where to look @type serverid: str - @return: Nombre del servidor + @return: Server name @rtype: str """ serverid = serverid.lower().split(".")[0] - # Obtenemos el listado de servers + # We get the list of servers server_list = list(get_servers_list().keys()) - # Si el nombre está en la lista + # If the name is in the list if serverid in server_list: return serverid - # Recorre todos los servers buscando el nombre + # Browse all servers looking for the name for server in server_list: params = get_server_parameters(server) - # Si la nombre esta en el listado de ids + # If the name is in the list of ids if serverid in params["id"]: return server - # Si el nombre es mas de una palabra, comprueba si algun id esta dentro del nombre: + # If the name is more than one word, check if any id is inside the name: elif len(serverid.split()) > 1: for id in params["id"]: if id in serverid: return server - # Si no se encuentra nada se devuelve una cadena vacia + # If nothing is found an empty string is returned return "" def is_server_enabled(server): """ - Función comprobar si un servidor está segun la configuración establecida - @param server: Nombre del servidor + Function check if a server is according to the established configuration + @param server: Server name @type server: str - @return: resultado de la comprobación + @return: check result @rtype: bool """ server = get_server_name(server) - # El server no existe + # The server does not exist if not server: return False @@ -481,11 +479,11 @@ def is_server_enabled(server): def get_server_parameters(server): """ - Obtiene los datos del servidor - @param server: Nombre del servidor + Get data from server + @param server: Server name @type server: str - @return: datos del servidor + @return: server data @rtype: dict """ # logger.info("server %s" % server) @@ -503,12 +501,11 @@ def get_server_parameters(server): # Debriders elif filetools.isfile(filetools.join(config.get_runtime_path(), "servers", "debriders", server + ".json")): path = filetools.join(config.get_runtime_path(), "servers", "debriders", server + ".json") - # - #Cuando no está bien definido el server en el canal (no existe conector), muestra error por no haber "path" y se tiene que revisar el canal - # + + # When the server is not well defined in the channel (there is no connector), it shows an error because there is no "path" and the channel has to be checked dict_server = jsontools.load(filetools.read(path)) - # Imagenes: se admiten url y archivos locales dentro de "resources/images" + # Images: url and local files are allowed inside "resources / images" if dict_server.get("thumbnail") and "://" not in dict_server["thumbnail"]: dict_server["thumbnail"] = filetools.join(config.get_runtime_path(), "resources", "media", "servers", dict_server["thumbnail"]) @@ -573,7 +570,7 @@ def get_server_controls_settings(server_name): # Conversion de str a bool, etc... for c in list_controls: if 'id' not in c or 'type' not in c or 'default' not in c: - # Si algun control de la lista no tiene id, type o default lo ignoramos + # If any control in the list does not have id, type or default, we ignore it continue # new dict with key(id) and value(default) from settings @@ -584,28 +581,28 @@ def get_server_controls_settings(server_name): def get_server_setting(name, server, default=None): """ - Retorna el valor de configuracion del parametro solicitado. + Returns the configuration value of the requested parameter. 
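A brief sketch of the per-server settings helpers whose docstrings follow; the server and keys are hypothetical:

    from core import servertools

    user = servertools.get_server_setting("user", "realdebrid", default="")  # hypothetical server/key
    servertools.set_server_setting("premium", True, "realdebrid")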
- Devuelve el valor del parametro 'name' en la configuracion propia del servidor 'server'. + Returns the value of the parameter 'name' in the own configuration of the server 'server'. - Busca en la ruta \addon_data\plugin.video.addon\settings_servers el archivo server_data.json y lee - el valor del parametro 'name'. Si el archivo server_data.json no existe busca en la carpeta servers el archivo - server.json y crea un archivo server_data.json antes de retornar el valor solicitado. Si el parametro 'name' - tampoco existe en el el archivo server.json se devuelve el parametro default. + Look in the path \addon_data\plugin.video.addon\settings_servers for the file server_data.json and read + the value of the parameter 'name'. If the server_data.json file does not exist look in the servers folder for the file + server.json and create a server_data.json file before returning the requested value. If the parameter 'name' + also does not exist in the server.json file the default parameter is returned. - @param name: nombre del parametro + @param name: parameter name @type name: str - @param server: nombre del servidor + @param server: server name @type server: str - @param default: valor devuelto en caso de que no exista el parametro name + @param default: return value in case the name parameter does not exist @type default: any - @return: El valor del parametro 'name' + @return: The parameter value 'name' @rtype: any """ - # Creamos la carpeta si no existe + # We create the folder if it does not exist if not filetools.exists(filetools.join(config.get_data_path(), "settings_servers")): filetools.mkdir(filetools.join(config.get_data_path(), "settings_servers")) @@ -613,34 +610,34 @@ def get_server_setting(name, server, default=None): dict_settings = {} dict_file = {} if filetools.exists(file_settings): - # Obtenemos configuracion guardada de ../settings/channel_data.json + # We get saved configuration from ../settings/channel_data.json try: dict_file = jsontools.load(filetools.read(file_settings)) if isinstance(dict_file, dict) and 'settings' in dict_file: dict_settings = dict_file['settings'] except EnvironmentError: - logger.info("ERROR al leer el archivo: %s" % file_settings) + logger.info("ERROR when reading the file: %s" % file_settings) if not dict_settings or name not in dict_settings: - # Obtenemos controles del archivo ../servers/server.json + # We get controls from the file ../servers/server.json try: list_controls, default_settings = get_server_controls_settings(server) except: default_settings = {} - if name in default_settings: # Si el parametro existe en el server.json creamos el server_data.json + if name in default_settings: # If the parameter exists in the server.json we create the server_data.json default_settings.update(dict_settings) dict_settings = default_settings dict_file['settings'] = dict_settings - # Creamos el archivo ../settings/channel_data.json + # We create the file ../settings/channel_data.json if not filetools.write(file_settings, jsontools.dump(dict_file)): - logger.info("ERROR al salvar el archivo: %s" % file_settings) + logger.info("ERROR saving file: %s" % file_settings) - # Devolvemos el valor del parametro local 'name' si existe, si no se devuelve default + # We return the value of the local parameter 'name' if it exists, if default is not returned return dict_settings.get(name, default) def set_server_setting(name, value, server): - # Creamos la carpeta si no existe + # We create the folder if it does not exist if not 
filetools.exists(filetools.join(config.get_data_path(), "settings_servers")):
filetools.mkdir(filetools.join(config.get_data_path(), "settings_servers"))
@@ -650,24 +647,24 @@ def set_server_setting(name, value, server):
dict_file = None

if filetools.exists(file_settings):
- # Obtenemos configuracion guardada de ../settings/channel_data.json
+ # We get the saved configuration from ../settings/channel_data.json
try:
dict_file = jsontools.load(filetools.read(file_settings))
dict_settings = dict_file.get('settings', {})
except EnvironmentError:
- logger.info("ERROR al leer el archivo: %s" % file_settings)
+ logger.info("ERROR when reading the file: %s" % file_settings)

dict_settings[name] = value

- # comprobamos si existe dict_file y es un diccionario, sino lo creamos
+ # we check if dict_file exists and is a dictionary, if not we create it
if dict_file is None or not dict_file:
dict_file = {}

dict_file['settings'] = dict_settings

- # Creamos el archivo ../settings/channel_data.json
+ # We create the file ../settings/channel_data.json
if not filetools.write(file_settings, jsontools.dump(dict_file)):
- logger.info("ERROR al salvar el archivo: %s" % file_settings)
+ logger.info("ERROR saving file: %s" % file_settings)
return None

return value
@@ -675,10 +672,9 @@ def set_server_setting(name, value, server):
def get_servers_list():
"""
- Obtiene un diccionario con todos los servidores disponibles
+ Get a dictionary with all available servers

- @return: Diccionario cuyas claves son los nombre de los servidores (nombre del json)
- y como valor un diccionario con los parametros del servidor.
+ @return: Dictionary whose keys are the server names (name of the json) and whose values are dictionaries with the server parameters.
@rtype: dict
"""
global server_list
@@ -693,10 +689,9 @@ def get_servers_list():
def get_debriders_list():
"""
- Obtiene un diccionario con todos los debriders disponibles
+ Get a dictionary with all available debriders

- @return: Diccionario cuyas claves son los nombre de los debriders (nombre del json)
- y como valor un diccionario con los parametros del servidor.
+ @return: Dictionary whose keys are the names of the debriders (name of the json) and whose values are dictionaries with the server parameters.
@rtype: dict
"""
server_list = {}
@@ -711,60 +706,52 @@ def get_debriders_list():
def sort_servers(servers_list):
"""
- Si esta activada la opcion "Ordenar servidores" en la configuracion de servidores y existe un listado de servidores
- favoritos en la configuracion lo utiliza para ordenar la lista servers_list
- :param servers_list: Listado de servidores para ordenar. Los elementos de la lista servers_list pueden ser strings
- u objetos Item. En cuyo caso es necesario q tengan un atributo item.server del tipo str.
- :return: Lista del mismo tipo de objetos que servers_list ordenada en funcion de los servidores favoritos.
+ If the "Sort servers" option is enabled in the server settings and there is a list of favorite servers
+ in the configuration, it is used to sort servers_list
+ :param servers_list: List of servers to sort. The items in servers_list can be strings or Item objects; in that case they must have an item.server attribute of type str.
+ :return: List of the same type of objects as servers_list ordered according to the favorite servers.
""" if servers_list and config.get_setting('favorites_servers'): if isinstance(servers_list[0], Item): - servers_list = sorted(servers_list, - key=lambda x: config.get_setting("favorites_servers_list", server=x.server) or 100) + servers_list = sorted(servers_list, key=lambda x: config.get_setting("favorites_servers_list", server=x.server) or 100) else: - servers_list = sorted(servers_list, - key=lambda x: config.get_setting("favorites_servers_list", server=x) or 100) + servers_list = sorted(servers_list, key=lambda x: config.get_setting("favorites_servers_list", server=x) or 100) return servers_list def filter_servers(servers_list): """ - Si esta activada la opcion "Filtrar por servidores" en la configuracion de servidores, elimina de la lista - de entrada los servidores incluidos en la Lista Negra. - :param servers_list: Listado de servidores para filtrar. Los elementos de la lista servers_list pueden ser strings - u objetos Item. En cuyo caso es necesario q tengan un atributo item.server del tipo str. - :return: Lista del mismo tipo de objetos que servers_list filtrada en funcion de la Lista Negra. + If the option "Filter by servers" is activated in the server configuration, removes the servers included in the Black List from the entry list. + :param servers_list: List of servers to filter. The items in the servers_list can be strings or Item objects. In which case it is necessary that they have an item.server attribute of type str. + :return: List of the same type of objects as servers_list filtered based on the Black List. """ - #Eliminamos los inactivos + # We eliminate the inactive if servers_list: servers_list = [i for i in servers_list if not i.server or is_server_enabled(i.server)] - + if servers_list and config.get_setting('filter_servers'): if isinstance(servers_list[0], Item): servers_list_filter = [x for x in servers_list if not config.get_setting("black_list", server=x.server)] else: servers_list_filter = [x for x in servers_list if not config.get_setting("black_list", server=x)] - # Si no hay enlaces despues de filtrarlos - if servers_list_filter or not platformtools.dialog_yesno(config.get_localized_string(60000), - config.get_localized_string(60010), - config.get_localized_string(70281)): + # If there are no links after filtering + if servers_list_filter or not platformtools.dialog_yesno(config.get_localized_string(60000), config.get_localized_string(60010), config.get_localized_string(70281)): servers_list = servers_list_filter - + return servers_list -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# Comprobación de enlaces -# ----------------------- + +# Checking links def check_list_links(itemlist, numero='', timeout=3): """ - Comprueba una lista de enlaces a videos y la devuelve modificando el titulo con la verificacion. - El parámetro numero indica cuantos enlaces hay que verificar (0:5, 1:10, 2:15, 3:20) - El parámetro timeout indica un tope de espera para descargar la página + Check a list of video links and return it by modifying the title with verification. + The number parameter indicates how many links to check (0:5, 1:10, 2:15, 3:20) + The timeout parameter indicates a waiting limit to download the page """ numero = numero if numero > 4 else ((int(numero) + 1) * 5) if numero != '' else 5 import sys @@ -790,47 +777,43 @@ def check_list_links(itemlist, numero='', timeout=3): def check_video_link(item, timeout=3): """ - Comprueba si el enlace a un video es valido y devuelve un string de 2 posiciones con la verificacion. 
- :param url, server: Link y servidor - :return: str(2) '??':No se ha podido comprobar. 'Ok':Parece que el link funciona. 'NO':Parece que no funciona. - """ + Check if the link to a video is valid and return a 2-position string with verification. + :param url, server: Link and server + :return: str(2) '??':Could not be verified. 'Ok': The link seems to work. 'NO': It doesn't seem to work. + """ url = item.url server = item.server - + NK = "[COLOR 0xFFF9B613][B]" + u"\u2022".encode('utf-8') + "[/B][/COLOR]" OK = "[COLOR 0xFF00C289][B]" + u"\u2022".encode('utf-8') + "[/B][/COLOR]" KO = "[COLOR 0xFFC20000][B]" + u"\u2022".encode('utf-8') + "[/B][/COLOR]" - # NK = "[COLOR 0xFFF9B613][B]♥[/B][/COLOR]" - # OK = "[COLOR 0xFF00C289][B]♥[/B][/COLOR]" - # KO = "[COLOR 0xFFC20000][B]♥[/B][/COLOR]" - try: server_module = __import__('servers.%s' % server, None, None, ["servers.%s" % server]) except: server_module = None - logger.info("[check_video_link] No se puede importar el servidor! %s" % server) + logger.info("[check_video_link] Cannot import server! %s" % server) return item, NK - + if hasattr(server_module, 'test_video_exists'): ant_timeout = httptools.HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT - httptools.HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT = timeout # Limitar tiempo de descarga - + httptools.HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT = timeout # Limit download time + try: video_exists, message = server_module.test_video_exists(page_url=url) if not video_exists: - logger.info("[check_video_link] No existe! %s %s %s" % (message, server, url)) + logger.info("[check_video_link] Does not exist! %s %s %s" % (message, server, url)) resultado = KO else: - logger.info("[check_video_link] comprobacion OK %s %s" % (server, url)) + logger.info("[check_video_link] check ok %s %s" % (server, url)) resultado = OK except: - logger.info("[check_video_link] No se puede comprobar ahora! %s %s" % (server, url)) + logger.info("[check_video_link] Can't check now! %s %s" % (server, url)) resultado = NK finally: - httptools.HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT = ant_timeout # Restaurar tiempo de descarga + httptools.HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT = ant_timeout # Restore download time return item, resultado - logger.info("[check_video_link] No hay test_video_exists para servidor: %s" % server) + logger.info("[check_video_link] There is no test_video_exists for server: %s" % server) return item, NK diff --git a/core/tmdb.py b/core/tmdb.py index 9fa9e655..13dd22cd 100644 --- a/core/tmdb.py +++ b/core/tmdb.py @@ -1,90 +1,65 @@ # -*- coding: utf-8 -*- -#from future import standard_library -#standard_library.install_aliases() -#from builtins import str +# from future import standard_library +# standard_library.install_aliases() +# from builtins import str import sys PY3 = False if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int if PY3: - import urllib.parse as urllib # Es muy lento en PY2. En PY3 es nativo + import urllib.parse as urllib # It is very slow in PY2. 
In PY3 it is native else: - import urllib # Usamos el nativo de PY2 que es más rápido + import urllib # We use the native of PY2 which is faster from future.builtins import range from future.builtins import object -import ast +import ast, copy, re, sqlite3, time, xbmcaddon -import copy -import re -import sqlite3 -import time - -import xbmcaddon - -from core import filetools -from core import httptools -from core import jsontools -from core import scrapertools +from core import filetools, httptools, jsontools, scrapertools from core.item import InfoLabels -from platformcode import config -from platformcode import logger +from platformcode import config, logger info_language = ["de", "en", "es", "fr", "it", "pt"] # from videolibrary.json def_lang = info_language[config.get_setting("info_language", "videolibrary")] -# ----------------------------------------------------------------------------------------------------------- -# Conjunto de funciones relacionadas con las infoLabels. -# version 1.0: -# Version inicial +# ------------------------------------------------- -------------------------------------------------- -------- +# Set of functions related to infoLabels. +# version 1.0: +# Initial version # -# Incluyen: -# set_infoLabels(source, seekTmdb, idioma_busqueda): Obtiene y fija (item.infoLabels) los datos extras de una o -# varias series, capitulos o peliculas. -# set_infoLabels_item(item, seekTmdb, idioma_busqueda): Obtiene y fija (item.infoLabels) los datos extras de una -# serie, capitulo o pelicula. -# set_infoLabels_itemlist(item_list, seekTmdb, idioma_busqueda): Obtiene y fija (item.infoLabels) los datos -# extras de una lista de series, capitulos o peliculas. -# infoLabels_tostring(item): Retorna un str con la lista ordenada con los infoLabels del item +# Include: +# - set_infoLabels (source, seekTmdb, search_language): Gets and sets (item.infoLabels) the extra data of one or several series, chapters or movies. +# - set_infoLabels_item (item, seekTmdb, search_language): Gets and sets (item.infoLabels) the extra data of a series, chapter or movie. +# - set_infoLabels_itemlist (item_list, seekTmdb, search_language): Gets and sets (item.infoLabels) the data extras from a list of series, chapters or movies. +# - infoLabels_tostring (item): Returns a str with the list ordered with the item's infoLabels # -# Uso: -# tmdb.set_infoLabels(item, seekTmdb = True) +# Usage: +# - tmdb.set_infoLabels (item, seekTmdb = True) # -# Obtener datos basicos de una pelicula: -# Antes de llamar al metodo set_infoLabels el titulo a buscar debe estar en item.contentTitle -# y el año en item.infoLabels['year']. +# Get basic data from a movie: +# Before calling the set_infoLabels method the title to search for must be in item.contentTitle and the year in item.infoLabels ['year']. # -# Obtener datos basicos de una serie: -# Antes de llamar al metodo set_infoLabels el titulo a buscar debe estar en item.show o en -# item.contentSerieName. +# Obtain basic data from a series: +# Before calling the set_infoLabels method the title to search for must be in item.show or in item.contentSerieName. # -# Obtener mas datos de una pelicula o serie: -# Despues de obtener los datos basicos en item.infoLabels['tmdb'] tendremos el codigo de la serie o pelicula. 
-# Tambien podriamos directamente fijar este codigo, si se conoce, o utilizar los codigo correspondientes de:
-# IMDB (en item.infoLabels['IMDBNumber'] o item.infoLabels['code'] o item.infoLabels['imdb_id']), TVDB
-# (solo series, en item.infoLabels['tvdb_id']),
-# Freebase (solo series, en item.infoLabels['freebase_mid']),TVRage (solo series, en
-# item.infoLabels['tvrage_id'])
+ # Get more data from a movie or series:
+ # After obtaining the basic data, in item.infoLabels['tmdb'] we will have the code of the series or movie.
+ # We could also directly set this code, if known, or use the corresponding codes of:
+ # IMDB (in item.infoLabels['IMDBNumber'] or item.infoLabels['code'] or item.infoLabels['imdb_id']), TVDB (series only, in item.infoLabels['tvdb_id']),
+ # Freebase (series only, in item.infoLabels['freebase_mid']), TVRage (series only, in item.infoLabels['tvrage_id'])
#
-# Obtener datos de una temporada:
-# Antes de llamar al metodo set_infoLabels el titulo de la serie debe estar en item.show o en
-# item.contentSerieName,
-# el codigo TMDB de la serie debe estar en item.infoLabels['tmdb'] (puede fijarse automaticamente mediante
-# la consulta de datos basica)
-# y el numero de temporada debe estar en item.infoLabels['season'].
+ # Get data from a season:
+ # Before calling the set_infoLabels method the series title must be in item.show or in item.contentSerieName,
+ # the series TMDB code must be in item.infoLabels['tmdb'] (it can be set automatically by the basic data query)
+ # and the season number must be in item.infoLabels['season'].
#
-# Obtener datos de un episodio:
-# Antes de llamar al metodo set_infoLabels el titulo de la serie debe estar en item.show o en
-# item.contentSerieName,
-# el codigo TMDB de la serie debe estar en item.infoLabels['tmdb'] (puede fijarse automaticamente mediante la
-# consulta de datos basica),
-# el numero de temporada debe estar en item.infoLabels['season'] y el numero de episodio debe estar en
-# item.infoLabels['episode'].
-#
-#
-# --------------------------------------------------------------------------------------------------------------
+ # Get data from an episode:
+ # Before calling the set_infoLabels method the series title must be in item.show or in item.contentSerieName,
+ # the TMDB code of the series must be in item.infoLabels['tmdb'] (it can be set automatically using the basic data query),
+ # the season number must be in item.infoLabels['season'] and the episode number must be in item.infoLabels['episode'].
+ # --------------------------------------------------------------------------------------------------------------

otmdb_global = None
fname = filetools.join(config.get_data_path(), "kod_db.sqlite")

@@ -110,7 +85,7 @@ def drop_bd():
create_bd()

-# El nombre de la funcion es el nombre del decorador y recibe la funcion que decora.
+ # The name of the function is the name of the decorator, and it receives the function it decorates.
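+ # A minimal usage sketch (illustrative only, not part of the original module): the decorator is
+ # meant to wrap a function whose positional arguments form the cache key, e.g. a TMDB JSON fetcher:
+ #
+ #     @cache_response
+ #     def get_json(url):
+ #         return jsontools.load(httptools.downloadpage(url, cookies=False).data)
+ #
+ # With the "tmdb_cache" setting enabled, a repeated call with the same url is answered from the
+ # local kod_db.sqlite cache (subject to the configured expiry) instead of hitting the API again.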
def cache_response(fn): logger.info() @@ -153,7 +128,7 @@ def cache_response(fn): # 1 month - 30 days elif cache_expire == 3: - # no tenemos en cuenta febrero o meses con 31 días + # we do not take into account February or months with 31 days if elapsed > datetime.timedelta(days=30): valided = False else: @@ -167,7 +142,7 @@ def cache_response(fn): result = {} try: - # no está activa la cache + # cache is not active if not config.get_setting("tmdb_cache", default=False): result = fn(*args) else: @@ -199,7 +174,7 @@ def cache_response(fn): # elapsed_time = time.time() - start_time # logger.debug("TARDADO %s" % elapsed_time) - # error al obtener los datos + # error getting data except Exception as ex: message = "An exception of type %s occured. Arguments:\n%s" % (type(ex).__name__, repr(ex.args)) logger.error("error in: %s" % message) @@ -211,17 +186,15 @@ def cache_response(fn): def set_infoLabels(source, seekTmdb=True, idioma_busqueda=def_lang, forced=False): """ - Dependiendo del tipo de dato de source obtiene y fija (item.infoLabels) los datos extras de una o varias series, - capitulos o peliculas. + Depending on the data type of source, it obtains and sets (item.infoLabels) the extra data of one or more series, chapters or movies. - @param source: variable que contiene la información para establecer infoLabels + @param source: variable that contains the information to set infoLabels @type source: list, item - @param seekTmdb: si es True hace una busqueda en www.themoviedb.org para obtener los datos, en caso contrario - obtiene los datos del propio Item. + @param seekTmdb: if it is True, it searches www.themoviedb.org to obtain the data, otherwise it obtains the data of the Item itself. @type seekTmdb: bool - @param idioma_busqueda: fija el valor de idioma en caso de busqueda en www.themoviedb.org + @param idioma_busqueda: set the language value in case of search at www.themoviedb.org @type idioma_busqueda: str - @return: un numero o lista de numeros con el resultado de las llamadas a set_infoLabels_item + @return: a number or list of numbers with the result of the calls to set_infoLabels_item @rtype: int, list """ @@ -240,23 +213,18 @@ def set_infoLabels(source, seekTmdb=True, idioma_busqueda=def_lang, forced=False def set_infoLabels_itemlist(item_list, seekTmdb=False, idioma_busqueda=def_lang, forced=False): """ - De manera concurrente, obtiene los datos de los items incluidos en la lista item_list. + Concurrently, it gets the data of the items included in the item_list. - La API tiene un limite de 40 peticiones por IP cada 10'' y por eso la lista no deberia tener mas de 30 items - para asegurar un buen funcionamiento de esta funcion. + The API has a limit of 40 requests per IP every 10 '' and that is why the list should not have more than 30 items to ensure the proper functioning of this function. - @param item_list: listado de objetos Item que representan peliculas, series o capitulos. El atributo - infoLabels de cada objeto Item sera modificado incluyendo los datos extras localizados. + @param item_list: list of Item objects that represent movies, series or chapters. The infoLabels attribute of each Item object will be modified including the extra localized data. @type item_list: list - @param seekTmdb: Si es True hace una busqueda en www.themoviedb.org para obtener los datos, en caso contrario - obtiene los datos del propio Item si existen. 
+ @param seekTmdb: If it is True, it searches www.themoviedb.org to obtain the data, otherwise it obtains the data of the Item itself if they exist. @type seekTmdb: bool - @param idioma_busqueda: Codigo del idioma segun ISO 639-1, en caso de busqueda en www.themoviedb.org. + @param idioma_busqueda: Language code according to ISO 639-1, in case of search at www.themoviedb.org. @type idioma_busqueda: str - @return: Una lista de numeros cuyo valor absoluto representa la cantidad de elementos incluidos en el atributo - infoLabels de cada Item. Este numero sera positivo si los datos se han obtenido de www.themoviedb.org y - negativo en caso contrario. + @return: A list of numbers whose absolute value represents the number of elements included in the infoLabels attribute of each Item. This number will be positive if the data has been obtained from www.themoviedb.org and negative otherwise. @rtype: list """ @@ -284,32 +252,29 @@ def set_infoLabels_itemlist(item_list, seekTmdb=False, idioma_busqueda=def_lang, i += 1 l_hilo.append(t) - # esperar q todos los hilos terminen + # wait for all the threads to end for x in l_hilo: x.join() - # Ordenar lista de resultados por orden de llamada para mantener el mismo orden q item_list + # Sort results list by call order to keep the same order q item_list r_list.sort(key=lambda i: i[0]) - # Reconstruir y devolver la lista solo con los resultados de las llamadas individuales + # Rebuild and return list only with results of individual calls return [ii[2] for ii in r_list] def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda=def_lang, lock=None): """ - Obtiene y fija (item.infoLabels) los datos extras de una serie, capitulo o pelicula. + Gets and sets (item.infoLabels) the extra data of a series, chapter or movie. - @param item: Objeto Item que representa un pelicula, serie o capitulo. El atributo infoLabels sera modificado - incluyendo los datos extras localizados. + @param item: Item object that represents a movie, series or chapter. The infoLabels attribute will be modified including the extra localized data. @type item: Item - @param seekTmdb: Si es True hace una busqueda en www.themoviedb.org para obtener los datos, en caso contrario - obtiene los datos del propio Item si existen. + @param seekTmdb: If it is True, it searches www.themoviedb.org to obtain the data, otherwise it obtains the data of the Item itself if they exist. @type seekTmdb: bool - @param idioma_busqueda: Codigo del idioma segun ISO 639-1, en caso de busqueda en www.themoviedb.org. + @param idioma_busqueda: Language code according to ISO 639-1, in case of search at www.themoviedb.org. @type idioma_busqueda: str - @param lock: para uso de threads cuando es llamado del metodo 'set_infoLabels_itemlist' - @return: Un numero cuyo valor absoluto representa la cantidad de elementos incluidos en el atributo item.infoLabels. - Este numero sera positivo si los datos se han obtenido de www.themoviedb.org y negativo en caso contrario. + @param lock: For use of threads when calling the 'set_infoLabels_itemlist' method + @return: A number whose absolute value represents the number of elements included in the item.infoLabels attribute. This number will be positive if the data has been obtained from www.themoviedb.org and negative otherwise. @rtype: int """ global otmdb_global @@ -322,7 +287,7 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda=def_lang, lock=None item.fanart = item.infoLabels['fanart'] if seekTmdb: - # Comprobamos q tipo de contenido es... 
+ # We check what type of content it is... if item.contentType == 'movie': tipo_busqueda = 'movie' else: @@ -338,15 +303,12 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda=def_lang, lock=None if lock: lock.acquire() - if not otmdb_global or (item.infoLabels['tmdb_id'] - and str(otmdb_global.result.get("id")) != item.infoLabels['tmdb_id']) \ + if not otmdb_global or (item.infoLabels['tmdb_id'] and str(otmdb_global.result.get("id")) != item.infoLabels['tmdb_id']) \ or (otmdb_global.texto_buscado and otmdb_global.texto_buscado != item.infoLabels['tvshowtitle']): if item.infoLabels['tmdb_id']: - otmdb_global = Tmdb(id_Tmdb=item.infoLabels['tmdb_id'], tipo=tipo_busqueda, - idioma_busqueda=idioma_busqueda) + otmdb_global = Tmdb(id_Tmdb=item.infoLabels['tmdb_id'], tipo=tipo_busqueda, idioma_busqueda=idioma_busqueda) else: - otmdb_global = Tmdb(texto_buscado=item.infoLabels['tvshowtitle'], tipo=tipo_busqueda, - idioma_busqueda=idioma_busqueda, year=item.infoLabels['year']) + otmdb_global = Tmdb(texto_buscado=item.infoLabels['tvshowtitle'], tipo=tipo_busqueda, idioma_busqueda=idioma_busqueda, year=item.infoLabels['year']) __leer_datos(otmdb_global) @@ -361,13 +323,13 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda=def_lang, lock=None logger.debug("The episode number (%s) is not valid" % repr(item.infoLabels['episode'])) return -1 * len(item.infoLabels) - # Tenemos numero de temporada y numero de episodio validos... - # ... buscar datos episodio + # We have valid season number and episode number... + # ... search episode data item.infoLabels['mediatype'] = 'episode' episodio = otmdb_global.get_episodio(numtemporada, episode) if episodio: - # Actualizar datos + # Update data __leer_datos(otmdb_global) item.infoLabels['title'] = episodio['episodio_titulo'] if episodio['episodio_sinopsis']: @@ -388,15 +350,15 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda=def_lang, lock=None return len(item.infoLabels) else: - # Tenemos numero de temporada valido pero no numero de episodio... - # ... buscar datos temporada + # We have a valid season number but no episode number... + # ... search season data item.infoLabels['mediatype'] = 'season' temporada = otmdb_global.get_temporada(numtemporada) if not isinstance(temporada, dict): temporada = ast.literal_eval(temporada.decode('utf-8')) if temporada: - # Actualizar datos + # Update data __leer_datos(otmdb_global) item.infoLabels['title'] = temporada['name'] if 'name' in temporada else '' if 'overview' in temporada and temporada['overview']: @@ -418,69 +380,62 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda=def_lang, lock=None if lock and lock.locked(): lock.release() - # Buscar... + # Search... else: otmdb = copy.copy(otmdb_global) - # Busquedas por ID... + # Search by ID... 
if item.infoLabels['tmdb_id']:
- # ...Busqueda por tmdb_id
- otmdb = Tmdb(id_Tmdb=item.infoLabels['tmdb_id'], tipo=tipo_busqueda,
- idioma_busqueda=idioma_busqueda)
+ # ...Search by tmdb_id
+ otmdb = Tmdb(id_Tmdb=item.infoLabels['tmdb_id'], tipo=tipo_busqueda, idioma_busqueda=idioma_busqueda)
elif item.infoLabels['imdb_id']:
- # ...Busqueda por imdb code
- otmdb = Tmdb(external_id=item.infoLabels['imdb_id'], external_source="imdb_id",
- tipo=tipo_busqueda,
- idioma_busqueda=idioma_busqueda)
+ # ...Search by imdb code
+ otmdb = Tmdb(external_id=item.infoLabels['imdb_id'], external_source="imdb_id", tipo=tipo_busqueda, idioma_busqueda=idioma_busqueda)

- elif tipo_busqueda == 'tv': # buscar con otros codigos
+ elif tipo_busqueda == 'tv': # search with other codes
if item.infoLabels['tvdb_id']:
- # ...Busqueda por tvdb_id
- otmdb = Tmdb(external_id=item.infoLabels['tvdb_id'], external_source="tvdb_id", tipo=tipo_busqueda,
- idioma_busqueda=idioma_busqueda)
+ # ...Search by tvdb_id
+ otmdb = Tmdb(external_id=item.infoLabels['tvdb_id'], external_source="tvdb_id", tipo=tipo_busqueda, idioma_busqueda=idioma_busqueda)
elif item.infoLabels['freebase_mid']:
- # ...Busqueda por freebase_mid
- otmdb = Tmdb(external_id=item.infoLabels['freebase_mid'], external_source="freebase_mid",
- tipo=tipo_busqueda, idioma_busqueda=idioma_busqueda)
+ # ...Search by freebase_mid
+ otmdb = Tmdb(external_id=item.infoLabels['freebase_mid'], external_source="freebase_mid", tipo=tipo_busqueda, idioma_busqueda=idioma_busqueda)
elif item.infoLabels['freebase_id']:
- # ...Busqueda por freebase_id
- otmdb = Tmdb(external_id=item.infoLabels['freebase_id'], external_source="freebase_id",
- tipo=tipo_busqueda, idioma_busqueda=idioma_busqueda)
+ # ...Search by freebase_id
+ otmdb = Tmdb(external_id=item.infoLabels['freebase_id'], external_source="freebase_id", tipo=tipo_busqueda, idioma_busqueda=idioma_busqueda)
elif item.infoLabels['tvrage_id']:
- # ...Busqueda por tvrage_id
- otmdb = Tmdb(external_id=item.infoLabels['tvrage_id'], external_source="tvrage_id",
- tipo=tipo_busqueda, idioma_busqueda=idioma_busqueda)
+ # ...Search by tvrage_id
+ otmdb = Tmdb(external_id=item.infoLabels['tvrage_id'], external_source="tvrage_id", tipo=tipo_busqueda, idioma_busqueda=idioma_busqueda)

- #if otmdb is None:
+ # if otmdb is None:
if not item.infoLabels['tmdb_id'] and not item.infoLabels['imdb_id'] and not item.infoLabels['tvdb_id'] and not item.infoLabels['freebase_mid'] and not item.infoLabels['freebase_id'] and not item.infoLabels['tvrage_id']:
- # No se ha podido buscar por ID...
- # hacerlo por titulo
+ # Could not search by ID ...
+ # do it by title
if tipo_busqueda == 'tv':
- # Busqueda de serie por titulo y filtrando sus resultados si es necesario
+ # Series search by title, filtering the results if necessary
otmdb = Tmdb(texto_buscado=item.infoLabels['tvshowtitle'], tipo=tipo_busqueda,
idioma_busqueda=idioma_busqueda, filtro=item.infoLabels.get('filtro', {}),
year=item.infoLabels['year'])
else:
- # Busqueda de pelicula por titulo...
+ # Movie search by title ...
# if item.infoLabels['year'] or item.infoLabels['filtro']:
- # ...y año o filtro
+ # ...and year or filter
searched_title = item.contentTitle if item.contentTitle else item.fulltitle
otmdb = Tmdb(texto_buscado=searched_title, tipo=tipo_busqueda, idioma_busqueda=idioma_busqueda,
filtro=item.infoLabels.get('filtro', {}), year=item.infoLabels['year'])

if otmdb is not None:
if otmdb.get_id() and config.get_setting("tmdb_plus_info", default=False):
- # Si la busqueda ha dado resultado y no se esta buscando una lista de items,
- # realizar otra busqueda para ampliar la informacion
+ # If the search has been successful and you are not looking for a list of items,
+ # carry out another search to expand the information
otmdb = Tmdb(id_Tmdb=otmdb.result.get("id"), tipo=tipo_busqueda, idioma_busqueda=idioma_busqueda)

if lock and lock.locked():
lock.release()

if otmdb is not None and otmdb.get_id():
- # La busqueda ha encontrado un resultado valido
+ # The search has found a valid result
__leer_datos(otmdb)
return len(item.infoLabels)

- # La busqueda en tmdb esta desactivada o no ha dado resultado
+ # The tmdb search is disabled or has given no results
# item.contentType = item.infoLabels['mediatype']
return -1 * len(item.infoLabels)
@@ -500,7 +455,7 @@ def find_and_set_infoLabels(item):
tipo_contenido = config.get_localized_string(60298)
title = item.contentSerieName

- # Si el titulo incluye el (año) se lo quitamos
+ # If the title includes the (year), we remove it
year = scrapertools.find_single_match(title, "^.+?\s*(\(\d{4}\))$")
if year:
title = title.replace(year, "").strip()
@@ -510,8 +465,7 @@
if not item.infoLabels.get("imdb_id"):
otmdb_global = Tmdb(texto_buscado=title, tipo=tipo_busqueda, year=item.infoLabels['year'])
else:
- otmdb_global = Tmdb(external_id=item.infoLabels.get("imdb_id"), external_source="imdb_id",
- tipo=tipo_busqueda)
+ otmdb_global = Tmdb(external_id=item.infoLabels.get("imdb_id"), external_source="imdb_id", tipo=tipo_busqueda)
elif not otmdb_global or str(otmdb_global.result.get("id")) != item.infoLabels['tmdb_id']:
otmdb_global = Tmdb(id_Tmdb=item.infoLabels['tmdb_id'], tipo=tipo_busqueda, idioma_busqueda=def_lang)
@@ -519,8 +473,7 @@
if len(results) > 1:
from platformcode import platformtools
- tmdb_result = platformtools.show_video_info(results, item=item,
- caption= tipo_contenido % title)
+ tmdb_result = platformtools.show_video_info(results, item=item, caption= tipo_contenido % title)
elif len(results) > 0:
tmdb_result = results[0]
@@ -531,7 +484,7 @@
if tmdb_result:
infoLabels['tmdb_id'] = tmdb_result['id']
- # todo mirar si se puede eliminar y obtener solo desde get_nfo()
+ # TODO: check if this can be removed and obtained only from get_nfo()
infoLabels['url_scraper'] = ["https://www.themoviedb.org/%s/%s" % (tipo_busqueda, infoLabels['tmdb_id'])]
if infoLabels['tvdb_id']:
infoLabels['url_scraper'].append("http://thetvdb.com/index.php?tab=series&id=%s" % infoLabels['tvdb_id'])
@@ -547,16 +500,14 @@
def get_nfo(item):
"""
- Devuelve la información necesaria para que se scrapee el resultado en la videoteca de kodi, para tmdb funciona
- solo pasandole la url.
- @param item: elemento que contiene los datos necesarios para generar la info
+ Returns the information needed for the result to be scraped into the kodi video library; for tmdb it works just by passing it the url.
+ @param item: element that contains the data necessary to generate the info @type item: Item @rtype: str @return: """ if "season" in item.infoLabels and "episode" in item.infoLabels: - info_nfo = "https://www.themoviedb.org/tv/%s/season/%s/episode/%s\n" % \ - (item.infoLabels['tmdb_id'], item.contentSeason, item.contentEpisodeNumber) + info_nfo = "https://www.themoviedb.org/tv/%s/season/%s/episode/%s\n" % (item.infoLabels['tmdb_id'], item.contentSeason, item.contentEpisodeNumber) else: info_nfo = ', '.join(item.infoLabels['url_scraper']) + "\n" @@ -565,10 +516,10 @@ def get_nfo(item): def completar_codigos(item): """ - Si es necesario comprueba si existe el identificador de tvdb y sino existe trata de buscarlo + If necessary, check if the tvdb identifier exists and if it does not exist try to find it """ if item.contentType != "movie" and not item.infoLabels['tvdb_id']: - # Lanzar busqueda por imdb_id en tvdb + # Launch search for imdb_id on tvdb from core.tvdb import Tvdb ob = Tvdb(imdb_id=item.infoLabels['imdb_id']) item.infoLabels['tvdb_id'] = ob.get_id() @@ -585,8 +536,7 @@ def discovery(item, dict_=False, cast=False): listado = Tmdb(discover = dict_, cast=cast) elif item.search_type == 'discover': - listado = Tmdb(discover={'url':'discover/%s' % item.type, 'with_genres':item.list_type, 'language':def_lang, - 'page':item.page}) + listado = Tmdb(discover={'url':'discover/%s' % item.type, 'with_genres':item.list_type, 'language':def_lang, 'page':item.page}) elif item.search_type == 'list': if item.page == '': @@ -602,7 +552,7 @@ def get_genres(type): return genres.dic_generos[lang] -# Clase auxiliar +# Auxiliary class class ResultDictDefault(dict): # Python 2.4 def __getitem__(self, key): @@ -613,14 +563,13 @@ class ResultDictDefault(dict): def __missing__(self, key): """ - valores por defecto en caso de que la clave solicitada no exista + default values ​​in case the requested key does not exist """ if key in ['genre_ids', 'genre', 'genres']: return list() elif key == 'images_posters': posters = dict() - if 'images' in list(super(ResultDictDefault, self).keys()) and \ - 'posters' in super(ResultDictDefault, self).__getitem__('images'): + if 'images' in list(super(ResultDictDefault, self).keys()) and 'posters' in super(ResultDictDefault, self).__getitem__('images'): posters = super(ResultDictDefault, self).__getitem__('images')['posters'] super(ResultDictDefault, self).__setattr__("images_posters", posters) @@ -628,8 +577,7 @@ class ResultDictDefault(dict): elif key == "images_backdrops": backdrops = dict() - if 'images' in list(super(ResultDictDefault, self).keys()) and \ - 'backdrops' in super(ResultDictDefault, self).__getitem__('images'): + if 'images' in list(super(ResultDictDefault, self).keys()) and 'backdrops' in super(ResultDictDefault, self).__getitem__('images'): backdrops = super(ResultDictDefault, self).__getitem__('images')['backdrops'] super(ResultDictDefault, self).__setattr__("images_backdrops", backdrops) @@ -637,15 +585,14 @@ class ResultDictDefault(dict): elif key == "images_profiles": profiles = dict() - if 'images' in list(super(ResultDictDefault, self).keys()) and \ - 'profiles' in super(ResultDictDefault, self).__getitem__('images'): + if 'images' in list(super(ResultDictDefault, self).keys()) and 'profiles' in super(ResultDictDefault, self).__getitem__('images'): profiles = super(ResultDictDefault, self).__getitem__('images')['profiles'] super(ResultDictDefault, self).__setattr__("images_profiles", profiles) return profiles else: - # El resto de claves devuelven 
cadenas vacias por defecto + # The rest of the keys return empty strings by default return "" def __str__(self): @@ -668,78 +615,78 @@ class ResultDictDefault(dict): # --------------------------------------------------------------------------------------------------------------- # class Tmdb: -# Scraper para el addon basado en el Api de https://www.themoviedb.org/ -# version 1.4: -# - Documentada limitacion de uso de la API (ver mas abajo). -# - Añadido metodo get_temporada() -# version 1.3: -# - Corregido error al devolver None el path_poster y el backdrop_path -# - Corregido error que hacia que en el listado de generos se fueran acumulando de una llamada a otra -# - Añadido metodo get_generos() -# - Añadido parametros opcional idioma_alternativo al metodo get_sinopsis() +# Scraper for the API based addon from https://www.themoviedb.org/ +# version 1.4: +# - Documented limitation of API use (see below). +# - Added get_temporada () method +# version 1.3: +# - Fixed error when returning None the path_poster and backdrop_path +# - Fixed a bug that caused the list of genres to accumulate from one call to another +# - Added get_generos () method +# - Added optional parameters alternative_language to the get_sinopsis () method # # -# Uso: -# Metodos constructores: -# Tmdb(texto_buscado, tipo) -# Parametros: -# texto_buscado:(str) Texto o parte del texto a buscar -# tipo: ("movie" o "tv") Tipo de resultado buscado peliculas o series. Por defecto "movie" -# (opcional) idioma_busqueda: (str) codigo del idioma segun ISO 639-1 -# (opcional) include_adult: (bool) Se incluyen contenidos para adultos en la busqueda o no. Por defecto +# Usage: +# Construction methods: +# Tmdb (search_text, type) +# Parameters: +# searched_text: (str) Text or part of the text to search +# type: ("movie" or "tv") Type of result searched for movies or series. Default "movie" +# (optional) language_search: (str) language code according to ISO 639-1 +# (optional) include_adult: (bool) Adult content is included in the search or not. Default # 'False' -# (opcional) year: (str) Año de lanzamiento. -# (opcional) page: (int) Cuando hay muchos resultados para una busqueda estos se organizan por paginas. -# Podemos cargar la pagina que deseemos aunque por defecto siempre es la primera. -# Return: -# Esta llamada devuelve un objeto Tmdb que contiene la primera pagina del resultado de buscar 'texto_buscado' -# en la web themoviedb.org. Cuantos mas parametros opcionales se incluyan mas precisa sera la busqueda. -# Ademas el objeto esta inicializado con el primer resultado de la primera pagina de resultados. -# Tmdb(id_Tmdb,tipo) -# Parametros: -# id_Tmdb: (str) Codigo identificador de una determinada pelicula o serie en themoviedb.org -# tipo: ("movie" o "tv") Tipo de resultado buscado peliculas o series. Por defecto "movie" -# (opcional) idioma_busqueda: (str) codigo del idioma segun ISO 639-1 -# Return: -# Esta llamada devuelve un objeto Tmdb que contiene el resultado de buscar una pelicula o serie con el -# identificador id_Tmd -# en la web themoviedb.org. -# Tmdb(external_id, external_source, tipo) -# Parametros: -# external_id: (str) Codigo identificador de una determinada pelicula o serie en la web referenciada por +# (optional) year: (str) Release year. +# (optional) page: (int) When there are many results for a search these are organized by pages. +# We can load the page we want, although by default it is always the first page. 
+# Return: +# This call returns a Tmdb object containing the first page of the search result 'search_text' +# on the themoviedb.org website. The more optional parameters are included, the more precise the search will be. +# Also the object is initialized with the first result of the first page of results. +# Tmdb (id_Tmdb, type) +# Parameters: +# id_Tmdb: (str) Identifier code of a certain movie or series at themoviedb.org +# type: ("movie" or "tv") Type of result searched for movies or series. Default "movie" +# (optional) language_search: (str) language code according to ISO 639-1 +# Return: +# This call returns a Tmdb object that contains the result of searching for a movie or series with the +# identifier id_Tmd +# on the themoviedb.org website. +# Tmdb (external_id, external_source, type) +# Parameters: +# external_id: (str) Identifier code of a certain movie or series on the web referenced by # 'external_source'. -# external_source: (Para series:"imdb_id","freebase_mid","freebase_id","tvdb_id","tvrage_id"; Para -# peliculas:"imdb_id") -# tipo: ("movie" o "tv") Tipo de resultado buscado peliculas o series. Por defecto "movie" -# (opcional) idioma_busqueda: (str) codigo del idioma segun ISO 639-1 -# Return: -# Esta llamada devuelve un objeto Tmdb que contiene el resultado de buscar una pelicula o serie con el -# identificador 'external_id' de -# la web referenciada por 'external_source' en la web themoviedb.org. +# external_source: (For series: "imdb_id", "freebase_mid", "freebase_id", "tvdb_id", "tvrage_id"; For +# movies: "imdb_id") +# type: ("movie" or "tv") Type of result searched for movies or series. Default "movie" +# (optional) language_search: (str) language code according to ISO 639-1 +# Return: +# This call returns a Tmdb object that contains the result of searching for a movie or series with the +# identifier 'external_id' of +# the website referenced by 'external_source' on the themoviedb.org website. # -# Metodos principales: -# get_id(): Retorna un str con el identificador Tmdb de la pelicula o serie cargada o una cadena vacia si no hubiese -# nada cargado. -# get_sinopsis(idioma_alternativo): Retorna un str con la sinopsis de la serie o pelicula cargada. -# get_poster (tipo_respuesta,size): Obtiene el poster o un listado de posters. -# get_backdrop (tipo_respuesta,size): Obtiene una imagen de fondo o un listado de imagenes de fondo. -# get_temporada(temporada): Obtiene un diccionario con datos especificos de la temporada. -# get_episodio (temporada, capitulo): Obtiene un diccionario con datos especificos del episodio. -# get_generos(): Retorna un str con la lista de generos a los que pertenece la pelicula o serie. +# Main methods: +# get_id (): Returns a str with the Tmdb identifier of the loaded movie or series or an empty string if there were no +# nothing loaded. +# get_sinopsis (alternate_language): Returns a str with the synopsis of the series or movie loaded. +# get_poster (response_type, size): Get the poster or a list of posters. +# get_backdrop (response_type, size): Get a background image or a list of background images. +# get_temporada (season): Get a dictionary with season-specific data. +# get_episodio (season, episode): Get a dictionary with specific data of the episode. +# get_generos (): Returns a str with the list of genres to which the movie or series belongs. # # -# Otros metodos: -# load_resultado(resultado, page): Cuando la busqueda devuelve varios resultados podemos seleccionar que resultado -# concreto y de que pagina cargar los datos. 
+# Other methods: +# load_resultado (result, page): When the search returns several results we can select which result +# concrete and from which page to load the data. # -# Limitaciones: -# El uso de la API impone un limite de 20 conexiones simultaneas (concurrencia) o 30 peticiones en 10 segundos por IP -# Informacion sobre la api : http://docs.themoviedb.apiary.io +# Limitations: +# The use of the API imposes a limit of 20 simultaneous connections (concurrency) or 30 requests in 10 seconds per IP +# Information about the api: http://docs.themoviedb.apiary.io # ------------------------------------------------------------------------------------------------------------------- class Tmdb(object): - # Atributo de clase + # Class attribute dic_generos = {} ''' dic_generos={"id_idioma1": {"tv": {"id1": "name1", @@ -830,17 +777,15 @@ class Tmdb(object): self.busqueda_filtro = kwargs.get('filtro', {}) self.discover = kwargs.get('discover', {}) - # Reellenar diccionario de generos si es necesario - if (self.busqueda_tipo == 'movie' or self.busqueda_tipo == "tv") and \ - (self.busqueda_idioma not in Tmdb.dic_generos or - self.busqueda_tipo not in Tmdb.dic_generos[self.busqueda_idioma]): + # Refill gender dictionary if necessary + if (self.busqueda_tipo == 'movie' or self.busqueda_tipo == "tv") and (self.busqueda_idioma not in Tmdb.dic_generos or self.busqueda_tipo not in Tmdb.dic_generos[self.busqueda_idioma]): self.rellenar_dic_generos(self.busqueda_tipo, self.busqueda_idioma) if not self.busqueda_tipo: self.busqueda_tipo = 'movie' if self.busqueda_id: - # Busqueda por identificador tmdb + #Search by tmdb identifier self.__by_id() elif self.busqueda_texto: @@ -848,12 +793,10 @@ class Tmdb(object): self.__search(page=self.page) elif 'external_source' in kwargs and 'external_id' in kwargs: - # Busqueda por identificador externo segun el tipo. + # Search by external identifier according to type. # TV Series: imdb_id, freebase_mid, freebase_id, tvdb_id, tvrage_id # Movies: imdb_id - if (self.busqueda_tipo == 'movie' and kwargs.get('external_source') == "imdb_id") or \ - (self.busqueda_tipo == 'tv' and kwargs.get('external_source') in ( - "imdb_id", "freebase_mid", "freebase_id", "tvdb_id", "tvrage_id")): + if (self.busqueda_tipo == 'movie' and kwargs.get('external_source') == "imdb_id") or (self.busqueda_tipo == 'tv' and kwargs.get('external_source') in ("imdb_id", "freebase_mid", "freebase_id", "tvdb_id", "tvrage_id")): self.busqueda_id = kwargs.get('external_id') self.__by_id(source=kwargs.get('external_source')) @@ -880,9 +823,9 @@ class Tmdb(object): if dict_data["status_code"] == 25: while "status_code" in dict_data and dict_data["status_code"] == 25: wait = int(res_headers['retry-after']) - #logger.error("Limite alcanzado, esperamos para volver a llamar en ...%s" % wait) + #logger.error("Limit reached, we wait to call back on ...%s" % wait) time.sleep(wait) - # logger.debug("RE Llamada #%s" % d) + # logger.debug("RE Call #%s" % d) result = httptools.downloadpage(url, cookies=False) res_headers = result.headers @@ -890,7 +833,7 @@ class Tmdb(object): dict_data = jsontools.load(result.data) # logger.debug("result_data es %s" % dict_data) - # error al obtener los datos + # error getting data except Exception as ex: message = "An exception of type %s occured. 
Arguments:\n%s" % (type(ex).__name__, repr(ex.args)) logger.error("error in: %s" % message) @@ -900,14 +843,13 @@ class Tmdb(object): @classmethod def rellenar_dic_generos(cls, tipo='movie', idioma=def_lang): - # Rellenar diccionario de generos del tipo e idioma pasados como parametros + # Fill dictionary of genres of the type and language passed as parameters if idioma not in cls.dic_generos: cls.dic_generos[idioma] = {} if tipo not in cls.dic_generos[idioma]: cls.dic_generos[idioma][tipo] = {} - url = ('http://api.themoviedb.org/3/genre/%s/list?api_key=a1ab8b8669da03637a4b98fa39c39228&language=%s' - % (tipo, idioma)) + url = ('http://api.themoviedb.org/3/genre/%s/list?api_key=a1ab8b8669da03637a4b98fa39c39228&language=%s' % (tipo, idioma)) try: logger.info("[Tmdb.py] Filling in dictionary of genres") @@ -959,7 +901,7 @@ class Tmdb(object): self.result = ResultDictDefault(resultado) else: - # No hay resultados de la busqueda + # No search results msg = "The search of %s gave no results" % buscando logger.debug(msg) @@ -995,7 +937,6 @@ class Tmdb(object): results = resultado["results"] if self.busqueda_filtro and total_results > 1: - # TODO documentar esta parte for key, value in list(dict(self.busqueda_filtro).items()): for r in results[:]: if not r[key]: @@ -1006,13 +947,13 @@ class Tmdb(object): if results: if index_results >= len(results): - # Se ha solicitado un numero de resultado mayor de los obtenidos + # A higher number of results has been requested than those obtained logger.error( "The search for '%s' gave %s results for the page %s \n It is impossible to show the result number %s" % (buscando, len(results), page, index_results)) return 0 - # Retornamos el numero de resultados de esta pagina + # We return the number of results of this page self.results = results self.total_results = total_results self.total_pages = total_pages @@ -1020,7 +961,7 @@ class Tmdb(object): return len(self.results) else: - # No hay resultados de la busqueda + # No search results msg = "The search for '%s' gave no results for page %s" % (buscando, page) logger.error(msg) return 0 @@ -1033,8 +974,8 @@ class Tmdb(object): total_pages = 0 # Ejemplo self.discover: {'url': 'discover/movie', 'with_cast': '1'} - # url: Método de la api a ejecutar - # resto de claves: Parámetros de la búsqueda concatenados a la url + # url: API method to run + # rest of keys: Search parameters concatenated to the url type_search = self.discover.get('url', '') if type_search: params = [] @@ -1074,7 +1015,7 @@ class Tmdb(object): "The search for '%s' did not give %s results" % (type_search, index_results)) return 0 - # Retornamos el numero de resultados de esta pagina + # We return the number of results of this page if results: self.results = results self.total_results = total_results @@ -1086,13 +1027,12 @@ class Tmdb(object): self.result = results return len(self.results) else: - # No hay resultados de la busqueda + # No search results logger.error("The search for '%s' gave no results" % type_search) return 0 def load_resultado(self, index_results=0, page=1): - # Si no hay resultados, solo hay uno o - # si el numero de resultados de esta pagina es menor al indice buscado salir + # If there are no results, there is only one or if the number of results on this page is less than the index sought to exit self.result = ResultDictDefault() num_result_page = len(self.results) @@ -1112,7 +1052,6 @@ class Tmdb(object): def get_list_resultados(self, num_result=20): # logger.info("self %s" % str(self)) - # TODO documentar res = [] if 
num_result <= 0: @@ -1141,9 +1080,9 @@ class Tmdb(object): def get_generos(self, origen=None): """ - :param origen: Diccionario origen de donde se obtiene los infoLabels, por omision self.result + :param origen: Source dictionary where the infoLabels are obtained, by default self.result :type origen: Dict - :return: Devuelve la lista de generos a los que pertenece la pelicula o serie. + :return: Returns the list of genres to which the movie or series belongs. :rtype: str """ genre_list = [] @@ -1152,7 +1091,7 @@ class Tmdb(object): origen = self.result if "genre_ids" in origen: - # Buscar lista de generos por IDs + # Search list of genres by IDs for i in origen.get("genre_ids"): try: genre_list.append(Tmdb.dic_generos[self.busqueda_idioma][self.busqueda_tipo][str(i)]) @@ -1160,7 +1099,7 @@ class Tmdb(object): pass elif "genre" in origen or "genres" in origen: - # Buscar lista de generos (lista de objetos {id,nombre}) + # Search genre list (object list {id, name}) v = origen["genre"] v.extend(origen["genres"]) for i in v: @@ -1175,9 +1114,7 @@ class Tmdb(object): def get_id(self): """ - - :return: Devuelve el identificador Tmdb de la pelicula o serie cargada o una cadena vacia en caso de que no - hubiese nada cargado. Se puede utilizar este metodo para saber si una busqueda ha dado resultado o no. + :return: Returns the Tmdb identifier of the loaded movie or series or an empty string in case nothing was loaded. You can use this method to find out if a search has been successful or not. :rtype: str """ return str(self.result.get('id', "")) @@ -1185,12 +1122,10 @@ class Tmdb(object): def get_sinopsis(self, idioma_alternativo=""): """ - :param idioma_alternativo: codigo del idioma, segun ISO 639-1, en el caso de que en el idioma fijado para la - busqueda no exista sinopsis. - Por defecto, se utiliza el idioma original. Si se utiliza None como idioma_alternativo, solo se buscara en - el idioma fijado. + :param idioma_alternativo: Language code, according to ISO 639-1, if there is no synopsis in the language set for the search. + By default, the original language is used. If None is used as the alternative_language, it will only search in the set language. :type idioma_alternativo: str - :return: Devuelve la sinopsis de una pelicula o serie + :return: Returns the synopsis of a movie or series :rtype: str """ ret = "" @@ -1198,15 +1133,14 @@ class Tmdb(object): if 'id' in self.result: ret = self.result.get('overview') if ret == "" and str(idioma_alternativo).lower() != 'none': - # Vamos a lanzar una busqueda por id y releer de nuevo la sinopsis + # Let's launch a search for id and reread the synopsis again self.busqueda_id = str(self.result["id"]) if idioma_alternativo: self.busqueda_idioma = idioma_alternativo else: self.busqueda_idioma = self.result['original_language'] - url = ('http://api.themoviedb.org/3/%s/%s?api_key=a1ab8b8669da03637a4b98fa39c39228&language=%s' % - (self.busqueda_tipo, self.busqueda_id, self.busqueda_idioma)) + url = ('http://api.themoviedb.org/3/%s/%s?api_key=a1ab8b8669da03637a4b98fa39c39228&language=%s' % (self.busqueda_tipo, self.busqueda_id, self.busqueda_idioma)) resultado = self.get_json(url) if not isinstance(resultado, dict): @@ -1221,15 +1155,13 @@ class Tmdb(object): def get_poster(self, tipo_respuesta="str", size="original"): """ - @param tipo_respuesta: Tipo de dato devuelto por este metodo. Por defecto "str" + @param tipo_respuesta: Data type returned by this method. 
Default "str" @type tipo_respuesta: list, str @param size: ("w45", "w92", "w154", "w185", "w300", "w342", "w500", "w600", "h632", "w780", "w1280", "original") - Indica la anchura(w) o altura(h) de la imagen a descargar. Por defecto "original" - @return: Si el tipo_respuesta es "list" devuelve un listado con todas las urls de las imagenes tipo poster del - tamaño especificado. - Si el tipo_respuesta es "str" devuelve la url de la imagen tipo poster, mas valorada, del tamaño - especificado. - Si el tamaño especificado no existe se retornan las imagenes al tamaño original. + Indicates the width (w) or height (h) of the image to download. Default "original" + @return: If the response_type is "list" it returns a list with all the urls of the poster images of the specified size. + If the response_type is "str" ​​it returns the url of the poster image, most valued, of the specified size. + If the specified size does not exist, the images are returned to the original size. @rtype: list, str """ ret = [] @@ -1247,7 +1179,7 @@ class Tmdb(object): return [] if len(self.result['images_posters']) == 0: - # Vamos a lanzar una busqueda por id y releer de nuevo + # We are going to launch a search by id and reread again self.busqueda_id = str(self.result["id"]) self.__by_id() @@ -1255,7 +1187,7 @@ class Tmdb(object): for i in self.result['images_posters']: imagen_path = i['file_path'] if size != "original": - # No podemos pedir tamaños mayores que el original + # We cannot order sizes larger than the original if size[1] == 'w' and int(imagen_path['width']) < int(size[1:]): size = "original" elif size[1] == 'h' and int(imagen_path['height']) < int(size[1:]): @@ -1268,16 +1200,15 @@ class Tmdb(object): def get_backdrop(self, tipo_respuesta="str", size="original"): """ - Devuelve las imagenes de tipo backdrop - @param tipo_respuesta: Tipo de dato devuelto por este metodo. Por defecto "str" + Returns the images of type backdrop + @param tipo_respuesta: Data type returned by this method. Default "str" @type tipo_respuesta: list, str @param size: ("w45", "w92", "w154", "w185", "w300", "w342", "w500", "w600", "h632", "w780", "w1280", "original") - Indica la anchura(w) o altura(h) de la imagen a descargar. Por defecto "original" + Indicates the width (w) or height (h) of the image to download. Default "original" @type size: str - @return: Si el tipo_respuesta es "list" devuelve un listado con todas las urls de las imagenes tipo backdrop del - tamaño especificado. - Si el tipo_respuesta es "str" devuelve la url de la imagen tipo backdrop, mas valorada, del tamaño especificado. - Si el tamaño especificado no existe se retornan las imagenes al tamaño original. + @return: If the response_type is "list" it returns a list with all the urls of the backdrop images of the specified size. + If the response_type is "str" ​​it returns the url of the backdrop type image, most valued, of the specified size. + If the specified size does not exist, the images are returned to the original size. 
@rtype: list, str """ ret = [] @@ -1295,7 +1226,7 @@ class Tmdb(object): return [] if len(self.result['images_backdrops']) == 0: - # Vamos a lanzar una busqueda por id y releer de nuevo todo + # Let's launch a search by id and reread everything self.busqueda_id = str(self.result["id"]) self.__by_id() @@ -1303,7 +1234,7 @@ class Tmdb(object): for i in self.result['images_backdrops']: imagen_path = i['file_path'] if size != "original": - # No podemos pedir tamaños mayores que el original + # We cannot order sizes larger than the original if size[1] == 'w' and int(imagen_path['width']) < int(size[1:]): size = "original" elif size[1] == 'h' and int(imagen_path['height']) < int(size[1:]): @@ -1316,13 +1247,13 @@ class Tmdb(object): def get_temporada(self, numtemporada=1): # -------------------------------------------------------------------------------------------------------------------------------------------- - # Parametros: - # numtemporada: (int) Numero de temporada. Por defecto 1. - # Return: (dic) - # Devuelve un dicionario con datos sobre la temporada. - # Puede obtener mas informacion sobre los datos devueltos en: - # http://docs.themoviedb.apiary.io/#reference/tv-seasons/tvidseasonseasonnumber/get - # http://docs.themoviedb.apiary.io/#reference/tv-seasons/tvidseasonseasonnumbercredits/get + # Parameters: + # season number: (int) Season number. Default 1. + # Return: (Dec) + # Returns a dictionary with data about the season. + # You can get more information about the returned data at: + # http://docs.themoviedb.apiary.io/#reference/tv-seasons/tvidseasonseasonnumber/get + # http://docs.themoviedb.apiary.io/#reference/tv-seasons/tvidseasonseasonnumbercredits/get # -------------------------------------------------------------------------------------------------------------------------------------------- if not self.result["id"] or self.busqueda_tipo != "tv": return {} @@ -1332,7 +1263,7 @@ class Tmdb(object): numtemporada = 1 if not self.temporada.get(numtemporada, {}): - # Si no hay datos sobre la temporada solicitada, consultar en la web + # If there is no information about the requested season, check the website # http://api.themoviedb.org/3/tv/1407/season/1?api_key=a1ab8b8669da03637a4b98fa39c39228&language=es& # append_to_response=credits @@ -1352,7 +1283,7 @@ class Tmdb(object): self.temporada[numtemporada] = {"episodes": {}} if "status_code" in self.temporada[numtemporada]: - #Se ha producido un error + # An error has occurred msg = config.get_localized_string(70496) + buscando + config.get_localized_string(70497) msg += "\nTmdb error: %s %s" % ( self.temporada[numtemporada]["status_code"], self.temporada[numtemporada]["status_message"]) @@ -1363,16 +1294,16 @@ class Tmdb(object): def get_episodio(self, numtemporada=1, capitulo=1): # -------------------------------------------------------------------------------------------------------------------------------------------- - # Parametros: - # numtemporada(opcional): (int) Numero de temporada. Por defecto 1. - # capitulo: (int) Numero de capitulo. Por defecto 1. 
- # Return: (dic) - # Devuelve un dicionario con los siguientes elementos: - # "temporada_nombre", "temporada_sinopsis", "temporada_poster", "temporada_num_episodios"(int), - # "temporada_air_date", "episodio_vote_count", "episodio_vote_average", - # "episodio_titulo", "episodio_sinopsis", "episodio_imagen", "episodio_air_date", - # "episodio_crew" y "episodio_guest_stars", - # Con capitulo == -1 el diccionario solo tendra los elementos referentes a la temporada + # Parameters: + # season number (optional): (int) Season number. Default 1. + # chapter: (int) Chapter number. Default 1. + # Return: (Dec) + # Returns a dictionary with the following elements: + # "season_name", "season_synopsis", "season_poster", "season_num_ episodes" (int), + # "season_air_date", "episode_vote_count", "episode_vote_average", + # "episode_title", "episode_synopsis", "episode_image", "episode_air_date", + # "episode_crew" and "episode_guest_stars", + # With chapter == -1 the dictionary will only have the elements referring to the season # -------------------------------------------------------------------------------------------------------------------------------------------- if not self.result["id"] or self.busqueda_tipo != "tv": return {} @@ -1388,16 +1319,16 @@ class Tmdb(object): if not isinstance(temporada, dict): temporada = ast.literal_eval(temporada.decode('utf-8')) if not temporada: - # Se ha producido un error + # An error has occurred return {} if len(temporada["episodes"]) == 0 or len(temporada["episodes"]) < capitulo: - # Se ha producido un error + # An error has occurred logger.error("Episode %d of the season %d not found." % (capitulo, numtemporada)) return {} ret_dic = dict() - # Obtener datos para esta temporada + # Get data for this season ret_dic["temporada_nombre"] = temporada["name"] ret_dic["temporada_sinopsis"] = temporada["overview"] ret_dic["temporada_num_episodios"] = len(temporada["episodes"]) @@ -1414,8 +1345,7 @@ class Tmdb(object): ret_dic["temporada_cast"] = dic_aux.get('cast', []) ret_dic["temporada_crew"] = dic_aux.get('crew', []) if capitulo == -1: - # Si solo buscamos datos de la temporada, - # incluir el equipo tecnico que ha intervenido en algun capitulo + # If we only look for season data, include the technical team that has intervened in any chapter dic_aux = dict((i['id'], i) for i in ret_dic["temporada_crew"]) for e in temporada["episodes"]: for crew in e['crew']: @@ -1423,7 +1353,7 @@ class Tmdb(object): dic_aux[crew['id']] = crew ret_dic["temporada_crew"] = list(dic_aux.values()) - # Obtener datos del capitulo si procede + # Obtain chapter data if applicable if capitulo != -1: episodio = temporada["episodes"][capitulo - 1] ret_dic["episodio_titulo"] = episodio["name"] @@ -1446,8 +1376,7 @@ class Tmdb(object): def get_videos(self): """ - :return: Devuelve una lista ordenada (idioma/resolucion/tipo) de objetos Dict en la que cada uno de - sus elementos corresponde con un trailer, teaser o clip de youtube. + :return: Returns an ordered list (language / resolution / type) of Dict objects in which each of its elements corresponds to a trailer, teaser or clip from youtube. 
:rtype: list of Dict """ ret = [] @@ -1455,7 +1384,7 @@ class Tmdb(object): if self.result['videos']: self.result["videos"] = self.result["videos"]['results'] else: - # Primera búsqueda de videos en el idioma de busqueda + # First video search in the search language url = "http://api.themoviedb.org/3/%s/%s/videos?api_key=a1ab8b8669da03637a4b98fa39c39228&language=%s" \ % (self.busqueda_tipo, self.result['id'], self.busqueda_idioma) @@ -1467,10 +1396,9 @@ class Tmdb(object): dict_videos['results'] = sorted(dict_videos['results'], key=lambda x: (x['type'], x['size'])) self.result["videos"] = dict_videos['results'] - # Si el idioma de busqueda no es ingles, hacer una segunda búsqueda de videos en inglés + # If the search language is not English, do a second video search in English if self.busqueda_idioma != 'en': - url = "http://api.themoviedb.org/3/%s/%s/videos?api_key=a1ab8b8669da03637a4b98fa39c39228" \ - % (self.busqueda_tipo, self.result['id']) + url = "http://api.themoviedb.org/3/%s/%s/videos?api_key=a1ab8b8669da03637a4b98fa39c39228" % (self.busqueda_tipo, self.result['id']) dict_videos = self.get_json(url) if not isinstance(dict_videos, dict): @@ -1480,7 +1408,7 @@ class Tmdb(object): dict_videos['results'] = sorted(dict_videos['results'], key=lambda x: (x['type'], x['size'])) self.result["videos"].extend(dict_videos['results']) - # Si las busqueda han obtenido resultados devolver un listado de objetos + # If the searches have obtained results, return a list of objects for i in self.result['videos']: if i['site'] == "YouTube": ret.append({'name': i['name'], @@ -1493,12 +1421,11 @@ class Tmdb(object): def get_infoLabels(self, infoLabels=None, origen=None): """ - :param infoLabels: Informacion extra de la pelicula, serie, temporada o capitulo. + :param infoLabels: Extra information about the movie, series, season or chapter. :type infoLabels: Dict - :param origen: Diccionario origen de donde se obtiene los infoLabels, por omision self.result + :param origen: Source dictionary where the infoLabels are obtained, by default self.result :type origen: Dict - :return: Devuelve la informacion extra obtenida del objeto actual. Si se paso el parametro infoLables, el valor - devuelto sera el leido como parametro debidamente actualizado. + :return: Returns the extra information obtained from the current object. If the infoLables parameter was passed, the returned value will be read as a duly updated parameter. 
:rtype: Dict """ @@ -1507,7 +1434,7 @@ class Tmdb(object): else: ret_infoLabels = InfoLabels() - # Iniciar listados + # Start Listings l_country = [i.strip() for i in ret_infoLabels['country'].split(',') if ret_infoLabels['country']] l_director = [i.strip() for i in ret_infoLabels['director'].split(',') if ret_infoLabels['director']] l_writer = [i.strip() for i in ret_infoLabels['writer'].split(',') if ret_infoLabels['writer']] @@ -1524,9 +1451,9 @@ class Tmdb(object): items = list(origen.items()) - # Informacion Temporada/episodio + # Season / episode information if ret_infoLabels['season'] and self.temporada.get(ret_infoLabels['season']): - # Si hay datos cargados de la temporada indicada + # If there is data loaded for the indicated season episodio = -1 if ret_infoLabels['episode']: episodio = ret_infoLabels['episode'] @@ -1550,12 +1477,12 @@ class Tmdb(object): else: ret_infoLabels['plot'] = self.get_sinopsis() - elif k == 'runtime': #Duration for movies + elif k == 'runtime': # Duration for movies ret_infoLabels['duration'] = int(v) * 60 - elif k == 'episode_run_time': #Duration for episodes + elif k == 'episode_run_time': # Duration for episodes try: - for v_alt in v: #It comes as a list (?!) + for v_alt in v: # It comes as a list (?!) ret_infoLabels['duration'] = int(v_alt) * 60 except: pass @@ -1650,7 +1577,7 @@ class Tmdb(object): # logger.debug("Atributos no añadidos: " + k +'= '+ str(v)) pass - # Ordenar las listas y convertirlas en str si es necesario + # Sort the lists and convert them to str if necessary if l_castandrole: ret_infoLabels['castandrole'] = sorted(l_castandrole, key=lambda tup: tup[0]) if l_country: diff --git a/core/videolibrarytools.py b/core/videolibrarytools.py index 006f631b..736ced36 100644 --- a/core/videolibrarytools.py +++ b/core/videolibrarytools.py @@ -8,19 +8,12 @@ import sys PY3 = False if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int -import errno -import math -import traceback -import re -import os +import errno, math, traceback, re, os -from core import filetools -from core import scraper -from core import scrapertools +from core import filetools, scraper, scrapertools from core.item import Item from lib import generictools -from platformcode import config, logger -from platformcode import platformtools +from platformcode import config, logger, platformtools FOLDER_MOVIES = config.get_setting("folder_movies") FOLDER_TVSHOWS = config.get_setting("folder_tvshows") @@ -37,15 +30,13 @@ addon_name = "plugin://plugin.video.%s/" % config.PLUGIN_NAME def read_nfo(path_nfo, item=None): """ - Metodo para leer archivos nfo. - Los arcivos nfo tienen la siguiente extructura: url_scraper | xml + item_json - [url_scraper] y [xml] son opcionales, pero solo uno de ellos ha de existir siempre. - @param path_nfo: ruta absoluta al archivo nfo + Method to read nfo files. + Nfo files have the following structure: url_scraper | xml + item_json [url_scraper] and [xml] are optional, but only one of them must always exist. 
+ @param path_nfo: absolute path to nfo file @type path_nfo: str - @param item: Si se pasa este parametro el item devuelto sera una copia de este con - los valores de 'infoLabels', 'library_playcounts' y 'path' leidos del nfo + @param item: If this parameter is passed the returned item will be a copy of it with the values ​​of 'infoLabels', 'library_playcounts' and 'path' read from the nfo @type: Item - @return: Una tupla formada por la cabecera (head_nfo ='url_scraper'|'xml') y el objeto 'item_json' + @return: A tuple consisting of the header (head_nfo = 'url_scraper' | 'xml') and the object 'item_json' @rtype: tuple (str, Item) """ head_nfo = "" @@ -77,15 +68,15 @@ def read_nfo(path_nfo, item=None): def save_movie(item, silent=False): """ - guarda en la libreria de peliculas el elemento item, con los valores que contiene. + saves the item element in the movie library, with the values ​​it contains. @type item: item - @param item: elemento que se va a guardar. + @param item: item to be saved. @rtype insertados: int - @return: el número de elementos insertados + @return: the number of elements inserted @rtype sobreescritos: int - @return: el número de elementos sobreescritos + @return: the number of overwritten elements @rtype fallidos: int - @return: el número de elementos fallidos o -1 si ha fallado todo + @return: the number of failed items or -1 if all failed """ logger.info() # logger.debug(item.tostring('\n')) @@ -94,34 +85,32 @@ def save_movie(item, silent=False): fallidos = 0 path = "" - # Itentamos obtener el titulo correcto: - # 1. contentTitle: Este deberia ser el sitio correcto, ya que title suele contener "Añadir a la videoteca..." + # We try to obtain the correct title: + # 1. contentTitle: This should be the correct site, since the title usually contains "Add to the video library..." # 2. fulltitle # 3. 
title # if item.contentTitle: item.title = item.contentTitle # elif item.fulltitle: item.title = item.fulltitle if not item.contentTitle: - # Colocamos el titulo correcto en su sitio para que scraper lo localize + # We put the correct title in its place so that the scraper can locate it if item.fulltitle: item.contentTitle = item.fulltitle else: item.contentTitle = item.title - # Si llegados a este punto no tenemos titulo, salimos + # If at this point we do not have a title, we leave if not item.contentTitle or not item.channel: logger.debug("contentTitle NOT FOUND") return 0, 0, -1, path # Salimos sin guardar scraper_return = scraper.find_and_set_infoLabels(item) - # Llegados a este punto podemos tener: - # scraper_return = True: Un item con infoLabels con la información actualizada de la peli - # scraper_return = False: Un item sin información de la peli (se ha dado a cancelar en la ventana) - # item.infoLabels['code'] == "" : No se ha encontrado el identificador de IMDB necesario para continuar, salimos + # At this point we can have: + # scraper_return = True: An item with infoLabels with the updated information of the movie + # scraper_return = False: An item without movie information (it has been canceled in the window) + # item.infoLabels['code'] == "" : The required IMDB identifier was not found to continue, we quit if not scraper_return or not item.infoLabels['code']: - # TODO de momento si no hay resultado no añadimos nada, - # aunq podriamos abrir un cuadro para introducir el identificador/nombre a mano logger.debug("NOT FOUND IN SCRAPER OR DO NOT HAVE code") return 0, 0, -1, path @@ -153,7 +142,7 @@ def save_movie(item, silent=False): break if not path: - # Crear carpeta + # Create folder path = filetools.join(MOVIES_PATH, ("%s [%s]" % (base_name, _id)).strip()) logger.info("Creating movie directory:" + path) if not filetools.mkdir(path): @@ -169,7 +158,7 @@ def save_movie(item, silent=False): json_exists = filetools.exists(json_path) if not nfo_exists: - # Creamos .nfo si no existe + # We create .nfo if it doesn't exist logger.info("Creating .nfo: " + nfo_path) head_nfo = scraper.get_nfo(item) @@ -178,18 +167,18 @@ def save_movie(item, silent=False): library_urls={}) else: - # Si existe .nfo, pero estamos añadiendo un nuevo canal lo abrimos + # If .nfo exists, but we are adding a new channel we open it head_nfo, item_nfo = read_nfo(nfo_path) if not strm_exists: - # Crear base_name.strm si no existe + # Create base_name.strm if it does not exist item_strm = Item(channel='videolibrary', action='play_from_library', strm_path=strm_path.replace(MOVIES_PATH, ""), contentType='movie', contentTitle=item.contentTitle) strm_exists = filetools.write(strm_path, '%s?%s' % (addon_name, item_strm.tourl())) item_nfo.strm_path = strm_path.replace(MOVIES_PATH, "") - # Solo si existen item_nfo y .strm continuamos + # We continue only if item_nfo and .strm exist if item_nfo and strm_exists: if json_exists: @@ -198,7 +187,7 @@ def save_movie(item, silent=False): else: insertados += 1 - # Si se ha marcado la opción de url de emergencia, se añade ésta a la película después de haber ejecutado Findvideos del canal + # If the emergency url option has been checked, it is added to the movie after running Findvideos from the channel try: headers = {} if item.headers: @@ -221,7 +210,7 @@ def save_movie(item, silent=False): if filetools.write(nfo_path, head_nfo + item_nfo.tojson()): #logger.info("FOLDER_MOVIES : %s" % FOLDER_MOVIES) - # actualizamos la videoteca de Kodi con la pelicula + # We update the Kodi 
video library with the movie if config.is_xbmc() and config.get_setting("videolibrary_kodi") and not silent: from platformcode import xbmc_videolibrary xbmc_videolibrary.update() @@ -229,7 +218,7 @@ def save_movie(item, silent=False): if not silent: p_dialog.close() return insertados, sobreescritos, fallidos, path - # Si llegamos a este punto es por q algo ha fallado + # If we get to this point it is because something has gone wrong logger.error("Could not save %s in the video library" % item.contentTitle) if not silent: p_dialog.update(100, config.get_localized_string(60063), item.contentTitle) @@ -423,37 +412,35 @@ def filter_list(episodelist, action=None, path=None): def save_tvshow(item, episodelist, silent=False): """ - guarda en la libreria de series la serie con todos los capitulos incluidos en la lista episodelist + stores in the series library the series with all the chapters included in the episodelist @type item: item - @param item: item que representa la serie a guardar + @param item: item that represents the series to save @type episodelist: list - @param episodelist: listado de items que representan los episodios que se van a guardar. + @param episodelist: list of items that represent the episodes to be saved. @rtype insertados: int - @return: el número de episodios insertados + @return: the number of episodes inserted @rtype sobreescritos: int - @return: el número de episodios sobreescritos + @return: the number of overwritten episodes @rtype fallidos: int - @return: el número de episodios fallidos o -1 si ha fallado toda la serie + @return: the number of failed episodes or -1 if the entire series has failed @rtype path: str - @return: directorio serie + @return: series directory """ logger.info() # logger.debug(item.tostring('\n')) path = "" - # Si llegados a este punto no tenemos titulo o code, salimos + # If at this point we do not have a title or code, we leave if not (item.contentSerieName or item.infoLabels['code']) or not item.channel: logger.debug("NOT FOUND contentSerieName or code") return 0, 0, -1, path # Salimos sin guardar scraper_return = scraper.find_and_set_infoLabels(item) - # Llegados a este punto podemos tener: - # scraper_return = True: Un item con infoLabels con la información actualizada de la serie - # scraper_return = False: Un item sin información de la peli (se ha dado a cancelar en la ventana) - # item.infoLabels['code'] == "" : No se ha encontrado el identificador de IMDB necesario para continuar, salimos + # At this point we can have: + # scraper_return = True: An item with infoLabels with the updated information of the series + # scraper_return = False: An item without movie information (it has been canceled in the window) + # item.infoLabels['code'] == "" : The required IMDB identifier was not found to continue, we quit if not scraper_return or not item.infoLabels['code']: - # TODO de momento si no hay resultado no añadimos nada, - # aunq podriamos abrir un cuadro para introducir el identificador/nombre a mano logger.debug("NOT FOUND IN SCRAPER OR DO NOT HAVE code") return 0, 0, -1, path @@ -464,8 +451,7 @@ def save_tvshow(item, episodelist, silent=False): elif item.infoLabels['code'][2] and item.infoLabels['code'][2] != 'None': _id = item.infoLabels['code'][2] else: - logger.error("NO ENCONTRADO EN SCRAPER O NO TIENE code: " + item.url - + ' / ' + item.infoLabels['code']) + logger.error("NOT FOUND IN SCRAPER OR HAS NO CODE: " + item.url + ' / ' + item.infoLabels['code']) return 0, 0, -1, path if config.get_setting("original_title_folder", 
"videolibrary") and item.infoLabels['originaltitle']: @@ -504,7 +490,7 @@ def save_tvshow(item, episodelist, silent=False): tvshow_path = filetools.join(path, "tvshow.nfo") if not filetools.exists(tvshow_path): - # Creamos tvshow.nfo, si no existe, con la head_nfo, info de la serie y marcas de episodios vistos + # We create tvshow.nfo, if it does not exist, with the head_nfo, series info and watched episode marks logger.info("Creating tvshow.nfo: " + tvshow_path) head_nfo = scraper.get_nfo(item) item.infoLabels['mediatype'] = "tvshow" @@ -516,7 +502,7 @@ def save_tvshow(item, episodelist, silent=False): item_tvshow.library_urls = {item.channel: item.url} else: - # Si existe tvshow.nfo, pero estamos añadiendo un nuevo canal actualizamos el listado de urls + # If tvshow.nfo exists, but we are adding a new channel we update the list of urls head_nfo, item_tvshow = read_nfo(tvshow_path) item_tvshow.fulltitle = item.fulltitle item_tvshow.channel = "videolibrary" @@ -524,15 +510,15 @@ def save_tvshow(item, episodelist, silent=False): item_tvshow.library_urls[item.channel] = item.url # FILTERTOOLS - # si el canal tiene filtro de idiomas, añadimos el canal y el show + # if the channel has a language filter, we add the channel and the show if episodelist and "list_language" in episodelist[0]: - # si ya hemos añadido un canal previamente con filtro, añadimos o actualizamos el canal y show + # if we have already added a previously filtered channel, we add or update the channel and show if "library_filter_show" in item_tvshow: if item.title_from_channel: item_tvshow.library_filter_show[item.channel] = item.title_from_channel else: item_tvshow.library_filter_show[item.channel] = item.show - # no habia ningún canal con filtro y lo generamos por primera vez + # there was no filter channel and we generated it for the first time else: if item.title_from_channel: item_tvshow.library_filter_show = {item.channel: item.title_from_channel} @@ -540,15 +526,15 @@ def save_tvshow(item, episodelist, silent=False): item_tvshow.library_filter_show = {item.channel: item.show} if item.channel != "downloads": - item_tvshow.active = 1 # para que se actualice a diario cuando se llame a service + item_tvshow.active = 1 # to be updated daily when service is called filetools.write(tvshow_path, head_nfo + item_tvshow.tojson()) if not episodelist: - # La lista de episodios esta vacia + # The episode list is empty return 0, 0, 0, path - # Guardar los episodios + # Save the episodes '''import time start_time = time.time()''' insertados, sobreescritos, fallidos = save_episodes(path, episodelist, item, silent=silent) @@ -561,27 +547,27 @@ def save_tvshow(item, episodelist, silent=False): def save_episodes(path, episodelist, serie, silent=False, overwrite=True): """ - guarda en la ruta indicada todos los capitulos incluidos en la lista episodelist + saves in the indicated path all the chapters included in the episodelist @type path: str - @param path: ruta donde guardar los episodios + @param path: path to save the episodes @type episodelist: list - @param episodelist: listado de items que representan los episodios que se van a guardar. + @param episodelist: list of items that represent the episodes to be saved. 
@type serie: item - @param serie: serie de la que se van a guardar los episodios + @param serie: series from which to save the episodes @type silent: bool - @param silent: establece si se muestra la notificación - @param overwrite: permite sobreescribir los ficheros existentes + @param silent: sets whether notification is displayed + @param overwrite: allows to overwrite existing files @type overwrite: bool @rtype insertados: int - @return: el número de episodios insertados + @return: the number of episodes inserted @rtype sobreescritos: int - @return: el número de episodios sobreescritos + @return: the number of overwritten episodes @rtype fallidos: int - @return: el número de episodios fallidos + @return: the number of failed episodes """ logger.info() episodelist = filter_list(episodelist, serie.action, path) - # No hay lista de episodios, no hay nada que guardar + # No episode list, nothing to save if not len(episodelist): logger.info("There is no episode list, we go out without creating strm") return 0, 0, 0 @@ -606,27 +592,27 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True): sobreescritos = 0 fallidos = 0 news_in_playcounts = {} - # Listamos todos los ficheros de la serie, asi evitamos tener que comprobar si existe uno por uno + # We list all the files in the series, so we avoid having to check if they exist one by one raiz, carpetas_series, ficheros = next(filetools.walk(path)) ficheros = [filetools.join(path, f) for f in ficheros] - # Silent es para no mostrar progreso (para service) + # Silent is to show no progress (for service) if not silent: # progress dialog p_dialog = platformtools.dialog_progress(config.get_localized_string(20000), config.get_localized_string(60064)) p_dialog.update(0, config.get_localized_string(60065)) - channel_alt = generictools.verify_channel(serie.channel) #Preparamos para añadir las urls de emergencia - emergency_urls_stat = config.get_setting("emergency_urls", channel_alt) #El canal quiere urls de emergencia? + channel_alt = generictools.verify_channel(serie.channel) # We prepare to add the emergency urls + emergency_urls_stat = config.get_setting("emergency_urls", channel_alt) # Does the channel want emergency urls? emergency_urls_succ = False try: channel = __import__('specials.%s' % channel_alt, fromlist=["specials.%s" % channel_alt]) except: channel = __import__('channels.%s' % channel_alt, fromlist=["channels.%s" % channel_alt]) - if serie.torrent_caching_fail: #Si el proceso de conversión ha fallado, no se cachean + if serie.torrent_caching_fail: # If the conversion process has failed, they are not cached emergency_urls_stat = 0 del serie.torrent_caching_fail new_episodelist = [] - # Obtenemos el numero de temporada y episodio y descartamos los q no lo sean + # We obtain the season and episode number and discard those that are not for e in episodelist: headers = {} @@ -636,52 +622,52 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True): try: season_episode = scrapertools.get_season_and_episode(e.title) - # Si se ha marcado la opción de url de emergencia, se añade ésta a cada episodio después de haber ejecutado Findvideos del canal - if e.emergency_urls and isinstance(e.emergency_urls, dict): del e.emergency_urls #Borramos trazas anteriores - json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower()) #Path del .json del episodio - if emergency_urls_stat == 1 and not e.emergency_urls and e.contentType == 'episode': #Guardamos urls de emergencia? 
+ # If the emergency url option has been checked, it is added to each episode after running Findvideos from the channel + if e.emergency_urls and isinstance(e.emergency_urls, dict): del e.emergency_urls # We erase previous traces + json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower()) # Path of the episode .json + if emergency_urls_stat == 1 and not e.emergency_urls and e.contentType == 'episode': # Do we keep emergency urls? if not silent: - p_dialog.update(0, 'Cacheando enlaces y archivos .torrent...', e.title) #progress dialog - if json_path in ficheros: #Si existe el .json sacamos de ahí las urls - if overwrite: #pero solo si se se sobrescriben los .json - json_epi = Item().fromjson(filetools.read(json_path)) #Leemos el .json - if json_epi.emergency_urls: #si existen las urls de emergencia... - e.emergency_urls = json_epi.emergency_urls #... las copiamos - else: #y si no... - e = emergency_urls(e, channel, json_path, headers=headers) #... las generamos + p_dialog.update(0, 'Caching links and .torrent files...', e.title) # progress dialog + if json_path in ficheros: # If there is the .json we get the urls from there + if overwrite: # but only if .json are overwritten + json_epi = Item().fromjson(filetools.read(json_path)) # We read the .json + if json_epi.emergency_urls: # if there are emergency urls ... + e.emergency_urls = json_epi.emergency_urls # ... we copy them + else: # if not... + e = emergency_urls(e, channel, json_path, headers=headers) # ... we generate them else: - e = emergency_urls(e, channel, json_path, headers=headers) #Si el episodio no existe, generamos las urls - if e.emergency_urls: #Si ya tenemos urls... - emergency_urls_succ = True #... es un éxito y vamos a marcar el .nfo - elif emergency_urls_stat == 2 and e.contentType == 'episode': #Borramos urls de emergencia? + e = emergency_urls(e, channel, json_path, headers=headers) # If the episode does not exist, we generate the urls + if e.emergency_urls: # If we already have urls... + emergency_urls_succ = True # ... is a success and we are going to mark the .nfo + elif emergency_urls_stat == 2 and e.contentType == 'episode': # Do we delete emergency urls? if e.emergency_urls: del e.emergency_urls - emergency_urls_succ = True #... es un éxito y vamos a marcar el .nfo + emergency_urls_succ = True # ... is a success and we are going to mark the .nfo - elif emergency_urls_stat == 3 and e.contentType == 'episode': #Actualizamos urls de emergencia? + elif emergency_urls_stat == 3 and e.contentType == 'episode': # Do we update emergency urls? if not silent: - p_dialog.update(0, 'Cacheando enlaces y archivos .torrent...', e.title) #progress dialog - e = emergency_urls(e, channel, json_path, headers=headers) #generamos las urls - if e.emergency_urls: #Si ya tenemos urls... - emergency_urls_succ = True #... es un éxito y vamos a marcar el .nfo + p_dialog.update(0, 'Caching links and .torrent files...', e.title) # progress dialog + e = emergency_urls(e, channel, json_path, headers=headers) # we generate the urls + if e.emergency_urls: # If we already have urls... + emergency_urls_succ = True # ... is a success and we are going to mark the .nfo if not e.infoLabels["tmdb_id"] or (serie.infoLabels["tmdb_id"] and e.infoLabels["tmdb_id"] != serie.infoLabels["tmdb_id"]): #en series multicanal, prevalece el infolabels... - e.infoLabels = serie.infoLabels #... del canal actual y no el del original + e.infoLabels = serie.infoLabels # ... 
the current channel and not the original one e.contentSeason, e.contentEpisodeNumber = season_episode.split("x") if e.videolibray_emergency_urls: del e.videolibray_emergency_urls if e.channel_redir: - del e.channel_redir #... y se borran las marcas de redirecciones + del e.channel_redir # ... and redirect marks are erased new_episodelist.append(e) except: if e.contentType == 'episode': logger.error("Unable to save %s emergency urls in the video library" % e.contentTitle) continue - # No hay lista de episodios, no hay nada que guardar + # No episode list, nothing to save if not len(new_episodelist): logger.info("There is no episode list, we go out without creating strm") return 0, 0, 0 - # fix float porque la division se hace mal en python 2.x + # fix float because division is done poorly in python 2.x try: t = float(100) / len(new_episodelist) except: @@ -718,9 +704,8 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True): json_exists = json_path in ficheros if not strm_exists: - # Si no existe season_episode.strm añadirlo - item_strm = Item(action='play_from_library', channel='videolibrary', - strm_path=strm_path.replace(TVSHOWS_PATH, ""), infoLabels={}) + # If there is no season_episode.strm add it + item_strm = Item(action='play_from_library', channel='videolibrary', strm_path=strm_path.replace(TVSHOWS_PATH, ""), infoLabels={}) item_strm.contentSeason = e.contentSeason item_strm.contentEpisodeNumber = e.contentEpisodeNumber item_strm.contentType = e.contentType @@ -728,7 +713,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True): # FILTERTOOLS if item_strm.list_language: - # si tvshow.nfo tiene filtro se le pasa al item_strm que se va a generar + # if tvshow.nfo has a filter it is passed to the item_strm to be generated if "library_filter_show" in serie: item_strm.library_filter_show = serie.library_filter_show @@ -741,38 +726,36 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True): item_nfo = None if not nfo_exists and e.infoLabels["code"]: - # Si no existe season_episode.nfo añadirlo + # If there is no season_episode.nfo add it scraper.find_and_set_infoLabels(e) head_nfo = scraper.get_nfo(e) - item_nfo = e.clone(channel="videolibrary", url="", action='findvideos', - strm_path=strm_path.replace(TVSHOWS_PATH, "")) + item_nfo = e.clone(channel="videolibrary", url="", action='findvideos', strm_path=strm_path.replace(TVSHOWS_PATH, "")) if item_nfo.emergency_urls: - del item_nfo.emergency_urls #Solo se mantiene en el .json del episodio + del item_nfo.emergency_urls # It only stays in the episode's .json nfo_exists = filetools.write(nfo_path, head_nfo + item_nfo.tojson()) - # Solo si existen season_episode.nfo y season_episode.strm continuamos + # We continue only if season_episode.nfo and season_episode.strm exist if nfo_exists and strm_exists: if not json_exists or overwrite: - # Obtenemos infoLabel del episodio + # We get infoLabel from the episode if not item_nfo: head_nfo, item_nfo = read_nfo(nfo_path) - # En series multicanal, prevalece el infolabels del canal actual y no el del original - if not e.infoLabels["tmdb_id"] or (item_nfo.infoLabels["tmdb_id"] \ - and e.infoLabels["tmdb_id"] != item_nfo.infoLabels["tmdb_id"]): + # In multichannel series, the infolabels of the current channel prevail and not that of the original + if not e.infoLabels["tmdb_id"] or (item_nfo.infoLabels["tmdb_id"] and e.infoLabels["tmdb_id"] != item_nfo.infoLabels["tmdb_id"]): e.infoLabels = item_nfo.infoLabels if 
filetools.write(json_path, e.tojson()): if not json_exists: logger.info("Inserted: %s" % json_path) insertados += 1 - # Marcamos episodio como no visto + # We mark episode as unseen news_in_playcounts[season_episode] = 0 - # Marcamos la temporada como no vista + # We mark the season as unseen news_in_playcounts["season %s" % e.contentSeason] = 0 - # Marcamos la serie como no vista + # We mark the series as unseen # logger.debug("serie " + serie.tostring('\n')) news_in_playcounts[serie.contentSerieName] = 0 @@ -796,25 +779,25 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True): p_dialog.close() if news_in_playcounts or emergency_urls_succ or serie.infoLabels["status"] == "Ended" or serie.infoLabels["status"] == "Canceled": - # Si hay nuevos episodios los marcamos como no vistos en tvshow.nfo ... + # If there are new episodes we mark them as unseen on tvshow.nfo ... tvshow_path = filetools.join(path, "tvshow.nfo") try: import datetime head_nfo, tvshow_item = read_nfo(tvshow_path) tvshow_item.library_playcounts.update(news_in_playcounts) - #Si la operación de insertar/borrar urls de emergencia en los .jsons de los episodios ha tenido éxito, se marca el .nfo + # If the emergency url insert / delete operation in the .jsons of the episodes was successful, the .nfo is checked if emergency_urls_succ: if tvshow_item.emergency_urls and not isinstance(tvshow_item.emergency_urls, dict): del tvshow_item.emergency_urls - if emergency_urls_stat in [1, 3]: #Operación de guardar/actualizar enlaces + if emergency_urls_stat in [1, 3]: # Save / update links operation if not tvshow_item.emergency_urls: tvshow_item.emergency_urls = dict() if tvshow_item.library_urls.get(serie.channel, False): tvshow_item.emergency_urls.update({serie.channel: True}) - elif emergency_urls_stat == 2: #Operación de Borrar enlaces + elif emergency_urls_stat == 2: # Delete links operation if tvshow_item.emergency_urls and tvshow_item.emergency_urls.get(serie.channel, False): - tvshow_item.emergency_urls.pop(serie.channel, None) #borramos la entrada del .nfo + tvshow_item.emergency_urls.pop(serie.channel, None) # delete the entry of the .nfo if tvshow_item.active == 30: tvshow_item.active = 1 @@ -822,12 +805,9 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True): tvshow_item.infoLabels = serie.infoLabels tvshow_item.infoLabels["title"] = tvshow_item.infoLabels["tvshowtitle"] - if max_sea == high_sea and max_epi == high_epi and (tvshow_item.infoLabels["status"] == "Ended" - or tvshow_item.infoLabels["status"] == "Canceled") and insertados == 0 and fallidos == 0 \ - and not tvshow_item.local_episodes_path: - tvshow_item.active = 0 # ... no la actualizaremos más - logger.debug("%s [%s]: serie 'Terminada' o 'Cancelada'. Se desactiva la actualización periódica" % \ - (serie.contentSerieName, serie.channel)) + if max_sea == high_sea and max_epi == high_epi and (tvshow_item.infoLabels["status"] == "Ended" or tvshow_item.infoLabels["status"] == "Canceled") and insertados == 0 and fallidos == 0 and not tvshow_item.local_episodes_path: + tvshow_item.active = 0 # ... nor we will update it more + logger.debug("%s [%s]: 'Finished' or 'Canceled' series. Periodic update is disabled" % (serie.contentSerieName, serie.channel)) update_last = datetime.date.today() tvshow_item.update_last = update_last.strftime('%Y-%m-%d') @@ -841,7 +821,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True): logger.error(traceback.format_exc()) fallidos = -1 else: - # ... 
si ha sido correcto actualizamos la videoteca de Kodi + # ... if it was correct we update the Kodi video library if config.is_xbmc() and config.get_setting("videolibrary_kodi") and not silent: from platformcode import xbmc_videolibrary xbmc_videolibrary.update() @@ -849,8 +829,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True): if fallidos == len(episodelist): fallidos = -1 - logger.debug("%s [%s]: inserted= %s, overwritten= %s, failed= %s" % - (serie.contentSerieName, serie.channel, insertados, sobreescritos, fallidos)) + logger.debug("%s [%s]: inserted= %s, overwritten= %s, failed= %s" % (serie.contentSerieName, serie.channel, insertados, sobreescritos, fallidos)) return insertados, sobreescritos, fallidos @@ -924,65 +903,63 @@ def process_local_episodes(local_episodes_path, path): def add_movie(item): """ - guarda una pelicula en la libreria de cine. La pelicula puede ser un enlace dentro de un canal o un video - descargado previamente. + Keep a movie at the movie library. The movie can be a link within a channel or a previously downloaded video. - Para añadir episodios descargados en local, el item debe tener exclusivamente: - - contentTitle: titulo de la pelicula - - title: titulo a mostrar junto al listado de enlaces -findvideos- ("Reproducir video local HD") - - infoLabels["tmdb_id"] o infoLabels["imdb_id"] + To add locally downloaded episodes, the item must have exclusively: + - contentTitle: title of the movie + - title: title to show next to the list of links -findvideos- ("Play local HD video") + - infoLabels ["tmdb_id"] o infoLabels ["imdb_id"] - contentType == "movie" - channel = "downloads" - - url : ruta local al video + - url: local path to the video @type item: item - @param item: elemento que se va a guardar. + @param item: item to be saved. """ logger.info() - #Para desambiguar títulos, se provoca que TMDB pregunte por el título realmente deseado - #El usuario puede seleccionar el título entre los ofrecidos en la primera pantalla - #o puede cancelar e introducir un nuevo título en la segunda pantalla - #Si lo hace en "Introducir otro nombre", TMDB buscará automáticamente el nuevo título - #Si lo hace en "Completar Información", cambia parcialmente al nuevo título, pero no busca en TMDB. Hay que hacerlo - #Si se cancela la segunda pantalla, la variable "scraper_return" estará en False. El usuario no quiere seguir + # To disambiguate titles, TMDB is caused to ask for the really desired title + # The user can select the title among those offered on the first screen + # or you can cancel and enter a new title on the second screen + # If you do it in "Enter another name", TMDB will automatically search for the new title + # If you do it in "Complete Information", it partially changes to the new title, but does not search TMDB. We have to do it + # If the second screen is canceled, the variable "scraper_return" will be False. 
The user does not want to continue - item = generictools.update_title(item) #Llamamos al método que actualiza el título con tmdb.find_and_set_infoLabels + item = generictools.update_title(item) # We call the method that updates the title with tmdb.find_and_set_infoLabels #if item.tmdb_stat: - # del item.tmdb_stat #Limpiamos el status para que no se grabe en la Videoteca + # del item.tmdb_stat # We clean the status so that it is not recorded in the Video Library new_item = item.clone(action="findvideos") insertados, sobreescritos, fallidos, path = save_movie(new_item) if fallidos == 0: platformtools.dialog_ok(config.get_localized_string(30131), - config.get_localized_string(30135) % new_item.contentTitle) # 'se ha añadido a la videoteca' + config.get_localized_string(30135) % new_item.contentTitle) # 'has been added to the video library' else: filetools.rmdirtree(path) platformtools.dialog_ok(config.get_localized_string(30131), - config.get_localized_string(60066) % new_item.contentTitle) #"ERROR, la pelicula NO se ha añadido a la videoteca") + config.get_localized_string(60066) % new_item.contentTitle) # "ERROR, the movie has NOT been added to the video library") def add_tvshow(item, channel=None): """ - Guarda contenido en la libreria de series. Este contenido puede ser uno de estos dos: - - La serie con todos los capitulos incluidos en la lista episodelist. - - Un solo capitulo descargado previamente en local. + Save content in the series library. This content can be one of these two: + - The series with all the chapters included in the episodelist. + - A single chapter previously downloaded locally. - Para añadir episodios descargados en local, el item debe tener exclusivamente: - - contentSerieName (o show): Titulo de la serie - - contentTitle: titulo del episodio para extraer season_and_episode ("1x01 Piloto") - - title: titulo a mostrar junto al listado de enlaces -findvideos- ("Reproducir video local") - - infoLabels["tmdb_id"] o infoLabels["imdb_id"] + To add locally downloaded episodes, the item must have exclusively: + - contentSerieName (or show): Title of the series + - contentTitle: title of the episode to extract season_and_episode ("1x01 Pilot") + - title: title to show next to the list of links -findvideos- ("Play local video") + - infoLabels ["tmdb_id"] o infoLabels ["imdb_id"] - contentType != "movie" - channel = "downloads" - - url : ruta local al video + - url: local path to the video @type item: item - @param item: item que representa la serie a guardar + @param item: item that represents the series to save @type channel: modulo - @param channel: canal desde el que se guardara la serie. - Por defecto se importara item.from_channel o item.channel + @param channel: channel from which the series will be saved. By default, item.from_channel or item.channel will be imported. """ logger.info("show=#" + item.show + "#") @@ -991,7 +968,7 @@ def add_tvshow(item, channel=None): itemlist = [item.clone()] else: - # Esta marca es porque el item tiene algo más aparte en el atributo "extra" + # This mark is because the item has something else apart in the "extra" attribute item.action = item.extra if item.extra else item.action if isinstance(item.extra, str) and "###" in item.extra: item.action = item.extra.split("###")[0] @@ -1009,18 +986,18 @@ def add_tvshow(item, channel=None): except ImportError: exec("import channels." 
+ item.channel + " as channel") - #Para desambiguar títulos, se provoca que TMDB pregunte por el título realmente deseado - #El usuario puede seleccionar el título entre los ofrecidos en la primera pantalla - #o puede cancelar e introducir un nuevo título en la segunda pantalla - #Si lo hace en "Introducir otro nombre", TMDB buscará automáticamente el nuevo título - #Si lo hace en "Completar Información", cambia parcialmente al nuevo título, pero no busca en TMDB. Hay que hacerlo - #Si se cancela la segunda pantalla, la variable "scraper_return" estará en False. El usuario no quiere seguir + # To disambiguate titles, TMDB is caused to ask for the really desired title + # The user can select the title among those offered on the first screen + # or you can cancel and enter a new title on the second screen + # If you do it in "Enter another name", TMDB will automatically search for the new title + # If you do it in "Complete Information", it partially changes to the new title, but does not search TMDB. We have to do it + # If the second screen is canceled, the variable "scraper_return" will be False. The user does not want to continue - item = generictools.update_title(item) #Llamamos al método que actualiza el título con tmdb.find_and_set_infoLabels + item = generictools.update_title(item) # We call the method that updates the title with tmdb.find_and_set_infoLabels #if item.tmdb_stat: - # del item.tmdb_stat #Limpiamos el status para que no se grabe en la Videoteca + # del item.tmdb_stat # We clean the status so that it is not recorded in the Video Library - # Obtiene el listado de episodios + # Get the episode list itemlist = getattr(channel, item.action)(item) if itemlist and not scrapertools.find_single_match(itemlist[0].title, r'(\d+.\d+)'): @@ -1040,34 +1017,34 @@ def add_tvshow(item, channel=None): if not insertados and not sobreescritos and not fallidos: filetools.rmdirtree(path) platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(60067) % item.show) - logger.error("La serie %s no se ha podido añadir a la videoteca. No se ha podido obtener ningun episodio" % item.show) + logger.error("The string %s could not be added to the video library. 
Could not get any episode" % item.show) elif fallidos == -1: filetools.rmdirtree(path) platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(60068) % item.show) - logger.error("La serie %s no se ha podido añadir a la videoteca" % item.show) + logger.error("The series %s could not be added to the video library" % item.show) elif fallidos == -2: filetools.rmdirtree(path) elif fallidos > 0: platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(60069) % item.show) - logger.error("No se han podido añadir %s episodios de la serie %s a la videoteca" % (fallidos, item.show)) + logger.error("Could not add %s episodes of series %s to the video library" % (fallidos, item.show)) else: platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(60070) % item.show) - logger.info("Se han añadido %s episodios de la serie %s a la videoteca" % (insertados, item.show)) + logger.info("%s episodes of series %s have been added to the video library" % (insertados, item.show)) if config.is_xbmc(): if config.get_setting("sync_trakt_new_tvshow", "videolibrary"): import xbmc from platformcode import xbmc_videolibrary if config.get_setting("sync_trakt_new_tvshow_wait", "videolibrary"): - # Comprobar que no se esta buscando contenido en la videoteca de Kodi + # Check that the Kodi video library is not already scanning for content while xbmc.getCondVisibility('Library.IsScanningVideo()'): xbmc.sleep(1000) - # Se lanza la sincronizacion para la videoteca de Kodi + # Synchronization for the Kodi video library is launched xbmc_videolibrary.sync_trakt_kodi() - # Se lanza la sincronización para la videoteca del addon + # Synchronization for the addon video library is launched xbmc_videolibrary.sync_trakt_addon(path) @@ -1079,52 +1056,52 @@ def emergency_urls(item, channel=None, path=None, headers={}): magnet_caching_e = magnet_caching except: magnet_caching_e = True - - """ - Llamamos a Findvideos del canal con la variable "item.videolibray_emergency_urls = True" para obtener la variable - "item.emergency_urls" con la lista de listas de tuplas de los enlaces torrent y de servidores directos para ese episodio o película - En la lista [0] siempre deben ir los enlaces torrents, si los hay. Si se desea cachear los .torrents, la búsqueda va contra esa lista. - En la lista dos irán los enlaces de servidores directos, pero también pueden ir enlaces magnet (que no son cacheables) + """ - #lanazamos un "lookup" en el "findvideos" del canal para obtener los enlaces de emergencia + We call Findvideos of the channel with the variable "item.videolibray_emergency_urls = True" to get the variable + "item.emergency_urls" with the list of lists of tuples of torrent links and direct servers for that episode or movie + Torrents should always go in list [0], if any. If you want to cache the .torrents, the search goes against that list. + The second list will contain the direct server links, but magnet links (which are not cacheable) can also go there. 
+ """ + # we launched a "lookup" in the "findvideos" of the channel to obtain the emergency links try: - if channel == None: #Si el llamador no ha aportado la estructura de channel, se crea - channel = generictools.verify_channel(item.channel) #Se verifica si es un clon, que devuelva "newpct1" + if channel == None: # If the caller has not provided the channel structure, it is created + channel = generictools.verify_channel(item.channel) # It is verified if it is a clone, which returns "newpct1" #channel = __import__('channels.%s' % channel, fromlist=["channels.%s" % channel]) channel = __import__('specials.%s' % channel_alt, fromlist=["specials.%s" % channel_alt]) - if hasattr(channel, 'findvideos'): #Si el canal tiene "findvideos"... - item.videolibray_emergency_urls = True #... se marca como "lookup" - channel_save = item.channel #... guarda el canal original por si hay fail-over en Newpct1 - category_save = item.category #... guarda la categoría original por si hay fail-over o redirección en Newpct1 - if item.channel_redir: #... si hay un redir, se restaura temporamente el canal alternativo - item.channel = scrapertools.find_single_match(item.url, 'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').lower() - item.category = scrapertools.find_single_match(item.url, 'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').capitalize() - item_res = getattr(channel, 'findvideos')(item) #... se procesa Findvideos - item_res.channel = channel_save #... restaura el canal original por si hay fail-over en Newpct1 - item_res.category = category_save #... restaura la categoría original por si hay fail-over o redirección en Newpct1 - item.category = category_save #... restaura la categoría original por si hay fail-over o redirección en Newpct1 - del item_res.videolibray_emergency_urls #... y se borra la marca de lookup + if hasattr(channel, 'findvideos'): # If the channel has "findvideos" ... + item.videolibray_emergency_urls = True # ... marks itself as "lookup" + channel_save = item.channel # ... save the original channel in case of fail-over in Newpct1 + category_save = item.category # ... save the original category in case of fail-over or redirection in Newpct1 + if item.channel_redir: # ... if there is a redir, the alternate channel is temporarily restored + item.channel = scrapertools.find_single_match(item.url, r'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').lower() + item.category = scrapertools.find_single_match(item.url, r'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').capitalize() + item_res = getattr(channel, 'findvideos')(item) # ... the process of Findvideos + item_res.channel = channel_save # ... restore the original channel in case there is a fail-over in Newpct1 + item_res.category = category_save # ... restore the original category in case there is a fail-over or redirection in Newpct1 + item.category = category_save # ... restore the original category in case there is a fail-over or redirection in Newpct1 + del item_res.videolibray_emergency_urls # ... and the lookup mark is erased if item.videolibray_emergency_urls: - del item.videolibray_emergency_urls #... y se borra la marca de lookup original + del item.videolibray_emergency_urls # ... and the original lookup mark is erased except: logger.error('ERROR when processing the title in Findvideos del Canal: ' + item.channel + ' / ' + item.title) logger.error(traceback.format_exc()) - item.channel = channel_save #... restaura el canal original por si hay fail-over o redirección en Newpct1 - item.category = category_save #... 
restaura la categoría original por si hay fail-over o redirección en Newpct1 - item_res = item.clone() #Si ha habido un error, se devuelve el Item original + item.channel = channel_save # ... restore the original channel in case of fail-over or redirection in Newpct1 + item.category = category_save # ... restore the original category in case there is a fail-over or redirection in Newpct1 + item_res = item.clone() # If there has been an error, the original Item is returned if item_res.videolibray_emergency_urls: - del item_res.videolibray_emergency_urls #... y se borra la marca de lookup + del item_res.videolibray_emergency_urls # ... and the lookup mark is erased if item.videolibray_emergency_urls: - del item.videolibray_emergency_urls #... y se borra la marca de lookup original + del item.videolibray_emergency_urls # ... and the original lookup mark is erased - #Si el usuario ha activado la opción "emergency_urls_torrents", se descargarán los archivos .torrent de cada título - else: #Si se han cacheado con éxito los enlaces... + # If the user has activated the option "emergency_urls_torrents", the .torrent files of each title will be downloaded + else: # If the links have been successfully cached ... try: referer = None post = None channel_bis = generictools.verify_channel(item.channel) if config.get_setting("emergency_urls_torrents", channel_bis) and item_res.emergency_urls and path != None: - videolibrary_path = config.get_videolibrary_path() #detectamos el path absoluto del título + videolibrary_path = config.get_videolibrary_path() # we detect the absolute path of the title movies = config.get_setting("folder_movies") series = config.get_setting("folder_tvshows") if movies in path: @@ -1135,16 +1112,16 @@ def emergency_urls(item, channel=None, path=None, headers={}): i = 1 if item_res.referer: referer = item_res.referer if item_res.post: post = item_res.post - for url in item_res.emergency_urls[0]: #Recorremos las urls de emergencia... + for url in item_res.emergency_urls[0]: # We go through the emergency urls ... torrents_path = re.sub(r'(?:\.\w+$)', '_%s.torrent' % str(i).zfill(2), path) path_real = '' if magnet_caching_e or not url.startswith('magnet'): - path_real = torrent.caching_torrents(url, referer, post, torrents_path=torrents_path, headers=headers) #... para descargar los .torrents - if path_real: #Si ha tenido éxito... - item_res.emergency_urls[0][i-1] = path_real.replace(videolibrary_path, '') #se guarda el "path" relativo + path_real = torrent.caching_torrents(url, referer, post, torrents_path=torrents_path, headers=headers) # ... to download the .torrents + if path_real: # If you have been successful ... 
+ item_res.emergency_urls[0][i-1] = path_real.replace(videolibrary_path, '') # the relative "path" is saved i += 1 - #Restauramos variables originales + # We restore original variables if item.referer: item_res.referer = item.referer elif item_res.referer: @@ -1158,7 +1135,7 @@ def emergency_urls(item, channel=None, path=None, headers={}): except: logger.error('ERROR when caching the .torrent of: ' + item.channel + ' / ' + item.title) logger.error(traceback.format_exc()) - item_res = item.clone() #Si ha habido un error, se devuelve el Item original + item_res = item.clone() # If there has been an error, the original Item is returned #logger.debug(item_res.emergency_urls) - return item_res #Devolvemos el Item actualizado con los enlaces de emergencia + return item_res # We return the updated Item with the emergency links diff --git a/core/ziptools.py b/core/ziptools.py index 1154e8f7..f95750b7 100644 --- a/core/ziptools.py +++ b/core/ziptools.py @@ -17,8 +17,8 @@ from core import filetools class ziptools(object): def extract(self, file, dir, folder_to_extract="", overwrite_question=False, backup=False): - logger.info("file=%s" % file) - logger.info("dir=%s" % dir) + logger.info("file= %s" % file) + logger.info("dir= %s" % dir) if not dir.endswith(':') and not filetools.exists(dir): filetools.mkdir(dir) @@ -32,7 +32,7 @@ class ziptools(object): name = nameo.replace(':', '_').replace('<', '_').replace('>', '_').replace('|', '_').replace('"', '_').replace('?', '_').replace('*', '_') logger.info("name=%s" % nameo) if not name.endswith('/'): - logger.info("no es un directorio") + logger.info("it's not a directory") try: (path, filename) = filetools.split(filetools.join(dir, name)) logger.info("path=%s" % path) @@ -53,31 +53,28 @@ class ziptools(object): try: if filetools.exists(outfilename) and overwrite_question: from platformcode import platformtools - dyesno = platformtools.dialog_yesno("El archivo ya existe", - "El archivo %s a descomprimir ya existe" \ - ", ¿desea sobrescribirlo?" \ - % filetools.basename(outfilename)) + dyesno = platformtools.dialog_yesno("File already exists", "File %s to unzip already exists, do you want to overwrite it?" 
% filetools.basename(outfilename)) if not dyesno: break if backup: import time - hora_folder = "Copia seguridad [%s]" % time.strftime("%d-%m_%H-%M", time.localtime()) + hora_folder = "Backup [%s]" % time.strftime("%d-%m_%H-%M", time.localtime()) backup = filetools.join(config.get_data_path(), 'backups', hora_folder, folder_to_extract) if not filetools.exists(backup): filetools.mkdir(backup) filetools.copy(outfilename, filetools.join(backup, filetools.basename(outfilename))) if not filetools.write(outfilename, zf.read(nameo), silent=True, vfs=VFS): #TRUNCA en FINAL en Kodi 19 con VFS - logger.error("Error en fichero " + nameo) + logger.error("File error " + nameo) except: import traceback logger.error(traceback.format_exc()) - logger.error("Error en fichero " + nameo) + logger.error("File error " + nameo) try: zf.close() except: - logger.info("Error cerrando .zip " + file) + logger.info("Error closing .zip " + file) def _createstructure(self, file, dir): self._makedirs(self._listdirs(file), dir) diff --git a/lib/cloudscraper/__init__.py b/lib/cloudscraper/__init__.py index eb957a63..d10d2f73 100644 --- a/lib/cloudscraper/__init__.py +++ b/lib/cloudscraper/__init__.py @@ -1,8 +1,26 @@ +# ------------------------------------------------------------------------------- # + import logging import re +import requests import sys import ssl -import requests + +from collections import OrderedDict +from copy import deepcopy + +from requests.adapters import HTTPAdapter +from requests.sessions import Session +from requests_toolbelt.utils import dump + +from time import sleep + +# ------------------------------------------------------------------------------- # + +try: + import brotli +except ImportError: + pass try: import copyreg @@ -17,12 +35,12 @@ except ImportError: else: from html.parser import HTMLParser -from copy import deepcopy -from time import sleep -from collections import OrderedDict +try: + from urlparse import urlparse, urljoin +except ImportError: + from urllib.parse import urlparse, urljoin -from requests.sessions import Session -from requests.adapters import HTTPAdapter +# ------------------------------------------------------------------------------- # from .exceptions import ( CloudflareLoopProtection, @@ -37,25 +55,9 @@ from .interpreters import JavaScriptInterpreter from .reCaptcha import reCaptcha from .user_agent import User_Agent -try: - from requests_toolbelt.utils import dump -except ImportError: - pass - -try: - import brotli -except ImportError: - pass - -try: - from urlparse import urlparse, urljoin -except ImportError: - from urllib.parse import urlparse, urljoin - - # ------------------------------------------------------------------------------- # -__version__ = '1.2.36' +__version__ = '1.2.40' # ------------------------------------------------------------------------------- # @@ -107,6 +109,9 @@ class CloudScraper(Session): self.ssl_context = kwargs.pop('ssl_context', None) self.interpreter = kwargs.pop('interpreter', 'native') self.recaptcha = kwargs.pop('recaptcha', {}) + self.requestPreHook = kwargs.pop('requestPreHook', None) + self.requestPostHook = kwargs.pop('requestPostHook', None) + self.allow_brotli = kwargs.pop( 'allow_brotli', True if 'brotli' in sys.modules.keys() else False @@ -213,19 +218,46 @@ class CloudScraper(Session): if kwargs.get('proxies') and kwargs.get('proxies') != self.proxies: self.proxies = kwargs.get('proxies') - resp = self.decodeBrotli( + # ------------------------------------------------------------------------------- # + # Pre-Hook the 
request via user defined function. + # ------------------------------------------------------------------------------- # + + if self.requestPreHook: + (method, url, args, kwargs) = self.requestPreHook( + self, + method, + url, + *args, + **kwargs + ) + + # ------------------------------------------------------------------------------- # + # Make the request via requests. + # ------------------------------------------------------------------------------- # + + response = self.decodeBrotli( super(CloudScraper, self).request(method, url, *args, **kwargs) ) # ------------------------------------------------------------------------------- # - # Debug request + # Debug the request via the Response object. # ------------------------------------------------------------------------------- # if self.debug: - self.debugRequest(resp) + self.debugRequest(response) + + # ------------------------------------------------------------------------------- # + # Post-Hook the request aka Post-Hook the response via user defined function. + # ------------------------------------------------------------------------------- # + + if self.requestPostHook: + response = self.requestPostHook(self, response) + + if self.debug: + self.debugRequest(response) # Check if Cloudflare anti-bot is on - if self.is_Challenge_Request(resp): + if self.is_Challenge_Request(response): # ------------------------------------------------------------------------------- # # Try to solve the challenge and send it back # ------------------------------------------------------------------------------- # @@ -239,12 +271,12 @@ class CloudScraper(Session): self._solveDepthCnt += 1 - resp = self.Challenge_Response(resp, **kwargs) + response = self.Challenge_Response(response, **kwargs) else: - if not resp.is_redirect and resp.status_code not in [429, 503]: + if not response.is_redirect and response.status_code not in [429, 503]: self._solveDepthCnt = 0 - return resp + return response # ------------------------------------------------------------------------------- # # check if the response contains a valid Cloudflare challenge @@ -259,7 +291,7 @@ class CloudScraper(Session): and re.search( r'
', formPayload['form']): + for challengeParam in re.findall(r'^\s*', formPayload['form'], re.M | re.S): inputPayload = dict(re.findall(r'(\S+)="(\S+)"', challengeParam)) if inputPayload.get('name') in ['r', 'jschl_vc', 'pass']: payload.update({inputPayload['name']: inputPayload['value']}) diff --git a/lib/cloudscraper/interpreters/__init__.py b/lib/cloudscraper/interpreters/__init__.py index 10955552..af937b4a 100644 --- a/lib/cloudscraper/interpreters/__init__.py +++ b/lib/cloudscraper/interpreters/__init__.py @@ -49,7 +49,7 @@ class JavaScriptInterpreter(ABC): def solveChallenge(self, body, domain): try: - return float(self.eval(body, domain)) + return '{0:.10f}'.format(float(self.eval(body, domain))) except Exception: raise CloudflareSolveError( 'Error trying to solve Cloudflare IUAM Javascript, they may have changed their technique.' diff --git a/lib/cloudscraper/interpreters/encapsulated.py b/lib/cloudscraper/interpreters/encapsulated.py index 98faf48f..d98fa236 100644 --- a/lib/cloudscraper/interpreters/encapsulated.py +++ b/lib/cloudscraper/interpreters/encapsulated.py @@ -9,32 +9,38 @@ def template(body, domain): try: js = re.search( - r'setTimeout\(function\(\){\s+(.*?a\.value = \S+)', + r'setTimeout\(function\(\){\s+(.*?a\.value\s*=\s*\S+toFixed\(10\);)', body, re.M | re.S ).group(1) except Exception: raise ValueError('Unable to identify Cloudflare IUAM Javascript on website. {}'.format(BUG_REPORT)) - jsEnv = ''' - String.prototype.italics=function(str) {{return "" + this + "";}}; + jsEnv = '''String.prototype.italics=function(str) {{return "" + this + "";}}; + var subVars= {{{subVars}}}; var document = {{ createElement: function () {{ return {{ firstChild: {{ href: "https://{domain}/" }} }} }}, - getElementById: function () {{ - return {{"innerHTML": "{innerHTML}"}}; + getElementById: function (str) {{ + return {{"innerHTML": subVars[str]}}; }} }}; ''' try: - innerHTML = re.search( - r']*)? id="([^<>]*?)">([^<>]*?)', - body, - re.MULTILINE | re.DOTALL + js = js.replace( + r"(setInterval(function(){}, 100),t.match(/https?:\/\//)[0]);", + r"t.match(/https?:\/\//)[0];" ) - innerHTML = innerHTML.group(2) if innerHTML else '' + + k = re.search(r" k\s*=\s*'(?P\S+)';", body).group('k') + r = re.compile(r'
<div id="{}(?P<id>\d+)">\s*(?P<jsfuck>[^<>]*)</div>
'.format(k)) + + subVars = '' + for m in r.finditer(body): + subVars = '{}\n\t\t{}{}: {},\n'.format(subVars, k, m.group('id'), m.group('jsfuck')) + subVars = subVars[:-2] except: # noqa logging.error('Error extracting Cloudflare IUAM Javascript. {}'.format(BUG_REPORT)) @@ -46,7 +52,7 @@ def template(body, domain): ' ', jsEnv.format( domain=domain, - innerHTML=innerHTML + subVars=subVars ), re.MULTILINE | re.DOTALL ), diff --git a/lib/cloudscraper/interpreters/native.py b/lib/cloudscraper/interpreters/native.py index 94d238bb..f71474cf 100644 --- a/lib/cloudscraper/interpreters/native.py +++ b/lib/cloudscraper/interpreters/native.py @@ -100,8 +100,8 @@ class ChallengeInterpreter(JavaScriptInterpreter): # ------------------------------------------------------------------------------- # - def flatten(l): - return sum(map(flatten, l), []) if isinstance(l, list) else [l] + def flatten(lists): + return sum(map(flatten, lists), []) if isinstance(lists, list) else [lists] # ------------------------------------------------------------------------------- # @@ -114,6 +114,7 @@ class ChallengeInterpreter(JavaScriptInterpreter): # Hackery Parser for Math stack = [] bstack = [] + for i in flatten(pyparsing.nestedExpr().parseString(jsFuck).asList()): if i == '+': stack.append(bstack) @@ -152,13 +153,35 @@ class ChallengeInterpreter(JavaScriptInterpreter): try: jsfuckChallenge = re.search( r"setTimeout\(function\(\){\s+var.*?f,\s*(?P\w+).*?:(?P\S+)};" - r".*?\('challenge-form'\);\s+;(?P.*?a\.value)" - r"(?:.*id=\"cf-dn-.*?>(?P\S+)<)?", + r".*?\('challenge-form'\);.*?;(?P.*?a\.value)\s*=\s*\S+\.toFixed\(10\);", body, re.DOTALL | re.MULTILINE ).groupdict() except AttributeError: - raise CloudflareSolveError('There was an issue extracting the Cloudflare challenge.') + raise CloudflareSolveError('There was an issue extracting "jsfuckChallenge" from the Cloudflare challenge.') + + kJSFUCK = re.search(r'(;|)\s*k.=(?P\S+);', jsfuckChallenge['challenge'], re.S | re.M) + if kJSFUCK: + try: + kJSFUCK = jsfuckToNumber(kJSFUCK.group('kJSFUCK')) + except IndexError: + raise CloudflareSolveError('There was an issue extracting "kJSFUCK" from the Cloudflare challenge.') + + try: + kID = re.search(r"\s*k\s*=\s*'(?P\S+)';", body).group('kID') + except IndexError: + raise CloudflareSolveError('There was an issue extracting "kID" from the Cloudflare challenge.') + + try: + r = re.compile(r'
<div id="{}(?P<id>\d+)">\s*(?P<jsfuck>[^<>]*)</div>
'.format(kID)) + + kValues = {} + for m in r.finditer(body): + kValues[int(m.group('id'))] = m.group('jsfuck') + + jsfuckChallenge['k'] = kValues[kJSFUCK] + except (AttributeError, IndexError): + raise CloudflareSolveError('There was an issue extracting "kValues" from the Cloudflare challenge.') jsfuckChallenge['challenge'] = re.finditer( r'{}.*?([+\-*/])=(.*?);(?=a\.value|{})'.format( @@ -193,8 +216,8 @@ class ChallengeInterpreter(JavaScriptInterpreter): # ------------------------------------------------------------------------------- # - if not jsfuckChallenge['k'] and '+ t.length' in body: - jschl_answer += len(domain) + # if not jsfuckChallenge['k'] and '+ t.length' in body: + # jschl_answer += len(domain) # ------------------------------------------------------------------------------- # diff --git a/lib/megaserver/cursor.py b/lib/megaserver/cursor.py index ec387b9c..32b84e6a 100644 --- a/lib/megaserver/cursor.py +++ b/lib/megaserver/cursor.py @@ -66,8 +66,14 @@ class Cursor(object): def prepare_decoder(self,offset): initial_value = self.initial_value + int(offset/16) - from lib import pyaes - self.decryptor = pyaes.AESModeOfOperationCTR(self._file._client.a32_to_str(self.k), counter=pyaes.Counter(initial_value=initial_value)) + try: + from Cryptodome.Cipher import AES + from Cryptodome.Util import Counter + self.decryptor = AES.new(self._file._client.a32_to_str(self.k), AES.MODE_CTR, counter = Counter.new(128, initial_value = initial_value)) + except: + from Crypto.Cipher import AES + from Crypto.Util import Counter + self.decryptor = AES.new(self._file._client.a32_to_str(self.k), AES.MODE_CTR, counter = Counter.new(128, initial_value = initial_value)) rest = offset - int(offset/16)*16 if rest: self.decode(str(0)*rest) diff --git a/platformcode/config.py b/platformcode/config.py index e94b0dc8..af2f0c2f 100644 --- a/platformcode/config.py +++ b/platformcode/config.py @@ -1,19 +1,13 @@ # -*- coding: utf-8 -*- # ------------------------------------------------------------ -# Parámetros de configuración (kodi) +# Configuration parameters (kodi) # ------------------------------------------------------------ -#from builtins import str -import sys +# from builtins import str +import sys, os, re, xbmc, xbmcaddon PY3 = False if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int -import os -import re - -import xbmc -import xbmcaddon - PLUGIN_NAME = "kod" __settings__ = xbmcaddon.Addon(id="plugin.video." 
+ PLUGIN_NAME) @@ -29,7 +23,7 @@ def get_addon_core(): def get_addon_version(with_fix=True): ''' - Devuelve el número de versión del addon, y opcionalmente número de fix si lo hay + Returns the version number of the addon, and optionally fix number if there is one ''' if with_fix: return __settings__.getAddonInfo('version') + " " + get_addon_version_fix() @@ -61,17 +55,17 @@ def dev_mode(): def get_platform(full_version=False): """ - Devuelve la información la version de xbmc o kodi sobre el que se ejecuta el plugin + Returns the information the version of xbmc or kodi on which the plugin is run - @param full_version: indica si queremos toda la informacion o no + @param full_version: indicates if we want all the information or not @type full_version: bool @rtype: str o dict - @return: Si el paramentro full_version es True se retorna un diccionario con las siguientes claves: - 'num_version': (float) numero de version en formato XX.X - 'name_version': (str) nombre clave de cada version - 'video_db': (str) nombre del archivo que contiene la base de datos de videos - 'plaform': (str) esta compuesto por "kodi-" o "xbmc-" mas el nombre de la version segun corresponda. - Si el parametro full_version es False (por defecto) se retorna el valor de la clave 'plaform' del diccionario anterior. + @return: If the full_version parameter is True, a dictionary with the following keys is returned: + 'num_version': (float) version number in XX.X format + 'name_version': (str) key name of each version + 'video_db': (str) name of the file that contains the video database + 'plaform': (str) is made up of "kodi-" or "xbmc-" plus the version name as appropriate. + If the full_version parameter is False (default) the value of the 'plaform' key from the previous dictionary is returned. """ ret = {} @@ -130,7 +124,7 @@ def get_channel_url(findhostMethod=None, name=None): return channels_data[name] def get_system_platform(): - """ fonction: pour recuperer la platform que xbmc tourne """ + """ function: to recover the platform that xbmc is running """ platform = "unknown" if xbmc.getCondVisibility("system.platform.linux"): platform = "linux" @@ -172,7 +166,7 @@ def enable_disable_autorun(is_enabled): return True def get_all_settings_addon(): - # Lee el archivo settings.xml y retorna un diccionario con {id: value} + # Read the settings.xml file and return a dictionary with {id: value} from core import scrapertools infile = open(os.path.join(get_data_path(), "settings.xml"), "r") @@ -194,27 +188,26 @@ def open_settings(): def get_setting(name, channel="", server="", default=None): """ - Retorna el valor de configuracion del parametro solicitado. + Returns the configuration value of the requested parameter. - Devuelve el valor del parametro 'name' en la configuracion global, en la configuracion propia del canal 'channel' - o en la del servidor 'server'. + Returns the value of the parameter 'name' in the global configuration, in the own configuration of the channel 'channel' or in that of the server 'server'. - Los parametros channel y server no deben usarse simultaneamente. Si se especifica el nombre del canal se devolvera - el resultado de llamar a channeltools.get_channel_setting(name, channel, default). Si se especifica el nombre del - servidor se devolvera el resultado de llamar a servertools.get_channel_setting(name, server, default). Si no se - especifica ninguno de los anteriores se devolvera el valor del parametro en la configuracion global si existe o - el valor default en caso contrario. 
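The requestPreHook / requestPostHook keyword arguments added to lib/cloudscraper/__init__.py above let a caller intercept the request before it is sent and the response before it is returned. A usage sketch based only on the call sites shown in that hunk; the hook bodies, the header value and the URL are illustrative:

from lib import cloudscraper   # the package bundled under lib/cloudscraper

def pre_hook(session, method, url, *args, **kwargs):
    # called before the request; must hand back the (possibly modified) call parameters
    kwargs.setdefault('headers', {})['X-Example'] = 'demo'
    return method, url, args, kwargs

def post_hook(session, response):
    # called after the request; whatever is returned replaces the response
    print(response.status_code, response.url)
    return response

scraper = cloudscraper.CloudScraper(requestPreHook=pre_hook, requestPostHook=post_hook)
# html = scraper.get('https://example.com').text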
+ The channel and server parameters should not be used simultaneously. If the channel name is specified, the result + of calling channeltools.get_channel_setting(name, channel, default) is returned. If the server name is + specified, the result of calling servertools.get_channel_setting(name, server, default) is returned. If neither + is specified, the value of the parameter in the global configuration is returned if it exists, or + the default value otherwise. - @param name: nombre del parametro + @param name: parameter name @type name: str - @param channel: nombre del canal + @param channel: channel name @type channel: str - @param server: nombre del servidor + @param server: server name @type server: str - @param default: valor devuelto en caso de que no exista el parametro name + @param default: value returned if the parameter 'name' does not exist @type default: any - @return: El valor del parametro 'name' + @return: The value of the parameter 'name' @rtype: any """ @@ -261,26 +254,24 @@ def get_setting(name, channel="", server="", default=None): def set_setting(name, value, channel="", server=""): """ - Fija el valor de configuracion del parametro indicado. + Sets the configuration value of the indicated parameter. - Establece 'value' como el valor del parametro 'name' en la configuracion global o en la configuracion propia del - canal 'channel'. - Devuelve el valor cambiado o None si la asignacion no se ha podido completar. + Sets 'value' as the value of the parameter 'name' in the global configuration or in the configuration of the channel 'channel'. + Returns the changed value, or None if the assignment could not be completed. - Si se especifica el nombre del canal busca en la ruta \addon_data\plugin.video.kod\settings_channels el - archivo channel_data.json y establece el parametro 'name' al valor indicado por 'value'. Si el archivo - channel_data.json no existe busca en la carpeta channels el archivo channel.json y crea un archivo channel_data.json - antes de modificar el parametro 'name'. - Si el parametro 'name' no existe lo añade, con su valor, al archivo correspondiente. + If the channel name is specified, it looks in the path \addon_data\plugin.video.kod\settings_channels for the + channel_data.json file and sets the parameter 'name' to the value indicated by 'value'. If the + channel_data.json file does not exist, it looks in the channels folder for the channel.json file and creates a channel_data.json file before modifying the 'name' parameter. + If the parameter 'name' does not exist, it is added, with its value, to the corresponding file.
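A short usage sketch of the two helpers documented above; the setting ids and the channel name are placeholders, not real entries:

from platformcode import config

# global setting, read from the add-on's settings.xml
timeout = config.get_setting('some_global_option', default=15)

# channel-scoped setting, delegated to channeltools.get_channel_setting()
newest = config.get_setting('include_in_newest_peliculas', channel='somechannel', default=True)

# channel-scoped write; set_setting() returns the value on success and None on failure
if config.set_setting('include_in_newest_peliculas', False, channel='somechannel') is None:
    print('value could not be saved')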
- Parametros: - name -- nombre del parametro - value -- valor del parametro - channel [opcional] -- nombre del canal + Parameters: + name - name of the parameter + value - value of the parameter + channel [optional] - channel name - Retorna: - 'value' en caso de que se haya podido fijar el valor y None en caso contrario + Returns: + 'value' if the value could be set and None otherwise """ if channel: @@ -304,7 +295,7 @@ def set_setting(name, value, channel="", server=""): except Exception as ex: from platformcode import logger - logger.error("Error al convertir '%s' no se guarda el valor \n%s" % (name, ex)) + logger.error("Error converting '%s' value is not saved \n%s" % (name, ex)) return None return value @@ -322,7 +313,7 @@ def get_localized_string(code): # All encodings to utf8 elif not PY3 and isinstance(dev, str): dev = unicode(dev, "utf8", errors="replace").encode("utf8") - + # Bytes encodings to utf8 elif PY3 and isinstance(dev, bytes): dev = dev.decode("utf8") @@ -365,7 +356,7 @@ def get_runtime_path(): def get_data_path(): dev = xbmc.translatePath(__settings__.getAddonInfo('Profile')) - # Crea el directorio si no existe + # Create the directory if it doesn't exist if not os.path.exists(dev): os.makedirs(dev) @@ -405,7 +396,7 @@ def verify_directories_created(): for path, default in config_paths: saved_path = get_setting(path) - # videoteca + # video store if path == "videolibrarypath": if not saved_path: saved_path = xbmc_videolibrary.search_library_path() @@ -435,7 +426,7 @@ def verify_directories_created(): if not filetools.exists(content_path): logger.debug("Creating %s: %s" % (path, content_path)) - # si se crea el directorio + # if the directory is created filetools.mkdir(content_path) from platformcode import xbmc_videolibrary @@ -444,11 +435,10 @@ def verify_directories_created(): try: from core import scrapertools - # Buscamos el archivo addon.xml del skin activo - skindir = filetools.join(xbmc.translatePath("special://home"), 'addons', xbmc.getSkinDir(), - 'addon.xml') - if not os.path.isdir(skindir): return # No hace falta mostrar error en el log si no existe la carpeta - # Extraemos el nombre de la carpeta de resolución por defecto + # We look for the addon.xml file of the active skin + skindir = filetools.join(xbmc.translatePath("special://home"), 'addons', xbmc.getSkinDir(), 'addon.xml') + if not os.path.isdir(skindir): return # No need to show error in log if folder doesn't exist + # We extract the name of the default resolution folder folder = "" data = filetools.read(skindir) res = scrapertools.find_multiple_matches(data, '()') @@ -457,22 +447,18 @@ def verify_directories_created(): folder = scrapertools.find_single_match(r, 'folder="([^"]+)"') break - # Comprobamos si existe en el addon y sino es así, la creamos + # We check if it exists in the addon and if not, we create it default = filetools.join(get_runtime_path(), 'resources', 'skins', 'Default') if folder and not filetools.exists(filetools.join(default, folder)): filetools.mkdir(filetools.join(default, folder)) - # Copiamos el archivo a dicha carpeta desde la de 720p si éste no existe o si el tamaño es diferente + # We copy the file to said folder from the 720p folder if it does not exist or if the size is different if folder and folder != '720p': for root, folders, files in filetools.walk(filetools.join(default, '720p')): for f in files: - if not filetools.exists(filetools.join(default, folder, f)) or \ - (filetools.getsize(filetools.join(default, folder, f)) != - filetools.getsize(filetools.join(default, 
'720p', f))): - filetools.copy(filetools.join(default, '720p', f), - filetools.join(default, folder, f), - True) + if not filetools.exists(filetools.join(default, folder, f)) or (filetools.getsize(filetools.join(default, folder, f)) != filetools.getsize(filetools.join(default, '720p', f))): + filetools.copy(filetools.join(default, '720p', f), filetools.join(default, folder, f), True) except: import traceback - logger.error("Al comprobar o crear la carpeta de resolución") + logger.error("When checking or creating the resolution folder") logger.error(traceback.format_exc()) diff --git a/platformcode/custom_code.py b/platformcode/custom_code.py deleted file mode 100644 index 0881057f..00000000 --- a/platformcode/custom_code.py +++ /dev/null @@ -1,380 +0,0 @@ -# -*- coding: utf-8 -*- -# -------------------------------------------------------------------------------- -# Updater (kodi) -# -------------------------------------------------------------------------------- - -#from builtins import str -import sys -PY3 = False -if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int - -import traceback -import xbmc -import xbmcaddon -import threading -import subprocess -import time - -from platformcode import config, logger, platformtools - -from core import jsontools -from core import filetools - -json_data_file_name = 'custom_code.json' - - -def init(): - logger.info() - - """ - Todo el código añadido al add-on se borra con cada actualización. Esta función permite restaurarlo automáticamente con cada actualización. Esto permite al usuario tener su propio código, bajo su responsabilidad, y restaurarlo al add-on cada vez que se actualiza. - - El mecanismo funciona copiando el contenido de la carpeta-arbol "./userdata/addon_data/plugin.video.kod/custom_code/..." sobre - las carpetas de código del add-on. No verifica el contenido, solo vuelca(reemplaza) el contenido de "custom_code". - - El usuario almacenará en las subcarpetas de "custom_code" su código actualizado y listo para ser copiado en cualquier momento. - Si no se desea que copie algo, simplemente se borra de "custom_code" y ya no se copiará en la próxima actualización. - - Los pasos que sigue esta función, son los siguientes: - - 1.- La función se llama desde service.py, desde la función inicial: - # Copia Custom code a las carpetas de Alfa desde la zona de Userdata - from platformcode import custom_code - custom_code.init() - - 2.- En el inicio de Kodi, comprueba si existe la carpeta "custom_code" en "./userdata/addon_data/plugin.video.kod/". - Si no existe, la crea y sale sin más, dando al ususario la posibilidad de copiar sobre esa estructura su código, - y que la función la vuelque sobre el add-on en el próximo inicio de Kodi. - - 3.- En el siguiente inicio de Kodi, comprueba si existe el custom_code.json en la carpeta root del add-on. - Si no existe, lo crea con el número de versión del add-on vacío, para permitir que se copien los archivos en esta pasada. - - 4.- Verifica que el número de versión del add-on es diferente de el de custom_code.json. Si es la misma versión, - se sale porque ya se realizo la copia anteriormente. - Si la versión es distinta, se realiza el volcado de todos los archivos de la carpeta-árbol "custom_code" sobre el add-on. - Si la carpeta de destino no existe, dará un error y se cancelará la copia. Se considera que no tienen sentido nuevas carpetas. 
- - 5.- Si la copia ha terminado con éxito, se actualiza el custom_code.json con el número de versión del add-on, - para que en inicios sucesivos de Kodi no se realicen las copias, hasta que el add-on cambie de versión. - En el número de versión del add-on no se considera el número de fix. - - Tiempos: Copiando 7 archivos de prueba, el proceso ha tardado una décima de segundo. - """ - - try: - #Borra el .zip de instalación de Alfa de la carpeta Packages, por si está corrupto, y que así se pueda descargar de nuevo - version = 'plugin.video.kod-%s.zip' % config.get_addon_version(with_fix=False) - filetools.remove(filetools.join(xbmc.translatePath('special://home'), 'addons', 'packages', version), True) - - #Verifica si Kodi tiene algún achivo de Base de Datos de Vídeo de versiones anteriores, entonces los borra - verify_Kodi_video_DB() - - #LIBTORRENT: se descarga el binario de Libtorrent cada vez que se actualiza Alfa - # try: - # threading.Thread(target=update_libtorrent).start() # Creamos un Thread independiente, hasta el fin de Kodi - # time.sleep(2) # Dejamos terminar la inicialización... - # except: # Si hay problemas de threading, nos vamos - # logger.error(traceback.format_exc()) - - # #QUASAR: Preguntamos si se hacen modificaciones a Quasar - # if not filetools.exists(filetools.join(config.get_data_path(), "quasar.json")) \ - # and not config.get_setting('addon_quasar_update', default=False): - # question_update_external_addon("quasar") - - # #QUASAR: Hacemos las modificaciones a Quasar, si está permitido, y si está instalado - # if config.get_setting('addon_quasar_update', default=False) or \ - # (filetools.exists(filetools.join(config.get_data_path(), \ - # "quasar.json")) and not xbmc.getCondVisibility('System.HasAddon("plugin.video.quasar")')): - # if not update_external_addon("quasar"): - # platformtools.dialog_notification("Actualización Quasar", "Ha fallado. Consulte el log") - - #Existe carpeta "custom_code" ? Si no existe se crea y se sale - custom_code_dir = filetools.join(config.get_data_path(), 'custom_code') - if not filetools.exists(custom_code_dir): - create_folder_structure(custom_code_dir) - return - - else: - #Existe "custom_code.json" ? Si no existe se crea - custom_code_json_path = config.get_runtime_path() - custom_code_json = filetools.join(custom_code_json_path, 'custom_code.json') - if not filetools.exists(custom_code_json): - create_json(custom_code_json_path) - - #Se verifica si la versión del .json y del add-on son iguales. Si es así se sale. Si no se copia "custom_code" al add-on - verify_copy_folders(custom_code_dir, custom_code_json_path) - except: - logger.error(traceback.format_exc()) - - -def create_folder_structure(custom_code_dir): - logger.info() - - #Creamos todas las carpetas. La importante es "custom_code". Las otras sirven meramente de guía para evitar errores de nombres... 
- filetools.mkdir(custom_code_dir) - filetools.mkdir(filetools.join(custom_code_dir, 'channels')) - filetools.mkdir(filetools.join(custom_code_dir, 'core')) - filetools.mkdir(filetools.join(custom_code_dir, 'lib')) - filetools.mkdir(filetools.join(custom_code_dir, 'platformcode')) - filetools.mkdir(filetools.join(custom_code_dir, 'resources')) - filetools.mkdir(filetools.join(custom_code_dir, 'servers')) - - return - - -def create_json(custom_code_json_path, json_name=json_data_file_name): - logger.info() - - #Guardamaos el json con la versión de Alfa vacía, para permitir hacer la primera copia - json_data_file = filetools.join(custom_code_json_path, json_name) - if filetools.exists(json_data_file): - filetools.remove(json_data_file) - result = filetools.write(json_data_file, jsontools.dump({"addon_version": ""})) - - return - - -def verify_copy_folders(custom_code_dir, custom_code_json_path): - logger.info() - - #verificamos si es una nueva versión de Alfa instalada o era la existente. Si es la existente, nos vamos sin hacer nada - json_data_file = filetools.join(custom_code_json_path, json_data_file_name) - json_data = jsontools.load(filetools.read(json_data_file)) - current_version = config.get_addon_version(with_fix=False) - if not json_data or not 'addon_version' in json_data: - create_json(custom_code_json_path) - json_data = jsontools.load(filetools.read(json_data_file)) - try: - if current_version == json_data['addon_version']: - return - except: - logger.error(traceback.format_exc(1)) - - #Ahora copiamos los archivos desde el área de Userdata, Custom_code, sobre las carpetas del add-on - for root, folders, files in filetools.walk(custom_code_dir): - for file in files: - input_file = filetools.join(root, file) - output_file = input_file.replace(custom_code_dir, custom_code_json_path) - if not filetools.copy(input_file, output_file, silent=True): - return - - #Guardamaos el json con la versión actual de Alfa, para no volver a hacer la copia hasta la nueva versión - json_data['addon_version'] = current_version - filetools.write(json_data_file, jsontools.dump(json_data)) - - return - - -def question_update_external_addon(addon_name): - logger.info(addon_name) - - #Verificamos que el addon está instalado - stat = False - if xbmc.getCondVisibility('System.HasAddon("plugin.video.%s")' % addon_name): - #Si es la primera vez que se pregunta por la actualización del addon externo, recogemos la respuesta, - # guardaos un .json en userdat/alfa para no volver a preguntar otra vez, y se actualiza el setting en Alfa. - stat = platformtools.dialog_yesno('Actualización de %s' % addon_name.capitalize(), '¿Quiere que actualicemos Quasar para que sea compatible con las últimas versiones de Kodi? (recomendado: SÍ)', '', 'Si actualiza Quasar, reinicie Kodi en un par de minutos') - - #Con la respuesta actualizamos la variable en Alfa settings.xml. 
Se puede cambiar en Ajustes de Alfa, Otros - # if stat: - # config.set_setting('addon_quasar_update', True) - # else: - # config.set_setting('addon_quasar_update', False) - - #Creamos un .json en userdata para no volver a preguntar otra vez - create_json(config.get_data_path(), "%s.json" % addon_name) - - return stat - - -def update_external_addon(addon_name): - logger.info(addon_name) - - try: - #Verificamos que el addon está instalado - if xbmc.getCondVisibility('System.HasAddon("plugin.video.%s")' % addon_name): - #Path de actualizaciones de Alfa - alfa_addon_updates_mig = filetools.join(config.get_runtime_path(), "lib") - alfa_addon_updates = filetools.join(alfa_addon_updates_mig, addon_name) - - #Path de destino en addon externo - __settings__ = xbmcaddon.Addon(id="plugin.video." + addon_name) - if addon_name.lower() in ['quasar', 'elementum']: - addon_path_mig = filetools.join(xbmc.translatePath(__settings__.getAddonInfo('Path')), \ - filetools.join("resources", "site-packages")) - addon_path = filetools.join(addon_path_mig, addon_name) - else: - addon_path_mig = '' - addon_path = '' - - #Hay modificaciones en Alfa? Las copiamos al addon, incuidas las carpetas de migración a PY3 - if filetools.exists(alfa_addon_updates) and filetools.exists(addon_path): - for root, folders, files in filetools.walk(alfa_addon_updates_mig): - if ('future' in root or 'past' in root) and not 'concurrent' in root: - for file in files: - alfa_addon_updates_mig_folder = root.replace(alfa_addon_updates_mig, addon_path_mig) - if not filetools.exists(alfa_addon_updates_mig_folder): - filetools.mkdir(alfa_addon_updates_mig_folder) - if file.endswith('.pyo') or file.endswith('.pyd'): - continue - input_file = filetools.join(root, file) - output_file = input_file.replace(alfa_addon_updates_mig, addon_path_mig) - if not filetools.copy(input_file, output_file, silent=True): - logger.error('Error en la copia de MIGRACIÓN: Input: %s o Output: %s' % (input_file, output_file)) - return False - - for root, folders, files in filetools.walk(alfa_addon_updates): - for file in files: - input_file = filetools.join(root, file) - output_file = input_file.replace(alfa_addon_updates, addon_path) - if not filetools.copy(input_file, output_file, silent=True): - logger.error('Error en la copia: Input: %s o Output: %s' % (input_file, output_file)) - return False - return True - else: - logger.error('Alguna carpeta no existe: Alfa: %s o %s: %s' % (alfa_addon_updates, addon_name, addon_path)) - # Se ha desinstalado Quasar, reseteamos la opción - else: - config.set_setting('addon_quasar_update', False) - if filetools.exists(filetools.join(config.get_data_path(), "%s.json" % addon_name)): - filetools.remove(filetools.join(config.get_data_path(), "%s.json" % addon_name)) - return True - except: - logger.error(traceback.format_exc()) - - return False - - -# def update_libtorrent(): -# logger.info() - -# if not config.get_setting("mct_buffer", server="torrent", default=""): -# default = config.get_setting("torrent_client", server="torrent", default=0) -# config.set_setting("torrent_client", default, server="torrent") -# config.set_setting("mct_buffer", "50", server="torrent") -# if config.get_setting("mct_download_path", server="torrent", default=config.get_setting("downloadpath")): -# config.set_setting("mct_download_path", config.get_setting("downloadpath"), server="torrent") -# config.set_setting("mct_background_download", True, server="torrent") -# config.set_setting("mct_rar_unpack", True, server="torrent") -# 
config.set_setting("bt_buffer", "50", server="torrent") -# if config.get_setting("bt_download_path", server="torrent", default=config.get_setting("downloadpath")): -# config.set_setting("bt_download_path", config.get_setting("downloadpath"), server="torrent") -# config.set_setting("mct_download_limit", "", server="torrent") -# config.set_setting("magnet2torrent", False, server="torrent") - -# if not filetools.exists(filetools.join(config.get_runtime_path(), "custom_code.json")) or not \ -# config.get_setting("unrar_path", server="torrent", default=""): - -# path = filetools.join(config.get_runtime_path(), 'lib', 'rarfiles') -# creationflags = '' -# sufix = '' -# unrar = '' -# for device in filetools.listdir(path): -# if xbmc.getCondVisibility("system.platform.android") and 'android' not in device: continue -# if xbmc.getCondVisibility("system.platform.windows") and 'windows' not in device: continue -# if not xbmc.getCondVisibility("system.platform.windows") and not xbmc.getCondVisibility("system.platform.android") \ -# and ('android' in device or 'windows' in device): continue -# if 'windows' in device: -# creationflags = 0x08000000 -# sufix = '.exe' -# else: -# creationflags = '' -# sufix = '' -# unrar = filetools.join(path, device, 'unrar%s') % sufix -# if not filetools.exists(unrar): unrar = '' -# if unrar: -# if not xbmc.getCondVisibility("system.platform.windows"): -# try: -# if xbmc.getCondVisibility("system.platform.android"): -# # Para Android copiamos el binario a la partición del sistema -# unrar_org = unrar -# unrar = filetools.join(xbmc.translatePath('special://xbmc/'), 'files').replace('/cache/apk/assets', '') -# if not filetools.exists(unrar): -# filetools.mkdir(unrar) -# unrar = filetools.join(unrar, 'unrar') -# filetools.copy(unrar_org, unrar, silent=True) - -# command = ['chmod', '777', '%s' % unrar] -# p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) -# output_cmd, error_cmd = p.communicate() -# command = ['ls', '-l', unrar] -# p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) -# output_cmd, error_cmd = p.communicate() -# xbmc.log('######## UnRAR file: %s' % str(output_cmd), xbmc.LOGNOTICE) -# except: -# xbmc.log('######## UnRAR ERROR in path: %s' % str(unrar), xbmc.LOGNOTICE) -# logger.error(traceback.format_exc(1)) - -# try: -# if xbmc.getCondVisibility("system.platform.windows"): -# p = subprocess.Popen(unrar, stdout=subprocess.PIPE, stderr=subprocess.PIPE, creationflags=creationflags) -# else: -# p = subprocess.Popen(unrar, stdout=subprocess.PIPE, stderr=subprocess.PIPE) -# output_cmd, error_cmd = p.communicate() -# if p.returncode != 0 or error_cmd: -# xbmc.log('######## UnRAR returncode in module %s: %s, %s in %s' % \ -# (device, str(p.returncode), str(error_cmd), unrar), xbmc.LOGNOTICE) -# unrar = '' -# else: -# xbmc.log('######## UnRAR OK in %s: %s' % (device, unrar), xbmc.LOGNOTICE) -# break -# except: -# xbmc.log('######## UnRAR ERROR in module %s: %s' % (device, unrar), xbmc.LOGNOTICE) -# logger.error(traceback.format_exc(1)) -# unrar = '' - -# if unrar: config.set_setting("unrar_path", unrar, server="torrent") - -# if filetools.exists(filetools.join(config.get_runtime_path(), "custom_code.json")) and \ -# config.get_setting("libtorrent_path", server="torrent", default="") : -# return - - -# try: -# from lib.python_libtorrent.python_libtorrent import get_libtorrent -# except Exception as e: -# logger.error(traceback.format_exc(1)) -# if not PY3: -# e = unicode(str(e), "utf8", 
errors="replace").encode("utf8") -# config.set_setting("libtorrent_path", "", server="torrent") -# if not config.get_setting("libtorrent_error", server="torrent", default=''): -# config.set_setting("libtorrent_error", str(e), server="torrent") - -# return - - -def verify_Kodi_video_DB(): - logger.info() - import random - - platform = {} - path = '' - db_files = [] - - try: - path = filetools.join(xbmc.translatePath("special://masterprofile/"), "Database") - if filetools.exists(path): - platform = config.get_platform(full_version=True) - if platform and platform['num_version'] <= 19: - db_files = filetools.walk(path) - if filetools.exists(filetools.join(path, platform['video_db'])): - for root, folders, files in db_files: - for file in files: - if platform['video_db'] not in file: - if file.startswith('MyVideos'): - randnum = str(random.randrange(1, 999999)) - filetools.rename(filetools.join(path, file), 'OLD_' + randnum +'_' + file) - logger.error('BD obsoleta: ' + file) - - else: - logger.error('Video_DB: ' + str(platform['video_db']) + ' para versión Kodi ' + str(platform['num_version']) + ' NO EXISTE. Analizar carpeta: ' + str(db_files)) - else: - logger.error('Estructura de get_platform(full_version=True) incorrecta') - else: - logger.error('Path a Userdata/Database (' + path + ') no encontrado') - - except: - logger.error('Platform: ' + str(platform) + ' / Path: ' + str(path) + ' / Files: ' + str(db_files)) - logger.error(traceback.format_exc()) - - return \ No newline at end of file diff --git a/platformcode/download_and_play.py b/platformcode/download_and_play.py index c356bc4e..312124c6 100644 --- a/platformcode/download_and_play.py +++ b/platformcode/download_and_play.py @@ -14,23 +14,14 @@ import sys PY3 = False if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int -import urllib.request, urllib.parse, urllib.error - -import os -import re -import socket -import threading -import time - -import xbmc -import xbmcgui +import urllib.request, urllib.parse, urllib.error, os, re, socket, threading, time, xbmc, xbmcgui from core import downloadtools from platformcode import config, logger # Download a file and start playing while downloading def download_and_play(url, file_name, download_path): - # Lanza thread + # Start thread logger.info("Active threads " + str(threading.active_count())) logger.info("" + repr(threading.enumerate())) logger.info("Starting download thread...") @@ -40,7 +31,7 @@ def download_and_play(url, file_name, download_path): logger.info("Active threads " + str(threading.active_count())) logger.info("" + repr(threading.enumerate())) - # Espera + # Wait logger.info("Waiting...") while True: @@ -52,8 +43,7 @@ def download_and_play(url, file_name, download_path): while not cancelled and download_thread.isAlive(): dialog.update(download_thread.get_progress(), config.get_localized_string(60313), config.get_localized_string(60314) + str(int(old_div(download_thread.get_speed(), 1024))) + " KB/s " + str( - download_thread.get_actual_size()) + config.get_localized_string(60316) + str( - download_thread.get_total_size()) + "MB", + download_thread.get_actual_size()) + config.get_localized_string(60316) + str( download_thread.get_total_size()) + "MB", config.get_localized_string(60202) % (str(downloadtools.sec_to_hms(download_thread.get_remaining_time())))) xbmc.sleep(1000) @@ -65,25 +55,25 @@ def download_and_play(url, file_name, download_path): logger.info("End of waiting") - # Lanza el reproductor + # Launch the player player = CustomPlayer() 
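The wait loop above polls the background download thread to feed a Kodi progress dialog, and only then hands the partially downloaded file to the player. The same pattern reduced to a self-contained sketch; the class and the numbers are illustrative, not the add-on's DownloadThread:

import threading, time
import xbmc, xbmcgui

class FakeDownload(threading.Thread):
    # stands in for DownloadThread: exposes a progress figure the dialog can poll
    def __init__(self):
        threading.Thread.__init__(self)
        self.progress = 0

    def run(self):
        while self.progress < 100:
            time.sleep(1)
            self.progress += 10     # pretend some bytes were written

    def get_progress(self):
        return self.progress

worker = FakeDownload()
worker.start()
dialog = xbmcgui.DialogProgress()
dialog.create('Downloading')
while not dialog.iscanceled() and worker.is_alive():
    dialog.update(worker.get_progress(), 'downloaded %d%%' % worker.get_progress())
    xbmc.sleep(1000)
dialog.close()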
player.set_download_thread(download_thread) player.PlayStream(download_thread.get_file_name()) - # Fin de reproducción - logger.info("Fin de reproducción") + # End of playback + logger.info("End of playback") if player.is_stopped(): - logger.info("Terminado por el usuario") + logger.info("Terminated by user") break else: if not download_thread.isAlive(): - logger.info("La descarga ha terminado") + logger.info("Download has finished") break else: logger.info("Continua la descarga") - # Cuando el reproductor acaba, si continúa descargando lo para ahora + # When the player finishes, if you continue downloading it for now logger.info("Download thread alive=" + str(download_thread.isAlive())) if download_thread.isAlive(): logger.info("Killing download thread") @@ -141,7 +131,7 @@ class CustomPlayer(xbmc.Player): # Download in background class DownloadThread(threading.Thread): def __init__(self, url, file_name, download_path): - logger.info(repr(file)) + # logger.info(repr(file)) self.url = url self.download_path = download_path self.file_name = os.path.join(download_path, file_name) @@ -194,17 +184,17 @@ class DownloadThread(threading.Thread): logger.info() comando = "./megacrypter.sh" - logger.info("comando=" + comando) + logger.info("command= " + comando) oldcwd = os.getcwd() - logger.info("oldcwd=" + oldcwd) + logger.info("oldcwd= " + oldcwd) cwd = os.path.join(config.get_runtime_path(), "tools") - logger.info("cwd=" + cwd) + logger.info("cwd= " + cwd) os.chdir(cwd) - logger.info("directory changed to=" + os.getcwd()) + logger.info("directory changed to= " + os.getcwd()) - logger.info("destino=" + self.download_path) + logger.info("destination= " + self.download_path) os.system(comando + " '" + self.url + "' \"" + self.download_path + "\"") # p = subprocess.Popen([comando , self.url , self.download_path], cwd=cwd, stdout=subprocess.PIPE , stderr=subprocess.PIPE ) @@ -218,18 +208,18 @@ class DownloadThread(threading.Thread): headers = [] - # Se asegura de que el fichero se podrá crear - logger.info("nombrefichero=" + self.file_name) + # Ensures that the file can be created + logger.info("filename= " + self.file_name) self.file_name = xbmc.makeLegalFilename(self.file_name) - logger.info("nombrefichero=" + self.file_name) - logger.info("url=" + self.url) + logger.info("filename= " + self.file_name) + logger.info("url= " + self.url) - # Crea el fichero + # Create the file existSize = 0 f = open(self.file_name, 'wb') grabado = 0 - # Interpreta las cabeceras en una URL como en XBMC + # Interpret headers in a URL like in XBMC if "|" in self.url: additional_headers = self.url.split("|")[1] if "&" in additional_headers: @@ -244,7 +234,7 @@ class DownloadThread(threading.Thread): headers.append([name, value]) self.url = self.url.split("|")[0] - logger.info("url=" + self.url) + logger.info("url= " + self.url) # Timeout del socket a 60 segundos socket.setdefaulttimeout(60) @@ -253,7 +243,7 @@ class DownloadThread(threading.Thread): h = urllib.request.HTTPHandler(debuglevel=0) request = urllib.request.Request(self.url) for header in headers: - logger.info("Header=" + header[0] + ": " + header[1]) + logger.info("Header= " + header[0] + ": " + header[1]) request.add_header(header[0], header[1]) # Lanza la petición @@ -262,14 +252,14 @@ class DownloadThread(threading.Thread): try: connexion = opener.open(request) except urllib.error.HTTPError as e: - logger.error("error %d (%s) al abrir la url %s" % (e.code, e.msg, self.url)) + logger.error("error %d (%s) opening url %s" % (e.code, e.msg, self.url)) # print 
e.code # print e.msg # print e.hdrs # print e.fp f.close() - # El error 416 es que el rango pedido es mayor que el fichero => es que ya está completo + # Error 416 is that the requested range is greater than the file => is that it is already complete if e.code == 416: return 0 else: @@ -286,21 +276,21 @@ class DownloadThread(threading.Thread): blocksize = 100 * 1024 bloqueleido = connexion.read(blocksize) - logger.info("Iniciando descarga del fichero, bloqueleido=%s" % len(bloqueleido)) + logger.info("Starting file download, blocked= %s" % len(bloqueleido)) maxreintentos = 10 while len(bloqueleido) > 0: try: if os.path.exists(self.force_stop_file_name): - logger.info("Detectado fichero force_stop, se interrumpe la descarga") + logger.info("Force_stop file detected, download is interrupted") f.close() xbmc.executebuiltin("XBMC.Notification(%s,%s,300)" % (config.get_localized_string(60319),config.get_localized_string(60320))) return - # Escribe el bloque leido + # Write the block read # try: # import xbmcvfs # f.write( bloqueleido ) @@ -309,12 +299,12 @@ class DownloadThread(threading.Thread): grabado = grabado + len(bloqueleido) logger.info("grabado=%d de %d" % (grabado, totalfichero)) percent = int(float(grabado) * 100 / float(totalfichero)) - self.progress = percent; + self.progress = percent totalmb = float(float(totalfichero) / (1024 * 1024)) descargadosmb = float(float(grabado) / (1024 * 1024)) self.actual_size = int(descargadosmb) - # Lee el siguiente bloque, reintentando para no parar todo al primer timeout + #Read the next block, retrying not to stop everything at the first timeout reintentos = 0 while reintentos <= maxreintentos: try: @@ -333,13 +323,13 @@ class DownloadThread(threading.Thread): except: import sys reintentos = reintentos + 1 - logger.info("ERROR en la descarga del bloque, reintento %d" % reintentos) + logger.info("ERROR in block download, retry %d" % reintentos) for line in sys.exc_info(): logger.error("%s" % line) - # Ha habido un error en la descarga + # There was an error in the download if reintentos > maxreintentos: - logger.error("ERROR en la descarga del fichero") + logger.error("ERROR in the file download") f.close() return -2 diff --git a/platformcode/envtal.py b/platformcode/envtal.py index c68342e8..a5461a83 100644 --- a/platformcode/envtal.py +++ b/platformcode/envtal.py @@ -10,13 +10,7 @@ import sys PY3 = False if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int -import xbmc -import xbmcaddon - -import os -import subprocess -import re -import platform +import xbmc, xbmcaddon, os, subprocess, re, platform try: import ctypes @@ -123,18 +117,14 @@ def get_environment(): environment['kodi_bmode'] = '0' environment['kodi_rfactor'] = '4.0' if filetools.exists(filetools.join(xbmc.translatePath("special://userdata"), "advancedsettings.xml")): - advancedsettings = filetools.read(filetools.join(xbmc.translatePath("special://userdata"), - "advancedsettings.xml")).split('\n') + advancedsettings = filetools.read(filetools.join(xbmc.translatePath("special://userdata"), "advancedsettings.xml")).split('\n') for label_a in advancedsettings: if 'memorysize' in label_a: - environment['kodi_buffer'] = str(old_div(int(scrapertools.find_single_match - (label_a, '>(\d+)<\/')), 1024 ** 2)) + environment['kodi_buffer'] = str(old_div(int(scrapertools.find_single_match(label_a, r'>(\d+)<\/')), 1024 ** 2)) if 'buffermode' in label_a: - environment['kodi_bmode'] = str(scrapertools.find_single_match - (label_a, '>(\d+)<\/')) + environment['kodi_bmode'] 
= str(scrapertools.find_single_match(label_a, r'>(\d+)<\/')) if 'readfactor' in label_a: - environment['kodi_rfactor'] = str(scrapertools.find_single_match - (label_a, '>(.*?)<\/')) + environment['kodi_rfactor'] = str(scrapertools.find_single_match(label_a, r'>(.*?)<\/')) except: pass @@ -142,14 +132,12 @@ def get_environment(): try: if environment['os_name'].lower() == 'windows': free_bytes = ctypes.c_ulonglong(0) - ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(environment['userdata_path']), - None, None, ctypes.pointer(free_bytes)) + ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(environment['userdata_path']), None, None, ctypes.pointer(free_bytes)) environment['userdata_free'] = str(round(float(free_bytes.value) / (1024 ** 3), 3)) else: disk_space = os.statvfs(environment['userdata_path']) if not disk_space.f_frsize: disk_space.f_frsize = disk_space.f_frsize.f_bsize - environment['userdata_free'] = str(round((float(disk_space.f_bavail) / \ - (1024 ** 3)) * float(disk_space.f_frsize), 3)) + environment['userdata_free'] = str(round((float(disk_space.f_bavail) / (1024 ** 3)) * float(disk_space.f_frsize), 3)) except: environment['userdata_free'] = '?' @@ -158,22 +146,15 @@ def get_environment(): environment['videolab_episodios'] = '?' environment['videolab_pelis'] = '?' environment['videolab_path'] = str(xbmc.translatePath(config.get_videolibrary_path())) - if filetools.exists(filetools.join(environment['videolab_path'], \ - config.get_setting("folder_tvshows"))): - environment['videolab_series'] = str(len(filetools.listdir(filetools.join(environment['videolab_path'], \ - config.get_setting( - "folder_tvshows"))))) + if filetools.exists(filetools.join(environment['videolab_path'], config.get_setting("folder_tvshows"))): + environment['videolab_series'] = str(len(filetools.listdir(filetools.join(environment['videolab_path'], config.get_setting("folder_tvshows"))))) counter = 0 - for root, folders, files in filetools.walk(filetools.join(environment['videolab_path'], \ - config.get_setting("folder_tvshows"))): + for root, folders, files in filetools.walk(filetools.join(environment['videolab_path'], config.get_setting("folder_tvshows"))): for file in files: if file.endswith('.strm'): counter += 1 environment['videolab_episodios'] = str(counter) - if filetools.exists(filetools.join(environment['videolab_path'], \ - config.get_setting("folder_movies"))): - environment['videolab_pelis'] = str(len(filetools.listdir(filetools.join(environment['videolab_path'], \ - config.get_setting( - "folder_movies"))))) + if filetools.exists(filetools.join(environment['videolab_path'], config.get_setting("folder_movies"))): + environment['videolab_pelis'] = str(len(filetools.listdir(filetools.join(environment['videolab_path'], config.get_setting("folder_movies"))))) except: pass try: @@ -184,14 +165,12 @@ def get_environment(): try: if environment['os_name'].lower() == 'windows': free_bytes = ctypes.c_ulonglong(0) - ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(environment['videolab_path']), - None, None, ctypes.pointer(free_bytes)) + ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(environment['videolab_path']), None, None, ctypes.pointer(free_bytes)) environment['videolab_free'] = str(round(float(free_bytes.value) / (1024 ** 3), 3)) else: disk_space = os.statvfs(environment['videolab_path']) if not disk_space.f_frsize: disk_space.f_frsize = disk_space.f_frsize.f_bsize - environment['videolab_free'] = str(round((float(disk_space.f_bavail) / \ - (1024 ** 3)) * 
float(disk_space.f_frsize), 3)) + environment['videolab_free'] = str(round((float(disk_space.f_bavail) / (1024 ** 3)) * float(disk_space.f_frsize), 3)) except: environment['videolab_free'] = '?' @@ -439,131 +418,116 @@ def paint_env(item, environment={}): thumb = get_thumb("setting_0.png") cabecera = """\ - Muestra las [COLOR yellow]variables[/COLOR] del ecosistema de Kodi que puden ser relevantes para el diagnóstico de problema en Alfa: - - Versión de Alfa con Fix + It shows the [COLOR yellow] variables [/ COLOR] of the Kodi ecosystem that may be relevant to the problem diagnosis in Alpha: + - Alpha version with Fix - Debug Alfa: True/False """ plataform = """\ - Muestra los datos especificos de la [COLOR yellow]plataforma[/COLOR] en la que está alojado Kodi: - - Sistema Operativo - - Modelo (opt) - - Versión SO - - Procesador - - Aquitectura - - Idioma de Kodi + It shows the specific data of the [COLOR yellow] platform [/ COLOR] where Kodi is hosted: + - Operating system + - Model (opt) + - SO version + - Processor + - Architecture + - Kodi language """ kodi = """\ - Muestra los datos especificos de la instalación de [COLOR yellow]Kodi[/COLOR]: - - Versión de Kodi - - Base de Datos de Vídeo - - Versión de Python + It shows the specific data of the installation of [COLOR yellow] Kodi [/ COLOR]: + - Kodi version + - Video Database + - Python version """ cpu = """\ - Muestra los datos consumo actual de [COLOR yellow]CPU(s)[/COLOR] + Displays the current consumption data of [COLOR yellow] CPU (s) [/ COLOR] """ memoria = """\ - Muestra los datos del uso de [COLOR yellow]Memoria[/COLOR] del sistema: - - Memoria total - - Memoria disponible + Displays the usage data of [COLOR yellow] System [/ COLOR] memory: + - Total memory + - Available memory - en [COLOR yellow]Advancedsettings.xml[/COLOR] - - Buffer de memoria - configurado: - para Kodi: 3 x valor de + - Memory buffer + configured: + for Kodi: 3 x value of - - Buffermode: cachea: + - Buffermode: cache: * Internet (0, 2) - * También local (1) + * Also local (1) * No Buffer (3) - - Readfactor: readfactor * - avg bitrate vídeo + - Readfactor: readfactor * + avg bitrate video """ userdata = """\ - Muestra los datos del "path" de [COLOR yellow]Userdata[/COLOR]: + It shows the data of the "path" of [COLOR yellow] Userdata [/ COLOR]: - Path - - Espacio disponible + - Available space """ videoteca = """\ - Muestra los datos de la [COLOR yellow]Videoteca[/COLOR]: - - Nº de Series y Episodios - - Nº de Películas - - Tipo de actulización + It shows the data of the [COLOR yellow] Video Library [/ COLOR]: + - Number of Series and Episodes + - No. of Movies + - Update type - Path - - Espacio disponible + - Available space """ torrent = """\ - Muestra los datos generales del estado de [COLOR yellow]Torrent[/COLOR]: - - ID del cliente seleccionado - - Descompresión automática de archivos RAR? - - Está activo Libtorrent? - - Se descomprimen los RARs en background? - - Está operativo el módulo UnRAR? Qué plataforma? + It shows the general data of the status of [COLOR yellow] Torrent [/ COLOR]: + - ID of the selected customer + - Automatic decompression of RAR files? + - Is Libtorrent active? + - Are RARs decompressed in the background? + - Is the UnRAR module operational? Which platform? 
""" torrent_error = """\ - Muestra los datos del error de importación de [COLOR yellow]Libtorrent[/COLOR] + Displays the import error data for [COLOR yellow] Libtorrent [/ COLOR] """ torrent_cliente = """\ - Muestra los datos de los [COLOR yellow]Clientes Torrent[/COLOR]: - - Nombre del Cliente - - Tamaño de buffer inicial - - Path de descargas - - Tamaño de buffer en Memoria + It shows the data of the [COLOR yellow] Torrent Clients [/ COLOR]: + - Customer name + - Initial buffer size + - Download path + - Memory buffer size (opt, si no disco) - - Espacio disponible + - Available space """ proxy = """\ - Muestra las direcciones de canales o servidores que necesitan [COLOR yellow]Proxy[/COLOR] + Shows the addresses of channels or servers that need [COLOR yellow] Proxy [/ COLOR] """ log = """\ - Muestra el tamaño actual del [COLOR yellow]Log[/COLOR] + Displays the current size of the [COLOR yellow] Log [/ COLOR] """ reporte = """\ - Enlaza con la utilidad que permite el [COLOR yellow]envío del Log[/COLOR] de Kodi a través de un servicio Pastebin + Links with the utility that allows the [COLOR yellow] to send the Kodi Log [/ COLOR] through a Pastebin service """ - itemlist.append(Item(channel=item.channel, title="[COLOR orange][B]Variables " + - "de entorno Alfa: %s Debug: %s[/B][/COLOR]" % - (environment['addon_version'], environment['debug']), + itemlist.append(Item(channel=item.channel, title="KoD environment variables: %s Debug: %s" % (environment['addon_version'], environment['debug']), action="", plot=cabecera, thumbnail=thumb, folder=False)) - itemlist.append(Item(channel=item.channel, title='[COLOR yellow]%s[/COLOR]' % - environment['os_name'] + ' ' + environment['prod_model'] + ' ' + - environment['os_release'] + ' ' + environment['machine'] + ' ' + - environment['architecture'] + ' ' + environment['language'], + itemlist.append(Item(channel=item.channel, title=environment['os_name'] + ' ' + environment['prod_model'] + ' ' + environment['os_release'] + ' ' + environment['machine'] + ' ' + environment['architecture'] + ' ' + environment['language'], action="", plot=plataform, thumbnail=thumb, folder=False)) - itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Kodi [/COLOR]' + - environment['num_version'] + ', Vídeo: ' + environment[ - 'video_db'] + - ', Python ' + environment['python_version'], action="", + itemlist.append(Item(channel=item.channel, title='Kodi ' + environment['num_version'] + ', Vídeo: ' + environment[ 'video_db'] + ', Python ' + environment['python_version'], action="", plot=kodi, thumbnail=thumb, folder=False)) if environment['cpu_usage']: - itemlist.append(Item(channel=item.channel, title='[COLOR yellow]CPU: [/COLOR]' + - environment['cpu_usage'], action="", plot=cpu, thumbnail=thumb, - folder=False)) + itemlist.append(Item(channel=item.channel, title='CPU: ' + environment['cpu_usage'], action="", plot=cpu, thumbnail=thumb, folder=False)) if environment['mem_total'] or environment['mem_free']: - itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Memoria: [/COLOR]Total: ' + + itemlist.append(Item(channel=item.channel, title='Memory: Total: ' + environment['mem_total'] + ' MB / Disp.: ' + environment['mem_free'] + ' MB / Buffers: ' + - str(int( - environment['kodi_buffer']) * 3) + ' MB / Buffermode: ' + + str(int(environment['kodi_buffer']) * 3) + ' MB / Buffermode: ' + environment['kodi_bmode'] + ' / Readfactor: ' + environment['kodi_rfactor'], action="", plot=memoria, thumbnail=thumb, folder=False)) - 
itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Userdata: [/COLOR]' + - environment['userdata_path'] + ' - Free: ' + environment[ - 'userdata_free'].replace('.', ',') + + itemlist.append(Item(channel=item.channel, title='Userdata:' + + environment['userdata_path'] + ' - Free: ' + environment[ 'userdata_free'].replace('.', ',') + ' GB', action="", plot=userdata, thumbnail=thumb, folder=False)) - itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Videoteca: [/COLOR]Series/Epis: ' + - environment['videolab_series'] + '/' + environment[ - 'videolab_episodios'] + - ' - Pelis: ' + environment['videolab_pelis'] + ' - Upd: ' + - environment['videolab_update'] + ' - Path: ' + - environment['videolab_path'] + ' - Free: ' + environment[ - 'videolab_free'].replace('.', ',') + + itemlist.append(Item(channel=item.channel, title='Video store: Series/Epis: ' + + environment['videolab_series'] + '/' + environment['videolab_episodios'] + + ' - Movie: ' + environment['videolab_pelis'] + ' - Upd: ' + environment['videolab_update'] + ' - Path: ' + + environment['videolab_path'] + ' - Free: ' + environment[ 'videolab_free'].replace('.', ',') + ' GB', action="", plot=videoteca, thumbnail=thumb, folder=False)) if environment['torrent_list']: @@ -571,41 +535,27 @@ def paint_env(item, environment={}): if x == 0: cliente_alt = cliente.copy() del cliente_alt['Torrent_opt'] - itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Torrent: [/COLOR]Opt: %s, %s' \ - % (str(cliente['Torrent_opt']), - str(cliente_alt).replace('{', '').replace('}', '') \ - .replace("'", '').replace('_', ' ')), action="", - plot=torrent, thumbnail=thumb, - folder=False)) + itemlist.append(Item(channel=item.channel, title='Torrent: Opt: %s, %s' % (str(cliente['Torrent_opt']), str(cliente_alt).replace('{', '').replace('}', '').replace("'", '').replace('_', ' ')), action="", + plot=torrent, thumbnail=thumb, folder=False)) elif x == 1 and environment['torrent_error']: itemlist.append(Item(channel=item.channel, - title='[COLOR magenta]- %s[/COLOR]' % str(cliente).replace('{', '').replace('}', - '') \ - .replace("'", '').replace('_', ' '), action="", plot=torrent_error, - thumbnail=thumb, - folder=False)) + title=str(cliente).replace('{', '').replace('}','').replace("'", '').replace('_', ' '), action="", plot=torrent_error, + thumbnail=thumb, folder=False)) else: cliente_alt = cliente.copy() del cliente_alt['Plug_in'] cliente_alt['Libre'] = cliente_alt['Libre'].replace('.', ',') + ' GB' - itemlist.append(Item(channel=item.channel, title='[COLOR yellow]- %s: [/COLOR]: %s' % - (str(cliente['Plug_in']), - str(cliente_alt).replace('{', '').replace('}', '') \ - .replace("'", '').replace('\\\\', '\\')), action="", - plot=torrent_cliente, - thumbnail=thumb, folder=False)) + itemlist.append(Item(channel=item.channel, title='- %s: %s' % (str(cliente['Plug_in']), str(cliente_alt).replace('{', '').replace('}', '').replace("'", '').replace('\\\\', '\\')), action="", + plot=torrent_cliente, thumbnail=thumb, folder=False)) - itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Proxy: [/COLOR]' + - environment['proxy_active'], action="", plot=proxy, - thumbnail=thumb, - folder=False)) + itemlist.append(Item(channel=item.channel, title='Proxy: ' + environment['proxy_active'], action="", plot=proxy, + thumbnail=thumb, folder=False)) - itemlist.append(Item(channel=item.channel, title='[COLOR yellow]TAMAÑO del LOG: [/COLOR]' + - environment['log_size'].replace('.', ',') + ' MB', action="", + 
itemlist.append(Item(channel=item.channel, title='LOG SIZE: ' + environment['log_size'].replace('.', ',') + ' MB', action="", plot=log, thumbnail=thumb, folder=False)) - itemlist.append(Item(title="[COLOR hotpink][B]==> Reportar un fallo[/B][/COLOR]", + itemlist.append(Item(title="==> Report a bug", channel="setting", action="report_menu", category='Configuración', unify=False, plot=reporte, thumbnail=get_thumb("error.png"))) diff --git a/platformcode/launcher.py b/platformcode/launcher.py index c6a80bb6..07664798 100644 --- a/platformcode/launcher.py +++ b/platformcode/launcher.py @@ -11,31 +11,30 @@ PY3 = False if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int # if PY3: -# import urllib.error as urllib2 # Es muy lento en PY2. En PY3 es nativo +# import urllib.error as urllib2 # It is very slow in PY2. In PY3 it is native # else: -# import urllib2 # Usamos el nativo de PY2 que es más rápido +# import urllib2 # We use the native of PY2 which is faster import os from core.item import Item -from platformcode import config, logger -from platformcode import platformtools +from platformcode import config, logger, platformtools from platformcode.logger import WebErrorException def start(): - """ Primera funcion que se ejecuta al entrar en el plugin. - Dentro de esta funcion deberian ir todas las llamadas a las - funciones que deseamos que se ejecuten nada mas abrir el plugin. + """ First function that is executed when entering the plugin. + Within this function all calls should go to + functions that we want to execute as soon as we open the plugin. """ logger.info() - #config.set_setting('show_once', True) + # config.set_setting('show_once', True) # Test if all the required directories are created config.verify_directories_created() - # controlla se l'utente ha qualche problema di connessione - # se lo ha: non lo fa entrare nell'addon - # se ha problemi di DNS avvia ma lascia entrare - # se tutto ok: entra nell'addon + # check if the user has any connection problems + # if it has: it does not enter the addon + # if it has DNS problems start but let in + # if everything is ok: enter the addon from specials.checkhost import test_conn import threading @@ -191,7 +190,7 @@ def run(item=None): # Special play action if item.action == "play": - #define la info para trakt + # define la info para trakt try: from core import trakt_tools trakt_tools.set_trakt_info(item) @@ -444,14 +443,14 @@ def limit_itemlist(itemlist): def play_from_library(item): itemlist=[] """ - Los .strm al reproducirlos desde kodi, este espera que sea un archivo "reproducible" asi que no puede contener - más items, como mucho se puede colocar un dialogo de seleccion. - Esto lo solucionamos "engañando a kodi" y haciendole creer que se ha reproducido algo, asi despues mediante - "Container.Update()" cargamos el strm como si un item desde dentro del addon se tratara, quitando todas - las limitaciones y permitiendo reproducir mediante la funcion general sin tener que crear nuevos métodos para - la videoteca. + The .strm files when played from kodi, this expects it to be a "playable" file so it cannot contain + more items, at most a selection dialog can be placed. + We solve this by "cheating kodi" and making him believe that something has been reproduced, so later by + "Container.Update ()" we load the strm as if an item from inside the addon were treated, removing all + the limitations and allowing to reproduce through the general function without having to create new methods to + the video library. 
@type item: item - @param item: elemento con información + @param item: item with information """ item.fromLibrary = True logger.info() @@ -463,30 +462,28 @@ def play_from_library(item): from time import sleep, time from specials import nextep - # Intentamos reproducir una imagen (esto no hace nada y ademas no da error) - xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, - xbmcgui.ListItem( - path=os.path.join(config.get_runtime_path(), "resources", "kod.mp4"))) + # We try to reproduce an image (this does nothing and also does not give an error) + xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, xbmcgui.ListItem(path=os.path.join(config.get_runtime_path(), "resources", "kod.mp4"))) - # Por si acaso la imagen hiciera (en futuras versiones) le damos a stop para detener la reproduccion - # sleep(0.5) ### Si no se pone esto se bloquea Kodi + # Just in case the image did (in future versions) we give stop to stop the reproduction + # sleep(0.5) ### If you don't put this on you crash Kodi xbmc.Player().stop() - # modificamos el action (actualmente la videoteca necesita "findvideos" ya que es donde se buscan las fuentes + # we modify the action (currently the video library needs "findvideos" since this is where the sources are searched item.action = "findvideos" check_next_ep = nextep.check(item) window_type = config.get_setting("window_type", "videolibrary") - # y volvemos a lanzar kodi + # and we launch kodi again if xbmc.getCondVisibility('Window.IsMedia') and not window_type == 1: - # Ventana convencional + # Conventional window xbmc.executebuiltin("Container.Update(" + sys.argv[0] + "?" + item.tourl() + ")") else: - # Ventana emergente + # Pop-up window item.show_server = True from specials import videolibrary, autoplay @@ -505,22 +502,22 @@ def play_from_library(item): else: while platformtools.is_playing(): - # Ventana convencional + # Conventional window sleep(5) p_dialog.update(50, '') it = item if item.show_server or not check_next_ep: - '''# Se filtran los enlaces segun la lista negra - if config.get_setting('filter_servers', "servers"): - itemlist = servertools.filter_servers(itemlist)''' + # The links are filtered according to the blacklist + # if config.get_setting('filter_servers', "servers"): + # itemlist = servertools.filter_servers(itemlist) - # Se limita la cantidad de enlaces a mostrar + # The number of links to show is limited if config.get_setting("max_links", "videolibrary") != 0: itemlist = limit_itemlist(itemlist) - # Se "limpia" ligeramente la lista de enlaces + # The list of links is slightly "cleaned" if config.get_setting("replace_VD", "videolibrary") == 1: itemlist = reorder_itemlist(itemlist) @@ -532,12 +529,12 @@ def play_from_library(item): if len(itemlist) > 0: while not xbmc.Monitor().abortRequested(): - # El usuario elige el mirror + # The user chooses the mirror opciones = [] for item in itemlist: opciones.append(item.title) - # Se abre la ventana de seleccion + # The selection window opens if (item.contentSerieName != "" and item.contentSeason != "" and item.contentEpisodeNumber != ""): diff --git a/platformcode/logger.py b/platformcode/logger.py index f5308358..785dd5cf 100644 --- a/platformcode/logger.py +++ b/platformcode/logger.py @@ -21,7 +21,7 @@ def log_enable(active): def encode_log(message=""): - + # Unicode to utf8 if isinstance(message, unicode): message = message.encode("utf8") @@ -30,7 +30,7 @@ def encode_log(message=""): # All encodings to utf8 elif not PY3 and isinstance(message, str): message = unicode(message, "utf8", 
errors="replace").encode("utf8") - + # Bytes encodings to utf8 elif PY3 and isinstance(message, bytes): message = message.decode("utf8") @@ -43,7 +43,7 @@ def encode_log(message=""): def get_caller(message=None): - + if message and isinstance(message, unicode): message = message.encode("utf8") if PY3: message = message.decode("utf8") @@ -52,8 +52,8 @@ def get_caller(message=None): elif message and not PY3: message = unicode(message, "utf8", errors="replace").encode("utf8") elif message: - message = str(message) - + message = str(message) + module = inspect.getmodule(inspect.currentframe().f_back.f_back) if module == None: diff --git a/platformcode/mct.py b/platformcode/mct.py deleted file mode 100644 index 63434c2d..00000000 --- a/platformcode/mct.py +++ /dev/null @@ -1,973 +0,0 @@ -# -*- coding: utf-8 -*- -# ------------------------------------------------------------ -# MCT - Mini Cliente Torrent -# ------------------------------------------------------------ - -from __future__ import division -from future import standard_library -standard_library.install_aliases() -from builtins import hex -#from builtins import str -import sys -PY3 = False -if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int -from builtins import range -from past.utils import old_div - -import os -import re -import tempfile -import urllib.request, urllib.parse, urllib.error -import platform -import traceback - -try: - import xbmc - import xbmcgui -except: - pass - -from platformcode import config -LIBTORRENT_PATH = config.get_setting("libtorrent_path", server="torrent", default='') - -from servers import torrent as torr -lt, e, e1, e2 = torr.import_libtorrent(LIBTORRENT_PATH) - -from core import scrapertools -from core import filetools -from core import httptools - -try: - BUFFER = int(config.get_setting("mct_buffer", server="torrent", default="50")) -except: - BUFFER = 50 - config.set_setting("mct_buffer", "50", server="torrent") - -try: - DOWNLOAD_PATH = '' - DOWNLOAD_PATH = xbmc.translatePath(config.get_setting("mct_download_path", server="torrent", default=config.get_setting("torrent_downloadpath"))) -except: - DOWNLOAD_PATH = config.get_setting("mct_download_path", server="torrent", default=config.get_setting("downloadpath")) -if not config.get_setting("mct_download_path", server="torrent") and DOWNLOAD_PATH: - config.set_setting("mct_download_path", DOWNLOAD_PATH, server="torrent") -if not DOWNLOAD_PATH: - try: - DOWNLOAD_PATH = str(xbmc.translatePath(os.path.join(config.get_data_path(), 'downloads'))) - config.set_setting("mct_download_path", os.path.join(config.get_data_path(), 'downloads'), server="torrent") - except: - DOWNLOAD_PATH = os.path.join(config.get_data_path(), 'downloads') - config.set_setting("mct_download_path", DOWNLOAD_PATH, server="torrent") - -BACKGROUND = config.get_setting("mct_background_download", server="torrent", default=True) -RAR = config.get_setting("mct_rar_unpack", server="torrent", default=True) -DOWNLOAD_LIMIT = config.get_setting("mct_download_limit", server="torrent", default="") -if DOWNLOAD_LIMIT: - try: - DOWNLOAD_LIMIT = int(DOWNLOAD_LIMIT) * 1024 - except: - DOWNLOAD_LIMIT = 0 -else: - DOWNLOAD_LIMIT = 0 -UPLOAD_LIMIT = 100 * 1024 -msg_header = 'MCT Client Torrent' - - -def play(url, xlistitem={}, is_view=None, subtitle="", password="", item=None): - allocate = True - try: - log("XXX KODI XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") - log("OS platform: %s %s" % (platform.system(),platform.release())) - log("xbmc/kodi version: 
%s" % xbmc.getInfoLabel( "System.BuildVersion" )) - xbmc_version = int(xbmc.getInfoLabel( "System.BuildVersion" )[:2]) - log("Architecture: %s %s" % (str(platform.machine()), \ - str(sys.maxsize > 2 ** 32 and "64-bit" or "32-bit"))) - log("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX KODI & platform XXXX") - except: - log(traceback.format_exc()) - - # -- adfly: ------------------------------------ - if url.startswith("http://adf.ly/"): - try: - data = httptools.downloadpage(url).data - url = decode_adfly(data) - except: - ddd = xbmcgui.Dialog() - ddd.ok( msg_header + ": No adf.ly support "," The script has no support for the adf.ly url shortener.", "", "url: " + url ) - return - - """ - # -- Necesario para algunas webs ---------------------------- - if not url.endswith(".torrent") and not url.startswith("magnet"): - #t_file = httptools.downloadpage(url, follow_redirects=False).headers["location"] - t_file = scrapertools.get_header_from_response(url, header_to_get="location") - if t_file: - if len(t_file) > 0: - url = t_file - t_file = httptools.downloadpage(url, follow_redirects=False).headers["location"] - if len(t_file) > 0: - url = t_file - """ - - # -- Crear dos carpetas en descargas para los archivos ------ - save_path_videos = os.path.join( DOWNLOAD_PATH , "MCT-torrent-videos" ) - save_path_torrents = os.path.join( DOWNLOAD_PATH , "MCT-torrents" ) - if not os.path.exists( save_path_torrents ): os.mkdir(save_path_torrents) - video_path = '' - global bkg_user - bkg_user = False - ses_lt = False - if item: - if item.contentType == 'movie': - video_path = '%s-%s' % (item.contentTitle, item.infoLabels['tmdb_id']) - else: - video_path = '%s-%sx%s-%s' % (item.contentSerieName, item.contentSeason, \ - item.contentEpisodeNumber, item.infoLabels['tmdb_id']) - item.rar_path = video_path - - # -- Usar - archivo torrent desde web, magnet o HD --------- - if not os.path.isfile(url) and not url.startswith("magnet"): - # -- http - crear archivo torrent ----------------------- - data = url_get(url) - - # -- El nombre del torrent será el que contiene en los -- - # -- datos. 
- - re_name = urllib.parse.unquote( scrapertools.find_single_match(data,':name\d+:(.*?)\d+:') ) - torrent_file = os.path.join(save_path_torrents, encode(re_name + '.torrent')) - - f = open(torrent_file,'wb') - f.write(data) - f.close() - elif os.path.isfile(url): - # -- file - para usar torrens desde el HD --------------- - torrent_file = url - else: - # -- magnet --------------------------------------------- - torrent_file = url - # ----------------------------------------------------------- - - # -- MCT - MiniClienteTorrent ------------------------------- - try: - log("XXX libtorrent pathname: %s" % str(LIBTORRENT_PATH)) - ses = lt.session() - except Exception as e: - do = xbmcgui.Dialog() - e = e1 or e2 - do.ok(config.get_localized_string(30035) + 'MCT Libtorrent', config.get_localized_string(30036), config.get_localized_string(60015), str(e)) - return - - log("XXX libtorrent version: %s" % lt.version) - log("##### Torrent file: %s ##" % torrent_file) - - ses.add_dht_router("router.bittorrent.com",6881) - ses.add_dht_router("router.utorrent.com",6881) - ses.add_dht_router("dht.transmissionbt.com",6881) - - trackers = [ - "udp://tracker.openbittorrent.com:80/announce", - "http://tracker.torrentbay.to:6969/announce", - "http://tracker.pow7.com/announce", - "udp://tracker.ccc.de:80/announce", - "udp://open.demonii.com:1337", - - "http://9.rarbg.com:2710/announce", - "http://bt.careland.com.cn:6969/announce", - "http://explodie.org:6969/announce", - "http://mgtracker.org:2710/announce", - "http://tracker.best-torrents.net:6969/announce", - "http://tracker.tfile.me/announce", - "http://tracker1.wasabii.com.tw:6969/announce", - "udp://9.rarbg.com:2710/announce", - "udp://9.rarbg.me:2710/announce", - "udp://coppersurfer.tk:6969/announce", - - "http://www.spanishtracker.com:2710/announce", - "http://www.todotorrents.com:2710/announce", - ] - - video_file = "" - # -- magnet2torrent ----------------------------------------- - if torrent_file.startswith("magnet"): - try: - import zlib - btih = hex(zlib.crc32(scrapertools.find_single_match(torrent_file, 'magnet:\?xt=urn:(?:[A-z0-9:]+|)([A-z0-9]{32})')) & 0xffffffff) - files = [f for f in os.listdir(save_path_torrents) if os.path.isfile(os.path.join(save_path_torrents, f))] - for file in files: - if btih in os.path.basename(file): - torrent_file = os.path.join(save_path_torrents, file) - except: - pass - - if torrent_file.startswith("magnet"): - try: - tempdir = tempfile.mkdtemp() - except IOError: - tempdir = os.path.join(save_path_torrents , "temp") - if not os.path.exists(tempdir): - os.mkdir(tempdir) - params = { - 'save_path': tempdir, - 'trackers': trackers, - 'storage_mode': lt.storage_mode_t.storage_mode_allocate - } - """ - , - 'paused': False, - 'auto_managed': True, - 'duplicate_is_error': True - """ - h = lt.add_magnet_uri(ses, torrent_file, params) - dp = xbmcgui.DialogProgress() - dp.create(msg_header) - while not h.has_metadata(): - message, porcent, msg_file, s, download = getProgress(h, "Creating torrent from magnet") - dp.update(porcent, message, msg_file) - if s.state == 1: download = 1 - if dp.iscanceled(): - dp.close() - remove_files( download, torrent_file, video_file, ses, h ) - return - h.force_dht_announce() - xbmc.sleep(1000) - - dp.close() - info = h.get_torrent_info() - data = lt.bencode( lt.create_torrent(info).generate() ) - - #torrent_file = os.path.join(save_path_torrents, unicode(info.name()+"-"+btih, "'utf-8'", errors="replace") + ".torrent") - torrent_file = os.path.join(save_path_torrents, info.name()+"-"+btih+ 
".torrent") - f = open(torrent_file,'wb') - f.write(data) - f.close() - ses.remove_torrent(h) - filetools.rmdirtree(tempdir) - # ----------------------------------------------------------- - - # -- Archivos torrent --------------------------------------- - e = lt.bdecode(open(torrent_file, 'rb').read()) - info = lt.torrent_info(e) - - # -- El más gordo o uno de los más gordo se entiende que es - - # -- el vídeo o es el vídeo que se usará como referencia - - # -- para el tipo de archivo - - log("##### Archivos ## %s ##" % len(info.files())) - _index_file, _video_file, _size_file = get_video_file(info) - - # -- Prioritarizar/Seleccionar archivo----------------------- - _index, video_file, video_size, len_files = get_video_files_sizes( info ) - if len_files == 0: - dp = xbmcgui.Dialog().ok(config.get_localized_string(20000), config.get_localized_string(60339)) - - if _index < 0: - log("##### parts = %s #########" % str(video_file)) - log("##### video_size = %s #########" % str(video_size)) - log("##### _index = %s #########" % str(_index)) - #if _index == -1: - # _index = _index_file - # video_size = _size_file - video_file = _video_file - else: - log("##### video_size = %s #########" % str(video_size)) - log("##### _index = %s #########" % str(_index)) - _video_file_ext = os.path.splitext( _video_file )[1] - log("##### _video_file ## %s ##" % str(_video_file)) - log("##### _video_file_ext ## %s ##" % _video_file_ext) - - dp_cerrado = True - rar = False - global extracted_rar - extracted_rar = False - global erase_file_path - erase_file_path = '' - - if _video_file_ext == ".rar": - rar = True - filename = video_file - if "/" in filename: - filename = filename.split("/")[1] - if RAR and BACKGROUND: - xbmcgui.Dialog().notification(config.get_localized_string(70768) % (video_size / 1048576.0), config.get_localized_string(70769), time=10000) - dialog = True - else: - dialog = xbmcgui.Dialog().yesno(config.get_localized_string(70770), config.get_localized_string(70771) % filename, - config.get_localized_string(70772) % (video_size / 1048576.0), config.get_localized_string(70773)) - if dialog: - dp_cerrado = False - dp = xbmcgui.DialogProgressBG() - dp.create(msg_header) - - if (_video_file_ext == ".avi" or _video_file_ext == ".mp4" or _video_file_ext == ".mkv") and allocate: - log("##### storage_mode_t.storage_mode_allocate ("+_video_file_ext+") #####") - h = ses.add_torrent( { 'ti':info, 'save_path': save_path_videos, 'trackers':trackers, 'storage_mode':lt.storage_mode_t.storage_mode_allocate } ) - else: - log("##### storage_mode_t.storage_mode_sparse ("+_video_file_ext+") #####") - h = ses.add_torrent( { 'ti':info, 'save_path': save_path_videos, 'trackers':trackers, 'storage_mode':lt.storage_mode_t.storage_mode_sparse } ) - allocate = True - global ses_lt - ses_lt = True - # ----------------------------------------------------------- - - # -- Descarga secuencial - trozo 1, trozo 2, ... 
------------ - h.set_sequential_download(True) - - h.force_reannounce() - h.force_dht_announce() - h.set_upload_limit(UPLOAD_LIMIT) - - # -- Inicio de variables para 'pause' automático cuando el - - # -- el vídeo se acerca a una pieza sin completar - - is_greater_num_pieces = False - is_greater_num_pieces_plus = False - is_greater_num_pieces_pause = False - - porcent4first_pieces = int( video_size * 0.000000005 ) - porcent4first_pieces = BUFFER - if porcent4first_pieces < BUFFER: porcent4first_pieces = BUFFER - if porcent4first_pieces > 100: porcent4first_pieces = 100 - porcent4last_pieces = int(old_div(porcent4first_pieces,2)) - - num_pieces_to_resume = int( video_size * 0.0000000025 ) - if num_pieces_to_resume < 10: num_pieces_to_resume = 10 - if num_pieces_to_resume > 25: num_pieces_to_resume = 25 - - log("##### porcent4first_pieces ## %s ##" % porcent4first_pieces) - log("##### porcent4last_pieces ## %s ##" % porcent4last_pieces) - log("##### num_pieces_to_resume ## %s ##" % num_pieces_to_resume) - - # -- Prioritarizar o seleccionar las piezas del archivo que - - # -- se desea reproducir con 'file_priorities' - - piece_set = set_priority_pieces(h, _index, video_file, video_size, - porcent4first_pieces, porcent4last_pieces, allocate) - global tot_piece_set - tot_piece_set = len(piece_set) - log("##### total piece_set ## %s ##" % len(piece_set)) - - if dp_cerrado: - # -- Crear diálogo de progreso para el primer bucle --------- - dp = xbmcgui.DialogProgress() - dp.create(msg_header) - - _pieces_info = {} - - ren_video_file = os.path.join( save_path_videos, video_file ) - # -- Doble bucle anidado ------------------------------------ - # -- Descarga - Primer bucle - while not h.is_seed(): - s = h.status() - - xbmc.sleep(1000) - if not dp_cerrado and not BACKGROUND: - dp.close() - dp_cerrado = True - dp = xbmcgui.DialogProgress() - dp.create(msg_header) - - # -- Recuperar los datos del progreso ------------------- - message, porcent, msg_file, s, download = getProgress(h, video_file, _pf=_pieces_info) - - # -- Si hace 'checking' existe descarga ----------------- - # -- 'download' Se usará para saber si hay datos - - # -- descargados para el diálogo de 'remove_files' - - if s.state == 1: download = 1 - - if (s.state == 5 or s.state == 4) and rar: - # -- Borrar sesión para que libere los archivos y se pueda renombrar la carpeta ------- - ses.pause() - #video_file, rar, play_file = extract_files(video_file, save_path_videos, password, dp, item=item) - video_file, rar, play_file, erase_path = torr.extract_files(video_file, \ - save_path_videos, password, dp, item=item, torr_client='MCT') # ... 
extraemos el vídeo del RAR - dp.close() - - erase_file_path = erase_path - ren_video_file = erase_file_path - extracted_rar = rar - if not play_file: - remove_files( download, torrent_file, erase_file_path, ses, h, ren_video_file, erase_file_path ) - return - is_view = "Ok" - save_path_videos = play_file - xbmc.sleep(3000) - - # -- Player - play -------------------------------------- - # -- Comprobar si se han completado las piezas para el - - # -- inicio del vídeo - - first_pieces = True - #if not extracted_rar: - _c = 0 - for i in range( piece_set[0], piece_set[porcent4first_pieces] ): - first_pieces &= h.have_piece(i) - if h.have_piece(i): _c+= 1 - _pieces_info = {'current': 0, 'continuous': "%s/%s" % (_c, porcent4first_pieces), \ - 'continuous2': "", 'have': h.status().num_pieces, 'len': len(piece_set)} - - last_pieces = True - if not allocate: - _c = len(piece_set)-1; _cc = 0 - for i in range(len(piece_set)-porcent4last_pieces, len(piece_set)): - last_pieces &= h.have_piece(i) - if h.have_piece(i): _c-= 1; _cc+=1 - _pieces_info['continuous2'] = "[%s/%s] " % (_cc, porcent4last_pieces) - - if is_view != "Ok" and h.status().num_pieces >= BUFFER and not rar and not bkg_user \ - or ((s.state == 5 or s.state == 4) and bkg_user): - _pieces_info['continuous2'] = "" - log("##### porcent [%.2f%%]" % (s.progress * 100)) - dp.close() - dp_cerrado = True - if not bkg_user: - is_view = "Ok" - else: - remove_files( download, torrent_file, video_file, ses, h, ren_video_file, erase_file_path ) - return - - if is_view == "Ok": - # -- Esperando a que termine otra reproducción -------------------------- - while xbmc.Player().isPlaying(): - xbmc.sleep(3000) - - # -- Player - Ver el vídeo -------------------------- - playlist = xbmc.PlayList( xbmc.PLAYLIST_VIDEO ) - playlist.clear() - - ren_video_file = os.path.join( save_path_videos, video_file ) - try: - playlist.add( ren_video_file, xlistitem ) - except: - playlist.add( ren_video_file ) - - if xbmc_version < 17: - player = play_video( xbmc.PLAYER_CORE_AUTO ) - else: - player = play_video() - player.play(playlist) - - # -- Contador de cancelaciones para la ventana de - - # -- 'pause' automático - - is_greater_num_pieces_canceled = 0 - continuous_pieces = 0 - porcent_time = 0.00 - current_piece = 0 - set_next_continuous_pieces = porcent4first_pieces - - # -- Impedir que kodi haga 'resume' a un archivo ---- - # -- que se reprodujo con anterioridad y que se - - # -- eliminó para impedir que intente la reprucción - - # -- en una pieza que aún no se ha completado y se - - # -- active 'pause' automático - - not_resume = True - - # -- Bandera subTítulos - _sub = False - - # -- Segundo bucle - Player - Control de eventos ---- - bkg_auto = True - log("##### PLAY %s" % (h.status().num_pieces)) - if item: torr.mark_auto_as_watched(item) - if ses_lt: h.set_download_limit(DOWNLOAD_LIMIT) - while player.isPlaying(): - - # -- Impedir que kodi haga 'resume' al inicio --- - # -- de la descarga de un archivo conocido - - if not_resume: - player.seekTime(0) - not_resume = False - - # -- Control 'pause' automático - - continuous_pieces = count_completed_continuous_pieces(h, piece_set) - - if xbmc.Player().isPlaying() and not rar: - - # -- Porcentage del progreso del vídeo ------ - # -- En kodi 18.x se debe controlar - - # -- ZeroDivisionError: float division by - - # -- zero - - player_getTime = player.getTime() - player_getTotalTime = player.getTotalTime() - try: porcent_time = old_div(player_getTime, player_getTotalTime) * 100 - except: porcent_time = 0 - - # -- Pieza que 
se está reproduciendo -------- - # -- En kodi 18.x se debe controlar - - # -- ZeroDivisionError: float division by - - # -- zero - - try: current_piece = int( old_div(porcent_time, 100) * len(piece_set) ) - except: current_piece = 0 - - # -- Banderas de control -------------------- - is_greater_num_pieces = (current_piece > continuous_pieces - num_pieces_to_resume) - #is_greater_num_pieces_plus = (current_piece + porcent4first_pieces > continuous_pieces) - is_greater_num_pieces_plus = (current_piece + BUFFER > continuous_pieces) - #is_greater_num_pieces_finished = (current_piece + porcent4first_pieces >= len(piece_set)) - is_greater_num_pieces_finished = (current_piece + BUFFER >= len(piece_set)) - - # -- Activa 'pause' automático -------------- - if is_greater_num_pieces and not player.paused and not is_greater_num_pieces_finished: - is_greater_num_pieces_pause = True - player.pause() - - if continuous_pieces >= set_next_continuous_pieces: - set_next_continuous_pieces = continuous_pieces + num_pieces_to_resume - next_continuous_pieces = str(continuous_pieces - current_piece) + "/" + str(set_next_continuous_pieces - current_piece) - _pieces_info = {'current': current_piece, 'continuous': next_continuous_pieces , 'continuous2': _pieces_info['continuous2'], 'have': h.status().num_pieces, 'len': len(piece_set)} - - # -- Cerrar el diálogo de progreso -------------- - if player.resumed: - dp.close() - - # -- Mostrar el diálogo de progreso ------------- - if player.paused and dp_cerrado and not rar: - # -- Crear diálogo si no existe ------------- - log("##### PAUSED %s" % (h.status().num_pieces)) - if not player.statusDialogoProgress: - dp = xbmcgui.DialogProgressBG() - dp.create(msg_header) - player.setDialogoProgress() - - # -- Diálogos de estado en el visionado ----- - if not h.is_seed(): - # -- Recuperar los datos del progreso --- - message, porcent, msg_file, s, download = getProgress(h, video_file, _pf=_pieces_info) - dp.update(porcent, message, '[CR]' + message + '[CR]' + msg_file) - else: - dp.update(100, "Download complete: " + video_file) - - # -- Se canceló el progreso en el visionado - - # -- Continuar - - if not bkg_auto and dp.iscanceled(): - dp.close() - player.pause() - - # -- Se canceló el progreso en el visionado - - # -- en la ventana de 'pause' automático. 
- - # -- Parar si el contador llega a 3 - - if not bkg_auto and dp.iscanceled() and is_greater_num_pieces_pause: - is_greater_num_pieces_canceled+= 1 - if is_greater_num_pieces_canceled == 3: - player.stop() - - # -- Desactiva 'pause' automático y --------- - # -- reinicia el contador de cancelaciones - - if not is_greater_num_pieces_plus and is_greater_num_pieces_pause: - dp.close() - player.pause() - is_greater_num_pieces_pause = False - is_greater_num_pieces_canceled = 0 - - # -- El usuario cancelo el visionado -------- - # -- Terminar - - if player.ended: - # -- Diálogo eliminar archivos ---------- - remove_files( download, torrent_file, video_file, ses, h, ren_video_file, erase_file_path ) - return - - xbmc.sleep(1000) - - # -- Kodi - Se cerró el visionado ----------------------- - # -- Continuar | Terminar - - if is_view == "Ok" and not xbmc.Player().isPlaying(): - dp.close() - - if h.status().num_pieces < tot_piece_set: - # -- Diálogo continuar o terminar --------------- - # Preguntamos si el usuario quiere pasar a backgroung - ok = xbmcgui.Dialog().yesno(msg_header, config.get_localized_string(30031), config.get_localized_string(30032)) - else: ok = True - # -- NO --------------------------------------------- - if ok: - is_view=None - bkg_user = True - dp_cerrado = False - dp = xbmcgui.DialogProgressBG() - dp.create(msg_header) - - else: - # -- Terminar: ---------------------------------- - # -- Comprobar si el vídeo pertenece a una ------ - # -- lista de archivos - - remove_files( download, torrent_file, video_file, ses, h, ren_video_file, erase_file_path ) - dp.close() - return - """ - #_index, video_file, video_size, len_files = get_video_files_sizes( info ) - if _index < 0 or len_files == 1: - # -- Diálogo eliminar archivos -------------- - #video_file = _video_file - remove_files( download, torrent_file, video_file, ses, h, ren_video_file, erase_file_path ) - dp.close() - return - else: - # -- Lista de archivos. Diálogo de opciones - - piece_set = set_priority_pieces(h, _index, video_file, video_size, - porcent4first_pieces, porcent4last_pieces, allocate) - is_view=None - dp = xbmcgui.DialogProgress() - dp.create(msg_header) - """ - - # -- Mostar progeso antes del visionado ----------------- - if is_view != "Ok" : - dp.update(porcent, message, msg_file) - - # -- Se canceló el progreso antes del visionado --------- - # -- Dar otra oportunidad en background o Terminar - - if not bkg_user and dp_cerrado and dp.iscanceled(): - dp.close() - # Preguntamos si el usuario quiere pasar a backgroung - dialog = xbmcgui.Dialog().yesno(msg_header, config.get_localized_string(30031), config.get_localized_string(30032)) - if dialog: - bkg_user = True - dp_cerrado = False - dp = xbmcgui.DialogProgressBG() - dp.create(msg_header) - if ses_lt: h.set_download_limit(DOWNLOAD_LIMIT) - - else: - - remove_files( download, torrent_file, video_file, ses, h, ren_video_file, erase_file_path ) - return - # -- Comprobar si el vídeo pertenece a una lista de - - # -- archivos - - #_index, video_file, video_size, len_files = get_video_files_sizes( info ) - if _index < 0 or len_files == 1: - # -- Diálogo eliminar archivos ------------------ - #video_file = _video_file - remove_files( download, torrent_file, video_file, ses, h, ren_video_file, erase_file_path ) - return - else: - # -- Lista de archivos. 
Diálogo de opciones ----- - piece_set = set_priority_pieces(h, _index, video_file, video_size, - porcent4first_pieces, porcent4last_pieces, allocate) - is_view=None - dp = xbmcgui.DialogProgress() - dp.create(msg_header) - - # -- Kodi - Error? - No debería llegar aquí ----------------- - if is_view == "Ok" and not xbmc.Player().isPlaying(): - dp.close() - # -- Diálogo eliminar archivos -------------------------- - remove_files( download, torrent_file, video_file, ses, h, ren_video_file, erase_file_path) - - return - - -# -- Progreso de la descarga ------------------------------------ -def getProgress(h, video_file, _pf={}): - - if len(_pf) > 0: - _pf_msg = "[%s] [%s] %s[%s] [%s]" % (_pf['current'], _pf['continuous'], _pf['continuous2'], _pf['have'], _pf['len']) - else: _pf_msg = "" - - s = h.status() - - state_str = ['queued', 'checking', 'downloading metadata', \ - 'downloading', 'finished', 'seeding', 'allocating', 'checking fastresume'] - - message = '%.2f%% d:%.1f kb/s u:%.1f kb/s p:%d s:%d %s' % \ - (s.progress * 100, old_div(s.download_rate, 1000), old_div(s.upload_rate, 1000), \ - s.num_peers, s.num_seeds, state_str[s.state]) - porcent = int( s.progress * 100 ) - - download = ( s.progress * 100 ) - - if "/" in video_file: video_file = video_file.split("/")[1] - msg_file = video_file - - if len(msg_file) > 50: - msg_file = msg_file.replace( video_file, os.path.splitext(video_file)[0][:40] + "... " + os.path.splitext(video_file)[1] ) - msg_file = msg_file + "[CR]" + "%.2f MB" % (s.total_wanted/1048576.0) + " - " + _pf_msg - - return (message, porcent, msg_file, s, download) - - -# -- Clase play_video - Controlar eventos ----------------------- -class play_video(xbmc.Player): - - def __init__( self, *args, **kwargs ): - self.paused = False - self.resumed = True - self.statusDialogoProgress = False - self.ended = False - - def onPlayBackPaused(self): - self.paused = True - self.resumed = False - - def onPlayBackResumed(self): - self.paused = False - self.resumed = True - self.statusDialogoProgress = False - - def is_paused(self): - return self.paused - - def setDialogoProgress(self): - self.statusDialogoProgress = True - - def is_started(self): - self.ended = False - - def is_ended(self): - self.ended = True - - -# -- Conseguir el nombre un alchivo de vídeo del metadata ------- -# -- El más gordo o uno de los más gordo se entiende que es el - -# -- vídeo o es vídeo que se usará como referencia para el tipo - -# -- de archivo - -def get_video_file( info ): - extensions_list = ['.aaf', '.3gp', '.asf', '.avi', '.flv', '.mpeg', - '.m1v', '.m2v', '.m4v', '.mkv', '.mov', '.mpg', - '.mpe', '.mp4', '.ogg', '.rar', '.wmv', '.zip'] - size_file = 0 - for i, f in enumerate(info.files()): - if f.size > size_file: - video_file = f.path.replace("\\","/") - size_file = f.size - index_file = i - if os.path.splitext( video_file )[1] in extensions_list: - break - return index_file, video_file, size_file - - -# -- Listado de selección del vídeo a prioritarizar ------------- -def get_video_files_sizes( info ): - - opciones = {} - vfile_name = {} - vfile_size = {} - rar_parts = 0 - rar_size = 0 - vid_parts = 0 - vid_size = 0 - - # -- Eliminar errores con tíldes ----------------------------- - for i, f in enumerate( info.files() ): - _title = unicode(f.path, "iso-8859-1", errors="replace") - _title = unicode(f.path, "'utf-8'", errors="replace") - - extensions_list = ['.aaf', '.3gp', '.asf', '.avi', '.flv', '.mpeg', - '.m1v', '.m2v', '.m4v', '.mkv', '.mov', '.mpg', - '.mpe', '.mp4', '.ogg', '.rar', '.wmv', 
'.zip'] - - for i, f in enumerate( info.files() ): - _index = int(i) - _title = f.path.replace("\\","/") - _size = f.size - - _file_name = os.path.splitext( _title )[0] - if "/" in _file_name: _file_name = _file_name.split('/')[1] - - _file_ext = os.path.splitext( _title )[1] - - if '.rar' in _file_ext or '.zip' in _file_ext: - rar_parts += 1 - rar_size += _size - else: - vid_parts += 1 - vid_size += _size - - if _file_ext in extensions_list: - index = len(opciones) - _caption = str(index) + \ - " - " + \ - _file_name + _file_ext + \ - " - %.2f MB" % (_size / 1048576.0) - - vfile_name[index] = _title - vfile_size[index] = _size - - opciones[i] = _caption - - if len(opciones) > 1: - if rar_parts > 1: - seleccion = -1 - index = -9 - return index, rar_parts, rar_size, len(opciones) - else: - d = xbmcgui.Dialog() - seleccion = d.select(msg_header + config.get_localized_string(30034), list(opciones.values())) - else: seleccion = 0 - - index = list(opciones.keys())[seleccion] - if seleccion == -1: - vfile_name[seleccion] = vid_parts - vfile_size[seleccion] = vid_size - index = seleccion - - return index, vfile_name[seleccion], vfile_size[seleccion], len(opciones) - -# -- Preguntar si se desea borrar lo descargado ----------------- -def remove_files( download, torrent_file, video_file, ses, h, ren_video_file="", erase_file_path='' ): - dialog_view = False - torrent = False - - if os.path.isfile( torrent_file ): - dialog_view = True - torrent = True - - if download > 0: - dialog_view = True - if bkg_user and not extracted_rar: - dialog_view = False - - if erase_file_path and erase_file_path != os.path.join( DOWNLOAD_PATH , "MCT-torrent-videos" ): - ren_video_file = erase_file_path - if filetools.isfile(ren_video_file) and filetools.split(ren_video_file)[0] != os.path.join( DOWNLOAD_PATH , "MCT-torrent-videos" ): - ren_video_file = filetools.split(ren_video_file)[0] - elif filetools.isdir(ren_video_file) and ren_video_file == os.path.join( DOWNLOAD_PATH , "MCT-torrent-videos" ): - ren_video_file = '' - - if dialog_view and ren_video_file: - if h.status().num_pieces >= tot_piece_set: - d = xbmcgui.Dialog() - ok = d.yesno(msg_header, config.get_localized_string(30031), video_file) - else: - ok = True - - # -- SI ------------------------------------------------- - if ok: - # -- Borrar archivo - torrent ----------------------- - if torrent: - try: - os.remove( torrent_file ) - except: - pass - # -- Borrar carpeta/archivos y sesión - vídeo ------- - try: - ses.remove_torrent( h, 1 ) - ses_lt = False - except: - ses_lt = False - try: - if os.path.isdir(ren_video_file): - filetools.rmdirtree(ren_video_file, silent=True) - elif os.path.exists(ren_video_file) and os.path.isfile(ren_video_file): - os.remove(ren_video_file) - log("##### erase_file_path: %s" % ren_video_file) - except: - log("##### erase_file_path: %s" % ren_video_file) - - log("### End session #########") - else: - # -- Borrar sesión ---------------------------------- - try: - ses.remove_torrent( h ) - ses_lt = False - except: - ses_lt = False - log("### End session #########") - else: - # -- Borrar sesión -------------------------------------- - try: - ses.remove_torrent( h ) - ses_lt = False - except: - ses_lt = False - # -- Borrar archivo - torrent ----------------------- - if torrent: - try: - os.remove( torrent_file ) - except: - pass - log("### End session #########") - - return - - -# -- Descargar de la web los datos para crear el torrent -------- -# -- Si queremos aligerar el script mct.py se puede importar la - -# -- función del 
conentor torrent.py - -def url_get(url, params={}, headers={}): - from contextlib import closing - - USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:20.0) Gecko/20100101 Firefox/20.0" - - if params: - url = "%s?%s" % (url, urllib.parse.urlencode(params)) - - req = urllib.request.Request(url) - req.add_header("User-Agent", USER_AGENT) - - for k, v in list(headers.items()): - req.add_header(k, v) - - try: - with closing(urllib.request.urlopen(req)) as response: - data = response.read() - if response.headers.get("Content-Encoding", "") == "gzip": - import zlib - return zlib.decompressobj(16 + zlib.MAX_WBITS).decompress(data) - return data - except urllib.error.HTTPError: - return None - - -# -- Contar las piezas contiguas completas del vídeo ------------ -def count_completed_continuous_pieces(h, piece_set): - not_zero = 0 - for i, _set in enumerate(piece_set): - if not h.have_piece(_set): break - else: not_zero = 1 - return i + not_zero - - -# -- Prioritarizar o seleccionar las piezas del archivo que se - -# -- desea reproducir con 'file_priorities' estableciendo a 1 - -# -- el archivo deseado y a 0 el resto de archivos almacenando - -# -- en una lista los índices de de las piezas del archivo - -def set_priority_pieces(h, _index, video_file, video_size, - porcent4first_pieces, porcent4last_pieces, allocate): - - for i, _set in enumerate(h.file_priorities()): - if i != _index and _index >= 0: - #h.file_priority(i,0) - xbmc.sleep(1000) - h.file_priority(i,0) - else: - #h.file_priority(i,0) - xbmc.sleep(1000) - h.file_priority(i,1) - - piece_set = [] - x = 0 - for i, _set in enumerate(h.piece_priorities()): - #log("***** Nº Pieza: %s: %s" % (i, str(_set))) - if _set > 0: - piece_set.append(i) - x += 1 - log("***** Piezas %s : Activas: %s" % (str(i+1), str(x))) - - if not allocate: - for i in range(0, porcent4first_pieces): - h.set_piece_deadline(piece_set[i], 10000) - - for i in range(len(piece_set)-porcent4last_pieces, len(piece_set)): - h.set_piece_deadline(piece_set[i], 10000) - - return piece_set - - -def decode_adfly(data): - import base64 - ysmm = scrapertools.find_single_match(data, "var ysmm = '([^']+)'") - left = '' - right = '' - for c in [ysmm[i:i+2] for i in range(0, len(ysmm), 2)]: - left += c[0] - right = c[1] + right - - decoded_url = base64.b64decode(left.encode() + right.encode())[2:].decode() - return decoded_url - - -def encode(s): - import unicodedata - #log("### log ######") - #for c in s: - # log("%s : %s" % (c, str(unicodedata.category(c)))) - #log("##############") - #return s - return str(''.join((c for c in unicodedata.normalize('NFD', unicode(s, 'utf-8')) if unicodedata.category(c) != 'Mn'))) - - -def log(texto): - xbmc.log(texto, xbmc.LOGNOTICE) diff --git a/platformcode/platformtools.py b/platformcode/platformtools.py index cc77c3bd..fea9234f 100644 --- a/platformcode/platformtools.py +++ b/platformcode/platformtools.py @@ -21,8 +21,7 @@ else: import os, xbmc, xbmcgui, xbmcplugin from channelselector import get_thumb -from core import channeltools -from core import trakt_tools, scrapertools +from core import channeltools, trakt_tools, scrapertools from core.item import Item from platformcode import logger, config, unify diff --git a/platformcode/recaptcha.py b/platformcode/recaptcha.py index 4748367c..c4422143 100644 --- a/platformcode/recaptcha.py +++ b/platformcode/recaptcha.py @@ -42,7 +42,7 @@ class Recaptcha(xbmcgui.WindowXMLDialog): self.imagen = kwargs.get("imagen") def onInit(self): - #### Compatibilidad con Kodi 18 #### + #### Kodi 18 
compatibility #### if config.get_platform(True)['num_version'] < 18: self.setCoordinateResolution(2) self.update_window() diff --git a/platformcode/subtitletools.py b/platformcode/subtitletools.py index b4294efc..9a90bd1f 100644 --- a/platformcode/subtitletools.py +++ b/platformcode/subtitletools.py @@ -9,19 +9,16 @@ if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int if PY3: #from future import standard_library #standard_library.install_aliases() - import urllib.parse as urllib # Es muy lento en PY2. En PY3 es nativo + import urllib.parse as urllib # It is very slow in PY2. In PY3 it is native else: - import urllib # Usamos el nativo de PY2 que es más rápido + import urllib # We use the native of PY2 which is faster import os import re import string from unicodedata import normalize -from core import filetools -from core import httptools -from core import jsontools -from core import scrapertools +from core import filetools, httptools, jsontools, scrapertools import xbmc import xbmcgui @@ -32,22 +29,22 @@ if not PY3: allchars = string.maketrans('', '') deletechars = ',\\/:*"<>|?' -# Extraemos el nombre de la serie, temporada y numero de capitulo ejemplo: 'fringe 1x01' +# We extract the name of the series, season and chapter number example: 'fringe 1x01' def regex_tvshow(compare, file, sub=""): - regex_expressions = ['[Ss]([0-9]+)[][._-]*[Ee]([0-9]+)([^\\\\/]*)$', - '[\._ \-]([0-9]+)x([0-9]+)([^\\/]*)', # foo.1x09 - '[\._ \-]([0-9]+)([0-9][0-9])([\._ \-][^\\/]*)', # foo.109 - '([0-9]+)([0-9][0-9])([\._ \-][^\\/]*)', - '[\\\\/\\._ -]([0-9]+)([0-9][0-9])[^\\/]*', - 'Season ([0-9]+) - Episode ([0-9]+)[^\\/]*', - 'Season ([0-9]+) Episode ([0-9]+)[^\\/]*', - '[\\\\/\\._ -][0]*([0-9]+)x[0]*([0-9]+)[^\\/]*', - '[[Ss]([0-9]+)\]_\[[Ee]([0-9]+)([^\\/]*)', # foo_[s01]_[e01] - '[\._ \-][Ss]([0-9]+)[\.\-]?[Ee]([0-9]+)([^\\/]*)', # foo, s01e01, foo.s01.e01, foo.s01-e01 - 's([0-9]+)ep([0-9]+)[^\\/]*', # foo - s01ep03, foo - s1ep03 - '[Ss]([0-9]+)[][ ._-]*[Ee]([0-9]+)([^\\\\/]*)$', - '[\\\\/\\._ \\[\\(-]([0-9]+)x([0-9]+)([^\\\\/]*)$', - '[\\\\/\\._ \\[\\(-]([0-9]+)X([0-9]+)([^\\\\/]*)$' + regex_expressions = [r'[Ss]([0-9]+)[][._-]*[Ee]([0-9]+)([^\\\\/]*)$', + r'[\._ \-]([0-9]+)x([0-9]+)([^\\/]*)', # foo.1x09 + r'[\._ \-]([0-9]+)([0-9][0-9])([\._ \-][^\\/]*)', # foo.109 + r'([0-9]+)([0-9][0-9])([\._ \-][^\\/]*)', + r'[\\\\/\\._ -]([0-9]+)([0-9][0-9])[^\\/]*', + r'Season ([0-9]+) - Episode ([0-9]+)[^\\/]*', + r'Season ([0-9]+) Episode ([0-9]+)[^\\/]*', + r'[\\\\/\\._ -][0]*([0-9]+)x[0]*([0-9]+)[^\\/]*', + r'[[Ss]([0-9]+)\]_\[[Ee]([0-9]+)([^\\/]*)', # foo_[s01]_[e01] + r'[\._ \-][Ss]([0-9]+)[\.\-]?[Ee]([0-9]+)([^\\/]*)', # foo, s01e01, foo.s01.e01, foo.s01-e01 + r's([0-9]+)ep([0-9]+)[^\\/]*', # foo - s01ep03, foo - s1ep03 + r'[Ss]([0-9]+)[][ ._-]*[Ee]([0-9]+)([^\\\\/]*)$', + r'[\\\\/\\._ \\[\\(-]([0-9]+)x([0-9]+)([^\\\\/]*)$', + r'[\\\\/\\._ \\[\\(-]([0-9]+)X([0-9]+)([^\\\\/]*)$' ] sub_info = "" tvshow = 0 @@ -83,8 +80,7 @@ def regex_tvshow(compare, file, sub=""): else: return "", "", "" - # Obtiene el nombre de la pelicula o capitulo de la serie guardado previamente en configuraciones del plugin - # y luego lo busca en el directorio de subtitulos, si los encuentra los activa. + # Gets the name of the movie or episode of the series previously saved in plugin settings and then searches for it in the subtitles directory, if it finds them, activates them. 
def set_Subtitle(): @@ -134,7 +130,7 @@ def set_Subtitle(): except: logger.error("error al cargar subtitulos") - # Limpia los caracteres unicode + # Clean unicode characters def _normalize(title, charset='utf-8'): @@ -222,9 +218,7 @@ def searchSubtitle(item): filetools.join(full_path_tvshow, "%s %sx%s.mp4" % (tvshow_title, season, episode))) logger.info(full_path_video_new) listitem = xbmcgui.ListItem(title_new, iconImage="DefaultVideo.png", thumbnailImage="") - listitem.setInfo("video", - {"Title": title_new, "Genre": "Tv shows", "episode": int(episode), "season": int(season), - "tvshowtitle": tvshow_title}) + listitem.setInfo("video", {"Title": title_new, "Genre": "Tv shows", "episode": int(episode), "season": int(season), "tvshowtitle": tvshow_title}) else: full_path_video_new = xbmc.translatePath(filetools.join(path_movie_subt, title_new + ".mp4")) @@ -248,7 +242,7 @@ def searchSubtitle(item): # xbmctools.launchplayer(full_path_video_new,listitem) except: copy = False - logger.error("Error : no se pudo copiar") + logger.error("Error : could not copy") time.sleep(1) @@ -289,10 +283,9 @@ def saveSubtitleName(item): def get_from_subdivx(sub_url): """ - :param sub_url: Url de descarga del subtitulo alojado en suvdivx.com - Por Ejemplo: http://www.subdivx.com/bajar.php?id=573942&u=8 + :param sub_url: Download url of the subtitle hosted on suvdivx.com For Example: http://www.subdivx.com/bajar.php?id=573942&u=8 - :return: La ruta al subtitulo descomprimido + :return: The path to the unzipped subtitle """ logger.info() @@ -319,20 +312,20 @@ def get_from_subdivx(sub_url): filetools.write(filename, data_dl) sub = extract_file_online(sub_dir, filename) except: - logger.info('sub no valido') + logger.info('sub invalid') else: - logger.info('sub no valido') + logger.info('sub invalid') return sub def extract_file_online(path, filename): """ - :param path: Ruta donde se encuentra el archivo comprimido + :param path: Path where the compressed file is located - :param filename: Nombre del archivo comprimido + :param filename: - :return: Devuelve la ruta al subtitulo descomprimido + :return: """ logger.info() diff --git a/platformcode/unify.py b/platformcode/unify.py index 07302888..ec90ffd3 100644 --- a/platformcode/unify.py +++ b/platformcode/unify.py @@ -2,24 +2,18 @@ # ------------------------------------------------------------ # Unify # ------------------------------------------------------------ -# Herramientas responsables de unificar diferentes tipos de -# datos obtenidos de las paginas +# Tools responsible for unifying different types of data obtained from the pages # ---------------------------------------------------------- # from builtins import str -import sys +import sys, os, unicodedata, re PY3 = False if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int -import os -import unicodedata -import re - -from platformcode import config +from platformcode import config, logger from core.item import Item from core import scrapertools -from platformcode import logger thumb_dict = {"movies": "https://s10.postimg.cc/fxtqzdog9/peliculas.png", "tvshows": "https://s10.postimg.cc/kxvslawe1/series.png", @@ -147,10 +141,10 @@ def set_genre(string): def remove_format(string): # logger.info() - # logger.debug('entra en remove: %s' % string) + # logger.debug('enter remove: %s' % string) string = string.rstrip() string = re.sub(r'(\[|\[\/)(?:color|COLOR|b|B|i|I).*?\]|\[|\]|\(|\)|\:|\.', '', string) - # logger.debug('sale de remove: %s' % string) + # logger.debug('leaves remove: 
%s' % string) return string @@ -163,7 +157,7 @@ def normalize(string): def simplify(string): # logger.info() - # logger.debug('entra en simplify: %s'%string) + # logger.debug('enter simplify: %s'%string) string = remove_format(string) string = string.replace('-', ' ').replace('_', ' ') string = re.sub(r'\d+', '', string) @@ -196,7 +190,7 @@ def add_info_plot(plot, languages, quality): last = '[/I][/B]\n' if languages: - l_part = '[COLOR yellowgreen][B][I]Idiomas:[/COLOR] ' + l_part = 'Languages: ' mid = '' if isinstance(languages, list): @@ -208,7 +202,7 @@ def add_info_plot(plot, languages, quality): p_lang = '%s%s%s' % (l_part, mid, last) if quality: - q_part = '[COLOR yellowgreen][B][I]Calidad:[/COLOR] ' + q_part = 'Quality: ' p_quality = '%s%s%s' % (q_part, quality, last) if languages and quality: @@ -236,18 +230,17 @@ def set_color(title, category): color_scheme = {'otro': 'white', 'dual': 'white'} - # logger.debug('category antes de remove: %s' % category) + # logger.debug('category before remove: %s' % category) category = remove_format(category).lower() - # logger.debug('category despues de remove: %s' % category) - # Lista de elementos posibles en el titulo + # logger.debug('category after remove: %s' % category) + # List of possible elements in the title color_list = ['movie', 'tvshow', 'year', 'rating_1', 'rating_2', 'rating_3', 'quality', 'cast', 'lat', 'vose', 'vos', 'vo', 'server', 'library', 'update', 'no_update'] - # Se verifica el estado de la opcion de colores personalizados + # Check the status of the custom colors options custom_colors = config.get_setting('title_color') - # Se Forma el diccionario de colores para cada elemento, la opcion esta activas utiliza la configuracion del - # usuario, si no pone el titulo en blanco. + # The color dictionary is formed for each element, the option is active uses the user's configuration, if it does not leave the title blank. 
if title not in ['', ' ']: for element in color_list: @@ -258,13 +251,13 @@ def set_color(title, category): # color_scheme[element] = 'white' if category in ['update', 'no_update']: - # logger.debug('title antes de updates: %s' % title) + # logger.debug('title before updates: %s' % title) title = re.sub(r'\[COLOR .*?\]', '[COLOR %s]' % color_scheme[category], title) else: if category not in ['movie', 'tvshow', 'library', 'otro']: - title = "[COLOR %s][%s][/COLOR]" % (color_scheme[category], title) + title = title else: - title = "[COLOR %s]%s[/COLOR]" % (color_scheme[category], title) + title = title return title @@ -317,17 +310,17 @@ def title_format(item): language_color = 'otro' simple_language = '' - # logger.debug('item.title antes de formatear: %s' % item.title.lower()) + # logger.debug('item.title before formatting: %s' % item.title.lower()) - # TODO se deberia quitar cualquier elemento que no sea un enlace de la lista de findvideos para quitar esto + # TODO any item other than a link should be removed from the findvideos list to remove this - # Palabras "prohibidas" en los titulos (cualquier titulo que contengas estas no se procesara en unify) + # Words "prohibited" in the titles (any title that contains these will not be processed in unify) excluded_words = ['online', 'descarga', 'downloads', 'trailer', 'videoteca', 'gb', 'autoplay'] - # Actions excluidos, (se define canal y action) los titulos que contengan ambos valores no se procesaran en unify + # Excluded actions, (channel and action are defined) the titles that contain both values ​​will not be processed in unify excluded_actions = [('videolibrary', 'get_episodes')] - # Verifica el item sea valido para ser formateado por unify + # Verify the item is valid to be formatted by unify if item.channel == 'trailertools' or (item.channel.lower(), item.action.lower()) in excluded_actions or \ item.action == '': @@ -340,37 +333,36 @@ def title_format(item): if not valid: return item - # Verifica si hay marca de visto de trakt + # Check for trakt tick marks visto = False - # logger.debug('titlo con visto? %s' % item.title) + # logger.debug('I titlo with visa? %s' % item.title) if '[[I]v[/I]]' in item.title or '[COLOR limegreen][v][/COLOR]' in item.title: visto = True - # Se elimina cualquier formato previo en el titulo + # Any previous format in the title is eliminated if item.action != '' and item.action != 'mainlist' and item.unify: item.title = remove_format(item.title) - # logger.debug('visto? %s' % visto) + # logger.debug('seen? %s' % visto) - # Evita que aparezcan los idiomas en los mainlist de cada canal + # Prevents languages ​​from appearing in the main lists of each channel if item.action == 'mainlist': item.language = '' info = item.infoLabels - # logger.debug('item antes de formatear: %s'%item) + # logger.debug('item before formatr: %s'%item) if hasattr(item, 'text_color'): item.text_color = '' if valid and item.unify != False: - # Formamos el titulo para serie, se debe definir contentSerieName - # o show en el item para que esto funcione. + # We form the title for series, contentSerieName or show must be defined in the item for this to work. 
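For instance, a minimal Item carrying the fields this series branch relies on; the channel name and label values below are illustrative only, not taken from this patch:

    from core.item import Item

    # contentSerieName marks the item as a series; the infoLabels season/episode/title
    # fields feed the episode title formatting applied below.
    sample = Item(channel='example_channel', action='findvideos',
                  contentSerieName='Fringe', contentType='episode',
                  infoLabels={'tvshowtitle': 'Fringe', 'season': 1,
                              'episode': 1, 'title': 'Pilot'})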
if item.contentSerieName: - # Si se tiene la informacion en infolabels se utiliza + # If you have the information in infolabels it is used if item.contentType == 'episode' and info['episode'] != '': if info['title'] == '': info['title'] = '%s - Episodio %s' % (info['tvshowtitle'], info['episode']) @@ -391,12 +383,12 @@ def title_format(item): else: - # En caso contrario se utiliza el titulo proporcionado por el canal + # Otherwise the title provided by the channel is used # logger.debug ('color_scheme[tvshow]: %s' % color_scheme['tvshow']) item.title = '%s' % set_color(item.title, 'tvshow') elif item.contentTitle: - # Si el titulo no tiene contentSerieName entonces se formatea como pelicula + # If the title does not have contentSerieName then it is formatted as a movie saga = False if 'saga' in item.title.lower(): item.title = '%s [Saga]' % set_color(item.contentTitle, 'movie') @@ -415,11 +407,10 @@ def title_format(item): # logger.debug('novedades') item.title = '%s [%s]' % (item.title, item.channel) - # Verificamos si item.language es una lista, si lo es se toma - # cada valor y se normaliza formado una nueva lista + # We check if item.language is a list, if it is, each value is taken and normalized, forming a new list if hasattr(item, 'language') and item.language != '': - # logger.debug('tiene language: %s'%item.language) + # logger.debug('has language: %s'%item.language) if isinstance(item.language, list): language_list = [] for language in item.language: @@ -429,7 +420,7 @@ def title_format(item): # logger.debug('language_list: %s' % language_list) simple_language = language_list else: - # Si item.language es un string se normaliza + # If item.language is a string it is normalized if item.language != '': lang = True simple_language = set_lang(item.language).upper() @@ -438,8 +429,7 @@ def title_format(item): # item.language = simple_language - # Damos formato al año si existiera y lo agregamos - # al titulo excepto que sea un episodio + # We format the year if it exists and add it to the title except that it is an episode if info and info.get("year", "") not in ["", " "] and item.contentType != 'episode' and not info['season']: try: year = '%s' % set_color(info['year'], 'year') @@ -447,14 +437,14 @@ def title_format(item): except: logger.debug('infoLabels: %s' % info) - # Damos formato al puntaje si existiera y lo agregamos al titulo + # We format the score if it exists and add it to the title if info and info['rating'] and info['rating'] != '0.0' and not info['season']: - # Se normaliza el puntaje del rating + # The rating score is normalized rating_value = check_rating(info['rating']) - # Asignamos el color dependiendo el puntaje, malo, bueno, muy bueno, en caso de que exista + # We assign the color depending on the score, bad, good, very good, in case it exists if rating_value: value = float(rating_value) @@ -471,13 +461,13 @@ def title_format(item): color_rating = 'otro' item.title = '%s %s' % (item.title, set_color(rating, color_rating)) - # Damos formato a la calidad si existiera y lo agregamos al titulo + # We format the quality if it exists and add it to the title if item.quality and isinstance(item.quality, str): quality = item.quality.strip() else: quality = '' - # Damos formato al idioma-calidad si existieran y los agregamos al plot + # We format the language-quality if they exist and add them to the plot quality_ = set_color(quality, 'quality') if (lang or quality) and item.action == "play": @@ -498,14 +488,14 @@ def title_format(item): plot_ = add_info_plot('', 
simple_language, quality_) item.contentPlot = plot_ - # Para las busquedas por canal + # For channel searches if item.from_channel != '': from core import channeltools channel_parameters = channeltools.get_channel_parameters(item.from_channel) logger.debug(channel_parameters) item.title = '%s [%s]' % (item.title, channel_parameters['title']) - # Formato para actualizaciones de series en la videoteca sobreescribe los colores anteriores + # Format for series updates in the video library overwrites the previous colors if item.channel == 'videolibrary' and item.context != '': if item.action == 'get_seasons': @@ -514,15 +504,14 @@ def title_format(item): if 'Activar' in item.context[1]['title']: item.title = '%s' % (set_color(item.title, 'no_update')) - # logger.debug('Despues del formato: %s' % item) - # Damos formato al servidor si existiera + # logger.debug('After the format: %s' % item) + # We format the server if it exists if item.server: server = '%s' % set_color(item.server.strip().capitalize(), 'server') - # Compureba si estamos en findvideos, y si hay server, si es asi no se muestra el - # titulo sino el server, en caso contrario se muestra el titulo normalmente. + # Check if we are in findvideos, and if there is a server, if so, the title is not shown but the server, otherwise the title is normally shown. - # logger.debug('item.title antes de server: %s'%item.title) + # logger.debug('item.title before server: %s'%item.title) if item.action != 'play' and item.server: item.title = '%s %s' % (item.title, server.strip()) @@ -544,7 +533,7 @@ def title_format(item): if item.channel == 'videolibrary': item.title += ' [%s]' % item.contentChannel - # si hay verificacion de enlaces + # if there is verification of links if item.alive != '': if item.alive.lower() == 'no': item.title = '[[COLOR red][B]X[/B][/COLOR]] %s' % item.title @@ -553,14 +542,14 @@ def title_format(item): else: item.title = '%s' % item.title - # logger.debug('item.title despues de server: %s' % item.title) + # logger.debug('item.title after server: %s' % item.title) elif 'library' in item.action: item.title = '%s' % set_color(item.title, 'library') elif item.action == '' and item.title != '': item.title = '**- %s -**' % item.title elif item.unify: item.title = '%s' % set_color(item.title, 'otro') - # logger.debug('antes de salir %s' % item.title) + # logger.debug('before leaving %s' % item.title) if visto: try: check = u'\u221a' @@ -579,8 +568,7 @@ def title_format(item): def thumbnail_type(item): # logger.info() - # Se comprueba que tipo de thumbnail se utilizara en findvideos, - # Poster o Logo del servidor + # Check what type of thumbnail will be used in findvideos, Poster or Logo of the server thumb_type = config.get_setting('video_thumbnail_type') info = item.infoLabels @@ -612,16 +600,16 @@ def check_rating(rating): def check_decimal_length(_rating): """ - Dejamos que el float solo tenga un elemento en su parte decimal, "7.10" --> "7.1" - @param _rating: valor del rating + We let the float only have one element in its decimal part, "7.10" --> "7.1" + @param _rating: rating value @type _rating: float - @return: devuelve el valor modificado si es correcto, si no devuelve None + @return: returns the modified value if it is correct, if it does not return None @rtype: float|None """ # logger.debug("rating %s" % _rating) try: - # convertimos los deciamles p.e. 7.1 + # we convert the deciamles ex. 7.1 return "%.1f" % round(_rating, 1) except Exception as ex_dl: template = "An exception of type %s occured. 
Arguments:\n%r" @@ -631,20 +619,20 @@ def check_rating(rating): def check_range(_rating): """ - Comprobamos que el rango de rating sea entre 0.0 y 10.0 - @param _rating: valor del rating + We check that the rating range is between 0.0 and 10.0 + @param _rating: rating value @type _rating: float - @return: devuelve el valor si está dentro del rango, si no devuelve None + @return: returns the value if it is within the range, if it does not return None @rtype: float|None """ # logger.debug("rating %s" % _rating) - # fix para comparacion float + # fix for float comparison dec = Decimal(_rating) if 0.0 <= dec <= 10.0: - # logger.debug("estoy en el rango!") + # logger.debug("i'm in range!") return _rating else: - # logger.debug("NOOO estoy en el rango!") + # logger.debug("NOOO I'm in range!") return None def convert_float(_rating): @@ -657,26 +645,26 @@ def check_rating(rating): return None if not isinstance(rating, float): - # logger.debug("no soy float") + # logger.debug("I'm not float") if isinstance(rating, int): - # logger.debug("soy int") + # logger.debug("I am int") rating = convert_float(rating) elif isinstance(rating, str): - # logger.debug("soy str") + # logger.debug("I'm str") rating = rating.replace("<", "") rating = convert_float(rating) if rating is None: - # logger.debug("error al convertir str, rating no es un float") - # obtenemos los valores de numericos + # logger.debug("error converting str, rating is not a float") + # we get the numerical values new_rating = scrapertools.find_single_match(rating, "(\d+)[,|:](\d+)") if len(new_rating) > 0: rating = convert_float("%s.%s" % (new_rating[0], new_rating[1])) else: logger.error("no se que soy!!") - # obtenemos un valor desconocido no devolvemos nada + # we get an unknown value we don't return anything return None if rating: diff --git a/platformcode/xbmc_config_menu.py b/platformcode/xbmc_config_menu.py index bfdd8ced..98a11e9b 100644 --- a/platformcode/xbmc_config_menu.py +++ b/platformcode/xbmc_config_menu.py @@ -25,12 +25,12 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): Construction method: SettingWindow(list_controls, dict_values, title, callback, item) - Parametros: - list_controls: (list) Lista de controles a incluir en la ventana, segun el siguiente esquema: + Parameters: + list_controls: (list) List of controls to include in the window, according to the following scheme: (opcional)list_controls= [ {'id': "nameControl1", 'type': "bool", # bool, text, list, label - 'label': "Control 1: tipo RadioButton", + 'label': "Control 1: type RadioButton", 'color': '0xFFee66CC', # text color in hexadecimal ARGB format 'default': True, 'enabled': True, @@ -38,7 +38,7 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): }, {'id': "nameControl2", 'type': "text", # bool, text, list, label - 'label': "Control 2: tipo Cuadro de texto", + 'label': "Control 2: type text box", 'color': '0xFFee66CC', 'default': "Valor por defecto", 'hidden': False, # only for type = text Indicates whether to hide the text (for passwords) @@ -47,7 +47,7 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): }, {'id': "nameControl3", 'type': "list", # bool, text, list, label - 'label': "Control 3: tipo Lista", + 'label': "Control 3: type List", 'color': '0xFFee66CC', 'default': 0, # Default value index in lvalues 'enabled': True, @@ -71,7 +71,7 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): { "id": "name_control_1", "type": "bool", - "label": "Control 1: tipo RadioButton", + "label": "Control 1: type RadioButton", "default": false, "enabled": true, "visible": true, 
@@ -80,8 +80,8 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): { "id": "name_control_2", "type": "text", - "label": "Control 2: tipo Cuadro de texto", - "default": "Valor por defecto", + "label": "Control 2: type text box", + "default": "Default value", "hidden": true, "enabled": true, "visible": true, @@ -90,7 +90,7 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): { "id": "name_control_3", "type": "list", - "label": "Control 3: tipo Lista", + "label": "Control 3: type List", "default": 0, "enabled": true, "visible": true, diff --git a/platformcode/xbmc_info_window.py b/platformcode/xbmc_info_window.py index 1fb9c859..19148fa2 100644 --- a/platformcode/xbmc_info_window.py +++ b/platformcode/xbmc_info_window.py @@ -73,7 +73,7 @@ class InfoWindow(xbmcgui.WindowXMLDialog): # logger.debug(str(data_in)) if self.listData: - # Datos comunes a todos los listados + # Data common to all listings infoLabels = self.scraper().get_infoLabels(origen=data_in) if "original_language" in infoLabels: @@ -84,78 +84,77 @@ class InfoWindow(xbmcgui.WindowXMLDialog): def start(self, data, caption="Información del vídeo", item=None, scraper=Tmdb): """ - Muestra una ventana con la info del vídeo. Opcionalmente se puede indicar el titulo de la ventana mendiante - el argumento 'caption'. + It shows a window with the info of the video. Optionally, the title of the window can be indicated by means of the argument 'caption'. - Si se pasa un item como argumento 'data' usa el scrapper Tmdb para buscar la info del vídeo - En caso de peliculas: - Coge el titulo de los siguientes campos (en este orden) - 1. contentTitle (este tiene prioridad 1) - 2. title (este tiene prioridad 2) - El primero que contenga "algo" lo interpreta como el titulo (es importante asegurarse que el titulo este en - su sitio) + If an item is passed as the 'data' argument use the Tmdb scrapper to find the video info + In case of movies: + Take the title from the following fields (in this order) + 1. contentTitle (this has priority 1) + 2. title (this has priority 2) + The first one containing "something" interprets it as the title (it is important to make sure that the title is in + your site) - En caso de series: - 1. Busca la temporada y episodio en los campos contentSeason y contentEpisodeNumber - 2. Intenta Sacarlo del titulo del video (formato: 1x01) + In case of series: + 1. Find the season and episode in the contentSeason and contentEpisodeNumber fields + 2. Try to remove it from the video title (format: 1x01) - Aqui hay dos opciones posibles: - 1. Tenemos Temporada y episodio - Muestra la información del capitulo concreto - 2. NO Tenemos Temporada y episodio - En este caso muestra la informacion generica de la serie + Here are two possible options: + 1. We have Season and episode + Shows the information of the specific chapter + 2. 
We DO NOT have Season and episode + In this case it shows the generic information of the series - Si se pasa como argumento 'data' un objeto InfoLabels(ver item.py) muestra en la ventana directamente - la información pasada (sin usar el scrapper) - Formato: - En caso de peliculas: - infoLabels({ + If an InfoLabels object (see item.py) is passed as an argument 'data' it shows in the window directly + the past information (without using the scrapper) + Format: + In case of movies: + infoLabels ({ "type" : "movie", - "title" : "Titulo de la pelicula", - "original_title" : "Titulo original de la pelicula", - "date" : "Fecha de lanzamiento", - "language" : "Idioma original de la pelicula", - "rating" : "Puntuacion de la pelicula", - "votes" : "Numero de votos", - "genres" : "Generos de la pelicula", - "thumbnail" : "Ruta para el thumbnail", - "fanart" : "Ruta para el fanart", - "plot" : "Sinopsis de la pelicula" + "title": "Title of the movie", + "original_title": "Original movie title", + "date": "Release date", + "language": "Original language of the movie", + "rating": "Rating of the movie", + "votes": "Number of votes", + "genres": "Genres of the movie", + "thumbnail": "Path for the thumbnail", + "fanart": "Route for the fanart", + "plot": "Synopsis of the movie" } - En caso de series: - infoLabels({ + In case of series: + infoLabels ({ "type" : "tv", - "title" : "Titulo de la serie", - "episode_title" : "Titulo del episodio", - "date" : "Fecha de emision", - "language" : "Idioma original de la serie", - "rating" : "Puntuacion de la serie", - "votes" : "Numero de votos", - "genres" : "Generos de la serie", - "thumbnail" : "Ruta para el thumbnail", - "fanart" : "Ruta para el fanart", - "plot" : "Sinopsis de la del episodio o de la serie", - "seasons" : "Numero de Temporadas", - "season" : "Temporada", - "episodes" : "Numero de episodios de la temporada", - "episode" : "Episodio" + "title": "Title of the series", + "episode_title": "Episode title", + "date": "Date of issue", + "language": "Original language of the series", + "rating": "Punctuation of the series", + "votes": "Number of votes", + "genres": "Genres of the series", + "thumbnail": "Path for the thumbnail", + "fanart": "Route for the fanart", + "plot": "Synopsis of the episode or series", + "seasons": "Number of Seasons", + "season": "Season", + "episodes": "Number of episodes of the season", + "episode": "Episode" } - Si se pasa como argumento 'data' un listado de InfoLabels() con la estructura anterior, muestra los botones - 'Anterior' y 'Siguiente' para ir recorriendo la lista. Ademas muestra los botones 'Aceptar' y 'Cancelar' que - llamaran a la funcion 'callback' del canal desde donde se realiza la llamada pasandole como parametros el elemento - actual (InfoLabels()) o None respectivamente. + If a list of InfoLabels () with the previous structure is passed as the 'data' argument, it shows the buttons + 'Previous' and 'Next' to scroll through the list. It also shows the 'Accept' and 'Cancel' buttons that + call the function 'callback' of the channel from where the call is made, passing the element as parameters + current (InfoLabels ()) or None respectively. - @param data: información para obtener datos del scraper. + @param data: information to get scraper data. @type data: item, InfoLabels, list(InfoLabels) - @param caption: titulo de la ventana. + @param caption: window title. 
@type caption: str - @param item: elemento del que se va a mostrar la ventana de información + @param item: item for which the information window is to be displayed @type item: Item - @param scraper: scraper que tiene los datos de las peliculas o series a mostrar en la ventana. + @param scraper: scraper that has the data of the movies or series to show in the window. @type scraper: Scraper """ - # Capturamos los parametros + # We capture the parameters self.caption = caption self.item = item self.indexList = -1 @@ -171,7 +170,7 @@ class InfoWindow(xbmcgui.WindowXMLDialog): self.get_scraper_data(data) - # Muestra la ventana + # Show window self.doModal() return self.return_value @@ -184,19 +183,19 @@ class InfoWindow(xbmcgui.WindowXMLDialog): self.scraper = Tmdb def onInit(self): - #### Compatibilidad con Kodi 18 #### + #### Kodi 18 compatibility #### if config.get_platform(True)['num_version'] < 18: if xbmcgui.__version__ == "1.2": self.setCoordinateResolution(1) else: self.setCoordinateResolution(5) - # Ponemos el título y las imagenes + # We put the title and the images self.getControl(10002).setLabel(self.caption) self.getControl(10004).setImage(self.result.get("fanart", "")) self.getControl(10005).setImage(self.result.get("thumbnail", "images/img_no_disponible.png")) - # Cargamos los datos para el formato pelicula + # We load the data for the movie format if self.result.get("mediatype", "movie") == "movie": self.getControl(10006).setLabel(config.get_localized_string(60377)) self.getControl(10007).setLabel(self.result.get("title", "N/A")) @@ -211,7 +210,7 @@ class InfoWindow(xbmcgui.WindowXMLDialog): self.getControl(100016).setLabel(config.get_localized_string(60382)) self.getControl(100017).setLabel(self.result.get("genre", "N/A")) - # Cargamos los datos para el formato serie + # We load the data for the serial format else: self.getControl(10006).setLabel(config.get_localized_string(60383)) self.getControl(10007).setLabel(self.result.get("title", "N/A")) @@ -235,7 +234,7 @@ class InfoWindow(xbmcgui.WindowXMLDialog): self.getControl(100020).setLabel(config.get_localized_string(60387)) self.getControl(100021).setLabel(self.result.get("date", "N/A")) - # Sinopsis + # Synopsis if self.result['plot']: self.getControl(100022).setLabel(config.get_localized_string(60388)) self.getControl(100023).setText(self.result.get("plot", "N/A")) @@ -243,20 +242,20 @@ class InfoWindow(xbmcgui.WindowXMLDialog): self.getControl(100022).setLabel("") self.getControl(100023).setText("") - # Cargamos los botones si es necesario - self.getControl(10024).setVisible(self.indexList > -1) # Grupo de botones - self.getControl(ID_BUTTON_PREVIOUS).setEnabled(self.indexList > 0) # Anterior + # We load the buttons if necessary + self.getControl(10024).setVisible(self.indexList > -1) # Button group + self.getControl(ID_BUTTON_PREVIOUS).setEnabled(self.indexList > 0) # Previous if self.listData: m = len(self.listData) else: m = 1 - self.getControl(ID_BUTTON_NEXT).setEnabled(self.indexList + 1 != m) # Siguiente + self.getControl(ID_BUTTON_NEXT).setEnabled(self.indexList + 1 != m) # Following self.getControl(100029).setLabel("(%s/%s)" % (self.indexList + 1, m)) # x/m - # Ponemos el foco en el Grupo de botones, si estuviera desactivado "Anterior" iria el foco al boton "Siguiente" - # si "Siguiente" tb estuviera desactivado pasara el foco al botón "Cancelar" + # We put the focus in the Group of buttons, if "Previous" was deactivated the focus would go to the "Next" button + # if "Next" tb is deactivated it will pass the focus 
to the "Cancel" button self.setFocus(self.getControl(10024)) return self.return_value @@ -331,6 +330,6 @@ class InfoWindow(xbmcgui.WindowXMLDialog): # Down elif action == 4: self.setFocus(self.getControl(ID_BUTTON_OK)) - # Pulsa ESC o Atrás, simula click en boton cancelar + # Press ESC or Back, simulate click on cancel button if action in [10, 92]: self.onClick(ID_BUTTON_CANCEL) diff --git a/platformcode/xbmc_videolibrary.py b/platformcode/xbmc_videolibrary.py index d64a8e01..1aac5828 100644 --- a/platformcode/xbmc_videolibrary.py +++ b/platformcode/xbmc_videolibrary.py @@ -5,21 +5,12 @@ from future import standard_library standard_library.install_aliases() #from builtins import str -import sys +import sys, os, threading, time, re, math, xbmc PY3 = False if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int -import os -import threading -import time -import re -import math - -import xbmc -from core import filetools -from core import jsontools -from platformcode import config, logger -from platformcode import platformtools +from core import filetools, jsontools +from platformcode import config, logger, platformtools from core import scrapertools from xml.dom import minidom @@ -82,25 +73,25 @@ def mark_auto_as_watched(item): time.sleep(5) - # Sincronizacion silenciosa con Trakt + # Silent sync with Trakt if sync_with_trakt and config.get_setting("trakt_sync"): sync_trakt_kodi() # logger.debug("Fin del hilo") - # Si esta configurado para marcar como visto + # If it is configured to mark as seen if config.get_setting("mark_as_watched", "videolibrary"): threading.Thread(target=mark_as_watched_subThread, args=[item]).start() def sync_trakt_addon(path_folder): """ - Actualiza los valores de episodios vistos si + Updates the values ​​of episodes seen if """ logger.info() - # si existe el addon hacemos la busqueda + # if the addon exists we do the search if xbmc.getCondVisibility('System.HasAddon("script.trakt")'): - # importamos dependencias + # we import dependencies paths = ["special://home/addons/script.module.dateutil/lib/", "special://home/addons/script.module.six/lib/", "special://home/addons/script.module.arrow/lib/", "special://home/addons/script.module.trakt/lib/", "special://home/addons/script.trakt/"] @@ -108,7 +99,7 @@ def sync_trakt_addon(path_folder): for path in paths: sys.path.append(xbmc.translatePath(path)) - # se obtiene las series vistas + # the series seen is obtained try: from resources.lib.traktapi import traktAPI traktapi = traktAPI() @@ -118,9 +109,9 @@ def sync_trakt_addon(path_folder): shows = traktapi.getShowsWatched({}) shows = list(shows.items()) - # obtenemos el id de la serie para comparar + # we get the series id to compare _id = re.findall("\[(.*?)\]", path_folder, flags=re.DOTALL)[0] - logger.debug("el id es %s" % _id) + logger.debug("the id is %s" % _id) if "tt" in _id: type_id = "imdb" @@ -131,15 +122,15 @@ def sync_trakt_addon(path_folder): type_id = "tmdb" _id = _id.strip("tmdb_") else: - logger.error("No hay _id de la serie") + logger.error("There is no _id of the series") return - # obtenemos los valores de la serie + # we obtain the values ​​of the series from core import videolibrarytools tvshow_file = filetools.join(path_folder, "tvshow.nfo") head_nfo, serie = videolibrarytools.read_nfo(tvshow_file) - # buscamos en las series de trakt + # we look in the trakt series for show in shows: show_aux = show[1].to_dict() @@ -148,30 +139,27 @@ def sync_trakt_addon(path_folder): # logger.debug("ID ES %s" % _id_trakt) if _id_trakt: if _id 
== _id_trakt: - logger.debug("ENCONTRADO!! %s" % show_aux) + logger.debug("FOUND! %s" % show_aux) - # creamos el diccionario de trakt para la serie encontrada con el valor que tiene "visto" + # we create the trakt dictionary for the found series with the value that has "seen" dict_trakt_show = {} for idx_season, season in enumerate(show_aux['seasons']): for idx_episode, episode in enumerate(show_aux['seasons'][idx_season]['episodes']): - sea_epi = "%sx%s" % (show_aux['seasons'][idx_season]['number'], - str(show_aux['seasons'][idx_season]['episodes'][idx_episode][ - 'number']).zfill(2)) + sea_epi = "%sx%s" % (show_aux['seasons'][idx_season]['number'], str(show_aux['seasons'][idx_season]['episodes'][idx_episode]['number']).zfill(2)) - dict_trakt_show[sea_epi] = show_aux['seasons'][idx_season]['episodes'][idx_episode][ - 'watched'] + dict_trakt_show[sea_epi] = show_aux['seasons'][idx_season]['episodes'][idx_episode]['watched'] logger.debug("dict_trakt_show %s " % dict_trakt_show) - # obtenemos las keys que son episodios + # we get the keys that are episodes regex_epi = re.compile('\d+x\d+') keys_episodes = [key for key in serie.library_playcounts if regex_epi.match(key)] - # obtenemos las keys que son temporadas + # we get the keys that are seasons keys_seasons = [key for key in serie.library_playcounts if 'season ' in key] - # obtenemos los numeros de las keys temporadas + # we get the numbers of the seasons keys seasons = [key.strip('season ') for key in keys_seasons] - # marcamos los episodios vistos + # we mark the episodes watched for k in keys_episodes: serie.library_playcounts[k] = dict_trakt_show.get(k, 0) @@ -179,7 +167,7 @@ def sync_trakt_addon(path_folder): episodios_temporada = 0 episodios_vistos_temporada = 0 - # obtenemos las keys de los episodios de una determinada temporada + # we obtain the keys of the episodes of a certain season keys_season_episodes = [key for key in keys_episodes if key.startswith("%sx" % season)] for k in keys_season_episodes: @@ -187,7 +175,7 @@ def sync_trakt_addon(path_folder): if serie.library_playcounts[k] > 0: episodios_vistos_temporada += 1 - # se comprueba que si todos los episodios están vistos, se marque la temporada como vista + # it is verified that if all the episodes are watched, the season is marked as watched if episodios_temporada == episodios_vistos_temporada: serie.library_playcounts.update({"season %s" % season: 1}) @@ -199,11 +187,11 @@ def sync_trakt_addon(path_folder): if serie.library_playcounts[k] > 0: temporada_vista += 1 - # se comprueba que si todas las temporadas están vistas, se marque la serie como vista + # sCheck that if all seasons are viewed, the series is marked as view if temporada == temporada_vista: serie.library_playcounts.update({serie.title: 1}) - logger.debug("los valores nuevos %s " % serie.library_playcounts) + logger.debug("the new values %s " % serie.library_playcounts) filetools.write(tvshow_file, head_nfo + serie.tojson()) break @@ -211,7 +199,7 @@ def sync_trakt_addon(path_folder): continue else: - logger.error("no se ha podido obtener el id, trakt tiene: %s" % show_aux['ids']) + logger.error("could not get id, trakt has: %s" % show_aux['ids']) except: import traceback @@ -219,7 +207,7 @@ def sync_trakt_addon(path_folder): def sync_trakt_kodi(silent=True): - # Para que la sincronizacion no sea silenciosa vale con silent=False + # So that the synchronization is not silent it is worth with silent = False if xbmc.getCondVisibility('System.HasAddon("script.trakt")'): notificacion = True if (not 
config.get_setting("sync_trakt_notification", "videolibrary") and platformtools.is_playing()): @@ -234,11 +222,11 @@ def sync_trakt_kodi(silent=True): def mark_content_as_watched_on_kodi(item, value=1): """ - marca el contenido como visto o no visto en la libreria de Kodi + mark the content as seen or not seen in the Kodi library @type item: item - @param item: elemento a marcar + @param item: element to mark @type value: int - @param value: >0 para visto, 0 para no visto + @param value: > 0 for seen, 0 for not seen """ logger.info() # logger.debug("item:\n" + item.tostring('\n')) @@ -253,23 +241,22 @@ def mark_content_as_watched_on_kodi(item, value=1): data = get_data(payload) if 'result' in data and "movies" in data['result']: - if item.strm_path: #Si Item es de un episodio + if item.strm_path: # If Item is from an episode filename = filetools.basename(item.strm_path) head, tail = filetools.split(filetools.split(item.strm_path)[0]) - else: #Si Item es de la Serie + else: # If Item is from the Series filename = filetools.basename(item.path) head, tail = filetools.split(filetools.split(item.path)[0]) path = filetools.join(tail, filename) for d in data['result']['movies']: if d['file'].replace("/", "\\").endswith(path.replace("/", "\\")): - # logger.debug("marco la pelicula como vista") + # logger.debug("I mark the movie as a view") movieid = d['movieid'] break if movieid != 0: - payload_f = {"jsonrpc": "2.0", "method": "VideoLibrary.SetMovieDetails", "params": { - "movieid": movieid, "playcount": value}, "id": 1} + payload_f = {"jsonrpc": "2.0", "method": "VideoLibrary.SetMovieDetails", "params": {"movieid": movieid, "playcount": value}, "id": 1} else: # item.contentType != 'movie' episodeid = 0 @@ -280,10 +267,10 @@ def mark_content_as_watched_on_kodi(item, value=1): data = get_data(payload) if 'result' in data and "episodes" in data['result']: - if item.strm_path: #Si Item es de un episodio + if item.strm_path: # If Item is from an episode filename = filetools.basename(item.strm_path) head, tail = filetools.split(filetools.split(item.strm_path)[0]) - else: #Si Item es de la Serie + else: # If Item is from the Series filename = filetools.basename(item.path) head, tail = filetools.split(filetools.split(item.path)[0]) path = filetools.join(tail, filename) @@ -291,35 +278,33 @@ def mark_content_as_watched_on_kodi(item, value=1): for d in data['result']['episodes']: if d['file'].replace("/", "\\").endswith(path.replace("/", "\\")): - # logger.debug("marco el episodio como visto") + # logger.debug("I mark the episode as seen") episodeid = d['episodeid'] break if episodeid != 0: - payload_f = {"jsonrpc": "2.0", "method": "VideoLibrary.SetEpisodeDetails", "params": { - "episodeid": episodeid, "playcount": value}, "id": 1} + payload_f = {"jsonrpc": "2.0", "method": "VideoLibrary.SetEpisodeDetails", "params": {"episodeid": episodeid, "playcount": value}, "id": 1} if payload_f: - # Marcar como visto + # Mark as seen data = get_data(payload_f) # logger.debug(str(data)) if data['result'] != 'OK': - logger.error("ERROR al poner el contenido como visto") + logger.error("ERROR putting content as viewed") def mark_season_as_watched_on_kodi(item, value=1): """ - marca toda la temporada como vista o no vista en la libreria de Kodi + mark the entire season as seen or unseen in the Kodi library @type item: item - @param item: elemento a marcar + @param item: element to mark @type value: int - @param value: >0 para visto, 0 para no visto + @param value: > 0 for seen, 0 for not seen """ logger.info() # 
logger.debug("item:\n" + item.tostring('\n')) - # Solo podemos marcar la temporada como vista en la BBDD de Kodi si la BBDD es local, - # en caso de compartir BBDD esta funcionalidad no funcionara + # We can only mark the season as seen in the Kodi database if the database is local, in case of sharing database this functionality will not work if config.get_setting("db_mode", "videolibrary"): return @@ -336,9 +321,7 @@ def mark_season_as_watched_on_kodi(item, value=1): item_path1 += "\\" item_path2 = item_path1.replace("\\", "/") - sql = 'update files set playCount= %s where idFile in ' \ - '(select idfile from episode_view where (strPath like "%s" or strPath like "%s")%s)' % \ - (value, item_path1, item_path2, request_season) + sql = 'update files set playCount= %s where idFile in (select idfile from episode_view where (strPath like "%s" or strPath like "%s")%s)' % (value, item_path1, item_path2, request_season) execute_sql_kodi(sql) @@ -348,9 +331,9 @@ def mark_content_as_watched_on_kod(path): from core import videolibrarytools """ - marca toda la serie o película como vista o no vista en la Videoteca de Alfa basado en su estado en la Videoteca de Kodi + mark the entire series or movie as viewed or unseen in the Alpha Video Library based on their status in the Kodi Video Library @type str: path - @param path: carpeta de contenido a marcar + @param path: content folder to mark """ logger.info() #logger.debug("path: " + path) @@ -361,9 +344,8 @@ def mark_content_as_watched_on_kod(path): if not VIDEOLIBRARY_PATH: return - # Solo podemos marcar el contenido como vista en la BBDD de Kodi si la BBDD es local, - # en caso de compartir BBDD esta funcionalidad no funcionara - #if config.get_setting("db_mode", "videolibrary"): + # We can only mark the content as a view in the Kodi database if the database is local, in case of sharing database this functionality will not work + # if config.get_setting("db_mode", "videolibrary"): # return path2 = '' @@ -375,60 +357,60 @@ def mark_content_as_watched_on_kod(path): if "\\" in path: path = path.replace("/", "\\") - head_nfo, item = videolibrarytools.read_nfo(path) #Leo el .nfo del contenido + head_nfo, item = videolibrarytools.read_nfo(path) # I read the content .nfo if not item: - logger.error('.NFO no encontrado: ' + path) + logger.error('.NFO not found: ' + path) return - if FOLDER_TVSHOWS in path: #Compruebo si es CINE o SERIE - contentType = "episode_view" #Marco la tabla de BBDD de Kodi Video - nfo_name = "tvshow.nfo" #Construyo el nombre del .nfo - path1 = path.replace("\\\\", "\\").replace(nfo_name, '') #para la SQL solo necesito la carpeta + if FOLDER_TVSHOWS in path: # I check if it is CINEMA or SERIES + contentType = "episode_view" # I mark the Kodi Video BBDD table + nfo_name = "tvshow.nfo" # I build the name of the .nfo + path1 = path.replace("\\\\", "\\").replace(nfo_name, '') # for SQL I just need the folder if not path2: - path2 = path1.replace("\\", "/") #Formato no Windows + path2 = path1.replace("\\", "/") # Format no Windows else: path2 = path2.replace(nfo_name, '') else: - contentType = "movie_view" #Marco la tabla de BBDD de Kodi Video - path1 = path.replace("\\\\", "\\") #Formato Windows + contentType = "movie_view" # I mark the Kodi Video BBDD table + path1 = path.replace("\\\\", "\\") # Windows format if not path2: - path2 = path1.replace("\\", "/") #Formato no Windows - nfo_name = scrapertools.find_single_match(path2, '\]\/(.*?)$') #Construyo el nombre del .nfo - path1 = path1.replace(nfo_name, '') #para la SQL solo necesito la 
carpeta - path2 = path2.replace(nfo_name, '') #para la SQL solo necesito la carpeta - path2 = filetools.remove_smb_credential(path2) #Si el archivo está en un servidor SMB, quitamos las credenciales + path2 = path1.replace("\\", "/") # Format no Windows + nfo_name = scrapertools.find_single_match(path2, '\]\/(.*?)$') # I build the name of the .nfo + path1 = path1.replace(nfo_name, '') # for SQL I just need the folder + path2 = path2.replace(nfo_name, '') # for SQL I just need the folder + path2 = filetools.remove_smb_credential(path2) # If the file is on an SMB server, we remove the credentials - #Ejecutmos la sentencia SQL + # Let's execute the SQL statement sql = 'select strFileName, playCount from %s where (strPath like "%s" or strPath like "%s")' % (contentType, path1, path2) nun_records = 0 records = None - nun_records, records = execute_sql_kodi(sql) #ejecución de la SQL - if nun_records == 0: #hay error? - logger.error("Error en la SQL: " + sql + ": 0 registros") - return #salimos: o no está catalogado en Kodi, o hay un error en la SQL + nun_records, records = execute_sql_kodi(sql) # SQL execution + if nun_records == 0: # is there an error? + logger.error("SQL error: " + sql + ": 0 registros") + return # we quit: either it is not cataloged in Kodi, or there is an error in the SQL - for title, playCount in records: #Ahora recorremos todos los registros obtenidos + for title, playCount in records: # Now we go through all the records obtained if contentType == "episode_view": - title_plain = title.replace('.strm', '') #Si es Serie, quitamos el sufijo .strm + title_plain = title.replace('.strm', '') # If it is Serial, we remove the suffix .strm else: - title_plain = scrapertools.find_single_match(item.strm_path, '.(.*?\s\[.*?\])') #si es peli, quitamos el título - if playCount is None or playCount == 0: #todavía no se ha visto, lo ponemos a 0 + title_plain = scrapertools.find_single_match(item.strm_path, '.(.*?\s\[.*?\])') # if it's a movie, we remove the title + if playCount is None or playCount == 0: # not yet seen, we set it to 0 playCount_final = 0 elif playCount >= 1: playCount_final = 1 elif not PY3 and isinstance(title_plain, (str, unicode)): - title_plain = title_plain.decode("utf-8").encode("utf-8") #Hacemos esto porque si no genera esto: u'title_plain' + title_plain = title_plain.decode("utf-8").encode("utf-8") # We do this because if it doesn't generate this: u'title_plain ' elif PY3 and isinstance(title_plain, bytes): title_plain = title_plain.decode('utf-8') - item.library_playcounts.update({title_plain: playCount_final}) #actualizamos el playCount del .nfo + item.library_playcounts.update({title_plain: playCount_final}) # update the .nfo playCount - if item.infoLabels['mediatype'] == "tvshow": #Actualizamos los playCounts de temporadas y Serie + if item.infoLabels['mediatype'] == "tvshow": # We update the Season and Series playCounts for season in item.library_playcounts: - if "season" in season: #buscamos las etiquetas "season" dentro de playCounts - season_num = int(scrapertools.find_single_match(season, 'season (\d+)')) #salvamos el núm, de Temporada - item = videolibrary.check_season_playcount(item, season_num) #llamamos al método que actualiza Temps. y Series + if "season" in season: # we look for the tags "season" inside playCounts + season_num = int(scrapertools.find_single_match(season, 'season (\d+)')) # we save the season number + item = videolibrary.check_season_playcount(item, season_num) # We call the method that updates Temps. 
and series filetools.write(path, head_nfo + item.tojson()) @@ -437,7 +419,7 @@ def mark_content_as_watched_on_kod(path): def get_data(payload): """ - obtiene la información de la llamada JSON-RPC con la información pasada en payload + get the information of the JSON-RPC call with the information passed in payload @type payload: dict @param payload: data :return: @@ -483,12 +465,12 @@ def get_data(payload): def update(folder_content=config.get_setting("folder_tvshows"), folder=""): """ - Actualiza la libreria dependiendo del tipo de contenido y la ruta que se le pase. + Update the library depending on the type of content and the path passed to it. @type folder_content: str - @param folder_content: tipo de contenido para actualizar, series o peliculas + @param folder_content: type of content to update, series or movies @type folder: str - @param folder: nombre de la carpeta a escanear. + @param folder: name of the folder to scan. """ logger.info(folder) @@ -512,7 +494,7 @@ def update(folder_content=config.get_setting("folder_tvshows"), folder=""): videolibrarypath = videolibrarypath[:-1] update_path = videolibrarypath + "/" + folder_content + "/" + folder + "/" else: - #update_path = filetools.join(videolibrarypath, folder_content, folder) + "/" # Problemas de encode en "folder" + # update_path = filetools.join(videolibrarypath, folder_content, folder) + "/" # Encoder problems in "folder" update_path = filetools.join(videolibrarypath, folder_content, ' ').rstrip() if videolibrarypath.startswith("special:") or not scrapertools.find_single_match(update_path, '(^\w+:\/\/)'): @@ -535,9 +517,9 @@ def search_library_path(): def set_content(content_type, silent=False, custom=False): """ - Procedimiento para auto-configurar la videoteca de kodi con los valores por defecto + Procedure to auto-configure the kodi video library with the default values @type content_type: str ('movie' o 'tvshow') - @param content_type: tipo de contenido para configurar, series o peliculas + @param content_type: content type to configure, series or movies """ logger.info() continuar = True @@ -556,14 +538,14 @@ def set_content(content_type, silent=False, custom=False): if seleccion == -1 or seleccion == 0: if not xbmc.getCondVisibility('System.HasAddon(metadata.themoviedb.org)'): if not silent: - # Preguntar si queremos instalar metadata.themoviedb.org + # Ask if we want to install metadata.themoviedb.org install = platformtools.dialog_yesno(config.get_localized_string(60046)) else: install = True if install: try: - # Instalar metadata.themoviedb.org + # Install metadata.themoviedb.org xbmc.executebuiltin('xbmc.installaddon(metadata.themoviedb.org)', True) logger.info("Instalado el Scraper de películas de TheMovieDB") except: @@ -580,7 +562,7 @@ def set_content(content_type, silent=False, custom=False): if continuar and not xbmc.getCondVisibility('System.HasAddon(metadata.universal)'): continuar = False if not silent: - # Preguntar si queremos instalar metadata.universal + # Ask if we want to install metadata.universal install = platformtools.dialog_yesno(config.get_localized_string(70095)) else: install = True @@ -610,16 +592,16 @@ def set_content(content_type, silent=False, custom=False): if seleccion == -1 or seleccion == 0: if not xbmc.getCondVisibility('System.HasAddon(metadata.tvdb.com)'): if not silent: - # Preguntar si queremos instalar metadata.tvdb.com + #Ask if we want to install metadata.tvdb.com install = platformtools.dialog_yesno(config.get_localized_string(60048)) else: install = True if install: try: - # 
Instalar metadata.tvdb.com + # Install metadata.tvdb.com xbmc.executebuiltin('xbmc.installaddon(metadata.tvdb.com)', True) - logger.info("Instalado el Scraper de series de The TVDB") + logger.info("The TVDB series Scraper installed ") except: pass @@ -634,14 +616,14 @@ def set_content(content_type, silent=False, custom=False): if continuar and not xbmc.getCondVisibility('System.HasAddon(metadata.tvshows.themoviedb.org)'): continuar = False if not silent: - # Preguntar si queremos instalar metadata.tvshows.themoviedb.org + # Ask if we want to install metadata.tvshows.themoviedb.org install = platformtools.dialog_yesno(config.get_localized_string(60050)) else: install = True if install: try: - # Instalar metadata.tvshows.themoviedb.org + # Install metadata.tvshows.themoviedb.org xbmc.executebuiltin('xbmc.installaddon(metadata.tvshows.themoviedb.org)', True) if xbmc.getCondVisibility('System.HasAddon(metadata.tvshows.themoviedb.org)'): continuar = True @@ -659,7 +641,7 @@ def set_content(content_type, silent=False, custom=False): if continuar: continuar = False - # Buscamos el idPath + # We look for the idPath sql = 'SELECT MAX(idPath) FROM path' nun_records, records = execute_sql_kodi(sql) if nun_records == 1: @@ -677,7 +659,7 @@ def set_content(content_type, silent=False, custom=False): if not sql_videolibrarypath.endswith(sep): sql_videolibrarypath += sep - # Buscamos el idParentPath + # We are looking for the idParentPath sql = 'SELECT idPath, strPath FROM path where strPath LIKE "%s"' % sql_videolibrarypath nun_records, records = execute_sql_kodi(sql) if nun_records == 1: @@ -685,7 +667,7 @@ def set_content(content_type, silent=False, custom=False): videolibrarypath = records[0][1][:-1] continuar = True else: - # No existe videolibrarypath en la BD: la insertamos + # There is no videolibrarypath in the DB: we insert it sql_videolibrarypath = videolibrarypath if not sql_videolibrarypath.endswith(sep): sql_videolibrarypath += sep @@ -703,7 +685,7 @@ def set_content(content_type, silent=False, custom=False): if continuar: continuar = False - # Fijamos strContent, strScraper, scanRecursive y strSettings + # We set strContent, strScraper, scanRecursive and strSettings if content_type == 'movie': strContent = 'movies' scanRecursive = 2147483647 @@ -719,7 +701,7 @@ def set_content(content_type, silent=False, custom=False): settings_data = filetools.read(path_settings) strSettings = ' '.join(settings_data.split()).replace("> <", "><") strSettings = strSettings.replace("\"","\'") - strActualizar = "¿Desea configurar este Scraper en español como opción por defecto para películas?" + strActualizar = "Do you want to set this Scraper in Spanish as the default option for movies?" if not videolibrarypath.endswith(sep): videolibrarypath += sep strPath = videolibrarypath + config.get_setting("folder_movies") + sep @@ -738,13 +720,13 @@ def set_content(content_type, silent=False, custom=False): settings_data = filetools.read(path_settings) strSettings = ' '.join(settings_data.split()).replace("> <", "><") strSettings = strSettings.replace("\"","\'") - strActualizar = "¿Desea configurar este Scraper en español como opción por defecto para series?" + strActualizar = "Do you want to configure this Scraper in Spanish as a default option for series?" 
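set_content() goes on to query Kodi's MyVideos database through execute_sql_kodi() (defined later in this file). A minimal sketch of that pattern, assuming it runs inside the addon environment; the database file name below is an assumption, since the real code resolves it from the Kodi version:

import sqlite3
import xbmc
from core import filetools

def lookup_paths(video_db="MyVideos116.db"):
    # Open Kodi's local video database and read the 'path' table,
    # the same table set_content() checks before inserting a new source
    db_file = filetools.join(xbmc.translatePath("special://userdata/Database"), video_db)
    conn = sqlite3.connect(db_file)
    try:
        cursor = conn.cursor()
        cursor.execute('SELECT idPath, strPath FROM path')
        return cursor.fetchall()
    finally:
        conn.close()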
if not videolibrarypath.endswith(sep): videolibrarypath += sep strPath = videolibrarypath + config.get_setting("folder_tvshows") + sep logger.info("%s: %s" % (content_type, strPath)) - # Comprobamos si ya existe strPath en la BD para evitar duplicados + # We check if strPath already exists in the DB to avoid duplicates sql = 'SELECT idPath FROM path where strPath="%s"' % strPath nun_records, records = execute_sql_kodi(sql) sql = "" @@ -1011,12 +993,12 @@ def clean(path_list=[]): def execute_sql_kodi(sql): """ - Ejecuta la consulta sql contra la base de datos de kodi - @param sql: Consulta sql valida + Run sql query against kodi database + @param sql: Valid sql query @type sql: str - @return: Numero de registros modificados o devueltos por la consulta + @return: Number of records modified or returned by the query @rtype nun_records: int - @return: lista con el resultado de la consulta + @return: list with the query result @rtype records: list of tuples """ logger.info() @@ -1024,12 +1006,12 @@ def execute_sql_kodi(sql): nun_records = 0 records = None - # Buscamos el archivo de la BBDD de videos segun la version de kodi + # We look for the archive of the video database according to the version of kodi video_db = config.get_platform(True)['video_db'] if video_db: file_db = filetools.join(xbmc.translatePath("special://userdata/Database"), video_db) - # metodo alternativo para localizar la BBDD + # alternative method to locate the database if not file_db or not filetools.exists(file_db): file_db = "" for f in filetools.listdir(xbmc.translatePath("special://userdata/Database")): @@ -1040,14 +1022,14 @@ def execute_sql_kodi(sql): break if file_db: - logger.info("Archivo de BD: %s" % file_db) + logger.info("DB file: %s" % file_db) conn = None try: import sqlite3 conn = sqlite3.connect(file_db) cursor = conn.cursor() - logger.info("Ejecutando sql: %s" % sql) + logger.info("Running sql: %s" % sql) cursor.execute(sql) conn.commit() @@ -1061,15 +1043,15 @@ def execute_sql_kodi(sql): nun_records = conn.total_changes conn.close() - logger.info("Consulta ejecutada. Registros: %s" % nun_records) + logger.info("Query executed. Records: %s" % nun_records) except: - logger.error("Error al ejecutar la consulta sql") + logger.error("Error executing sql query") if conn: conn.close() else: - logger.debug("Base de datos no encontrada") + logger.debug("Database not found") return nun_records, records diff --git a/servers/anavids.py b/servers/anavids.py index 5b3f2376..c4169620 100644 --- a/servers/anavids.py +++ b/servers/anavids.py @@ -7,7 +7,7 @@ from platformcode import config, logger def test_video_exists(page_url): logger.info("(page_url='%s')" % page_url) - + global data data = httptools.downloadpage(page_url, cookies=False).data if 'File you are looking for is not found.' 
in data: return False, config.get_localized_string(70449) % "AvaVids" @@ -16,7 +16,6 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("url=" + page_url) - data = httptools.downloadpage(page_url).data + global data video_urls = support.get_jwplayer_mediaurl(data, 'AvaVids') return video_urls diff --git a/servers/animeworld_server.py b/servers/animeworld_server.py index 5aa6619a..b951929d 100644 --- a/servers/animeworld_server.py +++ b/servers/animeworld_server.py @@ -13,6 +13,7 @@ def test_video_exists(page_url): logger.info("(page_url='%s')" % page_url) post = urllib.urlencode({'r': '', 'd': 'animeworld.biz'}) data_json = httptools.downloadpage(page_url.replace('/v/', '/api/source/'), headers=[['x-requested-with', 'XMLHttpRequest']], post=post).data + global json json = jsontools.load(data_json) if not json['data']: return False, config.get_localized_string(70449) % "AnimeWorld" @@ -23,9 +24,7 @@ def test_video_exists(page_url): def get_video_url(page_url, user="", password="", video_password=""): logger.info("(page_url='%s')" % page_url) video_urls = [] - post = urllib.urlencode({'r': '', 'd': 'animeworld.biz'}) - data_json = httptools.downloadpage(page_url.replace('/v/', '/api/source/'), headers=[['x-requested-with', 'XMLHttpRequest']], post=post).data - json = jsontools.load(data_json) + global json if json['data']: for file in json['data']: media_url = file['file'] diff --git a/servers/vvvvid.py b/servers/vvvvid.py index 3f1ddc53..dc644426 100644 --- a/servers/vvvvid.py +++ b/servers/vvvvid.py @@ -6,7 +6,7 @@ import requests from core import httptools from lib import vvvvid_decoder -from platformcode import logger +from platformcode import logger, config # Creating persistent session current_session = requests.Session() @@ -16,19 +16,18 @@ headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:62.0) Gecko/2010010 login_page = 'https://www.vvvvid.it/user/login' conn_id = current_session.get(login_page, headers=headers).json()['data']['conn_id'] payload = {'conn_id': conn_id} -# logger.info('CONNECTION ID= ' + str(payload)) def test_video_exists(page_url): logger.info("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url).data if "Not Found" in data or "File was deleted" in data: - return False, "[VVVVID] The file does not exist or has been deleted" + return False, config.get_localized_string(70449) % "VVVVID" else: page_url = page_url.replace("/show/","/#!show/") show_id = re.findall("#!show/([0-9]+)/", page_url)[0] name = re.findall(show_id + "/(.+?)/", page_url) - if not name: return False, "[VVVVID] The file does not exist or has been deleted" + if not name: return False, config.get_localized_string(70449) % "VVVVID" return True, "" @@ -45,20 +44,16 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= # Getting info from Site json_url = "https://www.vvvvid.it/vvvvid/ondemand/" + show_id + '/season/' +season_id + '/' - # logger.info('URL= ' + json_url) json_file = current_session.get(json_url, headers=headers, params=payload).json() logger.info(json_file['data']) # Search for the correct episode for episode in json_file['data']: - # import web_pdb; web_pdb.set_trace() if episode['video_id'] == int(video_id): ep_title = '[B]' + episode['title'] + '[/B]' embed_info = vvvvid_decoder.dec_ei(episode['embed_info']) embed_info = embed_info.replace('manifest.f4m','master.m3u8').replace('http://','https://').replace('/z/','/i/') - # import web_pdb; 
web_pdb.set_trace() - video_urls.append([ep_title, str(embed_info)]) return video_urls \ No newline at end of file diff --git a/servers/wstream.py b/servers/wstream.py index a9f5786d..ceda52ce 100644 --- a/servers/wstream.py +++ b/servers/wstream.py @@ -30,7 +30,7 @@ def test_video_exists(page_url): data = httptools.downloadpage(page_url, headers=headers, follow_redirects=True, verify=False).data real_url = page_url - if "Not Found" in data or "File was deleted" in data: + if "Not Found" in data or "File was deleted" in data or 'Video is processing' in data: return False, config.get_localized_string(70449) % 'Wstream' else: return True, "" diff --git a/specials/autoplay.py b/specials/autoplay.py index 552e5be8..3f2ea054 100644 --- a/specials/autoplay.py +++ b/specials/autoplay.py @@ -8,12 +8,9 @@ from builtins import range import os -from core import channeltools -from core import jsontools +from core import channeltools, jsontools from core.item import Item -from platformcode import config, logger -from platformcode import platformtools -from platformcode import launcher +from platformcode import config, logger, platformtools, launcher from time import sleep from platformcode.config import get_setting @@ -28,7 +25,7 @@ colorKOD = '0xFF65B3DA' def context(): ''' - Agrega la opcion Configurar AutoPlay al menu contextual + Add the Configure AutoPlay option to the context menu :return: ''' @@ -47,12 +44,12 @@ context = context() def show_option(channel, itemlist, text_color=colorKOD, thumbnail=None, fanart=None): ''' - Agrega la opcion Configurar AutoPlay en la lista recibida + Add the option Configure AutoPlay in the received list :param channel: str :param itemlist: list (lista donde se desea integrar la opcion de configurar AutoPlay) - :param text_color: str (color para el texto de la opcion Configurar Autoplay) - :param thumbnail: str (direccion donde se encuentra el thumbnail para la opcion Configurar Autoplay) + :param text_color: str (color for the text of the option Configure Autoplay) + :param thumbnail: str (address where the thumbnail is for the Configure Autoplay option) :return: ''' from channelselector import get_thumb @@ -85,13 +82,13 @@ def show_option(channel, itemlist, text_color=colorKOD, thumbnail=None, fanart=N def start(itemlist, item): ''' - Metodo principal desde donde se reproduce automaticamente los enlaces - - En caso la opcion de personalizar activa utilizara las opciones definidas por el usuario. - - En caso contrario intentara reproducir cualquier enlace que cuente con el idioma preferido. + Main method from which the links are automatically reproduced + - In case the option to activate it will use the options defined by the user. + - Otherwise it will try to reproduce any link that has the preferred language. 
- :param itemlist: list (lista de items listos para reproducir, o sea con action='play') - :param item: item (el item principal del canal) - :return: intenta autoreproducir, en caso de fallar devuelve el itemlist que recibio en un principio + :param itemlist: list (list of items ready to play, ie with action = 'play') + :param item: item (the main item of the channel) + :return: try to auto-reproduce, in case of failure it returns the itemlist that it received in the beginning ''' logger.info() @@ -103,11 +100,11 @@ def start(itemlist, item): if not config.is_xbmc(): - #platformtools.dialog_notification('AutoPlay ERROR', 'Sólo disponible para XBMC/Kodi') + # platformtools.dialog_notification('AutoPlay ERROR', 'Sólo disponible para XBMC/Kodi') return itemlist if not autoplay_node: - # Obtiene el nodo AUTOPLAY desde el json + # Get AUTOPLAY node from json autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY') channel_id = item.channel @@ -122,12 +119,12 @@ def start(itemlist, item): if not channel_id in autoplay_node: # or not active: return itemlist - # Agrega servidores y calidades que no estaban listados a autoplay_node + # Add servers and qualities not listed to autoplay_node new_options = check_value(channel_id, itemlist) - # Obtiene el nodo del canal desde autoplay_node + # Get the channel node from autoplay_node channel_node = autoplay_node.get(channel_id, {}) - # Obtiene los ajustes des autoplay para este canal + # Get the autoplay settings for this channel settings_node = channel_node.get('settings', {}) if get_setting('autoplay') or settings_node['active']: @@ -138,66 +135,65 @@ def start(itemlist, item): favorite_servers = [] favorite_quality = [] - #2nd lang, vemos si se quiere o no filtrar + # 2nd lang, see if you want to filter or not status_language = config.get_setting("filter_languages", channel_id) - # Guarda el valor actual de "Accion y Player Mode" en preferencias + # Save the current value of "Action and Player Mode" in preferences user_config_setting_action = config.get_setting("default_action") user_config_setting_player = config.get_setting("player_mode") - # Habilita la accion "Ver en calidad alta" (si el servidor devuelve más de una calidad p.e. 
gdrive) + # Enable the "View in high quality" action (if the server returns more than one quality, eg gdrive) if user_config_setting_action != 2: config.set_setting("default_action", 2) if user_config_setting_player != 0: config.set_setting("player_mode", 0) - # Informa que AutoPlay esta activo - #platformtools.dialog_notification('AutoPlay Activo', '', sound=False) + # Report that AutoPlay is active + # platformtools.dialog_notification('AutoPlay Activo', '', sound=False) - # Prioridades a la hora de ordenar itemlist: - # 0: Servidores y calidades - # 1: Calidades y servidores - # 2: Solo servidores - # 3: Solo calidades - # 4: No ordenar + # Priorities when ordering itemlist: + # 0: Servers and qualities + # 1: Qualities and servers + # 2: Servers only + # 3: Only qualities + # 4: Do not order if (settings_node['custom_servers'] and settings_node['custom_quality']) or get_setting('autoplay'): - priority = settings_node['priority'] # 0: Servidores y calidades o 1: Calidades y servidores + priority = settings_node['priority'] # 0: Servers and qualities or 1: Qualities and servers elif settings_node['custom_servers']: - priority = 2 # Solo servidores + priority = 2 # Servers only elif settings_node['custom_quality']: - priority = 3 # Solo calidades + priority = 3 # Only qualities else: - priority = 4 # No ordenar + priority = 4 # Do not order - # Obtiene las listas servidores, calidades disponibles desde el nodo del json de AutoPlay + # Get server lists, qualities available from AutoPlay json node server_list = channel_node.get('servers', []) for server in server_list: server = server.lower() quality_list = channel_node.get('quality', []) - # Si no se definen calidades la se asigna default como calidad unica + # If no qualities are defined, default is assigned as unique quality. if len(quality_list) == 0: quality_list =['default'] - # Se guardan los textos de cada servidor y calidad en listas p.e. favorite_servers = ['verystream', 'openload', - # 'streamcloud'] + # The texts of each server and quality are stored in lists, e.g. favorite_servers = ['verystream', 'openload', 'streamcloud'] for num in range(1, 4): favorite_servers.append(channel_node['servers'][settings_node['server_%s' % num]].lower()) favorite_quality.append(channel_node['quality'][settings_node['quality_%s' % num]]) - # Se filtran los enlaces de itemlist y que se correspondan con los valores de autoplay + # Itemlist links are filtered and correspond to autoplay values for n, item in enumerate(itemlist): autoplay_elem = dict() b_dict = dict() - # Comprobamos q se trata de un item de video + # We check that it is a video item if 'server' not in item: continue - #2nd lang lista idiomas + # 2nd lang language list if item.language not in favorite_langs: favorite_langs.append(item.language) - # Agrega la opcion configurar AutoPlay al menu contextual + # Add the option to configure AutoPlay to the context menu if 'context' not in item: item.context = list() if not [x for x in context if x['action'] == 'autoplay_config']: @@ -206,15 +202,14 @@ def start(itemlist, item): "channel": "autoplay", "from_channel": channel_id}) - # Si no tiene calidad definida le asigna calidad 'default' + # If it does not have a defined quality, it assigns a 'default' quality. 
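The ordering applied a little further down boils down to sorting the candidate links by the position of their language, server and quality in the user's preference lists. A small self-contained sketch of that idea (all values below are made up for illustration):

favorite_langs = ['ita', 'sub-ita']
favorite_servers = ['okru', 'mixdrop', 'streamtape']
favorite_quality = ['1080p', '720p', 'default']

links = [
    {'url': 'a', 'language': 'sub-ita', 'server': 'mixdrop', 'quality': '720p'},
    {'url': 'b', 'language': 'ita', 'server': 'streamtape', 'quality': '1080p'},
    {'url': 'c', 'language': 'ita', 'server': 'okru', 'quality': 'default'},
]

# Priority 0 ("servers and qualities"): language first, then server, then quality
links.sort(key=lambda l: (favorite_langs.index(l['language']),
                          favorite_servers.index(l['server']),
                          favorite_quality.index(l['quality'])))

print([l['url'] for l in links])  # ['c', 'b', 'a']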
if item.quality == '': item.quality = 'default' - # Se crea la lista para configuracion personalizada - if priority < 2: # 0: Servidores y calidades o 1: Calidades y servidores + # The list for custom settings is created + if priority < 2: # 0: Servers and qualities or 1: Qualities and servers - # si el servidor y la calidad no se encuentran en las listas de favoritos o la url esta repetida, - # descartamos el item + # if the server and the quality are not in the favorites lists or the url is repeated, we discard the item if item.server.lower() not in favorite_servers or item.quality not in favorite_quality \ or item.url in url_list_valid: item.type_b = True @@ -225,10 +220,9 @@ def start(itemlist, item): autoplay_elem["indice_server"] = favorite_servers.index(item.server.lower()) autoplay_elem["indice_quality"] = favorite_quality.index(item.quality) - elif priority == 2: # Solo servidores + elif priority == 2: # Servers only - # si el servidor no se encuentra en la lista de favoritos o la url esta repetida, - # descartamos el item + # if the server is not in the favorites list or the url is repeated, we discard the item if item.server.lower() not in favorite_servers or item.url in url_list_valid: item.type_b = True b_dict['videoitem'] = item @@ -239,8 +233,7 @@ def start(itemlist, item): elif priority == 3: # Solo calidades - # si la calidad no se encuentra en la lista de favoritos o la url esta repetida, - # descartamos el item + # if the quality is not in the favorites list or the url is repeated, we discard the item if item.quality not in favorite_quality or item.url in url_list_valid: item.type_b = True b_dict['videoitem'] = item @@ -249,13 +242,13 @@ def start(itemlist, item): autoplay_elem["indice_lang"] = favorite_langs.index(item.language) autoplay_elem["indice_quality"] = favorite_quality.index(item.quality) - else: # No ordenar + else: # Do not order - # si la url esta repetida, descartamos el item + # if the url is repeated, we discard the item if item.url in url_list_valid: continue - # Si el item llega hasta aqui lo añadimos al listado de urls validas y a autoplay_list + # If the item reaches here we add it to the list of valid urls and to autoplay_list url_list_valid.append(item.url) item.plan_b=True autoplay_elem['videoitem'] = item @@ -263,20 +256,20 @@ def start(itemlist, item): # autoplay_elem['quality'] = item.quality autoplay_list.append(autoplay_elem) - # Ordenamos segun la prioridad - if priority == 0: # Servidores y calidades + # We order according to priority + if priority == 0: # Servers and qualities autoplay_list.sort(key=lambda orden: (orden['indice_lang'], orden['indice_server'], orden['indice_quality'])) - elif priority == 1: # Calidades y servidores + elif priority == 1: # Qualities and servers autoplay_list.sort(key=lambda orden: (orden['indice_lang'], orden['indice_quality'], orden['indice_server'])) - elif priority == 2: # Solo servidores + elif priority == 2: # Servers only autoplay_list.sort(key=lambda orden: (orden['indice_lang'], orden['indice_server'])) - elif priority == 3: # Solo calidades + elif priority == 3: # Only qualities autoplay_list.sort(key=lambda orden: (orden['indice_lang'], orden['indice_quality'])) - # Se prepara el plan b, en caso de estar activo se agregan los elementos no favoritos al final + # Plan b is prepared, in case it is active the non-favorite elements are added at the end try: plan_b = settings_node['plan_b'] except: @@ -284,23 +277,22 @@ def start(itemlist, item): text_b = '' if plan_b: autoplay_list.extend(autoplay_b) - 
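As an aside, the ordering described above boils down to a composite sort key; the following minimal, self-contained sketch (the lists and dicts are illustrative, not the add-on's real data) shows how sorting by the indices of a link's language, server and quality reproduces the priorities listed in the comments:

favorite_servers = ['openload', 'verystream', 'streamcloud']  # slots 1..3 from the channel settings
favorite_quality = ['1080p', '720p', 'default']

# Each candidate mirrors the indice_* fields built for autoplay_list.
candidates = [
    {'indice_lang': 0, 'indice_server': 2, 'indice_quality': 0},
    {'indice_lang': 0, 'indice_server': 0, 'indice_quality': 1},
]

priority = 0  # 0: servers then qualities, 1: qualities then servers
if priority == 0:
    candidates.sort(key=lambda c: (c['indice_lang'], c['indice_server'], c['indice_quality']))
elif priority == 1:
    candidates.sort(key=lambda c: (c['indice_lang'], c['indice_quality'], c['indice_server']))
# Lower indices (preferred language first, then preferred server/quality) come first.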
# Si hay elementos en la lista de autoplay se intenta reproducir cada elemento, hasta encontrar uno - # funcional o fallen todos + # If there are items in the autoplay list, try to play each one until a working link is found or they all fail. if autoplay_list or (plan_b and autoplay_b): - #played = False + # played = False max_intentos = 5 max_intentos_servers = {} - # Si se esta reproduciendo algo detiene la reproduccion + # If something is already playing, stop it if platformtools.is_playing(): platformtools.stop_video() for autoplay_elem in autoplay_list: play_item = Item - # Si no es un elemento favorito si agrega el texto plan b + # If it is not a favourite item, add the '(Plan B)' text if autoplay_elem['videoitem'].type_b: text_b = '(Plan B)' if not platformtools.is_playing() and not PLAYED: @@ -308,7 +300,7 @@ def start(itemlist, item): if videoitem.server.lower() not in max_intentos_servers: max_intentos_servers[videoitem.server.lower()] = max_intentos - # Si se han alcanzado el numero maximo de intentos de este servidor saltamos al siguiente + # If the maximum number of attempts for this server has been reached, skip to the next one if max_intentos_servers[videoitem.server.lower()] == 0: continue @@ -318,10 +310,9 @@ def start(itemlist, item): platformtools.dialog_notification("AutoPlay %s" %text_b, "%s%s%s" % ( videoitem.server.upper(), lang, videoitem.quality.upper()), sound=False) - # TODO videoitem.server es el id del server, pero podria no ser el nombre!!! + # TODO videoitem.server is the server id, but it might not be the name!!! - # Intenta reproducir los enlaces - # Si el canal tiene metodo play propio lo utiliza + # Try to play the links; if the channel has its own play method, use it try: channel = __import__('channels.%s' % channel_id, None, None, ["channels.%s" % channel_id]) except: @@ -334,19 +325,19 @@ def start(itemlist, item): else: videoitem = resolved_item[0] - # Si no directamente reproduce y marca como visto + # Otherwise play directly and mark as watched - # Verifica si el item viene de la videoteca + # Check whether the item comes from the video library try: if base_item.contentChannel =='videolibrary': - # Marca como visto + # Mark as watched from platformcode import xbmc_videolibrary xbmc_videolibrary.mark_auto_as_watched(base_item) - # Rellena el video con los datos del item principal y reproduce + # Fill the video with the main item's data and play it play_item = base_item.clone(url=videoitem) platformtools.play_video(play_item.url, autoplay=True) else: - # Si no viene de la videoteca solo reproduce + # If it does not come from the video library, just play it platformtools.play_video(videoitem, autoplay=True) except: pass @@ -358,18 +349,16 @@ def start(itemlist, item): except: logger.debug(str(len(autoplay_list))) - # Si hemos llegado hasta aqui es por q no se ha podido reproducir + # If we got this far, the link could not be played max_intentos_servers[videoitem.server.lower()] -= 1 - # Si se han alcanzado el numero maximo de intentos de este servidor - # preguntar si queremos seguir probando o lo ignoramos + # If the maximum number of attempts for this server has been reached, ask whether to keep trying or ignore it.
if max_intentos_servers[videoitem.server.lower()] == 0: text = config.get_localized_string(60072) % videoitem.server.upper() - if not platformtools.dialog_yesno("AutoPlay", text, - config.get_localized_string(60073)): + if not platformtools.dialog_yesno("AutoPlay", text, config.get_localized_string(60073)): max_intentos_servers[videoitem.server.lower()] = max_intentos - # Si no quedan elementos en la lista se informa + # If there are no items in the list, it is reported if autoplay_elem == autoplay_list[-1]: platformtools.dialog_notification('AutoPlay', config.get_localized_string(60072) % videoitem.server.upper()) @@ -378,7 +367,7 @@ def start(itemlist, item): if new_options: platformtools.dialog_notification("AutoPlay", config.get_localized_string(60076), sound=False) - # Restaura si es necesario el valor previo de "Accion y Player Mode" en preferencias + # Restore if necessary the previous value of "Action and Player Mode" in preferences if user_config_setting_action != 2: config.set_setting("default_action", user_config_setting_action) if user_config_setting_player != 0: @@ -389,15 +378,15 @@ def start(itemlist, item): def init(channel, list_servers, list_quality, reset=False): ''' - Comprueba la existencia de canal en el archivo de configuracion de Autoplay y si no existe lo añade. - Es necesario llamar a esta funcion al entrar a cualquier canal que incluya la funcion Autoplay. + Check the existence of a channel in the Autoplay configuration file and if it does not exist, add it. + It is necessary to call this function when entering any channel that includes the Autoplay function. - :param channel: (str) id del canal - :param list_servers: (list) lista inicial de servidores validos para el canal. No es necesario incluirlos todos, - ya que la lista de servidores validos se ira actualizando dinamicamente. - :param list_quality: (list) lista inicial de calidades validas para el canal. No es necesario incluirlas todas, - ya que la lista de calidades validas se ira actualizando dinamicamente. - :return: (bool) True si la inicializacion ha sido correcta. + :param channel: (str) channel id + :param list_servers: (list) initial list of valid servers for the channel. It is not necessary to include them all, + since the list of valid servers will be updated dynamically. + :param list_quality: (list) initial list of valid qualities for the channel. It is not necessary to include them all, + since the list of valid qualities will be updated dynamically. + :return: (bool) True if the initialization was successful. 
''' logger.info() change = False @@ -418,13 +407,13 @@ def init(channel, list_servers, list_quality, reset=False): if channel not in autoplay_node or reset: change = True - # Se comprueba que no haya calidades ni servidores duplicados + # It is verified that there are no duplicate qualities or servers if 'default' not in list_quality: list_quality.append('default') # list_servers = list(set(list_servers)) # list_quality = list(set(list_quality)) - # Creamos el nodo del canal y lo añadimos + # We create the channel node and add it channel_node = {"servers": list_servers, "quality": list_quality, "settings": { @@ -458,11 +447,11 @@ def init(channel, list_servers, list_quality, reset=False): def check_value(channel, itemlist): - ''' comprueba la existencia de un valor en la lista de servidores o calidades - si no existiera los agrega a la lista en el json + ''' + checks the existence of a value in the list of servers or qualities if it does not exist adds them to the list in the json :param channel: str - :param values: list (una de servidores o calidades) + :param values: list (one of servers or qualities) :param value_type: str (server o quality) :return: list ''' @@ -471,7 +460,7 @@ def check_value(channel, itemlist): change = False if not autoplay_node: - # Obtiene el nodo AUTOPLAY desde el json + # Get AUTOPLAY node from json autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY') channel_node = autoplay_node.get(channel) @@ -507,7 +496,7 @@ def autoplay_config(item): channel_name = channel_parameters['title'] if not autoplay_node: - # Obtiene el nodo AUTOPLAY desde el json + # Get AUTOPLAY node from json autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY') channel_node = autoplay_node.get(item.from_channel, {}) @@ -515,21 +504,19 @@ def autoplay_config(item): allow_option = True - active_settings = {"id": "active", "label": config.get_localized_string(60079), - "type": "bool", "default": False, "enabled": allow_option, - "visible": allow_option} + active_settings = {"id": "active", "label": config.get_localized_string(60079), "type": "bool", + "default": False, "enabled": allow_option, "visible": allow_option} list_controls.append(active_settings) dict_values['active'] = settings_node.get('active', False) - hide_servers = {"id": "hide_servers", "label": config.get_localized_string(70747), - "type": "bool", "default": False, "enabled": "eq(-" + str(len(list_controls)) + ",true)", - "visible": allow_option} + hide_servers = {"id": "hide_servers", "label": config.get_localized_string(70747), "type": "bool", + "default": False, "enabled": "eq(-" + str(len(list_controls)) + ",true)", "visible": allow_option} list_controls.append(hide_servers) dict_values['hide_servers'] = settings_node.get('hide_servers', False) - # Idioma + # Language status_language = config.get_setting("filter_languages", item.from_channel) if not status_language: status_language = 0 @@ -546,7 +533,7 @@ def autoplay_config(item): "type": "label", "enabled": True, "visible": True} list_controls.append(separador) - # Seccion servidores favoritos + # Favorite servers section server_list = channel_node.get("servers", []) if not server_list: enabled = False @@ -578,7 +565,7 @@ def autoplay_config(item): if settings_node.get("server_%s" % num, 0) > len(server_list) - 1: dict_values["server_%s" % num] = 0 - # Seccion Calidades favoritas + # Favorite Qualities Section quality_list = channel_node.get("quality", []) if not quality_list: enabled = False @@ -586,8 +573,7 @@ def autoplay_config(item): 
else: enabled = "eq(-" + str(len(list_controls)) + ",true)" - custom_quality_settings = {"id": "custom_quality", "label": config.get_localized_string(60083), - "type": "bool", "default": False, "enabled": enabled, "visible": True} + custom_quality_settings = {"id": "custom_quality", "label": config.get_localized_string(60083), "type": "bool", "default": False, "enabled": enabled, "visible": True} custom_quality_pos = len(list_controls) list_controls.append(custom_quality_settings) if dict_values['active'] and enabled: @@ -617,7 +603,7 @@ def autoplay_config(item): list_controls.append(plan_b) - # Seccion Prioridades + # Priorities Section priority_list = [config.get_localized_string(70174), config.get_localized_string(70175)] set_priority = {"id": "priority", "label": config.get_localized_string(60085), "type": "list", "default": 0, "enabled": True, "visible": "eq(-5,true)+eq(-9,true)+eq(-12,true)", "lvalues": priority_list} @@ -626,7 +612,7 @@ def autoplay_config(item): - # Abrir cuadro de dialogo + # Open dialog box platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values, callback='save', item=item, caption='%s - AutoPlay' % channel_name, custom_button={'visible': True, @@ -637,7 +623,7 @@ def autoplay_config(item): def save(item, dict_data_saved): ''' - Guarda los datos de la ventana de configuracion + Save the data from the configuration window :param item: item :param dict_data_saved: dict @@ -647,7 +633,7 @@ def save(item, dict_data_saved): global autoplay_node if not autoplay_node: - # Obtiene el nodo AUTOPLAY desde el json + # Get AUTOPLAY node from json autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY') new_config = dict_data_saved @@ -665,7 +651,7 @@ def save(item, dict_data_saved): def get_languages(channel): ''' - Obtiene los idiomas desde el json del canal + Get the languages ​​from the channel's json :param channel: str :return: list @@ -685,9 +671,9 @@ def get_languages(channel): def is_active(channel): ''' - Devuelve un booleano q indica si esta activo o no autoplay en el canal desde el que se llama + Returns a boolean that indicates whether or not autoplay is active on the channel from which it is called - :return: True si esta activo autoplay para el canal desde el que se llama, False en caso contrario. + :return:True if autoplay is active for the channel from which it is called, False otherwise. 
''' logger.info() global autoplay_node @@ -696,18 +682,18 @@ def is_active(channel): return False if not autoplay_node: - # Obtiene el nodo AUTOPLAY desde el json + # Get AUTOPLAY node from json autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY') - # Obtine el canal desde el q se hace la llamada + # Get the channel from which the call is made #import inspect #module = inspect.getmodule(inspect.currentframe().f_back) #canal = module.__name__.split('.')[1] canal = channel - # Obtiene el nodo del canal desde autoplay_node + # Get the channel node from autoplay_node channel_node = autoplay_node.get(canal, {}) - # Obtiene los ajustes des autoplay para este canal + # Get the autoplay settings for this channel settings_node = channel_node.get('settings', {}) return settings_node.get('active', False) or get_setting('autoplay') @@ -727,7 +713,7 @@ def reset(item, dict): # def set_status(status): # logger.info() -# # Obtiene el nodo AUTOPLAY desde el json +# # Get AUTOPLAY node from json # autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY') # autoplay_node['status'] = status # @@ -737,7 +723,7 @@ def reset(item, dict): def get_channel_AP_HS(item): autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY') channel_node = autoplay_node.get(item.channel, {}) - if not channel_node: # non ha mai aperto il menu del canale quindi in autoplay_data.json non c'e la key + if not channel_node: # never opened the channel menu so in autoplay_data.json there is no key try: channelFile = __import__('channels.' + item.channel, fromlist=["channels.%s" % item.channel]) except: diff --git a/specials/checkhost.py b/specials/checkhost.py index 5593aae3..31ea9187 100644 --- a/specials/checkhost.py +++ b/specials/checkhost.py @@ -18,7 +18,7 @@ addonid = addon.getAddonInfo('id') LIST_SITE = ['http://www.ansa.it/', 'https://www.google.it']#, 'https://www.google.com'] -# lista di siti che non verranno raggiunti con i DNS del gestore +# list of sites that will not be reached with the manager's DNS LST_SITE_CHCK_DNS = ['https://www.casacinema.me/', 'https://cb01-nuovo-indirizzo.info/'] #'https://www.italia-film.pw', 'https://www.cb01.uno/',] # tolti @@ -41,13 +41,8 @@ class Kdicc(): def check_Ip(self): """ - controllo l'ip - se ip_addr = 127.0.0.1 o ip_addr = '' allora il device non - e' connesso al modem/router - check the ip - if ip_addr = 127.0.0.1 or ip_addr = '' then the device does not - is connected to the modem/router + if ip_addr = 127.0.0.1 or ip_addr = '' then the device does not is connected to the modem/router return: bool """ @@ -59,7 +54,7 @@ class Kdicc(): def check_Adsl(self): """ - controllo se il device raggiunge i siti + check if the device reaches the sites """ urls = LIST_SITE @@ -67,8 +62,8 @@ class Kdicc(): http_errr = 0 for rslt in r: xbmc.log("check_Adsl rslt: %s" % rslt['code'], level=xbmc.LOGNOTICE) - # Errno -2 potrebbe essere mancanza di connessione adsl o sito non raggiungibile.... - # anche nei casi in cui ci sia il cambio gestore. + # Errno -2 could be lack of adsl connection or unreachable site .... + # even in cases where there is a change of manager. 
if rslt['code'] == '111' or '[Errno -3]' in str(rslt['code']) or 'Errno -2' in str(rslt['code']): http_errr +=1 @@ -80,7 +75,7 @@ class Kdicc(): def check_Dns(self): """ - Controllo se i DNS raggiungono certi siti + Check whether the DNS servers reach certain sites """ if self.lst_site_check_dns == []: urls = LST_SITE_CHCK_DNS @@ -88,7 +83,7 @@ class Kdicc(): urls = self.lst_site_check_dns r = self.rqst(urls) - xbmc.log("check_Dns risultato: %s" % r, level=xbmc.LOGNOTICE) + xbmc.log("check_Dns result: %s" % r, level=xbmc.LOGNOTICE) http_errr = 0 for rslt in r: xbmc.log("check_Dns rslt: %s" % rslt['code'], level=xbmc.LOGNOTICE) @@ -103,7 +98,7 @@ class Kdicc(): def rqst(self, lst_urls): """ - url deve iniziare con http(s):' + url must start with http(s):' return : (esito, sito, url, code, reurl) """ rslt_final = [] @@ -114,7 +109,7 @@ class Kdicc(): for sito in lst_urls: rslt = {} try: - r = requests.head(sito, allow_redirects = True)#, timeout=7) # da errore dopo l'inserimento in lib di httplib2 + r = requests.head(sito, allow_redirects = True) #, timeout=7) # it raises an error since httplib2 was added to lib if r.url.endswith('/'): r.url = r.url[:-1] if str(sito) != str(r.url): @@ -130,17 +125,17 @@ class Kdicc(): xbmc.log("Risultato nel try: %s" % (r,), level=xbmc.LOGNOTICE) except requests.exceptions.ConnectionError as conn_errr: - # Errno 10061 per s.o. win - # gli Errno 10xxx e 11xxx saranno da compattare in qualche modo? - # gli errori vengono inglobati in code = '111' in quanto in quel momento - # non vengono raggiunti per una qualsiasi causa + # Errno 10061 on Windows + # should the Errno 10xxx and 11xxx codes be consolidated somehow? + # the errors are merged into code = '111' because at that moment + # the sites cannot be reached for whatever reason if '[Errno 111]' in str(conn_errr) or 'Errno 10060' in str(conn_errr) \ or 'Errno 10061' in str(conn_errr) \ or '[Errno 110]' in str(conn_errr) \ or 'ConnectTimeoutError' in str(conn_errr) \ or 'Errno 11002' in str(conn_errr) or 'ReadTimeout' in str(conn_errr) \ or 'Errno 11001' in str(conn_errr) \ - or 'Errno -2' in str(conn_errr): # questo errore è anche nel code: -2 + or 'Errno -2' in str(conn_errr): # this error is also reported as code -2 rslt['code'] = '111' rslt['url'] = str(sito) rslt['http_err'] = 'Connection error' @@ -169,11 +164,11 @@ class Kdicc(): else: rslt['code'] = code.status except httplib2.ServerNotFoundError as msg: - # sia per mancanza di ADSL che per i siti non esistenti + # both for lack of ADSL and for non-existent sites rslt['code'] = -2 except socket.error as msg: - # per siti irraggiungibili senza DNS corretti - #[Errno 111] Connection refused + # for unreachable sites without correct DNS + # [Errno 111] Connection refused rslt['code'] = 111 except: rslt['code'] = 'Connection error' @@ -181,8 +176,7 @@ class Kdicc(): def view_Advise(self, txt = '' ): """ - Avviso per utente - testConnected + Notice for the user - testConnected """ ip = self.check_Ip() if ip: @@ -201,37 +195,36 @@ class Kdicc(): txt = config.get_localized_string(707402) dialog.notification(addonname, txt, xbmcgui.NOTIFICATION_INFO, 10000) """ - def richiamato in launcher.py + def called from launcher.py """ def test_conn(is_exit, check_dns, view_msg, lst_urls, lst_site_check_dns, in_addon): ktest = Kdicc(is_exit, check_dns, view_msg, lst_urls, lst_site_check_dns, in_addon) - # se non ha l'ip lo comunico all'utente + # if there is no IP, notify the user if not ktest.check_Ip(): - # non permetto di entrare nell'addon # I don't let you get into the addon - #
inserire codice lingua + # enter language code if view_msg == True: ktest.view_Advise(config.get_localized_string(70720)) if ktest.is_exit == True: exit() - # se non ha connessione ADSL lo comunico all'utente + # if it has no ADSL connection, I will communicate it to the user if not ktest.check_Adsl(): if view_msg == True: ktest.view_Advise(config.get_localized_string(70721)) if ktest.is_exit == True: exit() - # se ha i DNS filtrati lo comunico all'utente + # if it has DNS filtered, I will communicate it to the user if check_dns == True: if not ktest.check_Dns(): if view_msg == True: ktest.view_Advise(config.get_localized_string(70722)) - xbmc.log("############ Inizio Check DNS ############", level=xbmc.LOGNOTICE) + xbmc.log("############ Start Check DNS ############", level=xbmc.LOGNOTICE) xbmc.log("## IP: %s" % (ktest.ip_addr), level=xbmc.LOGNOTICE) xbmc.log("## DNS: %s" % (ktest.dns), level=xbmc.LOGNOTICE) - xbmc.log("############ Fine Check DNS ############", level=xbmc.LOGNOTICE) + xbmc.log("############# End Check DNS #############", level=xbmc.LOGNOTICE) # if check_dns == True: # if ktest.check_Ip() == True and ktest.check_Adsl() == True and ktest.check_Dns() == True: # return True @@ -243,21 +236,17 @@ def test_conn(is_exit, check_dns, view_msg, # else: # return False -# def per la creazione del file channels.json +# def for creating the channels.json file def check_channels(inutile=''): """ - leggo gli host dei canali dal file channels.json - li controllo - scrivo il file channels-test.json - con il codice di errore e il nuovio url in caso di redirect + I read the channel hosts from the channels.json file, I check them, + I write the channels-test.json file with the error code and the new url in case of redirect - gli url DEVONO avere http(s) + urls MUST have http (s) - Durante il controllo degli urls vengono rieffettuati - i controlli di ip, asdl e dns. - Questo perchè può succedere che in un qualsiasi momento - la connessione possa avere problemi. Nel caso accada, il controllo e - relativa scrittura del file viene interrotto con messaggio di avvertimento + During the urls check the ip, asdl and dns checks are carried out. + This is because it can happen that at any time the connection may have problems. If it does, check it + relative writing of the file is interrupted with a warning message """ logger.info() @@ -272,37 +261,37 @@ def check_channels(inutile=''): for chann, host in sorted(data.items()): ris = [] - # per avere un'idea della tempistica - # utile solo se si controllano tutti i canali - # per i canali con error 522 si perdono circa 40 sec... + # to get an idea of ​​the timing + # useful only if you control all channels + # for channels with error 522 about 40 seconds are lost ... 
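As a rough, standalone sketch of the flow described in the docstring above (read channels.json, probe every host, keep the final URL on redirects, write channels-test.json), assuming plain requests and json are available; the helper name and file paths are illustrative only:

import json
import requests

def check_hosts_sketch(src='channels.json', dst='channels-test.json'):
    # channel -> host mapping, as in channels.json
    with open(src) as f:
        hosts = json.load(f)
    result = {}
    for chann, host in sorted(hosts.items()):
        try:
            r = requests.head(host, allow_redirects=True, timeout=10)
            # on a redirect keep the final address, without the trailing slash
            result[chann] = r.url.rstrip('/') if r.url.rstrip('/') != host.rstrip('/') else host
        except requests.RequestException as e:
            # unreachable or unknown host: keep the old value plus the error class
            result[chann] = '%s - %s' % (host, e.__class__.__name__)
    # write the updated mapping next to the original file
    with open(dst, 'w') as f:
        json.dump(result, f, sort_keys=True, indent=4)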
logger.info("check #### INIZIO #### channel - host :%s - %s " % (chann, host)) rslt = Kdicc(lst_urls = [host]).http_Resp() - # tutto ok + # all right if rslt['code'] == 200: risultato[chann] = host # redirect elif str(rslt['code']).startswith('3'): - #risultato[chann] = str(rslt['code']) +' - '+ rslt['redirect'][:-1] + # risultato[chann] = str(rslt['code']) +' - '+ rslt['redirect'][:-1] if rslt['redirect'].endswith('/'): rslt['redirect'] = rslt['redirect'][:-1] risultato[chann] = rslt['redirect'] - # sito inesistente + # non-existent site elif rslt['code'] == -2: risultato[chann] = 'Host Sconosciuto - '+ str(rslt['code']) +' - '+ host - # sito non raggiungibile - probabili dns non settati + # site not reachable - probable dns not set elif rslt['code'] == 111: risultato[chann] = ['Host non raggiungibile - '+ str(rslt['code']) +' - '+ host] else: - # altri tipi di errore - #risultato[chann] = 'Errore Sconosciuto - '+str(rslt['code']) +' - '+ host + # other types of errors + # risultato[chann] = 'Errore Sconosciuto - '+str(rslt['code']) +' - '+ host risultato[chann] = host logger.info("check #### FINE #### rslt :%s " % (rslt)) fileJson_test = 'channels-test.json' - # scrivo il file aggiornato + # I write the updated file with open(folderJson+'/'+fileJson_test, 'w') as f: data = json.dump(risultato, f, sort_keys=True, indent=4) logger.info(data) diff --git a/specials/downloads.py b/specials/downloads.py index ae6014ea..2467b63d 100644 --- a/specials/downloads.py +++ b/specials/downloads.py @@ -11,17 +11,13 @@ if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int from future.builtins import filter from past.utils import old_div -import re -import time -import unicodedata -import xbmc +import re, time, unicodedata, xbmc from channelselector import get_thumb from core import filetools, jsontools, scraper, scrapertools, servertools, videolibrarytools, support from core.downloader import Downloader from core.item import Item -from platformcode import config, logger -from platformcode import platformtools +from platformcode import config, logger, platformtools from core.support import log, dbg, typo from servers import torrent diff --git a/specials/favorites.py b/specials/favorites.py index 51089d8f..bc4ba832 100644 --- a/specials/favorites.py +++ b/specials/favorites.py @@ -1,19 +1,16 @@ # -*- coding: utf-8 -*- # ------------------------------------------------------------ -# Lista de vídeos favoritos +# List of favorite videos # ------------------------------------------------------------ -import os -import time +import os, time -from core import filetools -from core import scrapertools +from core import filetools, scrapertools from core.item import Item -from platformcode import config, logger -from platformcode import platformtools +from platformcode import config, logger, platformtools try: - # Fijamos la ruta a favourites.xml + # We set the path to favorites.xml if config.is_xbmc(): import xbmc @@ -32,8 +29,7 @@ def mainlist(item): for name, thumb, data in read_favourites(): if "plugin://plugin.video.%s/?" 
% config.PLUGIN_NAME in data: - url = scrapertools.find_single_match(data, 'plugin://plugin.video.%s/\?([^;]*)' % config.PLUGIN_NAME) \ - .replace(""", "") + url = scrapertools.find_single_match(data, 'plugin://plugin.video.%s/\?([^;]*)' % config.PLUGIN_NAME).replace(""", "") item = Item().fromurl(url) item.title = name @@ -45,7 +41,7 @@ def mainlist(item): elif type(item.context) != list: item.context = [] - item.context.extend([{"title": config.get_localized_string(30154), # "Quitar de favoritos" + item.context.extend([{"title": config.get_localized_string(30154), # "Remove from favorites " "action": "delFavourite", "channel": "favorites", "from_title": item.title}, @@ -88,7 +84,7 @@ def addFavourite(item): logger.info() # logger.debug(item.tostring('\n')) - # Si se llega aqui mediante el menu contextual, hay que recuperar los parametros action y channel + # If you get here through the context menu, you must retrieve the action and channel parameters if item.from_action: item.__dict__["action"] = item.__dict__.pop("from_action") if item.from_channel: @@ -100,8 +96,7 @@ def addFavourite(item): favourites_list.append((titulo, item.thumbnail, data)) if save_favourites(favourites_list): - platformtools.dialog_ok(config.get_localized_string(30102), titulo, - config.get_localized_string(30108)) # 'se ha añadido a favoritos' + platformtools.dialog_ok(config.get_localized_string(30102), titulo, config.get_localized_string(30108)) # 'added to favorites' def delFavourite(item): @@ -117,8 +112,7 @@ def delFavourite(item): favourites_list.remove(fav) if save_favourites(favourites_list): - platformtools.dialog_ok(config.get_localized_string(30102), item.title, - config.get_localized_string(30105).lower()) # 'Se ha quitado de favoritos' + platformtools.dialog_ok(config.get_localized_string(30102), item.title, config.get_localized_string(30105).lower()) # 'Removed from favorites' platformtools.itemlist_refresh() break @@ -127,22 +121,21 @@ def renameFavourite(item): logger.info() # logger.debug(item.tostring('\n')) - # Buscar el item q queremos renombrar en favourites.xml + # Find the item we want to rename in favorites.xml favourites_list = read_favourites() for i, fav in enumerate(favourites_list): if fav[0] == item.from_title: - # abrir el teclado + # open keyboard new_title = platformtools.dialog_input(item.from_title, item.title) if new_title: favourites_list[i] = (new_title, fav[1], fav[2]) if save_favourites(favourites_list): - platformtools.dialog_ok(config.get_localized_string(30102), item.from_title, - "se ha renombrado como:", new_title) # 'Se ha quitado de favoritos' + platformtools.dialog_ok(config.get_localized_string(30102), item.from_title, config.get_localized_string(60086) + ' ', new_title) # 'Removed from favorites' platformtools.itemlist_refresh() ################################################## -# Funciones para migrar favoritos antiguos (.txt) +# Features to migrate old favorites (.txt) def readbookmark(filepath): logger.info() import urllib @@ -176,7 +169,7 @@ def readbookmark(filepath): except: plot = lines[4].strip() - # Campos contentTitle y canal añadidos + # ContentTitle and channel fields added if len(lines) >= 6: try: contentTitle = urllib.unquote_plus(lines[5].strip()) @@ -199,7 +192,7 @@ def readbookmark(filepath): def check_bookmark(readpath): - # Crea un listado con las entradas de favoritos + # Create a list with favorite entries itemlist = [] if readpath.startswith("special://") and config.is_xbmc(): @@ -207,12 +200,12 @@ def check_bookmark(readpath): readpath = 
xbmc.translatePath(readpath) for fichero in sorted(filetools.listdir(readpath)): - # Ficheros antiguos (".txt") + # Old files (".txt") if fichero.endswith(".txt"): - # Esperamos 0.1 segundos entre ficheros, para que no se solapen los nombres de archivo + # We wait 0.1 seconds between files, so that the file names do not overlap time.sleep(0.1) - # Obtenemos el item desde el .txt + # We get the item from the .txt canal, titulo, thumbnail, plot, server, url, contentTitle = readbookmark(filetools.join(readpath, fichero)) if canal == "": canal = "favorites" @@ -222,21 +215,21 @@ def check_bookmark(readpath): filetools.rename(filetools.join(readpath, fichero), fichero[:-4] + ".old") itemlist.append(item) - # Si hay Favoritos q guardar + # If there are Favorites to save if itemlist: favourites_list = read_favourites() for item in itemlist: data = "ActivateWindow(10025,"plugin://plugin.video.kod/?" + item.tourl() + "",return)" favourites_list.append((item.title, item.thumbnail, data)) if save_favourites(favourites_list): - logger.debug("Conversion de txt a xml correcta") + logger.debug("Correct txt to xml conversion") -# Esto solo funcionara al migrar de versiones anteriores, ya no existe "bookmarkpath" +# This will only work when migrating from previous versions, there is no longer a "bookmarkpath" try: if config.get_setting("bookmarkpath") != "": check_bookmark(config.get_setting("bookmarkpath")) else: - logger.info("No existe la ruta a los favoritos de versiones antiguas") + logger.info("No path to old version favorites") except: pass diff --git a/specials/filtertools.py b/specials/filtertools.py index f8d05001..8dae6e0d 100644 --- a/specials/filtertools.py +++ b/specials/filtertools.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # ------------------------------------------------------------ -# filtertools - se encarga de filtrar resultados +# filtertools - is responsible for filtering results # ------------------------------------------------------------ from builtins import object @@ -27,7 +27,7 @@ filter_global = None __channel__ = "filtertools" -# TODO echar un ojo a https://pyformat.info/, se puede formatear el estilo y hacer referencias directamente a elementos +# TODO take a look at https://pyformat.info/, you can format the style and make references directly to elements class ResultFilter(object): @@ -37,8 +37,7 @@ class ResultFilter(object): self.quality_allowed = dict_filter[TAG_QUALITY_ALLOWED] def __str__(self): - return "{active: '%s', language: '%s', quality_allowed: '%s'}" % \ - (self.active, self.language, self.quality_allowed) + return "{active: '%s', language: '%s', quality_allowed: '%s'}" % (self.active, self.language, self.quality_allowed) class Filter(object): @@ -59,7 +58,7 @@ class Filter(object): TAG_LANGUAGE: dict_filtered_shows[tvshow][TAG_LANGUAGE], TAG_QUALITY_ALLOWED: dict_filtered_shows[tvshow][TAG_QUALITY_ALLOWED]}) - # opcion general "no filtrar" + # general option "do not filter" elif global_filter_language != 0: from core import channeltools list_controls, dict_settings = channeltools.get_channel_controls_settings(item.channel) @@ -71,8 +70,7 @@ class Filter(object): language = control["lvalues"][global_filter_language] # logger.debug("language %s" % language) except: - logger.error("No se ha encontrado el valor asociado al codigo '%s': %s" % - (global_filter_lang_id, global_filter_language)) + logger.error("The value associated with the code was not found '%s': %s" % (global_filter_lang_id, global_filter_language)) break self.result = ResultFilter({TAG_ACTIVE: 
True, TAG_LANGUAGE: language, TAG_QUALITY_ALLOWED: []}) @@ -84,7 +82,7 @@ class Filter(object): def access(): """ - Devuelve si se puede usar o no filtertools + Returns whether or not filtertools can be used """ allow = False @@ -96,24 +94,23 @@ def access(): def context(item, list_language=None, list_quality=None, exist=False): """ - Para xbmc/kodi y mediaserver ya que pueden mostrar el menú contextual, se añade un menu para configuración - la opción de filtro, sólo si es para series. - Dependiendo del lugar y si existe filtro se añadirán más opciones a mostrar. - El contexto -solo se muestra para series-. + For xbmc / kodi and mediaserver since they can show the contextual menu, a filter option is added to the configuration menu, only if it is for series. + Depending on the place and if there is a filter, more options will be added to show. + The context -is shown only for series-. - @param item: elemento para obtener la información y ver que contexto añadir + @param item: eelement to get the information and see what context to add @type item: item - param list_language: listado de idiomas posibles + param list_language: list of possible languages @type list_language: list[str] - @param list_quality: listado de calidades posibles + @param list_quality: list of possible qualities @type list_quality: list[str] - @param exist: si existe el filtro + @param exist: if the filter exists @type exist: bool - @return: lista de opciones a mostrar en el menú contextual + @return: list of options to display in the context menu @rtype: list """ - # Dependiendo de como sea el contexto lo guardamos y añadimos las opciones de filtertools. + # Depending on how the context is, we save it and add the filtertools options. if isinstance(item.context, str): _context = item.context.split("|") elif isinstance(item.context, list): @@ -152,10 +149,8 @@ def context(item, list_language=None, list_quality=None, exist=False): def show_option(itemlist, channel, list_language, list_quality): if access(): - itemlist.append(Item(channel=__channel__, title=config.get_localized_string(60429) % - COLOR.get("parent_item", "auto"), action="load", - list_language=list_language, - list_quality=list_quality, from_channel=channel)) + itemlist.append(Item(channel=__channel__, title=config.get_localized_string(60429) % COLOR.get("parent_item", "auto"), action="load", + list_language=list_language, list_quality=list_quality, from_channel=channel)) return itemlist @@ -169,14 +164,14 @@ def check_conditions(_filter, list_item, item, list_language, list_quality, qual if _filter.language: # logger.debug("title es %s" % item.title) - #2nd lang + # 2nd lang from platformcode import unify _filter.language = unify.set_lang(_filter.language).upper() - # viene de episodios + # comes from episodes if isinstance(item.language, list): - #2nd lang + # 2nd lang for n, lang in enumerate(item.language): item.language[n] = unify.set_lang(lang).upper() @@ -184,9 +179,9 @@ def check_conditions(_filter, list_item, item, list_language, list_quality, qual language_count += 1 else: is_language_valid = False - # viene de findvideos + # comes from findvideos else: - #2nd lang + # 2nd lang item.language = unify.set_lang(item.language).upper() if item.language.lower() == _filter.language.lower(): @@ -198,7 +193,7 @@ def check_conditions(_filter, list_item, item, list_language, list_quality, qual quality = "" if _filter.quality_allowed and item.quality != "": - # if hasattr(item, 'quality'): # esta validación no hace falta por que SIEMPRE se devuelve el atributo 
vacío + # if hasattr (item, 'quality'): # this validation is not necessary because the empty attribute is ALWAYS returned if item.quality.lower() in _filter.quality_allowed: quality = item.quality.lower() quality_count += 1 @@ -206,7 +201,7 @@ def check_conditions(_filter, list_item, item, list_language, list_quality, qual is_quality_valid = False if is_language_valid and is_quality_valid: - #TODO 2nd lang: habría que ver si conviene unificar el idioma aqui o no + #TODO 2nd lang: we should see if it is convenient to unify the language here or not item.list_language = list_language if list_quality: item.list_quality = list_quality @@ -216,34 +211,32 @@ def check_conditions(_filter, list_item, item, list_language, list_quality, qual # logger.debug(" -Enlace añadido") elif not item.language: list_item.append(item) - logger.debug(" idioma valido?: %s, item.language: %s, filter.language: %s" % - (is_language_valid, item.language, _filter.language)) - logger.debug(" calidad valida?: %s, item.quality: %s, filter.quality_allowed: %s" - % (is_quality_valid, quality, _filter.quality_allowed)) + logger.debug(" idioma valido?: %s, item.language: %s, filter.language: %s" % (is_language_valid, item.language, _filter.language)) + logger.debug(" calidad valida?: %s, item.quality: %s, filter.quality_allowed: %s" % (is_quality_valid, quality, _filter.quality_allowed)) return list_item, quality_count, language_count, _filter.language def get_link(list_item, item, list_language, list_quality=None, global_filter_lang_id="filter_languages"): """ - Devuelve una lista de enlaces, si el item está filtrado correctamente se agrega a la lista recibida. + Returns a list of links, if the item is correctly filtered it is added to the received list. - @param list_item: lista de enlaces + @param list_item: list of links @type list_item: list[Item] - @param item: elemento a filtrar + @param item: element to filter @type item: Item - @param list_language: listado de idiomas posibles + @param list_language: list of possible languages @type list_language: list[str] - @param list_quality: listado de calidades posibles + @param list_quality: list of possible qualities @type list_quality: list[str] - @param global_filter_lang_id: id de la variable de filtrado por idioma que está en settings + @param global_filter_lang_id: id of the filtering variable by language that is in settings @type global_filter_lang_id: str - @return: lista de Item + @return: Item list @rtype: list[Item] """ logger.info() - # si los campos obligatorios son None salimos + # if the required fields are None we leave if list_item is None or item is None: return [] @@ -256,8 +249,7 @@ def get_link(list_item, item, list_language, list_quality=None, global_filter_la logger.debug("filter: '%s' datos: '%s'" % (item.show, filter_global)) if filter_global and filter_global.active: - list_item, quality_count, language_count = \ - check_conditions(filter_global, list_item, item, list_language, list_quality)[:3] + list_item, quality_count, language_count = check_conditions(filter_global, list_item, item, list_language, list_quality)[:3] else: item.context = context(item) list_item.append(item) @@ -267,17 +259,17 @@ def get_link(list_item, item, list_language, list_quality=None, global_filter_la def get_links(list_item, item, list_language, list_quality=None, global_filter_lang_id="filter_languages"): """ - Devuelve una lista de enlaces filtrados. + Returns a list of filtered links. 
- @param list_item: lista de enlaces + @param list_item: list of links @type list_item: list[Item] - @param item: elemento a filtrar + @param item: element to filter @type item: item - @param list_language: listado de idiomas posibles + @param list_language: list of possible languages @type list_language: list[str] - @param list_quality: listado de calidades posibles + @param list_quality: list of possible qualities @type list_quality: list[str] - @param global_filter_lang_id: id de la variable de filtrado por idioma que está en settings + @param global_filter_lang_id: id of the filtering variable by language that is in settings @type global_filter_lang_id: str @return: lista de Item @rtype: list[Item] @@ -285,18 +277,18 @@ def get_links(list_item, item, list_language, list_quality=None, global_filter_l logger.info() - # si los campos obligatorios son None salimos + # if the required fields are None we leave if list_item is None or item is None: return [] - # si list_item está vacío volvemos, no se añade validación de plataforma para que Plex pueda hacer filtro global + # if list_item is empty we go back, no platform validation is added so Plex can do global filter if len(list_item) == 0: return list_item second_lang = config.get_setting('second_language') - #Ordena segun servidores favoritos, elima servers de blacklist y desactivados + # Sort by favorite servers, delete blacklist servers and disabled from core import servertools list_item= servertools.filter_servers(list_item) @@ -313,8 +305,7 @@ def get_links(list_item, item, list_language, list_quality=None, global_filter_l if _filter and _filter.active: for item in list_item: - new_itemlist, quality_count, language_count, first_lang = check_conditions(_filter, new_itemlist, item, list_language, - list_quality, quality_count, language_count) + new_itemlist, quality_count, language_count, first_lang = check_conditions(_filter, new_itemlist, item, list_language, list_quality, quality_count, language_count) #2nd lang if second_lang and second_lang != 'No' and first_lang.lower() != second_lang.lower() : @@ -322,7 +313,6 @@ def get_links(list_item, item, list_language, list_quality=None, global_filter_l _filter2 = _filter _filter2.language = second_lang for it in new_itemlist: - if isinstance(it.language, list): if not second_lang in it.language: second_list.append(it) @@ -330,32 +320,27 @@ def get_links(list_item, item, list_language, list_quality=None, global_filter_l second_list = new_itemlist break for item in list_item: - new_itemlist, quality_count, language_count, second_lang = check_conditions(_filter2, second_list, item, list_language, - list_quality, quality_count, language_count) + new_itemlist, quality_count, language_count, second_lang = check_conditions(_filter2, second_list, item, list_language, list_quality, quality_count, language_count) - logger.debug("ITEMS FILTRADOS: %s/%s, idioma [%s]: %s, calidad_permitida %s: %s" - % (len(new_itemlist), len(list_item), _filter.language, language_count, _filter.quality_allowed, - quality_count)) + logger.debug("FILTERED ITEMS: %s/%s, language [%s]: %s, allowed quality %s: %s" % (len(new_itemlist), len(list_item), _filter.language, language_count, _filter.quality_allowed, quality_count)) if len(new_itemlist) == 0: list_item_all = [] for i in list_item: list_item_all.append(i.tourl()) - _context = [ - {"title": config.get_localized_string(60430) % _filter.language, "action": "delete_from_context", - "channel": "filtertools", "to_channel": item.channel}] + _context = [{"title": 
config.get_localized_string(60430) % _filter.language, "action": "delete_from_context", "channel": "filtertools", "to_channel": item.channel}] if _filter.quality_allowed: msg_quality_allowed = " y calidad %s" % _filter.quality_allowed else: msg_quality_allowed = "" - + msg_lang = ' %s' % first_lang.upper() if second_lang and second_lang != 'No': msg_lang = 's %s ni %s' % (first_lang.upper(), second_lang.upper()) - + new_itemlist.append(Item(channel=__channel__, action="no_filter", list_item_all=list_item_all, show=item.show, title=config.get_localized_string(60432) % (_filter.language, msg_quality_allowed), @@ -392,15 +377,15 @@ def no_filter(item): def mainlist(channel, list_language, list_quality): """ - Muestra una lista de las series filtradas + Shows a list of the filtered series - @param channel: nombre del canal para obtener las series filtradas + @param channel: channel name from which to get the filtered series @type channel: str - @param list_language: lista de idiomas del canal + @param list_language: channel language list @type list_language: list[str] - @param list_quality: lista de calidades del canal + @param list_quality: channel quality list @type list_quality: list[str] - @return: lista de Item + @return: Item list @rtype: list[Item] """ logger.info() @@ -439,7 +424,7 @@ def mainlist(channel, list_language, list_quality): def config_item(item): """ - muestra una serie filtrada para su configuración + displays a filtered show for configuration @param item: item @type item: Item @@ -447,7 +432,7 @@ def config_item(item): logger.info() logger.info("item %s" % item.tostring()) - # OBTENEMOS LOS DATOS DEL JSON + # WE GET THE JSON DATA dict_series = jsontools.get_node_from_file(item.from_channel, TAG_TVSHOW_FILTER) tvshow = item.show.lower().strip() @@ -462,7 +447,7 @@ def config_item(item): pass if default_lang == '': - platformtools.dialog_notification("FilterTools", "No hay idiomas definidos") + platformtools.dialog_notification("FilterTools", "There are no defined languages") return else: lang_selected = dict_series.get(tvshow, {}).get(TAG_LANGUAGE, default_lang) @@ -525,7 +510,7 @@ def config_item(item): "visible": True, }) - # concatenamos list_controls con list_controls_calidad + # concatenate list_controls with list_controls_calidad list_controls.extend(list_controls_calidad) title = config.get_localized_string(60441) % (COLOR.get("selected", "auto"), item.show) @@ -566,11 +551,11 @@ def delete(item, dict_values): def save(item, dict_data_saved): """ - Guarda los valores configurados en la ventana + Save the values configured in the window @param item: item @type item: Item - @param dict_data_saved: diccionario con los datos salvados + @param dict_data_saved: dictionary with saved data @type dict_data_saved: dict """ logger.info() @@ -583,7 +568,7 @@ def save(item, dict_data_saved): dict_series = jsontools.get_node_from_file(item.from_channel, TAG_TVSHOW_FILTER) tvshow = item.show.strip().lower() - logger.info("Se actualiza los datos") + logger.info("Data is updated") list_quality = [] for _id, value in list(dict_data_saved.items()): @@ -613,7 +598,7 @@ def save(item, dict_data_saved): def save_from_context(item): """ - Salva el filtro a través del menú contextual + Save the filter through the context menu @param item: item @type item: item """ @@ -630,9 +615,9 @@ def save_from_context(item): sound = False if result: - message = "FILTRO GUARDADO" + message = "FILTER SAVED" else: - message = "Error al guardar en disco" + message = "Error saving to disk" sound = True heading = "%s [%s]" % (item.show.strip(), item.language)
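For orientation, the per-show node that save() and delete() maintain, and that ResultFilter reads back, can be pictured roughly as follows; the literal key strings are an assumption based on the TAG_* constant names, and the values are only an example:

# Hypothetical contents of the TAG_TVSHOW_FILTER node for one channel,
# keyed by the lower-cased show title.
dict_series = {
    "breaking bad": {
        "active": True,                       # TAG_ACTIVE
        "language": "VOSE",                   # TAG_LANGUAGE
        "quality_allowed": ["720p", "1080p"]  # TAG_QUALITY_ALLOWED
    }
}
# ResultFilter(dict_series["breaking bad"]) would then expose .active,
# .language and .quality_allowed to get_link()/get_links().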
@@ -644,14 +629,14 @@ def delete_from_context(item): """ - Elimina el filtro a través del menú contextual + Delete the filter through the context menu @param item: item @type item: item """ logger.info() - # venimos desde get_links y no se ha obtenido ningún resultado, en menu contextual y damos a borrar + # we arrive here from get_links when no result was obtained and the user chose delete from the context menu if item.to_channel != "": item.from_channel = item.to_channel @@ -665,9 +650,9 @@ def delete_from_context(item): sound = False if result: - message = "FILTRO ELIMINADO" + message = "FILTER REMOVED" else: - message = "Error al guardar en disco" + message = "Error saving to disk" sound = True heading = "%s [%s]" % (item.show.strip(), lang_selected) diff --git a/specials/kodfavorites.py b/specials/kodfavorites.py index 0322ec3c..615b3117 100644 --- a/specials/kodfavorites.py +++ b/specials/kodfavorites.py @@ -1,21 +1,21 @@ # -*- coding: utf-8 -*- # ------------------------------------------------------------ -# Alfa favoritos +# KoD favorites # ============== -# - Lista de enlaces guardados como favoritos, solamente en Alfa, no Kodi. -# - Los enlaces se organizan en carpetas (virtuales) que puede definir el usuario. -# - Se utiliza un sólo fichero para guardar todas las carpetas y enlaces: kodfavourites-default.json -# - Se puede copiar kodfavourites-default.json a otros dispositivos ya que la única dependencia local es el thumbnail asociado a los enlaces, -# pero se detecta por código y se ajusta al dispositivo actual. -# - Se pueden tener distintos ficheros de alfavoritos y alternar entre ellos, pero solamente uno de ellos es la "lista activa". -# - Los ficheros deben estar en config.get_data_path() y empezar por kodfavourites- y terminar en .json +# - List of links saved as favorites, only in KoD, not in Kodi. +# - Links are organized in (virtual) folders that can be defined by the user. +# - A single file is used to save all folders and links: kodfavourites-default.json +# - kodfavourites-default.json can be copied to other devices since the only local dependency is the thumbnail associated with the links, +# but it is detected by code and adjusted to the current device. +# - You can keep several kodfavourites files and switch between them, but only one of them is the "active list".
+# - Files must be in config.get_data_path () and start with kodfavourites- and end in .json -# Requerimientos en otros módulos para ejecutar este canal: -# - Añadir un enlace a este canal en channelselector.py -# - Modificar platformtools.py para controlar el menú contextual y añadir "Guardar enlace" en set_context_commands +# Requirements in other modules to run this channel: +# - Add a link to this channel in channelselector.py +# - Modify platformtools.py to control the context menu and add "Save link" in set_context_commands # ------------------------------------------------------------ -#from builtins import str +# from builtins import str import sys PY3 = False if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int @@ -32,35 +32,34 @@ from core import filetools, jsontools def fechahora_actual(): return datetime.now().strftime('%Y-%m-%d %H:%M') -# Helpers para listas -# ------------------- +# List Helpers PREFIJO_LISTA = 'kodfavorites-' -# Devuelve el nombre de la lista activa (Ej: kodfavourites-default.json) +# Returns the name of the active list (Ex: kodfavourites-default.json) def get_lista_activa(): return config.get_setting('lista_activa', default = PREFIJO_LISTA + 'default.json') -# Extrae nombre de la lista del fichero, quitando prefijo y sufijo (Ej: kodfavourites-Prueba.json => Prueba) +# Extract list name from file, removing prefix and suffix (Ex: kodfavourites-Test.json => Test) def get_name_from_filename(filename): return filename.replace(PREFIJO_LISTA, '').replace('.json', '') -# Componer el fichero de lista a partir de un nombre, añadiendo prefijo y sufijo (Ej: Prueba => kodfavourites-Prueba.json) +# Compose the list file from a name, adding prefix and suffix (Ex: Test => kodfavourites-Test.json) def get_filename_from_name(name): return PREFIJO_LISTA + name + '.json' -# Apuntar en un fichero de log los códigos de los ficheros que se hayan compartido +# Record the codes of the files that have been shared in a log file def save_log_lista_shared(msg): msg = fechahora_actual() + ': ' + msg + os.linesep fullfilename = os.path.join(config.get_data_path(), 'kodfavorites_shared.log') with open(fullfilename, 'a') as f: f.write(msg); f.close() -# Limpiar texto para usar como nombre de fichero +# Clean text to use as file name def text_clean(txt, disallowed_chars = '[^a-zA-Z0-9\-_()\[\]. ]+', blank_char = ' '): import unicodedata try: txt = unicode(txt, 'utf-8') - except NameError: # unicode is a default on python 3 + except NameError: # unicode is a default on python 3 pass txt = unicodedata.normalize('NFKD', txt).encode('ascii', 'ignore') txt = txt.decode('utf-8').strip() @@ -70,13 +69,12 @@ def text_clean(txt, disallowed_chars = '[^a-zA-Z0-9\-_()\[\]. 
]+', blank_char = -# Clase para cargar y guardar en el fichero de Alfavoritos -# -------------------------------------------------------- +# Class to load and save in the KoDFavorites file class KodfavouritesData(object): def __init__(self, filename = None): - # Si no se especifica ningún fichero se usa la lista_activa (si no la hay se crea) + # If no file is specified, the active_list is used (if not, it is created) if filename == None: filename = get_lista_activa() @@ -84,7 +82,7 @@ class KodfavouritesData(object): if not os.path.exists(self.user_favorites_file): fichero_anterior = os.path.join(config.get_data_path(), 'user_favorites.json') - if os.path.exists(fichero_anterior): # formato anterior, convertir (a eliminar después de algunas versiones) + if os.path.exists(fichero_anterior): # old format, convert (to delete after some versions) jsondata = jsontools.load(filetools.read(fichero_anterior)) self.user_favorites = jsondata self.info_lista = {} @@ -94,7 +92,7 @@ class KodfavouritesData(object): self.user_favorites = [] else: jsondata = jsontools.load(filetools.read(self.user_favorites_file)) - if not 'user_favorites' in jsondata or not 'info_lista' in jsondata: # formato incorrecto + if not 'user_favorites' in jsondata or not 'info_lista' in jsondata: # incorrect format self.user_favorites = [] else: self.user_favorites = jsondata['user_favorites'] @@ -103,8 +101,8 @@ class KodfavouritesData(object): if len(self.user_favorites) == 0: self.info_lista = {} - - # Crear algunas carpetas por defecto + + # Create some default folders self.user_favorites.append({ 'title': config.get_localized_string(30122), 'items': [] }) self.user_favorites.append({ 'title': config.get_localized_string(30123), 'items': [] }) self.user_favorites.append({ 'title': config.get_localized_string(70149), 'items': [] }) @@ -112,7 +110,7 @@ class KodfavouritesData(object): self.save() def save(self): - if 'created' not in self.info_lista: + if 'created' not in self.info_lista: self.info_lista['created'] = fechahora_actual() self.info_lista['updated'] = fechahora_actual() @@ -120,34 +118,34 @@ class KodfavouritesData(object): jsondata['user_favorites'] = self.user_favorites jsondata['info_lista'] = self.info_lista if not filetools.write(self.user_favorites_file, jsontools.dump(jsondata)): - platformtools.dialog_ok('Alfa', config.get_localized_string(70614), os.path.basename(self.user_favorites_file)) + platformtools.dialog_ok('KoD', config.get_localized_string(70614), os.path.basename(self.user_favorites_file)) # ============================ -# Añadir desde menú contextual +# Add from context menu # ============================ def addFavourite(item): logger.info() alfav = KodfavouritesData() - # Si se llega aquí mediante el menú contextual, hay que recuperar los parámetros action y channel + # If you get here through the context menu, you must retrieve the action and channel parameters if item.from_action: item.__dict__['action'] = item.__dict__.pop('from_action') if item.from_channel: item.__dict__['channel'] = item.__dict__.pop('from_channel') - # Limpiar título + #Clear title item.title = re.sub(r'\[COLOR [^\]]*\]', '', item.title.replace('[/COLOR]', '')).strip() if item.text_color: item.__dict__.pop('text_color') - # Diálogo para escoger/crear carpeta + # Dialog to choose / create folder i_perfil = _selecciona_perfil(alfav, config.get_localized_string(70546)) if i_perfil == -1: return False - # Detectar que el mismo enlace no exista ya en la carpeta - campos = ['channel','action','url','extra','list_type'] # 
si todos estos campos coinciden se considera que el enlace ya existe + # Detect that the same link does not already exist in the folder + campos = ['channel','action','url','extra','list_type'] # if all these fields match the link is considered to already exist for enlace in alfav.user_favorites[i_perfil]['items']: it = Item().fromurl(enlace) repe = True @@ -159,25 +157,25 @@ def addFavourite(item): platformtools.dialog_notification(config.get_localized_string(70615), config.get_localized_string(70616)) return False - # Si es una película/serie, completar información de tmdb si no se tiene activado tmdb_plus_info (para season/episodio no hace falta pq ya se habrá hecho la "segunda pasada") + # If it is a movie / series, fill in tmdb information if tmdb_plus_info is not activated (for season / episode it is not necessary because the "second pass" will have already been done) if (item.contentType == 'movie' or item.contentType == 'tvshow') and not config.get_setting('tmdb_plus_info', default=False): from core import tmdb - tmdb.set_infoLabels(item, True) # obtener más datos en "segunda pasada" (actores, duración, ...) + tmdb.set_infoLabels(item, True) # get more data in "second pass" (actors, duration, ...) - # Añadir fecha en que se guarda + # Add date saved item.date_added = fechahora_actual() - # Guardar + # save alfav.user_favorites[i_perfil]['items'].append(item.tourl()) alfav.save() platformtools.dialog_notification(config.get_localized_string(70531), config.get_localized_string(70532) % alfav.user_favorites[i_perfil]['title']) - + return True # ==================== -# NAVEGACIÓN +# NAVIGATION # ==================== def mainlist(item): @@ -211,7 +209,7 @@ def mainlist(item): itemlist.append(Item(channel=item.channel, action='mostrar_perfil', title=perfil['title'], plot=plot, i_perfil=i_perfil, context=context)) itemlist.append(item.clone(action='crear_perfil', title=config.get_localized_string(70542), folder=False)) - + itemlist.append(item.clone(action='mainlist_listas', title=config.get_localized_string(70603))) return itemlist @@ -240,15 +238,15 @@ def mostrar_perfil(item): it.plot += '[CR][COLOR blue]Url:[/COLOR] ' + it.url if isinstance(it.url, str) else '...' if it.date_added != '': it.plot += '[CR][COLOR blue]' + config.get_localized_string(70469) + ':[/COLOR] ' + it.date_added - # Si no es una url, ni tiene la ruta del sistema, convertir el path ya que se habrá copiado de otro dispositivo. - # Sería más óptimo que la conversión se hiciera con un menú de importar, pero de momento se controla en run-time. + # If it is not a url, nor does it have the system path, convert the path since it will have been copied from another device. + # It would be more optimal if the conversion was done with an import menu, but at the moment it is controlled in run-time. 
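# Illustrative sketch of the run-time path conversion described above, assuming a
# hypothetical runtime path and a thumbnail value copied from a Windows device; the
# real code below uses the add-on's filetools/config helpers rather than os.path directly.
import os
ruta_runtime = '/storage/.kodi/addons/plugin.video.kod'  # assumed; normally config.get_runtime_path()
thumb = 'C:\\Kodi\\addons\\plugin.video.kod\\resources\\media\\channels\\thumb\\example.png'  # hypothetical value
ruta, fichero = os.path.split(thumb.replace('\\', '/'))  # normalise Windows separators so the split also works on Linux
if 'channels' in ruta and 'thumb' in ruta:
    thumb = os.path.join(ruta_runtime, 'resources', 'media', 'channels', 'thumb', fichero)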
if it.thumbnail and '://' not in it.thumbnail and not it.thumbnail.startswith(ruta_runtime): ruta, fichero = filetools.split(it.thumbnail) - if ruta == '' and fichero == it.thumbnail: # en linux el split con un path de windows no separa correctamente + if ruta == '' and fichero == it.thumbnail: # in linux the split with a windows path does not separate correctly ruta, fichero = filetools.split(it.thumbnail.replace('\\','/')) - if 'channels' in ruta and 'thumb' in ruta: + if 'channels' in ruta and 'thumb' in ruta: it.thumbnail = filetools.join(ruta_runtime, 'resources', 'media', 'channels', 'thumb', fichero) - elif 'themes' in ruta and 'default' in ruta: + elif 'themes' in ruta and 'default' in ruta: it.thumbnail = filetools.join(ruta_runtime, 'resources', 'media', 'themes', 'default', fichero) itemlist.append(it) @@ -256,28 +254,27 @@ def mostrar_perfil(item): return itemlist -# Rutinas internas compartidas -# ---------------------------- +# Shared internal routines -# Diálogo para seleccionar/crear una carpeta. Devuelve índice de la carpeta en user_favorites (-1 si cancel) +# Dialog to select / create a folder. Returns index of folder on user_favorites (-1 if cancel) def _selecciona_perfil(alfav, titulo='Seleccionar carpeta', i_actual=-1): acciones = [(perfil['title'] if i_p != i_actual else '[I][COLOR pink]%s[/COLOR][/I]' % perfil['title']) for i_p, perfil in enumerate(alfav.user_favorites)] acciones.append('Crear nueva carpeta') i_perfil = -1 - while i_perfil == -1: # repetir hasta seleccionar una carpeta o cancelar + while i_perfil == -1: # repeat until a folder is selected or cancel ret = platformtools.dialog_select(titulo, acciones) - if ret == -1: return -1 # pedido cancel + if ret == -1: return -1 # order cancel if ret < len(alfav.user_favorites): i_perfil = ret - else: # crear nueva carpeta + else: # create new folder if _crea_perfil(alfav): i_perfil = len(alfav.user_favorites) - 1 return i_perfil -# Diálogo para crear una carpeta +# Dialog to create a folder def _crea_perfil(alfav): titulo = platformtools.dialog_input(default='', heading=config.get_localized_string(70551)) if titulo is None or titulo == '': @@ -289,8 +286,7 @@ def _crea_perfil(alfav): return True -# Gestión de perfiles y enlaces -# ----------------------------- +# Profile and link management def crear_perfil(item): logger.info() @@ -325,7 +321,7 @@ def eliminar_perfil(item): if not alfav.user_favorites[item.i_perfil]: return False - # Pedir confirmación + # Ask for confirmation if not platformtools.dialog_yesno(config.get_localized_string(70618), config.get_localized_string(70619)): return False del alfav.user_favorites[item.i_perfil] @@ -342,9 +338,9 @@ def acciones_enlace(item): config.get_localized_string(70624), config.get_localized_string(70548), config.get_localized_string(70625), config.get_localized_string(70626), config.get_localized_string(70627), config.get_localized_string(70628)] - ret = platformtools.dialog_select('Acción a ejecutar', acciones) - if ret == -1: - return False # pedido cancel + ret = platformtools.dialog_select('Action to execute', acciones) + if ret == -1: + return False # order cancel elif ret == 0: return editar_enlace_titulo(item) elif ret == 1: @@ -375,11 +371,11 @@ def editar_enlace_titulo(item): if not alfav.user_favorites[item.i_perfil]['items'][item.i_enlace]: return False it = Item().fromurl(alfav.user_favorites[item.i_perfil]['items'][item.i_enlace]) - - titulo = platformtools.dialog_input(default=it.title, heading='Cambiar título del enlace') + + titulo = 
platformtools.dialog_input(default=it.title, heading='Change link title') if titulo is None or titulo == '' or titulo == it.title: return False - + it.title = titulo alfav.user_favorites[item.i_perfil]['items'][item.i_enlace] = it.tourl() @@ -397,13 +393,13 @@ def editar_enlace_color(item): if not alfav.user_favorites[item.i_perfil]['items'][item.i_enlace]: return False it = Item().fromurl(alfav.user_favorites[item.i_perfil]['items'][item.i_enlace]) - + colores = ['green','yellow','red','blue','white','orange','lime','aqua','pink','violet','purple','tomato','olive','antiquewhite','gold'] opciones = ['[COLOR %s]%s[/COLOR]' % (col, col) for col in colores] - ret = platformtools.dialog_select('Seleccionar color:', opciones) + ret = platformtools.dialog_select('Select color:', opciones) - if ret == -1: return False # pedido cancel + if ret == -1: return False # order cancel it.text_color = colores[ret] alfav.user_favorites[item.i_perfil]['items'][item.i_enlace] = it.tourl() @@ -421,13 +417,13 @@ def editar_enlace_thumbnail(item): if not alfav.user_favorites[item.i_perfil]['items'][item.i_enlace]: return False it = Item().fromurl(alfav.user_favorites[item.i_perfil]['items'][item.i_enlace]) - - # A partir de Kodi 17 se puede usar xbmcgui.Dialog().select con thumbnails (ListItem & useDetails=True) + + # Starting with Kodi 17, you can use xbmcgui.Dialog (). Select with thumbnails (ListItem & useDetails = True) is_kodi17 = (config.get_platform(True)['num_version'] >= 17.0) if is_kodi17: import xbmcgui - # Diálogo para escoger thumbnail (el del canal o iconos predefinidos) + # Dialog to choose thumbnail (the channel or predefined icons) opciones = [] ids = [] try: @@ -444,7 +440,7 @@ def editar_enlace_thumbnail(item): ids.append(channel_parameters['thumbnail']) except: pass - + resource_path = os.path.join(config.get_runtime_path(), 'resources', 'media', 'themes', 'default') for f in sorted(os.listdir(resource_path)): if f.startswith('thumb_') and not f.startswith('thumb_intervenido') and f != 'thumb_back.png': @@ -458,11 +454,11 @@ def editar_enlace_thumbnail(item): ids.append(os.path.join(resource_path, f)) if is_kodi17: - ret = xbmcgui.Dialog().select('Seleccionar thumbnail:', opciones, useDetails=True) + ret = xbmcgui.Dialog().select('Select thumbnail:', opciones, useDetails=True) else: - ret = platformtools.dialog_select('Seleccionar thumbnail:', opciones) + ret = platformtools.dialog_select('Select thumbnail:', opciones) - if ret == -1: return False # pedido cancel + if ret == -1: return False # order cancel it.thumbnail = ids[ret] @@ -480,8 +476,8 @@ def editar_enlace_carpeta(item): if not alfav.user_favorites[item.i_perfil]: return False if not alfav.user_favorites[item.i_perfil]['items'][item.i_enlace]: return False - # Diálogo para escoger/crear carpeta - i_perfil = _selecciona_perfil(alfav, 'Mover enlace a:', item.i_perfil) + # Dialog to choose / create folder + i_perfil = _selecciona_perfil(alfav, 'Move link to:', item.i_perfil) if i_perfil == -1 or i_perfil == item.i_perfil: return False alfav.user_favorites[i_perfil]['items'].append(alfav.user_favorites[item.i_perfil]['items'][item.i_enlace]) @@ -499,26 +495,26 @@ def editar_enlace_lista(item): if not alfav.user_favorites[item.i_perfil]: return False if not alfav.user_favorites[item.i_perfil]['items'][item.i_enlace]: return False - # Diálogo para escoger lista + # Dialog to choose list opciones = [] itemlist_listas = mainlist_listas(item) for it in itemlist_listas: - if it.lista != '' and '[<---]' not in it.title: # descarta item crear 
y lista activa + if it.lista != '' and '[<---]' not in it.title: # discard item create and active list opciones.append(it.lista) if len(opciones) == 0: - platformtools.dialog_ok('Alfa', 'No hay otras listas dónde mover el enlace.', 'Puedes crearlas desde el menú Gestionar listas de enlaces') + platformtools.dialog_ok('KoD', 'There are no other lists where to move the link.', 'You can create them from the Manage link lists menu') return False - ret = platformtools.dialog_select('Seleccionar lista destino', opciones) + ret = platformtools.dialog_select('Select destination list', opciones) - if ret == -1: - return False # pedido cancel + if ret == -1: + return False # order cancel alfav_destino = KodfavouritesData(opciones[ret]) - # Diálogo para escoger/crear carpeta en la lista de destino - i_perfil = _selecciona_perfil(alfav_destino, 'Seleccionar carpeta destino', -1) + # Dialog to choose / create folder in the destination list + i_perfil = _selecciona_perfil(alfav_destino, 'Select destination folder', -1) if i_perfil == -1: return False alfav_destino.user_favorites[i_perfil]['items'].append(alfav.user_favorites[item.i_perfil]['items'][item.i_enlace]) @@ -544,8 +540,7 @@ def eliminar_enlace(item): return True -# Mover perfiles y enlaces (arriba, abajo, top, bottom) -# ------------------------ +# Move profiles and links (up, down, top, bottom) def mover_perfil(item): logger.info() alfav = KodfavouritesData() @@ -568,28 +563,28 @@ def mover_enlace(item): return True -# Mueve un item determinado (numérico) de una lista (arriba, abajo, top, bottom) y devuelve la lista modificada +# Move a certain item (numeric) from a list (up, down, top, bottom) and return the modified list def _mover_item(lista, i_selected, direccion): last_i = len(lista) - 1 - if i_selected > last_i or i_selected < 0: return lista # índice inexistente en lista + if i_selected > last_i or i_selected < 0: return lista # non-existent index in list if direccion == 'arriba': - if i_selected == 0: # Ya está arriba de todo + if i_selected == 0: # It's already on top of everything return lista lista.insert(i_selected - 1, lista.pop(i_selected)) elif direccion == 'abajo': - if i_selected == last_i: # Ya está abajo de todo + if i_selected == last_i: # It's already down return lista lista.insert(i_selected + 1, lista.pop(i_selected)) elif direccion == 'top': - if i_selected == 0: # Ya está arriba de todo + if i_selected == 0: # It's already on top of everything return lista lista.insert(0, lista.pop(i_selected)) elif direccion == 'bottom': - if i_selected == last_i: # Ya está abajo de todo + if i_selected == last_i: # It's already down return lista lista.insert(last_i, lista.pop(i_selected)) @@ -598,7 +593,7 @@ def _mover_item(lista, i_selected, direccion): # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# Gestionar diferentes listas de alfavoritos +# Manage different alphabetical lists # ------------------------------------------ def mainlist_listas(item): @@ -607,19 +602,19 @@ def mainlist_listas(item): item.category = 'Listas' lista_activa = get_lista_activa() - + import glob path = os.path.join(config.get_data_path(), PREFIJO_LISTA+'*.json') for fichero in glob.glob(path): lista = os.path.basename(fichero) nombre = get_name_from_filename(lista) - titulo = nombre if lista != lista_activa else '[COLOR gold]%s[/COLOR] [<---]' % nombre - + titulo = nombre if lista != lista_activa else nombre + itemlist.append(item.clone(action='acciones_lista', lista=lista, title=titulo, folder=False)) 
itemlist.append(item.clone(action='acciones_nueva_lista', title=config.get_localized_string(70642), folder=False)) - + return itemlist @@ -631,7 +626,7 @@ def acciones_lista(item): ret = platformtools.dialog_select(item.lista, acciones) - if ret == -1: + if ret == -1: return False # pedido cancel elif ret == 0: return activar_lista(item) @@ -650,7 +645,7 @@ def activar_lista(item): fullfilename = os.path.join(config.get_data_path(), item.lista) if not os.path.exists(fullfilename): - platformtools.dialog_ok('Alfa', config.get_localized_string(70630), item.lista) + platformtools.dialog_ok('KoD', config.get_localized_string(70630), item.lista) return False config.set_setting('lista_activa', item.lista) @@ -668,9 +663,9 @@ def renombrar_lista(item): fullfilename_current = os.path.join(config.get_data_path(), item.lista) if not os.path.exists(fullfilename_current): - platformtools.dialog_ok('Alfa', config.get_localized_string(70630), fullfilename_current) + platformtools.dialog_ok('KoD', config.get_localized_string(70630), fullfilename_current) return False - + nombre = get_name_from_filename(item.lista) titulo = platformtools.dialog_input(default=nombre, heading=config.get_localized_string(70612)) if titulo is None or titulo == '' or titulo == nombre: @@ -680,17 +675,17 @@ def renombrar_lista(item): filename = get_filename_from_name(titulo) fullfilename = os.path.join(config.get_data_path(), filename) - # Comprobar que el nuevo nombre no exista + # Check that the new name does not exist if os.path.exists(fullfilename): - platformtools.dialog_ok('Alfa', config.get_localized_string(70613), fullfilename) + platformtools.dialog_ok('KoD', config.get_localized_string(70613), fullfilename) return False - # Rename del fichero + # Rename the file if not filetools.rename(fullfilename_current, filename): - platformtools.dialog_ok('Alfa', config.get_localized_string(70631), fullfilename) + platformtools.dialog_ok('KoD', config.get_localized_string(70631), fullfilename) return False - # Update settings si es la lista activa + # Update settings if it is the active list if item.lista == get_lista_activa(): config.set_setting('lista_activa', filename) @@ -704,11 +699,11 @@ def eliminar_lista(item): fullfilename = os.path.join(config.get_data_path(), item.lista) if not os.path.exists(fullfilename): - platformtools.dialog_ok('Alfa', config.get_localized_string(70630), item.lista) + platformtools.dialog_ok('KoD', config.get_localized_string(70630), item.lista) return False if item.lista == get_lista_activa(): - platformtools.dialog_ok('Alfa', config.get_localized_string(70632), item.lista) + platformtools.dialog_ok('KoD', config.get_localized_string(70632), item.lista) return False if not platformtools.dialog_yesno(config.get_localized_string(70606), config.get_localized_string(70633) + ' %s ?' 
% item.lista): return False @@ -720,15 +715,15 @@ def eliminar_lista(item): def informacion_lista(item): logger.info() - + fullfilename = os.path.join(config.get_data_path(), item.lista) if not os.path.exists(fullfilename): - platformtools.dialog_ok('Alfa', config.get_localized_string(70630), item.lista) + platformtools.dialog_ok('KoD', config.get_localized_string(70630), item.lista) return False alfav = KodfavouritesData(item.lista) - - txt = 'Lista: [COLOR gold]%s[/COLOR]' % item.lista + + txt = 'Lista: %s' % item.lista txt += '[CR]' + config.get_localized_string(70634) + ' ' + alfav.info_lista['created'] + ' ' + config.get_localized_string(70635) + ' ' + alfav.info_lista['updated'] if 'downloaded_date' in alfav.info_lista: @@ -736,7 +731,7 @@ def informacion_lista(item): if 'tinyupload_date' in alfav.info_lista: txt += '[CR]' + config.get_localized_string(70638) + ' ' + alfav.info_lista['tinyupload_date'] + ' ' + config.get_localized_string(70639) + ' [COLOR blue]' + alfav.info_lista['tinyupload_code'] + '[/COLOR]' - + txt += '[CR]' + config.get_localized_string(70640) + ' ' + str(len(alfav.user_favorites)) for perfil in alfav.user_favorites: txt += '[CR]- %s (%d %s)' % (perfil['title'], len(perfil['items']), config.get_localized_string(70641)) @@ -750,21 +745,21 @@ def compartir_lista(item): fullfilename = os.path.join(config.get_data_path(), item.lista) if not os.path.exists(fullfilename): - platformtools.dialog_ok('Alfa', config.get_localized_string(70630), fullfilename) + platformtools.dialog_ok('KoD', config.get_localized_string(70630), fullfilename) return False try: progreso = platformtools.dialog_progress_bg(config.get_localized_string(70643), config.get_localized_string(70644)) - - # Acceso a la página principal de tinyupload para obtener datos necesarios + + # Access to the tinyupload home page to obtain necessary data from core import httptools, scrapertools data = httptools.downloadpage('http://s000.tinyupload.com/index.php').data upload_url = scrapertools.find_single_match(data, 'form action="([^"]+)') sessionid = scrapertools.find_single_match(upload_url, 'sid=(.+)') - + progreso.update(10, config.get_localized_string(70645), config.get_localized_string(70646)) - # Envío del fichero a tinyupload mediante multipart/form-data + # Sending the file to tinyupload using multipart / form-data from future import standard_library standard_library.install_aliases() from lib import MultipartPostHandler @@ -773,31 +768,31 @@ def compartir_lista(item): params = { 'MAX_FILE_SIZE' : '52428800', 'file_description' : '', 'sessionid' : sessionid, 'uploaded_file' : open(fullfilename, 'rb') } handle = opener.open(upload_url, params) data = handle.read() - + progreso.close() if not 'File was uploaded successfuly' in data: logger.debug(data) - platformtools.dialog_ok('Alfa', config.get_localized_string(70647)) + platformtools.dialog_ok('KoD', config.get_localized_string(70647)) return False codigo = scrapertools.find_single_match(data, 'href="index\.php\?file_id=([^"]+)') except: - platformtools.dialog_ok('Alfa', config.get_localized_string(70647), item.lista) + platformtools.dialog_ok('KoD', config.get_localized_string(70647), item.lista) return False - # Apuntar código en fichero de log y dentro de la lista + # Point code in log file and inside the list save_log_lista_shared(config.get_localized_string(70648) + ' ' + item.lista + ' ' + codigo + ' ' + config.get_localized_string(70649)) - + alfav = KodfavouritesData(item.lista) alfav.info_lista['tinyupload_date'] = fechahora_actual() 
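# Illustrative sketch of the multipart/form-data upload performed above, rewritten with the
# requests library (not what the add-on uses); upload_url, sessionid and the file name are
# assumed placeholder values.
import requests
upload_url = 'http://s000.tinyupload.com/cgi-bin/upload.cgi?sid=EXAMPLE'  # assumed form action scraped from the page
sessionid = 'EXAMPLE'
with open('kod_favourites_mylist.json', 'rb') as f:  # hypothetical list file
    resp = requests.post(upload_url,
                         data={'MAX_FILE_SIZE': '52428800', 'file_description': '', 'sessionid': sessionid},
                         files={'uploaded_file': f})
ok = 'File was uploaded successfuly' in resp.text  # same success marker the add-on checks for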
alfav.info_lista['tinyupload_code'] = codigo alfav.save() - platformtools.dialog_ok('Alfa', config.get_localized_string(70650), codigo) + platformtools.dialog_ok('KoD', config.get_localized_string(70650), codigo) return True - + def acciones_nueva_lista(item): @@ -810,8 +805,8 @@ def acciones_nueva_lista(item): ret = platformtools.dialog_select(config.get_localized_string(70608), acciones) - if ret == -1: - return False # pedido cancel + if ret == -1: + return False # order cancel elif ret == 0: return crear_lista(item) @@ -845,12 +840,12 @@ def crear_lista(item): filename = get_filename_from_name(titulo) fullfilename = os.path.join(config.get_data_path(), filename) - # Comprobar que el fichero no exista ya + # Check that the file does not already exist if os.path.exists(fullfilename): - platformtools.dialog_ok('Alfa', config.get_localized_string(70613), fullfilename) + platformtools.dialog_ok('KoD', config.get_localized_string(70613), fullfilename) return False - # Provocar que se guarde con las carpetas vacías por defecto + # Cause it to be saved with empty folders by default alfav = KodfavouritesData(filename) platformtools.itemlist_refresh() @@ -860,7 +855,7 @@ def crear_lista(item): def descargar_lista(item, url): logger.info() from core import httptools, scrapertools - + if 'tinyupload.com/' in url: try: from urllib.parse import urlparse @@ -869,15 +864,15 @@ def descargar_lista(item, url): down_url, url_name = scrapertools.find_single_match(data, ' href="(download\.php[^"]*)">([^<]*)') url_json = '{uri.scheme}://{uri.netloc}/'.format(uri=urlparse(url)) + down_url except: - platformtools.dialog_ok('Alfa', config.get_localized_string(70655), url) + platformtools.dialog_ok('KoD', config.get_localized_string(70655), url) return False elif 'zippyshare.com/' in url: from core import servertools video_urls, puedes, motivo = servertools.resolve_video_urls_for_playing('zippyshare', url) - + if not puedes: - platformtools.dialog_ok('Alfa', config.get_localized_string(70655), motivo) + platformtools.dialog_ok('KoD', config.get_localized_string(70655), motivo) return False url_json = video_urls[0][1] # https://www58.zippyshare.com/d/qPzzQ0UM/25460/kodfavourites-testeanding.json url_name = url_json[url_json.rfind('/')+1:] @@ -893,19 +888,19 @@ def descargar_lista(item, url): # Download json data = httptools.downloadpage(url_json).data - - # Verificar formato json de kodfavourites y añadir info de la descarga + + # Verify ksonfavourites json format and add download info jsondata = jsontools.load(data) if 'user_favorites' not in jsondata or 'info_lista' not in jsondata: logger.debug(data) - platformtools.dialog_ok('Alfa', config.get_localized_string(70656)) + platformtools.dialog_ok('KoD', config.get_localized_string(70656)) return False jsondata['info_lista']['downloaded_date'] = fechahora_actual() jsondata['info_lista']['downloaded_from'] = url data = jsontools.dump(jsondata) - # Pedir nombre para la lista descargada + # Ask for name for downloaded list nombre = get_name_from_filename(url_name) titulo = platformtools.dialog_input(default=nombre, heading=config.get_localized_string(70657)) if titulo is None or titulo == '': @@ -915,14 +910,14 @@ def descargar_lista(item, url): filename = get_filename_from_name(titulo) fullfilename = os.path.join(config.get_data_path(), filename) - # Si el nuevo nombre ya existe pedir confirmación para sobrescribir + # If the new name already exists ask for confirmation to overwrite if os.path.exists(fullfilename): - if not platformtools.dialog_yesno('Alfa', 
config.get_localized_string(70613), config.get_localized_string(70658), filename): + if not platformtools.dialog_yesno('KoD', config.get_localized_string(70613), config.get_localized_string(70658), filename): return False - - if not filetools.write(fullfilename, data): - platformtools.dialog_ok('Alfa', config.get_localized_string(70659), filename) - platformtools.dialog_ok('Alfa', config.get_localized_string(70660), filename) + if not filetools.write(fullfilename, data): + platformtools.dialog_ok('KoD', config.get_localized_string(70659), filename) + + platformtools.dialog_ok('KoD', config.get_localized_string(70660), filename) platformtools.itemlist_refresh() return True diff --git a/specials/news.py b/specials/news.py index 3d9a4ad1..2287a15a 100644 --- a/specials/news.py +++ b/specials/news.py @@ -11,25 +11,19 @@ from core.support import typo PY3 = False if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int -import glob -import os -import re -import time +import glob, os, re, time from threading import Thread from channelselector import get_thumb, auto_filter -from core import channeltools -from core import jsontools -from core import scrapertools, support +from core import channeltools, jsontools, scrapertools, support from core.item import Item -from platformcode import config, logger -from platformcode import platformtools +from platformcode import config, logger, platformtools THUMBNAILS = {'0': 'posters', '1': 'banners', '2': 'squares'} __perfil__ = config.get_setting('perfil', "news") -# Fijar perfil de color +# Set color profile perfil = [['0xFF0B7B92', '0xFF89FDFB', '0xFFACD5D4'], ['0xFFB31313', '0xFFFF9000', '0xFFFFEE82'], ['0xFF891180', '0xFFCB22D7', '0xFFEEA1EB'], @@ -141,7 +135,7 @@ def get_channels_list(): list_canales = {'peliculas': [], 'series': [],'anime': [], 'italiano':[], 'documentales': []} any_active = False - # Rellenar listas de canales disponibles + # Fill available channel lists channels_path = os.path.join(config.get_runtime_path(), "channels", '*.json') channel_language = config.get_setting("channel_language", default="all") if channel_language =="auto": @@ -151,16 +145,16 @@ def get_channels_list(): channel_id = os.path.basename(infile)[:-5] channel_parameters = channeltools.get_channel_parameters(channel_id) - # No incluir si es un canal inactivo + # Do not include if it is an inactive channel if not channel_parameters["active"]: continue - # No incluir si el canal es en un idioma filtrado + # Do not include if the channel is in a filtered language if channel_language != "all" and channel_language not in str(channel_parameters["language"]) \ and "*" not in channel_parameters["language"]: continue - # Incluir en cada categoria, si en su configuracion el canal esta activado para mostrar novedades + # Include in each category, if in your configuration the channel is activated to show news for categoria in list_canales: include_in_newest = config.get_setting("include_in_newest_" + categoria, channel_id) @@ -267,7 +261,7 @@ def novedades(item): if any_active and len(list_canales[item.extra])>0: import math - # fix float porque la division se hace mal en python 2.x + # fix float because division is done poorly in python 2.x number_of_channels = float(100) / len(list_canales[item.extra]) for index, channel in enumerate(list_canales[item.extra]): @@ -294,7 +288,7 @@ def novedades(item): progreso.update(percentage, "", config.get_localized_string(60520) % channel_title) get_newest(channel_id, item.extra) - # Modo Multi Thread: esperar q 
todos los hilos terminen + # Multi Thread mode: wait for all threads to finish if multithread: pendent = [a for a in threads if a.isAlive()] t = float(100) / len(pendent) @@ -326,15 +320,15 @@ def novedades(item): if mode != 'normal': result_mode=0 - if result_mode == 0: # Agrupados por contenido + if result_mode == 0: # Grouped by content ret = group_by_content(list_newest) - elif result_mode == 1: # Agrupados por canales + elif result_mode == 1: # Grouped by channels ret = group_by_channel(list_newest) - else: # Sin agrupar + else: # Ungrouped ret = no_group(list_newest) while time.time() - start_time < 2: - # mostrar cuadro de progreso con el tiempo empleado durante almenos 2 segundos + # show progress chart with time spent for at least 2 seconds time.sleep(0.5) if mode == 'normal': progreso.close() @@ -356,8 +350,8 @@ def get_newest(channel_id, categoria): global list_newest global list_newest_tourl - # Solicitamos las novedades de la categoria (item.extra) buscada en el canal channel - # Si no existen novedades para esa categoria en el canal devuelve una lista vacia + # We request the news of the category (item.extra) searched in the channel channel + # If there are no news for that category in the channel, it returns an empty list try: puede = True @@ -381,7 +375,7 @@ def get_newest(channel_id, categoria): exist=True else: cache_node = {} - #logger.debug('cache node: %s' % cache_node) + # logger.debug('cache node: %s' % cache_node) for item in list_result: # logger.info("item="+item.tostring()) item.channel = channel_id @@ -399,11 +393,11 @@ def get_newest(channel_id, categoria): def get_title(item): - #support.log("ITEM NEWEST ->", item) + # support.log("ITEM NEWEST ->", item) # item.contentSerieName c'è anche se è un film if item.contentSerieName and item.contentType != 'movie': # Si es una serie title = item.contentSerieName - #title = re.compile("\[.*?\]", re.DOTALL).sub("", item.contentSerieName) + # title = re.compile("\[.*?\]", re.DOTALL).sub("", item.contentSerieName) if not scrapertools.get_season_and_episode(title) and item.contentEpisodeNumber: # contentSeason non c'è in support if not item.contentSeason: @@ -414,14 +408,14 @@ def get_title(item): if seas: title = "%s - %s" % (seas, title) - elif item.contentTitle: # Si es una pelicula con el canal adaptado + elif item.contentTitle: # If it is a movie with the adapted channel title = item.contentTitle - elif item.contentTitle: # Si el canal no esta adaptado + elif item.contentTitle: # If the channel is not adapted title = item.contentTitle - else: # Como ultimo recurso + else: # As a last resort title = item.title - # Limpiamos el titulo de etiquetas de formato anteriores + # We clean the title of previous format labels title = re.compile("\[/*COLO.*?\]", re.DOTALL).sub("", title) title = re.compile("\[/*B\]", re.DOTALL).sub("", title) title = re.compile("\[/*I\]", re.DOTALL).sub("", title) @@ -452,9 +446,9 @@ def no_group(list_result_canal): global channels_id_name for i in list_result_canal: - #support.log("NO GROUP i -> ", i) + # support.log("NO GROUP i -> ", i) canale = channels_id_name[i.channel] - canale = canale # per differenziarlo dal colore delle altre voci + canale = canale # to differentiate it from the color of the other items i.title = get_title(i) + " [" + canale + "]" # i.text_color = color3 @@ -471,12 +465,12 @@ def group_by_channel(list_result_canal): for i in list_result_canal: if i.channel not in dict_canales: dict_canales[i.channel] = [] - # Formatear titulo + # Format title i.title = get_title(i) - # 
Añadimos el contenido al listado de cada canal + # We add the content to the list of each channel dict_canales[i.channel].append(i) - # Añadimos el contenido encontrado en la lista list_result + # We add the content found in the list_result list for c in sorted(dict_canales): itemlist.append(Item(channel="news", title=channels_id_name[c] + ':', text_color=color1, text_bold=True)) @@ -498,10 +492,10 @@ def group_by_content(list_result_canal): list_result = [] for i in list_result_canal: - # Formatear titulo + # Format title i.title = get_title(i) - # Eliminar tildes y otros caracteres especiales para la key + # Remove tildes and other special characters for the key import unicodedata try: new_key = i.title.lower().strip().decode("UTF-8") @@ -511,16 +505,16 @@ def group_by_content(list_result_canal): new_key = i.title if new_key in dict_contenidos: - # Si el contenido ya estaba en el diccionario añadirlo a la lista de opciones... + #If the content was already in the dictionary add it to the list of options ... dict_contenidos[new_key].append(i) - else: # ...sino añadirlo al diccionario + else: # ...but add it to the dictionary dict_contenidos[new_key] = [i] - # Añadimos el contenido encontrado en la lista list_result + # We add the content found in the list_result list for v in list(dict_contenidos.values()): title = v[0].title if len(v) > 1: - # Eliminar de la lista de nombres de canales los q esten duplicados + # Remove duplicate q's from the channel names list canales_no_duplicados = [] for i in v: if i.channel not in canales_no_duplicados: @@ -622,16 +616,16 @@ def setting_channel(item): channel_id = os.path.basename(infile)[:-5] channel_parameters = channeltools.get_channel_parameters(channel_id) - # No incluir si es un canal inactivo + # Do not include if it is an inactive channel if not channel_parameters["active"]: continue - # No incluir si el canal es en un idioma filtrado + # Do not include if the channel is in a filtered language if channel_language != "all" and channel_language not in str(channel_parameters["language"]) \ and "*" not in channel_parameters["language"]: continue - # No incluir si en su configuracion el canal no existe 'include_in_newest' + # Do not include if the channel does not exist 'include_in_newest' in your configuration include_in_newest = config.get_setting("include_in_newest_" + item.extra, channel_id) if include_in_newest is None: continue diff --git a/specials/renumbertools.py b/specials/renumbertools.py deleted file mode 100644 index ddb22080..00000000 --- a/specials/renumbertools.py +++ /dev/null @@ -1,1008 +0,0 @@ -# -*- coding: utf-8 -*- -# -------------------------------------------------------------------------------- -# renumeratetools - se encarga de renumerar episodios -# -------------------------------------------------------------------------------- - -#from builtins import str -import sys -PY3 = False -if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int -from builtins import range -from builtins import object - -import os - -try: - import xbmcgui -except: - xbmcgui = None - -from platformcode import config, logger -from core import jsontools -from core.item import Item -from platformcode import platformtools - -TAG_TVSHOW_RENUMERATE = "TVSHOW_RENUMBER" -TAG_SEASON_EPISODE = "season_episode" -__channel__ = "renumbertools" - - -def access(): - """ - Devuelve si se puede usar o no renumbertools - """ - allow = False - - if config.is_xbmc(): - allow = True - - return allow - - -def context(item): - """ - Para 
xbmc/kodi que pueden mostrar el menú contextual, se añade un menu para configuración - la opción de renumerar, sólo si es para series. - - @param item: elemento para obtener la información y ver que contexto añadir - @type item: item - @return: lista de opciones a mostrar en el menú contextual - @rtype: list - """ - - # Dependiendo de como sea el contexto lo guardamos y añadimos las opciones de filtertools. - if isinstance(item.context, str): - _context = item.context.split("|") - elif isinstance(item.context, list): - _context = item.context - else: - _context = [] - - if access(): - dict_data = {"title": config.get_localized_string(70585), "action": "config_item", "channel": "renumbertools"} - _context.append(dict_data) - - return _context - - -def show_option(channel, itemlist): - if access(): - itemlist.append(Item(channel=__channel__, title="[COLOR yellow]" + config.get_localized_string(70586)+ "[/COLOR]", - action="load", from_channel=channel)) - - return itemlist - - -def load(item): - return mainlist(channel=item.from_channel) - - -def mainlist(channel): - """ - Muestra una lista de las series renumeradas - - :param channel: nombre del canal para obtener las series renumeradas - :type channel: str - :return: lista de Item - :rtype: list[Item] - """ - logger.info() - itemlist = [] - dict_series = jsontools.get_node_from_file(channel, TAG_TVSHOW_RENUMERATE) - - idx = 0 - for tvshow in sorted(dict_series): - tag_color = "0xff008000" - if idx % 2 == 0: - tag_color = "blue" - - idx += 1 - name = tvshow - title = config.get_localized_string(70587)+" [COLOR %s][%s][/COLOR]" % (tag_color, name) - - itemlist.append(Item(channel=__channel__, action="config_item", title=title, show=name, from_channel=channel)) - - if len(itemlist) == 0: - itemlist.append(Item(channel=channel, action="mainlist", - title=config.get_localized_string(70588) + ' ' + config.get_localized_string(70585))) - - return itemlist - - -def config_item(item): - """ - muestra una serie renumerada para su configuración - - :param item: item - :type item: Item - """ - logger.info("item %s" % item.tostring("\n")) - - dict_series = jsontools.get_node_from_file(item.from_channel, TAG_TVSHOW_RENUMERATE) - data = dict_series.get(item.show, {}) - - if data: - data = data.get(TAG_SEASON_EPISODE, []) - - ventana = RenumberWindow(show=item.show, channel=item.from_channel, data=data) - del ventana - else: - # tenemos información y devolvemos los datos añadidos para que se muestre en la ventana - if data: - return add_season(data) - # es la primera vez que se añaden datos (usando menú contextual) por lo que no devolvemos nada - # para evitar error al listar los items - else: - data = add_season(data) - write_data(item.from_channel, item.show, data) - - -def numbered_for_tratk(channel, show, season, episode): - """ - Devuelve la temporada y episodio convertido para que se marque correctamente en tratk.tv - - @param channel: Nombre del canal - @type channel: str - @param show: Nombre de la serie a comprobar - @type show: str - @param season: Temporada que devuelve el scrapper - @type season: int - @param episode: Episodio que devuelve el scrapper - @type episode: int - @return: season, episode - @rtype: int, int - """ - logger.info() - - if access(): - show = show.lower() - - new_season = season - new_episode = episode - dict_series = jsontools.get_node_from_file(channel, TAG_TVSHOW_RENUMERATE) - - # ponemos en minusculas el key, ya que previamente hemos hecho lo mismo con show. 
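# Illustrative note on the conversion done by numbered_for_tratk, assuming a renumber map like
# the Fairy Tail example commented further below: with season_episode stored in descending
# season order, e.g. [[4, 150], [3, 96], [2, 48], [1, 0]], an absolute episode 100 from the
# scraper matches the first offset smaller than it (96 < 100), so it is reported to trakt as
# season 3, episode 100 - 96 = 4.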
- for key in list(dict_series.keys()): - new_key = key.lower() - if new_key != key: - dict_series[new_key] = dict_series[key] - del dict_series[key] - - if show in dict_series: - logger.debug(config.get_localized_string(70589) + " %s" % dict_series[show]) - - if len(dict_series[show]['season_episode']) > 1: - for row in dict_series[show]['season_episode']: - - if new_episode > row[1]: - new_episode -= row[1] - new_season = row[0] - break - - else: - new_season = dict_series[show]['season_episode'][0][0] - new_episode += dict_series[show]['season_episode'][0][1] - - logger.debug("%s:%s" % (new_season, new_episode)) - else: - # no se tiene acceso se devuelven los datos. - new_season = season - new_episode = episode - - return new_season, new_episode - - -def borrar(channel, show): - logger.info() - heading = config.get_localized_string(70590) - line1 = config.get_localized_string(70591) + ' [COLOR blue]' + show.strip() + '[/COLOR], ' + config.get_localized_string(70592) - - if platformtools.dialog_yesno(heading, line1) == 1: - dict_series = jsontools.get_node_from_file(channel, TAG_TVSHOW_RENUMERATE) - dict_series.pop(show, None) - - result, json_data = jsontools.update_node(dict_series, channel, TAG_TVSHOW_RENUMERATE) - - if result: - message = config.get_localized_string(60444) - else: - message = config.get_localized_string(70593) - - heading = show.strip() - platformtools.dialog_notification(heading, message) - - -def add_season(data=None): - logger.debug("data %s" % data) - heading = config.get_localized_string(70594) - # default = 2 - # se reordena la lista - list_season_episode = data - if list_season_episode: - list_season_episode.sort(key=lambda el: int(el[0]), reverse=False) - - # if list_season_episode: - # # mostrar temporada + 1 de la lista - # # TODO buscar la primera posicion libre - # default = list_season_episode[0][0]+1 - - season = platformtools.dialog_numeric(0, heading) # , str(default)) - for element in list_season_episode: - if int(season) == element[0]: - platformtools.dialog_notification(config.get_localized_string(70595), config.get_localized_string(70596)) - return - - # si hemos insertado un valor en la temporada - if season != "" and int(season) > 0: - heading = config.get_localized_string(70597) - # default = 0 - # if list_season_episode: - # for e in list_season_episode: - # # mostrar suma episodios de la lista - # # sumar hasta el indice del primer libre encontrado - # default += e[1] - episode = platformtools.dialog_numeric(0, heading) # , str(default)) - - # si hemos insertado un valor en el episodio - if episode != "": - if list_season_episode: - list_season_episode.insert(0, [int(season), int(episode)]) - new_list_season_episode = list_season_episode[:] - return new_list_season_episode - else: - return [[int(season), int(episode)]] - - -def write_data(channel, show, data): - # OBTENEMOS LOS DATOS DEL JSON - dict_series = jsontools.get_node_from_file(channel, TAG_TVSHOW_RENUMERATE) - tvshow = show.strip() - list_season_episode = dict_series.get(tvshow, {}).get(TAG_SEASON_EPISODE, []) - logger.debug("data %s" % list_season_episode) - - if data: - # cambiamos el orden para que se vea en orden descendente y usarse bien en el _data.json - data.sort(key=lambda el: int(el[0]), reverse=True) - dict_renumerate = {TAG_SEASON_EPISODE: data} - - dict_series[tvshow] = dict_renumerate - else: - # hemos borrado todos los elementos, por lo que se borra la serie del fichero - dict_series.pop(tvshow, None) - - result, json_data = jsontools.update_node(dict_series, channel, 
TAG_TVSHOW_RENUMERATE) - - if result: - if data: - message = config.get_localized_string(60446) - else: - message = config.get_localized_string(60444) - else: - message = config.get_localized_string(70593) - - heading = show.strip() - platformtools.dialog_notification(heading, message) - - -if xbmcgui: - - # Align - ALIGN_LEFT = 0 - ALIGN_RIGHT = 1 - ALIGN_CENTER_X = 2 - ALIGN_CENTER_Y = 4 - ALIGN_CENTER = 6 - ALIGN_TRUNCATED = 8 - ALIGN_JUSTIFY = 10 - - # button ids - ID_BUTTON_CLOSE = 3003 - ID_BUTTON_ADD_SEASON = 3008 - ID_BUTTON_INFO = 3009 - ID_CHECK_UPDATE_INTERNET = 3010 - ID_BUTTON_OK = 3012 - ID_BUTTON_CANCEL = 3013 - ID_BUTTON_DELETE = 3014 - - - class RenumberWindow(xbmcgui.WindowDialog): - def __init__(self, *args, **kwargs): - logger.debug() - - #### Compatibilidad con Kodi 18 #### - if config.get_platform(True)['num_version'] < 18: - if xbmcgui.__version__ == "1.2": - self.setCoordinateResolution(1) - else: - self.setCoordinateResolution(5) - - self.show = kwargs.get("show") - self.channel = kwargs.get("channel") - self.data = kwargs.get("data") - self.init = True - - self.mediapath = os.path.join(config.get_runtime_path(), 'resources', 'skins', 'Default', 'media') - self.font = "font12" - - window_bg = xbmcgui.ControlImage(320, 130, 600, 440, - os.path.join(self.mediapath, 'Windows', 'DialogBack.png')) - self.addControl(window_bg) - - header_bg = xbmcgui.ControlImage(window_bg.getX(), window_bg.getY() + 8, window_bg.getWidth(), 35, - os.path.join(self.mediapath, 'Windows', 'dialogheader.png')) - self.addControl(header_bg) - - btn_close_w = 64 - self.btn_close = xbmcgui.ControlButton(window_bg.getX() + window_bg.getWidth() - btn_close_w - 13, - header_bg.getY() + 6, btn_close_w, 30, '', - focusTexture=os.path.join(self.mediapath, 'Controls', - 'DialogCloseButton-focus.png'), - noFocusTexture=os.path.join(self.mediapath, 'Controls', - 'DialogCloseButton.png')) - self.addControl(self.btn_close) - - header_title_x = window_bg.getX() + 20 - header_title = xbmcgui.ControlFadeLabel(header_title_x, header_bg.getY() + 5, self.btn_close.getX() - - header_title_x, 30, font="font12_title", textColor="0xFFFFA500", - _alignment=ALIGN_CENTER) - self.addControl(header_title) - header_title.addLabel(self.show) - - self.controls_bg = xbmcgui.ControlImage(window_bg.getX() + 20, header_bg.getY() + header_bg.getHeight() + 6, - 562, 260, - os.path.join(self.mediapath, 'Windows', 'BackControls.png')) - self.addControl(self.controls_bg) - - self.scroll_bg = xbmcgui.ControlImage(window_bg.getX() + window_bg.getWidth() - 25, self.controls_bg.getY(), - 10, - self.controls_bg.getHeight(), os.path.join(self.mediapath, 'Controls', - 'ScrollBack.png')) - self.addControl(self.scroll_bg) - self.scroll_bg.setVisible(False) - - self.scroll2_bg = xbmcgui.ControlImage(window_bg.getX() + window_bg.getWidth() - 25, - self.controls_bg.getY(), - 10, self.controls_bg.getHeight(), os.path.join(self.mediapath, - 'Controls', - 'ScrollBar.png')) - self.addControl(self.scroll2_bg) - self.scroll2_bg.setVisible(False) - - btn_add_season = xbmcgui.ControlButton(window_bg.getX() + 20, self.controls_bg.getY() + - self.controls_bg.getHeight() + 14, 165, 30, config.get_localized_string(70600), - font=self.font, focusTexture=os.path.join(self.mediapath, 'Controls', - 'KeyboardKey.png'), - noFocusTexture=os.path.join(self.mediapath, 'Controls', - 'KeyboardKeyNF.png'), - alignment=ALIGN_CENTER) - self.addControl(btn_add_season) - - self.btn_info = xbmcgui.ControlButton(window_bg.getX() + 210, btn_add_season.getY(), 120, 30, 
config.get_localized_string(60348), - font=self.font, focusTexture=os.path.join(self.mediapath, 'Controls', - 'KeyboardKey.png'), - noFocusTexture=os.path.join(self.mediapath, 'Controls', - 'KeyboardKeyNF.png'), - alignment=ALIGN_CENTER) - self.addControl(self.btn_info) - - check_update_internet_w = 235 - # Versiones antiguas no admite algunas texturas - if xbmcgui.__version__ in ["1.2", "2.0"]: - self.check_update_internet = xbmcgui.ControlRadioButton( - window_bg.getX() + window_bg.getWidth() - check_update_internet_w - 20, btn_add_season.getY() - 3, - check_update_internet_w, 34, config.get_localized_string(70601), font=self.font, - focusTexture=os.path.join(self.mediapath, 'Controls', 'MenuItemFO.png'), - noFocusTexture=os.path.join(self.mediapath, 'Controls', 'MenuItemNF.png')) - else: - self.check_update_internet = xbmcgui.ControlRadioButton( - window_bg.getX() + window_bg.getWidth() - check_update_internet_w - 20, btn_add_season.getY() - 3, - check_update_internet_w, 34, config.get_localized_string(70601), font=self.font, - focusTexture=os.path.join(self.mediapath, 'Controls', 'MenuItemFO.png'), - noFocusTexture=os.path.join(self.mediapath, 'Controls', 'MenuItemNF.png'), - focusOnTexture=os.path.join(self.mediapath, 'Controls', 'radiobutton-focus.png'), - noFocusOnTexture=os.path.join(self.mediapath, 'Controls', 'radiobutton-focus.png'), - focusOffTexture=os.path.join(self.mediapath, 'Controls', 'radiobutton-nofocus.png'), - noFocusOffTexture=os.path.join(self.mediapath, 'Controls', 'radiobutton-nofocus.png')) - - self.addControl(self.check_update_internet) - self.check_update_internet.setEnabled(False) - - hb_bg = xbmcgui.ControlImage(window_bg.getX() + 20, btn_add_season.getY() + btn_add_season.getHeight() + 13, - window_bg.getWidth() - 40, 2, - os.path.join(self.mediapath, 'Controls', 'ScrollBack.png')) - self.addControl(hb_bg) - - self.btn_ok = xbmcgui.ControlButton(window_bg.getX() + 68, hb_bg.getY() + hb_bg.getHeight() + 13, 120, 30, - 'OK', font=self.font, - focusTexture=os.path.join(self.mediapath, 'Controls', - 'KeyboardKey.png'), - noFocusTexture=os.path.join(self.mediapath, 'Controls', - 'KeyboardKeyNF.png'), - alignment=ALIGN_CENTER) - self.addControl(self.btn_ok) - - self.btn_cancel = xbmcgui.ControlButton(self.btn_info.getX() + 30, self.btn_ok.getY(), 120, 30, config.get_localized_string(70002), - font=self.font, - focusTexture=os.path.join(self.mediapath, 'Controls', - 'KeyboardKey.png'), - noFocusTexture=os.path.join(self.mediapath, 'Controls', - 'KeyboardKeyNF.png'), - alignment=ALIGN_CENTER) - self.addControl(self.btn_cancel) - - self.btn_delete = xbmcgui.ControlButton(self.btn_cancel.getX() + self.btn_cancel.getWidth() + 50, - self.btn_ok.getY(), 120, 30, config.get_localized_string(60437), font=self.font, - focusTexture=os.path.join(self.mediapath, 'Controls', - 'KeyboardKey.png'), - noFocusTexture=os.path.join(self.mediapath, 'Controls', - 'KeyboardKeyNF.png'), - alignment=ALIGN_CENTER) - self.addControl(self.btn_delete) - - self.controls = [] - self.onInit() - self.setFocus(self.controls[0].edit_season) - self.doModal() - - def onInit(self, *args, **kwargs): - try: - # listado temporada / episodios - pos_y = self.controls_bg.getY() + 10 - - # eliminamos los componentes al repintar la ventana - for linea in self.controls: - self.removeControls(linea.list_elements()) - - # mostramos el scroll si hay más de 5 elementos - if len(self.data) > 5: - self.controls_bg.setWidth(545) - self.scroll_bg.setVisible(True) - self.scroll2_bg.setVisible(True) - else: - 
self.controls_bg.setWidth(562) - self.scroll_bg.setVisible(False) - self.scroll2_bg.setVisible(False) - - self.controls = [] - # cambiamos el orden para que se vea en orden ascendente - self.data.sort(key=lambda el: int(el[0]), reverse=False) - - for index, e in enumerate(self.data): - pos_x = self.controls_bg.getX() + 15 - label_season_w = 100 - label_season = xbmcgui.ControlLabel(pos_x, pos_y + 3, label_season_w, 34, - config.get_localized_string(60385), font=self.font, textColor="0xFF2E64FE") - self.addControl(label_season) - label_season.setVisible(False) - - pos_x += label_season_w + 5 - - # TODO mirar retro-compatilibidad - # if xbmcgui.ControlEdit == ControlEdit: - # edit_season = xbmcgui.ControlEdit(0, 0, 0, 0, '', font=self.font, isPassword=False, - # textColor='', - # focusTexture=os.path.join(self.mediapath, 'Controls', - # 'MenuItemFO.png'), - # noFocusTexture=os.path.join(self.mediapath, 'Controls', - # 'MenuItemNF.png'), window=self) - # else: - - # control bugeado se tiene que usar metodos sets para que se cree correctamente. - edit_season = xbmcgui.ControlEdit(0, 0, 0, 0, "", self.font, "", '', 4, isPassword=False, - focusTexture=os.path.join(self.mediapath, 'Controls', - 'MenuItemFO.png'), - noFocusTexture=os.path.join(self.mediapath, 'Controls', - 'MenuItemNF.png')) - self.addControl(edit_season) - edit_season.setText(str(e[0])) - # edit_season.setLabel("Temporada:", font=self.font, textColor="0xFF2E64FE") - edit_season.setPosition(pos_x, pos_y - 2) - edit_season.setWidth(25) - edit_season.setHeight(35) - edit_season.setVisible(False) - - label_episode_w = 90 - pos_x += edit_season.getWidth() + 60 - label_episode = xbmcgui.ControlLabel(pos_x, pos_y + 3, label_episode_w, 34, config.get_localized_string(70598), - font=self.font, textColor="0xFF2E64FE") - self.addControl(label_episode) - label_episode.setVisible(False) - - pos_x += label_episode_w + 5 - # control bugeado se tiene que usar metodos sets para que se cree correctamente. 
- edit_episode = xbmcgui.ControlEdit(0, 0, 0, 0, "", self.font, "", '', 4, isPassword=False, - focusTexture=os.path.join(self.mediapath, 'Controls', - 'MenuItemFO.png'), - noFocusTexture=os.path.join(self.mediapath, 'Controls', - 'MenuItemNF.png')) - self.addControl(edit_episode) - edit_episode.setText(str(e[1])) - # edit_episode.setLabel("Episodios:", font=self.font, textColor="0xFF2E64FE") - edit_episode.setPosition(pos_x, pos_y - 2) - edit_episode.setWidth(40) - edit_episode.setHeight(35) - edit_episode.setVisible(False) - - btn_delete_season_w = 120 - btn_delete_season = xbmcgui.ControlButton(self.controls_bg.getX() + self.controls_bg.getWidth() - - btn_delete_season_w - 14, pos_y, btn_delete_season_w, 30, - config.get_localized_string(70599), font=self.font, - focusTexture=os.path.join(self.mediapath, 'Controls', - 'KeyboardKey.png'), - noFocusTexture=os.path.join(self.mediapath, 'Controls', - 'KeyboardKeyNF.png'), - alignment=ALIGN_CENTER) - self.addControl(btn_delete_season) - btn_delete_season.setVisible(False) - - hb_bg = xbmcgui.ControlImage(self.controls_bg.getX() + 10, pos_y + 40, - self.controls_bg.getWidth() - 20, - 2, os.path.join(self.mediapath, 'Controls', 'ScrollBack.png')) - self.addControl(hb_bg) - hb_bg.setVisible(False) - - group = ControlGroup(label_season=label_season, edit_season=edit_season, - label_episode=label_episode, - edit_episode=edit_episode, btn_delete_season=btn_delete_season, hb=hb_bg) - - pos_y += 50 - - if index < 5: - group.set_visible(True) - - self.controls.append(group) - - if len(self.data) > 5: - self.move_scroll() - - except Exception as Ex: - logger.error("HA HABIDO UNA HOSTIA %s" % Ex) - - # def onClick(self, control_id): - # pass - # - # def onFocus(self, control_id): - # pass - - def onControl(self, control): - # logger.debug("%s" % control.getId()) - control_id = control.getId() - - if control_id == ID_BUTTON_OK: - write_data(self.channel, self.show, self.data) - self.close() - if control_id in [ID_BUTTON_CLOSE, ID_BUTTON_CANCEL]: - self.close() - elif control_id == ID_BUTTON_DELETE: - self.close() - borrar(self.channel, self.show) - elif control_id == ID_BUTTON_ADD_SEASON: - # logger.debug("data que enviamos: {}".format(self.data)) - data = add_season(self.data) - if data: - self.data = data - # logger.debug("data que recibimos: {}".format(self.data)) - self.onInit() - - # si hay más de 5 elementos movemos el scroll - if len(self.data) > 5: - self.scroll(len(self.data) - 2, 1) - self.move_scroll() - - elif control_id == ID_BUTTON_INFO: - self.method_info() - else: - for x, grupo in enumerate(self.controls): - if control_id == self.controls[x].btn_delete_season.getId(): - # logger.debug("A data %s" % self.data) - self.removeControls(self.controls[x].list_elements()) - del self.controls[x] - del self.data[x] - # logger.debug("D data %s" % self.data) - self.onInit() - - return - - def onAction(self, action): - # logger.debug("%s" % action.getId()) - # logger.debug("focus %s" % self.getFocusId()) - # Obtenemos el foco - focus = self.getFocusId() - - action = action.getId() - # Flecha izquierda - if action == xbmcgui.ACTION_MOVE_LEFT: - # Si el foco no está en ninguno de los 6 botones inferiores, y esta en un "list" cambiamos el valor - if focus not in [ID_BUTTON_ADD_SEASON, ID_BUTTON_INFO, ID_CHECK_UPDATE_INTERNET, - ID_BUTTON_OK, ID_BUTTON_CANCEL, ID_BUTTON_DELETE]: - - # Localizamos en el listado de controles el control que tiene el focus - # todo mirar tema del cursor en el valor al desplazar lateralmente - for x, linea in 
enumerate(self.controls): - if focus == linea.edit_season.getId(): - return self.setFocus(self.controls[x].btn_delete_season) - elif focus == linea.edit_episode.getId(): - return self.setFocus(self.controls[x].edit_season) - elif focus == linea.btn_delete_season.getId(): - return self.setFocus(self.controls[x].edit_episode) - - # Si el foco está en alguno de los 6 botones inferiores, movemos al siguiente - else: - if focus in [ID_BUTTON_ADD_SEASON, ID_BUTTON_INFO, ID_CHECK_UPDATE_INTERNET]: - if focus == ID_BUTTON_ADD_SEASON: - self.setFocusId(ID_BUTTON_INFO) - # TODO cambiar cuando se habilite la opcion de actualizar por internet - # self.setFocusId(ID_CHECK_UPDATE_INTERNET) - elif focus == ID_BUTTON_INFO: - self.setFocusId(ID_BUTTON_ADD_SEASON) - elif focus == ID_CHECK_UPDATE_INTERNET: - self.setFocusId(ID_BUTTON_INFO) - - elif focus in [ID_BUTTON_OK, ID_BUTTON_CANCEL, ID_BUTTON_DELETE]: - if focus == ID_BUTTON_OK: - self.setFocusId(ID_BUTTON_DELETE) - elif focus == ID_BUTTON_CANCEL: - self.setFocusId(ID_BUTTON_OK) - elif focus == ID_BUTTON_DELETE: - self.setFocusId(ID_BUTTON_CANCEL) - - # Flecha derecha - elif action == xbmcgui.ACTION_MOVE_RIGHT: - # Si el foco no está en ninguno de los 6 botones inferiores, y esta en un "list" cambiamos el valor - if focus not in [ID_BUTTON_ADD_SEASON, ID_BUTTON_INFO, ID_CHECK_UPDATE_INTERNET, - ID_BUTTON_OK, ID_BUTTON_CANCEL, ID_BUTTON_DELETE]: - - # Localizamos en el listado de controles el control que tiene el focus - # todo mirar tema del cursor en el valor al desplazar lateralmente - for x, linea in enumerate(self.controls): - if focus == linea.edit_season.getId(): - return self.setFocus(self.controls[x].edit_episode) - elif focus == linea.edit_episode.getId(): - return self.setFocus(self.controls[x].btn_delete_season) - elif focus == linea.btn_delete_season.getId(): - return self.setFocus(self.controls[x].edit_season) - - # Si el foco está en alguno de los 6 botones inferiores, movemos al siguiente - else: - if focus in [ID_BUTTON_ADD_SEASON, ID_BUTTON_INFO, ID_CHECK_UPDATE_INTERNET]: - if focus == ID_BUTTON_ADD_SEASON: - self.setFocusId(ID_BUTTON_INFO) - if focus == ID_BUTTON_INFO: - self.setFocusId(ID_BUTTON_ADD_SEASON) - # TODO cambiar cuando se habilite la opcion de actualizar por internet - # self.setFocusId(ID_CHECK_UPDATE_INTERNET) - if focus == ID_CHECK_UPDATE_INTERNET: - self.setFocusId(ID_BUTTON_OK) - - elif focus in [ID_BUTTON_OK, ID_BUTTON_CANCEL, ID_BUTTON_DELETE]: - if focus == ID_BUTTON_OK: - self.setFocusId(ID_BUTTON_CANCEL) - if focus == ID_BUTTON_CANCEL: - self.setFocusId(ID_BUTTON_DELETE) - if focus == ID_BUTTON_DELETE: - self.setFocusId(ID_BUTTON_OK) - - # Flecha arriba - elif action == xbmcgui.ACTION_MOVE_UP: - self.move_up(focus) - # Flecha abajo - elif action == xbmcgui.ACTION_MOVE_DOWN: - self.move_down(focus) - # scroll up - elif action == xbmcgui.ACTION_MOUSE_WHEEL_UP: - self.move_up(focus) - # scroll down - elif action == xbmcgui.ACTION_MOUSE_WHEEL_DOWN: - self.move_down(focus) - - # ACTION_PAGE_DOWN = 6 - # ACTION_PAGE_UP = 5 - - # Menú previo o Atrás - elif action in [xbmcgui.ACTION_PREVIOUS_MENU, xbmcgui.ACTION_NAV_BACK]: - self.close() - - def move_down(self, focus): - # logger.debug("focus " + str(focus)) - # Si el foco está en uno de los tres botones medios, bajamos el foco a la otra linea de botones - if focus in [ID_BUTTON_ADD_SEASON, ID_BUTTON_INFO, ID_CHECK_UPDATE_INTERNET]: - if focus == ID_BUTTON_ADD_SEASON: - self.setFocusId(ID_BUTTON_OK) - elif focus == ID_BUTTON_INFO: - self.setFocusId(ID_BUTTON_CANCEL) - 
elif focus == ID_CHECK_UPDATE_INTERNET: - self.setFocusId(ID_BUTTON_DELETE) - # Si el foco está en uno de los tres botones inferiores, subimos el foco al primer control del listado - elif focus in [ID_BUTTON_OK, ID_BUTTON_CANCEL, ID_BUTTON_DELETE]: - first_visible = 0 - for x, linea in enumerate(self.controls): - if linea.get_visible(): - first_visible = x - break - - if focus == ID_BUTTON_OK: - self.setFocus(self.controls[first_visible].edit_season) - elif focus == ID_BUTTON_CANCEL: - self.setFocus(self.controls[first_visible].edit_episode) - elif focus == ID_BUTTON_DELETE: - self.setFocus(self.controls[first_visible].btn_delete_season) - # nos movemos entre los elementos del listado - else: - # Localizamos en el listado de controles el control que tiene el focus - for x, linea in enumerate(self.controls): - if focus == linea.edit_season.getId(): - if x + 1 < len(self.controls): - if not self.controls[x + 1].get_visible(): - self.scroll(x, 1) - - return self.setFocus(self.controls[x + 1].edit_season) - else: - return self.setFocusId(ID_BUTTON_ADD_SEASON) - elif focus == linea.edit_episode.getId(): - if x + 1 < len(self.controls): - if not self.controls[x + 1].get_visible(): - self.scroll(x, 1) - - return self.setFocus(self.controls[x + 1].edit_episode) - else: - self.setFocusId(ID_BUTTON_INFO) - elif focus == linea.btn_delete_season.getId(): - if x + 1 < len(self.controls): - if not self.controls[x + 1].get_visible(): - self.scroll(x, 1) - - return self.setFocus(self.controls[x + 1].btn_delete_season) - else: - return self.setFocusId(ID_BUTTON_INFO) - # TODO cambiar cuando se habilite la opcion de actualizar por internet - # return self.setFocusId(ID_CHECK_UPDATE_INTERNET) - - def move_up(self, focus): - # Si el foco está en uno de los tres botones medios, subimos el foco al último control del listado - if focus in [ID_BUTTON_ADD_SEASON, ID_BUTTON_INFO, ID_CHECK_UPDATE_INTERNET]: - last_visible = 0 - for x, linea in reversed(list(enumerate(self.controls))): - if linea.get_visible(): - last_visible = x - break - - if focus == ID_BUTTON_ADD_SEASON: - self.setFocus(self.controls[last_visible].edit_season) - elif focus == ID_BUTTON_INFO: - self.setFocus(self.controls[last_visible].edit_episode) - elif focus == ID_CHECK_UPDATE_INTERNET: - self.setFocus(self.controls[last_visible].btn_delete_season) - # Si el foco está en uno de los tres botones inferiores, subimos el foco a la otra linea de botones - elif focus in [ID_BUTTON_OK, ID_BUTTON_CANCEL, ID_BUTTON_DELETE]: - if focus == ID_BUTTON_OK: - self.setFocusId(ID_BUTTON_ADD_SEASON) - elif focus == ID_BUTTON_CANCEL: - self.setFocusId(ID_BUTTON_INFO) - elif focus == ID_BUTTON_DELETE: - self.setFocusId(ID_BUTTON_INFO) - # TODO cambiar cuando se habilite la opcion de actualizar por internet - # self.setFocusId(ID_CHECK_UPDATE_INTERNET) - # nos movemos entre los elementos del listado - else: - # Localizamos en el listado de controles el control que tiene el focus - for x, linea in enumerate(self.controls): - if focus == linea.edit_season.getId(): - if x > 0: - if not self.controls[x - 1].get_visible(): - self.scroll(x, -1) - - return self.setFocus(self.controls[x - 1].edit_season) - else: - return self.setFocusId(ID_BUTTON_OK) - elif focus == linea.edit_episode.getId(): - if x > 0: - if not self.controls[x - 1].get_visible(): - self.scroll(x, -1) - - return self.setFocus(self.controls[x - 1].edit_episode) - else: - self.setFocusId(ID_BUTTON_CANCEL) - elif focus == linea.btn_delete_season.getId(): - if x > 0: - if not self.controls[x - 
1].get_visible(): - self.scroll(x, -1) - - return self.setFocus(self.controls[x - 1].btn_delete_season) - else: - return self.setFocusId(ID_BUTTON_DELETE) - # TODO cambiar cuando se habilite la opcion de actualizar por internet - # return self.setFocusId(ID_CHECK_UPDATE_INTERNET) - - def scroll(self, position, movement): - try: - for index, group in enumerate(self.controls): - # ponemos todos los elementos como no visibles - group.set_visible(False) - - if movement > 0: - pos_fin = position + movement + 1 - pos_inicio = pos_fin - 5 - else: - pos_inicio = position + movement - pos_fin = pos_inicio + 5 - - # logger.debug("position {}, movement {}, pos_inicio{}, pos_fin{}, self.data.length{}". - # format(position, movement, pos_inicio, pos_fin, len(self.data))) - pos_y = self.controls_bg.getY() + 10 - for i in range(pos_inicio, pos_fin): - pos_x = self.controls_bg.getX() + 15 - - self.controls[i].label_season.setPosition(pos_x, pos_y + 3) - - pos_x += self.controls[i].label_season.getWidth() + 5 - self.controls[i].edit_season.setPosition(pos_x, pos_y - 2) - - pos_x += self.controls[i].edit_season.getWidth() + 60 - self.controls[i].label_episode.setPosition(pos_x, pos_y + 3) - - pos_x += self.controls[i].label_episode.getWidth() + 5 - self.controls[i].edit_episode.setPosition(pos_x, pos_y - 2) - - self.controls[i].btn_delete_season.setPosition( - self.controls_bg.getX() + self.controls_bg.getWidth() - - self.controls[i].btn_delete_season.getWidth() - 14, - pos_y) - - self.controls[i].hb.setPosition(self.controls_bg.getX() + 10, pos_y + 40) - - pos_y += 50 - - # logger.debug("ponemos como True %s" % i) - self.controls[i].set_visible(True) - - self.move_scroll() - - except Exception as Ex: - logger.error("HA HABIDO UNA HOSTIA %s" % Ex) - - def move_scroll(self): - visible_controls = [group for group in self.controls if group.get_visible() == True] - hidden_controls = [group for group in self.controls if group.get_visible() == False] - scroll_position = self.controls.index(visible_controls[0]) - scrollbar_height = self.scroll_bg.getHeight() - (len(hidden_controls) * 10) - scrollbar_y = self.scroll_bg.getPosition()[1] + (scroll_position * 10) - self.scroll2_bg.setPosition(self.scroll_bg.getPosition()[0], scrollbar_y) - self.scroll2_bg.setHeight(scrollbar_height) - - @staticmethod - def method_info(): - title = config.get_localized_string(60348) - # text = "La primera temporada que se añade siempre empieza en \"0\" episodios, la segunda temporada que se " - # text += "añade empieza en el número total de episodios de la primera temporada, la tercera temporada será " - # text += "la suma de los episodios de las temporadas previas y así sucesivamente.\n" - # text += "[COLOR blue]\nEjemplo de serie divida en varias temporadas:\n" - # text += "\nFairy Tail:\n" - # text += " - SEASON 1: EPISODE 48 --> [season 1, episode: 0]\n" - # text += " - SEASON 2: EPISODE 48 --> [season 2, episode: 48]\n" - # text += " - SEASON 3: EPISODE 54 --> [season 3, episode: 96 ([48=season2] + [48=season1])]\n" - # text += " - SEASON 4: EPISODE 175 --> [season 4: episode: 150 ([54=season3] + [48=season2] + [48=season3" \ - # "])][/COLOR]\n" - # text += "[COLOR green]\nEjemplo de serie que continua en la temporada de la original:\n" - # text += "\nFate/Zero 2nd Season:\n" - # text += " - SEASON 1: EPISODE 12 --> [season 1, episode: 13][/COLOR]\n" - - # text += "[COLOR blue]\nEjemplo de serie que es la segunda temporada de la original:\n" - # text += "\nFate/kaleid liner Prisma☆Illya 2wei!:\n" - # text += " - SEASON 1: EPISODE 
12 --> [season 2, episode: 0][/COLOR]\n" - text = config.get_localized_string(70602) - - return TextBox("DialogTextViewer.xml", os.getcwd(), "Default", title=title, text=text) - - - class ControlGroup(object): - """ - conjunto de controles, son los elementos que se muestra por línea de una lista. - """ - - def __init__(self, label_season, edit_season, label_episode, edit_episode, btn_delete_season, hb): - self.visible = False - self.label_season = label_season - self.edit_season = edit_season - self.label_episode = label_episode - self.edit_episode = edit_episode - self.btn_delete_season = btn_delete_season - self.hb = hb - - def list_elements(self): - return [self.label_season, self.edit_season, self.label_episode, self.edit_episode, self.btn_delete_season, - self.hb] - - def get_visible(self): - return self.visible - - def set_visible(self, visible): - self.visible = visible - self.label_season.setVisible(visible) - self.edit_season.setVisible(visible) - self.label_episode.setVisible(visible) - self.edit_episode.setVisible(visible) - self.btn_delete_season.setVisible(visible) - self.hb.setVisible(visible) - - - class TextBox(xbmcgui.WindowXMLDialog): - """ Create a skinned textbox window """ - - def __init__(self, *args, **kwargs): - self.title = kwargs.get('title') - self.text = kwargs.get('text') - self.doModal() - - def onInit(self): - try: - self.getControl(5).setText(self.text) - self.getControl(1).setLabel(self.title) - except: - pass - - def onClick(self, control_id): - pass - - def onFocus(self, control_id): - pass - - def onAction(self, action): - self.close() - - # TODO mirar retro-compatiblidad - # class ControlEdit(xbmcgui.ControlButton): - # def __new__(self, *args, **kwargs): - # del kwargs["isPassword"] - # del kwargs["window"] - # args = list(args) - # return xbmcgui.ControlButton.__new__(self, *args, **kwargs) - # - # def __init__(self, *args, **kwargs): - # self.isPassword = kwargs["isPassword"] - # self.window = kwargs["window"] - # self.label = "" - # self.text = "" - # self.textControl = xbmcgui.ControlLabel(self.getX(), self.getY(), self.getWidth(), self.getHeight(), - # self.text, - # font=kwargs["font"], textColor=kwargs["textColor"], alignment=4 | 1) - # self.window.addControl(self.textControl) - # - # def setLabel(self, val): - # self.label = val - # xbmcgui.ControlButton.setLabel(self, val) - # - # def getX(self): - # return xbmcgui.ControlButton.getPosition(self)[0] - # - # def getY(self): - # return xbmcgui.ControlButton.getPosition(self)[1] - # - # def setEnabled(self, e): - # xbmcgui.ControlButton.setEnabled(self, e) - # self.textControl.setEnabled(e) - # - # def setWidth(self, w): - # xbmcgui.ControlButton.setWidth(self, w) - # self.textControl.setWidth(w / 2) - # - # def setHeight(self, w): - # xbmcgui.ControlButton.setHeight(self, w) - # self.textControl.setHeight(w) - # - # def setPosition(self, x, y): - # xbmcgui.ControlButton.setPosition(self, x, y) - # self.textControl.setPosition(x + self.getWidth() / 2, y) - # - # def setText(self, text): - # self.text = text - # if self.isPassword: - # self.textControl.setLabel("*" * len(self.text)) - # else: - # self.textControl.setLabel(self.text) - # - # def getText(self): - # return self.text - # - # - # if not hasattr(xbmcgui, "ControlEdit"): - # xbmcgui.ControlEdit = ControlEdit diff --git a/specials/setting.py b/specials/setting.py index 27826152..1a886e70 100644 --- a/specials/setting.py +++ b/specials/setting.py @@ -12,11 +12,9 @@ from builtins import range from past.utils import old_div from 
channelselector import get_thumb -from core import filetools -from core import servertools +from core import filetools, servertools from core.item import Item -from platformcode import config, logger -from platformcode import platformtools +from platformcode import config, logger, platformtools import xbmcgui CHANNELNAME = "setting" @@ -73,7 +71,7 @@ def menu_channels(item): itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60546) + ":", action="", folder=False, text_bold = True, thumbnail=get_thumb("setting_0.png"))) - # Inicio - Canales configurables + # Home - Configurable channels import channelselector from core import channeltools channel_list = channelselector.filterchannels("all") @@ -85,7 +83,7 @@ def menu_channels(item): itemlist.append(Item(channel=CHANNELNAME, title=". " + config.get_localized_string(60547) % channel.title, action="channel_config", config=channel.channel, folder=False, thumbnail=channel.thumbnail)) - # Fin - Canales configurables + # End - Configurable channels itemlist.append(Item(channel=CHANNELNAME, action="", title="", folder=False, thumbnail=get_thumb("setting_0.png"))) itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60548) + ":", action="", folder=False, text_bold=True, thumbnail=get_thumb("channels.png"))) @@ -98,7 +96,7 @@ def channel_config(item): return platformtools.show_channel_settings(channelpath=filetools.join(config.get_runtime_path(), "channels", item.config)) -def autostart(item): # item necessario launcher.py linea 265 +def autostart(item): # item required launcher.py line 265 if config.enable_disable_autorun(AUTOSTART): logger.info('AUTOSTART ENABLED') # xbmcgui.Dialog().ok(config.get_localized_string(20000), config.get_localized_string(70709)) @@ -126,7 +124,7 @@ def autostart(item): # item necessario launcher.py linea 265 # torrent_options = [config.get_localized_string(30006), config.get_localized_string(70254), config.get_localized_string(70255)] # torrent_options.extend(platformtools.torrent_client_installed()) - + # list_controls = [ # { @@ -257,7 +255,7 @@ def menu_servers(item): itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60552), action="", folder=False, text_bold = True, thumbnail=get_thumb("setting_0.png"))) - # Inicio - Servidores configurables + # Home - Configurable servers server_list = list(servertools.get_debriders_list().keys()) for server in server_list: @@ -280,7 +278,7 @@ def menu_servers(item): Item(channel=CHANNELNAME, title=". 
" + config.get_localized_string(60553) % server_parameters["name"], action="server_config", config=server, folder=False, thumbnail="")) - # Fin - Servidores configurables + # End - Configurable servers return itemlist @@ -332,13 +330,13 @@ def cb_servers_blacklist(item, dict_values): config.set_setting('filter_servers', v) else: config.set_setting("black_list", v, server=k) - if v: # Si el servidor esta en la lista negra no puede estar en la de favoritos + if v: # If the server is blacklisted it cannot be in the favorites list config.set_setting("favorites_servers_list", 100, server=k) f = True progreso.update(old_div((i * 100), n), config.get_localized_string(60559) % k) i += 1 - if not f: # Si no hay ningun servidor en la lista, desactivarla + if not f: # If there is no server in the list, deactivate it config.set_setting('filter_servers', False) progreso.close() @@ -406,7 +404,7 @@ def cb_servers_favorites(server_names, dict_values): progreso.update(old_div((i * 100), n), config.get_localized_string(60559) % server_parameters['name']) i += 1 - if not dict_name: # Si no hay ningun servidor en lalista desactivarla + if not dict_name: # If there is no server in the list, deactivate it config.set_setting("favorites_servers", False) progreso.close() @@ -420,7 +418,7 @@ def submenu_tools(item): logger.info() itemlist = list() - # Herramientas personalizadas + # Custom tools import os channel_custom = os.path.join(config.get_runtime_path(), 'channels', 'custom.py') if not filetools.exists(channel_custom): @@ -483,9 +481,9 @@ def check_quickfixes(item): def conf_tools(item): logger.info() - # Activar o desactivar canales + # Enable or disable channels if item.extra == "channels_onoff": - if config.get_platform(True)['num_version'] >= 17.0: # A partir de Kodi 16 se puede usar multiselect, y de 17 con preselect + if config.get_platform(True)['num_version'] >= 17.0: # From Kodi 16 you can use multiselect, and from 17 with preselect return channels_onoff(item) import channelselector @@ -515,14 +513,14 @@ def conf_tools(item): config.get_localized_string(60593)]}) for channel in channel_list: - # Si el canal esta en la lista de exclusiones lo saltamos + # If the channel is on the exclusion list, we skip it if channel.channel not in excluded_channels: channel_parameters = channeltools.get_channel_parameters(channel.channel) status_control = "" status = config.get_setting("enabled", channel.channel) - # si status no existe es que NO HAY valor en _data.json + # if status does not exist, there is NO value in _data.json if status is None: status = channel_parameters["active"] logger.debug("%s | Status (XML): %s" % (channel.channel, status)) @@ -552,15 +550,14 @@ def conf_tools(item): callback="channel_status", custom_button={"visible": False}) - # Comprobacion de archivos channel_data.json + # Checking channel_data.json files elif item.extra == "lib_check_datajson": itemlist = [] import channelselector from core import channeltools channel_list = channelselector.filterchannels("allchannelstatus") - # Tener una lista de exclusion no tiene mucho sentido por que se comprueba si channel.json tiene "settings", - # pero por si acaso se deja + # Having an exclusion list doesn't make much sense because it checks if channel.json has "settings", but just in case it is left excluded_channels = ['url', 'setting', 'help'] @@ -573,9 +570,9 @@ def conf_tools(item): list_status = None default_settings = None - # Se comprueba si el canal esta en la lista de exclusiones + # It is checked if the channel is in the exclusion 
list if channel.channel not in excluded_channels: - # Se comprueba que tenga "settings", sino se salta + # It is checked that it has "settings", otherwise it skips list_controls, dict_settings = channeltools.get_channel_controls_settings(channel.channel) if not list_controls: @@ -586,23 +583,22 @@ def conf_tools(item): continue # logger.info(channel.channel + " SALTADO!") - # Se cargan los ajustes del archivo json del canal - file_settings = os.path.join(config.get_data_path(), "settings_channels", - channel.channel + "_data.json") + # The json file settings of the channel are loaded + file_settings = os.path.join(config.get_data_path(), "settings_channels", channel.channel + "_data.json") dict_settings = {} dict_file = {} if filetools.exists(file_settings): - # logger.info(channel.channel + " Tiene archivo _data.json") + # logger.info(channel.channel + " Has _data.json file") channeljson_exists = True - # Obtenemos configuracion guardada de ../settings/channel_data.json + # We get saved settings from ../settings/channel_data.json try: dict_file = jsontools.load(filetools.read(file_settings)) if isinstance(dict_file, dict) and 'settings' in dict_file: dict_settings = dict_file['settings'] except EnvironmentError: - logger.error("ERROR al leer el archivo: %s" % file_settings) + logger.error("ERROR when reading the file: %s" % file_settings) else: - # logger.info(channel.channel + " No tiene archivo _data.json") + # logger.info(channel.channel + " No _data.json file") channeljson_exists = False if channeljson_exists: @@ -614,12 +610,12 @@ def conf_tools(item): else: datajson_size = None - # Si el _data.json esta vacio o no existe... + # If the _data.json is empty or does not exist ... if (len(dict_settings) and datajson_size) == 0 or not channeljson_exists: - # Obtenemos controles del archivo ../channels/channel.json + # We get controls from the file ../channels/channel.json needsfix = True try: - # Se cargan los ajustes por defecto + # Default settings are loaded list_controls, default_settings = channeltools.get_channel_controls_settings( channel.channel) # logger.info(channel.title + " | Default: %s" % default_settings) @@ -628,26 +624,26 @@ def conf_tools(item): logger.error(channel.title + config.get_localized_string(60570) % traceback.format_exc()) # default_settings = {} - # Si _data.json necesita ser reparado o no existe... + # If _data.json needs to be repaired or doesn't exist ... 
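# Illustrative sketch (not part of this patch): the repair performed below amounts to
# "defaults from channel.json, overridden by whatever the user already saved".
# filetools/jsontools are the helpers this module already uses; the wrapper name is hypothetical.
from core import filetools, jsontools

def repair_channel_settings(file_settings, default_settings):
    dict_file = {}
    if filetools.exists(file_settings):
        loaded = jsontools.load(filetools.read(file_settings))
        if isinstance(loaded, dict):
            dict_file = loaded
    merged = dict(default_settings)               # start from the channel defaults
    merged.update(dict_file.get('settings', {}))  # keep any value the user already set
    dict_file['settings'] = merged
    # write the merged settings back to settings_channels/<channel>_data.json, as conf_tools does below
    return filetools.write(file_settings, jsontools.dump(dict_file), silent=True)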
if needsfix or not channeljson_exists: if default_settings is not None: - # Creamos el channel_data.json + # We create the channel_data.json default_settings.update(dict_settings) dict_settings = default_settings dict_file['settings'] = dict_settings - # Creamos el archivo ../settings/channel_data.json + # We create the file ../settings/channel_data.json if not filetools.write(file_settings, jsontools.dump(dict_file), silent=True): - logger.error("ERROR al salvar el archivo: %s" % file_settings) + logger.error("ERROR saving file: %s" % file_settings) list_status = config.get_localized_string(60560) else: if default_settings is None: list_status = config.get_localized_string(60571) else: - # logger.info(channel.channel + " - NO necesita correccion!") + # logger.info(channel.channel + " - NO correction needed!") needsfix = False - # Si se ha establecido el estado del canal se añade a la lista + # If the channel status has been set it is added to the list if needsfix is not None: if needsfix: if not channeljson_exists: @@ -657,8 +653,7 @@ def conf_tools(item): list_status = config.get_localized_string(60589) list_colour = "green" else: - # Si "needsfix" es "false" y "datjson_size" es None habra - # ocurrido algun error + # If "needsfix" is "false" and "datjson_size" is None, an error will have occurred if datajson_size is None: list_status = config.get_localized_string(60590) list_colour = "red" @@ -673,9 +668,9 @@ def conf_tools(item): thumbnail=channel.thumbnail, text_color=list_colour)) else: - logger.error("Algo va mal con el canal %s" % channel.channel) + logger.error("Something is wrong with the channel %s" % channel.channel) - # Si el canal esta en la lista de exclusiones lo saltamos + # If the channel is on the exclusion list, we skip it else: continue except: @@ -689,7 +684,7 @@ def channels_onoff(item): import channelselector, xbmcgui from core import channeltools - # Cargar lista de opciones + # Load list of options # ------------------------ lista = []; ids = [] channels_list = channelselector.filterchannels('allchannelstatus') @@ -704,11 +699,11 @@ def channels_onoff(item): lista.append(it) ids.append(channel.channel) - # Diálogo para pre-seleccionar + # Dialog to pre-select # ---------------------------- preselecciones = [config.get_localized_string(70517), config.get_localized_string(70518), config.get_localized_string(70519)] ret = platformtools.dialog_select(config.get_localized_string(60545), preselecciones) - if ret == -1: return False # pedido cancel + if ret == -1: return False # order cancel if ret == 2: preselect = [] elif ret == 1: preselect = list(range(len(ids))) else: @@ -719,13 +714,13 @@ def channels_onoff(item): if channel_status: preselect.append(i) - # Diálogo para seleccionar + # Dialog to select # ------------------------ ret = xbmcgui.Dialog().multiselect(config.get_localized_string(60545), lista, preselect=preselect, useDetails=True) - if ret == None: return False # pedido cancel + if ret == None: return False # order cancel seleccionados = [ids[i] for i in ret] - # Guardar cambios en canales activados + # Save changes to activated channels # ------------------------------------ for canal in ids: channel_status = config.get_setting('enabled', canal) @@ -744,7 +739,7 @@ def channel_status(item, dict_values): for k in dict_values: if k == "all_channels": - logger.info("Todos los canales | Estado seleccionado: %s" % dict_values[k]) + logger.info("All channels | Selected state: %s" % dict_values[k]) if dict_values[k] != 0: excluded_channels = ['url', 'search', 
'videolibrary', 'setting', @@ -759,25 +754,25 @@ def channel_status(item, dict_values): new_status_all = None new_status_all_default = channel_parameters["active"] - # Opcion Activar todos + # Option Activate all if dict_values[k] == 1: new_status_all = True - # Opcion Desactivar todos + # Option Deactivate all if dict_values[k] == 2: new_status_all = False - # Opcion Recuperar estado por defecto + # Retrieve default status option if dict_values[k] == 3: - # Si tiene "enabled" en el _data.json es porque el estado no es el del channel.json + # If you have "enabled" in the _data.json, it is because the state is not that of the channel.json if config.get_setting("enabled", channel.channel): new_status_all = new_status_all_default - # Si el canal no tiene "enabled" en el _data.json no se guarda, se pasa al siguiente + # If the channel does not have "enabled" in the _data.json it is not saved, it goes to the next else: continue - # Se guarda el estado del canal + # Channel status is saved if new_status_all is not None: config.set_setting("enabled", new_status_all, channel.channel) break @@ -785,15 +780,15 @@ def channel_status(item, dict_values): continue else: - logger.info("Canal: %s | Estado: %s" % (k, dict_values[k])) + logger.info("Channel: %s | State: %s" % (k, dict_values[k])) config.set_setting("enabled", dict_values[k], k) - logger.info("el valor esta como %s " % config.get_setting("enabled", k)) + logger.info("the value is like %s " % config.get_setting("enabled", k)) platformtools.itemlist_update(Item(channel=CHANNELNAME, action="mainlist")) except: import traceback - logger.error("Detalle del error: %s" % traceback.format_exc()) + logger.error("Error detail: %s" % traceback.format_exc()) platformtools.dialog_notification(config.get_localized_string(60579), config.get_localized_string(60580)) @@ -823,15 +818,15 @@ def restore_tools(item): path = filetools.dirname(tvshow_file) if not serie.active: - # si la serie no esta activa descartar + # if the series is not active discard continue - # Eliminamos la carpeta con la serie ... + # We delete the folder with the series ... if tvshow_file.endswith('.strm') or tvshow_file.endswith('.json') or tvshow_file.endswith('.nfo'): os.remove(os.path.join(path, tvshow_file)) # filetools.rmdirtree(path) - # ... y la volvemos a añadir + # ... and we add it again service.update(path, p_dialog, i, t, serie, 3) p_dialog.close() @@ -855,7 +850,7 @@ def restore_tools(item): path = filetools.dirname(movie_json) movie = Item().fromjson(filetools.read(movie_json)) - # Eliminamos la carpeta con la pelicula ... + # We delete the folder with the movie ... filetools.rmdirtree(path) import math @@ -863,69 +858,69 @@ def restore_tools(item): p_dialog2.update(int(math.ceil((i + 1) * t)), heading, config.get_localized_string(60389) % (movie.contentTitle, movie.channel.capitalize())) - # ... y la volvemos a añadir + # ... and we add it again videolibrarytools.save_movie(movie) except Exception as ex: - logger.error("Error al crear de nuevo la película") + logger.error("Error creating movie again") template = "An exception of type %s occured. 
Arguments:\n%r" message = template % (type(ex).__name__, ex.args) logger.error(message) p_dialog2.close() - + def report_menu(item): logger.info('URL: ' + item.url) - + from channelselector import get_thumb - + thumb_debug = get_thumb("update.png") thumb_error = get_thumb("error.png") thumb_next = get_thumb("next.png") itemlist = [] paso = 1 - # Crea un menú de opciones para permitir al usuario reportar un fallo de Alfa a través de un servidor "pastebin" - # Para que el informe sea completo el usuario debe tener la opción de DEBUG=ON - # Los servidores "pastbin" gratuitos tienen limitación de capacidad, por lo que el tamaño del log es importante - # Al final de la operación de upload, se pasa al usuario la dirección de log en el servidor para que los reporte - + # Create a menu of options to allow the user to report an Alpha failure through a "pastebin" server + # For the report to be complete, the user must have the option DEBUG = ON + # Free pastbin servers have capacity limitations, so the size of the log is important + # At the end of the upload operation, the user is passed the log address on the server to report them + itemlist.append(Item(channel=item.channel, action="", title=config.get_localized_string(707418), thumbnail=thumb_next, folder=False)) - #if not config.get_setting('debug'): - itemlist.append(Item(channel=item.channel, action="activate_debug", extra=True, + # if not config.get_setting('debug'): + itemlist.append(Item(channel=item.channel, action="activate_debug", extra=True, title=config.get_localized_string(707419) % str(paso), thumbnail=thumb_debug, folder=False)) paso += 1 - itemlist.append(Item(channel="channelselector", action="getmainlist", + itemlist.append(Item(channel="channelselector", action="getmainlist", title=config.get_localized_string(707420) % str(paso), thumbnail=thumb_debug)) paso += 1 - itemlist.append(Item(channel=item.channel, action="report_send", + itemlist.append(Item(channel=item.channel, action="report_send", title=config.get_localized_string(707421) % str(paso), thumbnail=thumb_error, folder=False)) paso += 1 - #if config.get_setting('debug'): - itemlist.append(Item(channel=item.channel, action="activate_debug", extra=False, + # if config.get_setting('debug'): + itemlist.append(Item(channel=item.channel, action="activate_debug", extra=False, title=config.get_localized_string(707422) % str(paso), thumbnail=thumb_debug, folder=False)) paso += 1 - + if item.url: itemlist.append(Item(channel=item.channel, action="", title="", folder=False)) - - itemlist.append(Item(channel=item.channel, action="", + + itemlist.append(Item(channel=item.channel, action="", title=config.get_localized_string(707423), thumbnail=thumb_next, folder=False)) - + if item.one_use: action = '' url = '' else: action = 'call_browser' url = item.url - itemlist.append(Item(channel=item.channel, action=action, - title="**- LOG: [COLOR gold]%s[/COLOR] -**" % item.url, url=url, + itemlist.append(Item(channel=item.channel, action=action, + title="**- LOG: [COLOR gold]%s[/COLOR] -**" % item.url, url=url, thumbnail=thumb_next, unify=False, folder=False)) itemlist.append(Item(channel=item.channel, action="call_browser", @@ -935,24 +930,24 @@ def report_menu(item): itemlist.append(Item(channel=item.channel, action="call_browser", url='https://t.me/kodiondemand', title="Su telegram", thumbnail=thumb_next, unify=False, folder=False)) - + if item.one_use: - itemlist.append(Item(channel=item.channel, action="", - title="[COLOR orange]NO ACCEDA al INFORME: se BORRARÁ[/COLOR]", + 
itemlist.append(Item(channel=item.channel, action="", + title="[COLOR orange]NO ACCEDA al INFORME: se BORRARÁ[/COLOR]", thumbnail=thumb_next, folder=False)) - itemlist.append(Item(channel=item.channel, action="", - title="[COLOR orange]ya que es de un solo uso[/COLOR]", + itemlist.append(Item(channel=item.channel, action="", + title="[COLOR orange]ya que es de un solo uso[/COLOR]", thumbnail=thumb_next, folder=False)) - + return itemlist - - + + def activate_debug(item): logger.info(item.extra) from platformcode import platformtools - - # Activa/Desactiva la opción de DEBUB en settings.xml - + + #Enable / disable DEBUB option in settings.xml + if isinstance(item.extra, str): return report_menu(item) if item.extra: @@ -961,20 +956,20 @@ def activate_debug(item): else: config.set_setting('debug', False) platformtools.dialog_notification(config.get_localized_string(707430), config.get_localized_string(707432)) - - + + def report_send(item, description='', fatal=False): import xbmc import random import traceback if PY3: - #from future import standard_library - #standard_library.install_aliases() - import urllib.parse as urlparse # Es muy lento en PY2. En PY3 es nativo + # from future import standard_library + # standard_library.install_aliases() + import urllib.parse as urlparse # It is very slow in PY2. In PY3 it is native import urllib.parse as urllib else: - import urllib # Usamos el nativo de PY2 que es más rápido + import urllib # We use the native of PY2 which is faster import urlparse try: @@ -983,149 +978,149 @@ def report_send(item, description='', fatal=False): except: requests_status = False logger.error(traceback.format_exc()) - + from core import jsontools, httptools, scrapertools from platformcode import envtal - - # Esta función realiza la operación de upload del LOG. El tamaño del archivo es de gran importacia porque - # los servicios de "pastebin" gratuitos tienen limitaciones, a veces muy bajas. - # Hay un ervicio, File.io, que permite subida directa de "achivos binarios" a través de la función "request" - # Esto aumenta dráticamente la capacidad del envío del log, muy por encima de lo necesitado - # Por ello es necesario contar con una lista de servicios "pastebin" que puedan realizar la operación de upload, - # ya sea por capacidad disponible o por disponibilidad. - # Para poder usar los servidores "pastebin" con un código común, se ha creado un diccionario con los servidores - # y sus características. En cada entrada se recogen las peculiaridades de cada servidor, tanto para formar - # la petición consu POST como para la forma de recibir el código del upload en la respuesta (json, header, regex - # en datos,...). - # Al iniciar este método se aleatoriza la lista de servidores "pastebin" para evitar que todos los usuarios hagan - # uploads contra el mismo servidor y puedan ocasionar sobrecargas. - # Se lee el arcivo de log y se compara su tamaño con la capacidad del servidor (parámetro 10 de cada entrada - # (empezando desde 0), expresado en MB, hasta que se encuentra uno capacitado. Si el upload falla se sigue intentado - # con los siguientes servidores que tengan la capacidad requerida. - # Si no se encuentra ningun servidor disponible se pide al usuario que lo intente más tarde, o que suba el log - # directamente en el foro. Si es un problema de tamaño, se le pide que reicinie Kodi y reporducza el fallo, para - # que el LOG sea más pequeño. - - + + # This function performs the LOG upload operation. 
The file size is of great importance because + # Free pastebin services have limitations, sometimes very low. + # There is an ervice, File.io, that allows direct upload of "binary files" through the "request" function + # This dramatically increases the ability to send the log, well above what is needed. + # Therefore it is necessary to have a list of "pastebin" services that can perform the upload operation, + # either by available capacity or by availability. + # In order to use the "pastebin" servers with a common code, a dictionary has been created with the servers + # and their characteristics. In each entry the peculiarities of each server are collected, both to form + # the request with POST as for the way to receive the upload code in the response (json, header, regex + # in data, ...). + # Starting this method randomizes the list of "pastebin" servers to prevent all users from doing + # uploads against the same server and may cause overloads. + # The log file is read and its size is compared with the server capacity (parameter 10 of each entry + # (starting from 0), expressed in MB, until a qualified one is found. If the upload fails, it continues trying + # with the following servers that have the required capacity. + # If no available server is found, the user is asked to try again later, or to upload the log. + # directly on the forum. If it is a size problem, you are asked to reset Kodi and redo the fault, to + # that the LOG is smaller. + + pastebin_list = { - 'hastebin': ('1', 'https://hastebin.com/', 'documents', 'random', '', '', - 'data', 'json', 'key', '', '0.29', '10', True, 'raw/', '', ''), - 'dpaste': ('1', 'http://dpaste.com/', 'api/v2/', 'random', 'content=', - '&syntax=text&title=%s&poster=alfa&expiry_days=7', + 'hastebin': ('1', 'https://hastebin.com/', 'documents', 'random', '', '', + 'data', 'json', 'key', '', '0.29', '10', True, 'raw/', '', ''), + 'dpaste': ('1', 'http://dpaste.com/', 'api/v2/', 'random', 'content=', + '&syntax=text&title=%s&poster=alfa&expiry_days=7', 'headers', '', '', 'location', '0.23', '15', True, '', '.txt', ''), - 'ghostbin': ('1', 'https://ghostbin.com/', 'paste/new', 'random', 'lang=text&text=', - '&expire=2d&password=&title=%s', - 'data', 'regex', '(.*?)\s*-\s*Ghostbin<\/title>', '', + 'ghostbin': ('1', 'https://ghostbin.com/', 'paste/new', 'random', 'lang=text&text=', + '&expire=2d&password=&title=%s', + 'data', 'regex', '<title>(.*?)\s*-\s*Ghostbin<\/title>', '', '0.49', '15', False, 'paste/', '', ''), - 'write.as': ('1', 'https://write.as/', 'api/posts', 'random', 'body=', '&title=%s', + 'write.as': ('1', 'https://write.as/', 'api/posts', 'random', 'body=', '&title=%s', 'data', 'json', 'data', 'id', '0.018', '15', True, '', '', ''), - 'oneclickpaste': ('1', 'http://oneclickpaste.com/', 'index.php', 'random', 'paste_data=', - '&title=%s&format=text&paste_expire_date=1W&visibility=0&pass=&submit=Submit', - 'data', 'regex', '<a class="btn btn-primary" href="[^"]+\/(\d+\/)">\s*View\s*Paste\s*<\/a>', + 'oneclickpaste': ('1', 'http://oneclickpaste.com/', 'index.php', 'random', 'paste_data=', + '&title=%s&format=text&paste_expire_date=1W&visibility=0&pass=&submit=Submit', + 'data', 'regex', '<a class="btn btn-primary" href="[^"]+\/(\d+\/)">\s*View\s*Paste\s*<\/a>', '', '0.060', '5', True, '', '', ''), - 'bpaste': ('1', 'https://bpaste.net/', '', 'random', 'code=', '&lexer=text&expiry=1week', - 'data', 'regex', 'View\s*<a\s*href="[^*]+/(.*?)">raw<\/a>', '', + 'bpaste': ('1', 'https://bpaste.net/', '', 'random', 'code=', 
'&lexer=text&expiry=1week', + 'data', 'regex', 'View\s*<a\s*href="[^*]+/(.*?)">raw<\/a>', '', '0.79', '15', True, 'raw/', '', ''), - 'dumpz': ('0', 'http://dumpz.org/', 'api/dump', 'random', 'code=', '&lexer=text&comment=%s&password=', + 'dumpz': ('0', 'http://dumpz.org/', 'api/dump', 'random', 'code=', '&lexer=text&comment=%s&password=', 'headers', '', '', 'location', '0.99', '15', False, '', '', ''), - 'file.io': ('1', 'https://file.io/', '', 'random', '', 'expires=1w', - 'requests', 'json', 'key', '', '99.0', '30', False, '', '.log', ''), - 'uploadfiles': ('1', 'https://up.uploadfiles.io/upload', '', 'random', '', '', - 'requests', 'json', 'url', '', '99.0', '30', False, None, '', '') + 'file.io': ('1', 'https://file.io/', '', 'random', '', 'expires=1w', + 'requests', 'json', 'key', '', '99.0', '30', False, '', '.log', ''), + 'uploadfiles': ('1', 'https://up.uploadfiles.io/upload', '', 'random', '', '', + 'requests', 'json', 'url', '', '99.0', '30', False, None, '', '') } - pastebin_list_last = ['hastebin', 'ghostbin', 'file.io'] # Estos servicios los dejamos los últimos - pastebin_one_use = ['file.io'] # Servidores de un solo uso y se borra + pastebin_list_last = ['hastebin', 'ghostbin', 'file.io'] # We leave these services the last + pastebin_one_use = ['file.io'] # Single-use servers and deletes pastebin_dir = [] paste_file = {} paste_params = () paste_post = '' status = False msg = config.get_localized_string(707424) - - # Se verifica que el DEBUG=ON, si no está se rechaza y se pide al usuario que lo active y reproduzca el fallo + + # DEBUG = ON is verified, if it is not it is rejected and the user is asked to activate it and reproduce the fault if not config.get_setting('debug'): platformtools.dialog_notification(config.get_localized_string(707425), config.get_localized_string(707426)) return report_menu(item) - - # De cada al futuro se permitira al usuario que introduzca una breve descripción del fallo que se añadirá al LOG + + # From each to the future the user will be allowed to enter a brief description of the fault that will be added to the LOG if description == 'OK': description = platformtools.dialog_input('', 'Introduzca una breve descripción del fallo') - # Escribimos en el log algunas variables de Kodi y Alfa que nos ayudarán en el diagnóstico del fallo + # We write in the log some Kodi and Alpha variables that will help us diagnose the failure environment = envtal.list_env() if not environment['log_path']: environment['log_path'] = str(filetools.join(xbmc.translatePath("special://logpath/"), 'kodi.log')) environment['log_size_bytes'] = str(filetools.getsize(environment['log_path'])) environment['log_size'] = str(round(float(environment['log_size_bytes']) / (1024*1024), 3)) - - # Se lee el archivo de LOG + + # LOG file is read log_path = environment['log_path'] if filetools.exists(log_path): - log_size_bytes = int(environment['log_size_bytes']) # Tamaño del archivivo en Bytes - log_size = float(environment['log_size']) # Tamaño del archivivo en MB - log_data = filetools.read(log_path) # Datos del archivo - if not log_data: # Algún error? + log_size_bytes = int(environment['log_size_bytes']) # File size in Bytes + log_size = float(environment['log_size']) # File size in MB + log_data = filetools.read(log_path) # File data + if not log_data: # Some mistake? platformtools.dialog_notification(config.get_localized_string(707427), '', 2) return report_menu(item) - else: # Log no existe o path erroneo? + else: # Log no existe or erroneous path? 
platformtools.dialog_notification(config.get_localized_string(707427), '', 2) return report_menu(item) - # Si se ha introducido la descripción del fallo, se inserta la principio de los datos del LOG - # log_title = '***** DESCRIPCIÓN DEL FALLO *****' + # If the fault description has been entered, it is inserted at the beginning of the LOG data + # log_title = '***** FAULT DESCRIPTION *****' # if description: # log_data = '%s\n%s\n\n%s' %(log_title, description, log_data) - - # Se aleatorizan los nombre de los servidores "patebin" + + # The "pastebin" server names are shuffled for label_a, value_a in list(pastebin_list.items()): if label_a not in pastebin_list_last: pastebin_dir.append(label_a) random.shuffle(pastebin_dir) - pastebin_dir.extend(pastebin_list_last) # Estos servicios los dejamos los últimos - - #pastebin_dir = ['uploadfiles'] # Para pruebas de un servicio - #log_data = 'TEST PARA PRUEBAS DEL SERVICIO' - - # Se recorre la lista de servidores "pastebin" hasta localizar uno activo, con capacidad y disponibilidad + pastebin_dir.extend(pastebin_list_last) # We leave these services for last + + #pastebin_dir = ['uploadfiles'] # For testing a single service + #log_data = 'TEST FOR SERVICE TESTING' + + # The list of "pastebin" servers is traversed until an active one with enough capacity and availability is found for paste_name in pastebin_dir: - if pastebin_list[paste_name][0] != '1': # Si no esta activo el servidore, pasamos + if pastebin_list[paste_name][0] != '1': # If the server is not active, we skip it continue - if pastebin_list[paste_name][6] == 'requests' and not requests_status: # Si "requests" no esta activo, pasamos + if pastebin_list[paste_name][6] == 'requests' and not requests_status: # If "requests" is not available, we skip it continue - paste_host = pastebin_list[paste_name][1] # URL del servidor "pastebin" - paste_sufix = pastebin_list[paste_name][2] # sufijo del API para el POST + paste_host = pastebin_list[paste_name][1] # "pastebin" server URL + paste_sufix = pastebin_list[paste_name][2] # API suffix for the POST paste_title = '' if pastebin_list[paste_name][3] == 'random': - paste_title = "LOG" + str(random.randrange(1, 999999999)) # Título del LOG - paste_post1 = pastebin_list[paste_name][4] # Parte inicial del POST - paste_post2 = pastebin_list[paste_name][5] # Parte secundaria del POST - paste_type = pastebin_list[paste_name][6] # Tipo de downloadpage: DATA o HEADERS - paste_resp = pastebin_list[paste_name][7] # Tipo de respuesta: JSON o datos con REGEX - paste_resp_key = pastebin_list[paste_name][8] # Si es JSON, etiqueta `primaria con la CLAVE - paste_url = pastebin_list[paste_name][9] # Etiqueta primaria para HEADER y sec. para JSON - paste_file_size = float(pastebin_list[paste_name][10]) # Capacidad en MB del servidor - if paste_file_size > 0: # Si es 0, la capacidad es ilimitada - if log_size > paste_file_size: # Verificación de capacidad y tamaño - msg = 'Archivo de log demasiado grande. Reinicie Kodi y reinténtelo' + paste_title = "LOG" + str(random.randrange(1, 999999999)) # LOG title + paste_post1 = pastebin_list[paste_name][4] # Initial part of the POST + paste_post2 = pastebin_list[paste_name][5] # Secondary part of the POST + paste_type = pastebin_list[paste_name][6] # Type of downloadpage request: DATA or HEADERS + paste_resp = pastebin_list[paste_name][7] # Response type: JSON or data with REGEX + paste_resp_key = pastebin_list[paste_name][8] # If JSON, primary label holding the KEY + paste_url = pastebin_list[paste_name][9] # Primary label for HEADER and secondary 
for JSON + paste_file_size = float(pastebin_list[paste_name][10]) # Server capacity in MB + if paste_file_size > 0: # If it is 0, the capacity is unlimited + if log_size > paste_file_size: # Capacity and size verification + msg = 'Log file too large. Restart Kodi and retry' continue - paste_timeout = int(pastebin_list[paste_name][11]) # Timeout para el servidor - paste_random_headers = pastebin_list[paste_name][12] # Utiliza RAMDOM headers para despistar el serv.? - paste_host_return = pastebin_list[paste_name][13] # Parte de url para componer la clave para usuario - paste_host_return_tail = pastebin_list[paste_name][14] # Sufijo de url para componer la clave para usuario + paste_timeout = int(pastebin_list[paste_name][11]) # Timeout for the server + paste_random_headers = pastebin_list[paste_name][12] # Do you use RAMDOM headers to mislead the serv? + paste_host_return = pastebin_list[paste_name][13] # Part of url to compose the key for user + paste_host_return_tail = pastebin_list[paste_name][14] # Url suffix to compose user key paste_headers = {} - if pastebin_list[paste_name][15]: # Headers requeridas por el servidor + if pastebin_list[paste_name][15]: # Headers required by the server paste_headers.update(jsontools.load((pastebin_list[paste_name][15]))) if paste_name in pastebin_one_use: - pastebin_one_use_msg = '[COLOR red]NO ACCEDA al INFORME: se BORRARÁ[/COLOR]' + pastebin_one_use_msg = 'DO NOT ACCESS THE REPORT: it will be DELETED' item.one_use = True else: pastebin_one_use_msg = '' - + try: - # Se crea el POST con las opciones del servidor "pastebin" - # Se trata el formato de "requests" + # POST is created with server options "pastebin" + # This is the "requests" format if paste_type == 'requests': paste_file = {'file': (paste_title+'.log', log_data)} if paste_post1: @@ -1135,14 +1130,14 @@ def report_send(item, description='', fatal=False): paste_params = paste_post2 % (paste_title+'.log', log_size_bytes) else: paste_params = paste_post2 - - #Se trata el formato de downloads + + # This is the download format else: - #log_data = 'Test de Servidor para ver su viabilidad (áéíóúñ¿?)' - if paste_name in ['hastebin']: # Hay algunos servicios que no necesitan "quote" + # log_data = 'Server Test to see its viability (áéíóúñ¿?)' + if paste_name in ['hastebin']: # There are some services that do not need "quote" paste_post = log_data else: - paste_post = urllib.quote_plus(log_data) # Se hace un "quote" de los datos del LOG + paste_post = urllib.quote_plus(log_data) # A "quote" is made from the LOG data if paste_post1: paste_post = '%s%s' % (paste_post1, paste_post) if paste_post2: @@ -1151,104 +1146,101 @@ def report_send(item, description='', fatal=False): else: paste_post += paste_post2 - # Se hace la petición en downloadpage con HEADERS o DATA, con los parámetros del servidor + # Request is made on downloadpage with HEADERS or DATA, with server parameters if paste_type == 'headers': - data = httptools.downloadpage(paste_host+paste_sufix, post=paste_post, - timeout=paste_timeout, random_headers=paste_random_headers, + data = httptools.downloadpage(paste_host+paste_sufix, post=paste_post, + timeout=paste_timeout, random_headers=paste_random_headers, headers=paste_headers).headers elif paste_type == 'data': - data = httptools.downloadpage(paste_host+paste_sufix, post=paste_post, - timeout=paste_timeout, random_headers=paste_random_headers, + data = httptools.downloadpage(paste_host+paste_sufix, post=paste_post, + timeout=paste_timeout, random_headers=paste_random_headers, 
headers=paste_headers).data - - # Si la petición es con formato REQUESTS, se realiza aquí + + # If the request is in REQUESTS format, it is made here elif paste_type == 'requests': - #data = requests.post(paste_host, params=paste_params, files=paste_file, + #data = requests.post(paste_host, params=paste_params, files=paste_file, # timeout=paste_timeout) - data = httptools.downloadpage(paste_host, params=paste_params, file=log_data, - file_name=paste_title+'.log', timeout=paste_timeout, + data = httptools.downloadpage(paste_host, params=paste_params, file=log_data, + file_name=paste_title+'.log', timeout=paste_timeout, random_headers=paste_random_headers, headers=paste_headers) except: msg = 'Inténtelo más tarde' - logger.error('Fallo al guardar el informe. ' + msg) + logger.error('Failed to save the report. ' + msg) logger.error(traceback.format_exc()) continue - # Se analiza la respuesta del servidor y se localiza la clave del upload para formar la url a pasar al usuario + # The server response is analyzed and the upload key is located to form the url to pass to the user if data: paste_host_resp = paste_host - if paste_host_return == None: # Si devuelve la url completa, no se compone + if paste_host_return == None: # If it returns the full url, it is not composed paste_host_resp = '' paste_host_return = '' - - # Respuestas a peticiones REQUESTS - if paste_type == 'requests': # Respuesta de petición tipo "requests"? - if paste_resp == 'json': # Respuesta en formato JSON? + + # Responses to REQUESTS-type requests + if paste_type == 'requests': # Response from a "requests"-type request? + if paste_resp == 'json': # Response in JSON format? if paste_resp_key in data.data: if not paste_url: - key = jsontools.load(data.data)[paste_resp_key] # con una etiqueta + key = jsontools.load(data.data)[paste_resp_key] # with a label else: - key = jsontools.load(data.data)[paste_resp_key][paste_url] # con dos etiquetas anidadas - item.url = "%s%s%s" % (paste_host_resp+paste_host_return, key, + key = jsontools.load(data.data)[paste_resp_key][paste_url] # with two nested tags + item.url = "%s%s%s" % (paste_host_resp+paste_host_return, key, paste_host_return_tail) else: - logger.error('ERROR en formato de retorno de datos. data.data=' + - str(data.data)) + logger.error('ERROR in data return format. data.data=' + str(data.data)) continue - - # Respuestas a peticiones DOWNLOADPAGE - elif paste_resp == 'json': # Respuesta en formato JSON? + + # Responses to DOWNLOADPAGE requests + elif paste_resp == 'json': # Response in JSON format? if paste_resp_key in data: if not paste_url: - key = jsontools.load(data)[paste_resp_key] # con una etiqueta + key = jsontools.load(data)[paste_resp_key] # with a label else: - key = jsontools.load(data)[paste_resp_key][paste_url] # con dos etiquetas anidadas + key = jsontools.load(data)[paste_resp_key][paste_url] # with two nested tags + item.url = "%s%s%s" % (paste_host_resp+paste_host_return, key, paste_host_return_tail) else: - logger.error('ERROR en formato de retorno de datos. data=' + str(data)) + logger.error('ERROR in data return format. data=' + str(data)) continue - elif paste_resp == 'regex': # Respuesta en DATOS, a buscar con un REGEX? + elif paste_resp == 'regex': # Response in DATA, to be searched with a REGEX? 
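# Worked example for this 'regex' branch (assumption: scrapertools.find_single_match wraps
# re.search and returns the first captured group, or '' when nothing matches). For the
# 'ghostbin' entry above the pattern is '<title>(.*?)\s*-\s*Ghostbin<\/title>', so a response
# page titled 'abc123 - Ghostbin' yields key = 'abc123' and the report URL is built as
# 'https://ghostbin.com/' + 'paste/' + 'abc123'.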
key = scrapertools.find_single_match(data, paste_resp_key) if key: - item.url = "%s%s%s" % (paste_host_resp+paste_host_return, key, + item.url = "%s%s%s" % (paste_host_resp+paste_host_return, key, paste_host_return_tail) else: - logger.error('ERROR en formato de retorno de datos. data=' + str(data)) + logger.error('ERROR in data return format. data=' + str(data)) continue - elif paste_type == 'headers': # Respuesta en HEADERS, a buscar en "location"? + elif paste_type == 'headers': # Answer in HEADERS, to search in "location"? if paste_url in data: - item.url = data[paste_url] # Etiqueta de retorno de la clave - item.url = urlparse.urljoin(paste_host_resp + paste_host_return, + item.url = data[paste_url] # Key return label + item.url = urlparse.urljoin(paste_host_resp + paste_host_return, item.url + paste_host_return_tail) else: - logger.error('ERROR en formato de retorno de datos. response.headers=' + - str(data)) + logger.error('ERROR in data return format. response.headers=' + str(data)) continue else: - logger.error('ERROR en formato de retorno de datos. paste_type=' + - str(paste_type) + ' / DATA: ' + data) + logger.error('ERROR in data return format. paste_type=' + str(paste_type) + ' / DATA: ' + data) continue - status = True # Operación de upload terminada con éxito - logger.info('Report created: ' + str(item.url)) #Se guarda la URL del informe a usuario - # if fatal: # De uso futuro, para logger.crash - # platformtools.dialog_ok('Informe de ERROR en Alfa CREADO', 'Repórtelo en el foro agregando ERROR FATAL y esta URL: ', '[COLOR gold]%s[/COLOR]' % item.url, pastebin_one_use_msg) - # else: # Se pasa la URL del informe a usuario - # platformtools.dialog_ok('Informe de Fallo en Alfa CREADO', 'Repórtelo en el foro agregando una descripcion del fallo y esta URL: ', '[COLOR gold]%s[/COLOR]' % item.url, pastebin_one_use_msg) + status = True # Upload operation completed successfully + logger.info('Report created: ' + str(item.url)) # The URL of the user report is saved + # if fatal: # For future use, for logger.crash + # platformtools.dialog_ok('KoD CREATED ERROR report', 'Report it in the forum by adding FATAL ERROR and this URL: ', '[COLOR gold]%s[/COLOR]' % item.url, pastebin_one_use_msg) + # else: # Report URL passed to user + # platformtools.dialog_ok('KoD Crash Report CREATED', 'Report it on the forum by adding a bug description and this URL: ', '[COLOR gold]%s[/COLOR]' % item.url, pastebin_one_use_msg) - break # Operación terminado, no seguimos buscando - - if not status and not fatal: # Operación fracasada... - platformtools.dialog_notification(config.get_localized_string(707428), msg) #... se notifica la causa + break # Operation finished, we don't keep looking + + if not status and not fatal: # Operation failed ... + platformtools.dialog_notification(config.get_localized_string(707428), msg) #... 
cause is reported logger.error(config.get_localized_string(707428) + msg) - - # Se devuelve control con item.url actualizado, así aparecerá en el menú la URL del informe + + # Control is returned with updated item.url, so the report URL will appear in the menu item.action = 'report_menu' platformtools.itemlist_update(item, True) # return report_menu(item) - - + + def call_browser(item): import webbrowser if not webbrowser.open(item.url): diff --git a/specials/side_menu.py b/specials/side_menu.py index b0da3e4d..1ede1c95 100644 --- a/specials/side_menu.py +++ b/specials/side_menu.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # ------------------------------------------------------------ -#from builtins import str +# from builtins import str import sys PY3 = False if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int @@ -9,8 +9,7 @@ if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int import os from core.item import Item from core import jsontools -from platformcode import config, logger -from platformcode import launcher +from platformcode import config, logger, launcher import xbmc, xbmcgui, xbmcplugin, xbmcaddon media_path = os.path.join(config.get_runtime_path(), "resources/skins/Default/media/side_menu/") @@ -113,7 +112,7 @@ class Main(xbmcgui.WindowXMLDialog): self.items = [] def onInit(self): - #### Compatibilidad con Kodi 18 #### + #### Kodi 18 compatibility #### if config.get_platform(True)['num_version'] < 18: self.setCoordinateResolution(2) diff --git a/specials/trailertools.py b/specials/trailertools.py index 7db64695..a240cea3 100644 --- a/specials/trailertools.py +++ b/specials/trailertools.py @@ -16,10 +16,10 @@ from past.utils import old_div if PY3: #from future import standard_library #standard_library.install_aliases() - import urllib.parse as urllib # Es muy lento en PY2. En PY3 es nativo + import urllib.parse as urllib # It is very slow in PY2. 
In PY3 it is native import urllib.parse as urlparse else: - import urllib # Usamos el nativo de PY2 que es más rápido + import urllib # We use the native of PY2 which is faster import urlparse import re @@ -37,7 +37,7 @@ def_lang = info_language[config.get_setting("info_language", "videolibrary")] result = None window_select = [] -# Para habilitar o no la opción de búsqueda manual +# To enable or disable the manual search option if config.get_platform() != "plex": keyboard = True else: @@ -47,14 +47,14 @@ else: def buscartrailer(item, trailers=[]): logger.info() - # Lista de acciones si se ejecuta desde el menú contextual + # List of actions if run from context menu if item.action == "manual_search" and item.contextual: itemlist = manual_search(item) item.contentTitle = itemlist[0].contentTitle elif 'search' in item.action and item.contextual: itemlist = globals()[item.action](item) else: - # Se elimina la opción de Buscar Trailer del menú contextual para evitar redundancias + # Remove Trailer Search option from context menu to avoid redundancies if isinstance(item.context, str) and "buscar_trailer" in item.context: item.context = item.context.replace("buscar_trailer", "") elif isinstance(item.context, list) and "buscar_trailer" in item.context: @@ -80,8 +80,8 @@ def buscartrailer(item, trailers=[]): item.year = item.infoLabels['year'] - logger.info("Búsqueda: %s" % item.contentTitle) - logger.info("Año: %s" % item.year) + logger.info("Search: %s" % item.contentTitle) + logger.info("Year: %s" % item.year) if item.infoLabels['trailer'] and not trailers: url = item.infoLabels['trailer'] if "youtube" in url: @@ -98,8 +98,7 @@ def buscartrailer(item, trailers=[]): itemlist.extend(tmdb_trailers(item, tipo)) else: for trailer in trailers: - title = trailer['name'] + " [" + trailer['size'] + "p] (" + trailer['language'].replace("en", "ING") \ - .replace("it", "ITA") + ") [tmdb/youtube]" + title = trailer['name'] + " [" + trailer['size'] + "p] (" + trailer['language'].replace("en", "ING").replace("it", "ITA") + ") [tmdb/youtube]" itemlist.append(item.clone(action="play", title=title, url=trailer['url'], server="youtube")) except: import traceback @@ -111,7 +110,7 @@ def buscartrailer(item, trailers=[]): title = "%s" itemlist.append(item.clone(title=title % config.get_localized_string(70507), action="youtube_search")) itemlist.append(item.clone(title=title % config.get_localized_string(70024), action="filmaffinity_search")) - # Si se trata de una serie, no se incluye la opción de buscar en Abandomoviez + # If it is a series, the option to search in Abandomoviez is not included if not item.show and not item.infoLabels['tvshowtitle']: itemlist.append(item.clone(title=title % config.get_localized_string(70508), action="abandomoviez_search")) @@ -152,8 +151,7 @@ def tmdb_trailers(item, tipo="movie"): if tmdb_search: for result in tmdb_search.get_videos(): - title = result['name'] + " [" + result['size'] + "p] (" + result['language'].replace("en", "ING") \ - .replace("it", "ITA") + ") [tmdb/youtube]" + title = result['name'] + " [" + result['size'] + "p] (" + result['language'].replace("en", "ING").replace("it", "ITA") + ") [tmdb/youtube]" itemlist.append(item.clone(action="play", title=title, url=result['url'], server="youtube")) return itemlist @@ -165,7 +163,7 @@ def youtube_search(item): titulo = item.contentTitle if item.extra != "youtube": titulo += " trailer" - # Comprueba si es una búsqueda de cero o viene de la opción Siguiente + # Check if it is a zero search or comes from the Next option if 
item.page != "": data = httptools.downloadpage(item.page).data else: @@ -183,8 +181,7 @@ if item.contextual: scrapedtitle = "%s" % scrapedtitle url = urlparse.urljoin('https://www.youtube.com/', scrapedurl) - itemlist.append(item.clone(title=scrapedtitle, action="play", server="youtube", url=url, - thumbnail=scrapedthumbnail)) + itemlist.append(item.clone(title=scrapedtitle, action="play", server="youtube", url=url, thumbnail=scrapedthumbnail)) next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"[^>]+><span class="yt-uix-button-content">' 'Siguiente') if next_page != "": @@ -207,7 +204,7 @@ def abandomoviez_search(item): logger.info() - # Comprueba si es una búsqueda de cero o viene de la opción Siguiente + # Check whether this is a search from scratch or it comes from the Next option if item.page != "": data = httptools.downloadpage(item.page).data else: @@ -226,7 +223,7 @@ patron = '(?:<td width="85"|<div class="col-md-2 col-sm-2 col-xs-3">).*?<img src="([^"]+)"' \ '.*?href="([^"]+)">(.*?)(?:<\/td>|<\/small>)' matches = scrapertools.find_multiple_matches(data, patron) - # Si solo hay un resultado busca directamente los trailers, sino lista todos los resultados + # If there is only one result, search directly for its trailers; otherwise list all the results if len(matches) == 1: item.url = urlparse.urljoin("http://www.abandomoviez.net/%s" % item.prefix, matches[0][1]) item.thumbnail = matches[0][0] @@ -235,26 +232,22 @@ for scrapedthumbnail, scrapedurl, scrapedtitle in matches: scrapedurl = urlparse.urljoin("http://www.abandomoviez.net/%s" % item.prefix, scrapedurl) scrapedtitle = scrapertools.htmlclean(scrapedtitle) - itemlist.append(item.clone(title=scrapedtitle, action="search_links_abando", - url=scrapedurl, thumbnail=scrapedthumbnail)) + itemlist.append(item.clone(title=scrapedtitle, action="search_links_abando", url=scrapedurl, thumbnail=scrapedthumbnail)) next_page = scrapertools.find_single_match(data, '<a href="([^"]+)">Siguiente') if next_page != "": next_page = urlparse.urljoin("http://www.abandomoviez.net/%s" % item.prefix, next_page) - itemlist.append(item.clone(title=config.get_localized_string(70502), action="abandomoviez_search", page=next_page, thumbnail="", - text_color="")) + itemlist.append(item.clone(title=config.get_localized_string(70502), action="abandomoviez_search", page=next_page, thumbnail="", text_color="")) if not itemlist: - itemlist.append(item.clone(title=config.get_localized_string(70501), action="", thumbnail="", - text_color="")) + itemlist.append(item.clone(title=config.get_localized_string(70501), action="", thumbnail="", text_color="")) if keyboard: if item.contextual: title = "%s" else: title = "%s" - itemlist.append(item.clone(title=title % config.get_localized_string(70511), - action="manual_search", thumbnail="", extra="abandomoviez")) + itemlist.append(item.clone(title=title % config.get_localized_string(70511), action="manual_search", thumbnail="", extra="abandomoviez")) return itemlist @@ -321,7 +314,7 @@ def filmaffinity_search(item): item.url = item.filmaffinity return search_links_filmaff(item) - # Comprueba si es una búsqueda de cero o viene de la opción Siguiente + # Check whether this is a search from scratch or it comes from the Next option if item.page != "": data = httptools.downloadpage(item.page).data else: @@ -334,7 +327,7 @@ patron = '<div class="mc-poster">.*?<img.*?src="([^"]+)".*?'
\ '<div class="mc-title"><a href="/es/film(\d+).html"[^>]+>(.*?)<img' matches = scrapertools.find_multiple_matches(data, patron) - # Si solo hay un resultado, busca directamente los trailers, sino lista todos los resultados + # If there is only one result, search directly for its trailers; otherwise list all the results if len(matches) == 1: item.url = "http://www.filmaffinity.com/es/evideos.php?movie_id=%s" % matches[0][1] item.thumbnail = matches[0][0] @@ -349,26 +342,22 @@ if PY3: scrapedtitle = unicode(scrapedtitle, encoding="utf-8", errors="ignore") scrapedtitle = scrapertools.htmlclean(scrapedtitle) - itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, - action="search_links_filmaff", thumbnail=scrapedthumbnail)) + itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="search_links_filmaff", thumbnail=scrapedthumbnail)) next_page = scrapertools.find_single_match(data, '<a href="([^"]+)">>></a>') if next_page != "": next_page = urlparse.urljoin("http://www.filmaffinity.com/es/", next_page) - itemlist.append(item.clone(title=config.get_localized_string(70502), page=next_page, action="filmaffinity_search", thumbnail="", - text_color="")) + itemlist.append(item.clone(title=config.get_localized_string(70502), page=next_page, action="filmaffinity_search", thumbnail="", text_color="")) if not itemlist: - itemlist.append(item.clone(title=config.get_localized_string(70501) % item.contentTitle, - action="", thumbnail="", text_color="")) + itemlist.append(item.clone(title=config.get_localized_string(70501) % item.contentTitle, action="", thumbnail="", text_color="")) if keyboard: if item.contextual: title = "%s" else: title = "%s" - itemlist.append(item.clone(title=title % config.get_localized_string(70513), - action="manual_search", thumbnail="", extra="filmaffinity")) + itemlist.append(item.clone(title=title % config.get_localized_string(70513), action="manual_search", thumbnail="", extra="filmaffinity")) return itemlist @@ -400,8 +389,7 @@ def search_links_filmaff(item): scrapedtitle += " [" + server + "]" if item.contextual: scrapedtitle = "%s" % scrapedtitle - itemlist.append(item.clone(title=scrapedtitle, url=trailer_url, server=server, action="play", - thumbnail=thumbnail)) + itemlist.append(item.clone(title=scrapedtitle, url=trailer_url, server=server, action="play", thumbnail=thumbnail)) itemlist = servertools.get_servers_itemlist(itemlist) if keyboard: @@ -409,8 +397,7 @@ title = "%s" else: title = "%s" - itemlist.append(item.clone(title=title % config.get_localized_string(70513), - action="manual_search", thumbnail="", extra="filmaffinity")) + itemlist.append(item.clone(title=title % config.get_localized_string(70513), action="manual_search", thumbnail="", extra="filmaffinity")) return itemlist @@ -451,7 +438,7 @@ try: self.control_list.addItems(self.items) self.setFocus(self.control_list) def onClick(self, id): - # Boton Cancelar y [X] + # Cancel button and [X] if id == 5: global window_select, result self.result = "_no_video" diff --git a/specials/tvmoviedb.py b/specials/tvmoviedb.py index dc6263b2..03250850 100644 --- a/specials/tvmoviedb.py +++ b/specials/tvmoviedb.py @@ -1,22 +1,14 @@ # -*- coding: utf-8 -*- -import re -import urllib +import re, urllib, xbmcaddon from base64 import b64decode as bdec -import xbmcaddon - from channelselector import get_thumb -from core import filetools -from core import httptools -from core import jsontools -from core import scrapertools +from core import filetools,
httptools, jsontools, scrapertools, trakt_tools from core.item import Item from core.support import typo from core.tmdb import Tmdb -from core import trakt_tools -from platformcode import config, logger -from platformcode import platformtools +from platformcode import config, logger, platformtools info_language = ["de", "en", "es", "fr", "it", "pt"] # from videolibrary.json def_lang = info_language[config.get_setting("info_language", "videolibrary")] @@ -102,8 +94,7 @@ def search_(item): return listado_fa(item) if "myanimelist" in item.url: item.url += texto.replace(" ", "%20") - item.url += "&type=0&score=0&status=0&p=0&r=0&sm=0&sd=0&sy=0&em=0&ed=0&ey=0&c[0]=a" \ - "&c[1]=b&c[2]=c&c[3]=d&c[4]=f&gx=0" + item.url += "&type=0&score=0&status=0&p=0&r=0&sm=0&sd=0&sy=0&em=0&ed=0&ey=0&c[0]=a&c[1]=b&c[2]=c&c[3]=d&c[4]=f&gx=0" item.action = "busqueda_mal" return busqueda_mal(item) @@ -125,8 +116,7 @@ def busqueda(item): logger.info() - new_item = Item(title=item.contentTitle, text=item.contentTitle.replace("+", " "), mode=item.contentType, - infoLabels=item.infoLabels) + new_item = Item(title=item.contentTitle, text=item.contentTitle.replace("+", " "), mode=item.contentType, infoLabels=item.infoLabels) from specials import search return search.channel_search(new_item) @@ -278,35 +268,26 @@ def trakt(item): itemlist.append(item.clone(title=typo(config.get_localized_string(70048), 'color kod bold'), extra="cuenta")) else: item.extra = "movie" - # Se comprueba si existe un token guardado y sino se ejecuta el proceso de autentificación + # Check whether a saved token exists; if not, run the authentication process if not token_auth: - #folder = (config.get_platform() == "plex") + # folder = (config.get_platform() == "plex") itemlist.append(item.clone(title=config.get_localized_string(70054), action="auth_trakt", folder=folder)) else: itemlist.append(item.clone(title=config.get_localized_string(70055), action="", )) itemlist.append( - item.clone(title=config.get_localized_string(60651), action="acciones_trakt", url="users/me/watchlist/movies%s" % page, - order="added", how="desc")) + item.clone(title=config.get_localized_string(60651), action="acciones_trakt", url="users/me/watchlist/movies%s" % page, order="added", how="desc")) itemlist.append( - item.clone(title=config.get_localized_string(60652), action="acciones_trakt", url="users/me/watchlist/shows%s" % page, - extra="show", - order="added", how="desc")) + item.clone(title=config.get_localized_string(60652), action="acciones_trakt", url="users/me/watchlist/shows%s" % page, extra="show", order="added", how="desc")) itemlist.append(item.clone(title=config.get_localized_string(70056), action="", )) itemlist.append( - item.clone(title=config.get_localized_string(60651), action="acciones_trakt", url="users/me/watched/movies%s" % page, - order="added", how="desc")) + item.clone(title=config.get_localized_string(60651), action="acciones_trakt", url="users/me/watched/movies%s" % page, order="added", how="desc")) itemlist.append( - item.clone(title=config.get_localized_string(60652), action="acciones_trakt", url="users/me/watched/shows%s" % page, - extra="show", - order="added", how="desc")) + item.clone(title=config.get_localized_string(60652), action="acciones_trakt", url="users/me/watched/shows%s" % page, extra="show", order="added", how="desc")) itemlist.append(item.clone(title=config.get_localized_string(70068), action="", )) itemlist.append( - item.clone(title=config.get_localized_string(60651), action="acciones_trakt",
url="users/me/collection/movies%s" % page, - order="added", how="desc")) + item.clone(title=config.get_localized_string(60651), action="acciones_trakt", url="users/me/collection/movies%s" % page, order="added", how="desc")) itemlist.append( - item.clone(title=config.get_localized_string(60652), action="acciones_trakt", url="users/me/collection/shows%s" % page, - extra="show", - order="added", how="desc")) + item.clone(title=config.get_localized_string(60652), action="acciones_trakt", url="users/me/collection/shows%s" % page, extra="show", order="added", how="desc")) itemlist.append( item.clone(title=config.get_localized_string(70057), action="acciones_trakt", url="users/me/lists", )) @@ -320,24 +301,17 @@ def mal(item): item.login = True itemlist.append( - item.clone(title=config.get_localized_string(70058), url="https://myanimelist.net/topanime.php?type=tv&limit=0", action="top_mal", - contentType="tvshow", extra="tv")) - itemlist.append(item.clone(title=config.get_localized_string(70059), url="https://myanimelist.net/topanime.php?type=movie&limit=0", - action="top_mal", - contentType="movie", extra="movie")) + item.clone(title=config.get_localized_string(70058), url="https://myanimelist.net/topanime.php?type=tv&limit=0", action="top_mal", contentType="tvshow", extra="tv")) + itemlist.append(item.clone(title=config.get_localized_string(70059), url="https://myanimelist.net/topanime.php?type=movie&limit=0", action="top_mal", contentType="movie", extra="movie")) itemlist.append( - item.clone(title=config.get_localized_string(70061), url="https://myanimelist.net/topanime.php?type=ova&limit=0", action="top_mal", - contentType="tvshow", extra="tv", tipo="ova")) + item.clone(title=config.get_localized_string(70061), url="https://myanimelist.net/topanime.php?type=ova&limit=0", action="top_mal", contentType="tvshow", extra="tv", tipo="ova")) itemlist.append( - item.clone(title=config.get_localized_string(70028), url="https://myanimelist.net/topanime.php?type=bypopularity&limit=0", - action="top_mal")) - itemlist.append(item.clone(title=config.get_localized_string(70060), url="https://myanimelist.net/topanime.php?type=upcoming&limit=0", - action="top_mal")) + item.clone(title=config.get_localized_string(70028), url="https://myanimelist.net/topanime.php?type=bypopularity&limit=0", action="top_mal")) + itemlist.append(item.clone(title=config.get_localized_string(70060), url="https://myanimelist.net/topanime.php?type=upcoming&limit=0", action="top_mal")) itemlist.append(item.clone(title=config.get_localized_string(70062), url="", action="indices_mal")) itemlist.append(item.clone(title=config.get_localized_string(70063), url="", action="indices_mal")) if config.get_platform() != "plex": - itemlist.append(item.clone(title=config.get_localized_string(70064), url="https://myanimelist.net/anime.php?q=", - action="search_")) + itemlist.append(item.clone(title=config.get_localized_string(70064), url="https://myanimelist.net/anime.php?q=", action="search_")) itemlist.append(item.clone(title=typo(config.get_localized_string(70038), 'bold submenu'), action="filtro_mal")) itemlist.append(item.clone(title=typo(config.get_localized_string(70057), 'bold submenu'), action="cuenta_mal")) @@ -345,15 +319,15 @@ def mal(item): return itemlist -##-------------------- SECCION TMDB ------------------------## +##-------------------- SECTION TMDB ------------------------## def listado_tmdb(item): - # Listados principales de la categoría Tmdb (Más populares, más vistas, etc...) 
+ # Main listings of the Tmdb category (Most popular, Most viewed, etc ...) itemlist = [] item.fanart = default_fan if not item.pagina: item.pagina = 1 - # Listado de actores + # List of actors if 'nm' in item.infoLabels['imdb_id']: try: @@ -370,7 +344,7 @@ def listado_tmdb(item): else: ob_tmdb = Tmdb(discover=item.search, tipo=item.extra, idioma_busqueda=langt) - # Sagas y colecciones + # Sagas and collections if "collection" in item.search["url"]: try: new_item = item.clone(action="", url='') @@ -394,7 +368,7 @@ def listado_tmdb(item): else: try: orden = False - # Si se hace una búsqueda por actores o directores, se extraen esos resultados + # If you do a search for actors or directors, those results are extracted if "cast" in ob_tmdb.result and not item.crew: ob_tmdb.results = ob_tmdb.result["cast"] orden = True @@ -404,7 +378,7 @@ def listado_tmdb(item): for i in range(0, len(ob_tmdb.results)): new_item = item.clone(action="detalles", url='', infoLabels={'mediatype': item.contentType}) new_item.infoLabels = ob_tmdb.get_infoLabels(new_item.infoLabels, origen=ob_tmdb.results[i]) - # Si no hay sinopsis en idioma elegido, buscar en el alternativo + # If there is no synopsis in the chosen language, search in the alternative if not new_item.infoLabels["plot"] and not 'person' in item.search["url"]: ob_tmdb2 = Tmdb(id_Tmdb=new_item.infoLabels["tmdb_id"], tipo=item.extra, idioma_busqueda=langt_alt) new_item.infoLabels["plot"] = ob_tmdb2.get_sinopsis() @@ -443,7 +417,7 @@ def listado_tmdb(item): % (typo(new_item.contentTitle,'bold'), typo(new_item.infoLabels['rating'].replace("0.0", ""),'color kod bold')) else: - # Si es una búsqueda de personas se incluye en el título y fanart una película por la que es conocido + # If it is a search for people, a film for which it is known is included in the title and fanart known_for = ob_tmdb.results[i].get("known_for") type=item.type if known_for: @@ -475,7 +449,7 @@ def detalles(item): itemlist = [] images = {} data = "" - # Si viene de seccion imdb + # If it comes from imdb section if not item.infoLabels["tmdb_id"]: headers = [['Accept-Language', langi]] #data = httptools.downloadpage("http://www.imdb.com/title/" + item.infoLabels['imdb_id'], headers=headers, @@ -483,7 +457,7 @@ def detalles(item): data = httptools.downloadpage("http://www.imdb.com/title/" + item.infoLabels['imdb_id'], headers=headers).data pics = scrapertools.find_single_match(data, 'showAllVidsAndPics.*?href=".*?(tt\d+)') - # Imágenes imdb + # Imdb images if pics: images["imdb"] = {'url': 'http://www.imdb.com/_json/title/%s/mediaviewer' % pics} @@ -495,7 +469,7 @@ def detalles(item): try: item.infoLabels = ob_tmdb.get_infoLabels(item.infoLabels) - # Si no hay sinopsis en idioma elegido, buscar en el alternativo + # If there is no synopsis in the chosen language, search in the alternative if not item.infoLabels["plot"]: item.infoLabels["plot"] = ob_tmdb.get_sinopsis(idioma_alternativo=langt_alt) except: @@ -505,7 +479,7 @@ def detalles(item): if item.infoLabels['thumbnail']: item.thumbnail = item.infoLabels['thumbnail'] - # Sinopsis, votos de imdb + # Synopsis, votes from imdb if data: plot = scrapertools.find_single_match(data, 'class="inline canwrap" itemprop="description">(.*?)</div>') plot = scrapertools.htmlclean(plot) @@ -525,29 +499,24 @@ def detalles(item): itemlist.append(item.clone(title="--- %s ---" % item.infoLabels['tagline'], action="")) title = item.contentType.replace("movie", config.get_localized_string(70283)).replace("tvshow", "serie") - # Búsqueda por títulos idioma 
elegido y/o versión original y español + # Search by titles chosen language and / or original version and Spanish itemlist.append(item.clone(action="busqueda", title=config.get_localized_string(70069) % (title, item.contentTitle))) if item.infoLabels['originaltitle'] and item.contentTitle != item.infoLabels['originaltitle']: - itemlist.append(item.clone(action="busqueda", contentTitle=item.infoLabels['originaltitle'], - title=config.get_localized_string(70070) % item.infoLabels['originaltitle'])) + itemlist.append(item.clone(action="busqueda", contentTitle=item.infoLabels['originaltitle'], title=config.get_localized_string(70070) % item.infoLabels['originaltitle'])) if langt != "es" and langt != "en" and item.infoLabels["tmdb_id"]: tmdb_lang = Tmdb(id_Tmdb=item.infoLabels["tmdb_id"], tipo=item.extra, idioma_busqueda=def_lang) if tmdb_lang.result.get("title") and tmdb_lang.result["title"] != item.contentTitle \ and tmdb_lang.result["title"] != item.infoLabels['originaltitle']: tmdb_lang = tmdb_lang.result["title"] - itemlist.append(item.clone(action="busqueda", title=config.get_localized_string(70066) % tmdb_lang, - contentTitle=tmdb_lang)) + itemlist.append(item.clone(action="busqueda", title=config.get_localized_string(70066) % tmdb_lang, contentTitle=tmdb_lang)) - # En caso de serie, opción de info por temporadas + # In case of series, option of info by seasons if item.contentType == "tvshow" and item.infoLabels['tmdb_id']: - itemlist.append(item.clone(action="info_seasons", - title=config.get_localized_string(70067) % item.infoLabels["number_of_seasons"])) - # Opción de ver el reparto y navegar por sus películas/series + itemlist.append(item.clone(action="info_seasons", title=config.get_localized_string(70067) % item.infoLabels["number_of_seasons"])) + # Option to watch the cast and browse their movies / series if item.infoLabels['tmdb_id']: - itemlist.append(item.clone(action="reparto", title=config.get_localized_string(70071), - infoLabels={'tmdb_id': item.infoLabels['tmdb_id'], - 'mediatype': item.contentType})) + itemlist.append(item.clone(action="reparto", title=config.get_localized_string(70071), infoLabels={'tmdb_id': item.infoLabels['tmdb_id'], 'mediatype': item.contentType})) if config.is_xbmc(): item.contextual = True @@ -556,8 +525,7 @@ def detalles(item): try: images['tmdb'] = ob_tmdb.result["images"] - itemlist.append(item.clone(action="imagenes", title=config.get_localized_string(70316), images=images, - extra="menu")) + itemlist.append(item.clone(action="imagenes", title=config.get_localized_string(70316), images=images, extra="menu")) except: pass @@ -580,9 +548,7 @@ def detalles(item): url_album = scrapertools.find_single_match(data_music, 'album(?:|s) on request.*?href="([^"]+)"') if url_album: url_album = "https://nl.hideproxy.me" + url_album - itemlist.append( - item.clone(action="musica_movie", title=config.get_localized_string(70317), url=url_album, - )) + itemlist.append(item.clone(action="musica_movie", title=config.get_localized_string(70317), url=url_album)) except: pass @@ -591,7 +557,7 @@ def detalles(item): itemlist.append(item.clone(title=config.get_localized_string(70318), action="menu_trakt")) itemlist.append(item.clone(title="", action="")) - # Es parte de una colección + # It is part of a collection try: if ob_tmdb.result.get("belongs_to_collection"): new_item = item.clone(search='', infoLabels={'mediatype': item.contentType}) @@ -602,12 +568,11 @@ def detalles(item): if saga["backdrop_path"]: new_item.fanart = 'http://image.tmdb.org/t/p/original' + 
saga["backdrop_path"] new_item.search = {'url': 'collection/%s' % saga['id'], 'language': langt} - itemlist.append(new_item.clone(title=config.get_localized_string(70327) % saga["name"], action="listado_tmdb", - )) + itemlist.append(new_item.clone(title=config.get_localized_string(70327) % saga["name"], action="listado_tmdb")) except: pass - # Películas/Series similares y recomendaciones + # Similar Movies / Series and Recommendations if item.infoLabels['tmdb_id']: item.extra = item.contentType.replace('tvshow', 'tv') title = title.replace("película", config.get_localized_string(70137)).replace("serie", config.get_localized_string(30123)) @@ -624,7 +589,7 @@ def reparto(item): - # Actores y equipo de rodaje de una película/serie + # Actors and film crew for a movie / series itemlist = [] item.extra=item.contentType.replace('tvshow','tv') item.search = {'url': '%s/%s/credits' % (item.extra, item.infoLabels['tmdb_id'])} @@ -672,7 +637,7 @@ def info_seasons(item): - # Info de temporadas y episodios + # Season and episode info itemlist = [] ob_tmdb = Tmdb(id_Tmdb=item.infoLabels["tmdb_id"], tipo="tv", idioma_busqueda=langt) @@ -719,7 +684,7 @@ def indices_tmdb(item): - # Indices por genero y año + # Indices by genre and year itemlist = [] from datetime import datetime if config.get_localized_string(70032) in item.title: @@ -835,7 +800,7 @@ def filtro(item): def filtrado(item, values): values_copy = values.copy() - # Guarda el filtro para que sea el que se cargue por defecto + # Save the filter to be the one loaded by default if "save" in values and values["save"]: values_copy.pop("save") config.set_setting("filtro_defecto_" + item.extra, values_copy, item.channel) @@ -882,24 +847,24 @@ def musica_movie(item): return itemlist -##-------------------- SECCION IMDB ------------------------## +##-------------------- SECTION IMDB ------------------------## def listado_imdb(item): - # Método principal para secciones de imdb + # Main method for imdb sections itemlist = [] headers = [['Accept-Language', langi]] if "www.imdb.com" in item.url: - #data = httptools.downloadpage(item.url, headers=headers, replace_headers=True).data + # data = httptools.downloadpage(item.url, headers=headers, replace_headers=True).data data = httptools.downloadpage(item.url, headers=headers).data else: url = 'http://www.imdb.com/search/title?'
+ item.url - #data = httptools.downloadpage(url, headers=headers, replace_headers=True).data + # data = httptools.downloadpage(url, headers=headers, replace_headers=True).data data = httptools.downloadpage(url, headers=headers).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) - # Listado de actores + # List of actors if 'search/name' in item.url: patron = '<td class="image">.*?src="([^"]+)".*?href="/name/(nm\d+).*?>([^<]+)<.*?href.*?>([^<]+)</a>' \ '</span>(.*?)</td>' @@ -996,7 +961,7 @@ def filtro_imdb(item): valores = {} dict_values = None - # Se utilizan los valores por defecto/guardados + # Default / saved values are used valores_guardados = config.get_setting("filtro_defecto_imdb_" + item.extra, item.channel) if valores_guardados: dict_values = valores_guardados @@ -1086,7 +1051,7 @@ def filtrado_imdb(item, values): values_copy = values.copy() - # Guarda el filtro para que sea el que se cargue por defecto + # Save the filter to be the one loaded by default if "save" in values and values["save"]: values_copy.pop("save") config.set_setting("filtro_defecto_imdb_" + item.extra, values_copy, item.channel) @@ -1119,7 +1084,7 @@ def indices_imdb(item): - # Índices imdb por año y genero + # Imdb indices by year and genre itemlist = [] from datetime import datetime if config.get_localized_string(70032) in item.title: @@ -1149,12 +1114,12 @@ return itemlist -##-------------------- SECCION FILMAFFINITY ------------------------## +##-------------------- FILMAFFINITY SECTION ------------------------## def listado_fa(item): - # Método para listados principales de filmaffinity + # Filmaffinity main listing method itemlist = [] - # Listados con paginación por post + # Listings with pagination per post if item.extra == "top": if item.page_fa: post = "from=%s" % item.page_fa @@ -1176,7 +1141,7 @@ data = re.sub(r"\s{2}", " ", data) votaciones = [] - # Si es la sección de estrenos cambia la estructura del scraper + # If it is the premieres section, the scraper structure changes if item.extra == "estrenos": patron = '<i class="fa fa-calendar"></i>\s*(\d+[^<]+)<(.*?)(?:<div class="panel panel-default">|' \ '<div class="text-center")' @@ -1269,7 +1234,7 @@ def indices_fa(item): - # Índices por genero, año, temas y sagas/colecciones + # Indexes by genre, year, themes and sagas / collections itemlist = [] if item.url: data = httptools.downloadpage(item.url).data @@ -1357,7 +1322,7 @@ def temas_fa(item): - # Películas y series por temas + # Movies and series by themes itemlist = [] data = httptools.downloadpage(item.url).data @@ -1402,7 +1367,7 @@ def detalles_fa(item): data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) - # Se extrae el título original para posibles búsquedas en tmdb posteriores + # The original title is extracted for possible later searches in tmdb orig_title = scrapertools.find_single_match(data, 'itemprop="datePublished">.*?<dd>([^<]+)</dd>').strip() if item.contentType == "movie": item.infoLabels['originaltitle'] = re.sub(r"(?i)\(TV Series\)|\(S\)|\(TV\)", "", orig_title) @@ -1426,11 +1391,11 @@ ob_tmdb = Tmdb(id_Tmdb=ob_tmdb.get_id(), tipo=item_tmdb.extra, idioma_busqueda=langt) item.infoLabels = ob_tmdb.get_infoLabels(item.infoLabels) - # Si no hay sinopsis en idioma elegido, buscar en el alternativo + # If there is no synopsis in the chosen language, search in
the alternative if not item.infoLabels["plot"]: item.infoLabels["plot"] = ob_tmdb.get_sinopsis(idioma_alternativo=langt_alt) - # Se concatena el plot de filmaffinity al de tmdb si lo hay + # The filmaffinity plot is concatenated to the tmdb plot if any plot = scrapertools.find_single_match(data, '<dd itemprop="description">(.*?)</dd>') plot = plot.replace("<br><br />", "\n") plot = scrapertools.decodeHtmlentities(plot).replace(" (FILMAFFINITY)", "") @@ -1439,7 +1404,7 @@ elif plot and not item.infoLabels['plot']: item.infoLabels['plot'] = plot - # Se busca y rellena con la info de filmaffinity para diferenciarla de tmdb + # Looked up and filled in with the filmaffinity info to differentiate it from tmdb if not item.infoLabels['duration']: duration = scrapertools.find_single_match(data, '<dd itemprop="duration">(\d+)') if duration: @@ -1544,7 +1509,7 @@ token_auth = config.get_setting("token_trakt", "trakt") if token_auth and ob_tmdb.result: itemlist.append(item.clone(title=config.get_localized_string(70323), action="menu_trakt")) - # Acciones si se configura cuenta en FA (Votar y añadir/quitar en listas) + # Actions if an FA account is configured (vote and add / remove from lists) mivoto = scrapertools.find_single_match(data, 'bg-my-rating.*?>\s*(\d+)') itk = scrapertools.find_single_match(data, 'data-itk="([^"]+)"') folder = not config.is_xbmc() @@ -1568,7 +1533,7 @@ new_item.infoLabels["duration"] = "" itemlist.append(new_item) - # Si pertenece a una saga/colección + # If it belongs to a saga / collection if ob_tmdb.result: itemlist.append(item.clone(title="", action="", infoLabels={})) if ob_tmdb.result.get("belongs_to_collection"): @@ -1603,7 +1568,7 @@ def filtro_fa(item): valores = {} dict_values = None - # Se utilizan los valores por defecto/guardados + # Default / saved values are used valores_guardados = config.get_setting("filtro_defecto_filmaf_" + item.extra, item.channel) if valores_guardados: dict_values = valores_guardados @@ -1675,7 +1640,7 @@ def filtrado_fa(item, values): values_copy = values.copy() - # Guarda el filtro para que sea el que se cargue por defecto + # Save the filter to be the one loaded by default if "save" in values and values["save"]: values_copy.pop("save") config.set_setting("filtro_defecto_filmaf_" + item.extra, values_copy, item.channel) @@ -1732,7 +1697,7 @@ def login_fa(): def cuenta_fa(item): - # Menú de cuenta filmaffinity + # Filmaffinity account menu itemlist = [] login, message = login_fa() if not login: @@ -1748,7 +1713,7 @@ def acciones_fa(item): - # Acciones cuenta filmaffinity, votar, ver listas o añadir/quitar de lista + # Filmaffinity account actions: vote, view lists or add to / remove from a list itemlist = [] if item.accion == "votos" or item.accion == "lista": @@ -1847,7 +1812,7 @@ def votar_fa(item): - # Ventana para seleccionar el voto + # Window to select the vote logger.info() list_controls = [] @@ -1889,7 +1854,7 @@ def callback_voto(item, values): def newlist(item): - # Creación de nueva lista en filmaffinity + # Creation of a new list in filmaffinity itemlist = [] if item.accion == "lista": location = httptools.downloadpage(item.url, only_headers=True).headers["location"] @@ -1910,7 +1875,7 @@ return itemlist -##-------------------- LISTADOS DE IMAGENES ------------------------## +##-------------------- IMAGE LISTINGS ------------------------## def imagenes(item): itemlist = [] @@
-2055,13 +2020,13 @@ def fanartv(item): return item, resultado -##-------------------- SECCION TRAKT.TV ------------------------## +##-------------------- SECTION TRAKT.TV ------------------------## def auth_trakt(item): return trakt_tools.auth_trakt() def menu_trakt(item): - # Menú con acciones de cuenta trakt (vistas, watchlist, coleccion) + # Menu with trakt account actions (watched, watchlist, collection) itemlist = [] token_auth = config.get_setting("token_trakt", "trakt") tipo = item.extra.replace("tv", "show") + "s" @@ -2279,9 +2244,9 @@ def order_trakt(item, values): return acciones_trakt(item) -##-------------------- SECCION MYANIMELIST ------------------------## +##-------------------- MYANIMELIST SECTION ------------------------## def top_mal(item): - # Para los menús principales de tops pelícuas/series/ovas + # For the main top menus of movies / series / OVAs itemlist = [] data = httptools.downloadpage(item.url, cookies=False).data data = re.sub(r"\n|\r|\t| ", "", data) @@ -2388,7 +2353,7 @@ def detalles_mal(item): ob_tmdb = Tmdb(id_Tmdb=ob_tmdb.get_id(), tipo=item_tmdb.extra, idioma_busqueda=langt) item.infoLabels = ob_tmdb.get_infoLabels(item.infoLabels) - # Se concatena sinopsis myanimelist con la de tmdb si la hubiese + # Myanimelist synopsis is concatenated with that of tmdb if any plot = scrapertools.find_single_match(data, '<span itemprop="description">(.*?)</span>') plot = plot.replace("<br />", "\n").replace("<i>", "[I]").replace("</i>", "[/I]") plot = scrapertools.decodeHtmlentities(plot) @@ -2411,7 +2376,7 @@ except: pass - # Se sobreescribe la info de myanimelist sobre la de tmdb + # Myanimelist info overwrites tmdb info generos = scrapertools.find_single_match(data, 'Genres:</span>(.*?)</div>') if generos: item.infoLabels['genre'] = scrapertools.htmlclean(generos) @@ -2445,7 +2410,7 @@ itemlist.append(item.clone(action="videos_mal", title=config.get_localized_string(70353), url=item.url + "/video")) - # Opción para ver la info de personajes y dobladores/equipo de rodaje + # Option to see the info of characters and voice actors / film crew if not "No characters or voice actors" in data and not "No staff for this anime" in data: itemlist.append(item.clone(action="staff_mal", title=config.get_localized_string(70354), url=item.url + "/characters")) @@ -2497,7 +2462,7 @@ if token_auth and ob_tmdb.result: itemlist.append(item.clone(title=config.get_localized_string(70323), action="menu_trakt")) - # Se listan precuelas, secuelas y series alternativas + # Prequels, sequels and alternative series are listed prequel = scrapertools.find_single_match(data, 'Prequel:</td>(.*?)</td>') if prequel: matches = scrapertools.find_multiple_matches(prequel, 'href="([^"]+)">(.*?)</a>') @@ -2550,7 +2515,7 @@ search={'url': '%s/%s/recommendations' % (item.extra, item.infoLabels['tmdb_id']), 'language': langt, 'page': 1}, )) - # Recomendaciones myanimelist y búsqueda de info en anidb (fansubs en español) + # Myanimelist recommendations and info search on anidb (fansubs in Spanish) itemlist.append(item.clone(title=config.get_localized_string(70359), action="reco_mal")) anidb_link = scrapertools.find_single_match(data, '<a href="(http://anidb.info/perl-bin/animedb.pl\?show=anime&aid=\d+)') @@ -2562,7 +2527,7 @@ def videos_mal(item): - # Método para episodios en crunchyroll y trailer/promocionales + # Method for crunchyroll episodes and trailers / promos itemlist = []
data = httptools.downloadpage(item.url, cookies=False).data @@ -2604,7 +2569,7 @@ def reco_mal(item): - # Recomendaciones de myanimelist + # Myanimelist recommendations itemlist = [] data = httptools.downloadpage(item.url + "/userrecs", cookies=False).data @@ -2628,7 +2593,7 @@ def indices_mal(item): - # Índices por temporadas y generos + # Indices by season and genre itemlist = [] url_base = "" if "Temporadas" in item.title: @@ -2664,7 +2629,7 @@ def season_mal(item): - # Scraper para temporadas de anime + # Scraper for anime seasons itemlist = [] cookie_session = get_cookie_value() @@ -2758,7 +2723,7 @@ def staff_mal(item): - # Dobladores/Equipo de rodaje + # Voice actors / Film crew itemlist = [] data = httptools.downloadpage(item.url, cookies=False).data data = re.sub(r"\n|\r|\t| ", "", data) @@ -2869,7 +2834,7 @@ def detail_staff(item): def busqueda_mal(item): - # Scraper para búsquedas en myanimelist + # Scraper for myanimelist searches itemlist = [] cookie_session = get_cookie_value() @@ -2942,7 +2907,7 @@ def info_anidb(item, itemlist, url): - # Extrae info, puntuación y fansubs en anidb + # Extract info, score and fansubs on anidb data = httptools.downloadpage(url).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) @@ -2994,7 +2959,7 @@ def filtro_mal(item): list_controls = [] valores = {} dict_values = None - # Se utilizan los valores por defecto/guardados + # Default / saved values are used valores_guardados = config.get_setting("filtro_defecto_mal", item.channel) if valores_guardados: dict_values = valores_guardados @@ -3044,7 +3009,7 @@ def callback_mal(item, values): values_copy = values.copy() - # Guarda el filtro para que sea el que se cargue por defecto + # Save the filter to be the one loaded by default if "save" in values and values["save"]: values_copy.pop("save") config.set_setting("filtro_defecto_mal", values_copy, item.channel) @@ -3072,7 +3037,7 @@ def callback_mal(item, values): def musica_anime(item): - # Lista los animes y canciones disponibles similares al título del anime + # List available anime and songs similar to the anime title logger.info() itemlist = [] @@ -3145,7 +3110,7 @@ def login_mal(from_list=False): def cuenta_mal(item): - # Menú de cuenta myanimelist + # Myanimelist account menu itemlist = [] login, message, user = login_mal(True) if not login: @@ -3167,7 +3132,7 @@ def items_mal(item): - # Scraper para las listas personales + # Scraper for personal lists logger.info() itemlist = [] data = httptools.downloadpage(item.url).data @@ -3213,7 +3178,7 @@ def menu_mal(item): - # Opciones cuenta MAL, añadir a lista/votar + # MAL account options, add to list / vote itemlist = [] data = httptools.downloadpage(item.url).data @@ -3271,7 +3236,7 @@ def addlist_mal(item): url = "https://myanimelist.net/ownlist/anime/add.json" if item.lista: url = "https://myanimelist.net/ownlist/anime/edit.json" - #data = httptools.downloadpage(url, post=jsontools.dump(post), headers=headers_mal, replace_headers=True).data + # data = httptools.downloadpage(url, post=jsontools.dump(post), headers=headers_mal, replace_headers=True).data data = httptools.downloadpage(url, post=jsontools.dump(post), headers=headers_mal).data item.title = "En tu lista" if config.is_xbmc(): diff --git a/specials/url.py b/specials/url.py index 1fdd9db3..d808747e 100644 --- a/specials/url.py +++
b/specials/url.py @@ -17,7 +17,7 @@ def mainlist(item): return itemlist -# Al llamarse "search" la función, el launcher pide un text a buscar y lo añade como parámetro +# Because the function is named "search", the launcher prompts for a text to search and passes it in as a parameter def search(item, text): log(text)
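
For context on that last hunk: the comment describes the convention that a channel function named "search" receives a user-typed text as its second argument, supplied by the plugin launcher. Below is a minimal, hedged sketch of that dispatch pattern; the helper names (prompt_for_text, dispatch) and the module path are illustrative placeholders, not the addon's actual API.

import importlib

def prompt_for_text():
    # Stand-in for the platform's on-screen keyboard dialog.
    return input("Search: ")

def dispatch(channel_name, item):
    # Hypothetical launcher: load the channel module and run the requested action.
    module = importlib.import_module("specials." + channel_name)
    if item.action == "search":
        text = prompt_for_text()
        return module.search(item, text)  # the extra 'text' parameter noted in the comment above
    return getattr(module, item.action)(item)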