Other Translations
@@ -1,6 +1,6 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# channeltools - Herramientas para trabajar con canales
|
||||
# channeltools - Tools for working with channels
|
||||
# ------------------------------------------------------------
|
||||
|
||||
from __future__ import absolute_import
|
||||
@@ -29,11 +29,11 @@ def get_channel_parameters(channel_name):
|
||||
channel_parameters = get_channel_json(channel_name)
|
||||
# logger.debug(channel_parameters)
|
||||
if channel_parameters:
|
||||
# cambios de nombres y valores por defecto
|
||||
# name changes and default values
|
||||
channel_parameters["title"] = channel_parameters.pop("name") + (' [DEPRECATED]' if 'deprecated' in channel_parameters and channel_parameters['deprecated'] else '')
|
||||
channel_parameters["channel"] = channel_parameters.pop("id")
|
||||
|
||||
# si no existe el key se declaran valor por defecto para que no de fallos en las funciones que lo llaman
|
||||
# if the key does not exist, declare a default value so the functions that call it do not fail
|
||||
channel_parameters["update_url"] = channel_parameters.get("update_url", DEFAULT_UPDATE_URL)
|
||||
channel_parameters["language"] = channel_parameters.get("language", ["all"])
|
||||
channel_parameters["active"] = channel_parameters.get("active", False)
|
||||
@@ -45,7 +45,7 @@ def get_channel_parameters(channel_name):
|
||||
channel_parameters["banner"] = channel_parameters.get("banner", "")
|
||||
channel_parameters["fanart"] = channel_parameters.get("fanart", "")
|
||||
|
||||
# Imagenes: se admiten url y archivos locales dentro de "resources/images"
|
||||
# Images: urls and local files inside "resources/images" are allowed
|
||||
if channel_parameters.get("thumbnail") and "://" not in channel_parameters["thumbnail"]:
|
||||
channel_parameters["thumbnail"] = filetools.join(remote_path, "resources", "thumb", channel_parameters["thumbnail"])
|
||||
if channel_parameters.get("banner") and "://" not in channel_parameters["banner"]:
|
||||
@@ -53,7 +53,7 @@ def get_channel_parameters(channel_name):
|
||||
if channel_parameters.get("fanart") and "://" not in channel_parameters["fanart"]:
|
||||
channel_parameters["fanart"] = filetools.join(remote_path, "resources", channel_parameters["fanart"])
|
||||
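A minimal sketch of the defaulting-and-path-resolution pattern used above, assuming a plain dict for the parsed channel JSON; DEFAULT_UPDATE_URL and remote_path are illustrative placeholders, not the addon's real values. Local image names are only joined onto the resources folder when the value is not already a URL:

import os

DEFAULT_UPDATE_URL = "/channels/"                     # placeholder default
remote_path = "/storage/addons/plugin.video.kod"      # hypothetical base path

def apply_channel_defaults(params):
    # declare defaults so the calling functions never hit a missing key
    params.setdefault("update_url", DEFAULT_UPDATE_URL)
    params.setdefault("language", ["all"])
    params.setdefault("active", False)
    params.setdefault("thumbnail", "")
    # resolve local image names; anything containing "://" is treated as a URL and left untouched
    if params["thumbnail"] and "://" not in params["thumbnail"]:
        params["thumbnail"] = os.path.join(remote_path, "resources", "thumb", params["thumbnail"])
    return params

print(apply_channel_defaults({"thumbnail": "example.png"}))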
|
||||
# Obtenemos si el canal tiene opciones de configuración
|
||||
# Determine whether the channel has configuration options
|
||||
channel_parameters["has_settings"] = False
|
||||
if 'settings' in channel_parameters:
|
||||
channel_parameters['settings'] = get_default_settings(channel_name)
|
||||
@@ -71,8 +71,7 @@ def get_channel_parameters(channel_name):
|
||||
dict_channels_parameters[channel_name] = channel_parameters
|
||||
|
||||
else:
|
||||
# para evitar casos donde canales no están definidos como configuración
|
||||
# lanzamos la excepcion y asi tenemos los valores básicos
|
||||
# To avoid cases where a channel is not defined as configuration, we raise the exception and thus keep the basic values
|
||||
raise Exception
|
||||
|
||||
except Exception as ex:
|
||||
@@ -123,7 +122,7 @@ def get_channel_controls_settings(channel_name):
|
||||
|
||||
for c in list_controls:
|
||||
if 'id' not in c or 'type' not in c or 'default' not in c:
|
||||
# Si algun control de la lista no tiene id, type o default lo ignoramos
|
||||
# If any control in the list does not have id, type or default, we ignore it
|
||||
continue
|
||||
|
||||
# new dict with key(id) and value(default) from settings
|
||||
@@ -173,7 +172,7 @@ def get_default_settings(channel_name):
|
||||
default_controls_renumber = default_file['renumber']
|
||||
channel_json = get_channel_json(channel_name)
|
||||
|
||||
# Collects configurations
|
||||
# Collects configurations
|
||||
channel_language = channel_json['language']
|
||||
channel_controls = channel_json['settings']
|
||||
categories = channel_json['categories']
|
||||
@@ -189,28 +188,22 @@ def get_default_settings(channel_name):
|
||||
label = label[-1]
|
||||
if label == 'peliculas':
|
||||
if 'movie' in categories:
|
||||
control['label'] = config.get_localized_string(70727) + ' - ' + config.get_localized_string(
|
||||
30122)
|
||||
control['default'] = False if ('include_in_newest' in default_off) or (
|
||||
'include_in_newest_peliculas' in default_off) else True
|
||||
control['label'] = config.get_localized_string(70727) + ' - ' + config.get_localized_string(30122)
|
||||
control['default'] = False if ('include_in_newest' in default_off) or ('include_in_newest_peliculas' in default_off) else True
|
||||
channel_controls.append(control)
|
||||
else:
|
||||
pass
|
||||
elif label == 'series':
|
||||
if 'tvshow' in categories:
|
||||
control['label'] = config.get_localized_string(70727) + ' - ' + config.get_localized_string(
|
||||
30123)
|
||||
control['default'] = False if ('include_in_newest' in default_off) or (
|
||||
'include_in_newest_series' in default_off) else True
|
||||
control['label'] = config.get_localized_string(70727) + ' - ' + config.get_localized_string(30123)
|
||||
control['default'] = False if ('include_in_newest' in default_off) or ('include_in_newest_series' in default_off) else True
|
||||
channel_controls.append(control)
|
||||
else:
|
||||
pass
|
||||
elif label == 'anime':
|
||||
if 'anime' in categories:
|
||||
control['label'] = config.get_localized_string(70727) + ' - ' + config.get_localized_string(
|
||||
30124)
|
||||
control['default'] = False if ('include_in_newest' in default_off) or (
|
||||
'include_in_newest_anime' in default_off) else True
|
||||
control['label'] = config.get_localized_string(70727) + ' - ' + config.get_localized_string(30124)
|
||||
control['default'] = False if ('include_in_newest' in default_off) or ('include_in_newest_anime' in default_off) else True
|
||||
channel_controls.append(control)
|
||||
else:
|
||||
pass
|
||||
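The three branches above repeat the same rule per category; a hedged sketch of that rule as one helper, using plain strings where the real code uses localized labels and its own control templates:

def build_newest_control(suffix, category_key, categories, default_off):
    # suffix: 'peliculas', 'series' or 'anime'; category_key: 'movie', 'tvshow' or 'anime'
    if category_key not in categories:
        return None                                   # channel does not serve this category
    return {
        "id": "include_in_newest_" + suffix,
        "type": "bool",
        "label": "Include in newest - " + suffix,     # the real code builds this from localized strings
        # off when either the generic key or the per-category key is listed in default_off
        "default": not ("include_in_newest" in default_off or
                        ("include_in_newest_" + suffix) in default_off),
    }

print(build_newest_control("peliculas", "movie", ["movie"], ["include_in_newest_peliculas"]))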
@@ -239,24 +232,24 @@ def get_default_settings(channel_name):
|
||||
def get_channel_setting(name, channel, default=None):
|
||||
from core import filetools
|
||||
"""
|
||||
Retorna el valor de configuracion del parametro solicitado.
|
||||
Returns the configuration value of the requested parameter.
|
||||
|
||||
Devuelve el valor del parametro 'name' en la configuracion propia del canal 'channel'.
|
||||
Returns the value of the parameter 'name' from the configuration belonging to the channel 'channel'.
|
||||
|
||||
Busca en la ruta \addon_data\plugin.video.kod\settings_channels el archivo channel_data.json y lee
|
||||
el valor del parametro 'name'. Si el archivo channel_data.json no existe busca en la carpeta channels el archivo
|
||||
channel.json y crea un archivo channel_data.json antes de retornar el valor solicitado. Si el parametro 'name'
|
||||
tampoco existe en el el archivo channel.json se devuelve el parametro default.
|
||||
Look in the path \addon_data\plugin.video.kod\settings_channels for the file channel_data.json and read
|
||||
the value of the parameter 'name'. If the file channel_data.json does not exist look in the channels folder for the file
|
||||
channel.json and create a channel_data.json file before returning the requested value. If the parameter 'name'
|
||||
also does not exist in the channel.json file the default parameter is returned.
|
||||
|
||||
|
||||
@param name: nombre del parametro
|
||||
@param name: parameter name
|
||||
@type name: str
|
||||
@param channel: nombre del canal
|
||||
@param channel: channel name
|
||||
@type channel: str
|
||||
@param default: valor devuelto en caso de que no exista el parametro name
|
||||
@param default: return value in case the name parameter does not exist
|
||||
@type default: any
|
||||
|
||||
@return: El valor del parametro 'name'
|
||||
@return: The value of the parameter 'name'
|
||||
@rtype: any
|
||||
|
||||
"""
|
||||
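A minimal, file-based sketch of the lookup order this docstring describes (saved channel_data.json first, then the channel's declared defaults, then the caller's default). The helper and paths are illustrative, not the addon's real jsontools/filetools API:

import json
import os
import tempfile

def read_channel_setting(name, settings_file, channel_defaults, default=None):
    saved = {}
    if os.path.exists(settings_file):
        try:
            with open(settings_file) as f:
                saved = json.load(f).get("settings", {})
        except (OSError, ValueError):
            saved = {}                                # unreadable file: behave as if empty
    if name in saved:
        return saved[name]
    # fall back to the defaults declared in channel.json, then to the caller's default
    return channel_defaults.get(name, default)

missing = os.path.join(tempfile.gettempdir(), "missing_channel_data.json")
print(read_channel_setting("filter_languages", missing, {"filter_languages": 0}, default=-1))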
@@ -266,58 +259,58 @@ def get_channel_setting(name, channel, default=None):
|
||||
if channel not in ['trakt']: def_settings = get_default_settings(channel)
|
||||
|
||||
if filetools.exists(file_settings):
|
||||
# Obtenemos configuracion guardada de ../settings/channel_data.json
|
||||
# We get saved configuration from ../settings/channel_data.json
|
||||
try:
|
||||
dict_file = jsontools.load(filetools.read(file_settings))
|
||||
if isinstance(dict_file, dict) and 'settings' in dict_file:
|
||||
dict_settings = dict_file['settings']
|
||||
except EnvironmentError:
|
||||
logger.error("ERROR al leer el archivo: %s" % file_settings)
|
||||
logger.error("ERROR when reading the file: %s" % file_settings)
|
||||
|
||||
if not dict_settings or name not in dict_settings:
|
||||
# Obtenemos controles del archivo ../channels/channel.json
|
||||
# We get controls from the file ../channels/channel.json
|
||||
try:
|
||||
list_controls, default_settings = get_channel_controls_settings(channel)
|
||||
except:
|
||||
default_settings = {}
|
||||
|
||||
if name in default_settings: # Si el parametro existe en el channel.json creamos el channel_data.json
|
||||
if name in default_settings:  # If the parameter exists in channel.json, we create channel_data.json
|
||||
default_settings.update(dict_settings)
|
||||
dict_settings = default_settings
|
||||
dict_file['settings'] = dict_settings
|
||||
# Creamos el archivo ../settings/channel_data.json
|
||||
# We create the file ../settings/channel_data.json
|
||||
json_data = jsontools.dump(dict_file)
|
||||
if not filetools.write(file_settings, json_data, silent=True):
|
||||
logger.error("ERROR al salvar el archivo: %s" % file_settings)
|
||||
logger.error("ERROR saving file: %s" % file_settings)
|
||||
|
||||
# Devolvemos el valor del parametro local 'name' si existe, si no se devuelve default
|
||||
# Return the value of the local parameter 'name' if it exists; otherwise return default
|
||||
return dict_settings.get(name, default)
|
||||
|
||||
|
||||
def set_channel_setting(name, value, channel):
|
||||
from core import filetools
|
||||
"""
|
||||
Fija el valor de configuracion del parametro indicado.
|
||||
Sets the configuration value of the indicated parameter.
|
||||
|
||||
Establece 'value' como el valor del parametro 'name' en la configuracion propia del canal 'channel'.
|
||||
Devuelve el valor cambiado o None si la asignacion no se ha podido completar.
|
||||
Sets 'value' as the value of the parameter 'name' in the configuration belonging to the channel 'channel'.
|
||||
Returns the changed value or None if the assignment could not be completed.
|
||||
|
||||
Si se especifica el nombre del canal busca en la ruta \addon_data\plugin.video.kod\settings_channels el
|
||||
archivo channel_data.json y establece el parametro 'name' al valor indicado por 'value'.
|
||||
Si el parametro 'name' no existe lo añade, con su valor, al archivo correspondiente.
|
||||
If the name of the channel is specified, search the path \addon_data\plugin.video.kod\settings_channels for the
|
||||
channel_data.json file and set the parameter 'name' to the value indicated by 'value'.
|
||||
If the parameter 'name' does not exist, it adds it, with its value, to the corresponding file.
|
||||
|
||||
@param name: nombre del parametro
|
||||
@param name: parameter name
|
||||
@type name: str
|
||||
@param value: valor del parametro
|
||||
@param value: parameter value
|
||||
@type value: str
|
||||
@param channel: nombre del canal
|
||||
@param channel: channel name
|
||||
@type channel: str
|
||||
|
||||
@return: 'value' en caso de que se haya podido fijar el valor y None en caso contrario
|
||||
@return: 'value' if the value could be set and None otherwise
|
||||
@rtype: str, None
|
||||
|
||||
"""
|
||||
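A hedged sketch of the write path described in this docstring: load whatever is saved, update one key under 'settings', write the file back, and return the value on success or None on failure. Plain json/open stand in for the addon's jsontools/filetools:

import json
import os
import tempfile

def write_channel_setting(name, value, settings_file):
    data = {}
    if os.path.exists(settings_file):
        try:
            with open(settings_file) as f:
                data = json.load(f)
        except (OSError, ValueError):
            data = {}
    data.setdefault("settings", {})[name] = value
    try:
        with open(settings_file, "w") as f:
            json.dump(data, f)
    except OSError:
        return None                                   # could not persist the value
    return value

demo = os.path.join(tempfile.gettempdir(), "channel_data_demo.json")
print(write_channel_setting("filter_languages", 1, demo))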
# Creamos la carpeta si no existe
|
||||
# We create the folder if it does not exist
|
||||
if not filetools.exists(filetools.join(config.get_data_path(), "settings_channels")):
|
||||
filetools.mkdir(filetools.join(config.get_data_path(), "settings_channels"))
|
||||
|
||||
@@ -327,16 +320,16 @@ def set_channel_setting(name, value, channel):
|
||||
dict_file = None
|
||||
|
||||
if filetools.exists(file_settings):
|
||||
# Obtenemos configuracion guardada de ../settings/channel_data.json
|
||||
# We get saved settings from ../settings/channel_data.json
|
||||
try:
|
||||
dict_file = jsontools.load(filetools.read(file_settings))
|
||||
dict_settings = dict_file.get('settings', {})
|
||||
except EnvironmentError:
|
||||
logger.error("ERROR al leer el archivo: %s" % file_settings)
|
||||
logger.error("ERROR when reading the file: %s" % file_settings)
|
||||
|
||||
dict_settings[name] = value
|
||||
|
||||
# comprobamos si existe dict_file y es un diccionario, sino lo creamos
|
||||
# we check if dict_file exists and it is a dictionary, if not we create it
|
||||
if dict_file is None or not dict_file:
|
||||
dict_file = {}
|
||||
|
||||
@@ -345,7 +338,7 @@ def set_channel_setting(name, value, channel):
|
||||
# Creamos el archivo ../settings/channel_data.json
|
||||
json_data = jsontools.dump(dict_file)
|
||||
if not filetools.write(file_settings, json_data, silent=True):
|
||||
logger.error("ERROR al salvar el archivo: %s" % file_settings)
|
||||
logger.error("ERROR saving file: %s" % file_settings)
|
||||
return None
|
||||
|
||||
return value
|
||||
|
||||
@@ -1,20 +1,20 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
Clase Downloader
|
||||
Downloader class
|
||||
Downloader(url, path [, filename, headers, resume])
|
||||
|
||||
url : string - url para descargar
|
||||
path : string - Directorio donde se guarda la descarga
|
||||
filename : [opt] string - Nombre de archivo para guardar
|
||||
headers : [opt] dict - Headers para usar en la descarga
|
||||
resume : [opt] bool - continuar una descarga previa en caso de existir, por defecto True
|
||||
url : string - url to download
|
||||
path : string - Directory where the download is saved
|
||||
filename : [opt] string - File name to save
|
||||
headers : [opt] dict - Headers to use for download
|
||||
resume : [opt] bool - continue a previous download if it exists, by default True
|
||||
|
||||
|
||||
metodos:
|
||||
start_dialog() Inicia la descarga mostrando el progreso
|
||||
start() Inicia la descarga en segundo plano
|
||||
stop(erase = False) Detiene la descarga, con erase = True elimina los datos descargados
|
||||
start_dialog() Start the download showing the progress
|
||||
start() Start the download in the background
|
||||
stop(erase = False) Stop the download, with erase = True it deletes the downloaded data
|
||||
|
||||
"""
|
||||
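A short usage sketch based only on the constructor and methods documented above; the class is passed in as a parameter so the sketch stays importable outside Kodi, and the URL and path are placeholders:

def download_in_background(Downloader):
    # Downloader(url, path[, filename, headers, resume]) as documented in the docstring
    d = Downloader("http://example.com/file.mkv",     # placeholder URL
                   "/tmp/downloads",                  # directory where the download is saved
                   filename="file.mkv",
                   resume=True)
    d.start()                                         # background download, no progress dialog
    # ... later: stop it but keep the partial data so a future run can resume
    d.stop(erase=False)
    return d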
from __future__ import division
|
||||
@@ -26,7 +26,7 @@ standard_library.install_aliases()
|
||||
from builtins import range
|
||||
from builtins import object
|
||||
from past.utils import old_div
|
||||
#from builtins import str
|
||||
# from builtins import str
|
||||
import sys
|
||||
PY3 = False
|
||||
VFS = True
|
||||
@@ -53,8 +53,7 @@ class Downloader(object):
|
||||
|
||||
@property
|
||||
def connections(self):
|
||||
return len([c for c in self._download_info["parts"] if
|
||||
c["status"] in [self.states.downloading, self.states.connecting]]), self._max_connections
|
||||
return len([c for c in self._download_info["parts"] if c["status"] in [self.states.downloading, self.states.connecting]]), self._max_connections
|
||||
|
||||
@property
|
||||
def downloaded(self):
|
||||
@@ -102,7 +101,7 @@ class Downloader(object):
|
||||
def fullpath(self):
|
||||
return os.path.abspath(filetools.join(self._path, self._filename))
|
||||
|
||||
# Funciones
|
||||
# Functions
|
||||
def start_dialog(self, title=config.get_localized_string(60200)):
|
||||
from platformcode import platformtools
|
||||
progreso = platformtools.dialog_progress_bg(title, config.get_localized_string(60201))
|
||||
@@ -111,9 +110,7 @@ class Downloader(object):
|
||||
while self.state == self.states.downloading:
|
||||
time.sleep(0.2)
|
||||
line1 = "%s" % (self.filename)
|
||||
line2 = config.get_localized_string(59983) % (
|
||||
self.downloaded[1], self.downloaded[2], self.size[1], self.size[2],
|
||||
self.speed[1], self.speed[2], self.connections[0], self.connections[1])
|
||||
line2 = config.get_localized_string(59983) % ( self.downloaded[1], self.downloaded[2], self.size[1], self.size[2], self.speed[1], self.speed[2], self.connections[0], self.connections[1])
|
||||
line3 = config.get_localized_string(60202) % (self.remaining_time)
|
||||
|
||||
progreso.update(int(self.progress), line1, line2 + " " + line3)
|
||||
@@ -130,9 +127,7 @@ class Downloader(object):
|
||||
conns.append(self.__open_connection__("0", ""))
|
||||
except:
|
||||
self._max_connections = x
|
||||
self._threads = [
|
||||
Thread(target=self.__start_part__, name="Downloader %s/%s" % (x + 1, self._max_connections)) for x
|
||||
in range(self._max_connections)]
|
||||
self._threads = [ Thread(target=self.__start_part__, name="Downloader %s/%s" % (x + 1, self._max_connections)) for x in range(self._max_connections)]
|
||||
break
|
||||
del conns
|
||||
self._start_time = time.time() - 1
|
||||
@@ -144,7 +139,7 @@ class Downloader(object):
|
||||
|
||||
def stop(self, erase=False):
|
||||
if self._state == self.states.downloading:
|
||||
# Detenemos la descarga
|
||||
# We stop downloading
|
||||
self._state = self.states.stopped
|
||||
for t in self._threads:
|
||||
if t.isAlive(): t.join()
|
||||
@@ -193,10 +188,10 @@ class Downloader(object):
|
||||
|
||||
time.sleep(0.5)
|
||||
|
||||
# Funciones internas
|
||||
# Internal functions
|
||||
def __init__(self, url, path, filename=None, headers=[], resume=True, max_connections=10, block_size=2 ** 17,
|
||||
part_size=2 ** 24, max_buffer=10, json_path=None):
|
||||
# Parametros
|
||||
# Parameters
|
||||
self._resume = resume
|
||||
self._path = path
|
||||
self._filename = filename
|
||||
@@ -214,29 +209,26 @@ class Downloader(object):
|
||||
except:
|
||||
self.tmp_path = os.getenv("TEMP") or os.getenv("TMP") or os.getenv("TMPDIR")
|
||||
|
||||
self.states = type('states', (),
|
||||
{"stopped": 0, "connecting": 1, "downloading": 2, "completed": 3, "error": 4, "saving": 5})
|
||||
self.states = type('states', (), {"stopped": 0, "connecting": 1, "downloading": 2, "completed": 3, "error": 4, "saving": 5})
|
||||
|
||||
self._state = self.states.stopped
|
||||
self._download_lock = Lock()
|
||||
self._headers = {
|
||||
"User-Agent": "Kodi/15.2 (Windows NT 10.0; WOW64) App_Bitness/32 Version/15.2-Git:20151019-02e7013"}
|
||||
self._headers = {"User-Agent": "Kodi/15.2 (Windows NT 10.0; WOW64) App_Bitness/32 Version/15.2-Git:20151019-02e7013"}
|
||||
self._speed = 0
|
||||
self._buffer = {}
|
||||
self._seekable = True
|
||||
|
||||
self._threads = [Thread(target=self.__start_part__, name="Downloader %s/%s" % (x + 1, self._max_connections))
|
||||
for x in range(self._max_connections)]
|
||||
self._threads = [Thread(target=self.__start_part__, name="Downloader %s/%s" % (x + 1, self._max_connections)) for x in range(self._max_connections)]
|
||||
self._speed_thread = Thread(target=self.__speed_metter__, name="Speed Meter")
|
||||
self._save_thread = Thread(target=self.__save_file__, name="File Writer")
|
||||
|
||||
# Actualizamos los headers
|
||||
# We update the headers
|
||||
self._headers.update(dict(headers))
|
||||
|
||||
# Separamos los headers de la url
|
||||
# We separate the headers from the url
|
||||
self.__url_to_headers__(url)
|
||||
|
||||
# Obtenemos la info del servidor
|
||||
# We get the server info
|
||||
self.__get_download_headers__()
|
||||
|
||||
self._file_size = int(self.response_headers.get("content-length", "0"))
|
||||
@@ -246,10 +238,10 @@ class Downloader(object):
|
||||
self._part_size = 0
|
||||
self._resume = False
|
||||
|
||||
# Obtenemos el nombre del archivo
|
||||
# We get the file name
|
||||
self.__get_download_filename__()
|
||||
|
||||
# Abrimos en modo "a+" para que cree el archivo si no existe, luego en modo "r+b" para poder hacer seek()
|
||||
# We open in "a+" mode so the file is created if it does not exist, then in "r+b" mode to be able to seek()
|
||||
self.file = filetools.file_open(filetools.join(self._path, self._filename), "a+", vfs=VFS)
|
||||
if self.file: self.file.close()
|
||||
self.file = filetools.file_open(filetools.join(self._path, self._filename), "r+b", vfs=VFS)
|
||||
@@ -266,20 +258,17 @@ class Downloader(object):
|
||||
self.__get_download_info__()
|
||||
|
||||
try:
|
||||
logger.info("Download started: Parts: %s | Path: %s | File: %s | Size: %s" % \
|
||||
(str(len(self._download_info["parts"])), self._pathencode('utf-8'), \
|
||||
self._filenameencode('utf-8'), str(self._download_info["size"])))
|
||||
logger.info("Download started: Parts: %s | Path: %s | File: %s | Size: %s" % (str(len(self._download_info["parts"])), self._pathencode('utf-8'), self._filenameencode('utf-8'), str(self._download_info["size"])))
|
||||
except:
|
||||
pass
|
||||
|
||||
def __url_to_headers__(self, url):
|
||||
# Separamos la url de los headers adicionales
|
||||
# We separate the url from the additional headers
|
||||
self.url = url.split("|")[0]
|
||||
|
||||
# headers adicionales
|
||||
# additional headers
|
||||
if "|" in url:
|
||||
self._headers.update(dict([[header.split("=")[0], urllib.parse.unquote_plus(header.split("=")[1])] for header in
|
||||
url.split("|")[1].split("&")]))
|
||||
self._headers.update(dict([[header.split("=")[0], urllib.parse.unquote_plus(header.split("=")[1])] for header in url.split("|")[1].split("&")]))
|
||||
|
||||
def __get_download_headers__(self):
|
||||
if self.url.startswith("https"):
|
||||
@@ -307,29 +296,21 @@ class Downloader(object):
|
||||
break
|
||||
|
||||
def __get_download_filename__(self):
|
||||
# Obtenemos nombre de archivo y extension
|
||||
if "filename" in self.response_headers.get("content-disposition",
|
||||
"") and "attachment" in self.response_headers.get(
|
||||
"content-disposition", ""):
|
||||
cd_filename, cd_ext = os.path.splitext(urllib.parse.unquote_plus(
|
||||
re.compile("attachment; filename ?= ?[\"|']?([^\"']+)[\"|']?").match(
|
||||
self.response_headers.get("content-disposition")).group(1)))
|
||||
elif "filename" in self.response_headers.get("content-disposition", "") and "inline" in self.response_headers.get(
|
||||
"content-disposition", ""):
|
||||
cd_filename, cd_ext = os.path.splitext(urllib.parse.unquote_plus(
|
||||
re.compile("inline; filename ?= ?[\"|']?([^\"']+)[\"|']?").match(
|
||||
self.response_headers.get("content-disposition")).group(1)))
|
||||
# We get file name and extension
|
||||
if "filename" in self.response_headers.get("content-disposition", "") and "attachment" in self.response_headers.get("content-disposition", ""):
|
||||
cd_filename, cd_ext = os.path.splitext(urllib.parse.unquote_plus( re.compile("attachment; filename ?= ?[\"|']?([^\"']+)[\"|']?").match(self.response_headers.get("content-disposition")).group(1)))
|
||||
elif "filename" in self.response_headers.get("content-disposition", "") and "inline" in self.response_headers.get("content-disposition", ""):
|
||||
cd_filename, cd_ext = os.path.splitext(urllib.parse.unquote_plus(re.compile("inline; filename ?= ?[\"|']?([^\"']+)[\"|']?").match(self.response_headers.get("content-disposition")).group(1)))
|
||||
else:
|
||||
cd_filename, cd_ext = "", ""
|
||||
|
||||
url_filename, url_ext = os.path.splitext(
|
||||
urllib.parse.unquote_plus(filetools.basename(urllib.parse.urlparse(self.url)[2])))
|
||||
url_filename, url_ext = os.path.splitext(urllib.parse.unquote_plus(filetools.basename(urllib.parse.urlparse(self.url)[2])))
|
||||
if self.response_headers.get("content-type", "application/octet-stream") != "application/octet-stream":
|
||||
mime_ext = mimetypes.guess_extension(self.response_headers.get("content-type"))
|
||||
else:
|
||||
mime_ext = ""
|
||||
|
||||
# Seleccionamos el nombre mas adecuado
|
||||
# We select the most suitable name
|
||||
if cd_filename:
|
||||
self.remote_filename = cd_filename
|
||||
if not self._filename:
|
||||
@@ -340,7 +321,7 @@ class Downloader(object):
|
||||
if not self._filename:
|
||||
self._filename = url_filename
|
||||
|
||||
# Seleccionamos la extension mas adecuada
|
||||
# We select the most suitable extension
|
||||
if cd_ext:
|
||||
if not cd_ext in self._filename: self._filename += cd_ext
|
||||
if self.remote_filename: self.remote_filename += cd_ext
|
||||
@@ -360,7 +341,7 @@ class Downloader(object):
|
||||
return value, old_div(value, 1024.0 ** int(math.log(value, 1024))), units[int(math.log(value, 1024))]
|
||||
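__change_units__ above picks a unit with a base-1024 logarithm; a standalone equivalent, assuming the same four units and guarding the value 0, which the logarithm cannot handle:

import math

def change_units(value):
    units = ["B", "KB", "MB", "GB"]
    if value <= 0:
        return 0, 0.0, "B"
    exp = min(int(math.log(value, 1024)), len(units) - 1)   # clamp so huge sizes still map to GB
    return value, value / (1024.0 ** exp), units[exp]

print(change_units(3 * 2 ** 20))   # (3145728, 3.0, 'MB')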
|
||||
def __get_download_info__(self):
|
||||
# Continuamos con una descarga que contiene la info al final del archivo
|
||||
# We continue with a download that contains the info at the end of the file
|
||||
self._download_info = {}
|
||||
|
||||
try:
|
||||
@@ -386,25 +367,21 @@ class Downloader(object):
|
||||
part["current"] == part["start"]
|
||||
|
||||
self._start_downloaded = sum([c["current"] - c["start"] for c in self._download_info["parts"]])
|
||||
self.pending_parts = set(
|
||||
[x for x, a in enumerate(self._download_info["parts"]) if not a["status"] == self.states.completed])
|
||||
self.completed_parts = set(
|
||||
[x for x, a in enumerate(self._download_info["parts"]) if a["status"] == self.states.completed])
|
||||
self.pending_parts = set([x for x, a in enumerate(self._download_info["parts"]) if not a["status"] == self.states.completed])
|
||||
self.completed_parts = set([x for x, a in enumerate(self._download_info["parts"]) if a["status"] == self.states.completed])
|
||||
self.save_parts = set()
|
||||
self.download_parts = set()
|
||||
|
||||
# La info no existe o no es correcta, comenzamos de 0
|
||||
# The info does not exist or is not valid, so we start from scratch
|
||||
except:
|
||||
self._download_info["parts"] = []
|
||||
if self._file_size and self._part_size:
|
||||
for x in range(0, self._file_size, self._part_size):
|
||||
end = x + self._part_size - 1
|
||||
if end >= self._file_size: end = self._file_size - 1
|
||||
self._download_info["parts"].append(
|
||||
{"start": x, "end": end, "current": x, "status": self.states.stopped})
|
||||
self._download_info["parts"].append({"start": x, "end": end, "current": x, "status": self.states.stopped})
|
||||
else:
|
||||
self._download_info["parts"].append(
|
||||
{"start": 0, "end": self._file_size - 1, "current": 0, "status": self.states.stopped})
|
||||
self._download_info["parts"].append({"start": 0, "end": self._file_size - 1, "current": 0, "status": self.states.stopped})
|
||||
|
||||
self._download_info["size"] = self._file_size
|
||||
self._start_downloaded = 0
|
||||
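A sketch of the part table built in the except branch above: the file is cut into ranges of part_size bytes, each tracked with start/end/current/status. The status constant is simplified to a plain integer here:

STOPPED = 0   # stands in for self.states.stopped

def build_parts(file_size, part_size):
    parts = []
    if file_size and part_size:
        for start in range(0, file_size, part_size):
            end = min(start + part_size - 1, file_size - 1)
            parts.append({"start": start, "end": end, "current": start, "status": STOPPED})
    else:
        # unknown size or no splitting: a single part covering the whole file
        parts.append({"start": 0, "end": file_size - 1, "current": 0, "status": STOPPED})
    return parts

print(len(build_parts(10 * 2 ** 20, 2 ** 22)))   # 10 MiB file split into 4 MiB parts -> 3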
@@ -436,7 +413,7 @@ class Downloader(object):
|
||||
logger.info("Thread started: %s" % threading.current_thread().name)
|
||||
|
||||
while self._state == self.states.downloading:
|
||||
if not self.pending_parts and not self.download_parts and not self.save_parts: # Descarga finalizada
|
||||
if not self.pending_parts and not self.download_parts and not self.save_parts: # Download finished
|
||||
self._state = self.states.completed
|
||||
self.file.close()
|
||||
continue
|
||||
@@ -446,8 +423,7 @@ class Downloader(object):
|
||||
|
||||
save_id = min(self.save_parts)
|
||||
|
||||
if not self._seekable and self._download_info["parts"][save_id][
|
||||
"start"] >= 2 ** 31 and not self.__check_consecutive__(save_id):
|
||||
if not self._seekable and self._download_info["parts"][save_id]["start"] >= 2 ** 31 and not self.__check_consecutive__(save_id):
|
||||
continue
|
||||
|
||||
if self._seekable or self._download_info["parts"][save_id]["start"] < 2 ** 31:
|
||||
@@ -533,8 +509,7 @@ class Downloader(object):
|
||||
self.__set_part_connecting__(id)
|
||||
|
||||
try:
|
||||
connection = self.__open_connection__(self._download_info["parts"][id]["current"],
|
||||
self._download_info["parts"][id]["end"])
|
||||
connection = self.__open_connection__(self._download_info["parts"][id]["current"], self._download_info["parts"][id]["end"])
|
||||
except:
|
||||
self.__set_part__error__(id)
|
||||
time.sleep(5)
|
||||
@@ -559,8 +534,7 @@ class Downloader(object):
|
||||
self.download_parts.remove(id)
|
||||
break
|
||||
else:
|
||||
if len(buffer) and self._download_info["parts"][id]["current"] < self._download_info["parts"][id][
|
||||
"end"]:
|
||||
if len(buffer) and self._download_info["parts"][id]["current"] < self._download_info["parts"][id]["end"]:
|
||||
# file.write(buffer)
|
||||
self._buffer[id].append(buffer)
|
||||
self._download_info["parts"][id]["current"] += len(buffer)
|
||||
@@ -570,13 +544,9 @@ class Downloader(object):
|
||||
vm = self.__change_units__(velocidad_minima)
|
||||
v = self.__change_units__(velocidad)
|
||||
|
||||
if velocidad_minima > speed[-1] and velocidad_minima > speed[-2] and \
|
||||
self._download_info["parts"][id]["current"] < \
|
||||
self._download_info["parts"][id]["end"]:
|
||||
if velocidad_minima > speed[-1] and velocidad_minima > speed[-2] and self._download_info["parts"][id]["current"] < self._download_info["parts"][id]["end"]:
|
||||
if connection.fp: connection.fp._sock.close()
|
||||
logger.info(
|
||||
"ID: %s Restarting connection! | Minimum Speed: %.2f %s/s | Speed: %.2f %s/s" % \
|
||||
(id, vm[1], vm[2], v[1], v[2]))
|
||||
logger.info("ID: %s Restarting connection! | Minimum Speed: %.2f %s/s | Speed: %.2f %s/s" % (id, vm[1], vm[2], v[1], v[2]))
|
||||
# file.close()
|
||||
break
|
||||
else:
|
||||
|
||||
@@ -92,8 +92,8 @@ def limpia_nombre_sin_acentos(s):
|
||||
def limpia_nombre_excepto_1(s):
|
||||
if not s:
|
||||
return ''
|
||||
# Titulo de entrada
|
||||
# Convierte a unicode
|
||||
# Input title
|
||||
# Convert to unicode
|
||||
try:
|
||||
s = unicode(s, "utf-8")
|
||||
except UnicodeError:
|
||||
@@ -103,12 +103,12 @@ def limpia_nombre_excepto_1(s):
|
||||
except UnicodeError:
|
||||
# logger.info("no es iso-8859-1")
|
||||
pass
|
||||
# Elimina acentos
|
||||
# Remove accents
|
||||
s = limpia_nombre_sin_acentos(s)
|
||||
# Elimina caracteres prohibidos
|
||||
# Remove prohibited characters
|
||||
validchars = " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890!#$%&'()-@[]^_`{}~."
|
||||
stripped = ''.join(c for c in s if c in validchars)
|
||||
# Convierte a iso
|
||||
# Convert to iso
|
||||
s = stripped.encode("iso-8859-1")
|
||||
if PY3:
|
||||
s = s.decode('utf-8')
|
||||
@@ -124,30 +124,30 @@ def limpia_nombre_excepto_2(s):
|
||||
|
||||
|
||||
def getfilefromtitle(url, title):
|
||||
# Imprime en el log lo que va a descartar
|
||||
# Print in the log what is going to be discarded
|
||||
logger.info("title=" + title)
|
||||
logger.info("url=" + url)
|
||||
plataforma = config.get_system_platform()
|
||||
logger.info("platform=" + plataforma)
|
||||
|
||||
# nombrefichero = xbmc.makeLegalFilename(title + url[-4:])
|
||||
# filename = xbmc.makeLegalFilename(title + url[-4:])
|
||||
from core import scrapertools
|
||||
|
||||
nombrefichero = title + scrapertools.get_filename_from_url(url)[-4:]
|
||||
logger.info("filename=%s" % nombrefichero)
|
||||
logger.info("filename= %s" % nombrefichero)
|
||||
if "videobb" in url or "videozer" in url or "putlocker" in url:
|
||||
nombrefichero = title + ".flv"
|
||||
if "videobam" in url:
|
||||
nombrefichero = title + "." + url.rsplit(".", 1)[1][0:3]
|
||||
|
||||
logger.info("filename=%s" % nombrefichero)
|
||||
logger.info("filename= %s" % nombrefichero)
|
||||
|
||||
nombrefichero = limpia_nombre_caracteres_especiales(nombrefichero)
|
||||
|
||||
logger.info("filename=%s" % nombrefichero)
|
||||
logger.info("filename= %s" % nombrefichero)
|
||||
|
||||
fullpath = filetools.join(config.get_setting("downloadpath"), nombrefichero)
|
||||
logger.info("fullpath=%s" % fullpath)
|
||||
logger.info("fullpath= %s" % fullpath)
|
||||
|
||||
if config.is_xbmc() and fullpath.startswith("special://"):
|
||||
import xbmc
|
||||
@@ -164,7 +164,7 @@ def downloadtitle(url, title):
|
||||
def downloadbest(video_urls, title, continuar=False):
|
||||
logger.info()
|
||||
|
||||
# Le da la vuelta, para poner el de más calidad primero ( list() es para que haga una copia )
|
||||
# Reverse it, to put the highest quality one first (list() is used to make a copy)
|
||||
invertida = list(video_urls)
|
||||
invertida.reverse()
|
||||
|
||||
@@ -176,10 +176,10 @@ def downloadbest(video_urls, title, continuar=False):
|
||||
else:
|
||||
logger.info("Downloading option " + title + " " + url.encode('ascii', 'ignore').decode('utf-8'))
|
||||
|
||||
# Calcula el fichero donde debe grabar
|
||||
# Work out the file where it should be saved
|
||||
try:
|
||||
fullpath = getfilefromtitle(url, title.strip())
|
||||
# Si falla, es porque la URL no vale para nada
|
||||
# If it fails, it is because the URL is useless
|
||||
except:
|
||||
import traceback
|
||||
logger.error(traceback.format_exc())
|
||||
@@ -188,24 +188,24 @@ def downloadbest(video_urls, title, continuar=False):
|
||||
# Descarga
|
||||
try:
|
||||
ret = downloadfile(url, fullpath, continuar=continuar)
|
||||
# Llegados a este punto, normalmente es un timeout
|
||||
# At this point, it is usually a timeout.
|
||||
except urllib.error.URLError as e:
|
||||
import traceback
|
||||
logger.error(traceback.format_exc())
|
||||
ret = -2
|
||||
|
||||
# El usuario ha cancelado la descarga
|
||||
# The user has canceled the download
|
||||
if ret == -1:
|
||||
return -1
|
||||
else:
|
||||
# El fichero ni siquiera existe
|
||||
# The file doesn't even exist
|
||||
if not filetools.exists(fullpath):
|
||||
logger.info("-> You have not downloaded anything, testing with the following option if there is")
|
||||
# El fichero existe
|
||||
# The file exists
|
||||
else:
|
||||
tamanyo = filetools.getsize(fullpath)
|
||||
|
||||
# Tiene tamaño 0
|
||||
# It has size 0
|
||||
if tamanyo == 0:
|
||||
logger.info("-> Download a file with size 0, testing with the following option if it exists")
|
||||
os.remove(fullpath)
|
||||
@@ -217,8 +217,8 @@ def downloadbest(video_urls, title, continuar=False):
|
||||
|
||||
|
||||
def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False, resumir=True):
|
||||
logger.info("url=" + url)
|
||||
logger.info("filename=" + nombrefichero)
|
||||
logger.info("url= " + url)
|
||||
logger.info("filename= " + nombrefichero)
|
||||
|
||||
if headers is None:
|
||||
headers = []
|
||||
@@ -230,36 +230,36 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False
|
||||
nombrefichero = xbmc.translatePath(nombrefichero)
|
||||
|
||||
try:
|
||||
# Si no es XBMC, siempre a "Silent"
|
||||
# If it is not XBMC, always "Silent"
|
||||
from platformcode import platformtools
|
||||
|
||||
# antes
|
||||
# before
|
||||
# f=open(nombrefichero,"wb")
|
||||
try:
|
||||
import xbmc
|
||||
nombrefichero = xbmc.makeLegalFilename(nombrefichero)
|
||||
except:
|
||||
pass
|
||||
logger.info("filename=" + nombrefichero)
|
||||
logger.info("filename= " + nombrefichero)
|
||||
|
||||
# El fichero existe y se quiere continuar
|
||||
# The file exists and you want to continue
|
||||
if filetools.exists(nombrefichero) and continuar:
|
||||
f = filetools.file_open(nombrefichero, 'r+b', vfs=VFS)
|
||||
if resumir:
|
||||
exist_size = filetools.getsize(nombrefichero)
|
||||
logger.info("the file exists, size=%d" % exist_size)
|
||||
logger.info("the file exists, size= %d" % exist_size)
|
||||
grabado = exist_size
|
||||
f.seek(exist_size)
|
||||
else:
|
||||
exist_size = 0
|
||||
grabado = 0
|
||||
|
||||
# el fichero ya existe y no se quiere continuar, se aborta
|
||||
# the file already exists and you don't want to continue, it aborts
|
||||
elif filetools.exists(nombrefichero) and not continuar:
|
||||
logger.info("the file exists, it does not download again")
|
||||
return -3
|
||||
|
||||
# el fichero no existe
|
||||
# the file does not exist
|
||||
else:
|
||||
exist_size = 0
|
||||
logger.info("the file does not exist")
|
||||
@@ -267,11 +267,11 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False
|
||||
f = filetools.file_open(nombrefichero, 'wb', vfs=VFS)
|
||||
grabado = 0
|
||||
|
||||
# Crea el diálogo de progreso
|
||||
# Create the progress dialog
|
||||
if not silent:
|
||||
progreso = platformtools.dialog_progress("plugin", "Downloading...", url, nombrefichero)
|
||||
|
||||
# Si la plataforma no devuelve un cuadro de diálogo válido, asume modo silencio
|
||||
# If the platform does not return a valid dialog box, it assumes silent mode
|
||||
if progreso is None:
|
||||
silent = True
|
||||
|
||||
@@ -291,29 +291,28 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False
|
||||
url = url.split("|")[0]
|
||||
logger.info("url=" + url)
|
||||
|
||||
# Timeout del socket a 60 segundos
|
||||
# Socket timeout at 60 seconds
|
||||
socket.setdefaulttimeout(60)
|
||||
|
||||
h = urllib.request.HTTPHandler(debuglevel=0)
|
||||
request = urllib.request.Request(url)
|
||||
for header in headers:
|
||||
logger.info("Header=" + header[0] + ": " + header[1])
|
||||
logger.info("Header= " + header[0] + ": " + header[1])
|
||||
request.add_header(header[0], header[1])
|
||||
|
||||
if exist_size > 0:
|
||||
request.add_header('Range', 'bytes=%d-' % (exist_size,))
|
||||
request.add_header('Range', 'bytes=%d-' % (exist_size,))
|
||||
|
||||
opener = urllib.request.build_opener(h)
|
||||
urllib.request.install_opener(opener)
|
||||
try:
|
||||
connexion = opener.open(request)
|
||||
except urllib.error.HTTPError as e:
|
||||
logger.error("error %d (%s) al abrir la url %s" %
|
||||
(e.code, e.msg, url))
|
||||
logger.error("error %d (%s) opening url %s" % (e.code, e.msg, url))
|
||||
f.close()
|
||||
if not silent:
|
||||
progreso.close()
|
||||
# El error 416 es que el rango pedido es mayor que el fichero => es que ya está completo
|
||||
# Error 416 means the requested range is beyond the end of the file => it is already complete
|
||||
if e.code == 416:
|
||||
return 0
|
||||
else:
|
||||
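A minimal sketch of the resume handshake used above: ask the server for the remaining bytes with a Range header and append them to the existing file. The URL is a placeholder, and HTTP errors (including 416 when the file is already complete) are left to the caller:

import os
import urllib.request

def resume_download(url, path, block_size=100 * 1024):
    exist_size = os.path.getsize(path) if os.path.exists(path) else 0
    request = urllib.request.Request(url)
    if exist_size > 0:
        request.add_header("Range", "bytes=%d-" % exist_size)   # no space after '='
    with urllib.request.urlopen(request, timeout=60) as response, open(path, "ab") as f:
        while True:
            block = response.read(block_size)
            if not block:
                break
            f.write(block)
    return os.path.getsize(path)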
@@ -327,25 +326,25 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False
|
||||
if exist_size > 0:
|
||||
totalfichero = totalfichero + exist_size
|
||||
|
||||
logger.info("Content-Length=%s" % totalfichero)
|
||||
logger.info("Content-Length= %s" % totalfichero)
|
||||
|
||||
blocksize = 100 * 1024
|
||||
|
||||
bloqueleido = connexion.read(blocksize)
|
||||
logger.info("Starting downloading the file, blocked=%s" % len(bloqueleido))
|
||||
logger.info("Starting downloading the file, blocked= %s" % len(bloqueleido))
|
||||
|
||||
maxreintentos = 10
|
||||
|
||||
while len(bloqueleido) > 0:
|
||||
try:
|
||||
# Escribe el bloque leido
|
||||
# Write the block read
|
||||
f.write(bloqueleido)
|
||||
grabado += len(bloqueleido)
|
||||
percent = int(float(grabado) * 100 / float(totalfichero))
|
||||
totalmb = float(float(totalfichero) / (1024 * 1024))
|
||||
descargadosmb = float(float(grabado) / (1024 * 1024))
|
||||
|
||||
# Lee el siguiente bloque, reintentando para no parar todo al primer timeout
|
||||
# Read the next block, retrying not to stop everything at the first timeout
|
||||
reintentos = 0
|
||||
while reintentos <= maxreintentos:
|
||||
try:
|
||||
@@ -371,7 +370,7 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False
|
||||
import traceback
|
||||
logger.error(traceback.print_exc())
|
||||
|
||||
# El usuario cancelo la descarga
|
||||
# The user cancels the download
|
||||
try:
|
||||
if progreso.iscanceled():
|
||||
logger.info("Download of file canceled")
|
||||
@@ -381,7 +380,7 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False
|
||||
except:
|
||||
pass
|
||||
|
||||
# Ha habido un error en la descarga
|
||||
# There was an error in the download
|
||||
if reintentos > maxreintentos:
|
||||
logger.info("ERROR in the file download")
|
||||
f.close()
|
||||
@@ -407,7 +406,7 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False
|
||||
error = downloadfileRTMP(url, nombrefichero, silent)
|
||||
if error and not silent:
|
||||
from platformcode import platformtools
|
||||
platformtools.dialog_ok("No puedes descargar ese vídeo", "Las descargas en RTMP aún no", "están soportadas")
|
||||
platformtools.dialog_ok("You cannot download that video "," RTMP downloads not yet "," are supported")
|
||||
else:
|
||||
import traceback
|
||||
from pprint import pprint
|
||||
@@ -433,21 +432,21 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False
|
||||
|
||||
|
||||
def downloadfileRTMP(url, nombrefichero, silent):
|
||||
''' No usa librtmp ya que no siempre está disponible.
|
||||
Lanza un subproceso con rtmpdump. En Windows es necesario instalarlo.
|
||||
No usa threads así que no muestra ninguna barra de progreso ni tampoco
|
||||
se marca el final real de la descarga en el log info.
|
||||
'''
|
||||
Does not use librtmp, since it is not always available.
|
||||
Launches a subprocess with rtmpdump. On Windows it must be installed.
|
||||
It does not use threads, so it shows no progress bar and the actual end of the download is not marked in the log either.
|
||||
'''
|
||||
Programfiles = os.getenv('Programfiles')
|
||||
if Programfiles: # Windows
|
||||
rtmpdump_cmd = Programfiles + "/rtmpdump/rtmpdump.exe"
|
||||
nombrefichero = '"' + nombrefichero + '"' # Windows necesita las comillas en el nombre
|
||||
nombrefichero = '"' + nombrefichero + '"' # Windows needs the quotes in the name
|
||||
else:
|
||||
rtmpdump_cmd = "/usr/bin/rtmpdump"
|
||||
|
||||
if not filetools.isfile(rtmpdump_cmd) and not silent:
|
||||
from platformcode import platformtools
|
||||
advertencia = platformtools.dialog_ok("Falta " + rtmpdump_cmd, "Comprueba que rtmpdump está instalado")
|
||||
advertencia = platformtools.dialog_ok("Lack " + rtmpdump_cmd, "Check that rtmpdump is installed")
|
||||
return True
|
||||
|
||||
valid_rtmpdump_options = ["help", "url", "rtmp", "host", "port", "socks", "protocol", "playpath", "playlist",
|
||||
@@ -475,13 +474,11 @@ def downloadfileRTMP(url, nombrefichero, silent):
|
||||
try:
|
||||
rtmpdump_args = [rtmpdump_cmd] + rtmpdump_args + ["-o", nombrefichero]
|
||||
from os import spawnv, P_NOWAIT
|
||||
logger.info("Iniciando descarga del fichero: %s" % " ".join(rtmpdump_args))
|
||||
logger.info("Initiating file download: %s" % " ".join(rtmpdump_args))
|
||||
rtmpdump_exit = spawnv(P_NOWAIT, rtmpdump_cmd, rtmpdump_args)
|
||||
if not silent:
|
||||
from platformcode import platformtools
|
||||
advertencia = platformtools.dialog_ok("La opción de descarga RTMP es experimental",
|
||||
"y el vídeo se descargará en segundo plano.",
|
||||
"No se mostrará ninguna barra de progreso.")
|
||||
advertencia = platformtools.dialog_ok("RTMP download option is experimental", "and the video will download in the background.", "No progress bar will be displayed.")
|
||||
except:
|
||||
return True
|
||||
|
||||
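A hedged sketch of the non-blocking launch this function performs, using subprocess.Popen instead of the legacy os.spawnv; rtmpdump must be installed and reachable, and the URL is a placeholder:

import subprocess

def start_rtmpdump(url, output_file, rtmpdump_cmd="rtmpdump"):
    # fire-and-forget: no progress reporting, matching the behaviour described in the docstring
    args = [rtmpdump_cmd, "-r", url, "-o", output_file]
    return subprocess.Popen(args)   # the caller may poll() or wait() on the returned process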
@@ -489,13 +486,13 @@ def downloadfileRTMP(url, nombrefichero, silent):
|
||||
|
||||
|
||||
def downloadfileGzipped(url, pathfichero):
|
||||
logger.info("url=" + url)
|
||||
logger.info("url= " + url)
|
||||
nombrefichero = pathfichero
|
||||
logger.info("filename=" + nombrefichero)
|
||||
logger.info("filename= " + nombrefichero)
|
||||
|
||||
import xbmc
|
||||
nombrefichero = xbmc.makeLegalFilename(nombrefichero)
|
||||
logger.info("filename=" + nombrefichero)
|
||||
logger.info("filename= " + nombrefichero)
|
||||
patron = "(http://[^/]+)/.+"
|
||||
matches = re.compile(patron, re.DOTALL).findall(url)
|
||||
|
||||
@@ -519,11 +516,11 @@ def downloadfileGzipped(url, pathfichero):
|
||||
|
||||
txdata = ""
|
||||
|
||||
# Crea el diálogo de progreso
|
||||
# Create the progress dialog
|
||||
from platformcode import platformtools
|
||||
progreso = platformtools.dialog_progress("addon", config.get_localized_string(60200), url.split("|")[0], nombrefichero)
|
||||
|
||||
# Timeout del socket a 60 segundos
|
||||
# Socket timeout at 60 seconds
|
||||
socket.setdefaulttimeout(10)
|
||||
|
||||
h = urllib.request.HTTPHandler(debuglevel=0)
|
||||
@@ -536,10 +533,10 @@ def downloadfileGzipped(url, pathfichero):
|
||||
try:
|
||||
connexion = opener.open(request)
|
||||
except urllib.error.HTTPError as e:
|
||||
logger.error("error %d (%s) al abrir la url %s" %
|
||||
logger.error("error %d (%s) when opening the url %s" %
|
||||
(e.code, e.msg, url))
|
||||
progreso.close()
|
||||
# El error 416 es que el rango pedido es mayor que el fichero => es que ya está completo
|
||||
# Error 416 means the requested range is beyond the end of the file => it is already complete
|
||||
if e.code == 416:
|
||||
return 0
|
||||
else:
|
||||
@@ -562,13 +559,13 @@ def downloadfileGzipped(url, pathfichero):
|
||||
nombrefichero = filetools.join(pathfichero, titulo)
|
||||
totalfichero = int(connexion.headers["Content-Length"])
|
||||
|
||||
# despues
|
||||
# after
|
||||
f = filetools.file_open(nombrefichero, 'w', vfs=VFS)
|
||||
|
||||
logger.info("new file open")
|
||||
|
||||
grabado = 0
|
||||
logger.info("Content-Length=%s" % totalfichero)
|
||||
logger.info("Content-Length= %s" % totalfichero)
|
||||
|
||||
blocksize = 100 * 1024
|
||||
|
||||
@@ -581,7 +578,7 @@ def downloadfileGzipped(url, pathfichero):
|
||||
gzipper = gzip.GzipFile(fileobj=compressedstream)
|
||||
bloquedata = gzipper.read()
|
||||
gzipper.close()
|
||||
logger.info("Starting downloading the file, blocked=%s" % len(bloqueleido))
|
||||
logger.info("Starting downloading the file, blocked= %s" % len(bloqueleido))
|
||||
except:
|
||||
logger.error("ERROR: The file to be downloaded is not compressed with Gzip")
|
||||
f.close()
|
||||
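The gzip handling above wraps the block read from the connection in a GzipFile; a standalone sketch of that decode step, using an in-memory gzip payload instead of a network response:

import gzip
import io

def gunzip_block(compressed_bytes):
    # downloadfileGzipped does the same with the block read from the HTTP connection
    with gzip.GzipFile(fileobj=io.BytesIO(compressed_bytes)) as gz:
        return gz.read()

sample = gzip.compress(b"hello gzipped download")
print(gunzip_block(sample))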
@@ -592,14 +589,14 @@ def downloadfileGzipped(url, pathfichero):
|
||||
|
||||
while len(bloqueleido) > 0:
|
||||
try:
|
||||
# Escribe el bloque leido
|
||||
# Write the block read
|
||||
f.write(bloquedata)
|
||||
grabado += len(bloqueleido)
|
||||
percent = int(float(grabado) * 100 / float(totalfichero))
|
||||
totalmb = float(float(totalfichero) / (1024 * 1024))
|
||||
descargadosmb = float(float(grabado) / (1024 * 1024))
|
||||
|
||||
# Lee el siguiente bloque, reintentando para no parar todo al primer timeout
|
||||
# Read the next block, retrying not to stop everything at the first timeout
|
||||
reintentos = 0
|
||||
while reintentos <= maxreintentos:
|
||||
try:
|
||||
@@ -621,8 +618,7 @@ def downloadfileGzipped(url, pathfichero):
|
||||
else:
|
||||
tiempofalta = 0
|
||||
logger.info(sec_to_hms(tiempofalta))
|
||||
progreso.update(percent, "%.2fMB/%.2fMB (%d%%) %.2f Kb/s %s mancanti " %
|
||||
(descargadosmb, totalmb, percent, old_div(velocidad, 1024), sec_to_hms(tiempofalta)))
|
||||
progreso.update(percent, "%.2fMB/%.2fMB (%d%%) %.2f Kb/s %s left " % (descargadosmb, totalmb, percent, old_div(velocidad, 1024), sec_to_hms(tiempofalta)))
|
||||
break
|
||||
except:
|
||||
reintentos += 1
|
||||
@@ -630,14 +626,14 @@ def downloadfileGzipped(url, pathfichero):
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
|
||||
# El usuario cancelo la descarga
|
||||
# The user cancels the download
|
||||
if progreso.iscanceled():
|
||||
logger.info("Download of file canceled")
|
||||
f.close()
|
||||
progreso.close()
|
||||
return -1
|
||||
|
||||
# Ha habido un error en la descarga
|
||||
# There was an error in the download
|
||||
if reintentos > maxreintentos:
|
||||
logger.info("ERROR in the file download")
|
||||
f.close()
|
||||
@@ -662,10 +658,10 @@ def downloadfileGzipped(url, pathfichero):
|
||||
|
||||
|
||||
def GetTitleFromFile(title):
|
||||
# Imprime en el log lo que va a descartar
|
||||
logger.info("title=" + title)
|
||||
# Print in the log what is going to be discarded
|
||||
logger.info("title= " + title)
|
||||
plataforma = config.get_system_platform()
|
||||
logger.info("plataform=" + plataforma)
|
||||
logger.info("plataform= " + plataforma)
|
||||
|
||||
# nombrefichero = xbmc.makeLegalFilename(title + url[-4:])
|
||||
nombrefichero = title
|
||||
@@ -681,16 +677,15 @@ def sec_to_hms(seconds):
|
||||
def downloadIfNotModifiedSince(url, timestamp):
|
||||
logger.info("(" + url + "," + time.ctime(timestamp) + ")")
|
||||
|
||||
# Convierte la fecha a GMT
|
||||
# Convert date to GMT
|
||||
fecha_formateada = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime(timestamp))
|
||||
logger.info("fechaFormateada=%s" % fecha_formateada)
|
||||
logger.info("Formatted date= %s" % fecha_formateada)
|
||||
|
||||
# Comprueba si ha cambiado
|
||||
# Check if it has changed
|
||||
inicio = time.clock()
|
||||
req = urllib.request.Request(url)
|
||||
req.add_header('If-Modified-Since', fecha_formateada)
|
||||
req.add_header('User-Agent',
|
||||
'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12')
|
||||
req.add_header('User-Agent', 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12')
|
||||
|
||||
updated = False
|
||||
|
||||
@@ -698,18 +693,18 @@ def downloadIfNotModifiedSince(url, timestamp):
|
||||
response = urllib.request.urlopen(req)
|
||||
data = response.read()
|
||||
|
||||
# Si llega hasta aquí, es que ha cambiado
|
||||
# If it gets this far, it has changed
|
||||
updated = True
|
||||
response.close()
|
||||
|
||||
except urllib.error.URLError as e:
|
||||
# Si devuelve 304 es que no ha cambiado
|
||||
# If it returns 304 it is that it has not changed
|
||||
if hasattr(e, 'code'):
|
||||
logger.info("HTTP response code : %d" % e.code)
|
||||
if e.code == 304:
|
||||
logger.info("It has not changed")
|
||||
updated = False
|
||||
# Agarra los errores con codigo de respuesta del servidor externo solicitado
|
||||
# Grab errors with response code from requested external server
|
||||
else:
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
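A compact sketch of the conditional request performed above: send If-Modified-Since, treat HTTP 304 as "not changed" and a successful read as "changed". The URL is supplied by the caller and other errors are re-raised:

import time
import urllib.error
import urllib.request

def changed_since(url, timestamp):
    stamp = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime(timestamp))
    req = urllib.request.Request(url, headers={"If-Modified-Since": stamp})
    try:
        with urllib.request.urlopen(req, timeout=60) as response:
            return True, response.read()        # got a body: the resource changed
    except urllib.error.HTTPError as e:
        if e.code == 304:
            return False, None                  # not modified since the given timestamp
        raise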
@@ -722,20 +717,20 @@ def downloadIfNotModifiedSince(url, timestamp):
|
||||
|
||||
|
||||
def download_all_episodes(item, channel, first_episode="", preferred_server="vidspot", filter_language=""):
|
||||
logger.info("show=" + item.show)
|
||||
logger.info("show= " + item.show)
|
||||
show_title = item.show
|
||||
|
||||
# Obtiene el listado desde el que se llamó
|
||||
# Gets the listing from which it was called
|
||||
action = item.extra
|
||||
|
||||
# Esta marca es porque el item tiene algo más aparte en el atributo "extra"
|
||||
# This marker means the item carries something extra in the "extra" attribute
|
||||
if "###" in item.extra:
|
||||
action = item.extra.split("###")[0]
|
||||
item.extra = item.extra.split("###")[1]
|
||||
|
||||
episode_itemlist = getattr(channel, action)(item)
|
||||
|
||||
# Ordena los episodios para que funcione el filtro de first_episode
|
||||
# Sort episodes for the first_episode filter to work
|
||||
episode_itemlist = sorted(episode_itemlist, key=lambda it: it.title)
|
||||
|
||||
from core import servertools
|
||||
@@ -744,7 +739,7 @@ def download_all_episodes(item, channel, first_episode="", preferred_server="vid
|
||||
best_server = preferred_server
|
||||
# worst_server = "moevideos"
|
||||
|
||||
# Para cada episodio
|
||||
# For each episode
|
||||
if first_episode == "":
|
||||
empezar = True
|
||||
else:
|
||||
@@ -752,9 +747,9 @@ def download_all_episodes(item, channel, first_episode="", preferred_server="vid
|
||||
|
||||
for episode_item in episode_itemlist:
|
||||
try:
|
||||
logger.info("episode=" + episode_item.title)
|
||||
episode_title = scrapertools.find_single_match(episode_item.title, "(\d+x\d+)")
|
||||
logger.info("episode=" + episode_title)
|
||||
logger.info("episode= " + episode_item.title)
|
||||
episode_title = scrapertools.find_single_match(episode_item.title, r"(\d+x\d+)")
|
||||
logger.info("episode= " + episode_title)
|
||||
except:
|
||||
import traceback
|
||||
logger.error(traceback.format_exc())
|
||||
@@ -769,7 +764,7 @@ def download_all_episodes(item, channel, first_episode="", preferred_server="vid
|
||||
if not empezar:
|
||||
continue
|
||||
|
||||
# Extrae los mirrors
|
||||
# Extract the mirrors
|
||||
try:
|
||||
mirrors_itemlist = channel.findvideos(episode_item)
|
||||
except:
|
||||
@@ -787,7 +782,7 @@ def download_all_episodes(item, channel, first_episode="", preferred_server="vid
|
||||
|
||||
for mirror_item in mirrors_itemlist:
|
||||
|
||||
# Si está en español va al principio, si no va al final
|
||||
# If it is in Spanish it goes to the front, otherwise to the end
|
||||
if "(Italiano)" in mirror_item.title:
|
||||
if best_server in mirror_item.title.lower():
|
||||
new_mirror_itemlist_1.append(mirror_item)
|
||||
@@ -818,7 +813,7 @@ def download_all_episodes(item, channel, first_episode="", preferred_server="vid
|
||||
new_mirror_itemlist_4 + new_mirror_itemlist_5 + new_mirror_itemlist_6)
|
||||
|
||||
for mirror_item in mirrors_itemlist:
|
||||
logger.info("mirror=" + mirror_item.title)
|
||||
logger.info("mirror= " + mirror_item.title)
|
||||
|
||||
if "(Italiano)" in mirror_item.title:
|
||||
idioma = "(Italiano)"
|
||||
@@ -854,16 +849,13 @@ def download_all_episodes(item, channel, first_episode="", preferred_server="vid
|
||||
if len(video_items) > 0:
|
||||
video_item = video_items[0]
|
||||
|
||||
# Comprueba que está disponible
|
||||
video_urls, puedes, motivo = servertools.resolve_video_urls_for_playing(video_item.server,
|
||||
video_item.url,
|
||||
video_password="",
|
||||
muestra_dialogo=False)
|
||||
# Check that it is available
|
||||
video_urls, puedes, motivo = servertools.resolve_video_urls_for_playing(video_item.server, video_item.url, video_password="", muestra_dialogo=False)
|
||||
|
||||
# Lo añade a la lista de descargas
|
||||
# Adds it to the download list
|
||||
if puedes:
|
||||
logger.info("downloading mirror started...")
|
||||
# El vídeo de más calidad es el último
|
||||
# The highest quality video is the last one
|
||||
# mediaurl = video_urls[len(video_urls) - 1][1]
|
||||
devuelve = downloadbest(video_urls, show_title + " " + episode_title + " " + idioma +
|
||||
" [" + video_item.server + "]", continuar=False)
|
||||
@@ -896,9 +888,8 @@ def episodio_ya_descargado(show_title, episode_title):
|
||||
|
||||
for fichero in ficheros:
|
||||
# logger.info("fichero="+fichero)
|
||||
if fichero.lower().startswith(show_title.lower()) and \
|
||||
scrapertools.find_single_match(fichero, "(\d+x\d+)") == episode_title:
|
||||
logger.info("encontrado!")
|
||||
if fichero.lower().startswith(show_title.lower()) and scrapertools.find_single_match(fichero, "(\d+x\d+)") == episode_title:
|
||||
logger.info("found!")
|
||||
return True
|
||||
|
||||
return False
|
||||
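A sketch of the already-downloaded check above: a file counts as the episode when it starts with the show title and carries the same NNxNN tag. Pure string work, so it runs standalone:

import re

def is_episode_downloaded(show_title, episode_title, filenames):
    for name in filenames:
        tag = re.search(r"(\d+x\d+)", name)
        if name.lower().startswith(show_title.lower()) and tag and tag.group(1) == episode_title:
            return True
    return False

print(is_episode_downloaded("Example Show", "1x02",
                            ["Example Show 1x02 (Italiano) [vidspot].mp4"]))   # True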
|
||||
@@ -1,11 +1,11 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# filetools
|
||||
# Gestion de archivos con discriminación xbmcvfs/samba/local
|
||||
# File management that distinguishes between xbmcvfs / samba / local
|
||||
# ------------------------------------------------------------
|
||||
|
||||
from __future__ import division
|
||||
#from builtins import str
|
||||
# from builtins import str
|
||||
from future.builtins import range
|
||||
from past.utils import old_div
|
||||
import sys
|
||||
@@ -18,13 +18,13 @@ import traceback
|
||||
from core import scrapertools
|
||||
from platformcode import platformtools, logger
|
||||
|
||||
xbmc_vfs = True # False para desactivar XbmcVFS, True para activar
|
||||
xbmc_vfs = True # False to disable XbmcVFS, True to enable
|
||||
if xbmc_vfs:
|
||||
try:
|
||||
import xbmcvfs
|
||||
if not PY3:
|
||||
reload(sys) ### Workoround. Revisar en la migración a Python 3
|
||||
sys.setdefaultencoding('utf-8') # xbmcvfs degrada el valor de defaultencoding. Se reestablece
|
||||
reload(sys) # Workaround. Review on migration to Python 3
|
||||
sys.setdefaultencoding('utf-8') # xbmcvfs degrades the value of defaultencoding. It is restored
|
||||
xbmc_vfs = True
|
||||
except:
|
||||
xbmc_vfs = False
|
||||
@@ -35,9 +35,9 @@ if not xbmc_vfs:
|
||||
from lib.sambatools import libsmb as samba
|
||||
except:
|
||||
samba = None
|
||||
# Python 2.4 No compatible con modulo samba, hay que revisar
|
||||
# Python 2.4 is not compatible with the samba module; this needs review
|
||||
|
||||
# Windows es "mbcs" linux, osx, android es "utf8"
|
||||
# Windows is "mbcs"; linux, osx and android are "utf8"
|
||||
if os.name == "nt":
|
||||
fs_encoding = ""
|
||||
else:
|
||||
@@ -47,15 +47,15 @@ else:
|
||||
|
||||
def validate_path(path):
|
||||
"""
|
||||
Elimina cáracteres no permitidos
|
||||
@param path: cadena a validar
|
||||
Eliminate illegal characters
|
||||
@param path: string to validate
|
||||
@type path: str
|
||||
@rtype: str
|
||||
@return: devuelve la cadena sin los caracteres no permitidos
|
||||
@return: returns the string without the disallowed characters
|
||||
"""
|
||||
chars = ":*?<>|"
|
||||
if scrapertools.find_single_match(path, '(^\w+:\/\/)'):
|
||||
protocolo = scrapertools.find_single_match(path, '(^\w+:\/\/)')
|
||||
if scrapertools.find_single_match(path, r'(^\w+:\/\/)'):
|
||||
protocolo = scrapertools.find_single_match(path, r'(^\w+:\/\/)')
|
||||
import re
|
||||
parts = re.split(r'^\w+:\/\/(.+?)/(.+)', path)[1:3]
|
||||
return protocolo + parts[0] + "/" + ''.join([c for c in parts[1] if c not in chars])
|
||||
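Besides the comment translation, this hunk switches the pattern literals to raw strings (r'...'); Python 3 warns on escapes such as \w inside a normal string literal, which is presumably why the prefix is added. A short, standalone illustration of the same protocol-splitting logic:

import re

path = "smb://server/share/video:01.avi"
protocol = re.match(r"(^\w+:\/\/)", path).group(1)   # 'smb://'
chars = ":*?<>|"
host, rest = re.split(r"^\w+:\/\/(.+?)/(.+)", path)[1:3]
print(protocol + host + "/" + "".join(c for c in rest if c not in chars))
# smb://server/share/video01.avi
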
@@ -72,19 +72,19 @@ def validate_path(path):

def encode(path, _samba=False):
"""
Codifica una ruta según el sistema operativo que estemos utilizando.
El argumento path tiene que estar codificado en utf-8
@type path unicode o str con codificación utf-8
@param path parámetro a codificar
It encodes a path according to the operating system we are using.
The path argument has to be encoded in utf-8
@type path unicode or str with utf-8 encoding
@param path parameter to encode
@type _samba bool
@para _samba si la ruta es samba o no
@para _samba if the path is samba or not
@rtype: str
@return ruta codificada en juego de caracteres del sistema o utf-8 si samba
@return path encoded in the system character set, or utf-8 if samba
"""
if not isinstance(path, unicode):
path = unicode(path, "utf-8", "ignore")

if scrapertools.find_single_match(path, '(^\w+:\/\/)') or _samba:
if scrapertools.find_single_match(path, r'(^\w+:\/\/)') or _samba:
path = path.encode("utf-8", "ignore")
else:
if fs_encoding and not PY3:
@@ -95,12 +95,12 @@ def encode(path, _samba=False):

def decode(path):
"""
Convierte una cadena de texto al juego de caracteres utf-8
eliminando los caracteres que no estén permitidos en utf-8
@type: str, unicode, list de str o unicode
@param path: puede ser una ruta o un list() con varias rutas
Converts a text string to the utf-8 character set
removing characters that are not allowed in utf-8
@type: str, unicode, list of str or unicode
@param path: can be a path or a list() with multiple paths
@rtype: str
@return: ruta codificado en UTF-8
@return: path encoded in UTF-8
"""
if isinstance(path, list):
for x in range(len(path)):
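encode()/decode() above exist mainly to juggle byte/unicode paths on Python 2; a rough Python 3 sketch of the same idea, for orientation only (Python 3 file APIs already take str, so only exotic backends need bytes, and the helper names are hypothetical):

import sys

def encode_path(path, samba=False):
    # samba-style URLs are kept as utf-8 bytes; local paths can stay as str
    if samba:
        return path.encode("utf-8", "ignore")
    return path

def decode_path(path):
    if isinstance(path, bytes):
        return path.decode("utf-8", "ignore")
    return path

print(decode_path(b"series/espa\xc3\xb1a"), sys.getfilesystemencoding())
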
@@ -116,16 +116,15 @@ def decode(path):

def read(path, linea_inicio=0, total_lineas=None, whence=0, silent=False, vfs=True):
"""
Lee el contenido de un archivo y devuelve los datos
@param path: ruta del fichero
Read the contents of a file and return the data
@param path: file path
@type path: str
@param linea_inicio: primera linea a leer del fichero
@type linea_inicio: int positivo
@param total_lineas: numero maximo de lineas a leer. Si es None o superior al total de lineas se leera el
fichero hasta el final.
@type total_lineas: int positivo
@param linea_inicio: first line to read from the file
@type linea_inicio: positive int
@param total_lineas: maximum number of lines to read. If it is None or greater than the total lines, the file will be read until the end.
@type total_lineas: positive int
@rtype: str
@return: datos que contiene el fichero
@return: data contained in the file
"""
path = encode(path)
try:
@@ -182,13 +181,13 @@ def read(path, linea_inicio=0, total_lineas=None, whence=0, silent=False, vfs=Tr

def write(path, data, mode="wb", silent=False, vfs=True):
"""
Guarda los datos en un archivo
@param path: ruta del archivo a guardar
Save the data to a file
@param path: file path to save
@type path: str
@param data: datos a guardar
@param data: data to save
@type data: str
@rtype: bool
@return: devuelve True si se ha escrito correctamente o False si ha dado un error
@return: returns True if it was written correctly or False if it gave an error
"""
path = encode(path)
try:
@@ -205,7 +204,7 @@ def write(path, data, mode="wb", silent=False, vfs=True):
f.write(data)
f.close()
except:
logger.error("ERROR al guardar el archivo: %s" % path)
logger.error("ERROR saving file: %s" % path)
if not silent:
logger.error(traceback.format_exc())
return False
@@ -215,11 +214,11 @@ def write(path, data, mode="wb", silent=False, vfs=True):

def file_open(path, mode="r", silent=False, vfs=True):
"""
Abre un archivo
@param path: ruta
Open a file
@param path: path
@type path: str
@rtype: str
@return: objeto file
@return: file object
"""
path = encode(path)
try:
@@ -245,11 +244,11 @@ def file_open(path, mode="r", silent=False, vfs=True):

def file_stat(path, silent=False, vfs=True):
"""
Stat de un archivo
@param path: ruta
Stat of a file
@param path: path
@type path: str
@rtype: str
@return: objeto file
@return: file object
"""
path = encode(path)
try:
@@ -266,13 +265,13 @@ def file_stat(path, silent=False, vfs=True):

def rename(path, new_name, silent=False, strict=False, vfs=True):
"""
Renombra un archivo o carpeta
@param path: ruta del fichero o carpeta a renombrar
Rename a file or folder
@param path: path of the file or folder to rename
@type path: str
@param new_name: nuevo nombre
@param new_name: new name
@type new_name: str
@rtype: bool
@return: devuelve False en caso de error
@return: returns False on error
"""
path = encode(path)
try:
@@ -309,13 +308,13 @@ def rename(path, new_name, silent=False, strict=False, vfs=True):

def move(path, dest, silent=False, strict=False, vfs=True):
"""
Mueve un archivo
@param path: ruta del fichero a mover
Move a file
@param path: path of the file to move
@type path: str
@param dest: ruta donde mover
@param dest: path where to move
@type dest: str
@rtype: bool
@return: devuelve False en caso de error
@return: returns False on error
"""
try:
if xbmc_vfs and vfs:
@@ -343,10 +342,10 @@ def move(path, dest, silent=False, strict=False, vfs=True):
dest = encode(dest)
path = encode(path)
os.rename(path, dest)
# mixto En este caso se copia el archivo y luego se elimina el de origen
# mixed In this case the file is copied and then the source file is deleted
else:
if not silent:
dialogo = platformtools.dialog_progress("Copiando archivo", "")
dialogo = platformtools.dialog_progress("Copying file", "")
return copy(path, dest) == True and remove(path) == True
except:
logger.error("ERROR when moving file: %s to %s" % (path, dest))
@@ -359,29 +358,29 @@ def move(path, dest, silent=False, strict=False, vfs=True):

def copy(path, dest, silent=False, vfs=True):
"""
Copia un archivo
@param path: ruta del fichero a copiar
Copy a file
@param path: path of the file to copy
@type path: str
@param dest: ruta donde copiar
@param dest: path to copy
@type dest: str
@param silent: se muestra o no el cuadro de dialogo
@param silent: the dialog box is displayed or not
@type silent: bool
@rtype: bool
@return: devuelve False en caso de error
@return: returns False on error
"""
try:
if xbmc_vfs and vfs:
path = encode(path)
dest = encode(dest)
if not silent:
dialogo = platformtools.dialog_progress("Copiando archivo", "")
dialogo = platformtools.dialog_progress("Copying file", "")
return bool(xbmcvfs.copy(path, dest))

fo = file_open(path, "rb")
fd = file_open(dest, "wb")
if fo and fd:
if not silent:
dialogo = platformtools.dialog_progress("Copiando archivo", "")
dialogo = platformtools.dialog_progress("Copying file", "")
size = getsize(path)
copiado = 0
while True:
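A compact sketch of what the non-xbmcvfs branch of copy() does: read the source in blocks, write them out and report progress. platformtools.dialog_progress is replaced here by a plain callback, and all names are hypothetical:

import os

def copy_with_progress(src, dst, block_size=1024 * 1024, progress=None):
    total = os.path.getsize(src)
    copied = 0
    with open(src, "rb") as fo, open(dst, "wb") as fd:
        while True:
            chunk = fo.read(block_size)
            if not chunk:
                break
            fd.write(chunk)
            copied += len(chunk)
            if progress and total:
                progress(copied * 100 // total)
    return True

# copy_with_progress("a.bin", "b.bin", progress=lambda pct: print("%d%%" % pct))
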
@@ -408,11 +407,11 @@ def copy(path, dest, silent=False, vfs=True):

def exists(path, silent=False, vfs=True):
"""
Comprueba si existe una carpeta o fichero
@param path: ruta
Check if there is a folder or file
@param path: path
@type path: str
@rtype: bool
@return: Retorna True si la ruta existe, tanto si es una carpeta como un archivo
@return: Returns True if the path exists, whether it is a folder or a file
"""
path = encode(path)
try:
@@ -434,16 +433,16 @@ def exists(path, silent=False, vfs=True):

def isfile(path, silent=False, vfs=True):
"""
Comprueba si la ruta es un fichero
@param path: ruta
Check if the path is a file
@param path: path
@type path: str
@rtype: bool
@return: Retorna True si la ruta existe y es un archivo
@return: Returns True if the path exists and is a file
"""
path = encode(path)
try:
if xbmc_vfs and vfs:
if not scrapertools.find_single_match(path, '(^\w+:\/\/)'):
if not scrapertools.find_single_match(path, r'(^\w+:\/\/)'):
return os.path.isfile(path)
if path.endswith('/') or path.endswith('\\'):
path = path[:-1]
@@ -466,16 +465,16 @@ def isfile(path, silent=False, vfs=True):

def isdir(path, silent=False, vfs=True):
"""
Comprueba si la ruta es un directorio
@param path: ruta
Check if the path is a directory
@param path: path
@type path: str
@rtype: bool
@return: Retorna True si la ruta existe y es un directorio
@return: Returns True if the path exists and is a directory
"""
path = encode(path)
try:
if xbmc_vfs and vfs:
if not scrapertools.find_single_match(path, '(^\w+:\/\/)'):
if not scrapertools.find_single_match(path, r'(^\w+:\/\/)'):
return os.path.isdir(path)
if path.endswith('/') or path.endswith('\\'):
path = path[:-1]
@@ -498,11 +497,11 @@ def isdir(path, silent=False, vfs=True):

def getsize(path, silent=False, vfs=True):
"""
Obtiene el tamaño de un archivo
@param path: ruta del fichero
Gets the size of a file
@param path: file path
@type path: str
@rtype: str
@return: tamaño del fichero
@return: file size
"""
path = encode(path)
try:
@@ -525,11 +524,11 @@ def getsize(path, silent=False, vfs=True):

def remove(path, silent=False, vfs=True):
"""
Elimina un archivo
@param path: ruta del fichero a eliminar
Delete a file
@param path: path of the file to delete
@type path: str
@rtype: bool
@return: devuelve False en caso de error
@return: returns False on error
"""
path = encode(path)
try:
@@ -551,11 +550,11 @@ def remove(path, silent=False, vfs=True):

def rmdirtree(path, silent=False, vfs=True):
"""
Elimina un directorio y su contenido
@param path: ruta a eliminar
Delete a directory and its contents
@param path: path to remove
@type path: str
@rtype: bool
@return: devuelve False en caso de error
@return: returns False on error
"""
path = encode(path)
try:
@@ -591,11 +590,11 @@ def rmdirtree(path, silent=False, vfs=True):

def rmdir(path, silent=False, vfs=True):
"""
Elimina un directorio
@param path: ruta a eliminar
Delete a directory
@param path: path to remove
@type path: str
@rtype: bool
@return: devuelve False en caso de error
@return: returns False on error
"""
path = encode(path)
try:
@@ -619,11 +618,11 @@ def rmdir(path, silent=False, vfs=True):

def mkdir(path, silent=False, vfs=True):
"""
Crea un directorio
@param path: ruta a crear
Create a directory
@param path: path to create
@type path: str
@rtype: bool
@return: devuelve False en caso de error
@return: returns False on error
"""
path = encode(path)
try:
@@ -652,37 +651,37 @@ def mkdir(path, silent=False, vfs=True):

def walk(top, topdown=True, onerror=None, vfs=True):
"""
Lista un directorio de manera recursiva
@param top: Directorio a listar, debe ser un str "UTF-8"
List a directory recursively
@param top: Directory to list, must be a str "UTF-8"
@type top: str
@param topdown: se escanea de arriba a abajo
@param topdown: scanned from top to bottom
@type topdown: bool
@param onerror: muestra error para continuar con el listado si tiene algo seteado sino levanta una excepción
@param onerror: if set, the error is shown and the listing continues; otherwise an exception is raised
@type onerror: bool
***El parametro followlinks que por defecto es True, no se usa aqui, ya que en samba no discrimina los links
***The followlinks parameter, which defaults to True, is not used here, since samba does not discriminate links
"""
top = encode(top)
if xbmc_vfs and vfs:
for a, b, c in walk_vfs(top, topdown, onerror):
# list(b) es para que haga una copia del listado de directorios
# si no da error cuando tiene que entrar recursivamente en directorios con caracteres especiales
# list(b) makes a copy of the directory listing
# otherwise it gives an error when it has to recurse into directories with special characters
yield a, list(b), c
elif top.lower().startswith("smb://"):
for a, b, c in samba.walk(top, topdown, onerror):
# list(b) es para que haga una copia del listado de directorios
# si no da error cuando tiene que entrar recursivamente en directorios con caracteres especiales
# list(b) makes a copy of the directory listing
# otherwise it gives an error when it has to recurse into directories with special characters
yield decode(a), decode(list(b)), decode(c)
else:
for a, b, c in os.walk(top, topdown, onerror):
# list(b) es para que haga una copia del listado de directorios
# si no da error cuando tiene que entrar recursivamente en directorios con caracteres especiales
# list(b) makes a copy of the directory listing
# otherwise it gives an error when it has to recurse into directories with special characters
yield decode(a), decode(list(b)), decode(c)


def walk_vfs(top, topdown=True, onerror=None):
"""
Lista un directorio de manera recursiva
Como xmbcvfs no tiene esta función, se copia la lógica de libsmb(samba) para realizar la previa al Walk
List a directory recursively
Since xbmcvfs does not have this function, the logic of libsmb (samba) is copied to carry out the pre-walk step
"""
top = encode(top)
dirs, nondirs = xbmcvfs.listdir(top)
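The repeated list(b) comment above says that yielding a copy avoids errors when recursing into directories with special characters; more generally, the list that walk() yields is the same object the generator keeps using, so handing the caller a copy keeps the two usages apart. A small illustration with os.walk (where pruning still works by mutating the yielded list in place):

import os

for root, dirs, files in os.walk("."):
    dirs[:] = [d for d in dirs if not d.startswith(".")]  # prune hidden dirs in place
    snapshot = list(dirs)                                  # safe copy for the caller's own use
    print(root, snapshot[:3])
    break  # one level is enough for the illustration
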
@@ -707,11 +706,11 @@ def walk_vfs(top, topdown=True, onerror=None):

def listdir(path, silent=False, vfs=True):
"""
Lista un directorio
@param path: Directorio a listar, debe ser un str "UTF-8"
List a directory
@param path: Directory to list, must be a str "UTF-8"
@type path: str
@rtype: str
@return: contenido de un directorio
@return: content of a directory
"""

path = encode(path)
@@ -732,10 +731,10 @@ def listdir(path, silent=False, vfs=True):

def join(*paths):
"""
Junta varios directorios
Corrige las barras "/" o "\" segun el sistema operativo y si es o no smaba
Join several directories
Corrects the slashes "/" or "\" according to the operating system and whether or not it is samba
@rytpe: str
@return: la ruta concatenada
@return: the concatenated path
"""
list_path = []
if paths[0].startswith("/"):
@@ -754,14 +753,14 @@ def join(*paths):

def split(path, vfs=True):
"""
Devuelve una tupla formada por el directorio y el nombre del fichero de una ruta
Returns a tuple consisting of the directory and filename of a path
@param path: ruta
@type path: str
@return: (dirname, basename)
@rtype: tuple
"""
if scrapertools.find_single_match(path, '(^\w+:\/\/)'):
protocol = scrapertools.find_single_match(path, '(^\w+:\/\/)')
if scrapertools.find_single_match(path, r'(^\w+:\/\/)'):
protocol = scrapertools.find_single_match(path, r'(^\w+:\/\/)')
if '/' not in path[6:]:
path = path.replace(protocol, protocol + "/", 1)
return path.rsplit('/', 1)
@@ -771,10 +770,10 @@ def split(path, vfs=True):

def basename(path, vfs=True):
"""
Devuelve el nombre del fichero de una ruta
@param path: ruta
Returns the file name of a path
@param path: path
@type path: str
@return: fichero de la ruta
@return: path file
@rtype: str
"""
return split(path)[1]
@@ -782,10 +781,10 @@ def basename(path, vfs=True):

def dirname(path, vfs=True):
"""
Devuelve el directorio de una ruta
@param path: ruta
Returns the directory of a path
@param path: path
@type path: str
@return: directorio de la ruta
@return: path directory
@rtype: str
"""
return split(path)[0]
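split() above special-cases protocol paths so that basename()/dirname() also work for smb:// URLs; a small, self-contained illustration of that protocol branch (the local-path branch is simplified here to a plain rsplit, POSIX-style separator assumed):

import re

def split_path(path):
    if re.match(r"^\w+:\/\/", path):
        protocol = re.match(r"(^\w+:\/\/)", path).group(1)
        if "/" not in path[6:]:
            path = path.replace(protocol, protocol + "/", 1)
        return path.rsplit("/", 1)
    return path.rsplit("/", 1)

print(split_path("smb://server/share/file.mkv"))  # ['smb://server/share', 'file.mkv']
print(split_path("smb://server"))                 # ['smb://', 'server']
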
@@ -797,15 +796,15 @@ def is_relative(path):

def remove_tags(title):
"""
devuelve el titulo sin tags como color
returns the title without tags such as color
@type title: str
@param title: title
@rtype: str
@return: cadena sin tags
@return: string without tags
"""
logger.info()

title_without_tags = scrapertools.find_single_match(title, '\[color .+?\](.+)\[\/color\]')
title_without_tags = scrapertools.find_single_match(title, r'\[color .+?\](.+)\[\/color\]')

if title_without_tags:
return title_without_tags
@@ -815,19 +814,19 @@ def remove_tags(title):

def remove_smb_credential(path):
"""
devuelve el path sin contraseña/usuario para paths de SMB
@param path: ruta
returns the path without password / user for SMB paths
@param path: path
@type path: str
@return: cadena sin credenciales
@return: string without credentials
@rtype: str
"""
logger.info()

if not scrapertools.find_single_match(path, '(^\w+:\/\/)'):
if not scrapertools.find_single_match(path, r'(^\w+:\/\/)'):
return path

protocol = scrapertools.find_single_match(path, '(^\w+:\/\/)')
path_without_credentials = scrapertools.find_single_match(path, '^\w+:\/\/(?:[^;\n]+;)?(?:[^:@\n]+[:|@])?(?:[^@\n]+@)?(.*?$)')
protocol = scrapertools.find_single_match(path, r'(^\w+:\/\/)')
path_without_credentials = scrapertools.find_single_match(path, r'^\w+:\/\/(?:[^;\n]+;)?(?:[^:@\n]+[:|@])?(?:[^@\n]+@)?(.*?$)')

if path_without_credentials:
return (protocol + path_without_credentials)

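The credential-stripping regex above is easier to read in isolation; a runnable sketch using the same pattern directly with re (the helper name is hypothetical):

import re

def strip_smb_credentials(path):
    if not re.match(r"^\w+:\/\/", path):
        return path
    protocol = re.match(r"(^\w+:\/\/)", path).group(1)
    rest = re.match(r"^\w+:\/\/(?:[^;\n]+;)?(?:[^:@\n]+[:|@])?(?:[^@\n]+@)?(.*?$)", path).group(1)
    return protocol + rest

print(strip_smb_credentials("smb://user:secret@server/share"))  # smb://server/share
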
core/item.py
@@ -12,9 +12,9 @@ if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
if PY3:
#from future import standard_library
#standard_library.install_aliases()
import urllib.parse as urllib # Es muy lento en PY2. En PY3 es nativo
import urllib.parse as urllib # It is very slow in PY2. In PY3 it is native
else:
import urllib # Usamos el nativo de PY2 que es más rápido
import urllib # We use the PY2 native one, which is faster
from core.scrapertools import unescape

import base64
@@ -29,14 +29,14 @@ class InfoLabels(dict):

def __setitem__(self, name, value):
if name in ["season", "episode"]:
# forzamos int() en season y episode
# we force int() on season and episode
try:
super(InfoLabels, self).__setitem__(name, int(value))
except:
pass

elif name in ['IMDBNumber', 'imdb_id']:
# Por compatibilidad hemos de guardar el valor en los tres campos
# For compatibility we have to save the value in the three fields
super(InfoLabels, self).__setitem__('IMDBNumber', str(value))
# super(InfoLabels, self).__setitem__('code', value)
super(InfoLabels, self).__setitem__('imdb_id', str(value))
@@ -62,22 +62,22 @@ class InfoLabels(dict):
El parametro 'default' en la funcion obj_infoLabels.get(key,default) tiene preferencia sobre los aqui definidos.
"""
if key in ['rating']:
# Ejemplo de clave q devuelve un str formateado como float por defecto
# Example of a key that returns a str formatted as a float by default
return '0.0'

elif key == 'code':
code = []
# Añadir imdb_id al listado de codigos
# Add imdb_id to the code list
if 'imdb_id' in list(super(InfoLabels, self).keys()) and super(InfoLabels, self).__getitem__('imdb_id'):
code.append(super(InfoLabels, self).__getitem__('imdb_id'))

# Completar con el resto de codigos
# Complete with the rest of the codes
for scr in ['tmdb_id', 'tvdb_id', 'noscrap_id']:
if scr in list(super(InfoLabels, self).keys()) and super(InfoLabels, self).__getitem__(scr):
value = "%s%s" % (scr[:-2], super(InfoLabels, self).__getitem__(scr))
code.append(value)

# Opcion añadir un code del tipo aleatorio
# Option to add a randomly generated code
if not code:
import time
value = time.strftime("%Y%m%d%H%M%S", time.gmtime())
@@ -109,7 +109,7 @@ class InfoLabels(dict):
return 'list'

else:
# El resto de claves devuelven cadenas vacias por defecto
# The rest of the keys return empty strings by default
return ""

def tostring(self, separador=', '):
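The InfoLabels hunks above (__setitem__ coercing season/episode, defaults for missing keys) boil down to a pattern that is easier to see in a stripped-down dict subclass; this is a hypothetical miniature, not the real class:

class MiniInfoLabels(dict):
    def __setitem__(self, name, value):
        if name in ("season", "episode"):
            try:
                value = int(value)
            except (TypeError, ValueError):
                return  # silently drop values that cannot be coerced
        super(MiniInfoLabels, self).__setitem__(name, value)

    def __missing__(self, key):
        if key == "rating":
            return "0.0"       # str formatted as a float by default
        if key == "mediatype":
            return "list"
        return ""              # every other key defaults to an empty string

labels = MiniInfoLabels()
labels["season"] = "3"
print(labels["season"], repr(labels["rating"]), repr(labels["plot"]))  # 3 '0.0' ''
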
@@ -132,7 +132,7 @@ class InfoLabels(dict):
class Item(object):
def __init__(self, **kwargs):
"""
Inicializacion del item
Item initialization
"""

# Creamos el atributo infoLabels
@@ -159,14 +159,13 @@ class Item(object):

def __contains__(self, m):
"""
Comprueba si un atributo existe en el item
Check if an attribute exists in the item
"""
return m in self.__dict__

def __setattr__(self, name, value):
"""
Función llamada al modificar cualquier atributo del item, modifica algunos atributos en función de los datos
modificados.
Function called when modifying any attribute of the item; it adjusts some attributes based on the modified data.
"""
if PY3: name = self.toutf8(name)
value = self.toutf8(value)
@@ -175,14 +174,14 @@ class Item(object):
self.__setattr__(key, value[key])
return

# Descodificamos los HTML entities
# We decode the HTML entities
if name in ["title", "plot", "fulltitle", "contentPlot", "contentTitle"]:
value = self.decode_html(value)

# Al modificar cualquiera de estos atributos content...
# When modifying any of these content... attributes
if name in ["contentTitle", "contentPlot", "plot", "contentSerieName", "contentType", "contentEpisodeTitle",
"contentSeason", "contentEpisodeNumber", "contentThumbnail", "show", "contentQuality", "quality"]:
# ...y actualizamos infoLables
# ...we also update infoLabels
if name == "contentTitle":
self.__dict__["infoLabels"]["title"] = value
elif name == "contentPlot" or name == "plot":
@@ -203,13 +202,13 @@ class Item(object):
self.__dict__["infoLabels"]["quality"] = value

elif name == "duration":
# String q representa la duracion del video en segundos
# String that represents the duration of the video in seconds
self.__dict__["infoLabels"]["duration"] = str(value)

elif name == "viewcontent" and value not in ["files", "movies", "tvshows", "seasons", "episodes"]:
super(Item, self).__setattr__("viewcontent", "files")

# Al asignar un valor a infoLables
# When assigning a value to infoLabels
elif name == "infoLabels":
if isinstance(value, dict):
value_defaultdict = InfoLabels(value)
@@ -220,22 +219,22 @@ class Item(object):

def __getattr__(self, name):
"""
Devuelve los valores por defecto en caso de que el atributo solicitado no exista en el item
Returns the default values in case the requested attribute does not exist in the item
"""
if name.startswith("__"):
return super(Item, self).__getattribute__(name)

# valor por defecto para folder
# default value for folder
if name == "folder":
return True

# valor por defecto para contentChannel
# default value for contentChannel
elif name == "contentChannel":
return "list"

# valor por defecto para viewcontent
# default value for viewcontent
elif name == "viewcontent":
# intentamos fijarlo segun el tipo de contenido...
# we try to set it according to the type of content...
if self.__dict__["infoLabels"]["mediatype"] == 'movie':
viewcontent = 'movies'
elif self.__dict__["infoLabels"]["mediatype"] in ["tvshow", "season", "episode"]:
@@ -246,7 +245,7 @@ class Item(object):
self.__dict__["viewcontent"] = viewcontent
return viewcontent

# valores guardados en infoLabels
# values saved in infoLabels
elif name in ["contentTitle", "contentPlot", "contentSerieName", "show", "contentType", "contentEpisodeTitle",
"contentSeason", "contentEpisodeNumber", "contentThumbnail", "plot", "duration",
"contentQuality", "quality"]:
@@ -258,7 +257,7 @@ class Item(object):
return self.__dict__["infoLabels"]["tvshowtitle"]
elif name == "contentType":
ret = self.__dict__["infoLabels"]["mediatype"]
if ret == 'list' and self.__dict__.get("fulltitle", None): # retrocompatibilidad
if ret == 'list' and self.__dict__.get("fulltitle", None): # backward compatibility
ret = 'movie'
self.__dict__["infoLabels"]["mediatype"] = ret
return ret
@@ -275,7 +274,7 @@ class Item(object):
else:
return self.__dict__["infoLabels"][name]

# valor por defecto para el resto de atributos
# default value for all other attributes
else:
return ""

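__setattr__/__getattr__ above keep the content* attributes and the infoLabels dict in sync and hand out defaults; a hypothetical miniature of that mechanism, reduced to a single mapped attribute:

class MiniItem(object):
    def __init__(self):
        self.__dict__["infoLabels"] = {}

    def __setattr__(self, name, value):
        if name == "contentTitle":
            self.__dict__["infoLabels"]["title"] = value
        self.__dict__[name] = value

    def __getattr__(self, name):
        # only called when normal lookup fails; return a default like the real class
        if name == "folder":
            return True
        return ""

item = MiniItem()
item.contentTitle = "Example"
print(item.infoLabels["title"], item.folder, repr(item.plot))  # Example True ''
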
@@ -284,28 +283,28 @@ class Item(object):

def set_parent_content(self, parentContent):
"""
Rellena los campos contentDetails con la informacion del item "padre"
@param parentContent: item padre
Fill the contentDetails fields with the information of the "parent" item
@param parentContent: parent item
@type parentContent: item
"""
# Comprueba que parentContent sea un Item
# Check that parentContent is an Item
if not type(parentContent) == type(self):
return
# Copia todos los atributos que empiecen por "content" y esten declarados y los infoLabels
# Copy all the attributes that start with "content" and are declared, plus the infoLabels
for attr in parentContent.__dict__:
if attr.startswith("content") or attr == "infoLabels":
self.__setattr__(attr, parentContent.__dict__[attr])

def tostring(self, separator=", "):
"""
Genera una cadena de texto con los datos del item para el log
Uso: logger.info(item.tostring())
@param separator: cadena que se usará como separador
Generate a text string with the item's data for the log
Use: logger.info(item.tostring())
@param separator: string to be used as a separator
@type separator: str
'"""
dic = self.__dict__.copy()

# Añadimos los campos content... si tienen algun valor
# We add the content... fields if they have any value
for key in ["contentTitle", "contentPlot", "contentSerieName", "contentEpisodeTitle",
"contentSeason", "contentEpisodeNumber", "contentThumbnail"]:
value = self.__getattr__(key)
@@ -337,10 +336,9 @@ class Item(object):

def tourl(self):
"""
Genera una cadena de texto con los datos del item para crear una url, para volver generar el Item usar
item.fromurl().
Generate a text string with the item data to create a url; to re-generate the Item use item.fromurl().

Uso: url = item.tourl()
Use: url = item.tourl()
"""
dump = json.dump(self.__dict__).encode("utf8")
# if empty dict
@@ -351,9 +349,9 @@ class Item(object):

def fromurl(self, url):
"""
Genera un item a partir de una cadena de texto. La cadena puede ser creada por la funcion tourl() o tener
el formato antiguo: plugin://plugin.video.kod/?channel=... (+ otros parametros)
Uso: item.fromurl("cadena")
Generate an item from a text string. The string can be created by the tourl() function or have
the old format: plugin://plugin.video.kod/?channel=... (+ other parameters)
Use: item.fromurl("string")

@param url: url
@type url: str
@@ -384,12 +382,12 @@ class Item(object):
def tojson(self, path=""):
from core import filetools
"""
Crea un JSON a partir del item, para guardar archivos de favoritos, lista de descargas, etc...
Si se especifica un path, te lo guarda en la ruta especificada, si no, devuelve la cadena json
Usos: item.tojson(path="ruta\archivo\json.json")
file.write(item.tojson())
Create a JSON from the item, to save favorite files, download list, etc....
If a path is specified, it saves it in the specified path; if not, it returns the json string
Usage: item.tojson(path="path\archivo\json.json")
file.write(item.tojson())

@param path: ruta
@param path: path
@type path: str
"""
if path:
@@ -401,14 +399,14 @@ class Item(object):
def fromjson(self, json_item=None, path=""):
from core import filetools
"""
Genera un item a partir de un archivo JSON
Si se especifica un path, lee directamente el archivo, si no, lee la cadena de texto pasada.
Usos: item = Item().fromjson(path="ruta\archivo\json.json")
item = Item().fromjson("Cadena de texto json")
Generate an item from a JSON file
If a path is specified, it reads the file directly; if not, it reads the passed text string.
Usage: item = Item().fromjson(path="path\archivo\json.json")
item = Item().fromjson("json text string")

@param json_item: item
@type json_item: json
@param path: ruta
@param path: path
@type path: str
"""
if path:
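tourl()/fromurl() above serialize the whole item into a URL-safe token (the module imports base64 for this); a self-contained sketch of that kind of round trip with plain json + base64, names hypothetical:

import base64
import json

def to_url(data):
    dump = json.dumps(data).encode("utf8")
    return base64.urlsafe_b64encode(dump).decode("ascii")

def from_url(token):
    raw = base64.urlsafe_b64decode(token.encode("ascii"))
    return json.loads(raw.decode("utf8"))

token = to_url({"channel": "example", "action": "findvideos"})
print(from_url(token))  # {'channel': 'example', 'action': 'findvideos'}
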
@@ -431,9 +429,9 @@ class Item(object):

def clone(self, **kwargs):
"""
Genera un nuevo item clonando el item actual
Usos: NuevoItem = item.clone()
NuevoItem = item.clone(title="Nuevo Titulo", action = "Nueva Accion")
Generate a new item by cloning the current item
Usage: NewItem = item.clone()
NewItem = item.clone(title="New Title", action = "New Action")
"""
newitem = copy.deepcopy(self)
if "infoLabels" in kwargs:
@@ -447,8 +445,8 @@ class Item(object):
@staticmethod
def decode_html(value):
"""
Descodifica las HTML entities
@param value: valor a decodificar
Decode the HTML entities
@param value: value to decode
@type value: str
"""
try:
@@ -461,7 +459,7 @@ class Item(object):

def toutf8(self, *args):
"""
Pasa el item a utf8
Convert the item to utf8
"""
if len(args) > 0:
value = args[0]

@@ -80,15 +80,14 @@ def to_utf8(dct):

def get_node_from_file(name_file, node, path=None):
"""
Obtiene el nodo de un fichero JSON
Gets the node of a JSON file

@param name_file: Puede ser el nombre de un canal o server (sin incluir extension)
o bien el nombre de un archivo json (con extension)
@param name_file: It can be the name of a channel or server (not including extension) or the name of a json file (with extension)
@type name_file: str
@param node: nombre del nodo a obtener
@param node: name of the node to obtain
@type node: str
@param path: Ruta base del archivo json. Por defecto la ruta de settings_channels.
@return: dict con el nodo a devolver
@param path: Base path of the json file. By default the path of settings_channels.
@return: dict with the node to return
@rtype: dict
"""
logger.info()
@@ -121,14 +120,13 @@ def get_node_from_file(name_file, node, path=None):

def check_to_backup(data, fname, dict_data):
"""
Comprueba que si dict_data(conversion del fichero JSON a dict) no es un diccionario, se genere un fichero con
data de nombre fname.bk.
Checks that, if dict_data (the JSON file converted to a dict) is not a dictionary, a file named fname.bk is generated with the data.

@param data: contenido del fichero fname
@param data: content of the fname file
@type data: str
@param fname: nombre del fichero leido
@param fname: name of the file read
@type fname: str
@param dict_data: nombre del diccionario
@param dict_data: dictionary name
@type dict_data: dict
"""
logger.info()
@@ -137,7 +135,7 @@ def check_to_backup(data, fname, dict_data):
logger.error("Error loading json from file %s" % fname)

if data != "":
# se crea un nuevo fichero
# a new file is created
from core import filetools
title = filetools.write("%s.bk" % fname, data)
if title != "":
@@ -150,16 +148,15 @@ def check_to_backup(data, fname, dict_data):

def update_node(dict_node, name_file, node, path=None, silent=False):
"""
actualiza el json_data de un fichero con el diccionario pasado
update the json_data of a file with the given dictionary

@param dict_node: diccionario con el nodo
@param dict_node: dictionary with the node
@type dict_node: dict
@param name_file: Puede ser el nombre de un canal o server (sin incluir extension)
o bien el nombre de un archivo json (con extension)
@param name_file: It can be the name of a channel or server (not including extension) or the name of a json file (with extension)
@type name_file: str
@param node: nodo a actualizar
@param path: Ruta base del archivo json. Por defecto la ruta de settings_channels.
@return result: Devuelve True si se ha escrito correctamente o False si ha dado un error
@param node: node to update
@param path: Base path of the json file. By default the path of settings_channels.
@return result: Returns True if it was written correctly or False if it gave an error
@rtype: bool
@return json_data
@rtype: dict
@@ -182,7 +179,7 @@ def update_node(dict_node, name_file, node, path=None, silent=False):
try:
data = filetools.read(fname)
dict_data = load(data)
# es un dict
# it's a dict
if dict_data:
if node in dict_data:
if not silent: logger.debug(" the key exists %s" % node)

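update_node() above is a read-modify-write of one node inside a JSON settings file; a bare-bones version of the same pattern with the standard json module (file name and node are only examples):

import json
import os
import tempfile

def update_json_node(dict_node, fname, node):
    data = {}
    if os.path.exists(fname):
        with open(fname, "r") as f:
            data = json.load(f) or {}
    data[node] = dict_node          # replace (or create) just this node
    with open(fname, "w") as f:
        json.dump(data, f, indent=2)
    return data

fname = os.path.join(tempfile.gettempdir(), "settings_example.json")
print(update_json_node({"active": True}, fname, "channel_example"))
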
@@ -9,8 +9,7 @@ from core.item import InfoLabels
from platformcode import config, logger
from platformcode import platformtools

# Este modulo es una interface para poder implementar diferentes scrapers
# contendra todos las funciones comunes
# This module is an interface to implement different scrapers; it will contain all the common functions

dict_default = None
scraper = None
@@ -18,36 +17,35 @@ scraper = None

def find_and_set_infoLabels(item):
"""
función que se llama para buscar y setear los infolabels
function called to search and set the infolabels
:param item:
:return: boleano que indica si se ha podido encontrar el 'code'
:return: Boolean indicating if the 'code' could be found
"""
global scraper
scraper = None
# logger.debug("item:\n" + item.tostring('\n'))

list_opciones_cuadro = [config.get_localized_string(60223), config.get_localized_string(60224)]
# Si se añaden más scrapers hay q declararlos aqui-> "modulo_scraper": "Texto_en_cuadro"
scrapers_disponibles = {'tmdb': config.get_localized_string(60225),
'tvdb': config.get_localized_string(60226)}
# If more scrapers are added, they must be declared here -> "modulo_scraper": "Text_in_box"
scrapers_disponibles = {'tmdb': config.get_localized_string(60225), 'tvdb': config.get_localized_string(60226)}

# Obtener el Scraper por defecto de la configuracion segun el tipo de contenido
# Get the default scraper from the configuration according to the content type
if item.contentType == "movie":
scraper_actual = ['tmdb'][config.get_setting("scraper_movies", "videolibrary")]
tipo_contenido = config.get_localized_string(70283)
title = item.contentTitle
# Completar lista de opciones para este tipo de contenido
# Complete the list of options for this type of content
list_opciones_cuadro.append(scrapers_disponibles['tmdb'])

else:
scraper_actual = ['tmdb', 'tvdb'][config.get_setting("scraper_tvshows", "videolibrary")]
tipo_contenido = "serie"
title = item.contentSerieName
# Completar lista de opciones para este tipo de contenido
# Complete the list of options for this type of content
list_opciones_cuadro.append(scrapers_disponibles['tmdb'])
list_opciones_cuadro.append(scrapers_disponibles['tvdb'])

# Importamos el scraper
# We import the scraper
try:
scraper = __import__('core.%s' % scraper_actual, fromlist=["core.%s" % scraper_actual])
except ImportError:
@@ -57,34 +55,34 @@ def find_and_set_infoLabels(item):
logger.error(traceback.format_exc())

while scraper:
# Llamamos a la funcion find_and_set_infoLabels del scraper seleccionado
# We call the find_and_set_infoLabels function of the selected scraper
scraper_result = scraper.find_and_set_infoLabels(item)

# Verificar si existe 'code'
# Check if there is a 'code'
if scraper_result and item.infoLabels['code']:
# code correcto
# correct code
logger.info("Identificador encontrado: %s" % item.infoLabels['code'])
scraper.completar_codigos(item)
return True
elif scraper_result:
# Contenido encontrado pero no hay 'code'
# Content found but no 'code'
msg = config.get_localized_string(60227) % title
else:
# Contenido no encontrado
# Content not found
msg = config.get_localized_string(60228) % title

logger.info(msg)
# Mostrar cuadro con otras opciones:
# Show box with other options:
if scrapers_disponibles[scraper_actual] in list_opciones_cuadro:
list_opciones_cuadro.remove(scrapers_disponibles[scraper_actual])
index = platformtools.dialog_select(msg, list_opciones_cuadro)

if index < 0:
logger.debug("Se ha pulsado 'cancelar' en la ventana '%s'" % msg)
logger.debug("You have clicked 'cancel' in the window '%s'" % msg)
return False

elif index == 0:
# Pregunta el titulo
# Ask for the title
title = platformtools.dialog_input(title, config.get_localized_string(60229) % tipo_contenido)
if title:
if item.contentType == "movie":
@@ -92,25 +90,25 @@ def find_and_set_infoLabels(item):
else:
item.contentSerieName = title
else:
logger.debug("he pulsado 'cancelar' en la ventana 'Introduzca el nombre correcto'")
logger.debug("You have clicked 'cancel' in the 'Enter the correct name' window")
return False

elif index == 1:
# Hay q crear un cuadro de dialogo para introducir los datos
logger.info("Completar información")
# You have to create a dialog box to enter the data
logger.info("Complete information")
if cuadro_completar(item):
# code correcto
logger.info("Identificador encontrado: %s" % str(item.infoLabels['code']))
# correct code
logger.info("Identifier found: %s" % str(item.infoLabels['code']))
return True
# raise

elif list_opciones_cuadro[index] in list(scrapers_disponibles.values()):
# Obtener el nombre del modulo del scraper
# Get the name of the scraper module
for k, v in list(scrapers_disponibles.items()):
if list_opciones_cuadro[index] == v:
if scrapers_disponibles[scraper_actual] not in list_opciones_cuadro:
list_opciones_cuadro.append(scrapers_disponibles[scraper_actual])
# Importamos el scraper k
# We import the scraper k
scraper_actual = k
try:
scraper = None
@@ -119,7 +117,7 @@ def find_and_set_infoLabels(item):
exec("import core." + scraper_actual + " as scraper_module")
break

logger.error("Error al importar el modulo scraper %s" % scraper_actual)
logger.error("Error importing the scraper module %s" % scraper_actual)


def cuadro_completar(item):
@@ -129,7 +127,7 @@ def cuadro_completar(item):
dict_default = {}

COLOR = ["0xFF65B3DA", "0xFFFFFFFF"]
# Creamos la lista de campos del infoLabel
# We create the list of infoLabel fields
controls = [("title", "text", config.get_localized_string(60230)),
("originaltitle", "text", config.get_localized_string(60231)),
("year", "text", config.get_localized_string(60232)),
@@ -171,7 +169,7 @@ def cuadro_completar(item):
if len(c) > 3:
enabled += c[3]

# default para casos especiales
# default for special cases
if c[0] == "url_tmdb" and item.infoLabels["tmdb_id"] and 'tmdb' in item.infoLabels["url_scraper"]:
dict_default[c[0]] = item.infoLabels["url_scraper"]

@@ -181,7 +179,7 @@ def cuadro_completar(item):
if not dict_default[c[0]] or dict_default[c[0]] == 'None' or dict_default[c[0]] == 0:
dict_default[c[0]] = ''
elif isinstance(dict_default[c[0]], (int, float)) or (not PY3 and isinstance(dict_default[c[0]], (int, float, long))):
# Si es numerico lo convertimos en str
# If it is numeric we convert it to str
dict_default[c[0]] = str(dict_default[c[0]])

listado_controles.append({'id': c[0],
@@ -207,7 +205,7 @@ def callback_cuadro_completar(item, dict_values):
global dict_default

if dict_values.get("title", None):
# Adaptar dict_values a infoLabels validos
# Adapt dict_values to valid infoLabels
dict_values['mediatype'] = ['movie', 'tvshow'][dict_values['mediatype']]
for k, v in list(dict_values.items()):
if k in dict_default and dict_default[k] == dict_values[k]:
@@ -229,16 +227,16 @@ def callback_cuadro_completar(item, dict_values):

def get_nfo(item):
"""
Devuelve la información necesaria para que se scrapee el resultado en la videoteca de kodi,
Returns the information necessary for the result to be scraped into the kodi video library,

@param item: elemento que contiene los datos necesarios para generar la info
@param item: element that contains the data necessary to generate the info
@type item: Item
@rtype: str
@return:
"""
logger.info()
if "infoLabels" in item and "noscrap_id" in item.infoLabels:
# Crea el fichero xml con los datos que se obtiene de item ya que no hay ningún scraper activo
# Create the xml file with the data obtained from the item since there is no active scraper
info_nfo = '<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>'

if "season" in item.infoLabels and "episode" in item.infoLabels:

@@ -56,15 +56,15 @@ def find_multiple_matches_groups(text, pattern):
return [m.groupdict() for m in r.finditer(text)]


# Convierte los codigos html "&ntilde;" y lo reemplaza por "ñ" caracter unicode utf-8
# Convert html codes such as "&ntilde;" and replace them with the "ñ" utf-8 unicode character
def decodeHtmlentities(data):
entity_re = re.compile("&(#?)(\d{1,5}|\w{1,8})(;?)")
entity_re = re.compile(r"&(#?)(\d{1,5}|\w{1,8})(;?)")

def substitute_entity(match):
ent = match.group(2) + match.group(3)
res = ""
while not ent in html5 and not ent.endswith(";") and match.group(1) != "#":
# Excepción para cuando '&' se usa como argumento en la urls contenidas en los datos
# Exception for when '&' is used as an argument in the urls contained in the data
try:
res = ent[-1] + res
ent = ent[:-1]
@@ -85,9 +85,9 @@ def decodeHtmlentities(data):


def unescape(text):
"""Removes HTML or XML character references
and entities from a text string.
keep &amp;, &gt;, &lt; in the source code.
"""
Removes HTML or XML character references and entities from a text string.
keep &amp;, &gt;, &lt; in the source code.
from Fredrik Lundh
http://effbot.org/zone/re-sub.htm#unescape-html
"""
@@ -122,7 +122,7 @@ def unescape(text):

return re.sub("&#?\w+;", fixup, str(text))

# Convierte los codigos html "&ntilde;" y lo reemplaza por "ñ" caracter unicode utf-8
# Convert html codes such as "&ntilde;" and replace them with the "ñ" utf-8 unicode character


# def decodeHtmlentities(string):
@@ -277,7 +277,7 @@ def htmlclean(cadena):
def slugify(title):
# print title

# Sustituye acentos y eñes
# Substitutes accents and eñes
title = title.replace("Á", "a")
title = title.replace("É", "e")
title = title.replace("Í", "i")
@@ -305,23 +305,23 @@ def slugify(title):
title = title.replace("/", "-")
title = title.replace("&amp;", "&")

# Pasa a minúsculas
# Lowercase
title = title.lower().strip()

# Elimina caracteres no válidos
# Remove invalid characters
validchars = "abcdefghijklmnopqrstuvwxyz1234567890- "
title = ''.join(c for c in title if c in validchars)

# Sustituye espacios en blanco duplicados y saltos de línea
title = re.compile("\s+", re.DOTALL).sub(" ", title)
# Replace duplicate blanks and line breaks
title = re.compile(r"\s+", re.DOTALL).sub(" ", title)

# Sustituye espacios en blanco por guiones
title = re.compile("\s", re.DOTALL).sub("-", title.strip())
# Replace blanks with hyphens
title = re.compile(r"\s", re.DOTALL).sub("-", title.strip())

# Sustituye espacios en blanco duplicados y saltos de línea
title = re.compile("\-+", re.DOTALL).sub("-", title)
# Replace duplicate blanks and line breaks
title = re.compile(r"\-+", re.DOTALL).sub("-", title)

# Arregla casos especiales
# Fix special cases
if title.startswith("-"):
title = title[1:]

@@ -337,10 +337,10 @@ def remove_htmltags(string):

def remove_show_from_title(title, show):
# print slugify(title)+" == "+slugify(show)
# Quita el nombre del programa del título
# Remove the program name from the title
if slugify(title).startswith(slugify(show)):

# Convierte a unicode primero, o el encoding se pierde
# Convert to unicode first, or the encoding is lost
title = unicode(title, "utf-8", "replace")
show = unicode(show, "utf-8", "replace")
title = title[len(show):].strip()
@@ -351,7 +351,7 @@ def remove_show_from_title(title, show):
if title == "":
title = str(time.time())

# Vuelve a utf-8
# Back to utf-8
title = title.encode("utf-8", "ignore")
show = show.encode("utf-8", "ignore")

@@ -360,15 +360,15 @@ def remove_show_from_title(title, show):

def get_filename_from_url(url):
if PY3:
import urllib.parse as urlparse # Es muy lento en PY2. En PY3 es nativo
import urllib.parse as urlparse # It is very slow in PY2. In PY3 it is native
else:
import urlparse # Usamos el nativo de PY2 que es más rápido
import urlparse # We use the PY2 native one, which is faster

parsed_url = urlparse.urlparse(url)
try:
filename = parsed_url.path
except:
# Si falla es porque la implementación de parsed_url no reconoce los atributos como "path"
# If it fails it is because the implementation of parsed_url does not recognize attributes such as "path"
if len(parsed_url) >= 4:
filename = parsed_url[2]
else:
@@ -382,15 +382,15 @@ def get_filename_from_url(url):

def get_domain_from_url(url):
if PY3:
import urllib.parse as urlparse # Es muy lento en PY2. En PY3 es nativo
import urllib.parse as urlparse # It is very slow in PY2. In PY3 it is native
else:
import urlparse # Usamos el nativo de PY2 que es más rápido
import urlparse # We use the PY2 native one, which is faster

parsed_url = urlparse.urlparse(url)
try:
filename = parsed_url.netloc
except:
# Si falla es porque la implementación de parsed_url no reconoce los atributos como "path"
# If it fails it is because the implementation of parsed_url does not recognize attributes such as "path"
if len(parsed_url) >= 4:
filename = parsed_url[1]
else:
@@ -401,8 +401,8 @@ def get_domain_from_url(url):

def get_season_and_episode(title):
"""
Retorna el numero de temporada y de episodio en formato "1x01" obtenido del titulo de un episodio
Ejemplos de diferentes valores para title y su valor devuelto:
Returns the season and episode number in "1x01" format obtained from the title of an episode
Examples of different values for title and its return value:
"serie 101x1.strm", "s101e1.avi", "t101e1.avi" -> '101x01'
"Name TvShow 1x6.avi" -> '1x06'
"Temp 3 episodio 2.avi" -> '3x02'
@@ -412,9 +412,9 @@ def get_season_and_episode(title):
"Episodio 25: titulo episodio" -> '' (no existe el numero de temporada)
"Serie X Temporada 1" -> '' (no existe el numero del episodio)
@type title: str
@param title: titulo del episodio de una serie
@param title: title of a series episode
@rtype: str
@return: Numero de temporada y episodio en formato "1x01" o cadena vacia si no se han encontrado
@return: Season and episode number in "1x01" format or an empty string if not found
"""
filename = ""
