Core Rebase (httptools missing)

Alhaziel
2020-02-05 20:03:46 +01:00
parent 94727450ee
commit 9e1cea2217
9 changed files with 488 additions and 366 deletions

View File

@@ -129,7 +129,7 @@ def getfilefromtitle(url, title):
logger.info("platform=" + plataforma)
# nombrefichero = xbmc.makeLegalFilename(title + url[-4:])
from . import scrapertools
from core import scrapertools
nombrefichero = title + scrapertools.get_filename_from_url(url)[-4:]
logger.info("filename=%s" % nombrefichero)
@@ -886,8 +886,8 @@ def download_all_episodes(item, channel, first_episode="", preferred_server="vid
def episodio_ya_descargado(show_title, episode_title):
from . import scrapertools
ficheros = os.listdir(".")
from core import scrapertools
ficheros = filetools.listdir(".")
for fichero in ficheros:
# logger.info("fichero="+fichero)
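The pattern in this first file repeats across the whole rebase: relative imports become absolute (from core import scrapertools) and direct os calls are routed through filetools. A minimal sketch of what a filetools.listdir-style wrapper has to do, assuming the real core/filetools.py (not shown in this diff) normalizes path encodings:

import os
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str

def listdir(path):
    # Py2 wants byte paths on some platforms; Py3 handles str natively
    if not PY3 and isinstance(path, unicode):
        path = path.encode("utf8")
    return os.listdir(path)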

View File

@@ -3,12 +3,23 @@
# Item is the object we use for representing data
# --------------------------------------------------------------------------------
#from builtins import str
from future.builtins import object
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
if PY3:
#from future import standard_library
#standard_library.install_aliases()
import urllib.parse as urllib # Very slow in PY2; native in PY3
from html.parser import HTMLParser
else:
import urllib # Use PY2's native module, which is faster
from HTMLParser import HTMLParser
import base64
import copy
import os
import urllib
from HTMLParser import HTMLParser
from core import jsontools as json
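This version gate is the backbone of the rebase and is pasted at the top of every migrated module. A short demonstration of what the shim buys, runnable on both interpreters:

import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int

# Py2-only builtins keep working after the shim:
print(isinstance(u"abc", unicode))   # True on Py2 and Py3
print(unichr(241) == u"\u00f1")      # True on Py2 and Py3
print(isinstance(10 ** 20, long))    # True on Py2 and Py3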
@@ -58,12 +69,12 @@ class InfoLabels(dict):
elif key == 'code':
code = []
# Add imdb_id to the list of codes
if 'imdb_id' in super(InfoLabels, self).keys() and super(InfoLabels, self).__getitem__('imdb_id'):
if 'imdb_id' in list(super(InfoLabels, self).keys()) and super(InfoLabels, self).__getitem__('imdb_id'):
code.append(super(InfoLabels, self).__getitem__('imdb_id'))
# Fill in the remaining codes
for scr in ['tmdb_id', 'tvdb_id', 'noscrap_id']:
if scr in super(InfoLabels, self).keys() and super(InfoLabels, self).__getitem__(scr):
if scr in list(super(InfoLabels, self).keys()) and super(InfoLabels, self).__getitem__(scr):
value = "%s%s" % (scr[:-2], super(InfoLabels, self).__getitem__(scr))
code.append(value)
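In Py3, dict.keys() returns a view, so the plain membership tests would have worked unchanged; the list() wrapping is defensive and only strictly needed where the keys are indexed or the dict is mutated during iteration. A small illustration:

d = {'imdb_id': 'tt0111161', 'tmdb_id': 278, 'noscrap_id': ''}
print('imdb_id' in d.keys())   # True on Py2 and Py3; "in" works on the view itself
first = list(d.keys())[0]      # list() is required to index the keys
for k in list(d.keys()):       # and to delete safely while iterating
    if not d[k]:
        del d[k]
print(d)                       # {'imdb_id': 'tt0111161', 'tmdb_id': 278}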
@@ -78,21 +89,21 @@ class InfoLabels(dict):
elif key == 'mediatype':
# "list", "movie", "tvshow", "season", "episode"
if 'tvshowtitle' in super(InfoLabels, self).keys() \
if 'tvshowtitle' in list(super(InfoLabels, self).keys()) \
and super(InfoLabels, self).__getitem__('tvshowtitle') != "":
if 'episode' in super(InfoLabels, self).keys() and super(InfoLabels, self).__getitem__('episode') != "":
if 'episode' in list(super(InfoLabels, self).keys()) and super(InfoLabels, self).__getitem__('episode') != "":
return 'episode'
if 'episodeName' in super(InfoLabels, self).keys() \
if 'episodeName' in list(super(InfoLabels, self).keys()) \
and super(InfoLabels, self).__getitem__('episodeName') != "":
return 'episode'
if 'season' in super(InfoLabels, self).keys() and super(InfoLabels, self).__getitem__('season') != "":
if 'season' in list(super(InfoLabels, self).keys()) and super(InfoLabels, self).__getitem__('season') != "":
return 'season'
else:
return 'tvshow'
elif 'title' in super(InfoLabels, self).keys() and super(InfoLabels, self).__getitem__('title') != "":
elif 'title' in list(super(InfoLabels, self).keys()) and super(InfoLabels, self).__getitem__('title') != "":
return 'movie'
else:
@@ -104,7 +115,7 @@ class InfoLabels(dict):
def tostring(self, separador=', '):
ls = []
dic = dict(super(InfoLabels, self).items())
dic = dict(list(super(InfoLabels, self).items()))
for i in sorted(dic.items()):
i_str = str(i)[1:-1]
@@ -158,6 +169,7 @@ class Item(object):
Function called whenever any attribute of the item is modified; it adjusts some attributes based on the
modified data.
"""
if PY3: name = self.toutf8(name)
value = self.toutf8(value)
if name == "__dict__":
for key in value:
@@ -313,9 +325,13 @@ class Item(object):
valor = dic[var].tostring(',\r\t\t')
else:
valor = dic[var].tostring()
elif PY3 and isinstance(dic[var], bytes):
valor = "'%s'" % dic[var].decode('utf-8')
else:
valor = str(dic[var])
if PY3 and isinstance(var, bytes):
var = var.decode('utf-8')
ls.append(var + "= " + valor)
return separator.join(ls)
@@ -327,12 +343,12 @@ class Item(object):
Usage: url = item.tourl()
"""
dump = json.dump(self.__dict__)
dump = json.dump(self.__dict__).encode("utf8")
# if empty dict
if not dump:
# set a str to avoid b64encode failures
dump = ""
return urllib.quote(base64.b64encode(dump))
dump = "".encode("utf8")
return str(urllib.quote(base64.b64encode(dump)))
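Py3's base64.b64encode rejects str, hence the encode("utf8") before it and the str() around quote's result. A self-contained sketch of the tourl/fromurl round trip, using the standard json module as a stand-in for core.jsontools:

import base64
import json
import sys
if sys.version_info[0] >= 3:
    import urllib.parse as urllib
else:
    import urllib

def tourl(d):
    dump = json.dumps(d).encode("utf8")   # bytes, as b64encode requires on Py3
    return str(urllib.quote(base64.b64encode(dump)))

def fromurl(url):
    raw = base64.b64decode(urllib.unquote(url))
    return json.loads(raw.decode("utf8"))

print(fromurl(tourl({"title": "Ejemplo", "year": 2020})))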
def fromurl(self, url):
"""
@@ -367,6 +383,7 @@ class Item(object):
return self
def tojson(self, path=""):
from core import filetools
"""
Creates a JSON from the item, used to save favorites files, the download list, etc...
If a path is given, it is saved at that location; otherwise the JSON string is returned
@@ -377,11 +394,13 @@ class Item(object):
@type path: str
"""
if path:
open(path, "wb").write(json.dump(self.__dict__))
#open(path, "wb").write(json.dump(self.__dict__))
res = filetools.write(path, json.dump(self.__dict__))
else:
return json.dump(self.__dict__)
def fromjson(self, json_item=None, path=""):
from core import filetools
"""
Generates an item from a JSON file
If a path is given, the file is read directly; otherwise the passed text string is parsed.
@@ -394,8 +413,9 @@ class Item(object):
@type path: str
"""
if path:
if os.path.exists(path):
json_item = open(path, "rb").read()
if filetools.exists(path):
#json_item = open(path, "rb").read()
json_item = filetools.read(path)
else:
json_item = {}
@@ -436,6 +456,8 @@ class Item(object):
unicode_title = unicode(value, "utf8", "ignore")
return HTMLParser().unescape(unicode_title).encode("utf8")
except:
if PY3 and isinstance(value, bytes):
value = value.decode("utf8")
return value
def toutf8(self, *args):
@@ -447,13 +469,18 @@ class Item(object):
else:
value = self.__dict__
if type(value) == unicode:
return value.encode("utf8")
if isinstance(value, unicode):
value = value.encode("utf8")
if PY3: value = value.decode("utf8")
return value
elif type(value) == str:
elif not PY3 and isinstance(value, str):
return unicode(value, "utf8", "ignore").encode("utf8")
elif type(value) == list:
elif PY3 and isinstance(value, bytes):
return value.decode("utf8")
elif isinstance(value, list):
for x, key in enumerate(value):
value[x] = self.toutf8(value[x])
return value
@@ -461,11 +488,12 @@ class Item(object):
elif isinstance(value, dict):
newdct = {}
for key in value:
v = self.toutf8(value[key])
if type(key) == unicode:
key = key.encode("utf8")
value_unc = self.toutf8(value[key])
key_unc = self.toutf8(key)
#if isinstance(key, unicode):
# key = key.encode("utf8")
newdct[key] = v
newdct[key_unc] = value_unc
if len(args) > 0:
if isinstance(value, InfoLabels):
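The reworked toutf8 normalizes values to the interpreter's native str (bytes are decoded on Py3, unicode encoded on Py2) and now converts dict keys as well as values. A condensed sketch of the same idea:

import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str

def toutf8(value):
    # Normalize to native str, recursing into lists and dicts (keys included)
    if not PY3 and isinstance(value, unicode):
        return value.encode("utf8")
    if PY3 and isinstance(value, bytes):
        return value.decode("utf8")
    if isinstance(value, list):
        return [toutf8(v) for v in value]
    if isinstance(value, dict):
        return dict((toutf8(k), toutf8(v)) for k, v in value.items())
    return value

print(toutf8({b"title": [b"uno", u"dos"]}))   # {'title': ['uno', 'dos']} on Py3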

View File

@@ -1,5 +1,10 @@
# -*- coding: utf-8 -*-
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
#from builtins import str
from core.item import InfoLabels
from platformcode import config, logger
from platformcode import platformtools
@@ -46,7 +51,7 @@ def find_and_set_infoLabels(item):
try:
scraper = __import__('core.%s' % scraper_actual, fromlist=["core.%s" % scraper_actual])
except ImportError:
exec "import core." + scraper_actual + " as scraper"
exec("import core." + scraper_actual + " as scraper")
except:
import traceback
logger.error(traceback.format_exc())
@@ -99,9 +104,9 @@ def find_and_set_infoLabels(item):
return True
# raise
elif list_opciones_cuadro[index] in scrapers_disponibles.values():
elif list_opciones_cuadro[index] in list(scrapers_disponibles.values()):
# Get the scraper module name
for k, v in scrapers_disponibles.items():
for k, v in list(scrapers_disponibles.items()):
if list_opciones_cuadro[index] == v:
if scrapers_disponibles[scraper_actual] not in list_opciones_cuadro:
list_opciones_cuadro.append(scrapers_disponibles[scraper_actual])
@@ -111,7 +116,7 @@ def find_and_set_infoLabels(item):
scraper = None
scraper = __import__('core.%s' % scraper_actual, fromlist=["core.%s" % scraper_actual])
except ImportError:
exec "import core." + scraper_actual + " as scraper_module"
exec("import core." + scraper_actual + " as scraper_module")
break
logger.error("Error al importar el modulo scraper %s" % scraper_actual)
@@ -175,7 +180,7 @@ def cuadro_completar(item):
if not dict_default[c[0]] or dict_default[c[0]] == 'None' or dict_default[c[0]] == 0:
dict_default[c[0]] = ''
elif isinstance(dict_default[c[0]], (int, float, long)):
elif isinstance(dict_default[c[0]], (int, float)) or (not PY3 and isinstance(dict_default[c[0]], (int, float, long))):
# If numeric, convert it to str
dict_default[c[0]] = str(dict_default[c[0]])
@@ -204,7 +209,7 @@ def callback_cuadro_completar(item, dict_values):
if dict_values.get("title", None):
# Adapt dict_values to valid infoLabels
dict_values['mediatype'] = ['movie', 'tvshow'][dict_values['mediatype']]
for k, v in dict_values.items():
for k, v in list(dict_values.items()):
if k in dict_default and dict_default[k] == dict_values[k]:
del dict_values[k]
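Py2's exec statement is a function in Py3, so exec("...") is the minimal portable spelling used in these hunks. importlib is the more idiomatic way to express the same dynamic import; a sketch with a hypothetical scraper name:

import importlib

scraper_actual = "tmdb"   # hypothetical module name
try:
    scraper = importlib.import_module("core.%s" % scraper_actual)
except ImportError:
    scraper = None        # core.tmdb is not importable outside the addon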

View File

@@ -1,17 +1,36 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------------
# Scraper tools v2 for reading and processing web elements
# Scraper tools for reading and processing web elements
# --------------------------------------------------------------------------------
#from future import standard_library
#standard_library.install_aliases()
#from builtins import str
#from builtins import chr
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
import re
import time
import urlparse
# from core import httptools
from core.entities import html5
from platformcode import logger
# def get_header_from_response(url, header_to_get="", post=None, headers=None):
# header_to_get = header_to_get.lower()
# response = httptools.downloadpage(url, post=post, headers=headers, only_headers=True)
# return response.headers.get(header_to_get)
# def read_body_and_headers(url, post=None, headers=None, follow_redirects=False, timeout=None):
# response = httptools.downloadpage(url, post=post, headers=headers, follow_redirects=follow_redirects,
# timeout=timeout)
# return response.data, response.headers
def printMatches(matches):
i = 0
for match in matches:
@@ -89,7 +108,10 @@ def unescape(text):
else:
# named entity
try:
import htmlentitydefs
if PY3:
import html.entities as htmlentitydefs
else:
import htmlentitydefs
text = unichr(htmlentitydefs.name2codepoint[text[1:-1]]).encode("utf-8")
except KeyError:
logger.error("keyerror")
@@ -98,11 +120,55 @@ def unescape(text):
pass
return text # leave as is
return re.sub("&#?\w+;", fixup, text)
return re.sub("&#?\w+;", fixup, str(text))
# Converts HTML codes like "&ntilde;" into the UTF-8 unicode character "ñ"
# def decodeHtmlentities(string):
# string = entitiesfix(string)
# entity_re = re.compile("&(#?)(\d{1,5}|\w{1,8});")
# def substitute_entity(match):
# if PY3:
# from html.entities import name2codepoint as n2cp
# else:
# from htmlentitydefs import name2codepoint as n2cp
# ent = match.group(2)
# if match.group(1) == "#":
# return unichr(int(ent)).encode('utf-8')
# else:
# cp = n2cp.get(ent)
# if cp:
# return unichr(cp).encode('utf-8')
# else:
# return match.group()
# return entity_re.subn(substitute_entity, string)[0]
# def entitiesfix(string):
# # Las entidades comienzan siempre con el símbolo & , y terminan con un punto y coma ( ; ).
# string = string.replace("&aacute", "á")
# string = string.replace("&eacute", "é")
# string = string.replace("&iacute", "í")
# string = string.replace("&oacute", "ó")
# string = string.replace("&uacute", "ú")
# string = string.replace("&Aacute", "Á")
# string = string.replace("&Eacute", "É")
# string = string.replace("&Iacute", "Í")
# string = string.replace("&Oacute", "Ó")
# string = string.replace("&Uacute", "Ú")
# string = string.replace("&uuml", "ü")
# string = string.replace("&Uuml", "Ü")
# string = string.replace("&ntilde", "ñ")
# string = string.replace("&#191", "¿")
# string = string.replace("&#161", "¡")
# string = string.replace(";;", ";")
# return string
def htmlclean(cadena):
cadena = re.compile("<!--.*?-->", re.DOTALL).sub("", cadena)
@@ -292,8 +358,12 @@ def remove_show_from_title(title, show):
return title
# scrapertools.get_filename_from_url(media_url)[-4:]
def get_filename_from_url(url):
if PY3:
import urllib.parse as urlparse # Very slow in PY2; native in PY3
else:
import urlparse # Use PY2's native module, which is faster
parsed_url = urlparse.urlparse(url)
try:
filename = parsed_url.path
@@ -311,6 +381,11 @@ def get_filename_from_url(url):
def get_domain_from_url(url):
if PY3:
import urllib.parse as urlparse # Very slow in PY2; native in PY3
else:
import urlparse # Use PY2's native module, which is faster
parsed_url = urlparse.urlparse(url)
try:
filename = parsed_url.netloc
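With httptools still out of this rebase (the "httptools missing" of the commit title), scrapertools repeats the conditional urlparse import inside each function instead of at module level. The shape of that pattern, runnable standalone:

import sys

def get_domain_from_url(url):
    # Py3: urllib.parse is native; Py2 keeps its faster built-in urlparse
    if sys.version_info[0] >= 3:
        import urllib.parse as urlparse
    else:
        import urlparse
    return urlparse.urlparse(url).netloc

print(get_domain_from_url("https://example.com/video/123.mp4"))   # example.com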

View File

@@ -3,18 +3,32 @@
# Server management
# --------------------------------------------------------------------------------
import os
from __future__ import division
from __future__ import absolute_import
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
if PY3:
#from future import standard_library
#standard_library.install_aliases()
import urllib.parse as urlparse # Very slow in PY2; native in PY3
else:
import urlparse # Use PY2's native module, which is faster
from future.builtins import range
from past.utils import old_div
import datetime
import re
import time
import filetools
import urlparse
from core import filetools
from core import httptools
from core import jsontools
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
# from servers.decrypters import zcrypt
from lib import unshortenit
dict_servers_parameters = {}
@@ -80,7 +94,7 @@ def get_servers_itemlist(itemlist, fnc=None, sort=False):
@type sort: bool
"""
# Iterate over the servers
for serverid in get_servers_list().keys():
for serverid in list(get_servers_list().keys()):
server_parameters = get_server_parameters(serverid)
# Iterate over the patterns
@@ -105,18 +119,18 @@ def get_servers_itemlist(itemlist, fnc=None, sort=False):
item.url = url
# Remove disabled servers
itemlist = filter(lambda i: not i.server or is_server_enabled(i.server), itemlist)
#itemlist = filter(lambda i: not i.server or is_server_enabled(i.server), itemlist)
# Filter if necessary
itemlist = filter_servers(itemlist)
for item in itemlist:
# Assign "directo" when the server is not found in pelisalcarta
# Assign "directo" when the server is not found in Alfa
if not item.server and item.url:
item.server = 'directo'
item.server = "directo"
if fnc:
item.title = fnc(item)
# Filter if necessary
itemlist = filter_servers(itemlist)
# Sort according to favoriteslist if necessary
if sort:
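The filter(...) line is commented out because Py3's filter returns a lazy, single-use iterator: the itemlist would be consumed by the first pass and come back empty for the later loop. The rebase moves that logic into filter_servers as list comprehensions. The gotcha in a few lines:

items = [1, 2, 3, 4]
f = filter(lambda x: x % 2 == 0, items)
print(list(f))   # [2, 4]
print(list(f))   # [] on Py3: the iterator is already exhausted
evens = [x for x in items if x % 2 == 0]   # the rebase's replacement: a real list
print(evens, evens)                        # reusable as many times as needed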
@@ -137,7 +151,8 @@ def findvideos(data, skip=False):
logger.info()
devuelve = []
skip = int(skip)
servers_list = get_servers_list().keys()
servers_list = list(get_servers_list().keys())
# Sort according to favoriteslist if necessary
servers_list = sort_servers(servers_list)
@@ -145,8 +160,8 @@ def findvideos(data, skip=False):
# Run findvideos on each active server
for serverid in servers_list:
if not is_server_enabled(serverid):
continue
'''if not is_server_enabled(serverid):
continue'''
if config.get_setting("filter_servers") == True and config.get_setting("black_list", server=serverid):
is_filter_servers = True
continue
@@ -167,6 +182,8 @@ def findvideosbyserver(data, serverid):
return []
server_parameters = get_server_parameters(serverid)
if not server_parameters["active"]:
return []
devuelve = []
if "find_videos" in server_parameters:
# Iterate over the patterns
@@ -229,6 +246,8 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo
# If the video is "directo" or "local", no further lookup is needed
if server == "directo" or server == "local":
if isinstance(video_password, list):
return video_password, len(video_password) > 0, "<br/>".join(error_messages)
logger.info("Server: %s, la url es la buena" % server)
video_urls.append(["%s [%s]" % (urlparse.urlparse(url)[2][-4:], server), url])
@@ -309,7 +328,7 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo
# Show progress
if muestra_dialogo:
progreso.update((100 / len(opciones)) * opciones.index(opcion), config.get_localized_string(70180) % server_name)
progreso.update((old_div(100, len(opciones))) * opciones.index(opcion), config.get_localized_string(70180) % server_name)
# Free mode
if opcion == "free":
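Py3 turns / into true division, which would hand progreso.update a float; old_div from past.utils restores Py2 semantics. A minimal stand-in, assuming the past package's documented behavior (floor division when both operands are int):

def old_div(a, b):
    # Same contract as past.utils.old_div
    if isinstance(a, int) and isinstance(b, int):
        return a // b
    return a / b

print(old_div(100, 3))     # 33, like Py2's 100 / 3
print(old_div(100.0, 3))   # 33.33..., floats keep true division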
@@ -377,7 +396,7 @@ def get_server_name(serverid):
serverid = serverid.lower().split(".")[0]
# Get the list of servers
server_list = get_servers_list().keys()
server_list = list(get_servers_list().keys())
# If the name is in the list
if serverid in server_list:
@@ -445,25 +464,25 @@ def get_server_parameters(server):
if server not in dict_servers_parameters:
try:
# Servers
if os.path.isfile(os.path.join(config.get_runtime_path(), "servers", server + ".json")):
path = os.path.join(config.get_runtime_path(), "servers", server + ".json")
if filetools.isfile(filetools.join(config.get_runtime_path(), "servers", server + ".json")):
path = filetools.join(config.get_runtime_path(), "servers", server + ".json")
# Debriders
elif os.path.isfile(os.path.join(config.get_runtime_path(), "servers", "debriders", server + ".json")):
path = os.path.join(config.get_runtime_path(), "servers", "debriders", server + ".json")
elif filetools.isfile(filetools.join(config.get_runtime_path(), "servers", "debriders", server + ".json")):
path = filetools.join(config.get_runtime_path(), "servers", "debriders", server + ".json")
#
#When the server is not properly defined in the channel (no connector exists), an error appears because there is no "path" and the channel must be reviewed
#
data = filetools.read(path)
dict_server = jsontools.load(data)
dict_server = jsontools.load(filetools.read(path))
# Images: URLs and local files under "resources/images" are allowed
if dict_server.get("thumbnail") and "://" not in dict_server["thumbnail"]:
dict_server["thumbnail"] = os.path.join("https://raw.githubusercontent.com/kodiondemand/media/master/resources/servers", dict_server["thumbnail"])
dict_server["thumbnail"] = filetools.join(config.get_runtime_path(), "resources", "media",
"servers", dict_server["thumbnail"])
for k in ['premium', 'id']:
dict_server[k] = dict_server.get(k, list())
if type(dict_server[k]) == str:
if isinstance(dict_server[k], str):
dict_server[k] = [dict_server[k]]
if "find_videos" in dict_server:
@@ -497,7 +516,7 @@ def get_server_json(server_name):
server_json = jsontools.load(filetools.read(server_path))
# logger.info("server_json= %s" % server_json)
except Exception, ex:
except Exception as ex:
template = "An exception of type %s occurred. Arguments:\n%r"
message = template % (type(ex).__name__, ex.args)
logger.error(" %s" % message)
@@ -554,16 +573,16 @@ def get_server_setting(name, server, default=None):
"""
# Create the folder if it does not exist
if not os.path.exists(os.path.join(config.get_data_path(), "settings_servers")):
os.mkdir(os.path.join(config.get_data_path(), "settings_servers"))
if not filetools.exists(filetools.join(config.get_data_path(), "settings_servers")):
filetools.mkdir(filetools.join(config.get_data_path(), "settings_servers"))
file_settings = os.path.join(config.get_data_path(), "settings_servers", server + "_data.json")
file_settings = filetools.join(config.get_data_path(), "settings_servers", server + "_data.json")
dict_settings = {}
dict_file = {}
if os.path.exists(file_settings):
if filetools.exists(file_settings):
# Read the saved configuration from ../settings/channel_data.json
try:
dict_file = jsontools.load(open(file_settings, "rb").read())
dict_file = jsontools.load(filetools.read(file_settings))
if isinstance(dict_file, dict) and 'settings' in dict_file:
dict_settings = dict_file['settings']
except EnvironmentError:
@@ -580,10 +599,7 @@ def get_server_setting(name, server, default=None):
dict_settings = default_settings
dict_file['settings'] = dict_settings
# Create the file ../settings/channel_data.json
json_data = jsontools.dump(dict_file)
try:
open(file_settings, "wb").write(json_data)
except EnvironmentError:
if not filetools.write(file_settings, jsontools.dump(dict_file)):
logger.info("ERROR al salvar el archivo: %s" % file_settings)
# Devolvemos el valor del parametro local 'name' si existe, si no se devuelve default
@@ -592,18 +608,18 @@ def get_server_setting(name, server, default=None):
def set_server_setting(name, value, server):
# Create the folder if it does not exist
if not os.path.exists(os.path.join(config.get_data_path(), "settings_servers")):
os.mkdir(os.path.join(config.get_data_path(), "settings_servers"))
if not filetools.exists(filetools.join(config.get_data_path(), "settings_servers")):
filetools.mkdir(filetools.join(config.get_data_path(), "settings_servers"))
file_settings = os.path.join(config.get_data_path(), "settings_servers", server + "_data.json")
file_settings = filetools.join(config.get_data_path(), "settings_servers", server + "_data.json")
dict_settings = {}
dict_file = None
if os.path.exists(file_settings):
if filetools.exists(file_settings):
# Read the saved configuration from ../settings/channel_data.json
try:
dict_file = jsontools.load(open(file_settings, "r").read())
dict_file = jsontools.load(filetools.read(file_settings))
dict_settings = dict_file.get('settings', {})
except EnvironmentError:
logger.info("ERROR al leer el archivo: %s" % file_settings)
@@ -617,10 +633,7 @@ def set_server_setting(name, value, server):
dict_file['settings'] = dict_settings
# Create the file ../settings/channel_data.json
try:
json_data = jsontools.dump(dict_file)
open(file_settings, "w").write(json_data)
except EnvironmentError:
if not filetools.write(file_settings, jsontools.dump(dict_file)):
logger.info("ERROR al salvar el archivo: %s" % file_settings)
return None
@@ -636,11 +649,10 @@ def get_servers_list():
@rtype: dict
"""
server_list = {}
for server in os.listdir(os.path.join(config.get_runtime_path(), "servers")):
for server in filetools.listdir(filetools.join(config.get_runtime_path(), "servers")):
if server.endswith(".json") and not server == "version.json":
server_parameters = get_server_parameters(server)
if server_parameters["active"] == True:
server_list[server.split(".")[0]] = server_parameters
server_list[server.split(".")[0]] = server_parameters
return server_list
@@ -654,7 +666,7 @@ def get_debriders_list():
@rtype: dict
"""
server_list = {}
for server in os.listdir(os.path.join(config.get_runtime_path(), "servers", "debriders")):
for server in filetools.listdir(filetools.join(config.get_runtime_path(), "servers", "debriders")):
if server.endswith(".json"):
server_parameters = get_server_parameters(server)
if server_parameters["active"] == True:
@@ -678,6 +690,7 @@ def sort_servers(servers_list):
else:
servers_list = sorted(servers_list,
key=lambda x: config.get_setting("favorites_servers_list", server=x) or 100)
return servers_list
@@ -689,18 +702,26 @@ def filter_servers(servers_list):
u objetos Item. En cuyo caso es necesario q tengan un atributo item.server del tipo str.
:return: Lista del mismo tipo de objetos que servers_list filtrada en funcion de la Lista Negra.
"""
#Remove the inactive ones
if servers_list:
servers_list = [i for i in servers_list if not i.server or is_server_enabled(i.server)]
if servers_list and config.get_setting('filter_servers'):
if isinstance(servers_list[0], Item):
servers_list_filter = filter(lambda x: not config.get_setting("black_list", server=x.server), servers_list)
servers_list_filter = [x for x in servers_list if not config.get_setting("black_list", server=x.server)]
else:
servers_list_filter = filter(lambda x: not config.get_setting("black_list", server=x), servers_list)
servers_list_filter = [x for x in servers_list if not config.get_setting("black_list", server=x)]
# If no links remain after filtering
if servers_list_filter or not platformtools.dialog_yesno(config.get_localized_string(60000),
config.get_localized_string(60010),
config.get_localized_string(70281)):
servers_list = servers_list_filter
if config.get_setting("favorites_servers") == True:
servers_list = sort_servers(servers_list)
return servers_list
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
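Both settings helpers above drop the open/write/EnvironmentError boilerplate in favor of filetools.write, which this diff assumes returns True on success and False on failure instead of raising. A sketch of that contract with a hypothetical stand-in:

import json

def write(path, data):   # hypothetical stand-in for filetools.write
    try:
        with open(path, "w") as f:
            f.write(data)
        return True
    except EnvironmentError:
        return False

dict_file = {'settings': {}}
if not write("/tmp/server_data.json", json.dumps(dict_file)):
    print("ERROR while saving the file")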

View File

@@ -1,5 +1,22 @@
# -*- coding: utf-8 -*-
#from future import standard_library
#standard_library.install_aliases()
#from builtins import str
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
if PY3:
import urllib.parse as urllib # Very slow in PY2; native in PY3
else:
import urllib # Use PY2's native module, which is faster
from future.builtins import range
from future.builtins import object
import ast
import copy
import re
import sqlite3
@@ -37,8 +54,8 @@ def_lang = addon.getSetting('language')
# tmdb.set_infoLabels(item, seekTmdb = True)
#
# Get basic data for a movie:
# Before calling set_infoLabels, the title to search must be in item.fulltitle
# or in item.contentTitle, and the year in item.infoLabels['year'].
# Before calling set_infoLabels, the title to search must be in item.contentTitle
# and the year in item.infoLabels['year'].
#
# Get basic data for a series:
# Before calling set_infoLabels, the title to search must be in item.show or in
@@ -73,7 +90,6 @@ def_lang = addon.getSetting('language')
otmdb_global = None
fname = filetools.join(config.get_data_path(), "kod_db.sqlite")
def create_bd():
conn = sqlite3.connect(fname)
c = conn.cursor()
@@ -160,7 +176,7 @@ def cache_response(fn):
conn = sqlite3.connect(fname, timeout=15)
c = conn.cursor()
url = re.sub('&year=-', '', args[0])
# logger.error('la url %s' % url)
if PY3: url = str.encode(url)
url_base64 = base64.b64encode(url)
c.execute("SELECT response, added FROM tmdb_cache WHERE url=?", (url_base64,))
row = c.fetchone()
@@ -171,7 +187,9 @@ def cache_response(fn):
# if no information was obtained, call the function
if not result:
result = fn(*args)
result_base64 = base64.b64encode(str(result))
result = str(result)
if PY3: result = str.encode(result)
result_base64 = base64.b64encode(result)
c.execute("INSERT OR REPLACE INTO tmdb_cache (url, response, added) VALUES (?, ?, ?)",
(url_base64, result_base64, time.time()))
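The cache decorator stores the URL as its key and the stringified response as its value, both base64-encoded; on Py3 each must be turned into bytes first because base64 rejects str. The encoding steps in isolation:

import base64
import sys
PY3 = sys.version_info[0] >= 3

url = "http://api.themoviedb.org/3/search/movie?query=superman"
if PY3: url = str.encode(url)              # bytes for the cache key
url_base64 = base64.b64encode(url)

result = {"total_results": 1}
result = str(result)                       # repr of the response dict
if PY3: result = str.encode(result)
result_base64 = base64.b64encode(result)   # bytes for the cached value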
@@ -375,6 +393,8 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda=def_lang, lock=None
# ... look up season data
item.infoLabels['mediatype'] = 'season'
temporada = otmdb_global.get_temporada(numtemporada)
if not isinstance(temporada, dict):
temporada = ast.literal_eval(temporada.decode('utf-8'))
if temporada:
# Update data
@@ -447,9 +467,6 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda=def_lang, lock=None
# ...and year or filter
if item.contentTitle:
titulo_buscado = item.contentTitle
else:
titulo_buscado = item.fulltitle
otmdb = Tmdb(texto_buscado=titulo_buscado, tipo=tipo_busqueda, idioma_busqueda=idioma_busqueda,
filtro=item.infoLabels.get('filtro', {}), year=item.infoLabels['year'])
if otmdb is not None:
@@ -588,7 +605,6 @@ def get_genres(type):
return genres.dic_generos[lang]
# Helper class
class ResultDictDefault(dict):
# Python 2.4
@@ -606,7 +622,7 @@ class ResultDictDefault(dict):
return list()
elif key == 'images_posters':
posters = dict()
if 'images' in super(ResultDictDefault, self).keys() and \
if 'images' in list(super(ResultDictDefault, self).keys()) and \
'posters' in super(ResultDictDefault, self).__getitem__('images'):
posters = super(ResultDictDefault, self).__getitem__('images')['posters']
super(ResultDictDefault, self).__setattr__("images_posters", posters)
@@ -615,7 +631,7 @@ class ResultDictDefault(dict):
elif key == "images_backdrops":
backdrops = dict()
if 'images' in super(ResultDictDefault, self).keys() and \
if 'images' in list(super(ResultDictDefault, self).keys()) and \
'backdrops' in super(ResultDictDefault, self).__getitem__('images'):
backdrops = super(ResultDictDefault, self).__getitem__('images')['backdrops']
super(ResultDictDefault, self).__setattr__("images_backdrops", backdrops)
@@ -624,7 +640,7 @@ class ResultDictDefault(dict):
elif key == "images_profiles":
profiles = dict()
if 'images' in super(ResultDictDefault, self).keys() and \
if 'images' in list(super(ResultDictDefault, self).keys()) and \
'profiles' in super(ResultDictDefault, self).__getitem__('images'):
profiles = super(ResultDictDefault, self).__getitem__('images')['profiles']
super(ResultDictDefault, self).__setattr__("images_profiles", profiles)
@@ -640,7 +656,7 @@ class ResultDictDefault(dict):
def tostring(self, separador=',\n'):
ls = []
for i in super(ResultDictDefault, self).items():
for i in list(super(ResultDictDefault, self).items()):
i_str = str(i)[1:-1]
if isinstance(i[0], str):
old = i[0] + "',"
@@ -899,12 +915,16 @@ class Tmdb(object):
logger.info("[Tmdb.py] Filling in dictionary of genres")
resultado = cls.get_json(url)
if not isinstance(resultado, dict):
resultado = ast.literal_eval(resultado.decode('utf-8'))
lista_generos = resultado["genres"]
for i in lista_generos:
cls.dic_generos[idioma][tipo][str(i["id"])] = i["name"]
except:
logger.error("Error generating dictionaries")
import traceback
logger.error(traceback.format_exc())
def __by_id(self, source='tmdb'):
@@ -926,6 +946,8 @@ class Tmdb(object):
logger.info("[Tmdb.py] Searching %s:\n%s" % (buscando, url))
resultado = self.get_json(url)
if not isinstance(resultado, dict):
resultado = ast.literal_eval(resultado.decode('utf-8'))
if resultado:
if source != "tmdb":
@@ -942,14 +964,14 @@ class Tmdb(object):
else:
# The search returned no results
msg = "The search of %s gave no results" % buscando
# logger.debug(msg)
logger.debug(msg)
def __search(self, index_results=0, page=1):
self.result = ResultDictDefault()
results = []
total_results = 0
text_simple = self.busqueda_texto.lower()
text_quote = urllib.quote(text_simple)
total_results = 0
total_pages = 0
buscando = ""
@@ -957,15 +979,17 @@ class Tmdb(object):
# http://api.themoviedb.org/3/search/movie?api_key=a1ab8b8669da03637a4b98fa39c39228&query=superman&language=es
# &include_adult=false&page=1
url = ('http://api.themoviedb.org/3/search/%s?api_key=a1ab8b8669da03637a4b98fa39c39228&query=%s&language=%s'
'&include_adult=%s&page=%s' % (self.busqueda_tipo, text_quote.replace(' ', '%20'),
'&include_adult=%s&page=%s' % (self.busqueda_tipo, text_quote,
self.busqueda_idioma, self.busqueda_include_adult, page))
if self.busqueda_year:
url += '&year=%s' % self.busqueda_year
buscando = self.busqueda_texto.capitalize()
logger.info("[Tmdb.py] Searching %s on page %s:\n%s" % (buscando, page, url))
logger.info("[Tmdb.py] Buscando %s en pagina %s:\n%s" % (buscando, page, url))
resultado = self.get_json(url)
if not isinstance(resultado, dict):
resultado = ast.literal_eval(resultado.decode('utf-8'))
total_results = resultado.get("total_results", 0)
total_pages = resultado.get("total_pages", 0)
@@ -973,11 +997,13 @@ class Tmdb(object):
if total_results > 0:
results = resultado["results"]
if self.busqueda_filtro and results:
if self.busqueda_filtro and total_results > 1:
# TODO document this part
for key, value in dict(self.busqueda_filtro).items():
for key, value in list(dict(self.busqueda_filtro).items()):
for r in results[:]:
if key not in r or r[key] != value:
if not r[key]:
r[key] = str(r[key])
if key not in r or value not in r[key]:
results.remove(r)
total_results -= 1
@@ -1015,7 +1041,7 @@ class Tmdb(object):
type_search = self.discover.get('url', '')
if type_search:
params = []
for key, value in self.discover.items():
for key, value in list(self.discover.items()):
if key != "url":
params.append(key + "=" + str(value))
# http://api.themoviedb.org/3/discover/movie?api_key=a1ab8b8669da03637a4b98fa39c39228&query=superman&language=es
@@ -1024,6 +1050,8 @@ class Tmdb(object):
logger.info("[Tmdb.py] Searcing %s:\n%s" % (type_search, url))
resultado = self.get_json(url)
if not isinstance(resultado, dict):
resultado = ast.literal_eval(resultado.decode('utf-8'))
total_results = resultado.get("total_results", -1)
total_pages = resultado.get("total_pages", 1)
@@ -1036,7 +1064,7 @@ class Tmdb(object):
results = resultado["results"]
if self.busqueda_filtro and results:
# TODO document this part
for key, value in dict(self.busqueda_filtro).items():
for key, value in list(dict(self.busqueda_filtro).items()):
for r in results[:]:
if key not in r or r[key] != value:
results.remove(r)
@@ -1184,6 +1212,8 @@ class Tmdb(object):
(self.busqueda_tipo, self.busqueda_id, self.busqueda_idioma))
resultado = self.get_json(url)
if not isinstance(resultado, dict):
resultado = ast.literal_eval(resultado.decode('utf-8'))
if 'overview' in resultado:
self.result['overview'] = resultado['overview']
@@ -1316,6 +1346,8 @@ class Tmdb(object):
logger.info("[Tmdb.py] Searcing " + buscando)
try:
self.temporada[numtemporada] = self.get_json(url)
if not isinstance(self.temporada[numtemporada], dict):
self.temporada[numtemporada] = ast.literal_eval(self.temporada[numtemporada].decode('utf-8'))
except:
logger.error("Unable to get the season")
@@ -1356,6 +1388,8 @@ class Tmdb(object):
return {}
temporada = self.get_temporada(numtemporada)
if not isinstance(temporada, dict):
temporada = ast.literal_eval(temporada.decode('utf-8'))
if not temporada:
# Se ha producido un error
return {}
@@ -1388,9 +1422,9 @@ class Tmdb(object):
dic_aux = dict((i['id'], i) for i in ret_dic["temporada_crew"])
for e in temporada["episodes"]:
for crew in e['crew']:
if crew['id'] not in dic_aux.keys():
if crew['id'] not in list(dic_aux.keys()):
dic_aux[crew['id']] = crew
ret_dic["temporada_crew"] = dic_aux.values()
ret_dic["temporada_crew"] = list(dic_aux.values())
# Get episode data if applicable
if capitulo != -1:
@@ -1429,6 +1463,8 @@ class Tmdb(object):
% (self.busqueda_tipo, self.result['id'], self.busqueda_idioma)
dict_videos = self.get_json(url)
if not isinstance(dict_videos, dict):
dict_videos = ast.literal_eval(dict_videos.decode('utf-8'))
if dict_videos['results']:
dict_videos['results'] = sorted(dict_videos['results'], key=lambda x: (x['type'], x['size']))
@@ -1440,6 +1476,8 @@ class Tmdb(object):
% (self.busqueda_tipo, self.result['id'])
dict_videos = self.get_json(url)
if not isinstance(dict_videos, dict):
dict_videos = ast.literal_eval(dict_videos.decode('utf-8'))
if dict_videos['results']:
dict_videos['results'] = sorted(dict_videos['results'], key=lambda x: (x['type'], x['size']))
@@ -1481,13 +1519,13 @@ class Tmdb(object):
if not origen:
origen = self.result
if 'credits' in origen.keys():
if 'credits' in list(origen.keys()):
dic_origen_credits = origen['credits']
origen['credits_cast'] = dic_origen_credits.get('cast', [])
origen['credits_crew'] = dic_origen_credits.get('crew', [])
del origen['credits']
items = origen.items()
items = list(origen.items())
# Informacion Temporada/episodio
if ret_infoLabels['season'] and self.temporada.get(ret_infoLabels['season']):
@@ -1496,14 +1534,14 @@ class Tmdb(object):
if ret_infoLabels['episode']:
episodio = ret_infoLabels['episode']
items.extend(self.get_episodio(ret_infoLabels['season'], episodio).items())
items.extend(list(self.get_episodio(ret_infoLabels['season'], episodio).items()))
# logger.info("ret_infoLabels" % ret_infoLabels)
for k, v in items:
if not v:
continue
elif type(v) == str:
elif isinstance(v, str):
v = re.sub(r"\n|\r|\t", "", v)
# fix
if v == "None":
@@ -1517,7 +1555,7 @@ class Tmdb(object):
elif k == 'runtime': #Duration for movies
ret_infoLabels['duration'] = int(v) * 60
elif k == 'episode_run_time': #Duration for episodes
try:
for v_alt in v: #It comes as a list (?!)
@@ -1572,7 +1610,7 @@ class Tmdb(object):
elif k == 'credits_cast' or k == 'temporada_cast' or k == 'episodio_guest_stars':
dic_aux = dict((name, character) for (name, character) in l_castandrole)
l_castandrole.extend([(p['name'], p['character']) for p in v if p['name'] not in dic_aux.keys()])
l_castandrole.extend([(p['name'], p['character']) for p in v if p['name'] not in list(dic_aux.keys())])
elif k == 'videos':
if not isinstance(v, list):
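Because the cache stores str(result), a cache hit comes back as bytes holding a dict repr rather than a dict, which explains the ast.literal_eval guard repeated after every get_json call in this file. The decode path in isolation:

import ast

resultado = b"{'total_results': 1, 'results': [{'id': 278}]}"   # as read from the cache
if not isinstance(resultado, dict):
    resultado = ast.literal_eval(resultado.decode('utf-8'))
print(resultado['total_results'])   # 1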

View File

@@ -7,9 +7,14 @@
# of the addon and also of Kodi.
# ------------------------------------------------------------
import re
from future import standard_library
standard_library.install_aliases()
#from builtins import str
from future.builtins import object
import urllib2
import urllib.request, urllib.error, urllib.parse
import re
from core import jsontools
from core import scrapertools
@@ -218,7 +223,7 @@ def set_infoLabels_item(item):
break
_next = list_episodes['links']['next']
if type(_next) == int:
if isinstance(_next, int):
page = _next
else:
break
@@ -330,7 +335,7 @@ def completar_codigos(item):
break
class Tvdb:
class Tvdb(object):
def __init__(self, **kwargs):
self.__check_token()
@@ -398,12 +403,12 @@ class Tvdb:
params = {"apikey": apikey}
try:
req = urllib2.Request(url, data=jsontools.dump(params), headers=DEFAULT_HEADERS)
response = urllib2.urlopen(req)
req = urllib.request.Request(url, data=jsontools.dump(params), headers=DEFAULT_HEADERS)
response = urllib.request.urlopen(req)
html = response.read()
response.close()
except Exception, ex:
except Exception as ex:
message = "An exception of type %s occurred. Arguments:\n%s" % (type(ex).__name__, repr(ex.args))
logger.error("error en: %s" % message)
@@ -426,12 +431,12 @@ class Tvdb:
url = HOST + "/refresh_token"
try:
req = urllib2.Request(url, headers=DEFAULT_HEADERS)
response = urllib2.urlopen(req)
req = urllib.request.Request(url, headers=DEFAULT_HEADERS)
response = urllib.request.urlopen(req)
html = response.read()
response.close()
except urllib2.HTTPError, err:
except urllib.error.HTTPError as err:
logger.error("err.code es %s" % err.code)
# a 401 error means the token has expired and we have to call login again
if err.code == 401:
@@ -439,7 +444,7 @@ class Tvdb:
else:
raise
except Exception, ex:
except Exception as ex:
message = "An exception of type %s occurred. Arguments:\n%s" % (type(ex).__name__, repr(ex.args))
logger.error("error en: %s" % message)
@@ -525,19 +530,18 @@ class Tvdb:
params = {"airedSeason": "%s" % season, "airedEpisode": "%s" % episode}
try:
import urllib
params = urllib.urlencode(params)
params = urllib.parse.urlencode(params)
url = HOST + "/series/%s/episodes/query?%s" % (_id, params)
DEFAULT_HEADERS["Accept-Language"] = lang
logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS))
req = urllib2.Request(url, headers=DEFAULT_HEADERS)
response = urllib2.urlopen(req)
req = urllib.request.Request(url, headers=DEFAULT_HEADERS)
response = urllib.request.urlopen(req)
html = response.read()
response.close()
except Exception, ex:
except Exception as ex:
message = "An exception of type %s occurred. Arguments:\n%s" % (type(ex).__name__, repr(ex.args))
logger.error("error en: %s" % message)
@@ -595,12 +599,12 @@ class Tvdb:
url = HOST + "/series/%s/episodes?page=%s" % (_id, page)
logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS))
req = urllib2.Request(url, headers=DEFAULT_HEADERS)
response = urllib2.urlopen(req)
req = urllib.request.Request(url, headers=DEFAULT_HEADERS)
response = urllib.request.urlopen(req)
html = response.read()
response.close()
except Exception, ex:
except Exception as ex:
message = "An exception of type %s occurred. Arguments:\n%s" % (type(ex).__name__, repr(ex.args))
logger.error("error en: %s" % message)
@@ -682,13 +686,13 @@ class Tvdb:
try:
DEFAULT_HEADERS["Accept-Language"] = lang
logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS))
req = urllib2.Request(url, headers=DEFAULT_HEADERS)
response = urllib2.urlopen(req)
req = urllib.request.Request(url, headers=DEFAULT_HEADERS)
response = urllib.request.urlopen(req)
html = response.read()
response.close()
except Exception, ex:
if type(ex) == urllib2.HTTPError:
except Exception as ex:
if isinstance(ex, urllib.error.HTTPError):
logger.debug("code es %s " % ex.code)
message = "An exception of type %s occurred. Arguments:\n%s" % (type(ex).__name__, repr(ex.args))
@@ -741,20 +745,19 @@ class Tvdb:
elif zap2it_id:
params["zap2itId"] = zap2it_id
import urllib
params = urllib.urlencode(params)
params = urllib.parse.urlencode(params)
DEFAULT_HEADERS["Accept-Language"] = lang
url = HOST + "/search/series?%s" % params
logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS))
req = urllib2.Request(url, headers=DEFAULT_HEADERS)
response = urllib2.urlopen(req)
req = urllib.request.Request(url, headers=DEFAULT_HEADERS)
response = urllib.request.urlopen(req)
html = response.read()
response.close()
except Exception, ex:
if type(ex) == urllib2.HTTPError:
except Exception as ex:
if isinstance(ex, urllib.error.HTTPError):
logger.debug("code es %s " % ex.code)
message = "An exception of type %s occurred. Arguments:\n%s" % (type(ex).__name__, repr(ex.args))
@@ -835,15 +838,15 @@ class Tvdb:
try:
DEFAULT_HEADERS["Accept-Language"] = lang
req = urllib2.Request(url, headers=DEFAULT_HEADERS)
req = urllib.request.Request(url, headers=DEFAULT_HEADERS)
logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS))
response = urllib2.urlopen(req)
response = urllib.request.urlopen(req)
html = response.read()
response.close()
except Exception, ex:
if type(ex) == urllib2.HTTPError:
except Exception as ex:
if isinstance(ex, urllib.error.HTTPError):
logger.debug("code es %s " % ex.code)
message = "An exception of type %s occurred. Arguments:\n%s" % (type(ex).__name__, repr(ex.args))
@@ -905,18 +908,17 @@ class Tvdb:
try:
import urllib
params = urllib.urlencode(params)
params = urllib.parse.urlencode(params)
DEFAULT_HEADERS["Accept-Language"] = lang
url = HOST + "/series/%s/images/query?%s" % (_id, params)
logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS))
req = urllib2.Request(url, headers=DEFAULT_HEADERS)
response = urllib2.urlopen(req)
req = urllib.request.Request(url, headers=DEFAULT_HEADERS)
response = urllib.request.urlopen(req)
html = response.read()
response.close()
except Exception, ex:
except Exception as ex:
message = "An exception of type %s occurred. Arguments:\n%s" % (type(ex).__name__, repr(ex.args))
logger.error("error en: %s" % message)
@@ -946,8 +948,8 @@ class Tvdb:
DEFAULT_HEADERS["Accept-Language"] = lang
logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS))
req = urllib2.Request(url, headers=DEFAULT_HEADERS)
response = urllib2.urlopen(req)
req = urllib.request.Request(url, headers=DEFAULT_HEADERS)
response = urllib.request.urlopen(req)
html = response.read()
response.close()
@@ -1039,7 +1041,7 @@ class Tvdb:
# origen['credits_crew'] = dic_origen_credits.get('crew', [])
# del origen['credits']
items = origen.items()
items = list(origen.items())
for k, v in items:
if not v:
@@ -1118,7 +1120,7 @@ class Tvdb:
elif k == 'cast':
dic_aux = dict((name, character) for (name, character) in l_castandrole)
l_castandrole.extend([(p['name'], p['role']) for p in v if p['name'] not in dic_aux.keys()])
l_castandrole.extend([(p['name'], p['role']) for p in v if p['name'] not in list(dic_aux.keys())])
else:
logger.debug("Atributos no añadidos: %s=%s" % (k, v))

View File

@@ -3,6 +3,11 @@
# Common Library Tools
# ------------------------------------------------------------
#from builtins import str
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
import errno
import math
import traceback
@@ -130,7 +135,10 @@ def save_movie(item):
else:
base_name = item.contentTitle
base_name = unicode(filetools.validate_path(base_name.replace('/', '-')), "utf8").encode("utf8")
if not PY3:
base_name = unicode(filetools.validate_path(base_name.replace('/', '-')), "utf8").encode("utf8")
else:
base_name = filetools.validate_path(base_name.replace('/', '-'))
if config.get_setting("lowerize_title", "videolibrary") == 0:
base_name = base_name.lower()
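base_name needs the bytes round trip only on Py2, where validate_path returns a byte string; Py3 strs are already unicode. A runnable sketch with a hypothetical validate_path:

import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str

def validate_path(name):   # hypothetical stand-in for filetools.validate_path
    return "".join(c for c in name if c not in ':*?"<>|')

base_name = "Movie: Example/Title"
if not PY3:
    base_name = unicode(validate_path(base_name.replace('/', '-')), "utf8").encode("utf8")
else:
    base_name = validate_path(base_name.replace('/', '-'))
print(base_name)   # Movie Example-Title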
@@ -191,9 +199,12 @@ def save_movie(item):
# If the emergency URL option is enabled, it is added to the movie after running the channel's Findvideos
try:
headers = {}
if item.headers:
headers = item.headers
channel = generictools.verify_channel(item.channel)
if config.get_setting("emergency_urls", channel) in [1, 3]:
item = emergency_urls(item, None, json_path)
item = emergency_urls(item, None, json_path, headers=headers)
if item_nfo.emergency_urls and not isinstance(item_nfo.emergency_urls, dict):
del item_nfo.emergency_urls
if not item_nfo.emergency_urls:
@@ -397,17 +408,29 @@ def save_tvshow(item, episodelist):
return 0, 0, -1, path
_id = item.infoLabels['code'][0]
if not item.infoLabels['code'][0] or item.infoLabels['code'][0] == 'None':
if item.infoLabels['code'][1] and item.infoLabels['code'][1] != 'None':
_id = item.infoLabels['code'][1]
elif item.infoLabels['code'][2] and item.infoLabels['code'][2] != 'None':
_id = item.infoLabels['code'][2]
else:
logger.error("NO ENCONTRADO EN SCRAPER O NO TIENE code: " + item.url
+ ' / ' + item.infoLabels['code'])
return 0, 0, -1, path
if config.get_setting("original_title_folder", "videolibrary") == 1 and item.infoLabels['originaltitle']:
base_name = item.infoLabels[u'originaltitle']
base_name = item.infoLabels['originaltitle']
elif item.infoLabels['tvshowtitle']:
base_name = item.infoLabels[u'tvshowtitle']
base_name = item.infoLabels['tvshowtitle']
elif item.infoLabels['title']:
base_name = item.infoLabels[u'title']
base_name = item.infoLabels['title']
else:
base_name = u'%s' % item.contentSerieName
base_name = item.contentSerieName
base_name = unicode(filetools.validate_path(base_name.replace('/', '-')), "utf8").encode("utf8")
if not PY3:
base_name = unicode(filetools.validate_path(base_name.replace('/', '-')), "utf8").encode("utf8")
else:
base_name = filetools.validate_path(base_name.replace('/', '-'))
if config.get_setting("lowerize_title", "videolibrary") == 0:
base_name = base_name.lower()
@@ -415,7 +438,7 @@ def save_tvshow(item, episodelist):
for raiz, subcarpetas, ficheros in filetools.walk(TVSHOWS_PATH):
for c in subcarpetas:
code = scrapertools.find_single_match(c, '\[(.*?)\]')
if code and code in item.infoLabels['code']:
if code and code != 'None' and code in item.infoLabels['code']:
path = filetools.join(raiz, c)
_id = code
break
@@ -425,7 +448,7 @@ def save_tvshow(item, episodelist):
logger.info("Creating series directory: " + path)
try:
filetools.mkdir(path)
except OSError, exception:
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
@@ -518,7 +541,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
news_in_playcounts = {}
# List all of the series' files up front, so we avoid checking one by one whether each exists
raiz, carpetas_series, ficheros = filetools.walk(path).next()
raiz, carpetas_series, ficheros = next(filetools.walk(path))
ficheros = [filetools.join(path, f) for f in ficheros]
nostrm_episodelist = []
@@ -550,7 +573,11 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
tags = []
if config.get_setting("enable_filter", "videolibrary"):
tags = [x.strip() for x in config.get_setting("filters", "videolibrary").lower().split(",")]
for e in episodelist:
headers = {}
if e.headers:
headers = e.headers
if tags != [] and tags is not None and any(tag in e.title.lower() for tag in tags):
continue
@@ -567,31 +594,34 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
if overwrite: #pero solo si se se sobrescriben los .json
json_epi = Item().fromjson(filetools.read(json_path)) #Leemos el .json
if json_epi.emergency_urls: #si existen las urls de emergencia...
e.emergency_urls = json_epi.emergency_urls #... las copiamos
else: #y si no...
e = emergency_urls(e, channel, json_path) #... las generamos
e.emergency_urls = json_epi.emergency_urls #... las copiamos
else: #y si no...
e = emergency_urls(e, channel, json_path, headers=headers) #... las generamos
else:
e = emergency_urls(e, channel, json_path) #Si el episodio no existe, generamos las urls
if e.emergency_urls: #Si ya tenemos urls...
e = emergency_urls(e, channel, json_path, headers=headers) #Si el episodio no existe, generamos las urls
if e.emergency_urls: #Si ya tenemos urls...
emergency_urls_succ = True #... es un éxito y vamos a marcar el .nfo
elif emergency_urls_stat == 2 and e.contentType == 'episode': #Borramos urls de emergencia?
elif emergency_urls_stat == 2 and e.contentType == 'episode': #Borramos urls de emergencia?
if e.emergency_urls: del e.emergency_urls
emergency_urls_succ = True #... es un éxito y vamos a marcar el .nfo
elif emergency_urls_stat == 3 and e.contentType == 'episode': #Actualizamos urls de emergencia?
if not silent:
p_dialog.update(0, 'Cacheando enlaces y archivos .torrent...', e.title) #progress dialog
e = emergency_urls(e, channel, json_path) #generamos las urls
if e.emergency_urls: #Si ya tenemos urls...
e = emergency_urls(e, channel, json_path, headers=headers) #generamos las urls
if e.emergency_urls: #Si ya tenemos urls...
emergency_urls_succ = True #... es un éxito y vamos a marcar el .nfo
if not e.infoLabels["tmdb_id"] or (serie.infoLabels["tmdb_id"] and e.infoLabels["tmdb_id"] != serie.infoLabels["tmdb_id"]): #en series multicanal, prevalece el infolabels...
if not e.infoLabels["tmdb_id"] or (serie.infoLabels["tmdb_id"] and e.infoLabels["tmdb_id"] != serie.infoLabels["tmdb_id"]): #en series multicanal, prevalece el infolabels...
e.infoLabels = serie.infoLabels #... del canal actual y no el del original
e.contentSeason, e.contentEpisodeNumber = season_episode.split("x")
if e.videolibray_emergency_urls:
del e.videolibray_emergency_urls
if e.channel_redir:
del e.channel_redir #... y se borran las marcas de redirecciones
new_episodelist.append(e)
except:
if e.contentType == 'episode':
logger.error("Unable to save %s emergency urls in the video library" % e.contentTitle)
logger.error(traceback.format_exc())
continue
# No episode list, nothing to save
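save_episodes now reads the headers off each episode item and threads them through to emergency_urls, whose signature gains a headers keyword (see the function further down). The hand-off in miniature, with a stubbed emergency_urls:

def emergency_urls(item, channel=None, path=None, headers={}):
    # stub: the real function runs the channel's findvideos with these headers
    print("caching with headers:", headers)
    return item

class Episode(object):
    headers = {"Referer": "https://example.com"}   # illustrative only

e = Episode()
headers = {}
if e.headers:
    headers = e.headers
emergency_urls(e, None, "/tmp/1x01 [channel].json", headers=headers)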
@@ -600,18 +630,35 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
return 0, 0, 0
# fix: use float because integer division misbehaves in Python 2.x
t = float(100) / len(new_episodelist)
try:
t = float(100) / len(new_episodelist)
except:
t = 0
last_season_episode = ''
for i, e in enumerate(scraper.sort_episode_list(new_episodelist)):
if not silent:
p_dialog.update(int(math.ceil((i + 1) * t)), config.get_localized_string(60064), e.title)
high_sea = e.contentSeason
high_epi = e.contentEpisodeNumber
if scrapertools.find_single_match(e.title, '[aA][lL]\s*(\d+)'):
high_epi = int(scrapertools.find_single_match(e.title, '[aA][lL]\s*(\d+)'))
max_sea = e.infoLabels["number_of_seasons"]
max_epi = 0
if e.infoLabels["number_of_seasons"] and (e.infoLabels["temporada_num_episodios"] or e.infoLabels["number_of_seasons"] == 1):
if e.infoLabels["number_of_seasons"] == 1 and e.infoLabels["number_of_episodes"]:
max_epi = e.infoLabels["number_of_episodes"]
else:
max_epi = e.infoLabels["temporada_num_episodios"]
season_episode = "%sx%s" % (e.contentSeason, str(e.contentEpisodeNumber).zfill(2))
strm_path = filetools.join(path, "%s.strm" % season_episode)
nfo_path = filetools.join(path, "%s.nfo" % season_episode)
json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower())
if season_episode in nostrm_episodelist:
logger.error('Error in the structure of the Video Library: Series ' + serie.contentSerieName + ' ' + season_episode)
continue
strm_exists = strm_path in ficheros
nfo_exists = nfo_path in ficheros
@@ -659,8 +706,10 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
if not item_nfo:
head_nfo, item_nfo = read_nfo(nfo_path)
if not e.infoLabels["tmdb_id"] or (item_nfo.infoLabels["tmdb_id"] and e.infoLabels["tmdb_id"] != item_nfo.infoLabels["tmdb_id"]): #en series multicanal, prevalece el infolabels...
e.infoLabels = item_nfo.infoLabels #... del canal actual y no el del original
# In multichannel series, the current channel's infolabels prevail over the original's
if not e.infoLabels["tmdb_id"] or (item_nfo.infoLabels["tmdb_id"] \
and e.infoLabels["tmdb_id"] != item_nfo.infoLabels["tmdb_id"]):
e.infoLabels = item_nfo.infoLabels
if filetools.write(json_path, e.tojson()):
if not json_exists:
@@ -688,10 +737,12 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
if not silent and p_dialog.iscanceled():
break
#logger.debug('high_sea x high_epi: %sx%s' % (str(high_sea), str(high_epi)))
#logger.debug('max_sea x max_epi: %sx%s' % (str(max_sea), str(max_epi)))
if not silent:
p_dialog.close()
if news_in_playcounts:
if news_in_playcounts or emergency_urls_succ or serie.infoLabels["status"] == "Ended" or serie.infoLabels["status"] == "Canceled":
# If there are new episodes, mark them as unwatched in tvshow.nfo ...
tvshow_path = filetools.join(path, "tvshow.nfo")
try:
@@ -703,16 +754,27 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
if emergency_urls_succ:
if tvshow_item.emergency_urls and not isinstance(tvshow_item.emergency_urls, dict):
del tvshow_item.emergency_urls
if emergency_urls_stat in [1, 3]: #Operación de guardar/actualizar enlaces
if emergency_urls_stat in [1, 3]: #Operación de guardar/actualizar enlaces
if not tvshow_item.emergency_urls:
tvshow_item.emergency_urls = dict()
tvshow_item.emergency_urls.update({serie.channel: True})
elif emergency_urls_stat == 2: #Operación de Borrar enlaces
if tvshow_item.library_urls.get(serie.channel, False):
tvshow_item.emergency_urls.update({serie.channel: True})
elif emergency_urls_stat == 2: #Operación de Borrar enlaces
if tvshow_item.emergency_urls and tvshow_item.emergency_urls.get(serie.channel, False):
tvshow_item.emergency_urls.pop(serie.channel, None) #borramos la entrada del .nfo
tvshow_item.emergency_urls.pop(serie.channel, None) #borramos la entrada del .nfo
if tvshow_item.active == 30:
tvshow_item.active = 1
if tvshow_item.infoLabels["tmdb_id"] == serie.infoLabels["tmdb_id"]:
tvshow_item.infoLabels = serie.infoLabels
tvshow_item.infoLabels["title"] = tvshow_item.infoLabels["tvshowtitle"]
if max_sea == high_sea and max_epi == high_epi and (tvshow_item.infoLabels["status"] == "Ended"
or tvshow_item.infoLabels["status"] == "Canceled") and insertados == 0 and fallidos == 0:
tvshow_item.active = 0 # ... we will not update it any more
logger.debug("%s [%s]: serie 'Terminada' o 'Cancelada'. Se desactiva la actualización periódica" % \
(serie.contentSerieName, serie.channel))
update_last = datetime.date.today()
tvshow_item.update_last = update_last.strftime('%Y-%m-%d')
update_next = datetime.date.today() + datetime.timedelta(days=int(tvshow_item.active))
@@ -819,10 +881,10 @@ def add_tvshow(item, channel=None):
if not channel:
try:
#channel = __import__('channels.%s' % item.channel, fromlist=["channels.%s" % item.channel])
# channel = __import__('channels.%s' % item.channel, fromlist=["channels.%s" % item.channel])
channel = __import__('specials.%s' % channel_alt, fromlist=["specials.%s" % channel_alt])
except ImportError:
exec "import channels." + item.channel + " as channel"
exec("import channels." + item.channel + " as channel")
#To disambiguate titles, force TMDB to ask for the title actually wanted
#The user can pick the title from those offered on the first screen
@@ -836,15 +898,15 @@ def add_tvshow(item, channel=None):
# del item.tmdb_stat #Limpiamos el status para que no se grabe en la Videoteca
# Get the episode list
#if item.channel == 'community':
itemlist = getattr(channel, item.action)(item)
global magnet_caching
magnet_caching = False
insertados, sobreescritos, fallidos, path = save_tvshow(item, itemlist)
if not insertados and not sobreescritos and not fallidos:
platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(60067))
logger.error("The %s series could not be added to the video library. Could not get any episode"
% item.show)
logger.error("The %s series could not be added to the video library. Could not get any episode" % item.show)
elif fallidos == -1:
platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(60068))
@@ -856,8 +918,7 @@ def add_tvshow(item, channel=None):
else:
platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(60070))
logger.info("%s episodes of the %s series have been added to the video library" %
(insertados, item.show))
logger.info("%s episodes of the %s series have been added to the video library" % (insertados, item.show))
if config.is_xbmc():
if config.get_setting("sync_trakt_new_tvshow", "videolibrary"):
import xbmc
@@ -872,10 +933,16 @@ def add_tvshow(item, channel=None):
xbmc_videolibrary.sync_trakt_addon(path)
def emergency_urls(item, channel=None, path=None):
def emergency_urls(item, channel=None, path=None, headers={}):
logger.info()
import re
"""
from servers import torrent
try:
magnet_caching_e = magnet_caching
except:
magnet_caching_e = True
"""
Llamamos a Findvideos del canal con la variable "item.videolibray_emergency_urls = True" para obtener la variable
"item.emergency_urls" con la lista de listas de tuplas de los enlaces torrent y de servidores directos para ese episodio o película
En la lista [0] siempre deben ir los enlaces torrents, si los hay. Si se desea cachear los .torrents, la búsqueda va contra esa lista.
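As a hedged illustration of the contract this docstring describes (channel and URLs are hypothetical), the lookup round-trip is roughly:

# Sketch only: a channel's findvideos() honoring the lookup flag fills
# item.emergency_urls; index [0] always carries the torrent links.
item.videolibray_emergency_urls = True   # mark the Item as a lookup call
item = channel.findvideos(item)          # the channel fills emergency_urls
# item.emergency_urls might now look like (hypothetical URLs):
# [['https://example.com/1x01.torrent'],            # [0] torrent links
#  [('https://hoster.example/abc', 'server_a')]]    # [1] direct servers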
@@ -890,17 +957,28 @@ def emergency_urls(item, channel=None, path=None):
if hasattr(channel, 'findvideos'): # If the channel has "findvideos"...
item.videolibray_emergency_urls = True #... flag it as a "lookup"
channel_save = item.channel #... save the original channel in case of fail-over in Newpct1
category_save = item.category #... save the original category in case of fail-over or redirection in Newpct1
if item.channel_redir: #... if there is a redirect, temporarily restore the alternative channel
item.channel = scrapertools.find_single_match(item.url, 'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').lower()
item.category = scrapertools.find_single_match(item.url, 'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').capitalize()
item_res = getattr(channel, 'findvideos')(item) #... run Findvideos
item_res.channel = channel_save #... restore the original channel in case of fail-over in Newpct1
item_res.category = channel_save.capitalize() #... and the category
item_res.category = category_save #... restore the original category in case of fail-over or redirection in Newpct1
item.category = category_save #... restore the original category in case of fail-over or redirection in Newpct1
del item_res.videolibray_emergency_urls #... and clear the lookup flag
if item.videolibray_emergency_urls:
del item.videolibray_emergency_urls #... and clear the original lookup flag
except:
logger.error('ERROR when processing the title in the channel Findvideos: ' + item.channel + ' / ' + item.title)
logger.error(traceback.format_exc())
item.channel = channel_save #... restore the original channel in case of fail-over or redirection in Newpct1
item.category = category_save #... restore the original category in case of fail-over or redirection in Newpct1
item_res = item.clone() # If there was an error, return the original Item
if item_res.videolibray_emergency_urls:
del item_res.videolibray_emergency_urls #... and clear the lookup flag
if item.videolibray_emergency_urls:
del item.videolibray_emergency_urls #... and clear the original lookup flag
# If the user has enabled the "emergency_urls_torrents" option, the .torrent files of each title will be downloaded
else: # If the links were cached successfully...
try:
@@ -921,7 +999,9 @@ def emergency_urls(item, channel=None, path=None):
if item_res.post: post = item_res.post
for url in item_res.emergency_urls[0]: # Loop over the emergency urls...
torrents_path = re.sub(r'(?:\.\w+$)', '_%s.torrent' % str(i).zfill(2), path)
path_real = caching_torrents(url, referer, post, torrents_path=torrents_path) #... to download the .torrents
path_real = ''
if magnet_caching_e or not url.startswith('magnet'):
path_real = torrent.caching_torrents(url, referer, post, torrents_path=torrents_path, headers=headers) #... to download the .torrents
if path_real: # If it succeeded...
item_res.emergency_urls[0][i-1] = path_real.replace(videolibrary_path, '') # store the relative "path"
i += 1
@@ -944,140 +1024,3 @@ def emergency_urls(item, channel=None, path=None):
#logger.debug(item_res.emergency_urls)
return item_res # Return the updated Item with the emergency links
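The per-episode .torrent naming used in the caching loop above is easier to see in isolation; a minimal sketch with a hypothetical library path:

# Same re.sub pattern as the loop above: the extension is swapped for a
# zero-padded counter plus ".torrent" (path is hypothetical).
import re
path = '/videolibrary/SERIES/Show/1x01.json'
print(re.sub(r'(?:\.\w+$)', '_%s.torrent' % str(1).zfill(2), path))
# -> /videolibrary/SERIES/Show/1x01_01.torrent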
def caching_torrents(url, referer=None, post=None, torrents_path=None, timeout=10, lookup=False, data_torrent=False):
if torrents_path != None:
logger.info("path = " + torrents_path)
else:
logger.info()
if referer and post:
logger.info('REFERER: ' + referer)
from core import httptools
torrent_file = ''
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Referer': referer} # Required for the .torrent POST
"""
Downloads the .torrent from the received url into the received path, and decodes it
Returns the real path of the .torrent, or an empty path if the operation failed
"""
videolibrary_path = config.get_videolibrary_path() # Compute the absolute path from the video library
if torrents_path == None:
if not videolibrary_path:
torrents_path = ''
if data_torrent:
return (torrents_path, torrent_file)
return torrents_path # On error, return the empty "path"
torrents_path = filetools.join(videolibrary_path, 'temp_torrents_Alfa', 'cliente_torrent_Alfa.torrent') # temporary download path
if '.torrent' not in torrents_path:
torrents_path += '.torrent' # path where the .torrent will be left
torrents_path_encode = filetools.encode(torrents_path) # utf-8 encode of the path
if url.endswith(".rar") or url.startswith("magnet:"): # Not a .torrent file
logger.error('It is not a Torrent file: ' + url)
torrents_path = ''
if data_torrent:
return (torrents_path, torrent_file)
return torrents_path # On error, return the empty "path"
try:
# Download the .torrent
if referer and post: # Download with POST
response = httptools.downloadpage(url, headers=headers, post=post, follow_redirects=False, timeout=timeout)
else: # Download without POST
response = httptools.downloadpage(url, timeout=timeout)
if not response.sucess: # note: "sucess" (sic) is the attribute name in this codebase's httptools
logger.error('.Torrent file not found: ' + url)
torrents_path = ''
if data_torrent:
return (torrents_path, torrent_file)
return torrents_path # On error, return the empty "path"
torrent_file = response.data
if "used CloudFlare" in torrent_file: # If it sits behind CloudFlare, use this workaround
response = httptools.downloadpage("http://anonymouse.org/cgi-bin/anon-www.cgi/" + url.strip(), timeout=timeout)
if not response.sucess:
logger.error('.Torrent file not found: ' + url)
torrents_path = ''
if data_torrent:
return (torrents_path, torrent_file)
return torrents_path # On error, return the empty "path"
torrent_file = response.data
# If it is a .ZIP file, try to extract its contents
if torrent_file.startswith("PK"):
logger.info('It is a .ZIP file: ' + url)
torrents_path_zip = filetools.join(videolibrary_path, 'temp_torrents_zip') # Working folder
torrents_path_zip = filetools.encode(torrents_path_zip)
torrents_path_zip_file = filetools.join(torrents_path_zip, 'temp_torrents_zip.zip') # Name of the .zip
import time
filetools.rmdirtree(torrents_path_zip) # Delete the temporary folder
time.sleep(1) # We must wait, otherwise it fails
filetools.mkdir(torrents_path_zip) # Create it again
if filetools.write(torrents_path_zip_file, torrent_file): # Save the .zip
torrent_file = '' # Free the in-memory contents
try: # Extract the .zip
from core import ziptools
unzipper = ziptools.ziptools()
unzipper.extract(torrents_path_zip_file, torrents_path_zip)
except:
import xbmc
xbmc.executebuiltin('XBMC.Extract("%s", "%s")' % (torrents_path_zip_file, torrents_path_zip))
time.sleep(1)
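# Note: XBMC.Extract runs asynchronously in Kodi, hence the sleep above
# before walking the folder for the extracted .torrent.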
import os
for root, folders, files in os.walk(torrents_path_zip): # Walk the folder to find the .torrent
for file in files:
if file.endswith(".torrent"):
input_file = filetools.join(root, file) # name of the .torrent
torrent_file = filetools.read(input_file) # read the .torrent
filetools.rmdirtree(torrents_path_zip) # Delete the temporary folder
# If it is not a .torrent file (RAR, HTML, ..., empty) raise an error
if not scrapertools.find_single_match(torrent_file, '^d\d+:.*?\d+:'):
logger.error('It is not a Torrent file: ' + url)
torrents_path = ''
if data_torrent:
return (torrents_path, torrent_file)
return torrents_path # On error, return the empty "path"
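# Background: a well-formed .torrent is a bencoded dictionary, so the raw
# payload starts like "d8:announce44:http://..." and matches '^d\d+:.*?\d+:',
# while HTML ("<!DOCTYPE...") or RAR ("Rar!...") payloads do not.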
# Save the .torrent
if not lookup:
if not filetools.write(torrents_path_encode, torrent_file):
logger.error('ERROR: Unwritten .torrent file: ' + torrents_path_encode)
torrents_path = '' # On error, return the empty "path"
torrent_file = '' #... and the .torrent buffer
if data_torrent:
return (torrents_path, torrent_file)
return torrents_path
except:
torrents_path = '' # On error, return the empty "path"
torrent_file = '' #... and the .torrent buffer
logger.error('ERROR: .Torrent download process failed: ' + url + ' / ' + torrents_path_encode)
logger.error(traceback.format_exc())
#logger.debug(torrents_path)
if data_torrent:
return (torrents_path, torrent_file)
return torrents_path
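A short usage sketch of caching_torrents as defined above (URL and path are hypothetical):

# Returns the real .torrent path, or '' on failure; with data_torrent=True
# it returns (path, raw_torrent_buffer) instead.
path = caching_torrents('https://example.com/1x01.torrent',
                        torrents_path='/videolibrary/temp_torrents_Alfa/1x01_01.torrent')
if path:
    logger.info('torrent cached at ' + path)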
def verify_url_torrent(url, timeout=5):
"""
Checks whether the .torrent file the url points to is available, by downloading it to a temporary area
Input: url
Output: True or False depending on the result of the operation
"""
if not url or url == 'javascript:;': # If the url comes in empty...
return False #... return with an error
torrents_path = caching_torrents(url, timeout=timeout, lookup=True) # Download the .torrent
if torrents_path: # If it succeeded...
return True
else:
return False
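And a usage sketch for verify_url_torrent (hypothetical URL); it probes availability without keeping anything on disk, since it calls caching_torrents with lookup=True:

if verify_url_torrent('https://example.com/1x01.torrent'):
    logger.info('torrent link is reachable')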


@@ -3,22 +3,26 @@
# Zip Tools
# --------------------------------------------------------------------------------
import io
import os
from builtins import object
import sys
PY3 = False
VFS = True
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int; VFS = False
import zipfile
from platformcode import config, logger
from core import filetools
class ziptools:
class ziptools(object):
def extract(self, file, dir, folder_to_extract="", overwrite_question=False, backup=False):
logger.info("file=%s" % file)
logger.info("dir=%s" % dir)
if not dir.endswith(':') and not os.path.exists(dir):
os.mkdir(dir)
if not dir.endswith(':') and not filetools.exists(dir):
filetools.mkdir(dir)
file = io.FileIO(file)
zf = zipfile.ZipFile(file)
if not folder_to_extract:
self._createstructure(file, dir)
@@ -30,60 +34,66 @@ class ziptools:
if not name.endswith('/'):
logger.info("no es un directorio")
try:
(path, filename) = os.path.split(os.path.join(dir, name))
(path, filename) = filetools.split(filetools.join(dir, name))
logger.info("path=%s" % path)
logger.info("name=%s" % name)
if folder_to_extract:
if path != os.path.join(dir, folder_to_extract):
if path != filetools.join(dir, folder_to_extract):
break
else:
os.makedirs(path)
filetools.mkdir(path)
except:
pass
if folder_to_extract:
outfilename = os.path.join(dir, filename)
outfilename = filetools.join(dir, filename)
else:
outfilename = os.path.join(dir, name)
outfilename = filetools.join(dir, name)
logger.info("outfilename=%s" % outfilename)
try:
if os.path.exists(outfilename) and overwrite_question:
if filetools.exists(outfilename) and overwrite_question:
from platformcode import platformtools
dyesno = platformtools.dialog_yesno("The file already exists",
"The file %s to be unpacked already exists" \
", do you want to overwrite it?" \
% os.path.basename(outfilename))
% filetools.basename(outfilename))
if not dyesno:
break
if backup:
import time
import shutil
hora_folder = "Backup copy [%s]" % time.strftime("%d-%m_%H-%M", time.localtime())
backup = os.path.join(config.get_data_path(), 'backups', hora_folder, folder_to_extract)
if not os.path.exists(backup):
os.makedirs(backup)
shutil.copy2(outfilename, os.path.join(backup, os.path.basename(outfilename)))
backup = filetools.join(config.get_data_path(), 'backups', hora_folder, folder_to_extract)
if not filetools.exists(backup):
filetools.mkdir(backup)
filetools.copy(outfilename, filetools.join(backup, filetools.basename(outfilename)))
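# The backup copy lands under <data_path>/backups/Backup copy [dd-mm_HH-MM]/<folder_to_extract>/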
outfile = open(outfilename, 'wb')
outfile.write(zf.read(nameo))
if not filetools.write(outfilename, zf.read(nameo), silent=True, vfs=VFS): # TRUNCATES at the END in Kodi 19 with VFS
logger.error("Error in file " + nameo)
except:
import traceback
logger.error(traceback.format_exc())
logger.error("Error en fichero " + nameo)
try:
zf.close()
except:
logger.info("Error cerrando .zip " + file)
def _createstructure(self, file, dir):
self._makedirs(self._listdirs(file), dir)
def create_necessary_paths(filename):
try:
(path, name) = os.path.split(filename)
os.makedirs(path)
(path, name) = filetools.split(filename)
filetools.mkdir(path)
except:
pass
def _makedirs(self, directories, basedir):
for dir in directories:
curdir = os.path.join(basedir, dir)
if not os.path.exists(curdir):
os.mkdir(curdir)
curdir = filetools.join(basedir, dir)
if not filetools.exists(curdir):
filetools.mkdir(curdir)
def _listdirs(self, file):
zf = zipfile.ZipFile(file)