diff --git a/channelselector.py b/channelselector.py
index 886e8c4d..42b4db51 100644
--- a/channelselector.py
+++ b/channelselector.py
@@ -9,77 +9,67 @@ addon = config.__settings__
downloadenabled = addon.getSetting('downloadenabled')
def getmainlist(view="thumb_"):
- logger.info()
+ logger.log()
itemlist = list()
if config.dev_mode():
- itemlist.append(Item(title="Redirect", channel="checkhost", action="check_channels",
- thumbnail='',
- category=config.get_localized_string(30119), viewmode="thumbnails"))
+ itemlist.append(Item(title="Redirect", channel="checkhost", action="check_channels", thumbnail='',
+ category=config.get_localized_string(30119), viewmode="thumbnails"))
# Main Menu Channels
if addon.getSetting('enable_news_menu') == "true":
itemlist.append(Item(title=config.get_localized_string(30130), channel="news", action="mainlist",
- thumbnail=get_thumb("news.png", view),
- category=config.get_localized_string(30119), viewmode="thumbnails",
- context=[{"title": config.get_localized_string(70285), "channel": "shortcuts", "action": "SettingOnPosition", "category":7, "setting":1}]))
+ thumbnail=get_thumb("news.png", view), category=config.get_localized_string(30119), viewmode="thumbnails",
+ context=[{"title": config.get_localized_string(70285), "channel": "shortcuts", "action": "SettingOnPosition", "category":7, "setting":1}]))
if addon.getSetting('enable_channels_menu') == "true":
itemlist.append(Item(title=config.get_localized_string(30118), channel="channelselector", action="getchanneltypes",
- thumbnail=get_thumb("channels.png", view), view=view,
- category=config.get_localized_string(30119), viewmode="thumbnails"))
+ thumbnail=get_thumb("channels.png", view), view=view, category=config.get_localized_string(30119), viewmode="thumbnails"))
if addon.getSetting('enable_search_menu') == "true":
itemlist.append(Item(title=config.get_localized_string(30103), channel="search", path='special', action="mainlist",
- thumbnail=get_thumb("search.png", view),
- category=config.get_localized_string(30119), viewmode="list",
- context = [{"title": config.get_localized_string(60412), "action": "setting_channel_new", "channel": "search"},
- {"title": config.get_localized_string(70286), "channel": "shortcuts", "action": "SettingOnPosition", "category":5 , "setting":1}]))
+ thumbnail=get_thumb("search.png", view), category=config.get_localized_string(30119), viewmode="list",
+ context = [{"title": config.get_localized_string(60412), "action": "setting_channel_new", "channel": "search"},
+ {"title": config.get_localized_string(70286), "channel": "shortcuts", "action": "SettingOnPosition", "category":5 , "setting":1}]))
if addon.getSetting('enable_onair_menu') == "true":
itemlist.append(Item(channel="filmontv", action="mainlist", title=config.get_localized_string(50001),
- thumbnail=get_thumb("on_the_air.png"), viewmode="thumbnails"))
+ thumbnail=get_thumb("on_the_air.png"), viewmode="thumbnails"))
if addon.getSetting('enable_link_menu') == "true":
- itemlist.append(Item(title=config.get_localized_string(70527), channel="kodfavorites", action="mainlist",
- thumbnail=get_thumb("mylink.png", view), view=view,
- category=config.get_localized_string(70527), viewmode="thumbnails"))
+ itemlist.append(Item(title=config.get_localized_string(70527), channel="kodfavorites", action="mainlist", thumbnail=get_thumb("mylink.png", view),
+ view=view, category=config.get_localized_string(70527), viewmode="thumbnails"))
if addon.getSetting('enable_fav_menu') == "true":
itemlist.append(Item(title=config.get_localized_string(30102), channel="favorites", action="mainlist",
- thumbnail=get_thumb("favorites.png", view),
- category=config.get_localized_string(30102), viewmode="thumbnails"))
+ thumbnail=get_thumb("favorites.png", view), category=config.get_localized_string(30102), viewmode="thumbnails"))
if config.get_videolibrary_support() and addon.getSetting('enable_library_menu') == "true":
itemlist.append(Item(title=config.get_localized_string(30131), channel="videolibrary", action="mainlist",
- thumbnail=get_thumb("videolibrary.png", view),
- category=config.get_localized_string(30119), viewmode="thumbnails",
+ thumbnail=get_thumb("videolibrary.png", view), category=config.get_localized_string(30119), viewmode="thumbnails",
context=[{"title": config.get_localized_string(70287), "channel": "shortcuts", "action": "SettingOnPosition", "category":2, "setting":1},
- {"title": config.get_localized_string(60568), "channel": "videolibrary", "action": "update_videolibrary"}]))
+ {"title": config.get_localized_string(60568), "channel": "videolibrary", "action": "update_videolibrary"}]))
if downloadenabled != "false":
- itemlist.append(Item(title=config.get_localized_string(30101), channel="downloads", action="mainlist",
- thumbnail=get_thumb("downloads.png", view), viewmode="list",
- context=[{"title": config.get_localized_string(70288), "channel": "shortcuts", "action": "SettingOnPosition", "category":6}]))
+ itemlist.append(Item(title=config.get_localized_string(30101), channel="downloads", action="mainlist", thumbnail=get_thumb("downloads.png", view), viewmode="list",
+ context=[{"title": config.get_localized_string(70288), "channel": "shortcuts", "action": "SettingOnPosition", "category":6}]))
thumb_setting = "setting_%s.png" % 0 # config.get_setting("plugin_updates_available")
itemlist.append(Item(title=config.get_localized_string(30100), channel="setting", action="settings",
- thumbnail=get_thumb(thumb_setting, view),
- category=config.get_localized_string(30100), viewmode="list"))
+ thumbnail=get_thumb(thumb_setting, view), category=config.get_localized_string(30100), viewmode="list"))
itemlist.append(Item(title=config.get_localized_string(30104) + " (v" + config.get_addon_version(with_fix=True) + ")", channel="help", action="mainlist",
- thumbnail=get_thumb("help.png", view),
- category=config.get_localized_string(30104), viewmode="list"))
+ thumbnail=get_thumb("help.png", view), category=config.get_localized_string(30104), viewmode="list"))
return itemlist
def getchanneltypes(view="thumb_"):
- logger.info()
+ logger.log()
# Category List
channel_types = ["movie", "tvshow", "anime", "documentary", "vos", "live", "torrent", "music"] #, "direct"
# Channel Language
channel_language = auto_filter()
- logger.info("channel_language=%s" % channel_language)
+ logger.log("channel_language=%s" % channel_language)
# Build Itemlist
itemlist = list()
@@ -102,7 +92,7 @@ def getchanneltypes(view="thumb_"):
def filterchannels(category, view="thumb_"):
from core import channeltools
- logger.info('Filter Channels ' + category)
+ logger.log('Filter Channels ' + category)
channelslist = []
@@ -113,17 +103,17 @@ def filterchannels(category, view="thumb_"):
appenddisabledchannels = True
channel_path = os.path.join(config.get_runtime_path(), 'channels', '*.json')
- logger.info("channel_path = %s" % channel_path)
+ logger.log("channel_path = %s" % channel_path)
channel_files = glob.glob(channel_path)
- logger.info("channel_files found %s" % (len(channel_files)))
+ logger.log("channel_files found %s" % (len(channel_files)))
# Channel Language
channel_language = auto_filter()
- logger.info("channel_language=%s" % channel_language)
+ logger.log("channel_language=%s" % channel_language)
for channel_path in channel_files:
- logger.info("channel in for = %s" % channel_path)
+ logger.log("channel in for = %s" % channel_path)
channel = os.path.basename(channel_path).replace(".json", "")
@@ -136,7 +126,7 @@ def filterchannels(category, view="thumb_"):
# If it's not a channel we skip it
if not channel_parameters["channel"]:
continue
- logger.info("channel_parameters=%s" % repr(channel_parameters))
+ logger.log("channel_parameters=%s" % repr(channel_parameters))
# If you prefer the banner and the channel has it, now change your mind
if view == "banner_" and "banner" in channel_parameters:
@@ -231,7 +221,7 @@ def get_thumb(thumb_name, view="thumb_"):
def set_channel_info(parameters):
- logger.info()
+ logger.log()
info = ''
language = ''
diff --git a/core/channeltools.py b/core/channeltools.py
index 90a2dd74..528acdfb 100644
--- a/core/channeltools.py
+++ b/core/channeltools.py
@@ -15,7 +15,7 @@ default_file = dict()
remote_path = 'https://raw.githubusercontent.com/kodiondemand/media/master/'
def is_enabled(channel_name):
- logger.info("channel_name=" + channel_name)
+ logger.log("channel_name=" + channel_name)
return get_channel_parameters(channel_name)["active"] and get_channel_setting("enabled", channel=channel_name,
default=True)
@@ -87,7 +87,7 @@ def get_channel_parameters(channel_name):
def get_channel_json(channel_name):
- # logger.info("channel_name=" + channel_name)
+ # logger.log("channel_name=" + channel_name)
from core import filetools
channel_json = None
try:
@@ -101,9 +101,9 @@ def get_channel_json(channel_name):
channel_name + ".json")
if filetools.isfile(channel_path):
- # logger.info("channel_data=" + channel_path)
+ # logger.log("channel_data=" + channel_path)
channel_json = jsontools.load(filetools.read(channel_path))
- # logger.info("channel_json= %s" % channel_json)
+ # logger.log("channel_json= %s" % channel_json)
except Exception as ex:
template = "An exception of type %s occured. Arguments:\n%r"
@@ -114,7 +114,7 @@ def get_channel_json(channel_name):
def get_channel_controls_settings(channel_name):
- # logger.info("channel_name=" + channel_name)
+ # logger.log("channel_name=" + channel_name)
dict_settings = {}
# import web_pdb; web_pdb.set_trace()
# list_controls = get_channel_json(channel_name).get('settings', list())
@@ -137,7 +137,7 @@ def get_lang(channel_name):
if hasattr(channel, 'list_language'):
for language in channel.list_language:
list_language.append(language)
- logger.info(list_language)
+ logger.log(list_language)
else:
sub = False
langs = []
diff --git a/core/downloader.py b/core/downloader.py
index b714b474..c08e8fdf 100644
--- a/core/downloader.py
+++ b/core/downloader.py
@@ -253,12 +253,12 @@ class Downloader(object):
self.file.seek(2 ** 31, 0)
except OverflowError:
self._seekable = False
- logger.info("Cannot do seek() or tell() in files larger than 2GB")
+ logger.log("Cannot do seek() or tell() in files larger than 2GB")
self.__get_download_info__()
try:
- logger.info("Download started: Parts: %s | Path: %s | File: %s | Size: %s" % (str(len(self._download_info["parts"])), self._pathencode('utf-8'), self._filenameencode('utf-8'), str(self._download_info["size"])))
+ logger.log("Download started: Parts: %s | Path: %s | File: %s | Size: %s" % (str(len(self._download_info["parts"])), self._pathencode('utf-8'), self._filenameencode('utf-8'), str(self._download_info["size"])))
except:
pass
@@ -410,7 +410,7 @@ class Downloader(object):
return id == 0 or (len(self.completed_parts) >= id and sorted(self.completed_parts)[id - 1] == id - 1)
def __save_file__(self):
- logger.info("Thread started: %s" % threading.current_thread().name)
+ logger.log("Thread started: %s" % threading.current_thread().name)
while self._state == self.states.downloading:
if not self.pending_parts and not self.download_parts and not self.save_parts: # Download finished
@@ -449,7 +449,7 @@ class Downloader(object):
self._download_info["parts"][s]["status"] = self.states.stopped
self._download_info["parts"][s]["current"] = self._download_info["parts"][s]["start"]
- logger.info("Thread stopped: %s" % threading.current_thread().name)
+ logger.log("Thread stopped: %s" % threading.current_thread().name)
def __get_part_id__(self):
self._download_lock.acquire()
@@ -464,21 +464,21 @@ class Downloader(object):
return None
def __set_part_connecting__(self, id):
- logger.info("ID: %s Establishing connection" % id)
+ logger.log("ID: %s Establishing connection" % id)
self._download_info["parts"][id]["status"] = self.states.connecting
def __set_part__error__(self, id):
- logger.info("ID: %s Download failed" % id)
+ logger.log("ID: %s Download failed" % id)
self._download_info["parts"][id]["status"] = self.states.error
self.pending_parts.add(id)
self.download_parts.remove(id)
def __set_part__downloading__(self, id):
- logger.info("ID: %s Downloading data ..." % id)
+ logger.log("ID: %s Downloading data ..." % id)
self._download_info["parts"][id]["status"] = self.states.downloading
def __set_part_completed__(self, id):
- logger.info("ID: %s Download finished!" % id)
+ logger.log("ID: %s Download finished!" % id)
self._download_info["parts"][id]["status"] = self.states.saving
self.download_parts.remove(id)
self.save_parts.add(id)
@@ -501,7 +501,7 @@ class Downloader(object):
return file
def __start_part__(self):
- logger.info("Thread Started: %s" % threading.current_thread().name)
+ logger.log("Thread Started: %s" % threading.current_thread().name)
while self._state == self.states.downloading:
id = self.__get_part_id__()
if id is None: break
@@ -528,7 +528,7 @@ class Downloader(object):
buffer = connection.read(self._block_size)
speed.append(old_div(len(buffer), ((time.time() - start) or 0.001)))
except:
- logger.info("ID: %s Error downloading data" % id)
+ logger.log("ID: %s Error downloading data" % id)
self._download_info["parts"][id]["status"] = self.states.error
self.pending_parts.add(id)
self.download_parts.remove(id)
@@ -546,7 +546,7 @@ class Downloader(object):
if velocidad_minima > speed[-1] and velocidad_minima > speed[-2] and self._download_info["parts"][id]["current"] < self._download_info["parts"][id]["end"]:
if connection.fp: connection.fp._sock.close()
- logger.info("ID: %s Restarting connection! | Minimum Speed: %.2f %s/s | Speed: %.2f %s/s" % (id, vm[1], vm[2], v[1], v[2]))
+ logger.log("ID: %s Restarting connection! | Minimum Speed: %.2f %s/s | Speed: %.2f %s/s" % (id, vm[1], vm[2], v[1], v[2]))
# file.close()
break
else:
@@ -556,7 +556,7 @@ class Downloader(object):
break
self.__set_part_stopped__(id)
- logger.info("Thread stopped: %s" % threading.current_thread().name)
+ logger.log("Thread stopped: %s" % threading.current_thread().name)
def __update_json(self, started=True):
text = filetools.read(self._json_path)
@@ -564,10 +564,10 @@ class Downloader(object):
if self._json_text != text:
self._json_text = text
self._json_item = Item().fromjson(text)
- logger.info('item loaded')
+ logger.log('item loaded')
progress = int(self.progress)
if started and self._json_item.downloadStatus == 0: # stopped
- logger.info('Download paused')
+ logger.log('Download paused')
self.stop()
elif self._json_item.downloadProgress != progress or not started:
params = {"downloadStatus": 4, "downloadComplete": 0, "downloadProgress": progress}
diff --git a/core/downloadtools.py b/core/downloadtools.py
index b0a35cb0..a18af29e 100644
--- a/core/downloadtools.py
+++ b/core/downloadtools.py
@@ -97,11 +97,11 @@ def limpia_nombre_excepto_1(s):
try:
s = unicode(s, "utf-8")
except UnicodeError:
- # logger.info("no es utf-8")
+ # logger.log("no es utf-8")
try:
s = unicode(s, "iso-8859-1")
except UnicodeError:
- # logger.info("no es iso-8859-1")
+ # logger.log("no es iso-8859-1")
pass
# Remove accents
s = limpia_nombre_sin_acentos(s)
@@ -125,29 +125,29 @@ def limpia_nombre_excepto_2(s):
def getfilefromtitle(url, title):
# Print in the log what you will discard
- logger.info("title=" + title)
- logger.info("url=" + url)
+ logger.log("title=" + title)
+ logger.log("url=" + url)
plataforma = config.get_system_platform()
- logger.info("platform=" + plataforma)
+ logger.log("platform=" + plataforma)
# filename = xbmc.makeLegalFilename(title + url[-4:])
from core import scrapertools
nombrefichero = title + scrapertools.get_filename_from_url(url)[-4:]
- logger.info("filename= %s" % nombrefichero)
+ logger.log("filename= %s" % nombrefichero)
if "videobb" in url or "videozer" in url or "putlocker" in url:
nombrefichero = title + ".flv"
if "videobam" in url:
nombrefichero = title + "." + url.rsplit(".", 1)[1][0:3]
- logger.info("filename= %s" % nombrefichero)
+ logger.log("filename= %s" % nombrefichero)
nombrefichero = limpia_nombre_caracteres_especiales(nombrefichero)
- logger.info("filename= %s" % nombrefichero)
+ logger.log("filename= %s" % nombrefichero)
fullpath = filetools.join(config.get_setting("downloadpath"), nombrefichero)
- logger.info("fullpath= %s" % fullpath)
+ logger.log("fullpath= %s" % fullpath)
if config.is_xbmc() and fullpath.startswith("special://"):
import xbmc
@@ -162,7 +162,7 @@ def downloadtitle(url, title):
def downloadbest(video_urls, title, continuar=False):
- logger.info()
+ logger.log()
# Flip it over, to put the highest quality one first (list () is for you to make a copy of)
invertida = list(video_urls)
@@ -172,9 +172,9 @@ def downloadbest(video_urls, title, continuar=False):
# videotitle = elemento[0]
url = elemento[1]
if not PY3:
- logger.info("Downloading option " + title + " " + url.encode('ascii', 'ignore'))
+ logger.log("Downloading option " + title + " " + url.encode('ascii', 'ignore'))
else:
- logger.info("Downloading option " + title + " " + url.encode('ascii', 'ignore').decode('utf-8'))
+ logger.log("Downloading option " + title + " " + url.encode('ascii', 'ignore').decode('utf-8'))
# Calculate the file where you should record
try:
@@ -200,25 +200,25 @@ def downloadbest(video_urls, title, continuar=False):
else:
# EThe file doesn't even exist
if not filetools.exists(fullpath):
- logger.info("-> You have not downloaded anything, testing with the following option if there is")
+ logger.log("-> You have not downloaded anything, testing with the following option if there is")
# The file exists
else:
tamanyo = filetools.getsize(fullpath)
# It has size 0
if tamanyo == 0:
- logger.info("-> Download a file with size 0, testing with the following option if it exists")
+ logger.log("-> Download a file with size 0, testing with the following option if it exists")
os.remove(fullpath)
else:
- logger.info("-> Download a file with size %d, he takes it for good" % tamanyo)
+ logger.log("-> Download a file with size %d, he takes it for good" % tamanyo)
return 0
return -2
def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False, resumir=True, header=''):
- logger.info("url= " + url)
- logger.info("filename= " + nombrefichero)
+ logger.log("url= " + url)
+ logger.log("filename= " + nombrefichero)
if headers is None:
headers = []
@@ -242,14 +242,14 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False
nombrefichero = xbmc.makeLegalFilename(nombrefichero)
except:
pass
- logger.info("filename= " + nombrefichero)
+ logger.log("filename= " + nombrefichero)
# The file exists and you want to continue
if filetools.exists(nombrefichero) and continuar:
f = filetools.file_open(nombrefichero, 'r+b', vfs=VFS)
if resumir:
exist_size = filetools.getsize(nombrefichero)
- logger.info("the file exists, size= %d" % exist_size)
+ logger.log("the file exists, size= %d" % exist_size)
grabado = exist_size
f.seek(exist_size)
else:
@@ -258,13 +258,13 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False
# the file already exists and you don't want to continue, it aborts
elif filetools.exists(nombrefichero) and not continuar:
- logger.info("the file exists, it does not download again")
+ logger.log("the file exists, it does not download again")
return -3
# the file does not exist
else:
exist_size = 0
- logger.info("the file does not exist")
+ logger.log("the file does not exist")
f = filetools.file_open(nombrefichero, 'wb', vfs=VFS)
grabado = 0
@@ -285,13 +285,13 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False
additional_headers = [additional_headers]
for additional_header in additional_headers:
- logger.info("additional_header: " + additional_header)
+ logger.log("additional_header: " + additional_header)
name = re.findall("(.*?)=.*?", additional_header)[0]
value = urllib.parse.unquote_plus(re.findall(".*?=(.*?)$", additional_header)[0])
headers.append([name, value])
url = url.split("|")[0]
- logger.info("url=" + url)
+ logger.log("url=" + url)
# Socket timeout at 60 seconds
socket.setdefaulttimeout(60)
@@ -299,7 +299,7 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False
h = urllib.request.HTTPHandler(debuglevel=0)
request = urllib.request.Request(url)
for header in headers:
- logger.info("Header= " + header[0] + ": " + header[1])
+ logger.log("Header= " + header[0] + ": " + header[1])
request.add_header(header[0], header[1])
if exist_size > 0:
@@ -328,12 +328,12 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False
if exist_size > 0:
totalfichero = totalfichero + exist_size
- logger.info("Content-Length= %s" % totalfichero)
+ logger.log("Content-Length= %s" % totalfichero)
blocksize = 100 * 1024
bloqueleido = connexion.read(blocksize)
- logger.info("Starting downloading the file, blocked= %s" % len(bloqueleido))
+ logger.log("Starting downloading the file, blocked= %s" % len(bloqueleido))
maxreintentos = 10
@@ -360,7 +360,7 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False
tiempofalta = old_div(falta, velocidad)
else:
tiempofalta = 0
- # logger.info(sec_to_hms(tiempofalta))
+ # logger.log(sec_to_hms(tiempofalta))
if not silent:
progreso.update(percent, "%.2fMB/%.2fMB (%d%%) %.2f Kb/s %s" %
(descargadosmb, totalmb, percent, old_div(velocidad, 1024),
@@ -368,14 +368,14 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False
break
except:
reintentos += 1
- logger.info("ERROR in block download, retry %d" % reintentos)
+ logger.log("ERROR in block download, retry %d" % reintentos)
import traceback
logger.error(traceback.print_exc())
# The user cancels the download
try:
if progreso.iscanceled():
- logger.info("Download of file canceled")
+ logger.log("Download of file canceled")
f.close()
progreso.close()
return -1
@@ -384,7 +384,7 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False
# There was an error in the download
if reintentos > maxreintentos:
- logger.info("ERROR in the file download")
+ logger.log("ERROR in the file download")
f.close()
if not silent:
progreso.close()
@@ -430,7 +430,7 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False
except:
pass
- logger.info("End of file download")
+ logger.log("End of file download")
def downloadfileRTMP(url, nombrefichero, silent):
@@ -476,7 +476,7 @@ def downloadfileRTMP(url, nombrefichero, silent):
try:
rtmpdump_args = [rtmpdump_cmd] + rtmpdump_args + ["-o", nombrefichero]
from os import spawnv, P_NOWAIT
- logger.info("Initiating file download: %s" % " ".join(rtmpdump_args))
+ logger.log("Initiating file download: %s" % " ".join(rtmpdump_args))
rtmpdump_exit = spawnv(P_NOWAIT, rtmpdump_cmd, rtmpdump_args)
if not silent:
from platformcode import platformtools
@@ -488,18 +488,18 @@ def downloadfileRTMP(url, nombrefichero, silent):
def downloadfileGzipped(url, pathfichero):
- logger.info("url= " + url)
+ logger.log("url= " + url)
nombrefichero = pathfichero
- logger.info("filename= " + nombrefichero)
+ logger.log("filename= " + nombrefichero)
import xbmc
nombrefichero = xbmc.makeLegalFilename(nombrefichero)
- logger.info("filename= " + nombrefichero)
+ logger.log("filename= " + nombrefichero)
patron = "(http://[^/]+)/.+"
matches = re.compile(patron, re.DOTALL).findall(url)
if len(matches):
- logger.info("Main URL: " + matches[0])
+ logger.log("Main URL: " + matches[0])
url1 = matches[0]
else:
url1 = url
@@ -546,9 +546,9 @@ def downloadfileGzipped(url, pathfichero):
nombre_fichero_base = filetools.basename(nombrefichero)
if len(nombre_fichero_base) == 0:
- logger.info("Searching for name in the answer Headers")
+ logger.log("Searching for name in the answer Headers")
nombre_base = connexion.headers["Content-Disposition"]
- logger.info(nombre_base)
+ logger.log(nombre_base)
patron = 'filename="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(nombre_base)
if len(matches) > 0:
@@ -556,7 +556,7 @@ def downloadfileGzipped(url, pathfichero):
titulo = GetTitleFromFile(titulo)
nombrefichero = filetools.join(pathfichero, titulo)
else:
- logger.info("Name of the file not found, Placing temporary name: no_name.txt")
+ logger.log("Name of the file not found, Placing temporary name: no_name.txt")
titulo = "no_name.txt"
nombrefichero = filetools.join(pathfichero, titulo)
totalfichero = int(connexion.headers["Content-Length"])
@@ -564,10 +564,10 @@ def downloadfileGzipped(url, pathfichero):
# then
f = filetools.file_open(nombrefichero, 'w', vfs=VFS)
- logger.info("new file open")
+ logger.log("new file open")
grabado = 0
- logger.info("Content-Length= %s" % totalfichero)
+ logger.log("Content-Length= %s" % totalfichero)
blocksize = 100 * 1024
@@ -580,7 +580,7 @@ def downloadfileGzipped(url, pathfichero):
gzipper = gzip.GzipFile(fileobj=compressedstream)
bloquedata = gzipper.read()
gzipper.close()
- logger.info("Starting downloading the file, blocked= %s" % len(bloqueleido))
+ logger.log("Starting downloading the file, blocked= %s" % len(bloqueleido))
except:
logger.error("ERROR: The file to be downloaded is not compressed with Gzip")
f.close()
@@ -619,32 +619,32 @@ def downloadfileGzipped(url, pathfichero):
tiempofalta = old_div(falta, velocidad)
else:
tiempofalta = 0
- logger.info(sec_to_hms(tiempofalta))
+ logger.log(sec_to_hms(tiempofalta))
progreso.update(percent, "%.2fMB/%.2fMB (%d%%) %.2f Kb/s %s left " % (descargadosmb, totalmb, percent, old_div(velocidad, 1024), sec_to_hms(tiempofalta)))
break
except:
reintentos += 1
- logger.info("ERROR in block download, retry %d" % reintentos)
+ logger.log("ERROR in block download, retry %d" % reintentos)
for line in sys.exc_info():
logger.error("%s" % line)
# The user cancels the download
if progreso.iscanceled():
- logger.info("Download of file canceled")
+ logger.log("Download of file canceled")
f.close()
progreso.close()
return -1
# There was an error in the download
if reintentos > maxreintentos:
- logger.info("ERROR in the file download")
+ logger.log("ERROR in the file download")
f.close()
progreso.close()
return -2
except:
- logger.info("ERROR in the file download")
+ logger.log("ERROR in the file download")
for line in sys.exc_info():
logger.error("%s" % line)
f.close()
@@ -655,15 +655,15 @@ def downloadfileGzipped(url, pathfichero):
# print data
progreso.close()
- logger.info("End download of the file")
+ logger.log("End download of the file")
return nombrefichero
def GetTitleFromFile(title):
# Print in the log what you will discard
- logger.info("title= " + title)
+ logger.log("title= " + title)
plataforma = config.get_system_platform()
- logger.info("plataform= " + plataforma)
+ logger.log("plataform= " + plataforma)
# nombrefichero = xbmc.makeLegalFilename(title + url[-4:])
nombrefichero = title
@@ -677,11 +677,11 @@ def sec_to_hms(seconds):
def downloadIfNotModifiedSince(url, timestamp):
- logger.info("(" + url + "," + time.ctime(timestamp) + ")")
+ logger.log("(" + url + "," + time.ctime(timestamp) + ")")
# Convert date to GMT
fecha_formateada = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime(timestamp))
- logger.info("Formatted date= %s" % fecha_formateada)
+ logger.log("Formatted date= %s" % fecha_formateada)
# Check if it has changed
inicio = time.clock()
@@ -702,9 +702,9 @@ def downloadIfNotModifiedSince(url, timestamp):
except urllib.error.URLError as e:
# If it returns 304 it is that it has not changed
if hasattr(e, 'code'):
- logger.info("HTTP response code : %d" % e.code)
+ logger.log("HTTP response code : %d" % e.code)
if e.code == 304:
- logger.info("It has not changed")
+ logger.log("It has not changed")
updated = False
# Grab errors with response code from requested external server
else:
@@ -713,13 +713,13 @@ def downloadIfNotModifiedSince(url, timestamp):
data = ""
fin = time.clock()
- logger.info("Downloaded in %d seconds " % (fin - inicio + 1))
+ logger.log("Downloaded in %d seconds " % (fin - inicio + 1))
return updated, data
def download_all_episodes(item, channel, first_episode="", preferred_server="vidspot", filter_language=""):
- logger.info("show= " + item.show)
+ logger.log("show= " + item.show)
show_title = item.show
# Gets the listing from which it was called
@@ -749,9 +749,9 @@ def download_all_episodes(item, channel, first_episode="", preferred_server="vid
for episode_item in episode_itemlist:
try:
- logger.info("episode= " + episode_item.title)
+ logger.log("episode= " + episode_item.title)
episode_title = scrapertools.find_single_match(episode_item.title, r"(\d+x\d+)")
- logger.info("episode= " + episode_title)
+ logger.log("episode= " + episode_title)
except:
import traceback
logger.error(traceback.format_exc())
@@ -815,7 +815,7 @@ def download_all_episodes(item, channel, first_episode="", preferred_server="vid
new_mirror_itemlist_4 + new_mirror_itemlist_5 + new_mirror_itemlist_6)
for mirror_item in mirrors_itemlist:
- logger.info("mirror= " + mirror_item.title)
+ logger.log("mirror= " + mirror_item.title)
if "(Italiano)" in mirror_item.title:
idioma = "(Italiano)"
@@ -836,11 +836,11 @@ def download_all_episodes(item, channel, first_episode="", preferred_server="vid
idioma = "(Desconocido)"
codigo_idioma = "desconocido"
- logger.info("filter_language=#" + filter_language + "#, codigo_idioma=#" + codigo_idioma + "#")
+ logger.log("filter_language=#" + filter_language + "#, codigo_idioma=#" + codigo_idioma + "#")
if filter_language == "" or (filter_language != "" and filter_language == codigo_idioma):
- logger.info("downloading mirror")
+ logger.log("downloading mirror")
else:
- logger.info("language " + codigo_idioma + " filtered, skipping")
+ logger.log("language " + codigo_idioma + " filtered, skipping")
continue
if hasattr(channel, 'play'):
@@ -856,14 +856,14 @@ def download_all_episodes(item, channel, first_episode="", preferred_server="vid
# Adds it to the download list
if puedes:
- logger.info("downloading mirror started...")
+ logger.log("downloading mirror started...")
# The highest quality video is the latest
# mediaurl = video_urls[len(video_urls) - 1][1]
devuelve = downloadbest(video_urls, show_title + " " + episode_title + " " + idioma +
" [" + video_item.server + "]", continuar=False)
if devuelve == 0:
- logger.info("download ok")
+ logger.log("download ok")
descargado = True
break
elif devuelve == -1:
@@ -874,14 +874,14 @@ def download_all_episodes(item, channel, first_episode="", preferred_server="vid
pass
return
else:
- logger.info("download error, try another mirror")
+ logger.log("download error, try another mirror")
continue
else:
- logger.info("downloading mirror not available... trying next")
+ logger.log("downloading mirror not available... trying next")
if not descargado:
- logger.info("UNDOWNLOADED EPISODE " + episode_title)
+ logger.log("UNDOWNLOADED EPISODE " + episode_title)
def episodio_ya_descargado(show_title, episode_title):
@@ -889,9 +889,9 @@ def episodio_ya_descargado(show_title, episode_title):
ficheros = filetools.listdir(".")
for fichero in ficheros:
- # logger.info("fichero="+fichero)
+ # logger.log("fichero="+fichero)
if fichero.lower().startswith(show_title.lower()) and scrapertools.find_single_match(fichero, "(\d+x\d+)") == episode_title:
- logger.info("found!")
+ logger.log("found!")
return True
return False
diff --git a/core/filetools.py b/core/filetools.py
index 554a2671..7719d6c7 100644
--- a/core/filetools.py
+++ b/core/filetools.py
@@ -814,7 +814,7 @@ def remove_tags(title):
@rtype: str
@return: string without tags
"""
- logger.info()
+ logger.log()
title_without_tags = scrapertools.find_single_match(title, r'\[color .+?\](.+)\[\/color\]')
@@ -832,7 +832,7 @@ def remove_smb_credential(path):
@return: chain without credentials
@rtype: str
"""
- logger.info()
+ logger.log()
if not scrapertools.find_single_match(path, r'(^\w+:\/\/)'):
return path
diff --git a/core/httptools.py b/core/httptools.py
index 1ec21351..afa8c9ce 100755
--- a/core/httptools.py
+++ b/core/httptools.py
@@ -125,11 +125,11 @@ def set_cookies(dict_cookie, clear=True, alfa_s=False):
def load_cookies(alfa_s=False):
cookies_lock.acquire()
if os.path.isfile(cookies_file):
- if not alfa_s: logger.info("Reading cookies file")
+ if not alfa_s: logger.log("Reading cookies file")
try:
cj.load(cookies_file, ignore_discard=True)
except:
- if not alfa_s: logger.info("The cookie file exists but is illegible, it is deleted")
+ if not alfa_s: logger.log("The cookie file exists but is illegible, it is deleted")
os.remove(cookies_file)
cookies_lock.release()
@@ -137,7 +137,7 @@ load_cookies()
def save_cookies(alfa_s=False):
cookies_lock.acquire()
- if not alfa_s: logger.info("Saving cookies...")
+ if not alfa_s: logger.log("Saving cookies...")
cj.save(cookies_file, ignore_discard=True)
cookies_lock.release()
@@ -161,7 +161,7 @@ def random_useragent():
def show_infobox(info_dict):
- logger.info()
+ logger.log()
from textwrap import wrap
box_items_kodi = {'r_up_corner': u'\u250c',
@@ -186,16 +186,16 @@ def show_infobox(info_dict):
- width = 60
+ width = 100
version = '%s: %s' % (config.get_localized_string(20000), __version)
if config.is_xbmc():
box = box_items_kodi
else:
box = box_items
- logger.info('%s%s%s' % (box['r_up_corner'], box['fill'] * width, box['l_up_corner']))
- logger.info('%s%s%s' % (box['center'], version.center(width), box['center']))
- logger.info('%s%s%s' % (box['r_center'], box['fill'] * width, box['l_center']))
+ logger.log('%s%s%s' % (box['r_up_corner'], box['fill'] * width, box['l_up_corner']))
+ logger.log('%s%s%s' % (box['center'], version.center(width), box['center']))
+ logger.log('%s%s%s' % (box['r_center'], box['fill'] * width, box['l_center']))
count = 0
for key, value in info_dict:
@@ -210,19 +210,19 @@ def show_infobox(info_dict):
for line in text:
if len(line) < width:
line = line.ljust(width, ' ')
- logger.info('%s%s%s' % (box['center'], line, box['center']))
+ logger.log('%s%s%s' % (box['center'], line, box['center']))
else:
- logger.info('%s%s%s' % (box['center'], text, box['center']))
+ logger.log('%s%s%s' % (box['center'], text, box['center']))
if count < len(info_dict):
- logger.info('%s%s%s' % (box['r_center'], box['fill'] * width, box['l_center']))
+ logger.log('%s%s%s' % (box['r_center'], box['fill'] * width, box['l_center']))
else:
- logger.info('%s%s%s' % (box['r_dn_corner'], box['fill'] * width, box['l_dn_corner']))
+ logger.log('%s%s%s' % (box['r_dn_corner'], box['fill'] * width, box['l_dn_corner']))
return
def downloadpage(url, **opt):
- # logger.info()
+ # logger.log()
"""
Open a url and return the data obtained
diff --git a/core/item.py b/core/item.py
index 5f17dadf..0da43760 100644
--- a/core/item.py
+++ b/core/item.py
@@ -298,7 +298,7 @@ class Item(object):
def tostring(self, separator=", "):
"""
Generate a text string with the item's data for the log
- Use: logger.info(item.tostring())
+ Use: logger.log(item.tostring())
@param separator: string to be used as a separator
@type separator: str
'"""
diff --git a/core/jsontools.py b/core/jsontools.py
index 6ebc799b..d21215e3 100644
--- a/core/jsontools.py
+++ b/core/jsontools.py
@@ -11,24 +11,24 @@ from inspect import stack
try:
import json
except:
- logger.info("json included in the interpreter **NOT** available")
+ logger.log("json included in the interpreter **NOT** available")
try:
import simplejson as json
except:
- logger.info("simplejson included in the interpreter **NOT** available")
+ logger.log("simplejson included in the interpreter **NOT** available")
try:
from lib import simplejson as json
except:
- logger.info("simplejson in lib directory **NOT** available")
+ logger.log("simplejson in lib directory **NOT** available")
logger.error("A valid JSON parser was not found")
json = None
else:
- logger.info("Using simplejson in the lib directory")
+ logger.log("Using simplejson in the lib directory")
else:
- logger.info("Using simplejson included in the interpreter")
+ logger.log("Using simplejson included in the interpreter")
# ~ else:
- # ~ logger.info("Usando json incluido en el interprete")
+ # ~ logger.log("Usando json incluido en el interprete")
import sys
PY3 = False
@@ -90,7 +90,7 @@ def get_node_from_file(name_file, node, path=None):
@return: dict with the node to return
@rtype: dict
"""
- logger.info()
+ logger.log()
from platformcode import config
from core import filetools
@@ -129,7 +129,7 @@ def check_to_backup(data, fname, dict_data):
@param dict_data: dictionary name
@type dict_data: dict
"""
- logger.info()
+ logger.log()
if not dict_data:
logger.error("Error loading json from file %s" % fname)
@@ -161,7 +161,7 @@ def update_node(dict_node, name_file, node, path=None, silent=False):
@return json_data
@rtype: dict
"""
- if not silent: logger.info()
+ if not silent: logger.log()
from platformcode import config
from core import filetools
diff --git a/core/scraper.py b/core/scraper.py
index 2515a77d..daae7035 100644
--- a/core/scraper.py
+++ b/core/scraper.py
@@ -61,7 +61,7 @@ def find_and_set_infoLabels(item):
# Check if there is a 'code'
if scraper_result and item.infoLabels['code']:
# correct code
- logger.info("Identificador encontrado: %s" % item.infoLabels['code'])
+ logger.log("Identificador encontrado: %s" % item.infoLabels['code'])
scraper.completar_codigos(item)
return True
elif scraper_result:
@@ -71,7 +71,7 @@ def find_and_set_infoLabels(item):
# Content not found
msg = config.get_localized_string(60228) % title
- logger.info(msg)
+ logger.log(msg)
# Show box with other options:
if scrapers_disponibles[scraper_actual] in list_opciones_cuadro:
list_opciones_cuadro.remove(scrapers_disponibles[scraper_actual])
@@ -95,10 +95,10 @@ def find_and_set_infoLabels(item):
elif index == 1:
# You have to create a dialog box to enter the data
- logger.info("Complete information")
+ logger.log("Complete information")
if cuadro_completar(item):
# correct code
- logger.info("Identifier found: %s" % str(item.infoLabels['code']))
+ logger.log("Identifier found: %s" % str(item.infoLabels['code']))
return True
# raise
@@ -121,7 +121,7 @@ def find_and_set_infoLabels(item):
def cuadro_completar(item):
- logger.info()
+ logger.log()
global dict_default
dict_default = {}
@@ -234,7 +234,7 @@ def get_nfo(item):
@rtype: str
@return:
"""
- logger.info()
+ logger.log()
if "infoLabels" in item and "noscrap_id" in item.infoLabels:
# Create the xml file with the data obtained from the item since there is no active scraper
info_nfo = ''
diff --git a/core/scrapertools.py b/core/scrapertools.py
index c31fae9e..c81e09fc 100644
--- a/core/scrapertools.py
+++ b/core/scrapertools.py
@@ -34,7 +34,7 @@ from platformcode import logger
def printMatches(matches):
i = 0
for match in matches:
- logger.info("%d %s" % (i, match))
+ logger.log("%d %s" % (i, match))
i = i + 1
@@ -446,7 +446,7 @@ def get_season_and_episode(title):
except:
pass
- logger.info("'" + title + "' -> '" + filename + "'")
+ logger.log("'" + title + "' -> '" + filename + "'")
return filename
diff --git a/core/servertools.py b/core/servertools.py
index 674fd7ff..37693b10 100644
--- a/core/servertools.py
+++ b/core/servertools.py
@@ -47,7 +47,7 @@ def find_video_items(item=None, data=None):
@return: returns the itemlist with the results
@rtype: list
"""
- logger.info()
+ logger.log()
itemlist = []
# Download the page
@@ -97,7 +97,7 @@ def get_servers_itemlist(itemlist, fnc=None, sort=False):
# Walk the patterns
for pattern in server_parameters.get("find_videos", {}).get("patterns", []):
- logger.info(pattern["pattern"])
+ logger.log(pattern["pattern"])
# Scroll through the results
for match in re.compile(pattern["pattern"], re.DOTALL).finditer(
"\n".join([item.url.split('|')[0] for item in itemlist if not item.server])):
@@ -144,7 +144,7 @@ def findvideos(data, skip=False):
return some link. It can also be an integer greater than 1, which would represent the maximum number of links to search.
:return:
"""
- logger.info()
+ logger.log()
devuelve = []
skip = int(skip)
servers_list = list(get_servers_list().keys())
@@ -165,7 +165,7 @@ def findvideos(data, skip=False):
devuelve = devuelve[:skip]
break
# if config.get_setting("filter_servers") == False: is_filter_servers = False
- # logger.info('DEVUELVE: ' + str(devuelve))
+ # logger.log('DEVUELVE: ' + str(devuelve))
# if not devuelve and is_filter_servers:
# platformtools.dialog_ok(config.get_localized_string(60000), config.get_localized_string(60001))
return devuelve
@@ -194,7 +194,7 @@ def findvideosbyserver(data, serverid):
value = translate_server_name(server_parameters["name"]) , url, serverid, server_parameters.get("thumbnail", "")
if value not in devuelve and url not in server_parameters["find_videos"].get("ignore_urls", []):
devuelve.append(value)
- logger.info(msg)
+ logger.log(msg)
return devuelve
@@ -206,7 +206,7 @@ def guess_server_thumbnail(serverid):
def get_server_from_url(url):
- logger.info()
+ logger.log()
servers_list = list(get_servers_list().keys())
# Run findvideos on each active server
@@ -224,7 +224,7 @@ def get_server_from_url(url):
for n, pattern in enumerate(server_parameters["find_videos"].get("patterns", [])):
msg = "%s\npattern: %s" % (serverid, pattern["pattern"])
if not "pattern_compiled" in pattern:
- # logger.info('compiled ' + serverid)
+ # logger.log('compiled ' + serverid)
pattern["pattern_compiled"] = re.compile(pattern["pattern"])
dict_servers_parameters[serverid]["find_videos"]["patterns"][n]["pattern_compiled"] = pattern["pattern_compiled"]
# Scroll through the results
@@ -237,7 +237,7 @@ def get_server_from_url(url):
msg += "\nurl encontrada: %s" % url
value = translate_server_name(server_parameters["name"]), url, serverid, server_parameters.get("thumbnail", "")
if url not in server_parameters["find_videos"].get("ignore_urls", []):
- logger.info(msg)
+ logger.log(msg)
return value
return None
@@ -260,7 +260,7 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo
@return: returns the url of the video
@rtype: list
"""
- logger.info("Server: %s, Url: %s" % (server, url))
+ logger.log("Server: %s, Url: %s" % (server, url))
server = server.lower()
@@ -273,7 +273,7 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo
if server == "directo" or server == "local":
if isinstance(video_password, list):
            return video_password, len(video_password) > 0, ", ".join(error_messages)
- logger.info("Server: %s, url is good" % server)
+ logger.log("Server: %s, url is good" % server)
video_urls.append(["%s [%s]" % (urlparse.urlparse(url)[2][-4:], config.get_localized_string(30137)), url])
# Find out the video URL
@@ -304,7 +304,7 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo
priority = int(config.get_setting("resolve_priority"))
opciones = sorted(opciones, key=lambda x: orden[priority].index(x))
- logger.info("Available options: %s | %s" % (len(opciones), opciones))
+ logger.log("Available options: %s | %s" % (len(opciones), opciones))
else:
logger.error("There is no connector for the server %s" % server)
error_messages.append(config.get_localized_string(60004) % server)
@@ -313,7 +313,7 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo
# Import the server
try:
server_module = __import__('servers.%s' % server, None, None, ["servers.%s" % server])
- logger.info("Imported server: %s" % server_module)
+ logger.log("Imported server: %s" % server_module)
except:
server_module = None
if muestra_dialogo:
@@ -324,17 +324,17 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo
# If it has a function to see if the video exists, check it now
if hasattr(server_module, 'test_video_exists'):
- logger.info("Invoking a %s.test_video_exists" % server)
+ logger.log("Invoking a %s.test_video_exists" % server)
try:
video_exists, message = server_module.test_video_exists(page_url=url)
if not video_exists:
error_messages.append(message)
- logger.info("test_video_exists says video doesn't exist")
+ logger.log("test_video_exists says video doesn't exist")
if muestra_dialogo:
progreso.close()
else:
- logger.info("test_video_exists says the video DOES exist")
+ logger.log("test_video_exists says the video DOES exist")
except:
logger.error("Could not verify if the video exists")
import traceback
@@ -361,7 +361,7 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo
# Free mode
if opcion == "free":
try:
- logger.info("Invoking a %s.get_video_url" % server)
+ logger.log("Invoking a %s.get_video_url" % server)
response = serverid.get_video_url(page_url=url, video_password=video_password)
video_urls.extend(response)
except:
@@ -373,7 +373,7 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo
# Premium mode
else:
try:
- logger.info("Invoking a %s.get_video_url" % opcion)
+ logger.log("Invoking a %s.get_video_url" % opcion)
response = serverid.get_video_url(page_url=url, premium=True,
user=config.get_setting("user", server=opcion),
password=config.get_setting("password", server=opcion),
@@ -483,7 +483,7 @@ def get_server_parameters(server):
@return: server data
@rtype: dict
"""
- # logger.info("server %s" % server)
+ # logger.log("server %s" % server)
global dict_servers_parameters
server = server.split('.')[0]
if not server:
@@ -533,15 +533,15 @@ def get_server_parameters(server):
# def get_server_json(server_name):
-# # logger.info("server_name=" + server_name)
+# # logger.log("server_name=" + server_name)
# try:
# server_path = filetools.join(config.get_runtime_path(), "servers", server_name + ".json")
# if not filetools.exists(server_path):
# server_path = filetools.join(config.get_runtime_path(), "servers", "debriders", server_name + ".json")
#
-# # logger.info("server_path=" + server_path)
+# # logger.log("server_path=" + server_path)
# server_json = jsontools.load(filetools.read(server_path))
-# # logger.info("server_json= %s" % server_json)
+# # logger.log("server_json= %s" % server_json)
#
# except Exception as ex:
# template = "An exception of type %s occured. Arguments:\n%r"
@@ -613,7 +613,7 @@ def get_server_setting(name, server, default=None):
if isinstance(dict_file, dict) and 'settings' in dict_file:
dict_settings = dict_file['settings']
except EnvironmentError:
- logger.info("ERROR when reading the file: %s" % file_settings)
+ logger.log("ERROR when reading the file: %s" % file_settings)
if not dict_settings or name not in dict_settings:
# We get controls from the file ../servers/server.json
@@ -627,7 +627,7 @@ def get_server_setting(name, server, default=None):
dict_file['settings'] = dict_settings
# We create the file ../settings/channel_data.json
if not filetools.write(file_settings, jsontools.dump(dict_file)):
- logger.info("ERROR saving file: %s" % file_settings)
+ logger.log("ERROR saving file: %s" % file_settings)
# We return the value of the local parameter 'name' if it exists, if default is not returned
return dict_settings.get(name, default)
@@ -649,7 +649,7 @@ def set_server_setting(name, value, server):
dict_file = jsontools.load(filetools.read(file_settings))
dict_settings = dict_file.get('settings', {})
except EnvironmentError:
- logger.info("ERROR when reading the file: %s" % file_settings)
+ logger.log("ERROR when reading the file: %s" % file_settings)
dict_settings[name] = value
@@ -661,7 +661,7 @@ def set_server_setting(name, value, server):
# We create the file ../settings/channel_data.json
if not filetools.write(file_settings, jsontools.dump(dict_file)):
- logger.info("ERROR saving file: %s" % file_settings)
+ logger.log("ERROR saving file: %s" % file_settings)
return None
return value
@@ -696,7 +696,7 @@ def get_debriders_list():
if server.endswith(".json"):
server_parameters = get_server_parameters(server)
if server_parameters["active"] == True:
- logger.info(server_parameters)
+ logger.log(server_parameters)
server_list[server.split(".")[0]] = server_parameters
return server_list
@@ -742,7 +742,7 @@ def check_list_links(itemlist, numero='', timeout=3):
it = res[0]
verificacion = res[1]
it.title = verificacion + ' ' + it.title.strip()
- logger.info('VERIFICATION= ' + verificacion)
+ logger.log('VERIFICATION= ' + verificacion)
it.alive = verificacion
return itemlist
@@ -763,7 +763,7 @@ def check_video_link(item, timeout=3):
server_module = __import__('servers.%s' % server, None, None, ["servers.%s" % server])
except:
server_module = None
- logger.info("[check_video_link] Cannot import server! %s" % server)
+ logger.log("[check_video_link] Cannot import server! %s" % server)
return item, NK
if hasattr(server_module, 'test_video_exists'):
@@ -773,20 +773,20 @@ def check_video_link(item, timeout=3):
try:
video_exists, message = server_module.test_video_exists(page_url=url)
if not video_exists:
- logger.info("[check_video_link] Does not exist! %s %s %s" % (message, server, url))
+ logger.log("[check_video_link] Does not exist! %s %s %s" % (message, server, url))
resultado = KO
else:
- logger.info("[check_video_link] check ok %s %s" % (server, url))
+ logger.log("[check_video_link] check ok %s %s" % (server, url))
resultado = OK
except:
- logger.info("[check_video_link] Can't check now! %s %s" % (server, url))
+ logger.log("[check_video_link] Can't check now! %s %s" % (server, url))
resultado = NK
finally:
httptools.HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT = ant_timeout # Restore download time
return item, resultado
- logger.info("[check_video_link] There is no test_video_exists for server: %s" % server)
+ logger.log("[check_video_link] There is no test_video_exists for server: %s" % server)
return item, NK
def translate_server_name(name):
diff --git a/core/tmdb.py b/core/tmdb.py
index 13dd22cd..e1f2b7f2 100644
--- a/core/tmdb.py
+++ b/core/tmdb.py
@@ -87,7 +87,7 @@ create_bd()
# The function name is the name of the decorator and receives the function that decorates.
def cache_response(fn):
- logger.info()
+ logger.log()
# import time
# start_time = time.time()
@@ -441,7 +441,7 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda=def_lang, lock=None
def find_and_set_infoLabels(item):
- logger.info()
+ logger.log()
global otmdb_global
tmdb_result = None
@@ -851,7 +851,7 @@ class Tmdb(object):
cls.dic_generos[idioma][tipo] = {}
url = ('http://api.themoviedb.org/3/genre/%s/list?api_key=a1ab8b8669da03637a4b98fa39c39228&language=%s' % (tipo, idioma))
try:
- logger.info("[Tmdb.py] Filling in dictionary of genres")
+ logger.log("[Tmdb.py] Filling in dictionary of genres")
resultado = cls.get_json(url)
if not isinstance(resultado, dict):
@@ -883,7 +883,7 @@ class Tmdb(object):
'&language=%s' % (self.busqueda_id, source, self.busqueda_idioma))
buscando = "%s: %s" % (source.capitalize(), self.busqueda_id)
- logger.info("[Tmdb.py] Searching %s:\n%s" % (buscando, url))
+ logger.log("[Tmdb.py] Searching %s:\n%s" % (buscando, url))
resultado = self.get_json(url)
if not isinstance(resultado, dict):
resultado = ast.literal_eval(resultado.decode('utf-8'))
@@ -925,7 +925,7 @@ class Tmdb(object):
url += '&year=%s' % self.busqueda_year
buscando = self.busqueda_texto.capitalize()
- logger.info("[Tmdb.py] Searching %s on page %s:\n%s" % (buscando, page, url))
+ logger.log("[Tmdb.py] Searching %s on page %s:\n%s" % (buscando, page, url))
resultado = self.get_json(url)
if not isinstance(resultado, dict):
resultado = ast.literal_eval(resultado.decode('utf-8'))
@@ -986,7 +986,7 @@ class Tmdb(object):
url = ('http://api.themoviedb.org/3/%s?api_key=a1ab8b8669da03637a4b98fa39c39228&%s'
% (type_search, "&".join(params)))
- logger.info("[Tmdb.py] Searcing %s:\n%s" % (type_search, url))
+ logger.log("[Tmdb.py] Searcing %s:\n%s" % (type_search, url))
resultado = self.get_json(url)
if not isinstance(resultado, dict):
resultado = ast.literal_eval(resultado.decode('utf-8'))
@@ -1051,7 +1051,7 @@ class Tmdb(object):
return True
def get_list_resultados(self, num_result=20):
- # logger.info("self %s" % str(self))
+ # logger.log("self %s" % str(self))
res = []
if num_result <= 0:
@@ -1271,7 +1271,7 @@ class Tmdb(object):
"&append_to_response=credits" % (self.result["id"], numtemporada, self.busqueda_idioma)
buscando = "id_Tmdb: " + str(self.result["id"]) + " season: " + str(numtemporada) + "\nURL: " + url
- logger.info("[Tmdb.py] Searcing " + buscando)
+ logger.log("[Tmdb.py] Searcing " + buscando)
try:
self.temporada[numtemporada] = self.get_json(url)
if not isinstance(self.temporada[numtemporada], dict):
@@ -1460,7 +1460,7 @@ class Tmdb(object):
items.extend(list(self.get_episodio(ret_infoLabels['season'], episodio).items()))
- # logger.info("ret_infoLabels" % ret_infoLabels)
+ # logger.log("ret_infoLabels" % ret_infoLabels)
for k, v in items:
if not v:
diff --git a/core/trakt_tools.py b/core/trakt_tools.py
index d78e1852..a7b046af 100644
--- a/core/trakt_tools.py
+++ b/core/trakt_tools.py
@@ -129,7 +129,7 @@ def token_trakt(item):
def set_trakt_info(item):
- logger.info()
+ logger.log()
import xbmcgui
# Envia los datos a trakt
try:
@@ -140,7 +140,7 @@ def set_trakt_info(item):
pass
def get_trakt_watched(id_type, mediatype, update=False):
- logger.info()
+ logger.log()
id_list = []
id_dict = dict()
@@ -228,7 +228,7 @@ def trakt_check(itemlist):
def get_sync_from_file():
- logger.info()
+ logger.log()
sync_path = os.path.join(config.get_data_path(), 'settings_channels', 'trakt_data.json')
trakt_node = {}
if os.path.exists(sync_path):
@@ -240,7 +240,7 @@ def get_sync_from_file():
def update_trakt_data(mediatype, trakt_data):
- logger.info()
+ logger.log()
sync_path = os.path.join(config.get_data_path(), 'settings_channels', 'trakt_data.json')
if os.path.exists(sync_path):
@@ -250,7 +250,7 @@ def update_trakt_data(mediatype, trakt_data):
def ask_install_script():
- logger.info()
+ logger.log()
from platformcode import platformtools
@@ -264,7 +264,7 @@ def ask_install_script():
def wait_for_update_trakt():
- logger.info()
+ logger.log()
t = Thread(update_all)
t.setDaemon(True)
t.start()
@@ -273,7 +273,7 @@ def wait_for_update_trakt():
def update_all():
# from core.support import dbg;dbg()
from time import sleep
- logger.info()
+ logger.log()
sleep(20)
while xbmc.Player().isPlaying():
sleep(20)
diff --git a/core/tvdb.py b/core/tvdb.py
index c6d9f56d..0097d0c4 100644
--- a/core/tvdb.py
+++ b/core/tvdb.py
@@ -73,8 +73,8 @@ otvdb_global = None
def find_and_set_infoLabels(item):
- logger.info()
- # logger.info("item es %s" % item)
+ logger.log()
+ # logger.log("item es %s" % item)
p_dialog = None
if not item.contentSeason:
@@ -368,7 +368,7 @@ class Tvdb(object):
@classmethod
def __check_token(cls):
- # logger.info()
+ # logger.log()
if TOKEN == "":
cls.__login()
else:
@@ -383,7 +383,7 @@ class Tvdb(object):
@staticmethod
def __login():
- # logger.info()
+ # logger.log()
global TOKEN
apikey = "106B699FDC04301C"
@@ -413,7 +413,7 @@ class Tvdb(object):
@classmethod
def __refresh_token(cls):
- # logger.info()
+ # logger.log()
global TOKEN
is_success = False
@@ -512,7 +512,7 @@ class Tvdb(object):
]
}
"""
- logger.info()
+ logger.log()
if id_episode and self.episodes.get(id_episode):
return self.episodes.get(id_episode)
@@ -582,7 +582,7 @@ class Tvdb(object):
}
}
"""
- logger.info()
+ logger.log()
try:
url = HOST + "/series/%s/episodes?page=%s" % (_id, page)
@@ -600,7 +600,7 @@ class Tvdb(object):
else:
self.list_episodes[page] = jsontools.load(html)
- # logger.info("dict_html %s" % self.list_episodes)
+ # logger.log("dict_html %s" % self.list_episodes)
return self.list_episodes[page]
@@ -668,7 +668,7 @@ class Tvdb(object):
"""
if semaforo:
semaforo.acquire()
- logger.info()
+ logger.log()
url = HOST + "/episodes/%s" % _id
@@ -691,7 +691,7 @@ class Tvdb(object):
dict_html = jsontools.load(html)
dict_html = dict_html.pop("data")
- logger.info("dict_html %s" % dict_html)
+ logger.log("dict_html %s" % dict_html)
self.episodes[_id] = dict_html
if semaforo:
@@ -722,7 +722,7 @@ class Tvdb(object):
"status": "string"
}
"""
- logger.info()
+ logger.log()
try:
@@ -820,7 +820,7 @@ class Tvdb(object):
}
}
"""
- logger.info()
+ logger.log()
resultado = {}
url = HOST + "/series/%s" % _id
@@ -879,7 +879,7 @@ class Tvdb(object):
@rtype: dict
"""
- logger.info()
+ logger.log()
if self.result.get('image_season_%s' % season):
return self.result['image_season_%s' % season]
@@ -931,7 +931,7 @@ class Tvdb(object):
@return: dictionary with actors
@rtype: dict
"""
- logger.info()
+ logger.log()
url = HOST + "/series/%s/actors" % _id
DEFAULT_HEADERS["Accept-Language"] = lang
@@ -961,7 +961,7 @@ class Tvdb(object):
@rtype: list
@return: list of results
"""
- logger.info()
+ logger.log()
list_results = []
# if we have a result and it has seriesName, we already have the info of the series, it is not necessary to search again
@@ -1102,7 +1102,7 @@ class Tvdb(object):
# ret_infoLabels['title'] = v + " " + origen.get('aliases', [''])[0]
# else:
# ret_infoLabels['title'] = v
- # logger.info("el titulo es %s " % ret_infoLabels['title'])
+ # logger.log("el titulo es %s " % ret_infoLabels['title'])
ret_infoLabels['title'] = v
elif k == 'cast':
diff --git a/core/videolibrarytools.py b/core/videolibrarytools.py
index fe1ba427..ab2d3324 100644
--- a/core/videolibrarytools.py
+++ b/core/videolibrarytools.py
@@ -78,7 +78,7 @@ def save_movie(item, silent=False):
@rtype fallidos: int
@return: the number of failed items or -1 if all failed
"""
- logger.info()
+ logger.log()
# logger.debug(item.tostring('\n'))
insertados = 0
sobreescritos = 0
@@ -144,7 +144,7 @@ def save_movie(item, silent=False):
if not path:
# Create folder
path = filetools.join(MOVIES_PATH, ("%s [%s]" % (base_name, _id)).strip())
- logger.info("Creating movie directory:" + path)
+ logger.log("Creating movie directory:" + path)
if not filetools.mkdir(path):
logger.debug("Could not create directory")
return 0, 0, -1, path
@@ -159,7 +159,7 @@ def save_movie(item, silent=False):
if not nfo_exists:
# We create .nfo if it doesn't exist
- logger.info("Creating .nfo: " + nfo_path)
+ logger.log("Creating .nfo: " + nfo_path)
head_nfo = scraper.get_nfo(item)
item_nfo = Item(title=item.contentTitle, channel="videolibrary", action='findvideos',
@@ -182,7 +182,7 @@ def save_movie(item, silent=False):
if item_nfo and strm_exists:
if json_exists:
- logger.info("The file exists. Is overwritten")
+ logger.log("The file exists. Is overwritten")
sobreescritos += 1
else:
insertados += 1
@@ -209,7 +209,7 @@ def save_movie(item, silent=False):
item_nfo.library_urls[item.channel] = item.url
if filetools.write(nfo_path, head_nfo + item_nfo.tojson()):
- #logger.info("FOLDER_MOVIES : %s" % FOLDER_MOVIES)
+ #logger.log("FOLDER_MOVIES : %s" % FOLDER_MOVIES)
# We update the Kodi video library with the movie
if config.is_xbmc() and config.get_setting("videolibrary_kodi") and not silent:
from platformcode import xbmc_videolibrary
@@ -238,7 +238,7 @@ def update_renumber_options(item, head_nfo, path):
json = json_file['TVSHOW_AUTORENUMBER']
if item.fulltitle in json:
item.channel_prefs[channel]['TVSHOW_AUTORENUMBER'] = json[item.fulltitle]
- logger.info('UPDATED=\n' + str(item.channel_prefs))
+ logger.log('UPDATED=\n' + str(item.channel_prefs))
filetools.write(tvshow_path, head_nfo + item.tojson())
def add_renumber_options(item, head_nfo, path):
@@ -426,7 +426,7 @@ def save_tvshow(item, episodelist, silent=False):
@rtype path: str
@return: series directory
"""
- logger.info()
+ logger.log()
# logger.debug(item.tostring('\n'))
path = ""
@@ -483,7 +483,7 @@ def save_tvshow(item, episodelist, silent=False):
if not path:
path = filetools.join(TVSHOWS_PATH, ("%s [%s]" % (base_name, _id)).strip())
- logger.info("Creating series directory: " + path)
+ logger.log("Creating series directory: " + path)
try:
filetools.mkdir(path)
except OSError as exception:
@@ -493,7 +493,7 @@ def save_tvshow(item, episodelist, silent=False):
tvshow_path = filetools.join(path, "tvshow.nfo")
if not filetools.exists(tvshow_path):
# We create tvshow.nfo, if it does not exist, with the head_nfo, series info and watched episode marks
- logger.info("Creating tvshow.nfo: " + tvshow_path)
+ logger.log("Creating tvshow.nfo: " + tvshow_path)
head_nfo = scraper.get_nfo(item)
item.infoLabels['mediatype'] = "tvshow"
item.infoLabels['title'] = item.contentSerieName
@@ -567,11 +567,11 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
@rtype fallidos: int
@return: the number of failed episodes
"""
- logger.info()
+ logger.log()
episodelist = filter_list(episodelist, serie.action, path)
# No episode list, nothing to save
if not len(episodelist):
- logger.info("There is no episode list, we go out without creating strm")
+ logger.log("There is no episode list, we go out without creating strm")
return 0, 0, 0
# process local episodes
@@ -586,7 +586,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
elif config.get_setting("local_episodes", "videolibrary"):
done, local_episodes_path = config_local_episodes_path(path, serie)
if done < 0:
- logger.info("An issue has occurred while configuring local episodes, going out without creating strm")
+ logger.log("An issue has occurred while configuring local episodes, going out without creating strm")
return 0, 0, done
item_nfo.local_episodes_path = local_episodes_path
filetools.write(nfo_path, head_nfo + item_nfo.tojson())
@@ -710,7 +710,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
# No episode list, nothing to save
if not len(new_episodelist):
- logger.info("There is no episode list, we go out without creating strm")
+ logger.log("There is no episode list, we go out without creating strm")
return 0, 0, 0
local_episodelist += get_local_content(path)
@@ -742,12 +742,12 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower())
if season_episode in local_episodelist:
- logger.info('Skipped: Serie ' + serie.contentSerieName + ' ' + season_episode + ' available as local content')
+ logger.log('Skipped: series ' + serie.contentSerieName + ' ' + season_episode + ' available as local content')
continue
# check if the episode has been downloaded
if filetools.join(path, "%s [downloads].json" % season_episode) in ficheros:
- logger.info('INFO: "%s" episode %s has been downloaded, skipping it' % (serie.contentSerieName, season_episode))
+ logger.log('INFO: "%s" episode %s has been downloaded, skipping it' % (serie.contentSerieName, season_episode))
continue
strm_exists = strm_path in ficheros
@@ -800,7 +800,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
if filetools.write(json_path, e.tojson()):
if not json_exists:
- logger.info("Inserted: %s" % json_path)
+ logger.log("Inserted: %s" % json_path)
insertados += 1
# We mark episode as unseen
news_in_playcounts[season_episode] = 0
@@ -811,14 +811,14 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
news_in_playcounts[serie.contentSerieName] = 0
else:
- logger.info("Overwritten: %s" % json_path)
+ logger.log("Overwritten: %s" % json_path)
sobreescritos += 1
else:
- logger.info("Failed: %s" % json_path)
+ logger.log("Failed: %s" % json_path)
fallidos += 1
else:
- logger.info("Failed: %s" % json_path)
+ logger.log("Failed: %s" % json_path)
fallidos += 1
if not silent and p_dialog.iscanceled():
@@ -888,7 +888,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
def config_local_episodes_path(path, item, silent=False):
- logger.info(item)
+ logger.log(item)
from platformcode.xbmc_videolibrary import search_local_path
local_episodes_path=search_local_path(item)
if not local_episodes_path:
@@ -900,11 +900,11 @@ def config_local_episodes_path(path, item, silent=False):
platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(80043))
local_episodes_path = platformtools.dialog_browse(0, config.get_localized_string(80046))
if local_episodes_path == '':
- logger.info("User has canceled the dialog")
+ logger.log("User has canceled the dialog")
return -2, local_episodes_path
elif path in local_episodes_path:
platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(80045))
- logger.info("Selected folder is the same of the TV show one")
+ logger.log("Selected folder is the same of the TV show one")
return -2, local_episodes_path
if local_episodes_path:
@@ -919,7 +919,7 @@ def config_local_episodes_path(path, item, silent=False):
def process_local_episodes(local_episodes_path, path):
- logger.info()
+ logger.log()
sub_extensions = ['.srt', '.sub', '.sbv', '.ass', '.idx', '.ssa', '.smi']
artwork_extensions = ['.jpg', '.jpeg', '.png']
@@ -958,7 +958,7 @@ def process_local_episodes(local_episodes_path, path):
def get_local_content(path):
- logger.info()
+ logger.log()
local_episodelist = []
for root, folders, files in filetools.walk(path):
@@ -987,7 +987,7 @@ def add_movie(item):
@type item: item
@param item: item to be saved.
"""
- logger.info()
+ logger.log()
from platformcode.launcher import set_search_temp; set_search_temp(item)
# To disambiguate titles, TMDB is made to prompt for the exact title wanted
@@ -1034,7 +1034,7 @@ def add_tvshow(item, channel=None):
@param channel: channel from which the series will be saved. By default, item.from_channel or item.channel is used.
"""
- logger.info("show=#" + item.show + "#")
+ logger.log("show=#" + item.show + "#")
from platformcode.launcher import set_search_temp; set_search_temp(item)
if item.channel == "downloads":
@@ -1105,7 +1105,7 @@ def add_tvshow(item, channel=None):
else:
platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(60070) % item.show)
- logger.info("%s episodes of series %s have been added to the video library" % (insertados, item.show))
+ logger.log("%s episodes of series %s have been added to the video library" % (insertados, item.show))
if config.is_xbmc():
if config.get_setting("sync_trakt_new_tvshow", "videolibrary"):
import xbmc
@@ -1121,7 +1121,7 @@ def add_tvshow(item, channel=None):
def emergency_urls(item, channel=None, path=None, headers={}):
- logger.info()
+ logger.log()
import re
from servers import torrent
try:
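For orientation, the log messages in save_movie/save_episodes imply a fixed per-entry file layout. A sketch of the naming scheme as far as this diff shows it (the helper is hypothetical, and the plain "%s.strm" name is an assumption; only the .json pattern is visible above):

import os

def episode_files(show_path, season_episode, channel):
    # Assumed: one playable .strm per episode.
    strm_path = os.path.join(show_path, "%s.strm" % season_episode)
    # Visible in the diff: a lowercased per-channel .json with the serialized Item.
    json_path = os.path.join(show_path,
                             ("%s [%s].json" % (season_episode, channel)).lower())
    return strm_path, json_path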
diff --git a/core/ziptools.py b/core/ziptools.py
index 138c4c1b..3b3a3683 100644
--- a/core/ziptools.py
+++ b/core/ziptools.py
@@ -17,8 +17,8 @@ from core import filetools
class ziptools(object):
def extract(self, file, dir, folder_to_extract="", overwrite_question=False, backup=False):
- logger.info("file= %s" % file)
- logger.info("dir= %s" % dir)
+ logger.log("file= %s" % file)
+ logger.log("dir= %s" % dir)
if not dir.endswith(':') and not filetools.exists(dir):
filetools.mkdir(dir)
@@ -30,13 +30,13 @@ class ziptools(object):
for nameo in zf.namelist():
name = nameo.replace(':', '_').replace('<', '_').replace('>', '_').replace('|', '_').replace('"', '_').replace('?', '_').replace('*', '_')
- logger.info("name=%s" % nameo)
+ logger.log("name=%s" % nameo)
if not name.endswith('/'):
- logger.info("it's not a directory")
+ logger.log("it's not a directory")
try:
(path, filename) = filetools.split(filetools.join(dir, name))
- logger.info("path=%s" % path)
- logger.info("name=%s" % name)
+ logger.log("path=%s" % path)
+ logger.log("name=%s" % name)
if folder_to_extract:
if path != filetools.join(dir, folder_to_extract):
break
@@ -49,7 +49,7 @@ class ziptools(object):
else:
outfilename = filetools.join(dir, name)
- logger.info("outfilename=%s" % outfilename)
+ logger.log("outfilename=%s" % outfilename)
try:
if filetools.exists(outfilename) and overwrite_question:
from platformcode import platformtools
@@ -74,7 +74,7 @@ class ziptools(object):
try:
zf.close()
except:
- logger.info("Error closing .zip " + file)
+ logger.log("Error closing .zip " + file)
def _createstructure(self, file, dir):
self._makedirs(self._listdirs(file), dir)
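The chained .replace() calls in extract() map characters that are invalid in Windows file names to underscores. An equivalent, behaviour-preserving one-liner, shown only as a sketch:

import re

def sanitize(name):
    # Same substitution as the chained .replace() calls: : < > | " ? * -> _
    return re.sub(r'[:<>|"?*]', '_', name)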
diff --git a/default.py b/default.py
index 7ed2cc72..b5c1474e 100644
--- a/default.py
+++ b/default.py
@@ -9,7 +9,7 @@ import sys
import xbmc
from platformcode import config, logger
-logger.info("init...")
+logger.log("init...")
librerias = xbmc.translatePath(os.path.join(config.get_runtime_path(), 'lib'))
sys.path.insert(0, librerias)
diff --git a/lib/arm_chromeos.py b/lib/arm_chromeos.py
index d4808402..4dddd954 100644
--- a/lib/arm_chromeos.py
+++ b/lib/arm_chromeos.py
@@ -27,7 +27,7 @@ class ChromeOSImage:
"""
def __init__(self, imgpath):
- logger.info('Image Path: ' + imgpath)
+ logger.log('Image Path: ' + imgpath)
"""Prepares the image"""
self.imgpath = imgpath
self.bstream = self.get_bstream(imgpath)
@@ -59,7 +59,7 @@ class ChromeOSImage:
self.seek_stream(entries_start * lba_size)
if not calcsize(part_format) == entry_size:
- logger.info('Partition table entries are not 128 bytes long')
+ logger.log('Partition table entries are not 128 bytes long')
return 0
for index in range(1, entries_num + 1): # pylint: disable=unused-variable
@@ -71,7 +71,7 @@ class ChromeOSImage:
break
if not offset:
- logger.info('Failed to calculate losetup offset.')
+ logger.log('Failed to calculate losetup offset.')
return 0
return offset
@@ -93,7 +93,7 @@ class ChromeOSImage:
while True:
chunk2 = self.read_stream(chunksize)
if not chunk2:
- logger.info('File %s not found in the ChromeOS image' % filename)
+ logger.log('File %s not found in the ChromeOS image' % filename)
return False
chunk = chunk1 + chunk2
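The chunk1 + chunk2 loop above is an overlapped-window scan: searching two adjacent chunks at a time means a filename that straddles a chunk boundary is still found. A self-contained sketch of the technique (the helper is illustrative, not the class's API):

def find_in_stream(read, needle, chunksize=1024 * 1024):
    # 'read' is any callable returning up to chunksize bytes, b'' at EOF.
    chunk1 = read(chunksize)
    while True:
        chunk2 = read(chunksize)
        if not chunk2:
            return -1  # EOF without a match, as in the loop above
        window = chunk1 + chunk2  # the overlap catches boundary-straddling hits
        pos = window.find(needle)
        if pos != -1:
            return pos  # offset within the current window
        chunk1 = chunk2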
diff --git a/lib/generictools.py b/lib/generictools.py
index ffe77898..dd344e09 100644
--- a/lib/generictools.py
+++ b/lib/generictools.py
@@ -25,7 +25,7 @@ intervenido_sucuri = 'Access Denied - Sucuri Website Firewall'
def update_title(item):
- logger.info()
+ logger.log()
from core import scraper,support
@@ -41,7 +41,7 @@ def update_title(item):
The channel must add a method to receive the call from Kodi / Alfa and invoke this one:
def actualizar_titulos(item):
- logger.info()
+ logger.log()
itemlist = []
from lib import generictools
from platformcode import launcher
@@ -205,7 +205,7 @@ def update_title(item):
def refresh_screen(item):
- logger.info()
+ logger.log()
"""
#### Kodi 18 compatibility ####
@@ -239,7 +239,7 @@ def refresh_screen(item):
def post_tmdb_listado(item, itemlist):
- logger.info()
+ logger.log()
itemlist_fo = []
"""
@@ -484,7 +484,7 @@ def post_tmdb_listado(item, itemlist):
def post_tmdb_seasons(item, itemlist):
- logger.info()
+ logger.log()
"""
@@ -644,7 +644,7 @@ def post_tmdb_seasons(item, itemlist):
def post_tmdb_episodios(item, itemlist):
- logger.info()
+ logger.log()
itemlist_fo = []
"""
@@ -995,7 +995,7 @@ def post_tmdb_episodios(item, itemlist):
def post_tmdb_findvideos(item, itemlist):
- logger.info()
+ logger.log()
"""
@@ -1215,7 +1215,7 @@ def post_tmdb_findvideos(item, itemlist):
def get_field_from_kodi_DB(item, from_fields='*', files='file'):
- logger.info()
+ logger.log()
"""
Reads from the Kodi DB the requested fields (from_fields, "*" by default) for the video referenced by the Item
@@ -1293,7 +1293,7 @@ def get_field_from_kodi_DB(item, from_fields='*', files='file'):
def fail_over_newpct1(item, patron, patron2=None, timeout=None):
- logger.info()
+ logger.log()
import ast
"""
@@ -1494,7 +1494,7 @@ def fail_over_newpct1(item, patron, patron2=None, timeout=None):
def web_intervenida(item, data, desactivar=True):
- logger.info()
+ logger.log()
"""
@@ -1577,7 +1577,7 @@ def web_intervenida(item, data, desactivar=True):
def regenerate_clones():
- logger.info()
+ logger.log()
import json
from core import videolibrarytools
@@ -1591,7 +1591,7 @@ def regenerate_clones():
# Find the path where the control .json file should go, and the video library path
json_path = filetools.exists(filetools.join(config.get_runtime_path(), 'verify_cached_torrents.json'))
if json_path:
- logger.info('Previously repaired video library: WE ARE GOING')
+ logger.log('Video library previously repaired: exiting')
return False
json_path = filetools.join(config.get_runtime_path(), 'verify_cached_torrents.json')
filetools.write(json_path, json.dumps({"CINE_verify": True})) # Prevents another simultaneous process from being launched
@@ -1631,7 +1631,7 @@ def regenerate_clones():
# Delete the tvshow.nfo files and check whether the .nfo lists more than one channel, one of them a Newpct1 clone
for file in files:
- # logger.info('file - nfos: ' + file)
+ # logger.log('file - nfos: ' + file)
if 'tvshow.nfo' in file:
file_path = filetools.join(root, 'tvshow.nfo')
filetools.remove(file_path)
@@ -1697,7 +1697,7 @@ def regenerate_clones():
for file in files:
file_path = filetools.join(root, file)
if '.json' in file:
- logger.info('** file: ' + file)
+ logger.log('** file: ' + file)
canal_json = scrapertools.find_single_match(file, r'\[(\w+)\].json')
if canal_json not in nfo.library_urls:
filetools.remove(file_path) # we delete the .json since it is a zombie
@@ -1740,7 +1740,7 @@ def regenerate_clones():
def dejuice(data):
- logger.info()
+ logger.log()
# Method to deobfuscate JuicyCodes data
import base64
diff --git a/lib/megaserver/client.py b/lib/megaserver/client.py
index 210fafa3..66e196f8 100644
--- a/lib/megaserver/client.py
+++ b/lib/megaserver/client.py
@@ -45,7 +45,7 @@ class Client(object):
t= Thread(target=self._auto_shutdown)
t.setDaemon(True)
t.start()
- logger.info("MEGA Server Started")
+ logger.log("MEGA Server Started")
def _auto_shutdown(self):
while self.running:
@@ -75,7 +75,7 @@ class Client(object):
def stop(self):
self.running = False
self._server.stop()
- logger.info("MEGA Server Stopped")
+ logger.log("MEGA Server Stopped")
def get_play_list(self):
if len(self.files) > 1:
@@ -103,7 +103,7 @@ class Client(object):
return files
except:
- logger.info(traceback.format_exc())
+ logger.log(traceback.format_exc())
pass
return files
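Client starts a daemonized watchdog so the embedded MEGA server dies with Kodi instead of blocking shutdown. A reduced sketch of that pattern (class name and poll interval are illustrative):

import time
from threading import Thread

class Watchdog(object):
    def __init__(self):
        self.running = True
        t = Thread(target=self._auto_shutdown)
        t.setDaemon(True)  # daemon threads never keep the process alive
        t.start()

    def _auto_shutdown(self):
        while self.running:
            time.sleep(1)  # poll until stop() clears the flag

    def stop(self):
        self.running = False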
diff --git a/lib/sambatools/libsmb.py b/lib/sambatools/libsmb.py
index 4a66385d..5cb83fbd 100644
--- a/lib/sambatools/libsmb.py
+++ b/lib/sambatools/libsmb.py
@@ -14,7 +14,7 @@ remote = None
def parse_url(url):
- # logger.info("Url: %s" % url)
+ # logger.log("Url: %s" % url)
url = url.strip()
patron = "^smb://(?:([^;\n]+);)?(?:([^:@\n]+)[:|@])?(?:([^@\n]+)@)?([^/]+)/([^/\n]+)([/]?.*?)$"
domain, user, password, server_name, share_name, path = re.compile(patron, re.DOTALL).match(url).groups()
@@ -27,7 +27,7 @@ def parse_url(url):
if path.endswith("/"): path = path[:-1]
if not path: path = "/"
- # logger.info("Dominio: '%s' |Usuario: '%s' | Password: '%s' | Servidor: '%s' | IP: '%s' | Share Name: '%s' | Path: '%s'" % (domain, user, password, server_name, server_ip, share_name, path))
+ # logger.log("Dominio: '%s' |Usuario: '%s' | Password: '%s' | Servidor: '%s' | IP: '%s' | Share Name: '%s' | Path: '%s'" % (domain, user, password, server_name, server_ip, share_name, path))
return server_name, server_ip, share_name, unicode(path, "utf8"), user, password, domain
@@ -46,7 +46,7 @@ def get_server_name_ip(server):
def connect(url):
- # logger.info("Url: %s" % url)
+ # logger.log("Url: %s" % url)
global remote
server_name, server_ip, share_name, path, user, password, domain = parse_url(url)
@@ -63,7 +63,7 @@ def connect(url):
def listdir(url):
- logger.info("Url: %s" % url)
+ logger.log("Url: %s" % url)
remote, share_name, path = connect(url)
try:
files = [f.filename for f in remote.listPath(share_name, path) if not f.filename in [".", ".."]]
@@ -73,7 +73,7 @@ def listdir(url):
def walk(url, topdown=True, onerror=None):
- logger.info("Url: %s" % url)
+ logger.log("Url: %s" % url)
remote, share_name, path = connect(url)
try:
@@ -103,7 +103,7 @@ def walk(url, topdown=True, onerror=None):
def get_attributes(url):
- logger.info("Url: %s" % url)
+ logger.log("Url: %s" % url)
remote, share_name, path = connect(url)
try:
return remote.getAttributes(share_name, path)
@@ -112,7 +112,7 @@ def get_attributes(url):
def mkdir(url):
- logger.info("Url: %s" % url)
+ logger.log("Url: %s" % url)
remote, share_name, path = connect(url)
try:
remote.createDirectory(share_name, path)
@@ -121,12 +121,12 @@ def mkdir(url):
def smb_open(url, mode):
- logger.info("Url: %s" % url)
+ logger.log("Url: %s" % url)
return SMBFile(url, mode)
def isfile(url):
- logger.info("Url: %s" % url)
+ logger.log("Url: %s" % url)
remote, share_name, path = connect(url)
try:
files = [f.filename for f in remote.listPath(share_name, os.path.dirname(path)) if not f.isDirectory]
@@ -136,7 +136,7 @@ def isfile(url):
def isdir(url):
- logger.info("Url: %s" % url)
+ logger.log("Url: %s" % url)
remote, share_name, path = connect(url)
try:
folders = [f.filename for f in remote.listPath(share_name, os.path.dirname(path)) if f.isDirectory]
@@ -146,7 +146,7 @@ def isdir(url):
def exists(url):
- logger.info("Url: %s" % url)
+ logger.log("Url: %s" % url)
remote, share_name, path = connect(url)
try:
files = [f.filename for f in remote.listPath(share_name, os.path.dirname(path))]
@@ -156,7 +156,7 @@ def exists(url):
def remove(url):
- logger.info("Url: %s" % url)
+ logger.log("Url: %s" % url)
remote, share_name, path = connect(url)
try:
remote.deleteFiles(share_name, path)
@@ -165,7 +165,7 @@ def remove(url):
def rmdir(url):
- logger.info("Url: %s" % url)
+ logger.log("Url: %s" % url)
remote, share_name, path = connect(url)
try:
remote.deleteDirectory(share_name, path)
@@ -174,7 +174,7 @@ def rmdir(url):
def rename(url, new_name):
- logger.info("Url: %s" % url)
+ logger.log("Url: %s" % url)
remote, share_name, path = connect(url)
_, _, _, new_name, _, _, _ = parse_url(new_name)
try:
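For orientation, a worked example of what parse_url() yields for a fully qualified SMB URL, derived from the regex above (values are illustrative; server_ip is whatever get_server_name_ip() resolves):

# parse_url("smb://WORKGROUP;alice:secret@nas/media/tv/show") returns, in order:
#   server_name = "nas"           user     = "alice"
#   server_ip   = resolved IP     password = "secret"
#   share_name  = "media"         domain   = "WORKGROUP"
#   path        = u"/tv/show"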
diff --git a/lib/unshortenit.py b/lib/unshortenit.py
index 35c6cce2..02a19d1f 100644
--- a/lib/unshortenit.py
+++ b/lib/unshortenit.py
@@ -1,16 +1,15 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-try:
- from urllib.parse import urlsplit, urlparse, parse_qs, urljoin
-except:
+import os, re, sys, json, time
+
+if sys.version_info[0] >= 3:
+ from urllib.parse import urlsplit, urlparse, parse_qs, urljoin, urlencode
+ from urllib.request import urlopen
+else:
+ from urllib import urlencode, urlopen
from urlparse import urlsplit, urlparse, parse_qs, urljoin
-import json
-import os
-import re
-import time
-import urllib
from base64 import b64decode
from core import httptools, scrapertools
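The hunk above swaps a bare try/except import for an explicit interpreter check, which stops unrelated ImportErrors inside the try block from being silently swallowed. The same pattern for any module that moved in Python 3 (module names here are only an example):

import sys

if sys.version_info[0] >= 3:
    from io import StringIO        # Python 3 location
else:
    from StringIO import StringIO  # Python 2 location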
@@ -61,17 +60,13 @@ class UnshortenIt(object):
return uri, "No domain found in URI!"
had_google_outbound, uri = self._clear_google_outbound_proxy(uri)
- if re.search(self._adfly_regex, domain,
- re.IGNORECASE) or type == 'adfly':
+ if re.search(self._adfly_regex, domain, re.IGNORECASE) or type == 'adfly':
uri, code = self._unshorten_adfly(uri)
- if re.search(self._adfocus_regex, domain,
- re.IGNORECASE) or type == 'adfocus':
+ if re.search(self._adfocus_regex, domain, re.IGNORECASE) or type == 'adfocus':
uri, code = self._unshorten_adfocus(uri)
- if re.search(self._linkbucks_regex, domain,
- re.IGNORECASE) or type == 'linkbucks':
+ if re.search(self._linkbucks_regex, domain, re.IGNORECASE) or type == 'linkbucks':
uri, code = self._unshorten_linkbucks(uri)
- if re.search(self._lnxlu_regex, domain,
- re.IGNORECASE) or type == 'lnxlu':
+ if re.search(self._lnxlu_regex, domain, re.IGNORECASE) or type == 'lnxlu':
uri, code = self._unshorten_lnxlu(uri)
if re.search(self._shrink_service_regex, domain, re.IGNORECASE):
uri, code = self._unshorten_shrink_service(uri)
@@ -99,7 +94,7 @@ class UnshortenIt(object):
if oldUri == uri:
break
- logger.info(uri)
+ logger.log(uri)
return uri, code
@@ -368,7 +363,7 @@ class UnshortenIt(object):
if len(code) > 0:
payload = {'click': code[0]}
r = httptools.downloadpage(
- 'http://lnx.lu?' + urllib.urlencode(payload),
+ 'http://lnx.lu?' + urlencode(payload),
timeout=self._timeout)
return r.url, r.code
else:
@@ -400,7 +395,7 @@ class UnshortenIt(object):
payload = {'adSessionId': session_id, 'callback': 'c'}
r = httptools.downloadpage(
'http://sh.st/shortest-url/end-adsession?' +
- urllib.urlencode(payload),
+ urlencode(payload),
headers=http_header,
timeout=self._timeout)
response = r.data[6:-2].decode('utf-8')
@@ -519,7 +514,7 @@ class UnshortenIt(object):
else:
if 'sb/' in uri or 'akv/' in uri or 'wss/' in uri or 'wsd/' in uri:
import datetime, hashlib
- ip = urllib.urlopen('https://api.ipify.org/').read()
+ ip = urlopen('https://api.ipify.org/').read().decode('utf-8')
day = datetime.date.today().strftime('%Y%m%d')
headers = {
"Cookie": hashlib.md5(ip+day).hexdigest() + "=1"
@@ -531,12 +526,12 @@ class UnshortenIt(object):
r = httptools.downloadpage(uri, timeout=self._timeout, headers=headers, follow_redirects=False)
if 'Wait 1 hour' in r.data:
uri = ''
- logger.info('IP bannato da vcrypt, aspetta un ora')
+ logger.log('IP banned by vcrypt, wait one hour')
else:
prev_uri = uri
uri = r.headers['location']
if uri == prev_uri:
- logger.info('Use Cloudscraper')
+ logger.log('Use Cloudscraper')
uri = httptools.downloadpage(uri, timeout=self._timeout, headers=headers, follow_redirects=False, cf=True).headers['location']
if "4snip" in uri:
@@ -593,7 +588,7 @@ class UnshortenIt(object):
r = httptools.downloadpage(uri, follow_redirect=True, timeout=self._timeout, cookies=False)
if 'get/' in r.url:
uri = 'https://linkhub.icu/view/' + re.search('\.\./view/([^"]+)', r.data).group(1)
- logger.info(uri)
+ logger.log(uri)
r = httptools.downloadpage(uri, follow_redirect=True, timeout=self._timeout, cookies=False)
uri = re.search('