diff --git a/channelselector.py b/channelselector.py
index 886e8c4d..42b4db51 100644
--- a/channelselector.py
+++ b/channelselector.py
@@ -9,77 +9,67 @@ addon = config.__settings__
 downloadenabled = addon.getSetting('downloadenabled')


 def getmainlist(view="thumb_"):
-    logger.info()
+    logger.log()
     itemlist = list()

     if config.dev_mode():
-        itemlist.append(Item(title="Redirect", channel="checkhost", action="check_channels",
-                             thumbnail='',
-                             category=config.get_localized_string(30119), viewmode="thumbnails"))
+        itemlist.append(Item(title="Redirect", channel="checkhost", action="check_channels", thumbnail='',
+                             category=config.get_localized_string(30119), viewmode="thumbnails"))

     # Main Menu Channels
     if addon.getSetting('enable_news_menu') == "true":
         itemlist.append(Item(title=config.get_localized_string(30130), channel="news", action="mainlist",
-                             thumbnail=get_thumb("news.png", view),
-                             category=config.get_localized_string(30119), viewmode="thumbnails",
-                             context=[{"title": config.get_localized_string(70285), "channel": "shortcuts", "action": "SettingOnPosition", "category":7, "setting":1}]))
+                             thumbnail=get_thumb("news.png", view), category=config.get_localized_string(30119), viewmode="thumbnails",
+                             context=[{"title": config.get_localized_string(70285), "channel": "shortcuts", "action": "SettingOnPosition", "category":7, "setting":1}]))

     if addon.getSetting('enable_channels_menu') == "true":
         itemlist.append(Item(title=config.get_localized_string(30118), channel="channelselector", action="getchanneltypes",
-                             thumbnail=get_thumb("channels.png", view), view=view,
-                             category=config.get_localized_string(30119), viewmode="thumbnails"))
+                             thumbnail=get_thumb("channels.png", view), view=view, category=config.get_localized_string(30119), viewmode="thumbnails"))

     if addon.getSetting('enable_search_menu') == "true":
         itemlist.append(Item(title=config.get_localized_string(30103), channel="search", path='special', action="mainlist",
-                             thumbnail=get_thumb("search.png", view),
-                             category=config.get_localized_string(30119), viewmode="list",
-                             context = [{"title": config.get_localized_string(60412), "action": "setting_channel_new", "channel": "search"},
-                                        {"title": config.get_localized_string(70286), "channel": "shortcuts", "action": "SettingOnPosition", "category":5 , "setting":1}]))
+                             thumbnail=get_thumb("search.png", view), category=config.get_localized_string(30119), viewmode="list",
+                             context = [{"title": config.get_localized_string(60412), "action": "setting_channel_new", "channel": "search"},
+                                        {"title": config.get_localized_string(70286), "channel": "shortcuts", "action": "SettingOnPosition", "category":5 , "setting":1}]))

     if addon.getSetting('enable_onair_menu') == "true":
         itemlist.append(Item(channel="filmontv", action="mainlist", title=config.get_localized_string(50001),
-                             thumbnail=get_thumb("on_the_air.png"), viewmode="thumbnails"))
+                             thumbnail=get_thumb("on_the_air.png"), viewmode="thumbnails"))

     if addon.getSetting('enable_link_menu') == "true":
-        itemlist.append(Item(title=config.get_localized_string(70527), channel="kodfavorites", action="mainlist",
-                             thumbnail=get_thumb("mylink.png", view), view=view,
-                             category=config.get_localized_string(70527), viewmode="thumbnails"))
+        itemlist.append(Item(title=config.get_localized_string(70527), channel="kodfavorites", action="mainlist", thumbnail=get_thumb("mylink.png", view),
+                             view=view, category=config.get_localized_string(70527), viewmode="thumbnails"))

     if addon.getSetting('enable_fav_menu') == "true":
         itemlist.append(Item(title=config.get_localized_string(30102), channel="favorites", action="mainlist",
-                             thumbnail=get_thumb("favorites.png", view),
-                             category=config.get_localized_string(30102), viewmode="thumbnails"))
+                             thumbnail=get_thumb("favorites.png", view), category=config.get_localized_string(30102), viewmode="thumbnails"))

     if config.get_videolibrary_support() and addon.getSetting('enable_library_menu') == "true":
         itemlist.append(Item(title=config.get_localized_string(30131), channel="videolibrary", action="mainlist",
-                             thumbnail=get_thumb("videolibrary.png", view),
-                             category=config.get_localized_string(30119), viewmode="thumbnails",
+                             thumbnail=get_thumb("videolibrary.png", view), category=config.get_localized_string(30119), viewmode="thumbnails",
                              context=[{"title": config.get_localized_string(70287), "channel": "shortcuts", "action": "SettingOnPosition", "category":2, "setting":1},
-                                      {"title": config.get_localized_string(60568), "channel": "videolibrary", "action": "update_videolibrary"}]))
+                                      {"title": config.get_localized_string(60568), "channel": "videolibrary", "action": "update_videolibrary"}]))

     if downloadenabled != "false":
-        itemlist.append(Item(title=config.get_localized_string(30101), channel="downloads", action="mainlist",
-                             thumbnail=get_thumb("downloads.png", view), viewmode="list",
-                             context=[{"title": config.get_localized_string(70288), "channel": "shortcuts", "action": "SettingOnPosition", "category":6}]))
+        itemlist.append(Item(title=config.get_localized_string(30101), channel="downloads", action="mainlist", thumbnail=get_thumb("downloads.png", view), viewmode="list",
+                             context=[{"title": config.get_localized_string(70288), "channel": "shortcuts", "action": "SettingOnPosition", "category":6}]))

     thumb_setting = "setting_%s.png" % 0  # config.get_setting("plugin_updates_available")

     itemlist.append(Item(title=config.get_localized_string(30100), channel="setting", action="settings",
-                         thumbnail=get_thumb(thumb_setting, view),
-                         category=config.get_localized_string(30100), viewmode="list"))
+                         thumbnail=get_thumb(thumb_setting, view), category=config.get_localized_string(30100), viewmode="list"))
     itemlist.append(Item(title=config.get_localized_string(30104) + " (v" + config.get_addon_version(with_fix=True) + ")", channel="help", action="mainlist",
-                         thumbnail=get_thumb("help.png", view),
-                         category=config.get_localized_string(30104), viewmode="list"))
+                         thumbnail=get_thumb("help.png", view), category=config.get_localized_string(30104), viewmode="list"))
     return itemlist


 def getchanneltypes(view="thumb_"):
-    logger.info()
+    logger.log()

     # Category List
     channel_types = ["movie", "tvshow", "anime", "documentary", "vos", "live", "torrent", "music"]  #, "direct"

     # Channel Language
     channel_language = auto_filter()
-    logger.info("channel_language=%s" % channel_language)
+    logger.log("channel_language=%s" % channel_language)

     # Build Itemlist
     itemlist = list()
@@ -102,7 +92,7 @@ def getchanneltypes(view="thumb_"):

 def filterchannels(category, view="thumb_"):
     from core import channeltools
-    logger.info('Filter Channels ' + category)
+    logger.log('Filter Channels ' + category)

     channelslist = []
@@ -113,17 +103,17 @@ def filterchannels(category, view="thumb_"):
         appenddisabledchannels = True

     channel_path = os.path.join(config.get_runtime_path(), 'channels', '*.json')
-    logger.info("channel_path = %s" % channel_path)
+    logger.log("channel_path = %s" % channel_path)

     channel_files = glob.glob(channel_path)
-    logger.info("channel_files found %s" % (len(channel_files)))
+    logger.log("channel_files found %s" % (len(channel_files)))

     # Channel Language
     channel_language = auto_filter()
-    logger.info("channel_language=%s" % channel_language)
+    logger.log("channel_language=%s" % channel_language)

     for channel_path in channel_files:
-        logger.info("channel in for = %s" % channel_path)
+        logger.log("channel in for = %s" % channel_path)

         channel = os.path.basename(channel_path).replace(".json", "")
@@ -136,7 +126,7 @@ def filterchannels(category, view="thumb_"):
         # If it's not a channel we skip it
         if not channel_parameters["channel"]:
             continue
-        logger.info("channel_parameters=%s" % repr(channel_parameters))
+        logger.log("channel_parameters=%s" % repr(channel_parameters))

         # If you prefer the banner and the channel has it, now change your mind
         if view == "banner_" and "banner" in channel_parameters:
@@ -231,7 +221,7 @@ def get_thumb(thumb_name, view="thumb_"):


 def set_channel_info(parameters):
-    logger.info()
+    logger.log()

     info = ''
     language = ''
diff --git a/core/channeltools.py b/core/channeltools.py
index 90a2dd74..528acdfb 100644
--- a/core/channeltools.py
+++ b/core/channeltools.py
@@ -15,7 +15,7 @@ default_file = dict()
 remote_path = 'https://raw.githubusercontent.com/kodiondemand/media/master/'

 def is_enabled(channel_name):
-    logger.info("channel_name=" + channel_name)
+    logger.log("channel_name=" + channel_name)
     return get_channel_parameters(channel_name)["active"] and get_channel_setting("enabled", channel=channel_name, default=True)
@@ -87,7 +87,7 @@ def get_channel_parameters(channel_name):


 def get_channel_json(channel_name):
-    # logger.info("channel_name=" + channel_name)
+    # logger.log("channel_name=" + channel_name)
     from core import filetools
     channel_json = None
     try:
@@ -101,9 +101,9 @@ def get_channel_json(channel_name):
                                           channel_name + ".json")

         if filetools.isfile(channel_path):
-            # logger.info("channel_data=" + channel_path)
+            # logger.log("channel_data=" + channel_path)
             channel_json = jsontools.load(filetools.read(channel_path))
-            # logger.info("channel_json= %s" % channel_json)
+            # logger.log("channel_json= %s" % channel_json)

     except Exception as ex:
         template = "An exception of type %s occured. Arguments:\n%r"
@@ -114,7 +114,7 @@ def get_channel_json(channel_name):


 def get_channel_controls_settings(channel_name):
-    # logger.info("channel_name=" + channel_name)
+    # logger.log("channel_name=" + channel_name)
     dict_settings = {}
     # import web_pdb; web_pdb.set_trace()
     # list_controls = get_channel_json(channel_name).get('settings', list())
@@ -137,7 +137,7 @@ def get_lang(channel_name):
         if hasattr(channel, 'list_language'):
             for language in channel.list_language:
                 list_language.append(language)
-        logger.info(list_language)
+        logger.log(list_language)
     else:
         sub = False
         langs = []
diff --git a/core/downloader.py b/core/downloader.py
index b714b474..c08e8fdf 100644
--- a/core/downloader.py
+++ b/core/downloader.py
@@ -253,12 +253,12 @@ class Downloader(object):
                 self.file.seek(2 ** 31, 0)
             except OverflowError:
                 self._seekable = False
-                logger.info("Cannot do seek() or tell() in files larger than 2GB")
+                logger.log("Cannot do seek() or tell() in files larger than 2GB")

         self.__get_download_info__()

         try:
-            logger.info("Download started: Parts: %s | Path: %s | File: %s | Size: %s" % (str(len(self._download_info["parts"])), self._pathencode('utf-8'), self._filenameencode('utf-8'), str(self._download_info["size"])))
+            logger.log("Download started: Parts: %s | Path: %s | File: %s | Size: %s" % (str(len(self._download_info["parts"])), self._pathencode('utf-8'), self._filenameencode('utf-8'), str(self._download_info["size"])))
         except:
             pass
@@ -410,7 +410,7 @@ class Downloader(object):
         return id == 0 or (len(self.completed_parts) >= id and sorted(self.completed_parts)[id - 1] == id - 1)

     def __save_file__(self):
-        logger.info("Thread started: %s" % threading.current_thread().name)
+        logger.log("Thread started: %s" % threading.current_thread().name)
         while self._state == self.states.downloading:
             if not self.pending_parts and not self.download_parts and not self.save_parts:  # Download finished
@@ -449,7 +449,7 @@ class Downloader(object):
                     self._download_info["parts"][s]["status"] = self.states.stopped
                     self._download_info["parts"][s]["current"] = self._download_info["parts"][s]["start"]

-        logger.info("Thread stopped: %s" % threading.current_thread().name)
+        logger.log("Thread stopped: %s" % threading.current_thread().name)

     def __get_part_id__(self):
         self._download_lock.acquire()
@@ -464,21 +464,21 @@ class Downloader(object):
         return None

     def __set_part_connecting__(self, id):
-        logger.info("ID: %s Establishing connection" % id)
+        logger.log("ID: %s Establishing connection" % id)
         self._download_info["parts"][id]["status"] = self.states.connecting

     def __set_part__error__(self, id):
-        logger.info("ID: %s Download failed" % id)
+        logger.log("ID: %s Download failed" % id)
         self._download_info["parts"][id]["status"] = self.states.error
         self.pending_parts.add(id)
         self.download_parts.remove(id)

     def __set_part__downloading__(self, id):
-        logger.info("ID: %s Downloading data ..." % id)
+        logger.log("ID: %s Downloading data ..." % id)
         self._download_info["parts"][id]["status"] = self.states.downloading

     def __set_part_completed__(self, id):
-        logger.info("ID: %s Download finished!" % id)
+        logger.log("ID: %s Download finished!" % id)
         self._download_info["parts"][id]["status"] = self.states.saving
         self.download_parts.remove(id)
         self.save_parts.add(id)
@@ -501,7 +501,7 @@ class Downloader(object):
         return file

     def __start_part__(self):
-        logger.info("Thread Started: %s" % threading.current_thread().name)
+        logger.log("Thread Started: %s" % threading.current_thread().name)
         while self._state == self.states.downloading:
             id = self.__get_part_id__()
             if id is None: break
@@ -528,7 +528,7 @@ class Downloader(object):
                     buffer = connection.read(self._block_size)
                     speed.append(old_div(len(buffer), ((time.time() - start) or 0.001)))
                 except:
-                    logger.info("ID: %s Error downloading data" % id)
+                    logger.log("ID: %s Error downloading data" % id)
                     self._download_info["parts"][id]["status"] = self.states.error
                     self.pending_parts.add(id)
                     self.download_parts.remove(id)
@@ -546,7 +546,7 @@ class Downloader(object):
                     if velocidad_minima > speed[-1] and velocidad_minima > speed[-2] and self._download_info["parts"][id]["current"] < self._download_info["parts"][id]["end"]:
                         if connection.fp: connection.fp._sock.close()
-                        logger.info("ID: %s Restarting connection! | Minimum Speed: %.2f %s/s | Speed: %.2f %s/s" % (id, vm[1], vm[2], v[1], v[2]))
+                        logger.log("ID: %s Restarting connection! | Minimum Speed: %.2f %s/s | Speed: %.2f %s/s" % (id, vm[1], vm[2], v[1], v[2]))
                         # file.close()
                         break
                     else:
@@ -556,7 +556,7 @@ class Downloader(object):
                 break

         self.__set_part_stopped__(id)
-        logger.info("Thread stopped: %s" % threading.current_thread().name)
+        logger.log("Thread stopped: %s" % threading.current_thread().name)

     def __update_json(self, started=True):
         text = filetools.read(self._json_path)
@@ -564,10 +564,10 @@ class Downloader(object):
         if self._json_text != text:
             self._json_text = text
             self._json_item = Item().fromjson(text)
-            logger.info('item loaded')
+            logger.log('item loaded')
         progress = int(self.progress)
         if started and self._json_item.downloadStatus == 0:  # stopped
-            logger.info('Download paused')
+            logger.log('Download paused')
             self.stop()
         elif self._json_item.downloadProgress != progress or not started:
             params = {"downloadStatus": 4, "downloadComplete": 0, "downloadProgress": progress}
diff --git a/core/downloadtools.py b/core/downloadtools.py
index b0a35cb0..a18af29e 100644
--- a/core/downloadtools.py
+++ b/core/downloadtools.py
@@ -97,11 +97,11 @@ def limpia_nombre_excepto_1(s):
     try:
         s = unicode(s, "utf-8")
     except UnicodeError:
-        # logger.info("no es utf-8")
+        # logger.log("no es utf-8")
        try:
             s = unicode(s, "iso-8859-1")
         except UnicodeError:
-            # logger.info("no es iso-8859-1")
+            # logger.log("no es iso-8859-1")
             pass
     # Remove accents
     s = limpia_nombre_sin_acentos(s)
@@ -125,29 +125,29 @@ def limpia_nombre_excepto_2(s):

 def getfilefromtitle(url, title):
     # Print in the log what you will discard
-    logger.info("title=" + title)
-    logger.info("url=" + url)
+    logger.log("title=" + title)
+    logger.log("url=" + url)
     plataforma = config.get_system_platform()
-    logger.info("platform=" + plataforma)
+    logger.log("platform=" + plataforma)

     # filename = xbmc.makeLegalFilename(title + url[-4:])
     from core import scrapertools
     nombrefichero = title + scrapertools.get_filename_from_url(url)[-4:]
-    logger.info("filename= %s" % nombrefichero)
+    logger.log("filename= %s" % nombrefichero)

     if "videobb" in url or "videozer" in url or "putlocker" in url:
         nombrefichero = title + ".flv"
     if "videobam" in url:
         nombrefichero = title + "." + url.rsplit(".", 1)[1][0:3]
-    logger.info("filename= %s" % nombrefichero)
+    logger.log("filename= %s" % nombrefichero)

     nombrefichero = limpia_nombre_caracteres_especiales(nombrefichero)
-    logger.info("filename= %s" % nombrefichero)
+    logger.log("filename= %s" % nombrefichero)

     fullpath = filetools.join(config.get_setting("downloadpath"), nombrefichero)
-    logger.info("fullpath= %s" % fullpath)
+    logger.log("fullpath= %s" % fullpath)

     if config.is_xbmc() and fullpath.startswith("special://"):
         import xbmc
@@ -162,7 +162,7 @@ def downloadtitle(url, title):

 def downloadbest(video_urls, title, continuar=False):
-    logger.info()
+    logger.log()

     # Flip it over, to put the highest quality one first (list () is for you to make a copy of)
     invertida = list(video_urls)
@@ -172,9 +172,9 @@ def downloadbest(video_urls, title, continuar=False):
         # videotitle = elemento[0]
         url = elemento[1]
         if not PY3:
-            logger.info("Downloading option " + title + " " + url.encode('ascii', 'ignore'))
+            logger.log("Downloading option " + title + " " + url.encode('ascii', 'ignore'))
         else:
-            logger.info("Downloading option " + title + " " + url.encode('ascii', 'ignore').decode('utf-8'))
+            logger.log("Downloading option " + title + " " + url.encode('ascii', 'ignore').decode('utf-8'))

         # Calculate the file where you should record
         try:
@@ -200,25 +200,25 @@ def downloadbest(video_urls, title, continuar=False):
         else:
             # EThe file doesn't even exist
             if not filetools.exists(fullpath):
-                logger.info("-> You have not downloaded anything, testing with the following option if there is")
+                logger.log("-> You have not downloaded anything, testing with the following option if there is")
             # The file exists
             else:
                 tamanyo = filetools.getsize(fullpath)
                 # It has size 0
                 if tamanyo == 0:
-                    logger.info("-> Download a file with size 0, testing with the following option if it exists")
+                    logger.log("-> Download a file with size 0, testing with the following option if it exists")
                     os.remove(fullpath)
                 else:
-                    logger.info("-> Download a file with size %d, he takes it for good" % tamanyo)
+                    logger.log("-> Download a file with size %d, he takes it for good" % tamanyo)
                     return 0

     return -2


 def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False, resumir=True, header=''):
-    logger.info("url= " + url)
-    logger.info("filename= " + nombrefichero)
+    logger.log("url= " + url)
+    logger.log("filename= " + nombrefichero)

     if headers is None:
         headers = []
@@ -242,14 +242,14 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False
             nombrefichero = xbmc.makeLegalFilename(nombrefichero)
         except:
             pass
-        logger.info("filename= " + nombrefichero)
+        logger.log("filename= " + nombrefichero)

     # The file exists and you want to continue
     if filetools.exists(nombrefichero) and continuar:
         f = filetools.file_open(nombrefichero, 'r+b', vfs=VFS)
         if resumir:
             exist_size = filetools.getsize(nombrefichero)
-            logger.info("the file exists, size= %d" % exist_size)
+            logger.log("the file exists, size= %d" % exist_size)
             grabado = exist_size
             f.seek(exist_size)
         else:
@@ -258,13 +258,13 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False

     # the file already exists and you don't want to continue, it aborts
     elif filetools.exists(nombrefichero) and not continuar:
-        logger.info("the file exists, it does not download again")
+        logger.log("the file exists, it does not download again")
         return -3

     # the file does not exist
     else:
         exist_size = 0
-        logger.info("the file does not exist")
+        logger.log("the file does not exist")
         f = filetools.file_open(nombrefichero, 'wb', vfs=VFS)
         grabado = 0
@@ -285,13 +285,13 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False
             additional_headers = [additional_headers]

         for additional_header in additional_headers:
-            logger.info("additional_header: " + additional_header)
+            logger.log("additional_header: " + additional_header)
             name = re.findall("(.*?)=.*?", additional_header)[0]
             value = urllib.parse.unquote_plus(re.findall(".*?=(.*?)$", additional_header)[0])
             headers.append([name, value])

     url = url.split("|")[0]
-    logger.info("url=" + url)
+    logger.log("url=" + url)

     # Socket timeout at 60 seconds
     socket.setdefaulttimeout(60)
@@ -299,7 +299,7 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False
     h = urllib.request.HTTPHandler(debuglevel=0)
     request = urllib.request.Request(url)
     for header in headers:
-        logger.info("Header= " + header[0] + ": " + header[1])
+        logger.log("Header= " + header[0] + ": " + header[1])
         request.add_header(header[0], header[1])

     if exist_size > 0:
@@ -328,12 +328,12 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False
     if exist_size > 0:
         totalfichero = totalfichero + exist_size

-    logger.info("Content-Length= %s" % totalfichero)
+    logger.log("Content-Length= %s" % totalfichero)

     blocksize = 100 * 1024

     bloqueleido = connexion.read(blocksize)
-    logger.info("Starting downloading the file, blocked= %s" % len(bloqueleido))
+    logger.log("Starting downloading the file, blocked= %s" % len(bloqueleido))

     maxreintentos = 10
@@ -360,7 +360,7 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False
                     tiempofalta = old_div(falta, velocidad)
                 else:
                     tiempofalta = 0
-                # logger.info(sec_to_hms(tiempofalta))
+                # logger.log(sec_to_hms(tiempofalta))
                 if not silent:
                     progreso.update(percent, "%.2fMB/%.2fMB (%d%%) %.2f Kb/s %s" % (descargadosmb, totalmb, percent, old_div(velocidad, 1024),
@@ -368,14 +368,14 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False
                 break
             except:
                 reintentos += 1
-                logger.info("ERROR in block download, retry %d" % reintentos)
+                logger.log("ERROR in block download, retry %d" % reintentos)
                 import traceback
                 logger.error(traceback.print_exc())

             # The user cancels the download
             try:
                 if progreso.iscanceled():
-                    logger.info("Download of file canceled")
+                    logger.log("Download of file canceled")
                     f.close()
                     progreso.close()
                     return -1
@@ -384,7 +384,7 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False

             # There was an error in the download
             if reintentos > maxreintentos:
-                logger.info("ERROR in the file download")
+                logger.log("ERROR in the file download")
                 f.close()
                 if not silent:
                     progreso.close()
@@ -430,7 +430,7 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False
     except:
         pass

-    logger.info("End of file download")
+    logger.log("End of file download")


 def downloadfileRTMP(url, nombrefichero, silent):
@@ -476,7 +476,7 @@ def downloadfileRTMP(url, nombrefichero, silent):
     try:
         rtmpdump_args = [rtmpdump_cmd] + rtmpdump_args + ["-o", nombrefichero]
         from os import spawnv, P_NOWAIT
-        logger.info("Initiating file download: %s" % " ".join(rtmpdump_args))
+        logger.log("Initiating file download: %s" % " ".join(rtmpdump_args))
         rtmpdump_exit = spawnv(P_NOWAIT, rtmpdump_cmd, rtmpdump_args)
         if not silent:
             from platformcode import platformtools
@@ -488,18 +488,18 @@ def downloadfileRTMP(url, nombrefichero, silent):

 def downloadfileGzipped(url, pathfichero):
-    logger.info("url= " + url)
+    logger.log("url= " + url)
     nombrefichero = pathfichero
-    logger.info("filename= " + nombrefichero)
+    logger.log("filename= " + nombrefichero)

     import xbmc
     nombrefichero = xbmc.makeLegalFilename(nombrefichero)
-    logger.info("filename= " + nombrefichero)
+    logger.log("filename= " + nombrefichero)

     patron = "(http://[^/]+)/.+"
     matches = re.compile(patron, re.DOTALL).findall(url)

     if len(matches):
-        logger.info("Main URL: " + matches[0])
+        logger.log("Main URL: " + matches[0])
         url1 = matches[0]
     else:
         url1 = url
@@ -546,9 +546,9 @@ def downloadfileGzipped(url, pathfichero):
     nombre_fichero_base = filetools.basename(nombrefichero)

     if len(nombre_fichero_base) == 0:
-        logger.info("Searching for name in the answer Headers")
+        logger.log("Searching for name in the answer Headers")
         nombre_base = connexion.headers["Content-Disposition"]
-        logger.info(nombre_base)
+        logger.log(nombre_base)
         patron = 'filename="([^"]+)"'
         matches = re.compile(patron, re.DOTALL).findall(nombre_base)
         if len(matches) > 0:
@@ -556,7 +556,7 @@ def downloadfileGzipped(url, pathfichero):
             titulo = GetTitleFromFile(titulo)
             nombrefichero = filetools.join(pathfichero, titulo)
         else:
-            logger.info("Name of the file not found, Placing temporary name: no_name.txt")
+            logger.log("Name of the file not found, Placing temporary name: no_name.txt")
             titulo = "no_name.txt"
             nombrefichero = filetools.join(pathfichero, titulo)
     totalfichero = int(connexion.headers["Content-Length"])
@@ -564,10 +564,10 @@ def downloadfileGzipped(url, pathfichero):
     # then
     f = filetools.file_open(nombrefichero, 'w', vfs=VFS)
-    logger.info("new file open")
+    logger.log("new file open")

     grabado = 0
-    logger.info("Content-Length= %s" % totalfichero)
+    logger.log("Content-Length= %s" % totalfichero)

     blocksize = 100 * 1024
@@ -580,7 +580,7 @@ def downloadfileGzipped(url, pathfichero):
             gzipper = gzip.GzipFile(fileobj=compressedstream)
             bloquedata = gzipper.read()
             gzipper.close()
-            logger.info("Starting downloading the file, blocked= %s" % len(bloqueleido))
+            logger.log("Starting downloading the file, blocked= %s" % len(bloqueleido))
         except:
             logger.error("ERROR: The file to be downloaded is not compressed with Gzip")
             f.close()
@@ -619,32 +619,32 @@ def downloadfileGzipped(url, pathfichero):
                     tiempofalta = old_div(falta, velocidad)
                 else:
                     tiempofalta = 0
-                logger.info(sec_to_hms(tiempofalta))
+                logger.log(sec_to_hms(tiempofalta))
                 progreso.update(percent, "%.2fMB/%.2fMB (%d%%) %.2f Kb/s %s left " % (descargadosmb, totalmb, percent, old_div(velocidad, 1024), sec_to_hms(tiempofalta)))
                 break
             except:
                 reintentos += 1
-                logger.info("ERROR in block download, retry %d" % reintentos)
+                logger.log("ERROR in block download, retry %d" % reintentos)
                 for line in sys.exc_info():
                     logger.error("%s" % line)

             # The user cancels the download
             if progreso.iscanceled():
-                logger.info("Download of file canceled")
+                logger.log("Download of file canceled")
                 f.close()
                 progreso.close()
                 return -1

             # There was an error in the download
             if reintentos > maxreintentos:
-                logger.info("ERROR in the file download")
+                logger.log("ERROR in the file download")
                 f.close()
                 progreso.close()
                 return -2
     except:
-        logger.info("ERROR in the file download")
+        logger.log("ERROR in the file download")
         for line in sys.exc_info():
             logger.error("%s" % line)
         f.close()
@@ -655,15 +655,15 @@ def downloadfileGzipped(url, pathfichero):
     # print data
     progreso.close()

-    logger.info("End download of the file")
+    logger.log("End download of the file")
     return nombrefichero


 def GetTitleFromFile(title):
     # Print in the log what you will discard
logger.info("title= " + title) + logger.log("title= " + title) plataforma = config.get_system_platform() - logger.info("plataform= " + plataforma) + logger.log("plataform= " + plataforma) # nombrefichero = xbmc.makeLegalFilename(title + url[-4:]) nombrefichero = title @@ -677,11 +677,11 @@ def sec_to_hms(seconds): def downloadIfNotModifiedSince(url, timestamp): - logger.info("(" + url + "," + time.ctime(timestamp) + ")") + logger.log("(" + url + "," + time.ctime(timestamp) + ")") # Convert date to GMT fecha_formateada = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime(timestamp)) - logger.info("Formatted date= %s" % fecha_formateada) + logger.log("Formatted date= %s" % fecha_formateada) # Check if it has changed inicio = time.clock() @@ -702,9 +702,9 @@ def downloadIfNotModifiedSince(url, timestamp): except urllib.error.URLError as e: # If it returns 304 it is that it has not changed if hasattr(e, 'code'): - logger.info("HTTP response code : %d" % e.code) + logger.log("HTTP response code : %d" % e.code) if e.code == 304: - logger.info("It has not changed") + logger.log("It has not changed") updated = False # Grab errors with response code from requested external server else: @@ -713,13 +713,13 @@ def downloadIfNotModifiedSince(url, timestamp): data = "" fin = time.clock() - logger.info("Downloaded in %d seconds " % (fin - inicio + 1)) + logger.log("Downloaded in %d seconds " % (fin - inicio + 1)) return updated, data def download_all_episodes(item, channel, first_episode="", preferred_server="vidspot", filter_language=""): - logger.info("show= " + item.show) + logger.log("show= " + item.show) show_title = item.show # Gets the listing from which it was called @@ -749,9 +749,9 @@ def download_all_episodes(item, channel, first_episode="", preferred_server="vid for episode_item in episode_itemlist: try: - logger.info("episode= " + episode_item.title) + logger.log("episode= " + episode_item.title) episode_title = scrapertools.find_single_match(episode_item.title, r"(\d+x\d+)") - logger.info("episode= " + episode_title) + logger.log("episode= " + episode_title) except: import traceback logger.error(traceback.format_exc()) @@ -815,7 +815,7 @@ def download_all_episodes(item, channel, first_episode="", preferred_server="vid new_mirror_itemlist_4 + new_mirror_itemlist_5 + new_mirror_itemlist_6) for mirror_item in mirrors_itemlist: - logger.info("mirror= " + mirror_item.title) + logger.log("mirror= " + mirror_item.title) if "(Italiano)" in mirror_item.title: idioma = "(Italiano)" @@ -836,11 +836,11 @@ def download_all_episodes(item, channel, first_episode="", preferred_server="vid idioma = "(Desconocido)" codigo_idioma = "desconocido" - logger.info("filter_language=#" + filter_language + "#, codigo_idioma=#" + codigo_idioma + "#") + logger.log("filter_language=#" + filter_language + "#, codigo_idioma=#" + codigo_idioma + "#") if filter_language == "" or (filter_language != "" and filter_language == codigo_idioma): - logger.info("downloading mirror") + logger.log("downloading mirror") else: - logger.info("language " + codigo_idioma + " filtered, skipping") + logger.log("language " + codigo_idioma + " filtered, skipping") continue if hasattr(channel, 'play'): @@ -856,14 +856,14 @@ def download_all_episodes(item, channel, first_episode="", preferred_server="vid # Adds it to the download list if puedes: - logger.info("downloading mirror started...") + logger.log("downloading mirror started...") # The highest quality video is the latest # mediaurl = video_urls[len(video_urls) - 1][1] devuelve = 
downloadbest(video_urls, show_title + " " + episode_title + " " + idioma + " [" + video_item.server + "]", continuar=False) if devuelve == 0: - logger.info("download ok") + logger.log("download ok") descargado = True break elif devuelve == -1: @@ -874,14 +874,14 @@ def download_all_episodes(item, channel, first_episode="", preferred_server="vid pass return else: - logger.info("download error, try another mirror") + logger.log("download error, try another mirror") continue else: - logger.info("downloading mirror not available... trying next") + logger.log("downloading mirror not available... trying next") if not descargado: - logger.info("UNDOWNLOADED EPISODE " + episode_title) + logger.log("UNDOWNLOADED EPISODE " + episode_title) def episodio_ya_descargado(show_title, episode_title): @@ -889,9 +889,9 @@ def episodio_ya_descargado(show_title, episode_title): ficheros = filetools.listdir(".") for fichero in ficheros: - # logger.info("fichero="+fichero) + # logger.log("fichero="+fichero) if fichero.lower().startswith(show_title.lower()) and scrapertools.find_single_match(fichero, "(\d+x\d+)") == episode_title: - logger.info("found!") + logger.log("found!") return True return False diff --git a/core/filetools.py b/core/filetools.py index 554a2671..7719d6c7 100644 --- a/core/filetools.py +++ b/core/filetools.py @@ -814,7 +814,7 @@ def remove_tags(title): @rtype: str @return: string without tags """ - logger.info() + logger.log() title_without_tags = scrapertools.find_single_match(title, r'\[color .+?\](.+)\[\/color\]') @@ -832,7 +832,7 @@ def remove_smb_credential(path): @return: chain without credentials @rtype: str """ - logger.info() + logger.log() if not scrapertools.find_single_match(path, r'(^\w+:\/\/)'): return path diff --git a/core/httptools.py b/core/httptools.py index 1ec21351..afa8c9ce 100755 --- a/core/httptools.py +++ b/core/httptools.py @@ -125,11 +125,11 @@ def set_cookies(dict_cookie, clear=True, alfa_s=False): def load_cookies(alfa_s=False): cookies_lock.acquire() if os.path.isfile(cookies_file): - if not alfa_s: logger.info("Reading cookies file") + if not alfa_s: logger.log("Reading cookies file") try: cj.load(cookies_file, ignore_discard=True) except: - if not alfa_s: logger.info("The cookie file exists but is illegible, it is deleted") + if not alfa_s: logger.log("The cookie file exists but is illegible, it is deleted") os.remove(cookies_file) cookies_lock.release() @@ -137,7 +137,7 @@ load_cookies() def save_cookies(alfa_s=False): cookies_lock.acquire() - if not alfa_s: logger.info("Saving cookies...") + if not alfa_s: logger.log("Saving cookies...") cj.save(cookies_file, ignore_discard=True) cookies_lock.release() @@ -161,7 +161,7 @@ def random_useragent(): def show_infobox(info_dict): - logger.info() + logger.log() from textwrap import wrap box_items_kodi = {'r_up_corner': u'\u250c', @@ -186,16 +186,16 @@ def show_infobox(info_dict): - width = 60 + width = 100 version = '%s: %s' % (config.get_localized_string(20000), __version) if config.is_xbmc(): box = box_items_kodi else: box = box_items - logger.info('%s%s%s' % (box['r_up_corner'], box['fill'] * width, box['l_up_corner'])) - logger.info('%s%s%s' % (box['center'], version.center(width), box['center'])) - logger.info('%s%s%s' % (box['r_center'], box['fill'] * width, box['l_center'])) + logger.log('%s%s%s' % (box['r_up_corner'], box['fill'] * width, box['l_up_corner'])) + logger.log('%s%s%s' % (box['center'], version.center(width), box['center'])) + logger.log('%s%s%s' % (box['r_center'], box['fill'] * width, 
box['l_center'])) count = 0 for key, value in info_dict: @@ -210,19 +210,19 @@ def show_infobox(info_dict): for line in text: if len(line) < width: line = line.ljust(width, ' ') - logger.info('%s%s%s' % (box['center'], line, box['center'])) + logger.log('%s%s%s' % (box['center'], line, box['center'])) else: - logger.info('%s%s%s' % (box['center'], text, box['center'])) + logger.log('%s%s%s' % (box['center'], text, box['center'])) if count < len(info_dict): - logger.info('%s%s%s' % (box['r_center'], box['fill'] * width, box['l_center'])) + logger.log('%s%s%s' % (box['r_center'], box['fill'] * width, box['l_center'])) else: - logger.info('%s%s%s' % (box['r_dn_corner'], box['fill'] * width, box['l_dn_corner'])) + logger.log('%s%s%s' % (box['r_dn_corner'], box['fill'] * width, box['l_dn_corner'])) return def downloadpage(url, **opt): - # logger.info() + # logger.log() """ Open a url and return the data obtained diff --git a/core/item.py b/core/item.py index 5f17dadf..0da43760 100644 --- a/core/item.py +++ b/core/item.py @@ -298,7 +298,7 @@ class Item(object): def tostring(self, separator=", "): """ Generate a text string with the item's data for the log - Use: logger.info(item.tostring()) + Use: logger.log(item.tostring()) @param separator: string to be used as a separator @type separator: str '""" diff --git a/core/jsontools.py b/core/jsontools.py index 6ebc799b..d21215e3 100644 --- a/core/jsontools.py +++ b/core/jsontools.py @@ -11,24 +11,24 @@ from inspect import stack try: import json except: - logger.info("json included in the interpreter **NOT** available") + logger.log("json included in the interpreter **NOT** available") try: import simplejson as json except: - logger.info("simplejson included in the interpreter **NOT** available") + logger.log("simplejson included in the interpreter **NOT** available") try: from lib import simplejson as json except: - logger.info("simplejson in lib directory **NOT** available") + logger.log("simplejson in lib directory **NOT** available") logger.error("A valid JSON parser was not found") json = None else: - logger.info("Using simplejson in the lib directory") + logger.log("Using simplejson in the lib directory") else: - logger.info("Using simplejson included in the interpreter") + logger.log("Using simplejson included in the interpreter") # ~ else: - # ~ logger.info("Usando json incluido en el interprete") + # ~ logger.log("Usando json incluido en el interprete") import sys PY3 = False @@ -90,7 +90,7 @@ def get_node_from_file(name_file, node, path=None): @return: dict with the node to return @rtype: dict """ - logger.info() + logger.log() from platformcode import config from core import filetools @@ -129,7 +129,7 @@ def check_to_backup(data, fname, dict_data): @param dict_data: dictionary name @type dict_data: dict """ - logger.info() + logger.log() if not dict_data: logger.error("Error loading json from file %s" % fname) @@ -161,7 +161,7 @@ def update_node(dict_node, name_file, node, path=None, silent=False): @return json_data @rtype: dict """ - if not silent: logger.info() + if not silent: logger.log() from platformcode import config from core import filetools diff --git a/core/scraper.py b/core/scraper.py index 2515a77d..daae7035 100644 --- a/core/scraper.py +++ b/core/scraper.py @@ -61,7 +61,7 @@ def find_and_set_infoLabels(item): # Check if there is a 'code' if scraper_result and item.infoLabels['code']: # correct code - logger.info("Identificador encontrado: %s" % item.infoLabels['code']) + logger.log("Identificador encontrado: %s" % 
item.infoLabels['code']) scraper.completar_codigos(item) return True elif scraper_result: @@ -71,7 +71,7 @@ def find_and_set_infoLabels(item): # Content not found msg = config.get_localized_string(60228) % title - logger.info(msg) + logger.log(msg) # Show box with other options: if scrapers_disponibles[scraper_actual] in list_opciones_cuadro: list_opciones_cuadro.remove(scrapers_disponibles[scraper_actual]) @@ -95,10 +95,10 @@ def find_and_set_infoLabels(item): elif index == 1: # You have to create a dialog box to enter the data - logger.info("Complete information") + logger.log("Complete information") if cuadro_completar(item): # correct code - logger.info("Identifier found: %s" % str(item.infoLabels['code'])) + logger.log("Identifier found: %s" % str(item.infoLabels['code'])) return True # raise @@ -121,7 +121,7 @@ def find_and_set_infoLabels(item): def cuadro_completar(item): - logger.info() + logger.log() global dict_default dict_default = {} @@ -234,7 +234,7 @@ def get_nfo(item): @rtype: str @return: """ - logger.info() + logger.log() if "infoLabels" in item and "noscrap_id" in item.infoLabels: # Create the xml file with the data obtained from the item since there is no active scraper info_nfo = '' diff --git a/core/scrapertools.py b/core/scrapertools.py index c31fae9e..c81e09fc 100644 --- a/core/scrapertools.py +++ b/core/scrapertools.py @@ -34,7 +34,7 @@ from platformcode import logger def printMatches(matches): i = 0 for match in matches: - logger.info("%d %s" % (i, match)) + logger.log("%d %s" % (i, match)) i = i + 1 @@ -446,7 +446,7 @@ def get_season_and_episode(title): except: pass - logger.info("'" + title + "' -> '" + filename + "'") + logger.log("'" + title + "' -> '" + filename + "'") return filename diff --git a/core/servertools.py b/core/servertools.py index 674fd7ff..37693b10 100644 --- a/core/servertools.py +++ b/core/servertools.py @@ -47,7 +47,7 @@ def find_video_items(item=None, data=None): @return: returns the itemlist with the results @rtype: list """ - logger.info() + logger.log() itemlist = [] # Download the page @@ -97,7 +97,7 @@ def get_servers_itemlist(itemlist, fnc=None, sort=False): # Walk the patterns for pattern in server_parameters.get("find_videos", {}).get("patterns", []): - logger.info(pattern["pattern"]) + logger.log(pattern["pattern"]) # Scroll through the results for match in re.compile(pattern["pattern"], re.DOTALL).finditer( "\n".join([item.url.split('|')[0] for item in itemlist if not item.server])): @@ -144,7 +144,7 @@ def findvideos(data, skip=False): return some link. It can also be an integer greater than 1, which would represent the maximum number of links to search. 
:return: """ - logger.info() + logger.log() devuelve = [] skip = int(skip) servers_list = list(get_servers_list().keys()) @@ -165,7 +165,7 @@ def findvideos(data, skip=False): devuelve = devuelve[:skip] break # if config.get_setting("filter_servers") == False: is_filter_servers = False - # logger.info('DEVUELVE: ' + str(devuelve)) + # logger.log('DEVUELVE: ' + str(devuelve)) # if not devuelve and is_filter_servers: # platformtools.dialog_ok(config.get_localized_string(60000), config.get_localized_string(60001)) return devuelve @@ -194,7 +194,7 @@ def findvideosbyserver(data, serverid): value = translate_server_name(server_parameters["name"]) , url, serverid, server_parameters.get("thumbnail", "") if value not in devuelve and url not in server_parameters["find_videos"].get("ignore_urls", []): devuelve.append(value) - logger.info(msg) + logger.log(msg) return devuelve @@ -206,7 +206,7 @@ def guess_server_thumbnail(serverid): def get_server_from_url(url): - logger.info() + logger.log() servers_list = list(get_servers_list().keys()) # Run findvideos on each active server @@ -224,7 +224,7 @@ def get_server_from_url(url): for n, pattern in enumerate(server_parameters["find_videos"].get("patterns", [])): msg = "%s\npattern: %s" % (serverid, pattern["pattern"]) if not "pattern_compiled" in pattern: - # logger.info('compiled ' + serverid) + # logger.log('compiled ' + serverid) pattern["pattern_compiled"] = re.compile(pattern["pattern"]) dict_servers_parameters[serverid]["find_videos"]["patterns"][n]["pattern_compiled"] = pattern["pattern_compiled"] # Scroll through the results @@ -237,7 +237,7 @@ def get_server_from_url(url): msg += "\nurl encontrada: %s" % url value = translate_server_name(server_parameters["name"]), url, serverid, server_parameters.get("thumbnail", "") if url not in server_parameters["find_videos"].get("ignore_urls", []): - logger.info(msg) + logger.log(msg) return value return None @@ -260,7 +260,7 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo @return: returns the url of the video @rtype: list """ - logger.info("Server: %s, Url: %s" % (server, url)) + logger.log("Server: %s, Url: %s" % (server, url)) server = server.lower() @@ -273,7 +273,7 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo if server == "directo" or server == "local": if isinstance(video_password, list): return video_password, len(video_password) > 0, "
".join(error_messages) - logger.info("Server: %s, url is good" % server) + logger.log("Server: %s, url is good" % server) video_urls.append(["%s [%s]" % (urlparse.urlparse(url)[2][-4:], config.get_localized_string(30137)), url]) # Find out the video URL @@ -304,7 +304,7 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo priority = int(config.get_setting("resolve_priority")) opciones = sorted(opciones, key=lambda x: orden[priority].index(x)) - logger.info("Available options: %s | %s" % (len(opciones), opciones)) + logger.log("Available options: %s | %s" % (len(opciones), opciones)) else: logger.error("There is no connector for the server %s" % server) error_messages.append(config.get_localized_string(60004) % server) @@ -313,7 +313,7 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo # Import the server try: server_module = __import__('servers.%s' % server, None, None, ["servers.%s" % server]) - logger.info("Imported server: %s" % server_module) + logger.log("Imported server: %s" % server_module) except: server_module = None if muestra_dialogo: @@ -324,17 +324,17 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo # If it has a function to see if the video exists, check it now if hasattr(server_module, 'test_video_exists'): - logger.info("Invoking a %s.test_video_exists" % server) + logger.log("Invoking a %s.test_video_exists" % server) try: video_exists, message = server_module.test_video_exists(page_url=url) if not video_exists: error_messages.append(message) - logger.info("test_video_exists says video doesn't exist") + logger.log("test_video_exists says video doesn't exist") if muestra_dialogo: progreso.close() else: - logger.info("test_video_exists says the video DOES exist") + logger.log("test_video_exists says the video DOES exist") except: logger.error("Could not verify if the video exists") import traceback @@ -361,7 +361,7 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo # Free mode if opcion == "free": try: - logger.info("Invoking a %s.get_video_url" % server) + logger.log("Invoking a %s.get_video_url" % server) response = serverid.get_video_url(page_url=url, video_password=video_password) video_urls.extend(response) except: @@ -373,7 +373,7 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo # Premium mode else: try: - logger.info("Invoking a %s.get_video_url" % opcion) + logger.log("Invoking a %s.get_video_url" % opcion) response = serverid.get_video_url(page_url=url, premium=True, user=config.get_setting("user", server=opcion), password=config.get_setting("password", server=opcion), @@ -483,7 +483,7 @@ def get_server_parameters(server): @return: server data @rtype: dict """ - # logger.info("server %s" % server) + # logger.log("server %s" % server) global dict_servers_parameters server = server.split('.')[0] if not server: @@ -533,15 +533,15 @@ def get_server_parameters(server): # def get_server_json(server_name): -# # logger.info("server_name=" + server_name) +# # logger.log("server_name=" + server_name) # try: # server_path = filetools.join(config.get_runtime_path(), "servers", server_name + ".json") # if not filetools.exists(server_path): # server_path = filetools.join(config.get_runtime_path(), "servers", "debriders", server_name + ".json") # -# # logger.info("server_path=" + server_path) +# # logger.log("server_path=" + server_path) # server_json = jsontools.load(filetools.read(server_path)) -# # 
logger.info("server_json= %s" % server_json) +# # logger.log("server_json= %s" % server_json) # # except Exception as ex: # template = "An exception of type %s occured. Arguments:\n%r" @@ -613,7 +613,7 @@ def get_server_setting(name, server, default=None): if isinstance(dict_file, dict) and 'settings' in dict_file: dict_settings = dict_file['settings'] except EnvironmentError: - logger.info("ERROR when reading the file: %s" % file_settings) + logger.log("ERROR when reading the file: %s" % file_settings) if not dict_settings or name not in dict_settings: # We get controls from the file ../servers/server.json @@ -627,7 +627,7 @@ def get_server_setting(name, server, default=None): dict_file['settings'] = dict_settings # We create the file ../settings/channel_data.json if not filetools.write(file_settings, jsontools.dump(dict_file)): - logger.info("ERROR saving file: %s" % file_settings) + logger.log("ERROR saving file: %s" % file_settings) # We return the value of the local parameter 'name' if it exists, if default is not returned return dict_settings.get(name, default) @@ -649,7 +649,7 @@ def set_server_setting(name, value, server): dict_file = jsontools.load(filetools.read(file_settings)) dict_settings = dict_file.get('settings', {}) except EnvironmentError: - logger.info("ERROR when reading the file: %s" % file_settings) + logger.log("ERROR when reading the file: %s" % file_settings) dict_settings[name] = value @@ -661,7 +661,7 @@ def set_server_setting(name, value, server): # We create the file ../settings/channel_data.json if not filetools.write(file_settings, jsontools.dump(dict_file)): - logger.info("ERROR saving file: %s" % file_settings) + logger.log("ERROR saving file: %s" % file_settings) return None return value @@ -696,7 +696,7 @@ def get_debriders_list(): if server.endswith(".json"): server_parameters = get_server_parameters(server) if server_parameters["active"] == True: - logger.info(server_parameters) + logger.log(server_parameters) server_list[server.split(".")[0]] = server_parameters return server_list @@ -742,7 +742,7 @@ def check_list_links(itemlist, numero='', timeout=3): it = res[0] verificacion = res[1] it.title = verificacion + ' ' + it.title.strip() - logger.info('VERIFICATION= ' + verificacion) + logger.log('VERIFICATION= ' + verificacion) it.alive = verificacion return itemlist @@ -763,7 +763,7 @@ def check_video_link(item, timeout=3): server_module = __import__('servers.%s' % server, None, None, ["servers.%s" % server]) except: server_module = None - logger.info("[check_video_link] Cannot import server! %s" % server) + logger.log("[check_video_link] Cannot import server! %s" % server) return item, NK if hasattr(server_module, 'test_video_exists'): @@ -773,20 +773,20 @@ def check_video_link(item, timeout=3): try: video_exists, message = server_module.test_video_exists(page_url=url) if not video_exists: - logger.info("[check_video_link] Does not exist! %s %s %s" % (message, server, url)) + logger.log("[check_video_link] Does not exist! %s %s %s" % (message, server, url)) resultado = KO else: - logger.info("[check_video_link] check ok %s %s" % (server, url)) + logger.log("[check_video_link] check ok %s %s" % (server, url)) resultado = OK except: - logger.info("[check_video_link] Can't check now! %s %s" % (server, url)) + logger.log("[check_video_link] Can't check now! 
%s %s" % (server, url)) resultado = NK finally: httptools.HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT = ant_timeout # Restore download time return item, resultado - logger.info("[check_video_link] There is no test_video_exists for server: %s" % server) + logger.log("[check_video_link] There is no test_video_exists for server: %s" % server) return item, NK def translate_server_name(name): diff --git a/core/tmdb.py b/core/tmdb.py index 13dd22cd..e1f2b7f2 100644 --- a/core/tmdb.py +++ b/core/tmdb.py @@ -87,7 +87,7 @@ create_bd() # The function name is the name of the decorator and receives the function that decorates. def cache_response(fn): - logger.info() + logger.log() # import time # start_time = time.time() @@ -441,7 +441,7 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda=def_lang, lock=None def find_and_set_infoLabels(item): - logger.info() + logger.log() global otmdb_global tmdb_result = None @@ -851,7 +851,7 @@ class Tmdb(object): cls.dic_generos[idioma][tipo] = {} url = ('http://api.themoviedb.org/3/genre/%s/list?api_key=a1ab8b8669da03637a4b98fa39c39228&language=%s' % (tipo, idioma)) try: - logger.info("[Tmdb.py] Filling in dictionary of genres") + logger.log("[Tmdb.py] Filling in dictionary of genres") resultado = cls.get_json(url) if not isinstance(resultado, dict): @@ -883,7 +883,7 @@ class Tmdb(object): '&language=%s' % (self.busqueda_id, source, self.busqueda_idioma)) buscando = "%s: %s" % (source.capitalize(), self.busqueda_id) - logger.info("[Tmdb.py] Searching %s:\n%s" % (buscando, url)) + logger.log("[Tmdb.py] Searching %s:\n%s" % (buscando, url)) resultado = self.get_json(url) if not isinstance(resultado, dict): resultado = ast.literal_eval(resultado.decode('utf-8')) @@ -925,7 +925,7 @@ class Tmdb(object): url += '&year=%s' % self.busqueda_year buscando = self.busqueda_texto.capitalize() - logger.info("[Tmdb.py] Searching %s on page %s:\n%s" % (buscando, page, url)) + logger.log("[Tmdb.py] Searching %s on page %s:\n%s" % (buscando, page, url)) resultado = self.get_json(url) if not isinstance(resultado, dict): resultado = ast.literal_eval(resultado.decode('utf-8')) @@ -986,7 +986,7 @@ class Tmdb(object): url = ('http://api.themoviedb.org/3/%s?api_key=a1ab8b8669da03637a4b98fa39c39228&%s' % (type_search, "&".join(params))) - logger.info("[Tmdb.py] Searcing %s:\n%s" % (type_search, url)) + logger.log("[Tmdb.py] Searcing %s:\n%s" % (type_search, url)) resultado = self.get_json(url) if not isinstance(resultado, dict): resultado = ast.literal_eval(resultado.decode('utf-8')) @@ -1051,7 +1051,7 @@ class Tmdb(object): return True def get_list_resultados(self, num_result=20): - # logger.info("self %s" % str(self)) + # logger.log("self %s" % str(self)) res = [] if num_result <= 0: @@ -1271,7 +1271,7 @@ class Tmdb(object): "&append_to_response=credits" % (self.result["id"], numtemporada, self.busqueda_idioma) buscando = "id_Tmdb: " + str(self.result["id"]) + " season: " + str(numtemporada) + "\nURL: " + url - logger.info("[Tmdb.py] Searcing " + buscando) + logger.log("[Tmdb.py] Searcing " + buscando) try: self.temporada[numtemporada] = self.get_json(url) if not isinstance(self.temporada[numtemporada], dict): @@ -1460,7 +1460,7 @@ class Tmdb(object): items.extend(list(self.get_episodio(ret_infoLabels['season'], episodio).items())) - # logger.info("ret_infoLabels" % ret_infoLabels) + # logger.log("ret_infoLabels" % ret_infoLabels) for k, v in items: if not v: diff --git a/core/trakt_tools.py b/core/trakt_tools.py index d78e1852..a7b046af 100644 --- a/core/trakt_tools.py +++ 
+++ b/core/trakt_tools.py
@@ -129,7 +129,7 @@ def token_trakt(item):

 def set_trakt_info(item):
-    logger.info()
+    logger.log()
     import xbmcgui
     # Envia los datos a trakt
     try:
@@ -140,7 +140,7 @@ def set_trakt_info(item):
     pass

 def get_trakt_watched(id_type, mediatype, update=False):
-    logger.info()
+    logger.log()

     id_list = []
     id_dict = dict()
@@ -228,7 +228,7 @@ def trakt_check(itemlist):

 def get_sync_from_file():
-    logger.info()
+    logger.log()
     sync_path = os.path.join(config.get_data_path(), 'settings_channels', 'trakt_data.json')
     trakt_node = {}
     if os.path.exists(sync_path):
@@ -240,7 +240,7 @@ def get_sync_from_file():

 def update_trakt_data(mediatype, trakt_data):
-    logger.info()
+    logger.log()

     sync_path = os.path.join(config.get_data_path(), 'settings_channels', 'trakt_data.json')
     if os.path.exists(sync_path):
@@ -250,7 +250,7 @@ def update_trakt_data(mediatype, trakt_data):

 def ask_install_script():
-    logger.info()
+    logger.log()

     from platformcode import platformtools
@@ -264,7 +264,7 @@ def ask_install_script():

 def wait_for_update_trakt():
-    logger.info()
+    logger.log()
     t = Thread(update_all)
     t.setDaemon(True)
     t.start()
@@ -273,7 +273,7 @@ def wait_for_update_trakt():
 def update_all():
     # from core.support import dbg;dbg()
     from time import sleep
-    logger.info()
+    logger.log()
     sleep(20)
     while xbmc.Player().isPlaying():
         sleep(20)
diff --git a/core/tvdb.py b/core/tvdb.py
index c6d9f56d..0097d0c4 100644
--- a/core/tvdb.py
+++ b/core/tvdb.py
@@ -73,8 +73,8 @@ otvdb_global = None

 def find_and_set_infoLabels(item):
-    logger.info()
-    # logger.info("item es %s" % item)
+    logger.log()
+    # logger.log("item es %s" % item)

     p_dialog = None
     if not item.contentSeason:
@@ -368,7 +368,7 @@ class Tvdb(object):

     @classmethod
     def __check_token(cls):
-        # logger.info()
+        # logger.log()
         if TOKEN == "":
             cls.__login()
         else:
@@ -383,7 +383,7 @@ class Tvdb(object):

     @staticmethod
     def __login():
-        # logger.info()
+        # logger.log()
         global TOKEN

         apikey = "106B699FDC04301C"
@@ -413,7 +413,7 @@ class Tvdb(object):

     @classmethod
     def __refresh_token(cls):
-        # logger.info()
+        # logger.log()
         global TOKEN
         is_success = False
@@ -512,7 +512,7 @@ class Tvdb(object):
                 ]
             }
         """
-        logger.info()
+        logger.log()
         if id_episode and self.episodes.get(id_episode):
             return self.episodes.get(id_episode)
@@ -582,7 +582,7 @@ class Tvdb(object):
             }
         }
         """
-        logger.info()
+        logger.log()

         try:
             url = HOST + "/series/%s/episodes?page=%s" % (_id, page)
@@ -600,7 +600,7 @@ class Tvdb(object):
         else:
             self.list_episodes[page] = jsontools.load(html)

-        # logger.info("dict_html %s" % self.list_episodes)
+        # logger.log("dict_html %s" % self.list_episodes)

         return self.list_episodes[page]
@@ -668,7 +668,7 @@ class Tvdb(object):
         """
         if semaforo:
             semaforo.acquire()
-        logger.info()
+        logger.log()

         url = HOST + "/episodes/%s" % _id
@@ -691,7 +691,7 @@ class Tvdb(object):
             dict_html = jsontools.load(html)
             dict_html = dict_html.pop("data")
-            logger.info("dict_html %s" % dict_html)
+            logger.log("dict_html %s" % dict_html)
             self.episodes[_id] = dict_html

         if semaforo:
@@ -722,7 +722,7 @@ class Tvdb(object):
             "status": "string"
           }
         """
-        logger.info()
+        logger.log()

         try:
@@ -820,7 +820,7 @@ class Tvdb(object):
             }
         }
         """
-        logger.info()
+        logger.log()
         resultado = {}

         url = HOST + "/series/%s" % _id
@@ -879,7 +879,7 @@ class Tvdb(object):

         @rtype: dict
         """
-        logger.info()
+        logger.log()
         if self.result.get('image_season_%s' % season):
             return self.result['image_season_%s' % season]
@@ -931,7 +931,7 @@ class Tvdb(object):
         @return: dictionary with actors
         @rtype: dict
         """
-        logger.info()
+        logger.log()

         url = HOST + "/series/%s/actors" % _id
         DEFAULT_HEADERS["Accept-Language"] = lang
@@ -961,7 +961,7 @@ class Tvdb(object):
         @rtype: list
         @return: list of results
         """
-        logger.info()
+        logger.log()
         list_results = []

         # if we have a result and it has seriesName, we already have the info of the series, it is not necessary to search again
@@ -1102,7 +1102,7 @@ class Tvdb(object):
         #             ret_infoLabels['title'] = v + " " + origen.get('aliases', [''])[0]
         #         else:
         #             ret_infoLabels['title'] = v
-        # logger.info("el titulo es %s " % ret_infoLabels['title'])
+        # logger.log("el titulo es %s " % ret_infoLabels['title'])
                 ret_infoLabels['title'] = v

             elif k == 'cast':
diff --git a/core/videolibrarytools.py b/core/videolibrarytools.py
index fe1ba427..ab2d3324 100644
--- a/core/videolibrarytools.py
+++ b/core/videolibrarytools.py
@@ -78,7 +78,7 @@ def save_movie(item, silent=False):
     @rtype fallidos: int
     @return: the number of failed items or -1 if all failed
     """
-    logger.info()
+    logger.log()
     # logger.debug(item.tostring('\n'))
     insertados = 0
     sobreescritos = 0
@@ -144,7 +144,7 @@ def save_movie(item, silent=False):
     if not path:
         # Create folder
         path = filetools.join(MOVIES_PATH, ("%s [%s]" % (base_name, _id)).strip())
-        logger.info("Creating movie directory:" + path)
+        logger.log("Creating movie directory:" + path)
         if not filetools.mkdir(path):
             logger.debug("Could not create directory")
             return 0, 0, -1, path
@@ -159,7 +159,7 @@ def save_movie(item, silent=False):

     if not nfo_exists:
         # We create .nfo if it doesn't exist
-        logger.info("Creating .nfo: " + nfo_path)
+        logger.log("Creating .nfo: " + nfo_path)
         head_nfo = scraper.get_nfo(item)

         item_nfo = Item(title=item.contentTitle, channel="videolibrary", action='findvideos',
@@ -182,7 +182,7 @@ def save_movie(item, silent=False):

     if item_nfo and strm_exists:
         if json_exists:
-            logger.info("The file exists. Is overwritten")
+            logger.log("The file exists. Is overwritten")
             sobreescritos += 1
         else:
             insertados += 1
@@ -209,7 +209,7 @@ def save_movie(item, silent=False):
             item_nfo.library_urls[item.channel] = item.url

     if filetools.write(nfo_path, head_nfo + item_nfo.tojson()):
-        #logger.info("FOLDER_MOVIES : %s" % FOLDER_MOVIES)
+        #logger.log("FOLDER_MOVIES : %s" % FOLDER_MOVIES)
         # We update the Kodi video library with the movie
         if config.is_xbmc() and config.get_setting("videolibrary_kodi") and not silent:
             from platformcode import xbmc_videolibrary
@@ -238,7 +238,7 @@ def update_renumber_options(item, head_nfo, path):
         json = json_file['TVSHOW_AUTORENUMBER']
         if item.fulltitle in json:
            item.channel_prefs[channel]['TVSHOW_AUTORENUMBER'] = json[item.fulltitle]
-            logger.info('UPDATED=\n' + str(item.channel_prefs))
+            logger.log('UPDATED=\n' + str(item.channel_prefs))
             filetools.write(tvshow_path, head_nfo + item.tojson())

 def add_renumber_options(item, head_nfo, path):
@@ -426,7 +426,7 @@ def save_tvshow(item, episodelist, silent=False):
     @rtype path: str
     @return: serial directory
     """
-    logger.info()
+    logger.log()
     # logger.debug(item.tostring('\n'))
     path = ""
@@ -483,7 +483,7 @@ def save_tvshow(item, episodelist, silent=False):

     if not path:
         path = filetools.join(TVSHOWS_PATH, ("%s [%s]" % (base_name, _id)).strip())
-        logger.info("Creating series directory: " + path)
+        logger.log("Creating series directory: " + path)
         try:
             filetools.mkdir(path)
         except OSError as exception:
@@ -493,7 +493,7 @@ def save_tvshow(item, episodelist, silent=False):
     tvshow_path = filetools.join(path, "tvshow.nfo")
     if not filetools.exists(tvshow_path):
         # We create tvshow.nfo, if it does not exist, with the head_nfo, series info and watched episode marks
-        logger.info("Creating tvshow.nfo: " + tvshow_path)
+        logger.log("Creating tvshow.nfo: " + tvshow_path)
         head_nfo = scraper.get_nfo(item)
         item.infoLabels['mediatype'] = "tvshow"
         item.infoLabels['title'] = item.contentSerieName
@@ -567,11 +567,11 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
     @rtype fallidos: int
     @return: the number of failed episodes
     """
-    logger.info()
+    logger.log()
     episodelist = filter_list(episodelist, serie.action, path)
     # No episode list, nothing to save
     if not len(episodelist):
-        logger.info("There is no episode list, we go out without creating strm")
+        logger.log("There is no episode list, we go out without creating strm")
         return 0, 0, 0

     # process local episodes
@@ -586,7 +586,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
     elif config.get_setting("local_episodes", "videolibrary"):
         done, local_episodes_path = config_local_episodes_path(path, serie)
         if done < 0:
-            logger.info("An issue has occurred while configuring local episodes, going out without creating strm")
+            logger.log("An issue has occurred while configuring local episodes, going out without creating strm")
             return 0, 0, done
         item_nfo.local_episodes_path = local_episodes_path
         filetools.write(nfo_path, head_nfo + item_nfo.tojson())
@@ -710,7 +710,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):

     # No episode list, nothing to save
     if not len(new_episodelist):
-        logger.info("There is no episode list, we go out without creating strm")
+        logger.log("There is no episode list, we go out without creating strm")
         return 0, 0, 0

     local_episodelist += get_local_content(path)
@@ -742,12 +742,12 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
         json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower())

         if season_episode in local_episodelist:
-            logger.info('Skipped: Serie ' + serie.contentSerieName + ' ' + season_episode + ' available as local content')
+            logger.log('Skipped: Serie ' + serie.contentSerieName + ' ' + season_episode + ' available as local content')
             continue

         # check if the episode has been downloaded
         if filetools.join(path, "%s [downloads].json" % season_episode) in ficheros:
-            logger.info('INFO: "%s" episode %s has been downloaded, skipping it' % (serie.contentSerieName, season_episode))
+            logger.log('INFO: "%s" episode %s has been downloaded, skipping it' % (serie.contentSerieName, season_episode))
             continue

         strm_exists = strm_path in ficheros
@@ -800,7 +800,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):

         if filetools.write(json_path, e.tojson()):
             if not json_exists:
-                logger.info("Inserted: %s" % json_path)
+                logger.log("Inserted: %s" % json_path)
                 insertados += 1
                 # We mark episode as unseen
                 news_in_playcounts[season_episode] = 0
@@ -811,14 +811,14 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
                     news_in_playcounts[serie.contentSerieName] = 0

             else:
-                logger.info("Overwritten: %s" % json_path)
+                logger.log("Overwritten: %s" % json_path)
                 sobreescritos += 1
         else:
-            logger.info("Failed: %s" % json_path)
+            logger.log("Failed: %s" % json_path)
             fallidos += 1

         else:
-            logger.info("Failed: %s" % json_path)
+            logger.log("Failed: %s" % json_path)
             fallidos += 1

         if not silent and p_dialog.iscanceled():
@@ -888,7 +888,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):

 def config_local_episodes_path(path, item, silent=False):
-    logger.info(item)
+    logger.log(item)
     from platformcode.xbmc_videolibrary import search_local_path
     local_episodes_path=search_local_path(item)
     if not local_episodes_path:
@@ -900,11 +900,11 @@ def config_local_episodes_path(path, item, silent=False):
             platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(80043))
         local_episodes_path = platformtools.dialog_browse(0, config.get_localized_string(80046))
         if local_episodes_path == '':
-            logger.info("User has canceled the dialog")
+            logger.log("User has canceled the dialog")
             return -2, local_episodes_path
         elif path in local_episodes_path:
             platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(80045))
-            logger.info("Selected folder is the same of the TV show one")
+            logger.log("Selected folder is the same of the TV show one")
             return -2, local_episodes_path

     if local_episodes_path:
@@ -919,7 +919,7 @@ def config_local_episodes_path(path, item, silent=False):

 def process_local_episodes(local_episodes_path, path):
-    logger.info()
+    logger.log()

     sub_extensions = ['.srt', '.sub', '.sbv', '.ass', '.idx', '.ssa', '.smi']
     artwork_extensions = ['.jpg', '.jpeg', '.png']
@@ -958,7 +958,7 @@ def process_local_episodes(local_episodes_path, path):

 def get_local_content(path):
-    logger.info()
+    logger.log()

     local_episodelist = []
     for root, folders, files in filetools.walk(path):
@@ -987,7 +987,7 @@ def add_movie(item):
     @type item: item
     @param item: item to be saved.
     """
-    logger.info()
+    logger.log()
     from platformcode.launcher import set_search_temp; set_search_temp(item)

     # To disambiguate titles, TMDB is caused to ask for the really desired title
@@ -1034,7 +1034,7 @@ def add_tvshow(item, channel=None):
     @param channel: channel from which the series will be saved. By default, item.from_channel or item.channel will be imported.
""" - logger.info("show=#" + item.show + "#") + logger.log("show=#" + item.show + "#") from platformcode.launcher import set_search_temp; set_search_temp(item) if item.channel == "downloads": @@ -1105,7 +1105,7 @@ def add_tvshow(item, channel=None): else: platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(60070) % item.show) - logger.info("%s episodes of series %s have been added to the video library" % (insertados, item.show)) + logger.log("%s episodes of series %s have been added to the video library" % (insertados, item.show)) if config.is_xbmc(): if config.get_setting("sync_trakt_new_tvshow", "videolibrary"): import xbmc @@ -1121,7 +1121,7 @@ def add_tvshow(item, channel=None): def emergency_urls(item, channel=None, path=None, headers={}): - logger.info() + logger.log() import re from servers import torrent try: diff --git a/core/ziptools.py b/core/ziptools.py index 138c4c1b..3b3a3683 100644 --- a/core/ziptools.py +++ b/core/ziptools.py @@ -17,8 +17,8 @@ from core import filetools class ziptools(object): def extract(self, file, dir, folder_to_extract="", overwrite_question=False, backup=False): - logger.info("file= %s" % file) - logger.info("dir= %s" % dir) + logger.log("file= %s" % file) + logger.log("dir= %s" % dir) if not dir.endswith(':') and not filetools.exists(dir): filetools.mkdir(dir) @@ -30,13 +30,13 @@ class ziptools(object): for nameo in zf.namelist(): name = nameo.replace(':', '_').replace('<', '_').replace('>', '_').replace('|', '_').replace('"', '_').replace('?', '_').replace('*', '_') - logger.info("name=%s" % nameo) + logger.log("name=%s" % nameo) if not name.endswith('/'): - logger.info("it's not a directory") + logger.log("it's not a directory") try: (path, filename) = filetools.split(filetools.join(dir, name)) - logger.info("path=%s" % path) - logger.info("name=%s" % name) + logger.log("path=%s" % path) + logger.log("name=%s" % name) if folder_to_extract: if path != filetools.join(dir, folder_to_extract): break @@ -49,7 +49,7 @@ class ziptools(object): else: outfilename = filetools.join(dir, name) - logger.info("outfilename=%s" % outfilename) + logger.log("outfilename=%s" % outfilename) try: if filetools.exists(outfilename) and overwrite_question: from platformcode import platformtools @@ -74,7 +74,7 @@ class ziptools(object): try: zf.close() except: - logger.info("Error closing .zip " + file) + logger.log("Error closing .zip " + file) def _createstructure(self, file, dir): self._makedirs(self._listdirs(file), dir) diff --git a/default.py b/default.py index 7ed2cc72..b5c1474e 100644 --- a/default.py +++ b/default.py @@ -9,7 +9,7 @@ import sys import xbmc from platformcode import config, logger -logger.info("init...") +logger.log("init...") librerias = xbmc.translatePath(os.path.join(config.get_runtime_path(), 'lib')) sys.path.insert(0, librerias) diff --git a/lib/arm_chromeos.py b/lib/arm_chromeos.py index d4808402..4dddd954 100644 --- a/lib/arm_chromeos.py +++ b/lib/arm_chromeos.py @@ -27,7 +27,7 @@ class ChromeOSImage: """ def __init__(self, imgpath): - logger.info('Image Path: ' + imgpath) + logger.log('Image Path: ' + imgpath) """Prepares the image""" self.imgpath = imgpath self.bstream = self.get_bstream(imgpath) @@ -59,7 +59,7 @@ class ChromeOSImage: self.seek_stream(entries_start * lba_size) if not calcsize(part_format) == entry_size: - logger.info('Partition table entries are not 128 bytes long') + logger.log('Partition table entries are not 128 bytes long') return 0 for index in range(1, entries_num + 1): # pylint: 
disable=unused-variable @@ -71,7 +71,7 @@ class ChromeOSImage: break if not offset: - logger.info('Failed to calculate losetup offset.') + logger.log('Failed to calculate losetup offset.') return 0 return offset @@ -93,7 +93,7 @@ class ChromeOSImage: while True: chunk2 = self.read_stream(chunksize) if not chunk2: - logger.info('File %s not found in the ChromeOS image' % filename) + logger.log('File %s not found in the ChromeOS image' % filename) return False chunk = chunk1 + chunk2 diff --git a/lib/generictools.py b/lib/generictools.py index ffe77898..dd344e09 100644 --- a/lib/generictools.py +++ b/lib/generictools.py @@ -25,7 +25,7 @@ intervenido_sucuri = 'Access Denied - Sucuri Website Firewall' def update_title(item): - logger.info() + logger.log() from core import scraper,support @@ -41,7 +41,7 @@ def update_title(item): The channel must add a method to be able to receive the call from Kodi / Alfa, and be able to call this method: def actualizar_titulos(item): - logger.info() + logger.log() itemlist = [] from lib import generictools from platformcode import launcher @@ -205,7 +205,7 @@ def update_title(item): def refresh_screen(item): - logger.info() + logger.log() """ #### Kodi 18 compatibility #### @@ -239,7 +239,7 @@ def refresh_screen(item): def post_tmdb_listado(item, itemlist): - logger.info() + logger.log() itemlist_fo = [] """ @@ -484,7 +484,7 @@ def post_tmdb_listado(item, itemlist): def post_tmdb_seasons(item, itemlist): - logger.info() + logger.log() """ @@ -644,7 +644,7 @@ def post_tmdb_seasons(item, itemlist): def post_tmdb_episodios(item, itemlist): - logger.info() + logger.log() itemlist_fo = [] """ @@ -995,7 +995,7 @@ def post_tmdb_episodios(item, itemlist): def post_tmdb_findvideos(item, itemlist): - logger.info() + logger.log() """ @@ -1215,7 +1215,7 @@ def post_tmdb_findvideos(item, itemlist): def get_field_from_kodi_DB(item, from_fields='*', files='file'): - logger.info() + logger.log() """ Call to read from the Kodi DB the input fields received (from_fields, by default "*") of the video indicated in Item @@ -1293,7 +1293,7 @@ def get_field_from_kodi_DB(item, from_fields='*', files='file'): def fail_over_newpct1(item, patron, patron2=None, timeout=None): - logger.info() + logger.log() import ast """ @@ -1494,7 +1494,7 @@ def fail_over_newpct1(item, patron, patron2=None, timeout=None): def web_intervenida(item, data, desactivar=True): - logger.info() + logger.log() """ @@ -1577,7 +1577,7 @@ def web_intervenida(item, data, desactivar=True): def regenerate_clones(): - logger.info() + logger.log() import json from core import videolibrarytools @@ -1591,7 +1591,7 @@ def regenerate_clones(): # Find the paths where to leave the control .json file, and the Video Library json_path = filetools.exists(filetools.join(config.get_runtime_path(), 'verify_cached_torrents.json')) if json_path: - logger.info('Previously repaired video library: WE ARE GOING') + logger.log('Previously repaired video library: WE ARE GOING') return False json_path = filetools.join(config.get_runtime_path(), 'verify_cached_torrents.json') filetools.write(json_path, json.dumps({"CINE_verify": True})) # Prevents another simultaneous process from being launched @@ -1631,7 +1631,7 @@ def regenerate_clones(): # Delete the Tvshow.nfo files and check if the .nfo has more than one channel and one is clone Newpct1 for file in files: - # logger.info('file - nfos: ' + file) + # logger.log('file - nfos: ' + file) if 'tvshow.nfo' in file: file_path = filetools.join(root, 'tvshow.nfo') filetools.remove(file_path) @@ 
-1697,7 +1697,7 @@ def regenerate_clones(): for file in files: file_path = filetools.join(root, file) if '.json' in file: - logger.info('** file: ' + file) + logger.log('** file: ' + file) canal_json = scrapertools.find_single_match(file, r'\[(\w+)\].json') if canal_json not in nfo.library_urls: filetools.remove(file_path) # we delete the .json is a zombie @@ -1740,7 +1740,7 @@ def regenerate_clones(): def dejuice(data): - logger.info() + logger.log() # Method to unobtrusive JuicyCodes data import base64 diff --git a/lib/megaserver/client.py b/lib/megaserver/client.py index 210fafa3..66e196f8 100644 --- a/lib/megaserver/client.py +++ b/lib/megaserver/client.py @@ -45,7 +45,7 @@ class Client(object): t= Thread(target=self._auto_shutdown) t.setDaemon(True) t.start() - logger.info("MEGA Server Started") + logger.log("MEGA Server Started") def _auto_shutdown(self): while self.running: @@ -75,7 +75,7 @@ class Client(object): def stop(self): self.running = False self._server.stop() - logger.info("MEGA Server Stopped") + logger.log("MEGA Server Stopped") def get_play_list(self): if len(self.files) > 1: @@ -103,7 +103,7 @@ class Client(object): return files except: - logger.info(traceback.format_exc()) + logger.log(traceback.format_exc()) pass return files diff --git a/lib/sambatools/libsmb.py b/lib/sambatools/libsmb.py index 4a66385d..5cb83fbd 100644 --- a/lib/sambatools/libsmb.py +++ b/lib/sambatools/libsmb.py @@ -14,7 +14,7 @@ remote = None def parse_url(url): - # logger.info("Url: %s" % url) + # logger.log("Url: %s" % url) url = url.strip() patron = "^smb://(?:([^;\n]+);)?(?:([^:@\n]+)[:|@])?(?:([^@\n]+)@)?([^/]+)/([^/\n]+)([/]?.*?)$" domain, user, password, server_name, share_name, path = re.compile(patron, re.DOTALL).match(url).groups() @@ -27,7 +27,7 @@ def parse_url(url): if path.endswith("/"): path = path[:-1] if not path: path = "/" - # logger.info("Dominio: '%s' |Usuario: '%s' | Password: '%s' | Servidor: '%s' | IP: '%s' | Share Name: '%s' | Path: '%s'" % (domain, user, password, server_name, server_ip, share_name, path)) + # logger.log("Dominio: '%s' |Usuario: '%s' | Password: '%s' | Servidor: '%s' | IP: '%s' | Share Name: '%s' | Path: '%s'" % (domain, user, password, server_name, server_ip, share_name, path)) return server_name, server_ip, share_name, unicode(path, "utf8"), user, password, domain @@ -46,7 +46,7 @@ def get_server_name_ip(server): def connect(url): - # logger.info("Url: %s" % url) + # logger.log("Url: %s" % url) global remote server_name, server_ip, share_name, path, user, password, domain = parse_url(url) @@ -63,7 +63,7 @@ def connect(url): def listdir(url): - logger.info("Url: %s" % url) + logger.log("Url: %s" % url) remote, share_name, path = connect(url) try: files = [f.filename for f in remote.listPath(share_name, path) if not f.filename in [".", ".."]] @@ -73,7 +73,7 @@ def listdir(url): def walk(url, topdown=True, onerror=None): - logger.info("Url: %s" % url) + logger.log("Url: %s" % url) remote, share_name, path = connect(url) try: @@ -103,7 +103,7 @@ def walk(url, topdown=True, onerror=None): def get_attributes(url): - logger.info("Url: %s" % url) + logger.log("Url: %s" % url) remote, share_name, path = connect(url) try: return remote.getAttributes(share_name, path) @@ -112,7 +112,7 @@ def get_attributes(url): def mkdir(url): - logger.info("Url: %s" % url) + logger.log("Url: %s" % url) remote, share_name, path = connect(url) try: remote.createDirectory(share_name, path) @@ -121,12 +121,12 @@ def mkdir(url): def smb_open(url, mode): - logger.info("Url: %s" % 
url) + logger.log("Url: %s" % url) return SMBFile(url, mode) def isfile(url): - logger.info("Url: %s" % url) + logger.log("Url: %s" % url) remote, share_name, path = connect(url) try: files = [f.filename for f in remote.listPath(share_name, os.path.dirname(path)) if not f.isDirectory] @@ -136,7 +136,7 @@ def isfile(url): def isdir(url): - logger.info("Url: %s" % url) + logger.log("Url: %s" % url) remote, share_name, path = connect(url) try: folders = [f.filename for f in remote.listPath(share_name, os.path.dirname(path)) if f.isDirectory] @@ -146,7 +146,7 @@ def isdir(url): def exists(url): - logger.info("Url: %s" % url) + logger.log("Url: %s" % url) remote, share_name, path = connect(url) try: files = [f.filename for f in remote.listPath(share_name, os.path.dirname(path))] @@ -156,7 +156,7 @@ def exists(url): def remove(url): - logger.info("Url: %s" % url) + logger.log("Url: %s" % url) remote, share_name, path = connect(url) try: remote.deleteFiles(share_name, path) @@ -165,7 +165,7 @@ def remove(url): def rmdir(url): - logger.info("Url: %s" % url) + logger.log("Url: %s" % url) remote, share_name, path = connect(url) try: remote.deleteDirectory(share_name, path) @@ -174,7 +174,7 @@ def rmdir(url): def rename(url, new_name): - logger.info("Url: %s" % url) + logger.log("Url: %s" % url) remote, share_name, path = connect(url) _, _, _, new_name, _, _, _ = parse_url(new_name) try: diff --git a/lib/unshortenit.py b/lib/unshortenit.py index 35c6cce2..02a19d1f 100644 --- a/lib/unshortenit.py +++ b/lib/unshortenit.py @@ -1,16 +1,15 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -try: - from urllib.parse import urlsplit, urlparse, parse_qs, urljoin -except: +import os, re, sys, json, time + +if sys.version_info[0] >= 3: + from urllib.parse import urlsplit, urlparse, parse_qs, urljoin, urlencode + from urllib.request import urlopen +else: + from urllib import urlencode, urlopen from urlparse import urlsplit, urlparse, parse_qs, urljoin -import json -import os -import re -import time -import urllib from base64 import b64decode from core import httptools, scrapertools @@ -61,17 +60,13 @@ class UnshortenIt(object): return uri, "No domain found in URI!" had_google_outbound, uri = self._clear_google_outbound_proxy(uri) - if re.search(self._adfly_regex, domain, - re.IGNORECASE) or type == 'adfly': + if re.search(self._adfly_regex, domain, re.IGNORECASE) or type == 'adfly': uri, code = self._unshorten_adfly(uri) - if re.search(self._adfocus_regex, domain, - re.IGNORECASE) or type == 'adfocus': + if re.search(self._adfocus_regex, domain, re.IGNORECASE) or type == 'adfocus': uri, code = self._unshorten_adfocus(uri) - if re.search(self._linkbucks_regex, domain, - re.IGNORECASE) or type == 'linkbucks': + if re.search(self._linkbucks_regex, domain, re.IGNORECASE) or type == 'linkbucks': uri, code = self._unshorten_linkbucks(uri) - if re.search(self._lnxlu_regex, domain, - re.IGNORECASE) or type == 'lnxlu': + if re.search(self._lnxlu_regex, domain, re.IGNORECASE) or type == 'lnxlu': uri, code = self._unshorten_lnxlu(uri) if re.search(self._shrink_service_regex, domain, re.IGNORECASE): uri, code = self._unshorten_shrink_service(uri) @@ -99,7 +94,7 @@ class UnshortenIt(object): if oldUri == uri: break - logger.info(uri) + logger.log(uri) return uri, code @@ -368,7 +363,7 @@ class UnshortenIt(object): if len(code) > 0: payload = {'click': code[0]} r = httptools.downloadpage( - 'http://lnx.lu?' + urllib.urlencode(payload), + 'http://lnx.lu?' 
+ urlencode(payload), timeout=self._timeout) return r.url, r.code else: @@ -400,7 +395,7 @@ class UnshortenIt(object): payload = {'adSessionId': session_id, 'callback': 'c'} r = httptools.downloadpage( 'http://sh.st/shortest-url/end-adsession?' + - urllib.urlencode(payload), + urlencode(payload), headers=http_header, timeout=self._timeout) response = r.data[6:-2].decode('utf-8') @@ -519,7 +514,7 @@ class UnshortenIt(object): else: if 'sb/' in uri or 'akv/' in uri or 'wss/' in uri or 'wsd/' in uri: import datetime, hashlib - ip = urllib.urlopen('https://api.ipify.org/').read() + ip = urlopen('https://api.ipify.org/').read() day = datetime.date.today().strftime('%Y%m%d') headers = { "Cookie": hashlib.md5(ip+day).hexdigest() + "=1" @@ -531,12 +526,12 @@ class UnshortenIt(object): r = httptools.downloadpage(uri, timeout=self._timeout, headers=headers, follow_redirects=False) if 'Wait 1 hour' in r.data: uri = '' - logger.info('IP bannato da vcrypt, aspetta un ora') + logger.log('IP bannato da vcrypt, aspetta un ora') else: prev_uri = uri uri = r.headers['location'] if uri == prev_uri: - logger.info('Use Cloudscraper') + logger.log('Use Cloudscraper') uri = httptools.downloadpage(uri, timeout=self._timeout, headers=headers, follow_redirects=False, cf=True).headers['location'] if "4snip" in uri: @@ -593,7 +588,7 @@ class UnshortenIt(object): r = httptools.downloadpage(uri, follow_redirect=True, timeout=self._timeout, cookies=False) if 'get/' in r.url: uri = 'https://linkhub.icu/view/' + re.search('\.\./view/([^"]+)', r.data).group(1) - logger.info(uri) + logger.log(uri) r = httptools.downloadpage(uri, follow_redirect=True, timeout=self._timeout, cookies=False) uri = re.search('
diff --git a/platformcode/config.py b/platformcode/config.py - # logger.info("get_setting -> '"+repr(value)+"'") + # logger.log("get_setting -> '"+repr(value)+"'") return value # Specific server setting elif server: - # logger.info("get_setting reading server setting '"+name+"' from server json") + # logger.log("get_setting reading server setting '"+name+"' from server json") from core import servertools value = servertools.get_server_setting(name, server, default) - # logger.info("get_setting -> '"+repr(value)+"'") + # logger.log("get_setting -> '"+repr(value)+"'") return value # Global setting else: - # logger.info("get_setting reading main setting '"+name+"'") + # logger.log("get_setting reading main setting '"+name+"'") value = __settings__.getSetting(name) if not value: return default diff --git a/platformcode/download_and_play.py b/platformcode/download_and_play.py index 06525f42..76ccaf5a 100644 --- a/platformcode/download_and_play.py +++ b/platformcode/download_and_play.py @@ -22,17 +22,17 @@ from platformcode import config, logger # Download a file and start playing while downloading def download_and_play(url, file_name, download_path): # Start thread - logger.info("Active threads " + str(threading.active_count())) - logger.info("" + repr(threading.enumerate())) - logger.info("Starting download thread...") + logger.log("Active threads " + str(threading.active_count())) + logger.log("" + repr(threading.enumerate())) + logger.log("Starting download thread...") download_thread = DownloadThread(url, file_name, download_path) download_thread.start() - logger.info("Download thread started") - logger.info("Active threads " + str(threading.active_count())) - logger.info("" + repr(threading.enumerate())) + logger.log("Download thread started") + logger.log("Active threads " + str(threading.active_count())) + logger.log("" + repr(threading.enumerate())) # Wait - logger.info("Waiting...") + logger.log("Waiting...") while True: cancelled = False @@ -53,7 +53,7 @@ def download_and_play(url, file_name, download_path): dialog.close() - logger.info("End of waiting") + logger.log("End of waiting") # Launch the player player = CustomPlayer() @@ -61,66 +61,66 @@ def download_and_play(url, file_name, download_path): player.PlayStream(download_thread.get_file_name()) # End of playback - logger.info("End of playback") + logger.log("End of playback") if player.is_stopped(): - logger.info("Terminated by user") + logger.log("Terminated by user") break else: if not download_thread.isAlive(): - logger.info("Download has finished") + logger.log("Download has finished") break else: - logger.info("Continua la descarga") + logger.log("Continua la descarga") # When the player finishes, if you continue downloading it for now - logger.info("Download thread alive=" + str(download_thread.isAlive())) + logger.log("Download thread alive=" + str(download_thread.isAlive())) if download_thread.isAlive(): - logger.info("Killing download thread") + logger.log("Killing download thread") download_thread.force_stop() class CustomPlayer(xbmc.Player): def __init__(self, *args, **kwargs): - logger.info() + logger.log() self.actualtime = 0 self.totaltime = 0 self.stopped = False xbmc.Player.__init__(self) def PlayStream(self, url): - logger.info("url=" + url) + logger.log("url=" + url) self.play(url) self.actualtime = 0 self.url = url while self.isPlaying(): self.actualtime = self.getTime() self.totaltime = self.getTotalTime() - logger.info("actualtime=" + str(self.actualtime) + " totaltime=" + str(self.totaltime)) + logger.log("actualtime=" + str(self.actualtime) + " totaltime=" + str(self.totaltime)) xbmc.sleep(3000)
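The download_and_play flow above couples the foreground player with the background download thread only through a sentinel file: force_stop() (further down in this hunk) creates the file, and the block-read loop in download_file() aborts as soon as it sees it. A minimal self-contained sketch of that cancellation pattern; STOP_FILE, n_blocks and the sleep placeholder are illustrative assumptions, not addon code:

import os
import tempfile
import time

STOP_FILE = os.path.join(tempfile.gettempdir(), "force_stop.tmp")  # hypothetical path

def force_stop():
    # Same signalling as DownloadThread.force_stop(): creating the file is the signal.
    with open(STOP_FILE, "w") as f:
        f.write("0")

def download_loop(n_blocks):
    # Stands in for the loop in DownloadThread.download_file(): one check per block.
    for _ in range(n_blocks):
        if os.path.exists(STOP_FILE):
            return "interrupted"
        time.sleep(0.1)  # placeholder for connexion.read(blocksize) / f.write(...)
    return "finished"

A file rather than a shared flag presumably also survives the process boundary the addon sometimes crosses, since the megacrypter path hands the download off to an external script via os.system.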
def set_download_thread(self, download_thread): - logger.info() + logger.log() self.download_thread = download_thread def force_stop_download_thread(self): - logger.info() + logger.log() if self.download_thread.isAlive(): - logger.info("Killing download thread") + logger.log("Killing download thread") self.download_thread.force_stop() # while self.download_thread.isAlive(): # xbmc.sleep(1000) def onPlayBackStarted(self): - logger.info("PLAYBACK STARTED") + logger.log("PLAYBACK STARTED") def onPlayBackEnded(self): - logger.info("PLAYBACK ENDED") + logger.log("PLAYBACK ENDED") def onPlayBackStopped(self): - logger.info("PLAYBACK STOPPED") + logger.log("PLAYBACK STOPPED") self.stopped = True self.force_stop_download_thread() @@ -131,7 +131,7 @@ class CustomPlayer(xbmc.Player): # Download in background class DownloadThread(threading.Thread): def __init__(self, url, file_name, download_path): - # logger.info(repr(file)) + # logger.log(repr(file)) self.url = url self.download_path = download_path self.file_name = os.path.join(download_path, file_name) @@ -148,16 +148,16 @@ class DownloadThread(threading.Thread): threading.Thread.__init__(self) def run(self): - logger.info("Download starts...") + logger.log("Download starts...") if "megacrypter.com" in self.url: self.download_file_megacrypter() else: self.download_file() - logger.info("Download ends") + logger.log("Download ends") def force_stop(self): - logger.info() + logger.log() force_stop_file = open(self.force_stop_file_name, "w") force_stop_file.write("0") force_stop_file.close() @@ -181,38 +181,38 @@ class DownloadThread(threading.Thread): return self.total_size def download_file_megacrypter(self): - logger.info() + logger.log() comando = "./megacrypter.sh" - logger.info("command= " + comando) + logger.log("command= " + comando) oldcwd = os.getcwd() - logger.info("oldcwd= " + oldcwd) + logger.log("oldcwd= " + oldcwd) cwd = os.path.join(config.get_runtime_path(), "tools") - logger.info("cwd= " + cwd) + logger.log("cwd= " + cwd) os.chdir(cwd) - logger.info("directory changed to= " + os.getcwd()) + logger.log("directory changed to= " + os.getcwd()) - logger.info("destination= " + self.download_path) + logger.log("destination= " + self.download_path) os.system(comando + " '" + self.url + "' \"" + self.download_path + "\"") # p = subprocess.Popen([comando , self.url , self.download_path], cwd=cwd, stdout=subprocess.PIPE , stderr=subprocess.PIPE ) # out, err = p.communicate() - # logger.info("DownloadThread.download_file out="+out) + # logger.log("DownloadThread.download_file out="+out) os.chdir(oldcwd) def download_file(self): - logger.info("Direct download") + logger.log("Direct download") headers = [] # Ensures that the file can be created - logger.info("filename= " + self.file_name) + logger.log("filename= " + self.file_name) self.file_name = xbmc.makeLegalFilename(self.file_name) - logger.info("filename= " + self.file_name) - logger.info("url= " + self.url) + logger.log("filename= " + self.file_name) + logger.log("url= " + self.url) # Create the file existSize = 0 @@ -228,13 +228,13 @@ class DownloadThread(threading.Thread): additional_headers = [additional_headers] for additional_header in additional_headers: - logger.info("additional_header: " + additional_header) + logger.log("additional_header: " + additional_header) name = re.findall("(.*?)=.*?", additional_header)[0] value = urllib.parse.unquote_plus(re.findall(".*?=(.*?)$", additional_header)[0]) headers.append([name, value]) self.url = self.url.split("|")[0] - logger.info("url= " + 
self.url) + logger.log("url= " + self.url) # Timeout del socket a 60 segundos socket.setdefaulttimeout(60) @@ -243,7 +243,7 @@ class DownloadThread(threading.Thread): h = urllib.request.HTTPHandler(debuglevel=0) request = urllib.request.Request(self.url) for header in headers: - logger.info("Header= " + header[0] + ": " + header[1]) + logger.log("Header= " + header[0] + ": " + header[1]) request.add_header(header[0], header[1]) # Lanza la petición @@ -272,18 +272,18 @@ class DownloadThread(threading.Thread): self.total_size = int(float(totalfichero) / float(1024 * 1024)) - logger.info("Content-Length=%s" % totalfichero) + logger.log("Content-Length=%s" % totalfichero) blocksize = 100 * 1024 bloqueleido = connexion.read(blocksize) - logger.info("Starting file download, blocked= %s" % len(bloqueleido)) + logger.log("Starting file download, blocked= %s" % len(bloqueleido)) maxreintentos = 10 while len(bloqueleido) > 0: try: if os.path.exists(self.force_stop_file_name): - logger.info("Force_stop file detected, download is interrupted") + logger.log("Force_stop file detected, download is interrupted") f.close() xbmc.executebuiltin("XBMC.Notification(%s,%s,300)" % (config.get_localized_string(60319),config.get_localized_string(60320))) @@ -297,7 +297,7 @@ class DownloadThread(threading.Thread): # except: f.write(bloqueleido) grabado = grabado + len(bloqueleido) - logger.info("grabado=%d de %d" % (grabado, totalfichero)) + logger.log("grabado=%d de %d" % (grabado, totalfichero)) percent = int(float(grabado) * 100 / float(totalfichero)) self.progress = percent totalmb = float(float(totalfichero) / (1024 * 1024)) @@ -323,7 +323,7 @@ class DownloadThread(threading.Thread): except: import sys reintentos = reintentos + 1 - logger.info("ERROR in block download, retry %d" % reintentos) + logger.log("ERROR in block download, retry %d" % reintentos) for line in sys.exc_info(): logger.error("%s" % line) diff --git a/platformcode/envtal.py b/platformcode/envtal.py index a5461a83..541d06ad 100644 --- a/platformcode/envtal.py +++ b/platformcode/envtal.py @@ -344,31 +344,31 @@ def list_env(environment={}): if environment['debug'] == 'False': logger.log_enable(True) - logger.info(sep) - logger.info('KoD environment variables: ' + environment['addon_version'] + ' Debug: ' + environment['debug']) - logger.info(sep) + logger.log(sep) + logger.log('KoD environment variables: ' + environment['addon_version'] + ' Debug: ' + environment['debug']) + logger.log(sep) - logger.info(environment['os_name'] + ' ' + environment['prod_model'] + ' ' + + logger.log(environment['os_name'] + ' ' + environment['prod_model'] + ' ' + environment['os_release'] + ' ' + environment['machine'] + ' ' + environment['architecture'] + ' ' + environment['language']) - logger.info('Kodi ' + environment['num_version'] + ', Vídeo: ' + + logger.log('Kodi ' + environment['num_version'] + ', Vídeo: ' + environment['video_db'] + ', Python ' + environment['python_version']) if environment['cpu_usage']: - logger.info('CPU: ' + environment['cpu_usage']) + logger.log('CPU: ' + environment['cpu_usage']) if environment['mem_total'] or environment['mem_free']: - logger.info('Memory: Total: ' + environment['mem_total'] + ' MB | Disp.: ' + + logger.log('Memory: Total: ' + environment['mem_total'] + ' MB | Disp.: ' + environment['mem_free'] + ' MB | Buffers: ' + str(int(environment['kodi_buffer']) * 3) + ' MB | Buffermode: ' + environment['kodi_bmode'] + ' | Readfactor: ' + environment['kodi_rfactor']) - logger.info('Userdata: ' + environment['userdata_path'] 
+ ' - Free: ' + + logger.log('Userdata: ' + environment['userdata_path'] + ' - Free: ' + environment['userdata_free'].replace('.', ',') + ' GB') - logger.info('Videolibrary: Series/Episodes: ' + environment['videolab_series'] + '/' + + logger.log('Videolibrary: Series/Episodes: ' + environment['videolab_series'] + '/' + environment['videolab_episodios'] + ' - Pelis: ' + environment['videolab_pelis'] + ' - Upd: ' + environment['videolab_update'] + ' - Path: ' + @@ -380,24 +380,24 @@ def list_env(environment={}): # if x == 0: # cliente_alt = cliente.copy() # del cliente_alt['Torrent_opt'] - # logger.info('Torrent: Opt: %s, %s' % (str(cliente['Torrent_opt']), \ + # logger.log('Torrent: Opt: %s, %s' % (str(cliente['Torrent_opt']), \ # str(cliente_alt).replace('{', '').replace('}', '') \ # .replace("'", '').replace('_', ' '))) # elif x == 1 and environment['torrent_error']: - # logger.info('- ' + str(cliente).replace('{', '').replace('}', '') \ + # logger.log('- ' + str(cliente).replace('{', '').replace('}', '') \ # .replace("'", '').replace('_', ' ')) # else: # cliente_alt = cliente.copy() # del cliente_alt['Plug_in'] # cliente_alt['Libre'] = cliente_alt['Libre'].replace('.', ',') + ' GB' - # logger.info('- %s: %s' % (str(cliente['Plug_in']), str(cliente_alt) \ + # logger.log('- %s: %s' % (str(cliente['Plug_in']), str(cliente_alt) \ # .replace('{', '').replace('}', '').replace("'", '') \ # .replace('\\\\', '\\'))) - # logger.info('Proxy: ' + environment['proxy_active']) + # logger.log('Proxy: ' + environment['proxy_active']) - logger.info('LOG Size: ' + environment['log_size'].replace('.', ',') + ' MB') - logger.info(sep) + logger.log('LOG Size: ' + environment['log_size'].replace('.', ',') + ' MB') + logger.log(sep) if environment['debug'] == 'False': logger.log_enable(False) diff --git a/platformcode/launcher.py b/platformcode/launcher.py index 36242bee..b171cad0 100644 --- a/platformcode/launcher.py +++ b/platformcode/launcher.py @@ -19,7 +19,7 @@ def start(): Within this function all calls should go to functions that we want to execute as soon as we open the plugin. """ - logger.info() + logger.log() # config.set_setting('show_once', True) # Test if all the required directories are created config.verify_directories_created() @@ -37,7 +37,7 @@ def start(): updater.showSavedChangelog() def run(item=None): - logger.info() + logger.log() if not item: # Extract item from sys.argv if sys.argv[2]: @@ -76,7 +76,7 @@ def run(item=None): xbmc_videolibrary.ask_set_content(silent=False) config.set_setting('show_once', True) - logger.info(item.tostring()) + logger.log(item.tostring()) try: if not config.get_setting('tmdb_active'): @@ -84,7 +84,7 @@ def run(item=None): # If item has no action, stops here if item.action == "": - logger.info("Item without action") + logger.log("Item without action") return # Action for main menu in channelselector @@ -154,7 +154,7 @@ def run(item=None): channel_file = os.path.join(config.get_runtime_path(), CHANNELS, item.channel + ".py") - logger.info("channel_file= " + channel_file + ' - ' + CHANNELS + ' - ' + item.channel) + logger.log("channel_file= " + channel_file + ' - ' + CHANNELS + ' - ' + item.channel) channel = None @@ -164,7 +164,7 @@ def run(item=None): except ImportError: exec("import " + CHANNELS + "." 
+ item.channel + " as channel") - logger.info("Running channel %s | %s" % (channel.__name__, channel.__file__)) + logger.log("Running channel %s | %s" % (channel.__name__, channel.__file__)) # Special play action if item.action == "play": @@ -174,12 +174,12 @@ def run(item=None): trakt_tools.set_trakt_info(item) except: pass - logger.info("item.action=%s" % item.action.upper()) + logger.log("item.action=%s" % item.action.upper()) # logger.debug("item_toPlay: " + "\n" + item.tostring('\n')) # First checks if channel has a "play" function if hasattr(channel, 'play'): - logger.info("Executing channel 'play' method") + logger.log("Executing channel 'play' method") itemlist = channel.play(item) b_favourite = item.isFavourite # Play should return a list of playable URLS @@ -200,7 +200,7 @@ def run(item=None): # If player don't have a "play" function, not uses the standard play from platformtools else: - logger.info("Executing core 'play' method") + logger.log("Executing core 'play' method") platformtools.play_video(item) # Special action for findvideos, where the plugin looks for known urls @@ -213,7 +213,7 @@ def run(item=None): # If not, uses the generic findvideos function else: - logger.info("No channel 'findvideos' method, " + logger.log("No channel 'findvideos' method, " "executing core method") itemlist = servertools.find_video_items(item) @@ -258,7 +258,7 @@ def run(item=None): else: filetools.remove(temp_search_file) - logger.info("item.action=%s" % item.action.upper()) + logger.log("item.action=%s" % item.action.upper()) from core import channeltools if config.get_setting('last_search'): @@ -279,7 +279,7 @@ def run(item=None): # For all other actions else: # import web_pdb; web_pdb.set_trace() - logger.info("Executing channel '%s' method" % item.action) + logger.log("Executing channel '%s' method" % item.action) itemlist = getattr(channel, item.action)(item) if config.get_setting('trakt_sync'): from core import trakt_tools @@ -361,7 +361,7 @@ def set_search_temp(item): filetools.write(temp_search_file, f) def reorder_itemlist(itemlist): - logger.info() + logger.log() # logger.debug("Inlet itemlist size: %i" % len(itemlist)) new_list = [] @@ -399,7 +399,7 @@ def reorder_itemlist(itemlist): new_list.extend(mod_list) new_list.extend(not_mod_list) - logger.info("Modified Titles:%i |Unmodified:%i" % (modified, not_modified)) + logger.log("Modified Titles:%i |Unmodified:%i" % (modified, not_modified)) if len(new_list) == 0: new_list = itemlist @@ -409,7 +409,7 @@ def reorder_itemlist(itemlist): def limit_itemlist(itemlist): - logger.info() + logger.log() # logger.debug("Inlet itemlist size: %i" % len(itemlist)) try: @@ -442,7 +442,7 @@ def play_from_library(item): itemlist=[] item.fromLibrary = True - logger.info() + logger.log() # logger.debug("item: \n" + item.tostring('\n')) # Try to reproduce an image (this does nothing and also does not give an error) diff --git a/platformcode/logger.py b/platformcode/logger.py index ab7a7a2d..7748545d 100644 --- a/platformcode/logger.py +++ b/platformcode/logger.py @@ -3,12 +3,9 @@ # Logger (kodi) # -------------------------------------------------------------------------------- -import inspect - -import xbmc +import inspect, sys, os, xbmc from platformcode import config -import sys PY3 = False if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int @@ -105,13 +102,13 @@ def error(texto=""): def log(*args): # Function to simplify the log # Automatically returns File Name and Function Name - import os - string = '' - for arg in 
args: string += ' '+str(arg) - frame = inspect.stack()[1] - filename = frame[0].f_code.co_filename - filename = os.path.basename(filename) - info("[" + filename + "] - [" + inspect.stack()[1][3] + "] " + string) + if loggeractive: + string = '' + for arg in args: string += ' '+str(arg) + frame = inspect.stack()[1] + filename = frame[0].f_code.co_filename + filename = os.path.basename(filename) + xbmc.log("[" + filename + "] [" + inspect.stack()[1][3] + "] " + string, xbmc.LOGNOTICE) class WebErrorException(Exception): diff --git a/platformcode/platformtools.py b/platformcode/platformtools.py index d02284d3..3a220451 100644 --- a/platformcode/platformtools.py +++ b/platformcode/platformtools.py @@ -117,7 +117,7 @@ def dialog_browse(_type, heading, shares="files", mask="", useThumbs=False, trea def itemlist_refresh(): # pos = Item().fromurl(xbmc.getInfoLabel('ListItem.FileNameAndPath')).itemlistPosition - # logger.info('Current position: ' + str(pos)) + # logger.log('Current position: ' + str(pos)) xbmc.executebuiltin("Container.Refresh") # while Item().fromurl(xbmc.getInfoLabel('ListItem.FileNameAndPath')).itemlistPosition != pos: @@ -138,7 +138,7 @@ def render_items(itemlist, parent_item): """ Function used to render itemlist on kodi """ - logger.info('START render_items') + logger.log('START render_items') thumb_type = config.get_setting('video_thumbnail_type') from specials import shortcuts from core import httptools @@ -223,7 +223,7 @@ def render_items(itemlist, parent_item): set_view_mode(itemlist[0], parent_item) xbmcplugin.endOfDirectory(_handle) - logger.info('END render_items') + logger.log('END render_items') def getCurrentView(item=None, parent_item=None): @@ -280,11 +280,11 @@ def set_view_mode(item, parent_item): if content: mode = int(config.get_setting('view_mode_%s' % content).split(',')[-1]) if mode == 0: - logger.info('default mode') + logger.log('default mode') mode = 55 xbmcplugin.setContent(handle=int(sys.argv[1]), content=Type) xbmc.executebuiltin('Container.SetViewMode(%s)' % mode) - logger.info('TYPE: ' + Type + ' - ' + 'CONTENT: ' + content) + logger.log('TYPE: ' + Type + ' - ' + 'CONTENT: ' + content) def set_infolabels(listitem, item, player=False): @@ -504,10 +504,10 @@ def is_playing(): def play_video(item, strm=False, force_direct=False, autoplay=False): - logger.info() + logger.log() logger.debug(item.tostring('\n')) if item.channel == 'downloads': - logger.info("Play local video: %s [%s]" % (item.title, item.url)) + logger.log("Play local video: %s [%s]" % (item.title, item.url)) xlistitem = xbmcgui.ListItem(path=item.url) xlistitem.setArt({"thumb": item.thumbnail}) set_infolabels(xlistitem, item, True) @@ -515,7 +515,7 @@ def play_video(item, strm=False, force_direct=False, autoplay=False): return default_action = config.get_setting("default_action") - logger.info("default_action=%s" % default_action) + logger.log("default_action=%s" % default_action) # Open the selection dialog to see the available options opciones, video_urls, seleccion, salir = get_dialogo_opciones(item, default_action, strm, autoplay) @@ -525,8 +525,8 @@ def play_video(item, strm=False, force_direct=False, autoplay=False): seleccion = get_seleccion(default_action, opciones, seleccion, video_urls) if seleccion < 0: return # Canceled box - logger.info("selection=%d" % seleccion) - logger.info("selection=%s" % opciones[seleccion]) + logger.log("selection=%d" % seleccion) + logger.log("selection=%s" % opciones[seleccion]) # run the available option, jdwonloader, download, favorites, add to 
the video library ... IF IT IS NOT PLAY salir = set_opcion(item, seleccion, opciones, video_urls) @@ -687,7 +687,7 @@ def alert_unsopported_server(): def handle_wait(time_to_wait, title, text): - logger.info("handle_wait(time_to_wait=%d)" % time_to_wait) + logger.log("handle_wait(time_to_wait=%d)" % time_to_wait) espera = dialog_progress(' ' + title, "") secs = 0 @@ -706,15 +706,15 @@ def handle_wait(time_to_wait, title, text): break if cancelled: - logger.info('Wait canceled') + logger.log('Wait canceled') return False else: - logger.info('Wait finished') + logger.log('Wait finished') return True def get_dialogo_opciones(item, default_action, strm, autoplay): - logger.info() + logger.log() # logger.debug(item.tostring('\n')) from core import servertools @@ -798,7 +798,7 @@ def get_dialogo_opciones(item, default_action, strm, autoplay): def set_opcion(item, seleccion, opciones, video_urls): - logger.info() + logger.log() # logger.debug(item.tostring('\n')) salir = False # You have not chosen anything, most likely because you have given the ESC @@ -848,7 +848,7 @@ def set_opcion(item, seleccion, opciones, video_urls): def get_video_seleccionado(item, seleccion, video_urls): - logger.info() + logger.log() mediaurl = "" view = False wait_time = 0 @@ -874,7 +874,7 @@ def get_video_seleccionado(item, seleccion, video_urls): mpd = True # If there is no mediaurl it is because the video is not there :) - logger.info("mediaurl=" + mediaurl) + logger.log("mediaurl=" + mediaurl) if mediaurl == "": if item.server == "unknown": alert_unsopported_server() @@ -891,7 +891,7 @@ def get_video_seleccionado(item, seleccion, video_urls): def set_player(item, xlistitem, mediaurl, view, strm, nfo_path=None, head_nfo=None, item_nfo=None): - logger.info() + logger.log() # logger.debug("item:\n" + item.tostring('\n')) # Moved del conector "torrent" here if item.server == "torrent": @@ -908,10 +908,10 @@ def set_player(item, xlistitem, mediaurl, view, strm, nfo_path=None, head_nfo=No player_mode = config.get_setting("player_mode") if (player_mode == 3 and mediaurl.startswith("rtmp")) or item.play_from == 'window' or item.nfo: player_mode = 0 elif "megacrypter.com" in mediaurl: player_mode = 3 - logger.info("mediaurl=" + mediaurl) + logger.log("mediaurl=" + mediaurl) if player_mode == 0: - logger.info('Player Mode: Direct') + logger.log('Player Mode: Direct') # Add the listitem to a playlist playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO) playlist.clear() @@ -924,24 +924,24 @@ def set_player(item, xlistitem, mediaurl, view, strm, nfo_path=None, head_nfo=No trakt_tools.wait_for_update_trakt() elif player_mode == 1: - logger.info('Player Mode: setResolvedUrl') + logger.log('Player Mode: setResolvedUrl') xlistitem.setPath(mediaurl) xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, xlistitem) xbmc.sleep(2500) elif player_mode == 2: - logger.info('Player Mode: Built-In') + logger.log('Player Mode: Built-In') xbmc.executebuiltin("PlayMedia(" + mediaurl + ")") elif player_mode == 3: - logger.info('Player Mode: Download and Play') + logger.log('Player Mode: Download and Play') from platformcode import download_and_play download_and_play.download_and_play(mediaurl, "download_and_play.tmp", config.get_setting("downloadpath")) return # ALL LOOKING TO REMOVE VIEW if item.subtitle and view: - logger.info("External subtitles: " + item.subtitle) + logger.log("External subtitles: " + item.subtitle) xbmc.sleep(2000) xbmc_player.setSubtitles(item.subtitle) @@ -967,7 +967,7 @@ def torrent_client_installed(show_tuple=False): def 
play_torrent(item, xlistitem, mediaurl): - logger.info() + logger.log() import time from servers import torrent @@ -1087,17 +1087,17 @@ def install_inputstream(): # Check if InputStream add-on exists! Addon('inputstream.adaptive') - logger.info('InputStream add-on installed from repo.') + logger.log('InputStream add-on installed from repo.') except RuntimeError: - logger.info('InputStream add-on not installed.') + logger.log('InputStream add-on not installed.') dialog_ok(config.get_localized_string(20000), config.get_localized_string(30126)) return False else: try: Addon('inputstream.adaptive') - logger.info('InputStream add-on is installed and enabled') + logger.log('InputStream add-on is installed and enabled') except: - logger.info('enabling InputStream add-on') + logger.log('enabling InputStream add-on') xbmc.executebuiltin('UpdateLocalAddons') xbmc.executeJSONRPC('{"jsonrpc": "2.0", "id":1, "method": "Addons.SetAddonEnabled", "params": { "addonid": "inputstream.adaptive", "enabled": true }}') return True @@ -1212,13 +1212,13 @@ def best_chromeos_image(devices): # Select the newest version from distutils.version import LooseVersion # pylint: disable=import-error,no-name-in-module,useless-suppression if LooseVersion(device['version']) > LooseVersion(best['version']): - logger.info('%s (%s) is newer than %s (%s)' % (device['hwid'], device['version'], best['hwid'], best['version'])) + logger.log('%s (%s) is newer than %s (%s)' % (device['hwid'], device['version'], best['hwid'], best['version'])) best = device # Select the smallest image (disk space requirement) elif LooseVersion(device['version']) == LooseVersion(best['version']): if int(device['filesize']) + int(device['zipfilesize']) < int(best['filesize']) + int(best['zipfilesize']): - logger.info('%s (%d) is smaller than %s (%d)' % (device['hwid'], int(device['filesize']) + int(device['zipfilesize']), best['hwid'], int(best['filesize']) + int(best['zipfilesize']))) + logger.log('%s (%d) is smaller than %s (%d)' % (device['hwid'], int(device['filesize']) + int(device['zipfilesize']), best['hwid'], int(best['filesize']) + int(best['zipfilesize']))) best = device return best diff --git a/platformcode/recaptcha.py b/platformcode/recaptcha.py index c4422143..c609227a 100644 --- a/platformcode/recaptcha.py +++ b/platformcode/recaptcha.py @@ -65,7 +65,7 @@ class Recaptcha(xbmcgui.WindowXMLDialog): data = httptools.downloadpage(self.url, post=post, headers=self.headers).data from platformcode import logger - logger.info(data) + logger.log(data) self.result = scrapertools.find_single_match(data, '
.*?>([^<]+)<') if self.result: platformtools.dialog_notification("Captcha corretto", "Verifica conclusa") diff --git a/platformcode/subtitletools.py b/platformcode/subtitletools.py index 9a90bd1f..20d231e7 100644 --- a/platformcode/subtitletools.py +++ b/platformcode/subtitletools.py @@ -84,7 +84,7 @@ def regex_tvshow(compare, file, sub=""): def set_Subtitle(): - logger.info() + logger.log() exts = [".srt", ".sub", ".txt", ".smi", ".ssa", ".ass"] subtitle_folder_path = filetools.join(config.get_data_path(), "subtitles") @@ -93,7 +93,7 @@ def set_Subtitle(): if subtitle_type == "2": subtitle_path = config.get_setting("subtitlepath_file") - logger.info("Con subtitulo : " + subtitle_path) + logger.log("Con subtitulo : " + subtitle_path) xbmc.Player().setSubtitles(subtitle_path) else: if subtitle_type == "0": @@ -106,7 +106,7 @@ def set_Subtitle(): long_v = len(subtitle_path) if long_v > 0: if subtitle_path.startswith("http") or subtitle_path[long_v - 4, long] in exts: - logger.info("Con subtitulo : " + subtitle_path) + logger.log("Con subtitulo : " + subtitle_path) xbmc.Player().setSubtitles(subtitle_path) return else: @@ -125,7 +125,7 @@ def set_Subtitle(): Subnames = glob.glob(filetools.join(subtitle_path, "Movies", subtitle_name + "*.??.???")) for Subname in Subnames: if os.path.splitext(Subname)[1] in exts: - logger.info("Con subtitulo : " + filetools.split(Subname)[1]) + logger.log("Con subtitulo : " + filetools.split(Subname)[1]) xbmc.Player().setSubtitles((Subname)) except: logger.error("error al cargar subtitulos") @@ -216,7 +216,7 @@ def searchSubtitle(item): filetools.mkdir(full_path_tvshow) # title_new + ".mp4" full_path_video_new = xbmc.translatePath( filetools.join(full_path_tvshow, "%s %sx%s.mp4" % (tvshow_title, season, episode))) - logger.info(full_path_video_new) + logger.log(full_path_video_new) listitem = xbmcgui.ListItem(title_new, iconImage="DefaultVideo.png", thumbnailImage="") listitem.setInfo("video", {"Title": title_new, "Genre": "Tv shows", "episode": int(episode), "season": int(season), "tvshowtitle": tvshow_title}) @@ -230,7 +230,7 @@ def searchSubtitle(item): try: filetools.copy(path_video_temp, full_path_video_new) copy = True - logger.info("nuevo path =" + full_path_video_new) + logger.log("nuevo path =" + full_path_video_new) time.sleep(2) playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO) playlist.clear() @@ -288,7 +288,7 @@ def get_from_subdivx(sub_url): :return: The path to the unzipped subtitle """ - logger.info() + logger.log() sub = '' sub_dir = os.path.join(config.get_data_path(), 'temp_subs') @@ -312,9 +312,9 @@ def get_from_subdivx(sub_url): filetools.write(filename, data_dl) sub = extract_file_online(sub_dir, filename) except: - logger.info('sub invalid') + logger.log('sub invalid') else: - logger.info('sub invalid') + logger.log('sub invalid') return sub @@ -328,7 +328,7 @@ def extract_file_online(path, filename): :return: """ - logger.info() + logger.log() url = "http://online.b1.org/rest/online/upload" diff --git a/platformcode/unify.py b/platformcode/unify.py index ec90ffd3..71ccf803 100644 --- a/platformcode/unify.py +++ b/platformcode/unify.py @@ -98,7 +98,7 @@ thumb_dict = {"movies": "https://s10.postimg.cc/fxtqzdog9/peliculas.png", def set_genre(string): - # logger.info() + # logger.log() genres_dict = {'accion': ['accion', 'action', 'accion y aventura', 'action & adventure'], 'adultos': ['adultos', 'adultos +', 'adulto'], @@ -140,7 +140,7 @@ def set_genre(string): def remove_format(string): - # logger.info() + # logger.log() # logger.debug('enter 
remove: %s' % string) string = string.rstrip() string = re.sub(r'(\[|\[\/)(?:color|COLOR|b|B|i|I).*?\]|\[|\]|\(|\)|\:|\.', '', string) @@ -156,7 +156,7 @@ def normalize(string): def simplify(string): - # logger.info() + # logger.log() # logger.debug('enter simplify: %s'%string) string = remove_format(string) string = string.replace('-', ' ').replace('_', ' ') @@ -175,7 +175,7 @@ def simplify(string): def add_languages(title, languages): - # logger.info() + # logger.log() if isinstance(languages, list): for language in languages: @@ -186,7 +186,7 @@ def add_languages(title, languages): def add_info_plot(plot, languages, quality): - # logger.info() + # logger.log() last = '[/I][/B]\n' if languages: @@ -221,7 +221,7 @@ def add_info_plot(plot, languages, quality): def set_color(title, category): - # logger.info() + # logger.log() from core import jsontools styles_path = os.path.join(config.get_runtime_path(), 'resources', 'color_styles.json') @@ -262,7 +262,7 @@ def set_color(title, category): def set_lang(language): - # logger.info() + # logger.log() cast = ['castellano', 'español', 'espanol', 'cast', 'esp', 'espaol', 'es', 'zc', 'spa', 'spanish', 'vc'] ita = ['italiano', 'italian', 'ita', 'it'] @@ -303,7 +303,7 @@ def set_lang(language): def title_format(item): - # logger.info() + # logger.log() lang = False valid = True @@ -567,7 +567,7 @@ def title_format(item): def thumbnail_type(item): - # logger.info() + # logger.log() # Check what type of thumbnail will be used in findvideos, Poster or Logo of the server thumb_type = config.get_setting('video_thumbnail_type') diff --git a/platformcode/updater.py b/platformcode/updater.py index 4592a931..e2dc427e 100644 --- a/platformcode/updater.py +++ b/platformcode/updater.py @@ -34,7 +34,7 @@ changelogFile = "special://profile/addon_data/plugin.video.kod/changelog.txt" def loadCommits(page=1): apiLink = 'https://api.github.com/repos/' + user + '/' + repo + '/commits?sha=' + branch + "&page=" + str(page) - logger.info(apiLink) + logger.log(apiLink) # riprova ogni secondo finchè non riesce (ad esempio per mancanza di connessione) for n in range(10): try: @@ -54,7 +54,7 @@ def loadCommits(page=1): def check(background=False): if not addon.getSetting('addon_update_enabled'): return False, False - logger.info('Cerco aggiornamenti..') + logger.log('Cerco aggiornamenti..') commits = loadCommits() if not commits: return False, False @@ -66,7 +66,7 @@ def check(background=False): localCommitFile = open(os.path.join(addonDir, trackingFile), 'r+') localCommitSha = localCommitFile.read() localCommitSha = localCommitSha.replace('\n', '') # da testare - logger.info('Commit locale: ' + localCommitSha) + logger.log('Commit locale: ' + localCommitSha) updated = False serviceChanged = False @@ -91,7 +91,7 @@ def check(background=False): # evitiamo di applicare i merge commit if 'Merge' in commitJson['commit']['message']: continue - logger.info('aggiornando a ' + commitJson['sha']) + logger.log('aggiornando a ' + commitJson['sha']) alreadyApplied = True # major update @@ -108,7 +108,7 @@ def check(background=False): if file["filename"] == trackingFile: # il file di tracking non si modifica continue else: - logger.info(file["filename"]) + logger.log(file["filename"]) if 'resources/language' in file["filename"]: poFilesChanged = True if 'service.py' in file["filename"]: @@ -138,7 +138,7 @@ def check(background=False): localFile.writelines(patched) localFile.close() else: # nel caso ci siano stati problemi - logger.info('lo sha non corrisponde, scarico il file') + 
logger.log('lo sha non corrisponde, scarico il file') localFile.close() urllib.urlretrieve(file['raw_url'], os.path.join(addonDir, file['filename'])) else: # è un file NON testuale, lo devo scaricare @@ -191,7 +191,7 @@ def check(background=False): elif changelog: platformtools.dialog_ok(config.get_localized_string(20000), config.get_localized_string(80041) + changelog) else: - logger.info('Nessun nuovo aggiornamento') + logger.log('Nessun nuovo aggiornamento') return updated, serviceChanged @@ -207,7 +207,7 @@ def showSavedChangelog(): def calcCurrHash(): treeHash = githash.tree_hash(addonDir).hexdigest() - logger.info('tree hash: ' + treeHash) + logger.log('tree hash: ' + treeHash) commits = loadCommits() lastCommitSha = commits[0]['sha'] page = 1 @@ -227,7 +227,7 @@ def calcCurrHash(): if found: break else: - logger.info('Non sono riuscito a trovare il commit attuale, scarico lo zip') + logger.log('Non sono riuscito a trovare il commit attuale, scarico lo zip') hash = updateFromZip() # se ha scaricato lo zip si trova di sicuro all'ultimo commit localCommitFile = open(os.path.join(xbmc.translatePath("special://home/addons/"), 'plugin.video.kod', trackingFile), 'w') @@ -294,9 +294,9 @@ def updateFromZip(message=config.get_localized_string(80050)): destpathname = xbmc.translatePath("special://home/addons/") extractedDir = filetools.join(destpathname, "addon-" + branch) - logger.info("remotefilename=%s" % remotefilename) - logger.info("localfilename=%s" % localfilename) - logger.info('extract dir: ' + extractedDir) + logger.log("remotefilename=%s" % remotefilename) + logger.log("localfilename=%s" % localfilename) + logger.log('extract dir: ' + extractedDir) # pulizia preliminare remove(localfilename) @@ -307,24 +307,24 @@ def updateFromZip(message=config.get_localized_string(80050)): lambda nb, bs, fs, url=remotefilename: _pbhook(nb, bs, fs, url, dp)) except Exception as e: platformtools.dialog_ok(config.get_localized_string(20000), config.get_localized_string(80031)) - logger.info('Non sono riuscito a scaricare il file zip') - logger.info(e) + logger.log('Non sono riuscito a scaricare il file zip') + logger.log(e) dp.close() return False # Lo descomprime - logger.info("decompressione...") - logger.info("destpathname=%s" % destpathname) + logger.log("decompressione...") + logger.log("destpathname=%s" % destpathname) if os.path.isfile(localfilename): - logger.info('il file esiste') + logger.log('il file esiste') dp.update(80, config.get_localized_string(20000) + '\n' + config.get_localized_string(80032)) import zipfile try: hash = fixZipGetHash(localfilename) - logger.info(hash) + logger.log(hash) with zipfile.ZipFile(filetools.file_open(localfilename, 'rb', vfs=False)) as zip: size = sum([zinfo.file_size for zinfo in zip.filelist]) @@ -335,7 +335,7 @@ def updateFromZip(message=config.get_localized_string(80050)): dp.update(int(80 + cur_size * 15 / size)) except Exception as e: - logger.info('Non sono riuscito ad estrarre il file zip') + logger.log('Non sono riuscito ad estrarre il file zip') logger.error(e) import traceback logger.error(traceback.print_exc()) @@ -355,7 +355,7 @@ def updateFromZip(message=config.get_localized_string(80050)): rename(extractedDir, 'plugin.video.kod') addonDir = filetools.join(destpathname, 'plugin.video.kod') - logger.info("Cancellando il file zip...") + logger.log("Cancellando il file zip...") remove(localfilename) dp.update(100) @@ -384,7 +384,7 @@ def remove(file): try: os.remove(file) except: - logger.info('File ' + file + ' NON eliminato') + 
logger.log('File ' + file + ' NON eliminato') def onerror(func, path, exc_info): @@ -411,7 +411,7 @@ def removeTree(dir): try: shutil.rmtree(dir, ignore_errors=False, onerror=onerror) except Exception as e: - logger.info('Cartella ' + dir + ' NON eliminata') + logger.log('Cartella ' + dir + ' NON eliminata') logger.error(e) @@ -419,7 +419,7 @@ def rename(dir1, dir2): try: filetools.rename(dir1, dir2, silent=True, vfs=False) except: - logger.info('cartella ' + dir1 + ' NON rinominata') + logger.log('cartella ' + dir1 + ' NON rinominata') # https://stackoverflow.com/questions/3083235/unzipping-file-results-in-badzipfile-file-is-not-a-zip-file diff --git a/platformcode/xbmc_info_window.py b/platformcode/xbmc_info_window.py index 19148fa2..22e75e1e 100644 --- a/platformcode/xbmc_info_window.py +++ b/platformcode/xbmc_info_window.py @@ -261,7 +261,7 @@ class InfoWindow(xbmcgui.WindowXMLDialog): return self.return_value def onClick(self, _id): - logger.info("onClick id=" + repr(_id)) + logger.log("onClick id=" + repr(_id)) if _id == ID_BUTTON_PREVIOUS and self.indexList > 0: self.indexList -= 1 self.get_scraper_data(self.listData[self.indexList]) @@ -281,7 +281,7 @@ class InfoWindow(xbmcgui.WindowXMLDialog): self.return_value = None def onAction(self, action): - logger.info("action=" + repr(action.getId())) + logger.log("action=" + repr(action.getId())) action = action.getId() # Find Focus diff --git a/platformcode/xbmc_videolibrary.py b/platformcode/xbmc_videolibrary.py index 1dbb5103..86ec7810 100644 --- a/platformcode/xbmc_videolibrary.py +++ b/platformcode/xbmc_videolibrary.py @@ -17,7 +17,7 @@ from xml.dom import minidom def mark_auto_as_watched(item, nfo_path=None, head_nfo=None, item_nfo=None): def mark_as_watched_subThread(item, nfo_path, head_nfo, item_nfo): - logger.info() + logger.log() # logger.debug("item:\n" + item.tostring('\n')) time_limit = time.time() + 30 @@ -99,7 +99,7 @@ def sync_trakt_addon(path_folder): """ Updates the values ​​of episodes seen if """ - logger.info() + logger.log() # if the addon exists we do the search if xbmc.getCondVisibility('System.HasAddon("script.trakt")'): # we import dependencies @@ -225,7 +225,7 @@ def sync_trakt_kodi(silent=True): notificacion = False xbmc.executebuiltin('RunScript(script.trakt,action=sync,silent=%s)' % silent) - logger.info("Synchronization with Trakt started") + logger.log("Synchronization with Trakt started") if notificacion: platformtools.dialog_notification(config.get_localized_string(20000), config.get_localized_string(60045), sound=False, time=2000) @@ -239,7 +239,7 @@ def mark_content_as_watched_on_kodi(item, value=1): @type value: int @param value: > 0 for seen, 0 for not seen """ - logger.info() + logger.log() # logger.debug("item:\n" + item.tostring('\n')) payload_f = '' @@ -311,7 +311,7 @@ def mark_season_as_watched_on_kodi(item, value=1): @type value: int @param value: > 0 for seen, 0 for not seen """ - logger.info() + logger.log() # logger.debug("item:\n" + item.tostring('\n')) # We can only mark the season as seen in the Kodi database if the database is local, in case of sharing database this functionality will not work @@ -345,7 +345,7 @@ def mark_content_as_watched_on_kod(path): @type str: path @param path: content folder to mark """ - logger.info() + logger.log() #logger.debug("path: " + path) FOLDER_MOVIES = config.get_setting("folder_movies") @@ -435,7 +435,7 @@ def get_data(payload): :return: """ import urllib.request, urllib.error - logger.info("payload: %s" % payload) + logger.log("payload: %s" % payload) 
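The get_data() helper being modified in this hunk is the addon's bridge to Kodi's JSON-RPC API: it serializes the payload dict to JSON and POSTs it to Kodi's web server. A minimal sketch of the same exchange, assuming Kodi's web server is enabled on its default localhost:8080 (the host, port and the VideoLibrary.Scan payload here are illustrative assumptions, not values taken from this diff):

    import json
    import urllib.request

    payload = {"jsonrpc": "2.0", "method": "VideoLibrary.Scan",
               "params": {"directory": "special://home/"}, "id": 1}
    req = urllib.request.Request(
        "http://localhost:8080/jsonrpc",                 # assumed host/port
        data=json.dumps(payload).encode("utf-8"),
        headers={"content-type": "application/json"})    # mandatory, see the 415 note below
    with urllib.request.urlopen(req) as f:
        data = json.loads(f.read())

The content-type header is the one non-optional detail: as the comment that follows notes, Kodi rejects the request with HTTP 415 without it.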
# Required header for XBMC JSON-RPC calls, otherwise you'll get a 415 HTTP response code - Unsupported media type headers = {'content-type': 'application/json'} @@ -452,7 +452,7 @@ def get_data(payload): response = f.read() f.close() - logger.info("get_data: response %s" % response) + logger.log("get_data: response %s" % response) data = jsontools.load(response) except Exception as ex: template = "An exception of type %s occured. Arguments:\n%r" @@ -468,7 +468,7 @@ def get_data(payload): logger.error("error en xbmc.executeJSONRPC: %s" % message) data = ["error"] - logger.info("data: %s" % data) + logger.log("data: %s" % data) return data @@ -482,7 +482,7 @@ def update(folder_content=config.get_setting("folder_tvshows"), folder=""): @type folder: str @param folder: name of the folder to scan. """ - logger.info(folder) + logger.log(folder) payload = { "jsonrpc": "2.0", @@ -546,7 +546,7 @@ def set_content(content_type, silent=False, custom=False): @type content_type: str ('movie' o 'tvshow') @param content_type: content type to configure, series or movies """ - logger.info() + logger.log() continuar = True msg_text = "" videolibrarypath = config.get_setting("videolibrarypath") @@ -572,7 +572,7 @@ def set_content(content_type, silent=False, custom=False): try: # Install metadata.themoviedb.org xbmc.executebuiltin('xbmc.installaddon(metadata.themoviedb.org)', True) - logger.info("Instalado el Scraper de películas de TheMovieDB") + logger.log("Instalado el Scraper de películas de TheMovieDB") except: pass @@ -626,7 +626,7 @@ def set_content(content_type, silent=False, custom=False): try: # Install metadata.tvdb.com xbmc.executebuiltin('xbmc.installaddon(metadata.tvdb.com)', True) - logger.info("The TVDB series Scraper installed ") + logger.log("The TVDB series Scraper installed ") except: pass @@ -721,7 +721,7 @@ def set_content(content_type, silent=False, custom=False): strScraper = 'metadata.universal' path_settings = xbmc.translatePath("special://profile/addon_data/metadata.universal/settings.xml") if not os.path.exists(path_settings): - logger.info("%s: %s" % (content_type, path_settings + " doesn't exist")) + logger.log("%s: %s" % (content_type, path_settings + " doesn't exist")) return continuar settings_data = filetools.read(path_settings) strSettings = ' '.join(settings_data.split()).replace("> <", "><") @@ -740,7 +740,7 @@ def set_content(content_type, silent=False, custom=False): strScraper = 'metadata.tvshows.themoviedb.org' path_settings = xbmc.translatePath("special://profile/addon_data/metadata.tvshows.themoviedb.org/settings.xml") if not os.path.exists(path_settings): - logger.info("%s: %s" % (content_type, path_settings + " doesn't exist")) + logger.log("%s: %s" % (content_type, path_settings + " doesn't exist")) return continuar settings_data = filetools.read(path_settings) strSettings = ' '.join(settings_data.split()).replace("> <", "><") @@ -750,7 +750,7 @@ def set_content(content_type, silent=False, custom=False): videolibrarypath += sep strPath = videolibrarypath + config.get_setting("folder_tvshows") + sep - logger.info("%s: %s" % (content_type, strPath)) + logger.log("%s: %s" % (content_type, strPath)) # We check if strPath already exists in the DB to avoid duplicates sql = 'SELECT idPath FROM path where strPath="%s"' % strPath nun_records, records = execute_sql_kodi(sql) @@ -792,15 +792,15 @@ def set_content(content_type, silent=False, custom=False): heading = config.get_localized_string(70103) % content_type msg_text = config.get_localized_string(70104) - 
logger.info("%s: %s" % (heading, msg_text)) + logger.log("%s: %s" % (heading, msg_text)) return continuar def update_db(old_path, new_path, old_movies_folder, new_movies_folder, old_tvshows_folder, new_tvshows_folder, progress): def path_replace(path, old, new): - logger.info() - logger.info('path: ' + path + ', old: ' + old + ', new: ' + new) + logger.log() + logger.log('path: ' + path + ', old: ' + old + ', new: ' + new) if new.startswith("special://") or '://' in new: sep = '/' else: sep = os.sep @@ -811,7 +811,7 @@ def update_db(old_path, new_path, old_movies_folder, new_movies_folder, old_tvsh return path - logger.info() + logger.log() sql_old_path = old_path if sql_old_path.startswith("special://"): @@ -823,10 +823,10 @@ def update_db(old_path, new_path, old_movies_folder, new_movies_folder, old_tvsh if not sql_old_path.endswith(sep): sql_old_path += sep - logger.info('sql_old_path: ' + sql_old_path) + logger.log('sql_old_path: ' + sql_old_path) # search MAIN path in the DB sql = 'SELECT idPath, strPath FROM path where strPath LIKE "%s"' % sql_old_path - logger.info('sql: ' + sql) + logger.log('sql: ' + sql) nun_records, records = execute_sql_kodi(sql) # change main path @@ -834,7 +834,7 @@ def update_db(old_path, new_path, old_movies_folder, new_movies_folder, old_tvsh idPath = records[0][0] strPath = path_replace(records[0][1], old_path, new_path) sql = 'UPDATE path SET strPath="%s" WHERE idPath=%s' % (strPath, idPath) - logger.info('sql: ' + sql) + logger.log('sql: ' + sql) nun_records, records = execute_sql_kodi(sql) else: progress.update(100) @@ -851,7 +851,7 @@ def update_db(old_path, new_path, old_movies_folder, new_movies_folder, old_tvsh # Search Main Sub Folder sql = 'SELECT idPath, strPath FROM path where strPath LIKE "%s"' % sql_old_folder - logger.info('sql: ' + sql) + logger.log('sql: ' + sql) nun_records, records = execute_sql_kodi(sql) # Change Main Sub Folder @@ -860,13 +860,13 @@ def update_db(old_path, new_path, old_movies_folder, new_movies_folder, old_tvsh idPath = record[0] strPath = path_replace(record[1], filetools.join(old_path, OldFolder), filetools.join(new_path, NewFolder)) sql = 'UPDATE path SET strPath="%s" WHERE idPath=%s' % (strPath, idPath) - logger.info('sql: ' + sql) + logger.log('sql: ' + sql) nun_records, records = execute_sql_kodi(sql) # Search if Sub Folder exixt in all paths sql_old_folder += '%' sql = 'SELECT idPath, strPath FROM path where strPath LIKE "%s"' % sql_old_folder - logger.info('sql: ' + sql) + logger.log('sql: ' + sql) nun_records, records = execute_sql_kodi(sql) #Change Sub Folder in all paths @@ -875,7 +875,7 @@ def update_db(old_path, new_path, old_movies_folder, new_movies_folder, old_tvsh idPath = record[0] strPath = path_replace(record[1], filetools.join(old_path, OldFolder), filetools.join(new_path, NewFolder)) sql = 'UPDATE path SET strPath="%s" WHERE idPath=%s' % (strPath, idPath) - logger.info('sql: ' + sql) + logger.log('sql: ' + sql) nun_records, records = execute_sql_kodi(sql) @@ -883,27 +883,27 @@ def update_db(old_path, new_path, old_movies_folder, new_movies_folder, old_tvsh # if is Movie Folder # search and modify in "movie" sql = 'SELECT idMovie, c22 FROM movie where c22 LIKE "%s"' % sql_old_folder - logger.info('sql: ' + sql) + logger.log('sql: ' + sql) nun_records, records = execute_sql_kodi(sql) if records: for record in records: idMovie = record[0] strPath = path_replace(record[1], filetools.join(old_path, OldFolder), filetools.join(new_path, NewFolder)) sql = 'UPDATE movie SET c22="%s" WHERE idMovie=%s' % 
(strPath, idMovie) - logger.info('sql: ' + sql) + logger.log('sql: ' + sql) nun_records, records = execute_sql_kodi(sql) else: # if is TV Show Folder # search and modify in "episode" sql = 'SELECT idEpisode, c18 FROM episode where c18 LIKE "%s"' % sql_old_folder - logger.info('sql: ' + sql) + logger.log('sql: ' + sql) nun_records, records = execute_sql_kodi(sql) if records: for record in records: idEpisode = record[0] strPath = path_replace(record[1], filetools.join(old_path, OldFolder), filetools.join(new_path, NewFolder)) sql = 'UPDATE episode SET c18="%s" WHERE idEpisode=%s' % (strPath, idEpisode) - logger.info('sql: ' + sql) + logger.log('sql: ' + sql) nun_records, records = execute_sql_kodi(sql) p += 5 progress.update(p, config.get_localized_string(20000) + '\n' + config.get_localized_string(80013)) @@ -928,26 +928,26 @@ def clean(path_list=[]): return path, sep - logger.info() + logger.log() progress = platformtools.dialog_progress_bg(config.get_localized_string(20000), config.get_localized_string(80025)) progress.update(0) # if the path list is empty, clean the entire video library if not path_list: - logger.info('the path list is empty, clean the entire video library') + logger.log('the path list is empty, clean the entire video library') if not config.get_setting("videolibrary_kodi"): sql_path, sep = sql_format(config.get_setting("videolibrarypath")) if not sql_path.endswith(sep): sql_path += sep sql = 'SELECT idPath FROM path where strPath LIKE "%s"' % sql_path - logger.info('sql: ' + sql) + logger.log('sql: ' + sql) nun_records, records = execute_sql_kodi(sql) idPath = records[0][0] sql = 'DELETE from path WHERE idPath=%s' % idPath - logger.info('sql: ' + sql) + logger.log('sql: ' + sql) nun_records, records = execute_sql_kodi(sql) sql = 'DELETE from path WHERE idParentPath=%s' % idPath - logger.info('sql: ' + sql) + logger.log('sql: ' + sql) nun_records, records = execute_sql_kodi(sql) from core import videolibrarytools @@ -961,7 +961,7 @@ def clean(path_list=[]): if filetools.exists(tvshow_nfo): path_list.append(filetools.join(config.get_setting("videolibrarypath"), videolibrarytools.FOLDER_TVSHOWS, folder)) - logger.info('path_list: ' + str(path_list)) + logger.log('path_list: ' + str(path_list)) if path_list: t = float(100) / len(path_list) for i, path in enumerate(path_list): progress.update(int(math.ceil((i + 1) * t))) @@ -971,13 +971,13 @@ def clean(path_list=[]): sql_path, sep = sql_format(path) if filetools.isdir(path) and not sql_path.endswith(sep): sql_path += sep - logger.info('path: ' + path) - logger.info('sql_path: ' + sql_path) + logger.log('path: ' + path) + logger.log('sql_path: ' + sql_path) if filetools.isdir(path): # search movie in the DB sql = 'SELECT idMovie FROM movie where c22 LIKE "%s"' % (sql_path + '%') - logger.info('sql: ' + sql) + logger.log('sql: ' + sql) nun_records, records = execute_sql_kodi(sql) # delete movie if records: @@ -986,7 +986,7 @@ def clean(path_list=[]): continue # search TV show in the DB sql = 'SELECT idShow FROM tvshow_view where strPath LIKE "%s"' % sql_path - logger.info('sql: ' + sql) + logger.log('sql: ' + sql) nun_records, records = execute_sql_kodi(sql) # delete TV show if records: @@ -995,7 +995,7 @@ def clean(path_list=[]): elif config.get_setting("folder_movies") in sql_path: # search movie in the DB sql = 'SELECT idMovie FROM movie where c22 LIKE "%s"' % sql_path - logger.info('sql: ' + sql) + logger.log('sql: ' + sql) nun_records, records = execute_sql_kodi(sql) # delete movie if records: @@ -1004,7 +1004,7 @@ def 
clean(path_list=[]): else: # search episode in the DB sql = 'SELECT idEpisode FROM episode where c18 LIKE "%s"' % sql_path - logger.info('sql: ' + sql) + logger.log('sql: ' + sql) nun_records, records = execute_sql_kodi(sql) # delete episode if records: @@ -1023,7 +1023,7 @@ def check_db(path): ret = False sql_path = '%' + sep + path.split(sep)[-1] + sep + '%' sql = 'SELECT idShow FROM tvshow_view where strPath LIKE "%s"' % sql_path - logger.info('sql: ' + sql) + logger.log('sql: ' + sql) nun_records, records = execute_sql_kodi(sql) if records: ret = True @@ -1040,7 +1040,7 @@ def execute_sql_kodi(sql): @return: list with the query result @rtype records: list of tuples """ - logger.info() + logger.log() file_db = "" nun_records = 0 records = None @@ -1061,14 +1061,14 @@ def execute_sql_kodi(sql): break if file_db: - logger.info("DB file: %s" % file_db) + logger.log("DB file: %s" % file_db) conn = None try: import sqlite3 conn = sqlite3.connect(file_db) cursor = conn.cursor() - logger.info("Running sql: %s" % sql) + logger.log("Running sql: %s" % sql) cursor.execute(sql) conn.commit() @@ -1082,7 +1082,7 @@ def execute_sql_kodi(sql): nun_records = conn.total_changes conn.close() - logger.info("Query executed. Records: %s" % nun_records) + logger.log("Query executed. Records: %s" % nun_records) except: logger.error("Error executing sql query") @@ -1102,7 +1102,7 @@ def check_sources(new_movies_path='', new_tvshows_path=''): if not path.endswith(sep): path += sep return path - logger.info() + logger.log() new_movies_path = format_path(new_movies_path) new_tvshows_path = format_path(new_tvshows_path) @@ -1132,7 +1132,7 @@ def check_sources(new_movies_path='', new_tvshows_path=''): def update_sources(new='', old=''): - logger.info() + logger.log() if new == old: return SOURCES_PATH = xbmc.translatePath("special://userdata/sources.xml") @@ -1174,9 +1174,9 @@ def update_sources(new='', old=''): # create new path list_path = [p.firstChild.data for p in paths_node] if new in list_path: - logger.info("The path %s already exists in sources.xml" % new) + logger.log("The path %s already exists in sources.xml" % new) return - logger.info("The path %s does not exist in sources.xml" % new) + logger.log("The path %s does not exist in sources.xml" % new) # if the path does not exist we create one source_node = xmldoc.createElement("source") @@ -1215,7 +1215,7 @@ def update_sources(new='', old=''): def ask_set_content(silent=False): - logger.info() + logger.log() logger.debug("videolibrary_kodi %s" % config.get_setting("videolibrary_kodi")) def do_config(custom=False): @@ -1272,7 +1272,7 @@ def ask_set_content(silent=False): def next_ep(item): from core.item import Item - logger.info() + logger.log() item.next_ep = False # check if next file exist @@ -1288,7 +1288,7 @@ def next_ep(item): nextIndex = fileList.index(current_filename) + 1 if nextIndex == 0 or nextIndex == len(fileList): next_file = None else: next_file = fileList[nextIndex] - logger.info('Next File:' + str(next_file)) + logger.log('Next File:' + str(next_file)) # start next episode window afther x time if next_file: diff --git a/servers/akvideo.py b/servers/akvideo.py index 14ff3836..bf5416ec 100644 --- a/servers/akvideo.py +++ b/servers/akvideo.py @@ -9,7 +9,7 @@ from platformcode import logger, config headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0']] def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) # page_url = 
re.sub('akvideo.stream/(?:video/|video\\.php\\?file_code=)?(?:embed-)?([a-zA-Z0-9]+)','akvideo.stream/video/\\1',page_url) global data page = httptools.downloadpage(page_url, headers=headers) @@ -28,18 +28,18 @@ def test_video_exists(page_url): # ID, code = scrapertools.find_single_match(data, r"""input\D*id=(?:'|")([^'"]+)(?:'|").*?value='([a-z0-9]+)""") # post = urllib.urlencode({ID: code}) - # logger.info('PAGE DATA' + data) + # logger.log('PAGE DATA' + data) if "File Not Found" in data: return False, config.get_localized_string(70449) % "Akvideo" return True, "" def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info(" url=" + page_url) + logger.log(" url=" + page_url) video_urls = [] global data - # logger.info('PAGE DATA' + data) + # logger.log('PAGE DATA' + data) # sitekey = scrapertools.find_single_match(data, 'data-sitekey="([^"]+)') # captcha = platformtools.show_recaptcha(sitekey, page_url) if sitekey else '' # diff --git a/servers/anavids.py b/servers/anavids.py index c4169620..34462c9f 100644 --- a/servers/anavids.py +++ b/servers/anavids.py @@ -6,7 +6,7 @@ from platformcode import config, logger def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) global data data = httptools.downloadpage(page_url, cookies=False).data if 'File you are looking for is not found.' in data: diff --git a/servers/animeid.py b/servers/animeid.py index 3cec4070..56eaa3bd 100644 --- a/servers/animeid.py +++ b/servers/animeid.py @@ -6,7 +6,7 @@ from platformcode import logger def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url).data if "no longer exists" in data or "to copyright issues" in data: return False, config.get_localized_string(70449) % "animeid" @@ -16,7 +16,7 @@ def test_video_exists(page_url): def get_video_url(page_url, user="", password="", video_password=""): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url).data video_urls = [] label, videourl = scrapertools.find_single_match(data, 'label":"([^"]+)".*?file":"([^"]+)') diff --git a/servers/anonfile.py b/servers/anonfile.py index 4191caff..ea1070bb 100644 --- a/servers/anonfile.py +++ b/servers/anonfile.py @@ -9,7 +9,7 @@ from platformcode import logger, config def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) response = httptools.downloadpage(page_url) if not response.success or "Not Found" in response.data or "File was deleted" in response.data or "is no longer available" in response.data: return False, config.get_localized_string(70449) % "anonfile" @@ -17,7 +17,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) video_urls = [] data = httptools.downloadpage(page_url).data patron = 'download-url.*?href="([^"]+)"' diff --git a/servers/archiveorg.py b/servers/archiveorg.py index 61677b02..ee9a51cd 100644 --- a/servers/archiveorg.py +++ b/servers/archiveorg.py @@ -9,7 +9,7 @@ from platformcode import logger def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url) if data.code == 404: return False, 
config.get_localized_string(70449) % "ArchiveOrg" @@ -17,7 +17,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("url=" + page_url) + logger.log("url=" + page_url) video_urls = [] data = httptools.downloadpage(page_url).data patron = '' diff --git a/servers/backin.py b/servers/backin.py index c070d5d6..fa6c4d9e 100644 --- a/servers/backin.py +++ b/servers/backin.py @@ -9,7 +9,7 @@ except ImportError: from urllib import urlencode def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) if 'http://' in page_url: # fastids page_url = httptools.downloadpage(page_url, follow_redirects=False, only_headers=True).headers['location'] @@ -24,7 +24,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("page_url=" + page_url) + logger.log("page_url=" + page_url) video_urls = [] @@ -36,18 +36,18 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= if data_pack: from lib import jsunpack data = jsunpack.unpack(data_pack) - logger.info("page_url=" + data) + logger.log("page_url=" + data) # URL url = scrapertools.find_single_match(data, r'"src"value="([^"]+)"') if not url: url = scrapertools.find_single_match(data, r'file\s*:\s*"([^"]+)"') - logger.info("URL=" + str(url)) + logger.log("URL=" + str(url)) # URL del vídeo video_urls.append([".mp4" + " [backin]", url]) for video_url in video_urls: - logger.info("%s - %s" % (video_url[0], httptools.get_url_headers(video_url[1]))) + logger.log("%s - %s" % (video_url[0], httptools.get_url_headers(video_url[1]))) return video_urls diff --git a/servers/badshare.py b/servers/badshare.py index 78c62986..3e05b115 100644 --- a/servers/badshare.py +++ b/servers/badshare.py @@ -11,7 +11,7 @@ from platformcode import logger def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) global page page = httptools.downloadpage(page_url) if not page.success: @@ -20,7 +20,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("url=" + page_url) + logger.log("url=" + page_url) video_urls = [] ext = '.mp4' diff --git a/servers/bdupload.py b/servers/bdupload.py index 6321dd3a..79c40c74 100644 --- a/servers/bdupload.py +++ b/servers/bdupload.py @@ -10,7 +10,7 @@ headers = {'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url).data if "Archive no Encontrado" in data: return False, config.get_localized_string(70449) % "bdupload" @@ -19,7 +19,7 @@ def test_video_exists(page_url): def get_video_url(page_url, user="", password="", video_password=""): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url).data post = "" patron = '(?s)type="hidden" name="([^"]+)".*?value="([^"]*)"' diff --git a/servers/cinemaupload.py b/servers/cinemaupload.py index c3f05da8..8d1af217 100644 --- a/servers/cinemaupload.py +++ b/servers/cinemaupload.py @@ -11,7 +11,7 @@ from platformcode import logger def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url) if 
data.code == 404: return False, config.get_localized_string(70449) % "CinemaUpload" @@ -19,7 +19,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("url=" + page_url) + logger.log("url=" + page_url) video_urls = [] data = httptools.downloadpage(page_url).data data = re.sub(r'\n|\r|\t|&nbsp;|<br>
|\s{2,}', "", data) diff --git a/servers/clicknupload.py b/servers/clicknupload.py index f54c5d5c..1f40f6f1 100755 --- a/servers/clicknupload.py +++ b/servers/clicknupload.py @@ -22,7 +22,7 @@ excption = False def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = get_data(page_url.replace(".org", ".me")) if "File Not Found" in data: return False, config.get_localized_string(70449) % "Clicknupload" @@ -31,7 +31,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("url=" + page_url) + logger.log("url=" + page_url) data = get_data(page_url.replace(".org", ".me")) @@ -51,7 +51,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= media_url = media.rsplit('/', 1)[0] + "/" + url_strip video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [clicknupload]", media_url]) for video_url in video_urls: - logger.info("%s - %s" % (video_url[0], video_url[1])) + logger.log("%s - %s" % (video_url[0], video_url[1])) return video_urls diff --git a/servers/clipwatching.py b/servers/clipwatching.py index 93aa4804..7653b330 100644 --- a/servers/clipwatching.py +++ b/servers/clipwatching.py @@ -6,7 +6,7 @@ from lib import jsunpack from platformcode import logger, config def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) global data data = httptools.downloadpage(page_url).data if "File Not Found" in data or "File was deleted" in data: @@ -15,7 +15,7 @@ def test_video_exists(page_url): def get_video_url(page_url, user="", password="", video_password=""): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) video_urls = [] try: diff --git a/servers/cloudvideo.py b/servers/cloudvideo.py index b7885afe..0dd0b0f8 100644 --- a/servers/cloudvideo.py +++ b/servers/cloudvideo.py @@ -8,7 +8,7 @@ from lib import jsunpack def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) html = httptools.downloadpage(page_url) global data data = html.data @@ -18,7 +18,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("url=" + page_url) + logger.log("url=" + page_url) video_urls = [] global data # data = httptools.downloadpage(page_url).data diff --git a/servers/crunchyroll.py b/servers/crunchyroll.py index 2d252b3a..8c9eec30 100755 --- a/servers/crunchyroll.py +++ b/servers/crunchyroll.py @@ -30,7 +30,7 @@ proxy = "https://www.usa-proxy.org/" def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url, headers=GLOBAL_HEADER).data if "Este es un clip de muestra" in data: @@ -44,7 +44,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): #page_url='https://www.crunchyroll.com/es-es/one-piece/episode-891-climbing-up-a-waterfall-a-great-journey-through-the-land-of-wanos-sea-zone-786643' - logger.info("url=" + page_url) + logger.log("url=" + page_url) video_urls = [] if "crunchyroll.com" in page_url: media_id = page_url.rsplit("-", 1)[1] @@ -94,7 +94,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= file_sub = "" video_urls.append(["%s %sp [crunchyroll]" % (filename, quality), media_url, 
0, file_sub]) for video_url in video_urls: - logger.info("%s - %s" % (video_url[0], video_url[1])) + logger.log("%s - %s" % (video_url[0], video_url[1])) return video_urls diff --git a/servers/dailymotion.py b/servers/dailymotion.py index 7de4b4fd..ad72f92f 100644 --- a/servers/dailymotion.py +++ b/servers/dailymotion.py @@ -6,7 +6,7 @@ from platformcode import logger, config def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) global response response = httptools.downloadpage(page_url, cookies=False) @@ -18,7 +18,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) video_urls = [] cookie = {'Cookie': response.headers["set-cookie"]} data = response.data.replace("\\", "") @@ -40,5 +40,5 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= stream_url = stream_url_http video_urls.append(["%sp .%s [dailymotion]" % (calidad, stream_type), stream_url, 0, subtitle]) for video_url in video_urls: - logger.info("%s - %s" % (video_url[0], video_url[1])) + logger.log("%s - %s" % (video_url[0], video_url[1])) return video_urls \ No newline at end of file diff --git a/servers/debriders/alldebrid.py b/servers/debriders/alldebrid.py index 6f0bd18a..db6e3010 100644 --- a/servers/debriders/alldebrid.py +++ b/servers/debriders/alldebrid.py @@ -7,7 +7,7 @@ from platformcode import logger # Returns an array of possible video url's from the page_url def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info() + logger.log() page_url = correct_url(page_url) dd1 = httptools.downloadpage("https://api.alldebrid.com/user/login?agent=mySoft&username=%s&password=%s" %(user, password)).data token = scrapertools.find_single_match(dd1, 'token":"([^"]+)') diff --git a/servers/debriders/realdebrid.py b/servers/debriders/realdebrid.py index 98489850..22b7ff4b 100755 --- a/servers/debriders/realdebrid.py +++ b/servers/debriders/realdebrid.py @@ -22,7 +22,7 @@ headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:65.0) Gecko/20 # Returns an array of possible video url's from the page_url def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("(page_url='%s' , video_password=%s)" % (page_url, video_password)) + logger.log("(page_url='%s' , video_password=%s)" % (page_url, video_password)) page_url = page_url.replace(".nz/embed", ".nz/") # Se comprueba si existe un token guardado y sino se ejecuta el proceso de autentificación token_auth = config.get_setting("token", server="realdebrid") @@ -99,7 +99,7 @@ def get_enlaces(data): def authentication(): - logger.info() + logger.log() try: client_id = "YTWNFBIJEEBP6" diff --git a/servers/decrypters/adfly.py b/servers/decrypters/adfly.py index 20b34d18..e9846a53 100755 --- a/servers/decrypters/adfly.py +++ b/servers/decrypters/adfly.py @@ -8,7 +8,7 @@ from platformcode import logger def get_long_url(short_url): - logger.info("short_url = '%s'" % short_url) + logger.log("short_url = '%s'" % short_url) data = httptools.downloadpage(short_url).data ysmm = scrapertools.find_single_match(data, "var ysmm = '([^']+)';") diff --git a/servers/decrypters/linkbucks.py b/servers/decrypters/linkbucks.py index 5b15ec00..b9af3d30 100755 --- a/servers/decrypters/linkbucks.py +++ b/servers/decrypters/linkbucks.py @@ -17,7 +17,7 @@ from platformcode import 
logger # Obtiene la URL que hay detrás de un enlace a linkbucks def get_long_url(short_url): - logger.info("(short_url='%s')" % short_url) + logger.log("(short_url='%s')" % short_url) request_headers = [] request_headers.append(["User-Agent", @@ -33,17 +33,17 @@ def get_long_url(short_url): while True: for name, value in response_headers: if name == "set-cookie": - logger.info("Set-Cookie: " + value) + logger.log("Set-Cookie: " + value) cookie_name = scrapertools.scrapertools.find_single_match(value, '(.*?)\=.*?\;') cookie_value = scrapertools.scrapertools.find_single_match(value, '.*?\=(.*?)\;') request_headers.append(["Cookie", cookie_name + "=" + cookie_value]) body, response_headers = scrapertools.read_body_and_headers(url, headers=request_headers) - logger.info("body=" + body) + logger.log("body=" + body) try: location = scrapertools.scrapertools.find_single_match(body, '([^<]+)') - logger.info("location=" + location) + logger.log("location=" + location) break except: n = n + 1 diff --git a/servers/decrypters/longurl.py b/servers/decrypters/longurl.py index 4c6cb012..18f7cd38 100755 --- a/servers/decrypters/longurl.py +++ b/servers/decrypters/longurl.py @@ -38,15 +38,15 @@ servers = get_server_list() def get_long_urls(data): - logger.info() + logger.log() patron = 'Download error' in data: return False, "El enlace no es válido
o ha sido borrado de gigasize" @@ -13,7 +13,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) video_urls = [] return video_urls diff --git a/servers/googlevideo.py b/servers/googlevideo.py index 3a411e80..e324062b 100755 --- a/servers/googlevideo.py +++ b/servers/googlevideo.py @@ -7,14 +7,14 @@ from platformcode import logger def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) video_urls = [] # Lo extrae a partir de flashvideodownloader.org if page_url.startswith("http://"): url = 'http://www.flashvideodownloader.org/download.php?u=' + page_url else: url = 'http://www.flashvideodownloader.org/download.php?u=http://video.google.com/videoplay?docid=' + page_url - logger.info("url=" + url) + logger.log("url=" + url) data = httptools.downloadpage(url).data # Extrae el vídeo @@ -24,6 +24,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= video_urls.append(["[googlevideo]", newmatches[0]]) for video_url in video_urls: - logger.info("%s - %s" % (video_url[0], video_url[1])) + logger.log("%s - %s" % (video_url[0], video_url[1])) return video_urls diff --git a/servers/gounlimited.py b/servers/gounlimited.py index 723a6d2e..e0e597bb 100644 --- a/servers/gounlimited.py +++ b/servers/gounlimited.py @@ -20,15 +20,15 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("url=" + page_url) + logger.log("url=" + page_url) video_urls = [] global data # data = httptools.downloadpage(page_url, use_requests=True, verify=False).data data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>
|\s{2,}', "", data) - logger.info('GOUN DATA= '+data) + logger.log('GOUN DATA= '+data) packed_data = scrapertools.find_single_match(data, "javascript'>(eval.*?)") unpacked = jsunpack.unpack(packed_data) - logger.info('GOUN DATA= '+unpacked) + logger.log('GOUN DATA= '+unpacked) patron = r"sources..([^\]]+)" matches = re.compile(patron, re.DOTALL).findall(unpacked) if not matches: diff --git a/servers/gvideo.py b/servers/gvideo.py index aa9712c4..adc6e52e 100644 --- a/servers/gvideo.py +++ b/servers/gvideo.py @@ -42,7 +42,7 @@ def test_video_exists(page_url): def get_video_url(page_url, user="", password="", video_password=""): - logger.info() + logger.log() video_urls = [] urls = [] streams =[] diff --git a/servers/hdload.py b/servers/hdload.py index 30240022..9cce3157 100644 --- a/servers/hdload.py +++ b/servers/hdload.py @@ -7,7 +7,7 @@ from platformcode import config, logger def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url, cookies=False).data if 'Not found id' in data: @@ -17,12 +17,12 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info() + logger.log() itemlist = [] - logger.info(page_url) + logger.log(page_url) data = httptools.downloadpage(page_url, post='').data - logger.info(data) + logger.log(data) url = base64.b64decode(data) itemlist.append([".mp4 [HDLoad]", url]) diff --git a/servers/hdmario.py b/servers/hdmario.py index 27bb27e9..12b6b9aa 100644 --- a/servers/hdmario.py +++ b/servers/hdmario.py @@ -7,12 +7,12 @@ from platformcode import logger, config, platformtools baseUrl = 'https://hdmario.live' def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) global page, data page = httptools.downloadpage(page_url) data = page.data - logger.info(page.url) + logger.log(page.url) if "the page you are looking for could not be found" in data: return False, config.get_localized_string(70449) % "HDmario" @@ -37,13 +37,13 @@ def registerOrLogin(page_url, forced=False): setting.server_config(Item(config='hdmario')) login() else: - logger.info('Registrazione automatica in corso') + logger.log('Registrazione automatica in corso') import random import string randEmail = ''.join(random.choice(string.ascii_letters + string.digits) for i in range(random.randint(9, 14))) + '@gmail.com' randPsw = ''.join(random.choice(string.ascii_letters + string.digits) for i in range(10)) - logger.info('email: ' + randEmail) - logger.info('pass: ' + randPsw) + logger.log('email: ' + randEmail) + logger.log('pass: ' + randPsw) nTry = 0 while nTry < 5: nTry += 1 @@ -59,7 +59,7 @@ def registerOrLogin(page_url, forced=False): break else: platformtools.dialog_ok('HDmario', 'Impossibile registrarsi automaticamente') - logger.info('Registrazione completata') + logger.log('Registrazione completata') global page, data page = httptools.downloadpage(page_url) data = page.data @@ -67,7 +67,7 @@ def registerOrLogin(page_url, forced=False): def get_video_url(page_url, premium=False, user="", password="", video_password=""): global page, data page_url = page_url.replace('?', '') - logger.info("url=" + page_url) + logger.log("url=" + page_url) if 'unconfirmed' in page.url: from lib import onesecmail @@ -79,7 +79,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= } httptools.downloadpage(page.url, post=postData) jsonMail = 
onesecmail.waitForMail(mail) - logger.info(jsonMail) + logger.log(jsonMail) if jsonMail: code = jsonMail['subject'].split(' - ')[0] page = httptools.downloadpage(page_url + '?code=' + code) @@ -91,12 +91,12 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= if 'Registrati' in data: platformtools.dialog_ok('HDmario', 'Username/password non validi') registerOrLogin(page_url, True) - logger.info(data) + logger.log(data) from lib import jsunpack_js2py unpacked = jsunpack_js2py.unpack(scrapertools.find_single_match(data, '") data = jsunpack.unpack(match) @@ -23,9 +23,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= media_url = scrapertools.find_single_match(data, '{type:"video/mp4",src:"([^"]+)"}') if not media_url: media_url = scrapertools.find_single_match(data, '"file":"([^"]+)') - logger.info("media_url=" + media_url) + logger.log("media_url=" + media_url) video_urls = list() video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [mp4upload]", media_url]) for video_url in video_urls: - logger.info("%s - %s" % (video_url[0], video_url[1])) + logger.log("%s - %s" % (video_url[0], video_url[1])) return video_urls diff --git a/servers/mydaddy.py b/servers/mydaddy.py index 1f6ec8e4..cf65b0ba 100644 --- a/servers/mydaddy.py +++ b/servers/mydaddy.py @@ -21,7 +21,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info() + logger.log() video_urls = [] data = httptools.downloadpage(page_url).data data = scrapertools.find_single_match(data, 'var srca = \[(.*?)\]') diff --git a/servers/mystream.py b/servers/mystream.py index 5a7bc97a..ec8f0a17 100644 --- a/servers/mystream.py +++ b/servers/mystream.py @@ -10,7 +10,7 @@ import re def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url) global page_data page_data = data.data @@ -21,7 +21,7 @@ def test_video_exists(page_url): return True, "" def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) video_urls = [] global page_data video_url = scrapertools.find_single_match(decode(page_data), r"'src',\s*'([^']+)") diff --git a/servers/myupload.py b/servers/myupload.py index 04eb0724..228fc82b 100644 --- a/servers/myupload.py +++ b/servers/myupload.py @@ -23,7 +23,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info() + logger.log() video_urls = [] data = httptools.downloadpage(page_url).data matches = scrapertools.find_multiple_matches(data, 'tracker: "([^"]+)"') diff --git a/servers/netutv.py b/servers/netutv.py index 8e391f6a..aa475c41 100755 --- a/servers/netutv.py +++ b/servers/netutv.py @@ -25,7 +25,7 @@ from platformcode import logger def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) #Deshabilitamos el server hasta nueva orden return False, "[netutv] Servidor deshabilitado" # http://netu.tv/watch_video.php=XX solo contiene una redireccion, ir directamente a http://hqq.tv/player/embed_player.php?vid=XX @@ -37,7 +37,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("url=" + page_url) + logger.log("url=" + page_url) video_urls = [] if 
"hash=" in page_url: diff --git a/servers/nowvideo.py b/servers/nowvideo.py index 170b3f94..21cde3ca 100644 --- a/servers/nowvideo.py +++ b/servers/nowvideo.py @@ -9,7 +9,7 @@ from platformcode import logger, config headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0']] def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url).data if "Not Found" in data or "File was deleted" in data or "The file is being converted" in data or "Please try again later" in data: return False, config.get_localized_string(70293) % "NowVideo" @@ -20,7 +20,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): host = 'http://nowvideo.club' - logger.info("(nowvideo page_url='%s')" % page_url) + logger.log("(nowvideo page_url='%s')" % page_url) video_urls = [] data = httptools.downloadpage(page_url).data page_url_post = scrapertools.find_single_match(data, '
') @@ -28,7 +28,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= imhuman = '&imhuman=' + scrapertools.find_single_match(data, 'name="imhuman" value="([^"]+)"').replace(" ", "+") post = urllib.urlencode({k: v for k, v in scrapertools.find_multiple_matches(data, 'name="([^"]+)" value="([^"]*)"')}) + imhuman data = httptools.downloadpage(host + page_url_post, post=post).data - logger.info("nowvideo data page_url2 ='%s'" % data) + logger.log("nowvideo data page_url2 ='%s'" % data) headers.append(['Referer', page_url]) post_data = scrapertools.find_single_match(data,"
\s*") @@ -43,11 +43,11 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= _headers = urllib.urlencode(dict(headers)) for media_url in media_urls: - #logger.info("nowvideo data page_url2 ='%s'" % media_url) + #logger.log("nowvideo data page_url2 ='%s'" % media_url) video_urls.append([" mp4 [nowvideo] ", media_url + '|' + _headers]) for video_url in media_urls: - logger.info("[nowvideo.py] %s - %s" % (video_url[0], video_url[1])) + logger.log("[nowvideo.py] %s - %s" % (video_url[0], video_url[1])) return video_urls @@ -57,7 +57,7 @@ def find_videos(data): devuelve = [] patronvideos = r"nowvideo.club/(?:play|videos)?([a-z0-9A-Z]+)" - logger.info("[nowvideo.py] find_videos #" + patronvideos + "#") + logger.log("[nowvideo.py] find_videos #" + patronvideos + "#") matches = re.compile(patronvideos, re.DOTALL).findall(data) for match in matches: @@ -65,10 +65,10 @@ def find_videos(data): url = 'http://nowvideo.club/%s' % match if url not in encontrados: - logger.info(" url=" + url) + logger.log(" url=" + url) devuelve.append([titulo, url, 'nowvideo']) encontrados.add(url) else: - logger.info(" url duplicada=" + url) + logger.log(" url duplicada=" + url) return devuelve diff --git a/servers/okru.py b/servers/okru.py index f8fb8f83..c8e4588b 100644 --- a/servers/okru.py +++ b/servers/okru.py @@ -8,7 +8,7 @@ from platformcode import logger, config def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url).data if "copyrightsRestricted" in data or "COPYRIGHTS_RESTRICTED" in data: @@ -20,7 +20,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("url=" + page_url) + logger.log("url=" + page_url) video_urls = [] data = httptools.downloadpage(page_url).data diff --git a/servers/onefichier.py b/servers/onefichier.py index 971d954e..160b2e59 100644 --- a/servers/onefichier.py +++ b/servers/onefichier.py @@ -21,25 +21,25 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) if config.get_setting("premium", server="onefichier"): user = config.get_setting("user", server="onefichier") password = config.get_setting("password", server="onefichier") url = "https://1fichier.com/login.pl" - logger.info("url=" + url) + logger.log("url=" + url) post_parameters = {"mail": user, "pass": password, "lt": "on", "purge": "on", "valider": "Send"} post = urllib.urlencode(post_parameters) - logger.info("post=" + post) + logger.log("post=" + post) data = httptools.downloadpage(url, post=post).data - # logger.info("data="+data) + # logger.log("data="+data) cookies = config.get_cookie_data() - logger.info("cookies=" + cookies) + logger.log("cookies=" + cookies) # 1fichier.com TRUE / FALSE 1443553315 SID imC3q8MQ7cARw5tkXeWvKyrH493rR=1yvrjhxDAA0T0iEmqRfNF9GXwjrwPHssAQ sid_cookie_value = scrapertools.find_single_match(cookies, "1fichier.com.*?SID\s+([A-Za-z0-9\+\=]+)") - logger.info("sid_cookie_value=" + sid_cookie_value) + logger.log("sid_cookie_value=" + sid_cookie_value) # .1fichier.com TRUE / FALSE 1443553315 SID imC3q8MQ7cARw5tkXeWvKyrH493rR=1yvrjhxDAA0T0iEmqRfNF9GXwjrwPHssAQ cookie = urllib.urlencode({"SID": sid_cookie_value}) @@ -50,16 +50,16 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= 'Mozilla/5.0 (Macintosh; U; Intel 
Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12']) headers.append(['Cookie', cookie]) filename = scrapertools.get_header_from_response(page_url, header_to_get="Content-Disposition") - logger.info("filename=" + filename) + logger.log("filename=" + filename) # Construye la URL final para Kodi location = page_url + "|Cookie=" + cookie - logger.info("location=" + location) + logger.log("location=" + location) video_urls = [] video_urls.append([filename[-4:] + " (Premium) [1fichier]", location]) for video_url in video_urls: - logger.info("%s - %s" % (video_url[0], video_url[1])) + logger.log("%s - %s" % (video_url[0], video_url[1])) return video_urls diff --git a/servers/onlystream.py b/servers/onlystream.py index 12d68cd1..76af2680 100644 --- a/servers/onlystream.py +++ b/servers/onlystream.py @@ -6,7 +6,7 @@ from platformcode import config, logger def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url, cookies=False).data if 'File you are looking for is not found.' in data: @@ -16,8 +16,8 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("url=" + page_url) + logger.log("url=" + page_url) data = httptools.downloadpage(page_url).data - # logger.info(data) + # logger.log(data) video_urls = support.get_jwplayer_mediaurl(data, 'Onlystream') return video_urls diff --git a/servers/rapidgator.py b/servers/rapidgator.py index ed25c540..70e94d11 100644 --- a/servers/rapidgator.py +++ b/servers/rapidgator.py @@ -8,6 +8,6 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) video_urls = [] return video_urls diff --git a/servers/rcdnme.py b/servers/rcdnme.py index d7fc0b2c..89189e62 100644 --- a/servers/rcdnme.py +++ b/servers/rcdnme.py @@ -10,7 +10,7 @@ from platformcode import logger def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url) if "Object not found" in data.data or "longer exists on our servers" in data.data: @@ -21,7 +21,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url).data if "p,a,c,k,e,d" in data: @@ -44,6 +44,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= except: pass for video_url in video_urls: - logger.info(" %s - %s" % (video_url[0], video_url[1])) + logger.log(" %s - %s" % (video_url[0], video_url[1])) return video_urls diff --git a/servers/rutube.py b/servers/rutube.py index e6414be7..42541cbb 100644 --- a/servers/rutube.py +++ b/servers/rutube.py @@ -24,13 +24,13 @@ from core import jsontools def get_source(url): - logger.info() + logger.log() data = httptools.downloadpage(url).data data = re.sub(r'\n|\r|\t|&nbsp;|<br>
|\s{2,}', "", data) return data def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = get_source(page_url) if "File was deleted" in data or "File Not Found" in data: @@ -40,7 +40,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("url=" + page_url) + logger.log("url=" + page_url) video_urls = [] referer = '' diff --git a/servers/samaup.py b/servers/samaup.py index cc6285c0..ea2918e3 100644 --- a/servers/samaup.py +++ b/servers/samaup.py @@ -10,7 +10,7 @@ from platformcode import logger def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) global data data = httptools.downloadpage(page_url).data if "Not Found" in data or "File was deleted" in data: @@ -19,7 +19,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("url=" + page_url) + logger.log("url=" + page_url) video_urls = [] ext = 'mp4' diff --git a/servers/sendvid.py b/servers/sendvid.py index ed11f426..62d4d191 100755 --- a/servers/sendvid.py +++ b/servers/sendvid.py @@ -9,7 +9,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) video_urls = [] data = scrapertools.httptools.downloadpage(page_url).data media_url = scrapertools.find_single_match(data, 'var\s+video_source\s+\=\s+"([^"]+)"') @@ -24,5 +24,5 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= else: video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [sendvid]", media_url]) for video_url in video_urls: - logger.info("%s - %s" % (video_url[0], video_url[1])) + logger.log("%s - %s" % (video_url[0], video_url[1])) return video_urls diff --git a/servers/speedvideo.py b/servers/speedvideo.py index c9a2e42e..a2349547 100644 --- a/servers/speedvideo.py +++ b/servers/speedvideo.py @@ -5,7 +5,7 @@ from core import httptools, scrapertools from platformcode import config, logger def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url).data @@ -15,22 +15,22 @@ def test_video_exists(page_url): return True, "" def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("url=" + page_url) + logger.log("url=" + page_url) video_urls = [] quality ={'MOBILE':1, 'NORMAL':2, 'HD':3} data = httptools.downloadpage(page_url).data - logger.info('SPEEDVIDEO DATA '+ data) + logger.log('SPEEDVIDEO DATA '+ data) media_urls = scrapertools.find_multiple_matches(data, r"file:[^']'([^']+)',\s*label:[^\"]\"([^\"]+)\"") - logger.info("speed video - media urls: %s " % media_urls) + logger.log("speed video - media urls: %s " % media_urls) for media_url, label in media_urls: media_url = httptools.downloadpage(media_url, only_headers=True, follow_redirects=False).headers.get("location", "") if media_url: video_urls.append([media_url.split('.')[-1] + ' - ' + label + ' - ' + ' [Speedvideo]', media_url]) - logger.info("speed video - media urls: %s " % video_urls) + logger.log("speed video - media urls: %s " % video_urls) return sorted(video_urls, key=lambda x: quality[x[0].split(' - ')[1]]) diff --git a/servers/streamtape.py b/servers/streamtape.py index 
5bc74691..81f3b405 100644 --- a/servers/streamtape.py +++ b/servers/streamtape.py @@ -6,7 +6,7 @@ from platformcode import logger, config def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) global data data = httptools.downloadpage(page_url).data if "Video not found" in data: @@ -15,7 +15,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("url=" + page_url) + logger.log("url=" + page_url) video_urls = [] global data diff --git a/servers/streamz.py b/servers/streamz.py index 3d85f57a..7427a5f6 100644 --- a/servers/streamz.py +++ b/servers/streamz.py @@ -6,7 +6,7 @@ from platformcode import logger def get_video_url(page_url, video_password): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) video_urls = [] url = httptools.downloadpage(page_url).url data = httptools.downloadpage(url).data diff --git a/servers/supervideo.py b/servers/supervideo.py index ea506217..a668e9a6 100644 --- a/servers/supervideo.py +++ b/servers/supervideo.py @@ -9,7 +9,7 @@ from platformcode import config, logger def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) global data data = httptools.downloadpage(page_url, cookies=False).data if 'File Not Found' in data: @@ -19,7 +19,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("url=" + page_url) + logger.log("url=" + page_url) video_urls = [] # data = httptools.downloadpage(page_url).data global data @@ -43,7 +43,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= video_urls.append(['.' + source['file'].split('.')[-1] + ' [' + quality + '] [SuperVideo]', source['file']]) else: - logger.info('ELSE!') + logger.log('ELSE!') matches = scrapertools.find_multiple_matches(data, r'src:\s*"([^"]+)",\s*type:\s*"[^"]+"(?:\s*, res:\s(\d+))?') for url, quality in matches: if url.split('.')[-1] != 'm3u8': diff --git a/servers/thevid.py b/servers/thevid.py index e061856b..ed182221 100644 --- a/servers/thevid.py +++ b/servers/thevid.py @@ -8,7 +8,7 @@ from platformcode import logger, config def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url).data if "Video not found..." 
in data or "Video removed due to copyright" in data: return False, config.get_localized_string(70292) % "Thevid" @@ -31,5 +31,5 @@ def get_video_url(page_url, user="", password="", video_password=""): continue video = "https:" + video video_urls.append(["mp4 [Thevid]", video]) - logger.info("Url: %s" % videos) + logger.log("Url: %s" % videos) return video_urls diff --git a/servers/thevideobee.py b/servers/thevideobee.py index f319dcfe..b2f2fc75 100644 --- a/servers/thevideobee.py +++ b/servers/thevideobee.py @@ -9,7 +9,7 @@ from platformcode import logger def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url).data if "no longer exists" in data or "to copyright issues" in data: return False, config.get_localized_string(70449) % "thevideobee" @@ -17,7 +17,7 @@ def test_video_exists(page_url): def get_video_url(page_url, user="", password="", video_password=""): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url).data video_urls = [] videourl = scrapertools.find_single_match(data, 'src: "([^"]+)') diff --git a/servers/turbobit.py b/servers/turbobit.py index d370f03f..8872142c 100644 --- a/servers/turbobit.py +++ b/servers/turbobit.py @@ -4,6 +4,6 @@ from platformcode import logger def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) video_urls = [] return video_urls diff --git a/servers/turbovid.py b/servers/turbovid.py index 8353df9b..4e5f0bb4 100644 --- a/servers/turbovid.py +++ b/servers/turbovid.py @@ -9,7 +9,7 @@ from platformcode import logger, config def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url).data if "Not Found" in data or "File Does not Exist" in data: return False, config.get_localized_string(70449) % "Turbovid" @@ -18,7 +18,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password="", server='Turbovid'): - logger.info("(turbovid page_url='%s')" % page_url) + logger.log("(turbovid page_url='%s')" % page_url) video_urls = [] data = httptools.downloadpage(page_url).data data = data.replace('"', "'") @@ -28,6 +28,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= time.sleep(6) data = httptools.downloadpage(page_url_post, post=post).data - logger.info("(data page_url='%s')" % data) + logger.log("(data page_url='%s')" % data) video_urls = support.get_jwplayer_mediaurl(data, 'Turbovid') return video_urls diff --git a/servers/tusfiles.py b/servers/tusfiles.py index 18cec1ee..59e25d8a 100644 --- a/servers/tusfiles.py +++ b/servers/tusfiles.py @@ -9,7 +9,7 @@ from platformcode import logger def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url).data if "no longer exists" in data or "to copyright issues" in data: return False, config.get_localized_string(70449) % "tusfiles" @@ -17,7 +17,7 @@ def test_video_exists(page_url): def get_video_url(page_url, user="", password="", video_password=""): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url).data video_urls = [] videourl = scrapertools.find_single_match(data, 
'source src="([^"]+)') diff --git a/servers/uploadedto.py b/servers/uploadedto.py index 72b83510..73d624e5 100755 --- a/servers/uploadedto.py +++ b/servers/uploadedto.py @@ -6,7 +6,7 @@ from platformcode import logger def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) real_url = page_url.replace("uploaded.to", "uploaded.net") code = httptools.downloadpage(real_url, only_headers=True).code @@ -16,36 +16,36 @@ def test_video_exists(page_url): return True, "" def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) video_urls = [] if premium: #Si no hay almacenada una cookie activa, hacemos login if check_cookie("uploaded.net", "login") != True: # Login para conseguir la cookie - logger.info("-------------------------------------------") - logger.info("login") - logger.info("-------------------------------------------") + logger.log("-------------------------------------------") + logger.log("login") + logger.log("-------------------------------------------") login_url = "http://uploaded.net/io/login" post = "id=" + user + "&pw=" + password setcookie = httptools.downloadpage(login_url, post=post, follow_redirects=False, only_headers=True).headers.get("set-cookie", "") - logger.info("-------------------------------------------") - logger.info("obtiene la url") - logger.info("-------------------------------------------") + logger.log("-------------------------------------------") + logger.log("obtiene la url") + logger.log("-------------------------------------------") location = httptools.downloadpage(page_url, follow_redirects=False, only_headers=True).headers.get("location", "") - logger.info("location=" + location) + logger.log("location=" + location) #fix descarga no directa if location == "": data = httptools.downloadpage(page_url).data - #logger.info("data: %s" % data) + #logger.log("data: %s" % data) if "
Premium Download
" in data: location = scrapertools.find_single_match(data, 'WE ARE SORRY" in data or '404 Not Found' in data: diff --git a/servers/uptobox.py b/servers/uptobox.py index 52cc08aa..5e8f0c26 100755 --- a/servers/uptobox.py +++ b/servers/uptobox.py @@ -19,7 +19,7 @@ from platformcode import logger def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url).data @@ -37,7 +37,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) # Si el enlace es directo de upstream if "uptobox" not in page_url: data = httptools.downloadpage(page_url).data @@ -60,7 +60,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= video_urls = uptobox(page_url, data) for video_url in video_urls: - logger.info("%s - %s" % (video_url[0], video_url[1])) + logger.log("%s - %s" % (video_url[0], video_url[1])) return video_urls diff --git a/servers/upvid.py b/servers/upvid.py index 4523d5dc..79577e8e 100644 --- a/servers/upvid.py +++ b/servers/upvid.py @@ -13,7 +13,7 @@ from platformcode import logger def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url) if data.code == 404: return False, config.get_localized_string(70449) % "upvid" @@ -23,7 +23,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium = False, user = "", password = "", video_password = ""): - logger.info("url=" + page_url) + logger.log("url=" + page_url) video_urls = [] headers = {'referer': page_url} for i in range(0, 3): diff --git a/servers/uqload.py b/servers/uqload.py index 3c254960..12828a21 100644 --- a/servers/uqload.py +++ b/servers/uqload.py @@ -10,7 +10,7 @@ from platformcode import logger def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url) @@ -21,7 +21,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("url=" + page_url) + logger.log("url=" + page_url) video_urls = [] data = httptools.downloadpage(page_url).data diff --git a/servers/userscloud.py b/servers/userscloud.py index 8988cdb9..163bf240 100644 --- a/servers/userscloud.py +++ b/servers/userscloud.py @@ -7,7 +7,7 @@ from platformcode import logger, config def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) response = httptools.downloadpage(page_url) @@ -18,7 +18,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("url=" + page_url) + logger.log("url=" + page_url) video_urls = [] unpacked = "" data = httptools.downloadpage(page_url).data @@ -37,6 +37,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= video_urls.append(["%s [userscloud]" % ext, media_url]) for video_url in video_urls: - logger.info("%s - %s" % (video_url[0], video_url[1])) + logger.log("%s - %s" % (video_url[0], video_url[1])) return video_urls diff --git a/servers/vevio.py b/servers/vevio.py index f0c1a6c0..f0e8d92a 100644 --- a/servers/vevio.py +++ b/servers/vevio.py @@ -18,7 +18,7 @@ from platformcode import logger, config def 
test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url).data if "File was deleted" in data or "Page Cannot Be Found" in data or "Video not found" in data: return False, config.get_localized_string(70449) % "vevio" @@ -26,7 +26,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("url=" + page_url) + logger.log("url=" + page_url) video_urls = [] post = {} post = urllib.urlencode(post) diff --git a/servers/vidcloud.py b/servers/vidcloud.py index 4a28eb22..0fc875bc 100644 --- a/servers/vidcloud.py +++ b/servers/vidcloud.py @@ -13,7 +13,7 @@ from platformcode import logger, config def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url).data if "We're Sorry" in data: return False, config.get_localized_string(70292) % "Vidcloud" @@ -22,7 +22,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("url=" + page_url) + logger.log("url=" + page_url) video_urls = [] @@ -56,6 +56,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= video_urls.append(["%s [Vidcloud" % ext, media_url]) for video_url in video_urls: - logger.info("%s - %s" % (video_url[0], video_url[1])) + logger.log("%s - %s" % (video_url[0], video_url[1])) return video_urls diff --git a/servers/videobin.py b/servers/videobin.py index c2d0125a..dd1dd075 100644 --- a/servers/videobin.py +++ b/servers/videobin.py @@ -11,7 +11,7 @@ from platformcode import logger def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url).data if "borrado" in data or "Deleted" in data: return False, config.get_localized_string(70449) % "videobin" @@ -19,7 +19,7 @@ def test_video_exists(page_url): return True, "" def get_video_url(page_url, user="", password="", video_password=""): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) video_urls = [] data = httptools.downloadpage(page_url).data bloque = scrapertools.find_single_match(data, 'sources:.\[.*?]') diff --git a/servers/videomega.py b/servers/videomega.py index de473b1d..b2ec79b3 100644 --- a/servers/videomega.py +++ b/servers/videomega.py @@ -5,7 +5,7 @@ from platformcode import logger def get_video_url(page_url, video_password): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) video_urls = [] data = httptools.downloadpage(page_url).data m= scrapertools.find_single_match(data, '<link href="(Br74.*?==.css)"') diff --git a/servers/vidfast.py b/servers/vidfast.py index 382cb0bb..c31f1fd9 100644 --- a/servers/vidfast.py +++ b/servers/vidfast.py @@ -9,7 +9,7 @@ from platformcode import logger video_urls = [] def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) subtitles = "" response = httptools.downloadpage(page_url) @@ -21,9 +21,9 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) video_urls = [] - logger.info("Intel11 %s" %data) + logger.log("Intel11 %s" %data) media_url = scrapertools.find_single_match(data, 
'file:"([^"]+)') if media_url: ext = media_url[-4:] diff --git a/servers/vidlox.py b/servers/vidlox.py index 7cbb684e..12ce9d3c 100644 --- a/servers/vidlox.py +++ b/servers/vidlox.py @@ -11,7 +11,7 @@ from platformcode import logger def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) global data data = httptools.downloadpage(page_url).data if "borrado" in data or "Deleted" in data: @@ -21,7 +21,7 @@ def test_video_exists(page_url): def get_video_url(page_url, user="", password="", video_password=""): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) video_urls = [] bloque = scrapertools.find_single_match(data, 'sources:.\[.*?]') diff --git a/servers/vidmoly.py b/servers/vidmoly.py index 6252edff..681f00b5 100644 --- a/servers/vidmoly.py +++ b/servers/vidmoly.py @@ -5,7 +5,7 @@ from platformcode import logger, config def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) global data resp = httptools.downloadpage(page_url) data = resp.data @@ -15,7 +15,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("url=" + page_url) + logger.log("url=" + page_url) global data video_urls = support.get_jwplayer_mediaurl(data, 'Vidmoly') diff --git a/servers/vidoza.py b/servers/vidoza.py index c7a39de0..76d37ae0 100644 --- a/servers/vidoza.py +++ b/servers/vidoza.py @@ -8,7 +8,7 @@ from platformcode import logger, config def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) global data data = httptools.downloadpage(page_url).data if "Page not found" in data or "File was deleted" in data: @@ -20,7 +20,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) global data video_urls = [] diff --git a/servers/vidtodo.py b/servers/vidtodo.py index ba08b6b9..94d611ec 100755 --- a/servers/vidtodo.py +++ b/servers/vidtodo.py @@ -8,7 +8,7 @@ from platformcode import logger id_server = "vidtodo" response = "" def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) global response response = httptools.downloadpage(page_url) if not response.success or "Not Found" in response.data: @@ -19,7 +19,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) video_urls = [] data = response.data packed_data = scrapertools.find_single_match(data, "javascript'>(eval.*?)</script>") @@ -41,5 +41,5 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= video_urls.append([".mp4 [%s] %s" % (id_server, inf), mp4 % h]) video_urls.append(["RTMP [%s] %s" % (id_server, inf), "%s playpath=%s" % (rtmp, playpath)]) for video_url in video_urls: - logger.info("video_url: %s - %s" % (video_url[0], video_url[1])) + logger.log("video_url: %s - %s" % (video_url[0], video_url[1])) return video_urls diff --git a/servers/vidtome.py b/servers/vidtome.py index 9f1eeafc..062cf4df 100644 --- a/servers/vidtome.py +++ b/servers/vidtome.py @@ -6,7 +6,7 @@ from lib import jsunpack def test_video_exists(page_url): - logger.info("(page_url='%s')" % 
page_url) + logger.log("(page_url='%s')" % page_url) global data data = httptools.downloadpage(page_url).data if "Not Found" in data or "File Does not Exist" in data: @@ -15,7 +15,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("url=" + page_url) + logger.log("url=" + page_url) global data video_urls = [] code = scrapertools.find_single_match(data, 'name="code" value="([^"]+)') diff --git a/servers/vidup.py b/servers/vidup.py index c300d778..3da01527 100755 --- a/servers/vidup.py +++ b/servers/vidup.py @@ -20,7 +20,7 @@ from platformcode import logger def test_video_exists(page_url): return False, "[Vidup] Servidor Deshabilitado" - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) page = httptools.downloadpage(page_url) url = page.url if "Not Found" in page.data or "/404" in url: @@ -29,7 +29,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("url=" + page_url) + logger.log("url=" + page_url) video_urls = [] post= {} post = urllib.urlencode(post) diff --git a/servers/vimeo.py b/servers/vimeo.py index 5308551f..7fd880ac 100644 --- a/servers/vimeo.py +++ b/servers/vimeo.py @@ -5,7 +5,7 @@ from core import scrapertools from platformcode import logger, config headers = [['User-Agent', 'Mozilla/5.0']] def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) global data if "|" in page_url: @@ -23,7 +23,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) video_urls = [] global data @@ -38,6 +38,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= video_urls.sort(key=lambda x: x[2]) for video_url in video_urls: video_url[2] = 0 - logger.info("%s - %s" % (video_url[0], video_url[1])) + logger.log("%s - %s" % (video_url[0], video_url[1])) return video_urls diff --git a/servers/vimpleru.py b/servers/vimpleru.py index dc21c4aa..7588aecc 100644 --- a/servers/vimpleru.py +++ b/servers/vimpleru.py @@ -7,7 +7,7 @@ from platformcode import config, logger def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url).data if '"title":"Video Not Found"' in data: return False, config.get_localized_string(70449) % "Vimple" @@ -16,7 +16,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("(page_url=%s)" % page_url) + logger.log("(page_url=%s)" % page_url) data = httptools.downloadpage(page_url).data @@ -34,6 +34,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [vimple.ru]", media_url]) for video_url in video_urls: - logger.info("%s - %s" % (video_url[0], video_url[1])) + logger.log("%s - %s" % (video_url[0], video_url[1])) return video_urls diff --git a/servers/vivo.py b/servers/vivo.py index 4c7b73a3..559e0e60 100644 --- a/servers/vivo.py +++ b/servers/vivo.py @@ -9,7 +9,7 @@ from platformcode import logger def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url) if 
data.code == 404: return False, config.get_localized_string(70449) % "Vivo" @@ -17,7 +17,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("url=" + page_url) + logger.log("url=" + page_url) video_urls = [] data = httptools.downloadpage(page_url).data enc_data = scrapertools.find_single_match(data, 'data-stream="([^"]+)') diff --git a/servers/vk.py b/servers/vk.py index 0468cf8a..a4ec514a 100755 --- a/servers/vk.py +++ b/servers/vk.py @@ -17,7 +17,7 @@ from platformcode import config, logger def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) if not login(): return False, "Falta Ingresar/Actualizar las credenciales en el servidor vk. Configuracion - Preferencias - Ajustes de servidores - Configuración del servidor vk" data = httptools.downloadpage(page_url).data @@ -28,7 +28,7 @@ def test_video_exists(page_url): # Returns an array of possible video url's from the page_url def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) video_urls = [] data = httptools.downloadpage(page_url).data matches = scrapertools.find_multiple_matches(data, '<source src="([^"]+)" type="video/(\w+)') @@ -37,7 +37,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= video_urls.append([calidad + "p ." + ext + " [vk]", media_url]) video_urls.sort(key=lambda it: int(it[0].split("p ", 1)[0])) for video_url in video_urls: - logger.info("%s - %s" % (video_url[0], video_url[1])) + logger.log("%s - %s" % (video_url[0], video_url[1])) return video_urls diff --git a/servers/vshare.py b/servers/vshare.py index 20f1a000..a0ec42c9 100644 --- a/servers/vshare.py +++ b/servers/vshare.py @@ -9,7 +9,7 @@ from platformcode import logger def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) response = httptools.downloadpage(page_url) if response.code != 200 or "No longer available!" 
in response.data: return False, config.get_localized_string(70449) % "vshare" @@ -18,7 +18,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("url = " + page_url) + logger.log("url = " + page_url) headers = {"Referer":page_url} data = httptools.downloadpage(page_url, headers=headers).data flowplayer = re.search("url: [\"']([^\"']+)", data) diff --git a/servers/vudeo.py b/servers/vudeo.py index b5986b72..e2b6bd72 100644 --- a/servers/vudeo.py +++ b/servers/vudeo.py @@ -6,7 +6,7 @@ data = "" def test_video_exists(page_url): global data - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) response = httptools.downloadpage(page_url) if response.code == 404: @@ -18,5 +18,5 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): global data - logger.info("url=" + page_url) + logger.log("url=" + page_url) return support.get_jwplayer_mediaurl(data, 'vudeo') diff --git a/servers/vupplayer.py b/servers/vupplayer.py index 66238660..dcf187e7 100644 --- a/servers/vupplayer.py +++ b/servers/vupplayer.py @@ -5,7 +5,7 @@ from platformcode import logger, config def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) page = httptools.downloadpage(page_url) global data data = page.data @@ -15,7 +15,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("url=" + page_url) + logger.log("url=" + page_url) video_urls = [] global data patron = r'sources:\s*\[\{src:\s*"([^"]+)"' diff --git a/servers/vvvvid.py b/servers/vvvvid.py index 53f0568c..b3818ed6 100644 --- a/servers/vvvvid.py +++ b/servers/vvvvid.py @@ -19,7 +19,7 @@ payload = {'conn_id': conn_id} def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url).data if "Not Found" in data or "File was deleted" in data: return False, config.get_localized_string(70449) % "VVVVID" @@ -45,7 +45,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= # Getting info from Site json_url = "https://www.vvvvid.it/vvvvid/ondemand/" + show_id + '/season/' +season_id + '/' json_file = current_session.get(json_url, headers=headers, params=payload).json() - logger.info(json_file['data']) + logger.log(json_file['data']) # Search for the correct episode for episode in json_file['data']: diff --git a/servers/watchanimestream.py b/servers/watchanimestream.py index 3df92201..72b41db1 100644 --- a/servers/watchanimestream.py +++ b/servers/watchanimestream.py @@ -5,7 +5,7 @@ from platformcode import logger def get_video_url(page_url, video_password): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) video_urls = [] url = page_url.replace("/v/", "/api/source/") post = "r=&d=watchanimestream.net" diff --git a/servers/watchvideo.py b/servers/watchvideo.py index 81ce9abe..b8d57574 100644 --- a/servers/watchvideo.py +++ b/servers/watchvideo.py @@ -7,7 +7,7 @@ from platformcode import logger, config def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) global data data = httptools.downloadpage(page_url).data if "Not Found" in data or "File was deleted" in data: @@ -16,7 +16,7 @@ def test_video_exists(page_url): def 
get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("url=" + page_url) + logger.log("url=" + page_url) video_urls = [] media_urls = scrapertools.find_multiple_matches(data, 'file:"([^"]+)"') if not media_urls: @@ -34,5 +34,5 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= video_urls.append(["%s [watchvideo]" % (ext), media_url]) video_urls.reverse() for video_url in video_urls: - logger.info("%s - %s" % (video_url[0], video_url[1])) + logger.log("%s - %s" % (video_url[0], video_url[1])) return video_urls diff --git a/servers/wstream.py b/servers/wstream.py index 84c7c1f7..d72bdf1f 100644 --- a/servers/wstream.py +++ b/servers/wstream.py @@ -21,7 +21,7 @@ def test_video_exists(page_url): if 'nored.icu' in str(headers): real_host = 'wstream.video' - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) resp = httptools.downloadpage(page_url, headers=headers, verify=False) global data, real_url @@ -30,7 +30,7 @@ def test_video_exists(page_url): page_url = resp.url.replace(headers[1][1], real_host) if '/streaming.php' in page_url in page_url: code = httptools.downloadpage(page_url, headers=headers, follow_redirects=False, only_headers=True, verify=False).headers['location'].split('/')[-1].replace('.html', '') - # logger.info('WCODE=' + code) + # logger.log('WCODE=' + code) page_url = 'https://' + real_host + '/video.php?file_code=' + code data = httptools.downloadpage(page_url, headers=headers, follow_redirects=True, verify=False).data @@ -45,7 +45,7 @@ def test_video_exists(page_url): headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0'],['Host', 'wstream.video']] # from core.support import dbg;dbg() new_data = httptools.downloadpage(page_url, headers=headers, follow_redirects=True, verify=False).data - logger.info('NEW DATA: \n' + new_data) + logger.log('NEW DATA: \n' + new_data) if new_data: data = new_data @@ -93,11 +93,11 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= except: pass - logger.info("[Wstream] url=" + page_url) + logger.log("[Wstream] url=" + page_url) video_urls = [] global data, real_url, headers # from core.support import dbg;dbg() - # logger.info(data) + # logger.log(data) sitekey = scrapertools.find_multiple_matches(data, """data-sitekey=['"] *([^"']+)""") if sitekey: sitekey = sitekey[-1] diff --git a/servers/youdbox.py b/servers/youdbox.py index a58912b4..00f0de74 100644 --- a/servers/youdbox.py +++ b/servers/youdbox.py @@ -5,7 +5,7 @@ from platformcode import logger def get_video_url(page_url, video_password): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) video_urls = [] data = httptools.downloadpage(page_url).data url = scrapertools.find_single_match(data, '<source src="([^"]+)"') diff --git a/servers/yourupload.py b/servers/yourupload.py index 7625c0a3..f2410141 100755 --- a/servers/yourupload.py +++ b/servers/yourupload.py @@ -6,7 +6,7 @@ from platformcode import logger def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) global data data = httptools.downloadpage(page_url).data @@ -17,7 +17,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) video_urls = [] referer = {'Referer': page_url} diff --git 
a/servers/youtube.py b/servers/youtube.py index f0250994..267081c8 100644 --- a/servers/youtube.py +++ b/servers/youtube.py @@ -6,7 +6,7 @@ from platformcode import config, logger, platformtools name = 'plugin.video.youtube' def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url).data @@ -18,12 +18,12 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): import xbmc from xbmcaddon import Addon - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) video_urls = [] if not page_url.startswith("http"): page_url = "http://www.youtube.com/watch?v=%s" % page_url - logger.info(" page_url->'%s'" % page_url) + logger.log(" page_url->'%s'" % page_url) video_id = scrapertools.find_single_match(page_url, '(?:v=|embed/)([A-z0-9_-]{11})') # from core.support import dbg;dbg() diff --git a/servers/youwatch.py b/servers/youwatch.py index 82f461c3..3e7064ae 100644 --- a/servers/youwatch.py +++ b/servers/youwatch.py @@ -6,7 +6,7 @@ from platformcode import logger def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url).data if "File Not Found" in data: return False, config.get_localized_string(70449) % "Youwatch" @@ -20,7 +20,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url).data url_redirect = scrapertools.find_single_match(data, '<iframe src="([^"]+)"') @@ -31,6 +31,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= video_urls = [[scrapertools.get_filename_from_url(url)[-4:] + " [youwatch]", video_url]] for video_url in video_urls: - logger.info("%s - %s" % (video_url[0], video_url[1])) + logger.log("%s - %s" % (video_url[0], video_url[1])) return video_urls diff --git a/servers/zippyshare.py b/servers/zippyshare.py index 65c45a70..0d61f447 100755 --- a/servers/zippyshare.py +++ b/servers/zippyshare.py @@ -29,7 +29,7 @@ def test_video_exists(page_url): def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("(page_url='%s')" % page_url) + logger.log("(page_url='%s')" % page_url) video_urls = [] data = httptools.downloadpage(page_url).data @@ -43,5 +43,5 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= mediaurl = '%s%s' % (domain, url) extension = "." 
+ mediaurl.split('.')[-1] video_urls.append([extension + " [zippyshare]", mediaurl]) - logger.info("url=%s" %video_urls) + logger.log("url=%s" %video_urls) return video_urls diff --git a/service.py b/service.py index 12018c8a..22a7956a 100644 --- a/service.py +++ b/service.py @@ -19,7 +19,7 @@ from servers import torrent def update(path, p_dialog, i, t, serie, overwrite): - logger.info("Updating " + path) + logger.log("Updating " + path) insertados_total = 0 nfo_file = xbmc.translatePath(filetools.join(path, 'tvshow.nfo')) @@ -52,7 +52,7 @@ def update(path, p_dialog, i, t, serie, overwrite): serie.channel.capitalize())) try: pathchannels = filetools.join(config.get_runtime_path(), "channels", serie.channel + '.py') - logger.info("loading channel: " + pathchannels + " " + + logger.log("loading channel: " + pathchannels + " " + serie.channel) if serie.library_filter_show: @@ -105,7 +105,7 @@ def update(path, p_dialog, i, t, serie, overwrite): def check_for_update(overwrite=True): - logger.info("Update Series...") + logger.log("Update Series...") p_dialog = None serie_actualizada = False update_when_finished = False @@ -137,7 +137,7 @@ def check_for_update(overwrite=True): filetools.write(tvshow_file, head_nfo + serie.tojson()) path = filetools.dirname(tvshow_file) - logger.info("serie=" + serie.contentSerieName) + logger.log("serie=" + serie.contentSerieName) p_dialog.update(int(math.ceil((i + 1) * t)), heading, serie.contentSerieName) #Verificamos el estado del serie.library_playcounts de la Serie por si está incompleto @@ -249,7 +249,7 @@ def check_for_update(overwrite=True): p_dialog.close() else: - logger.info("Not update the video library, it is disabled") + logger.log("Not update the video library, it is disabled") except Exception as ex: logger.error("An error occurred while updating the series") @@ -279,7 +279,7 @@ def viewmodeMonitor(): if content: defaultMode = int(config.get_setting('view_mode_%s' % content).split(',')[-1]) if currentMode != defaultMode: - logger.info('viewmode changed: ' + currentModeName + '-' + str(currentMode) + ' - content: ' + content) + logger.log('viewmode changed: ' + currentModeName + '-' + str(currentMode) + ' - content: ' + content) config.set_setting('view_mode_%s' % content, currentModeName + ', ' + str(currentMode)) except: logger.error(traceback.print_exc()) @@ -319,7 +319,7 @@ class AddonMonitor(xbmc.Monitor): super(AddonMonitor, self).__init__() def onSettingsChanged(self): - logger.info('settings changed') + logger.log('settings changed') settings_post = config.get_all_settings_addon() from platformcode import xbmc_videolibrary @@ -360,11 +360,11 @@ class AddonMonitor(xbmc.Monitor): self.settings_pre = settings_post def onScreensaverActivated(self): - logger.info('screensaver activated, un-scheduling screen-on jobs') + logger.log('screensaver activated, un-scheduling screen-on jobs') schedule.clear('screenOn') def onScreensaverDeactivated(self): - logger.info('screensaver deactivated, re-scheduling screen-on jobs') + logger.log('screensaver deactivated, re-scheduling screen-on jobs') self.scheduleScreenOnJobs() def scheduleUpdater(self): @@ -372,7 +372,7 @@ class AddonMonitor(xbmc.Monitor): updaterCheck() self.updaterPeriod = config.get_setting('addon_update_timer') schedule.every(self.updaterPeriod).hours.do(updaterCheck).tag('updater') - logger.info('scheduled updater every ' + str(self.updaterPeriod) + ' hours') + logger.log('scheduled updater every ' + str(self.updaterPeriod) + ' hours') def scheduleVideolibrary(self): self.update_setting = 
config.get_setting("update", "videolibrary") @@ -380,23 +380,23 @@ class AddonMonitor(xbmc.Monitor): if self.update_setting == 2 or self.update_setting == 3: self.update_hour = config.get_setting("everyday_delay", "videolibrary") * 4 schedule.every().day.at(str(self.update_hour).zfill(2) + ':00').do(run_threaded, check_for_update, (False,)).tag('videolibrary') - logger.info('scheduled videolibrary at ' + str(self.update_hour).zfill(2) + ':00') + logger.log('scheduled videolibrary at ' + str(self.update_hour).zfill(2) + ':00') def scheduleScreenOnJobs(self): schedule.every().second.do(viewmodeMonitor).tag('screenOn') schedule.every().second.do(torrent.elementum_monitor).tag('screenOn') def onDPMSActivated(self): - logger.info('DPMS activated, un-scheduling screen-on jobs') + logger.log('DPMS activated, un-scheduling screen-on jobs') schedule.clear('screenOn') def onDPMSDeactivated(self): - logger.info('DPMS deactivated, re-scheduling screen-on jobs') + logger.log('DPMS deactivated, re-scheduling screen-on jobs') self.scheduleScreenOnJobs() if __name__ == "__main__": - logger.info('Starting KoD service') + logger.log('Starting KoD service') monitor = AddonMonitor() # mark as stopped all downloads (if we are here, probably kodi just started) diff --git a/specials/autoplay.py b/specials/autoplay.py index bb004f0c..069500db 100644 --- a/specials/autoplay.py +++ b/specials/autoplay.py @@ -29,7 +29,7 @@ def start(itemlist, item): if item.global_search: return itemlist - logger.info() + logger.log() global PLAYED PLAYED = False @@ -272,7 +272,7 @@ def start(itemlist, item): def play_multi_channel(item, itemlist): - logger.info() + logger.log() global PLAYED video_dict = dict() diff --git a/specials/backup.py b/specials/backup.py index c6405d45..f4248065 100644 --- a/specials/backup.py +++ b/specials/backup.py @@ -18,7 +18,7 @@ videolibrary_tvshows_path = u'' + videolibrarytools.TVSHOWS_PATH def export_videolibrary(item): - logger.info() + logger.log() zip_file_folder = platformtools.dialog_browse(3, config.get_localized_string(80002)) if zip_file_folder == "": @@ -51,7 +51,7 @@ def export_videolibrary(item): def import_videolibrary(item): - logger.info() + logger.log() zip_file = u'' + platformtools.dialog_browse(1, config.get_localized_string(80005), mask=".zip") if zip_file == "": diff --git a/specials/checkhost.py b/specials/checkhost.py index 31ea9187..d431fb3b 100644 --- a/specials/checkhost.py +++ b/specials/checkhost.py @@ -37,7 +37,7 @@ class Kdicc(): self.view_msg = view_msg self.lst_site_check_dns = lst_site_check_dns self.urls = [] - #logger.info("check #### INIZIO INIT#### ") + #logger.log("check #### INIZIO INIT#### ") def check_Ip(self): """ @@ -248,7 +248,7 @@ def check_channels(inutile=''): This is because it can happen that at any time the connection may have problems. If it does, check it relative writing of the file is interrupted with a warning message """ - logger.info() + logger.log() folderJson = xbmc.translatePath(xbmcaddon.Addon().getAddonInfo('path')).decode('utf-8') fileJson = 'channels.json' @@ -264,7 +264,7 @@ def check_channels(inutile=''): # to get an idea of ​​the timing # useful only if you control all channels # for channels with error 522 about 40 seconds are lost ... 
- logger.info("check #### INIZIO #### channel - host :%s - %s " % (chann, host)) + logger.log("check #### INIZIO #### channel - host :%s - %s " % (chann, host)) rslt = Kdicc(lst_urls = [host]).http_Resp() @@ -288,10 +288,10 @@ def check_channels(inutile=''): # risultato[chann] = 'Errore Sconosciuto - '+str(rslt['code']) +' - '+ host risultato[chann] = host - logger.info("check #### FINE #### rslt :%s " % (rslt)) + logger.log("check #### FINE #### rslt :%s " % (rslt)) fileJson_test = 'channels-test.json' # I write the updated file with open(folderJson+'/'+fileJson_test, 'w') as f: data = json.dump(risultato, f, sort_keys=True, indent=4) - logger.info(data) + logger.log(data) diff --git a/specials/downloads.py b/specials/downloads.py index 3f04fe3a..de50dd41 100644 --- a/specials/downloads.py +++ b/specials/downloads.py @@ -312,7 +312,7 @@ def menu(item): # Show Dialog seleccion = platformtools.dialog_select(config.get_localized_string(30163), opciones) - logger.info('SELECTION: '+ op[seleccion]) + logger.log('SELECTION: '+ op[seleccion]) # -1 is cancel if seleccion == -1: return diff --git a/specials/favorites.py b/specials/favorites.py index bc4ba832..aa58328b 100644 --- a/specials/favorites.py +++ b/specials/favorites.py @@ -24,7 +24,7 @@ except: def mainlist(item): - logger.info() + logger.log() itemlist = [] for name, thumb, data in read_favourites(): @@ -81,7 +81,7 @@ def save_favourites(favourites_list): def addFavourite(item): - logger.info() + logger.log() # logger.debug(item.tostring('\n')) # If you get here through the context menu, you must retrieve the action and channel parameters @@ -100,7 +100,7 @@ def addFavourite(item): def delFavourite(item): - logger.info() + logger.log() # logger.debug(item.tostring('\n')) if item.from_title: @@ -118,7 +118,7 @@ def delFavourite(item): def renameFavourite(item): - logger.info() + logger.log() # logger.debug(item.tostring('\n')) # Find the item we want to rename in favorites.xml @@ -137,7 +137,7 @@ def renameFavourite(item): ################################################## # Features to migrate old favorites (.txt) def readbookmark(filepath): - logger.info() + logger.log() import urllib bookmarkfile = filetools.file_open(filepath) @@ -230,6 +230,6 @@ try: if config.get_setting("bookmarkpath") != "": check_bookmark(config.get_setting("bookmarkpath")) else: - logger.info("No path to old version favorites") + logger.log("No path to old version favorites") except: pass diff --git a/specials/filmontv.py b/specials/filmontv.py index 1900558c..78f2552f 100644 --- a/specials/filmontv.py +++ b/specials/filmontv.py @@ -193,7 +193,7 @@ def listaCanali(item): thumbnail = None skip = False # return itemlist - # logger.info([i.title for i in itemlist]) + # logger.log([i.title for i in itemlist]) f.close() return sorted(itemlist, key=lambda x: x.title) diff --git a/specials/filtertools.py b/specials/filtertools.py index c03ffe28..6ea735d5 100644 --- a/specials/filtertools.py +++ b/specials/filtertools.py @@ -234,7 +234,7 @@ def get_link(list_item, item, list_language, list_quality=None, global_filter_la @return: Item list @rtype: list[Item] """ - logger.info() + logger.log() # if the required fields are None we leave if list_item is None or item is None: @@ -274,7 +274,7 @@ def get_links(list_item, item, list_language, list_quality=None, global_filter_l @return: lista de Item @rtype: list[Item] """ - logger.info() + logger.log() # if the required fields are None we leave @@ -362,7 +362,7 @@ def no_filter(item): @return: lista de enlaces @rtype: 
list[Item] """ - logger.info() + logger.log() itemlist = [] for i in item.list_item_all: @@ -384,7 +384,7 @@ def mainlist(channel, list_language, list_quality): @return: Item list @rtype: list[Item] """ - logger.info() + logger.log() itemlist = [] dict_series = jsontools.get_node_from_file(channel, TAG_TVSHOW_FILTER) @@ -425,8 +425,8 @@ def config_item(item): @param item: item @type item: Item """ - logger.info() - logger.info("item %s" % item.tostring()) + logger.log() + logger.log("item %s" % item.tostring()) # WE GET THE JSON DATA dict_series = jsontools.get_node_from_file(item.from_channel, TAG_TVSHOW_FILTER) @@ -448,8 +448,8 @@ def config_item(item): else: lang_selected = dict_series.get(tvshow, {}).get(TAG_LANGUAGE, default_lang) list_quality = dict_series.get(tvshow, {}).get(TAG_QUALITY_ALLOWED, [x.lower() for x in item.list_quality]) - # logger.info("lang selected {}".format(lang_selected)) - # logger.info("list quality {}".format(list_quality)) + # logger.log("lang selected {}".format(lang_selected)) + # logger.log("list quality {}".format(list_quality)) active = True custom_button = {'visible': False} @@ -516,7 +516,7 @@ def config_item(item): def delete(item, dict_values): - logger.info() + logger.log() if item: dict_series = jsontools.get_node_from_file(item.from_channel, TAG_TVSHOW_FILTER) @@ -554,7 +554,7 @@ def save(item, dict_data_saved): @param dict_data_saved: dictionary with saved data @type dict_data_saved: dict """ - logger.info() + logger.log() if item and dict_data_saved: logger.debug('item: %s\ndatos: %s' % (item.tostring(), dict_data_saved)) @@ -564,7 +564,7 @@ def save(item, dict_data_saved): dict_series = jsontools.get_node_from_file(item.from_channel, TAG_TVSHOW_FILTER) tvshow = item.show.strip().lower() - logger.info("Data is updated") + logger.log("Data is updated") list_quality = [] for _id, value in list(dict_data_saved.items()): @@ -599,7 +599,7 @@ def save_from_context(item): @param item: item @type item: item """ - logger.info() + logger.log() dict_series = jsontools.get_node_from_file(item.from_channel, TAG_TVSHOW_FILTER) tvshow = item.show.strip().lower() @@ -630,7 +630,7 @@ def delete_from_context(item): @param item: item @type item: item """ - logger.info() + logger.log() # We come from get_links and no result has been obtained, in context menu and we delete if item.to_channel != "": diff --git a/specials/help.py b/specials/help.py index 53d9cac8..8ce57173 100644 --- a/specials/help.py +++ b/specials/help.py @@ -37,7 +37,7 @@ if config.is_xbmc(): def mainlist(item): - logger.info() + logger.log() itemlist = [] if config.is_xbmc(): diff --git a/specials/infoplus.py b/specials/infoplus.py index e5e6c93b..627b5564 100644 --- a/specials/infoplus.py +++ b/specials/infoplus.py @@ -1225,7 +1225,7 @@ class related(xbmcgui.WindowDialog): def busqueda_global(item, infoLabels, org_title=False): - logger.info() + logger.log() logger.debug(item) @@ -1797,7 +1797,7 @@ class ActorInfo(xbmcgui.WindowDialog): xbmc.sleep(400) if exit_loop: break - logger.info("salimos carajo xD") + logger.log("salimos carajo xD") def onAction(self, action): global exit_loop diff --git a/specials/kodfavorites.py b/specials/kodfavorites.py index eec2efea..86eb13d0 100644 --- a/specials/kodfavorites.py +++ b/specials/kodfavorites.py @@ -126,7 +126,7 @@ class KodfavouritesData(object): # ============================ def addFavourite(item): - logger.info() + logger.log() alfav = KodfavouritesData() # If you get here through the context menu, you must retrieve the action and channel 
parameters @@ -179,7 +179,7 @@ def addFavourite(item): # ==================== def mainlist(item): - logger.info() + logger.log() alfav = KodfavouritesData() item.category = get_name_from_filename(os.path.basename(alfav.user_favorites_file)) @@ -216,7 +216,7 @@ def mainlist(item): def mostrar_perfil(item): - logger.info() + logger.log() alfav = KodfavouritesData() itemlist = [] @@ -289,7 +289,7 @@ def _crea_perfil(alfav): # Profile and link management def crear_perfil(item): - logger.info() + logger.log() alfav = KodfavouritesData() if not _crea_perfil(alfav): return False @@ -299,7 +299,7 @@ def crear_perfil(item): def editar_perfil_titulo(item): - logger.info() + logger.log() alfav = KodfavouritesData() if not alfav.user_favorites[item.i_perfil]: return False @@ -316,7 +316,7 @@ def editar_perfil_titulo(item): def eliminar_perfil(item): - logger.info() + logger.log() alfav = KodfavouritesData() if not alfav.user_favorites[item.i_perfil]: return False @@ -332,7 +332,7 @@ def eliminar_perfil(item): def acciones_enlace(item): - logger.info() + logger.log() acciones = [config.get_localized_string(70620), config.get_localized_string(70621), config.get_localized_string(70622), config.get_localized_string(70623), config.get_localized_string(70624), config.get_localized_string(70548), config.get_localized_string(70625), @@ -364,7 +364,7 @@ def acciones_enlace(item): def editar_enlace_titulo(item): - logger.info() + logger.log() alfav = KodfavouritesData() if not alfav.user_favorites[item.i_perfil]: return False @@ -386,7 +386,7 @@ def editar_enlace_titulo(item): def editar_enlace_color(item): - logger.info() + logger.log() alfav = KodfavouritesData() if not alfav.user_favorites[item.i_perfil]: return False @@ -410,7 +410,7 @@ def editar_enlace_color(item): def editar_enlace_thumbnail(item): - logger.info() + logger.log() alfav = KodfavouritesData() if not alfav.user_favorites[item.i_perfil]: return False @@ -470,7 +470,7 @@ def editar_enlace_thumbnail(item): def editar_enlace_carpeta(item): - logger.info() + logger.log() alfav = KodfavouritesData() if not alfav.user_favorites[item.i_perfil]: return False @@ -489,7 +489,7 @@ def editar_enlace_carpeta(item): def editar_enlace_lista(item): - logger.info() + logger.log() alfav = KodfavouritesData() if not alfav.user_favorites[item.i_perfil]: return False @@ -527,7 +527,7 @@ def editar_enlace_lista(item): def eliminar_enlace(item): - logger.info() + logger.log() alfav = KodfavouritesData() if not alfav.user_favorites[item.i_perfil]: return False @@ -542,7 +542,7 @@ def eliminar_enlace(item): # Move profiles and links (up, down, top, bottom) def mover_perfil(item): - logger.info() + logger.log() alfav = KodfavouritesData() alfav.user_favorites = _mover_item(alfav.user_favorites, item.i_perfil, item.direccion) @@ -552,7 +552,7 @@ def mover_perfil(item): return True def mover_enlace(item): - logger.info() + logger.log() alfav = KodfavouritesData() if not alfav.user_favorites[item.i_perfil]: return False @@ -597,7 +597,7 @@ def _mover_item(lista, i_selected, direccion): # ------------------------------------------ def mainlist_listas(item): - logger.info() + logger.log() itemlist = [] item.category = 'Listas' @@ -619,7 +619,7 @@ def mainlist_listas(item): def acciones_lista(item): - logger.info() + logger.log() acciones = [config.get_localized_string(70604), config.get_localized_string(70629), config.get_localized_string(70605), config.get_localized_string(70606), config.get_localized_string(70607)] @@ -641,7 +641,7 @@ def acciones_lista(item): def 
activar_lista(item): - logger.info() + logger.log() fullfilename = os.path.join(config.get_data_path(), item.lista) if not os.path.exists(fullfilename): @@ -659,7 +659,7 @@ def activar_lista(item): def renombrar_lista(item): - logger.info() + logger.log() fullfilename_current = os.path.join(config.get_data_path(), item.lista) if not os.path.exists(fullfilename_current): @@ -695,7 +695,7 @@ def renombrar_lista(item): def eliminar_lista(item): - logger.info() + logger.log() fullfilename = os.path.join(config.get_data_path(), item.lista) if not os.path.exists(fullfilename): @@ -714,7 +714,7 @@ def eliminar_lista(item): def informacion_lista(item): - logger.info() + logger.log() fullfilename = os.path.join(config.get_data_path(), item.lista) if not os.path.exists(fullfilename): @@ -741,7 +741,7 @@ def informacion_lista(item): def compartir_lista(item): - logger.info() + logger.log() fullfilename = os.path.join(config.get_data_path(), item.lista) if not os.path.exists(fullfilename): @@ -796,7 +796,7 @@ def compartir_lista(item): def acciones_nueva_lista(item): - logger.info() + logger.log() acciones = [config.get_localized_string(70651), config.get_localized_string(70652), @@ -830,7 +830,7 @@ def acciones_nueva_lista(item): def crear_lista(item): - logger.info() + logger.log() titulo = platformtools.dialog_input(default='', heading=config.get_localized_string(70612)) if titulo is None or titulo == '': @@ -853,7 +853,7 @@ def crear_lista(item): def descargar_lista(item, url): - logger.info() + logger.log() from core import httptools, scrapertools if 'tinyupload.com/' in url: diff --git a/specials/news.py b/specials/news.py index fe2d35eb..4d79a10a 100644 --- a/specials/news.py +++ b/specials/news.py @@ -42,7 +42,7 @@ menu_settings_path = os.path.join(config.get_data_path(), "settings_channels", ' def mainlist(item): - logger.info() + logger.log() itemlist = [] # list_canales, any_active = get_channels_list() @@ -128,7 +128,7 @@ def set_category_context(item): def get_channels_list(): - logger.info() + logger.log() ## import web_pdb; web_pdb.set_trace() ## list_canales = {'peliculas': [], '4k': [], 'terror': [], 'infantiles': [], 'series': [], 'anime': [], ## 'castellano': [], 'latino':[], 'italiano':[], 'torrent':[], 'documentales': []} @@ -166,14 +166,14 @@ def get_channels_list(): return list_canales, any_active def set_cache(item): - logger.info() + logger.log() item.mode = 'set_cache' t = Thread(target=novedades, args=[item]) t.start() #t.join() def get_from_cache(item): - logger.info() + logger.log() itemlist=[] cache_node = jsontools.get_node_from_file('menu_cache_data.json', 'cached') first=item.last @@ -198,7 +198,7 @@ def get_from_cache(item): return itemlist def add_menu_items(item, itemlist): - logger.info() + logger.log() menu_icon = get_thumb('menu.png') menu = Item(channel="channelselector", action="getmainlist", viewmode="movie", thumbnail=menu_icon, title='Menu') @@ -215,7 +215,7 @@ def add_menu_items(item, itemlist): return itemlist def novedades(item): - logger.info() + logger.log() global list_newest threads = [] @@ -231,7 +231,7 @@ def novedades(item): return get_from_cache(item) multithread = config.get_setting("multithread", "news") - logger.info("multithread= " + str(multithread)) + logger.log("multithread= " + str(multithread)) if not multithread: if platformtools.dialog_yesno(config.get_localized_string(60515), @@ -270,7 +270,7 @@ def novedades(item): # if progreso.iscanceled(): # progreso.close() - # logger.info("Búsqueda cancelada") + # logger.log("Búsqueda cancelada") 
+ # logger.log("Búsqueda cancelada")
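A minimal sketch (again not the add-on's code) of the thread-per-channel fan-out that novedades() performs in this hunk, with a hypothetical get_newest_stub() in place of the real get_newest():

    import time
    from threading import Thread

    list_newest = []  # shared accumulator, same role as the module-level list_newest

    def get_newest_stub(channel_id, categoria):
        # the real get_newest() imports the channel module and
        # extends list_newest with modulo.newest(categoria)
        list_newest.append((channel_id, categoria))

    def fetch_all(channel_ids, categoria):
        threads = [Thread(target=get_newest_stub, args=(cid, categoria)) for cid in channel_ids]
        for t in threads:
            t.start()
        while any(t.is_alive() for t in threads):
            time.sleep(0.5)  # the real loop updates the progress dialog and checks for cancel here
        return list_newest

The single-thread branch in this same hunk is the fallback used when the "multithread" setting is off.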
# return itemlist # Modo Multi Thread @@ -284,7 +284,7 @@ def novedades(item): # Modo single Thread else: if mode == 'normal': - logger.info("Obteniendo novedades de channel_id=" + channel_id) + logger.log("Obteniendo novedades de channel_id=" + channel_id) progreso.update(percentage, "", config.get_localized_string(60520) % channel_title) get_newest(channel_id, item.extra) @@ -304,7 +304,7 @@ def novedades(item): logger.debug(mensaje) if progreso.iscanceled(): - logger.info("Busqueda de novedades cancelada") + logger.log("Busqueda de novedades cancelada") break time.sleep(0.5) @@ -312,7 +312,7 @@ def novedades(item): if mode == 'normal': mensaje = config.get_localized_string(60522) % (len(list_newest), time.time() - start_time) progreso.update(100, mensaje) - logger.info(mensaje) + logger.log(mensaje) start_time = time.time() # logger.debug(start_time) @@ -345,7 +345,7 @@ def novedades(item): def get_newest(channel_id, categoria): - logger.info("channel_id=" + channel_id + ", categoria=" + categoria) + logger.log("channel_id=" + channel_id + ", categoria=" + categoria) global list_newest global list_newest_tourl @@ -366,9 +366,9 @@ def get_newest(channel_id, categoria): if not puede: return - logger.info("running channel " + modulo.__name__ + " " + modulo.__file__) + logger.log("running channel " + modulo.__name__ + " " + modulo.__file__) list_result = modulo.newest(categoria) - logger.info("canal= %s %d resultados" % (channel_id, len(list_result))) + logger.log("canal= %s %d resultados" % (channel_id, len(list_result))) exist=False if os.path.exists(menu_cache_path): cache_node = jsontools.get_node_from_file('menu_cache_data.json', 'cached') @@ -377,7 +377,7 @@ def get_newest(channel_id, categoria): cache_node = {} # logger.debug('cache node: %s' % cache_node) for item in list_result: - # logger.info("item="+item.tostring()) + # logger.log("item="+item.tostring()) item.channel = channel_id list_newest.append(item) list_newest_tourl.append(item.tourl()) @@ -532,7 +532,7 @@ def group_by_content(list_result_canal): def show_channels(item): - logger.info() + logger.log() global channels_id_name channels_id_name = item.extra itemlist = [] diff --git a/specials/resolverdns.py b/specials/resolverdns.py index a32e9e3e..0d55662d 100644 --- a/specials/resolverdns.py +++ b/specials/resolverdns.py @@ -65,13 +65,13 @@ class CipherSuiteAdapter(host_header_ssl.HostHeaderSSLAdapter): try: self.cur.execute('select ip from dnscache where domain=?', (domain,)) ip = self.cur.fetchall()[0][0] - logger.info('Cache DNS: ' + domain + ' = ' + str(ip)) + logger.log('Cache DNS: ' + domain + ' = ' + str(ip)) except: pass if not ip: # not cached try: ip = doh.query(domain)[0] - logger.info('Query DoH: ' + domain + ' = ' + str(ip)) + logger.log('Query DoH: ' + domain + ' = ' + str(ip)) self.writeToCache(domain, ip) except Exception: logger.error('Failed to resolve hostname, fallback to normal dns') @@ -132,8 +132,8 @@ class CipherSuiteAdapter(host_header_ssl.HostHeaderSSLAdapter): try: ret = super(CipherSuiteAdapter, self).send(request, **kwargs) except Exception as e: - logger.info('Request for ' + domain + ' with ip ' + ip + ' failed') - logger.info(e) + logger.log('Request for ' + domain + ' with ip ' + ip + ' failed') + logger.log(e) # if 'SSLError' in str(e): # # disabilito # config.set_setting("resolver_dns", False) @@ -142,7 +142,7 @@ class CipherSuiteAdapter(host_header_ssl.HostHeaderSSLAdapter): # else: tryFlush = True if tryFlush and not flushedDns: # re-request ips and update cache - logger.info('Flushing 
diff --git a/specials/search.py b/specials/search.py
index 052bbbe3..4fa53992 100644
--- a/specials/search.py
+++ b/specials/search.py
@@ -31,7 +31,7 @@ def_lang = info_language[config.get_setting("info_language", "videolibrary")]


 def mainlist(item):
-    logger.info()
+    logger.log()

     itemlist = [Item(channel=item.channel, title=config.get_localized_string(70276), action='new_search', mode='all', thumbnail=get_thumb("search.png")),
                 Item(channel=item.channel, title=config.get_localized_string(70741) % config.get_localized_string(30122), action='new_search', mode='movie', thumbnail=get_thumb("search_movie.png")),
@@ -47,7 +47,7 @@ def mainlist(item):


 def sub_menu(item):
-    logger.info()
+    logger.log()

     itemlist = [Item(channel=item.channel, action='genres_menu', title=config.get_localized_string(70306), mode='movie', thumbnail=get_thumb("movie_genre.png")),
                 Item(channel=item.channel, action='years_menu', title=config.get_localized_string(70742), mode='movie', thumbnail=get_thumb("movie_year.png")),
@@ -66,7 +66,7 @@ def sub_menu(item):


 def saved_search(item):
-    logger.info()
+    logger.log()

     itemlist = list()
     saved_searches_list = get_saved_searches()
@@ -93,7 +93,7 @@ def saved_search(item):


 def new_search(item):
-    logger.info()
+    logger.log()

     temp_search_file = config.get_temp_file('temp-search')
     if filetools.isfile(temp_search_file):
@@ -163,7 +163,7 @@ def new_search(item):


 def channel_search(item):
-    logger.info(item)
+    logger.log(item)

     start = time.time()
     searching = list()
@@ -389,14 +389,14 @@ def get_servers(item, module_dict):


 def get_info(itemlist):
-    logger.info()
+    logger.log()

     tmdb.set_infoLabels_itemlist(itemlist, True, forced=True)
     return itemlist


 def get_channels(item):
-    logger.info()
+    logger.log()

     channels_list = list()
     title_list = list()
@@ -717,7 +717,7 @@ def discover_list(item):


 def from_context(item):
-    logger.info()
+    logger.log()

     select = setting_channel_new(item)

@@ -738,7 +738,7 @@ def from_context(item):


 def set_context(itemlist):
-    logger.info()
+    logger.log()

     for elem in itemlist:
         elem.context = [{"title": config.get_localized_string(60412),
@@ -755,7 +755,7 @@ def set_context(itemlist):


 def get_from_temp(item):
-    logger.info()
+    logger.log()

     n = 30
     nTotal = len(item.itemlist)
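get_from_temp above pages the serialized search results thirty at a time (the n = 30 in the last hunk). A toy version of that slicing; the function name and the plain-list input are illustrative, not from specials/search.py.

```python
# Sketch only: page_size mirrors the n = 30 in get_from_temp.
def paginate(itemlist, page_size=30):
    # Yield successive slices of at most page_size items.
    for start in range(0, len(itemlist), page_size):
        yield itemlist[start:start + page_size]

print([len(page) for page in paginate(list(range(75)))])  # -> [30, 30, 15]
```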
diff --git a/specials/setting.py b/specials/setting.py
index 86e6c53e..aa0ac6ea 100644
--- a/specials/setting.py
+++ b/specials/setting.py
@@ -21,7 +21,7 @@ CHANNELNAME = "setting"
 AUTOSTART = config.is_autorun_enabled()

 def mainlist(item):
-    logger.info()
+    logger.log()
     itemlist = list()

     itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60535), action="settings", folder=False,
@@ -62,7 +62,7 @@ def mainlist(item):

 def menu_channels(item):
-    logger.info()
+    logger.log()
     itemlist = list()
     itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60545), action="conf_tools", folder=False,
@@ -98,15 +98,15 @@ def channel_config(item):

 def autostart(item):  # item required launcher.py line 265
     if config.enable_disable_autorun(AUTOSTART):
-        logger.info('AUTOSTART ENABLED')
+        logger.log('AUTOSTART ENABLED')
         # xbmcgui.Dialog().ok(config.get_localized_string(20000), config.get_localized_string(70709))
     else:
-        logger.info('AUTOSTART ENABLED')
+        logger.log('AUTOSTART DISABLED')
         # xbmcgui.Dialog().ok(config.get_localized_string(20000), config.get_localized_string(70710))

 # def setting_torrent(item):
-#     logger.info()
+#     logger.log()
 #     LIBTORRENT_PATH = config.get_setting("libtorrent_path", server="torrent", default="")
 #     LIBTORRENT_ERROR = config.get_setting("libtorrent_error", server="torrent", default="")
@@ -243,7 +243,7 @@ def autostart(item):  # item required launcher.py line 265
 #     config.set_setting("magnet2torrent", dict_data_saved["magnet2torrent"], server="torrent")

 def menu_servers(item):
-    logger.info()
+    logger.log()
     itemlist = list()

     itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60550), action="servers_blacklist", folder=False,
@@ -272,7 +272,7 @@ def menu_servers(item):
     for server in sorted(server_list):
         server_parameters = servertools.get_server_parameters(server)
-        logger.info(server_parameters)
+        logger.log(server_parameters)
         if server_parameters["has_settings"] and [x for x in server_parameters["settings"] if x["id"] not in ["black_list", "white_list"]]:
             itemlist.append(
                 Item(channel=CHANNELNAME, title=". " + config.get_localized_string(60553) % server_parameters["name"],
@@ -423,7 +423,7 @@ def settings(item):

 def submenu_tools(item):
-    logger.info()
+    logger.log()
     itemlist = list()

     # Custom tools
@@ -464,7 +464,7 @@ def submenu_tools(item):

 def check_quickfixes(item):
-    logger.info()
+    logger.log()
     if not config.dev_mode():
         from platformcode import updater
@@ -475,7 +475,7 @@ def check_quickfixes(item):

 # def update_quasar(item):
-#     logger.info()
+#     logger.log()
 #     from platformcode import custom_code, platformtools

 #     stat = False
@@ -487,7 +487,7 @@ def check_quickfixes(item):

 def conf_tools(item):
-    logger.info()
+    logger.log()

     # Enable or disable channels
     if item.extra == "channels_onoff":
@@ -589,14 +589,14 @@ def conf_tools(item):
                                          action="", folder=False, thumbnail=channel.thumbnail))
                     continue
-                # logger.info(channel.channel + " SALTADO!")
+                # logger.log(channel.channel + " SKIPPED!")

                 # The json file settings of the channel are loaded
                 file_settings = os.path.join(config.get_data_path(), "settings_channels", channel.channel + "_data.json")
                 dict_settings = {}
                 dict_file = {}
                 if filetools.exists(file_settings):
-                    # logger.info(channel.channel + " Has _data.json file")
+                    # logger.log(channel.channel + " Has _data.json file")
                     channeljson_exists = True
                     # We get saved settings from ../settings/channel_data.json
                     try:
@@ -606,7 +606,7 @@ def conf_tools(item):
                     except EnvironmentError:
                         logger.error("ERROR when reading the file: %s" % file_settings)
                 else:
-                    # logger.info(channel.channel + " No _data.json file")
+                    # logger.log(channel.channel + " No _data.json file")
                     channeljson_exists = False

                 if channeljson_exists:
@@ -626,7 +626,7 @@ def conf_tools(item):
                         # Default settings are loaded
                         list_controls, default_settings = channeltools.get_channel_controls_settings(channel.channel)
-                        # logger.info(channel.title + " | Default: %s" % default_settings)
+                        # logger.log(channel.title + " | Default: %s" % default_settings)
                     except:
                         import traceback
                         logger.error(channel.title + config.get_localized_string(60570) % traceback.format_exc())
@@ -648,7 +648,7 @@ def conf_tools(item):
                         list_status = config.get_localized_string(60571)

                 else:
-                    # logger.info(channel.channel + " - NO correction needed!")
+                    # logger.log(channel.channel + " - NO correction needed!")
                     needsfix = False

                 # If the channel status has been set it is added to the list
@@ -747,7 +747,7 @@ def channel_status(item, dict_values):
     for k in dict_values:
         if k == "all_channels":
-            logger.info("All channels | Selected state: %s" % dict_values[k])
+            logger.log("All channels | Selected state: %s" % dict_values[k])

             if dict_values[k] != 0:
                 excluded_channels = ['url', 'search', 'videolibrary', 'setting',
@@ -788,9 +788,9 @@ def channel_status(item, dict_values):
                 continue

         else:
-            logger.info("Channel: %s | State: %s" % (k, dict_values[k]))
+            logger.log("Channel: %s | State: %s" % (k, dict_values[k]))
             config.set_setting("enabled", dict_values[k], k)
-            logger.info("the value is like %s " % config.get_setting("enabled", k))
+            logger.log("the value is %s " % config.get_setting("enabled", k))

     platformtools.itemlist_update(Item(channel=CHANNELNAME, action="mainlist"))
@@ -878,7 +878,7 @@ def restore_tools(item):

 def report_menu(item):
-    logger.info('URL: ' + item.url)
+    logger.log('URL: ' + item.url)

     from channelselector import get_thumb
@@ -951,7 +951,7 @@ def report_menu(item):

 def activate_debug(item):
-    logger.info(item.extra)
+    logger.log(item.extra)
     from platformcode import platformtools

     # Enable / disable DEBUG option in settings.xml
@@ -1231,7 +1231,7 @@ def report_send(item, description='', fatal=False):
                 continue
             status = True  # Upload operation completed successfully
-            logger.info('Report created: ' + str(item.url))  # The URL of the user report is saved
+            logger.log('Report created: ' + str(item.url))  # The URL of the user report is saved
             # if fatal:  # For future use, for logger.crash
             #     platformtools.dialog_ok('KoD CREATED ERROR report', 'Report it in the forum by adding FATAL ERROR and this URL: ', '[COLOR gold]%s[/COLOR]' % item.url, pastebin_one_use_msg)
             # else:  # Report URL passed to user
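For context on the conf_tools hunks: the function walks each channel's <channel>_data.json and flags configurations whose saved keys no longer match the channel defaults (needsfix). A schematic of that repair, assuming the fix amounts to restoring missing defaults; the real defaults come from channeltools.get_channel_controls_settings, and the file layout mirrors the "_data.json" naming in the diff.

```python
import json
import os

# Hedged sketch of the conf_tools repair; names and the {"settings": ...}
# layout are assumptions about the saved file, not copied from setting.py.
def repair_channel_settings(data_path, channel, default_settings):
    file_settings = os.path.join(data_path, "settings_channels", channel + "_data.json")
    saved = {}
    if os.path.exists(file_settings):
        with open(file_settings) as f:
            saved = json.load(f).get("settings", {})
    needsfix = any(k not in saved for k in default_settings)
    if needsfix:  # add defaults for any keys the saved file lost
        for k, v in default_settings.items():
            saved.setdefault(k, v)
        with open(file_settings, "w") as f:
            json.dump({"settings": saved}, f)
    return needsfix
```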
diff --git a/specials/shortcuts.py b/specials/shortcuts.py
index 32a552d0..41acb967 100644
--- a/specials/shortcuts.py
+++ b/specials/shortcuts.py
@@ -126,7 +126,7 @@ def SettingOnPosition(item):
     xbmc.executebuiltin('Addon.OpenSettings(plugin.video.kod)')
     category = item.category if item.category else 0
     setting = item.setting if item.setting else 0
-    logger.info('SETTING= ' + str(setting))
+    logger.log('SETTING= ' + str(setting))
     xbmc.executebuiltin('SetFocus(%i)' % (category - 100))
     xbmc.executebuiltin('SetFocus(%i)' % (setting - 80))
diff --git a/specials/side_menu.py b/specials/side_menu.py
index 1ede1c95..a076c80a 100644
--- a/specials/side_menu.py
+++ b/specials/side_menu.py
@@ -43,7 +43,7 @@ def set_menu_settings(item):
     jsontools.update_node(menu_node, 'menu_settings_data.json', "menu")

 def check_user_home(item):
-    logger.info()
+    logger.log()
     if os.path.exists(menu_settings_path):
         menu_node = jsontools.get_node_from_file('menu_settings_data.json', 'menu')
         if 'user_home' in menu_node:
@@ -56,7 +56,7 @@ def check_user_home(item):
     return item

 def set_custom_start(item):
-    logger.info()
+    logger.log()
     if os.path.exists(menu_settings_path):
         menu_node = jsontools.get_node_from_file('menu_settings_data.json', 'menu')
     else:
@@ -70,7 +70,7 @@ def set_custom_start(item):
     jsontools.update_node(menu_node, 'menu_settings_data.json', "menu")

 def get_start_page():
-    logger.info()
+    logger.log()

     dictCategory = {
         config.get_localized_string(70137): 'peliculas',
@@ -357,7 +357,7 @@ class Main(xbmcgui.WindowXMLDialog):
             self.focus -= 1

     def run_action(self, item):
-        logger.info()
+        logger.log()
         if item.menu != True:
             self.close()
             xbmc.executebuiltin("Container.update(%s)" % launcher.run(item))
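SettingOnPosition in the shortcuts.py hunk above leans on a Kodi builtin quirk: after Addon.OpenSettings, SetFocus with an offset id jumps straight to a settings category or control. A hedged helper packaging the same two calls; the -100/-80 offsets are copied from the diff, and reading them as category/control offsets is an assumption.

```python
import xbmc

# Sketch of the jump-to-setting trick; both builtins (Addon.OpenSettings,
# SetFocus) are standard Kodi builtins, the offsets come from the diff.
def open_addon_setting(addon_id='plugin.video.kod', category=0, setting=0):
    xbmc.executebuiltin('Addon.OpenSettings(%s)' % addon_id)
    xbmc.executebuiltin('SetFocus(%i)' % (category - 100))  # focus the category tab
    xbmc.executebuiltin('SetFocus(%i)' % (setting - 80))    # then the control inside it
```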
diff --git a/specials/trailertools.py b/specials/trailertools.py
index a240cea3..b291d4f2 100644
--- a/specials/trailertools.py
+++ b/specials/trailertools.py
@@ -45,7 +45,7 @@ else:

 def buscartrailer(item, trailers=[]):
-    logger.info()
+    logger.log()

     # List of actions if run from context menu
     if item.action == "manual_search" and item.contextual:
@@ -80,8 +80,8 @@ def buscartrailer(item, trailers=[]):
         item.year = item.infoLabels['year']

-    logger.info("Search: %s" % item.contentTitle)
-    logger.info("Year: %s" % item.year)
+    logger.log("Search: %s" % item.contentTitle)
+    logger.log("Year: %s" % item.year)
     if item.infoLabels['trailer'] and not trailers:
         url = item.infoLabels['trailer']
         if "youtube" in url:
@@ -127,7 +127,7 @@ def buscartrailer(item, trailers=[]):

 def manual_search(item):
-    logger.info()
+    logger.log()
     texto = platformtools.dialog_input(default=item.contentTitle, heading=config.get_localized_string(30112))
     if texto is not None:
         if item.extra == "abandomoviez":
@@ -139,7 +139,7 @@ def manual_search(item):

 def tmdb_trailers(item, tipo="movie"):
-    logger.info()
+    logger.log()
     from core.tmdb import Tmdb

     itemlist = []
@@ -158,7 +158,7 @@ def tmdb_trailers(item, tipo="movie"):

 def youtube_search(item):
-    logger.info()
+    logger.log()
     itemlist = []
     titulo = item.contentTitle
     if item.extra != "youtube":
@@ -202,7 +202,7 @@ def youtube_search(item):

 def abandomoviez_search(item):
-    logger.info()
+    logger.log()

     # Check if it is a zero search or comes from the Next option
     if item.page != "":
@@ -253,7 +253,7 @@ def abandomoviez_search(item):

 def search_links_abando(item):
-    logger.info()
+    logger.log()
     data = httptools.downloadpage(item.url).data
     itemlist = []
     if "Lo sentimos, no tenemos trailer" in data:
@@ -308,7 +308,7 @@ def search_links_abando(item):

 def filmaffinity_search(item):
-    logger.info()
+    logger.log()

     if item.filmaffinity:
         item.url = item.filmaffinity
@@ -363,7 +363,7 @@ def filmaffinity_search(item):

 def search_links_filmaff(item):
-    logger.info()
+    logger.log()
     itemlist = []
     data = httptools.downloadpage(item.url).data
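buscartrailer above short-circuits when infoLabels['trailer'] already holds a YouTube link. A hedged sketch of that branch: extract the video id and hand it to Kodi's YouTube plugin. The regex and the plugin play URL are common practice, not code taken from trailertools.py.

```python
import re

# Illustrative only: pull an 11-character YouTube id out of a trailer URL
# and build a plugin:// URL Kodi can play.
def youtube_play_url(trailer_url):
    match = re.search(r'(?:v=|youtu\.be/|embed/)([A-Za-z0-9_-]{11})', trailer_url)
    if not match:
        return None
    return 'plugin://plugin.video.youtube/play/?video_id=%s' % match.group(1)

print(youtube_play_url('https://www.youtube.com/watch?v=dQw4w9WgXcQ'))
```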
diff --git a/specials/tvmoviedb.py b/specials/tvmoviedb.py
index 6a70c60d..78ccda67 100644
--- a/specials/tvmoviedb.py
+++ b/specials/tvmoviedb.py
@@ -31,11 +31,11 @@ adult_mal = config.get_setting('adult_mal', "tvmoviedb")
 mal_ck = "MzE1MDQ2cGQ5N2llYTY4Z2xwbGVzZjFzbTY="
 images_predef = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/"
 default_fan = filetools.join(config.get_runtime_path(), "fanart.jpg")
-logger.info('FANART= '+default_fan)
+logger.log('FANART= '+default_fan)

 def mainlist(item):
-    logger.info()
+    logger.log()
     itemlist = []
     # TMDB
     itemlist.append(item.clone(title=typo(config.get_localized_string(70021), 'bold'), action=""))
@@ -72,7 +72,7 @@ def configuracion(item):
     return ret

 def search_star(item):
-    logger.info()
+    logger.log()
     itemlist = []
     item.type='movie'
@@ -114,7 +114,7 @@ def search_(item):

 def busqueda(item):
-    logger.info()
+    logger.log()
     new_item = Item(title=item.contentTitle, text=item.contentTitle.replace("+", " "), mode=item.contentType,
                     infoLabels=item.infoLabels)
@@ -734,7 +734,7 @@ def indices_tmdb(item):

 def filtro(item):
-    logger.info()
+    logger.log()
     from datetime import datetime

     list_controls = []
@@ -833,7 +833,7 @@ def filtrado(item, values):

 def musica_movie(item):
-    logger.info()
+    logger.log()
     itemlist = []

     data = httptools.downloadpage(item.url).data
@@ -954,7 +954,7 @@ def listado_imdb(item):

 def filtro_imdb(item):
-    logger.info()
+    logger.log()
     from datetime import datetime

     list_controls = []
@@ -1561,7 +1561,7 @@ def detalles_fa(item):

 def filtro_fa(item):
-    logger.info()
+    logger.log()
     from datetime import datetime

     list_controls = []
@@ -1663,7 +1663,7 @@ def filtrado_fa(item, values):

 def login_fa():
-    logger.info()
+    logger.log()

     try:
         user = config.get_setting("usuariofa", "tvmoviedb")
@@ -1688,7 +1688,7 @@ def login_fa():
         userid = scrapertools.find_single_match(data, 'id-user=(\d+)')
         if userid:
             config.set_setting("userid", userid, "tvmoviedb")
-        logger.info("Login correcto")
+        logger.log("Login successful")
         return True, ""
     except:
         import traceback
@@ -1813,7 +1813,7 @@ def acciones_fa(item):

 def votar_fa(item):
     # Window to select the vote
-    logger.info()
+    logger.log()
     list_controls = []
     valores = {}
@@ -2215,7 +2215,7 @@ def acciones_trakt(item):

 def order_list(item):
-    logger.info()
+    logger.log()
     list_controls = []
     valores1 = ['rating', 'added', 'title', 'released', 'runtime', 'popularity', 'percentage', 'votes']
@@ -2956,7 +2956,7 @@ def info_anidb(item, itemlist, url):

 def filtro_mal(item):
-    logger.info()
+    logger.log()
     list_controls = []
     valores = {}
@@ -3040,7 +3040,7 @@ def callback_mal(item, values):

 def musica_anime(item):
     # List available anime and songs similar to the anime title
-    logger.info()
+    logger.log()
     itemlist = []

     data = httptools.downloadpage("http://www.freeanimemusic.org/song_search.php", post=item.post).data
@@ -3076,7 +3076,7 @@ def musica_anime(item):

 def login_mal(from_list=False):
-    logger.info()
+    logger.log()

     try:
         user = config.get_setting("usuariomal", "tvmoviedb")
@@ -3103,7 +3103,7 @@ def login_mal(from_list=False):
         else:
             if generic:
                 return False, config.get_localized_string(70381), user
-        logger.info("Login correcto")
+        logger.log("Login successful")
         return True, "", user
     except:
         import traceback
@@ -3135,7 +3135,7 @@ def cuenta_mal(item):

 def items_mal(item):
     # Scraper for personal lists
-    logger.info()
+    logger.log()
     itemlist = []
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t| ", "", data)
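login_fa and login_mal above both lean on scrapertools.find_single_match(data, pattern). A minimal stand-in with the same contract (first capture group, empty string when nothing matches); the real helper lives in core/scrapertools.py.

```python
import re

# Stand-in for core.scrapertools.find_single_match; behaviour assumed from
# how the diff uses it (single capture group, "" on no match).
def find_single_match(data, pattern):
    match = re.search(pattern, data, re.DOTALL)
    return match.group(1) if match else ""

print(find_single_match('<a id-user=4711>', r'id-user=(\d+)'))  # -> 4711
```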
diff --git a/specials/videolibrary.py b/specials/videolibrary.py
index 543834ad..1410dbcd 100644
--- a/specials/videolibrary.py
+++ b/specials/videolibrary.py
@@ -8,7 +8,6 @@ if sys.version_info[0] >= 3:
     PY3 = True; unicode = str; unichr = chr; long = int

 import xbmc, os, traceback
-from channelselector import get_thumb
 from core import filetools, scrapertools, videolibrarytools
 from core.support import typo, thumb
 from core.item import Item
@@ -22,18 +21,14 @@ else:

 def mainlist(item):
-    logger.info()
+    logger.log()

     itemlist = [Item(channel=item.channel, action="list_movies", title=config.get_localized_string(60509),
-                     category=config.get_localized_string(70270),
-                     thumbnail=get_thumb("videolibrary_movie.png")),
-                Item(channel=item.channel, action="list_tvshows", title=config.get_localized_string(60600),
-                     category=config.get_localized_string(70271),
-                     thumbnail=get_thumb("videolibrary_tvshow.png"),
+                     category=config.get_localized_string(70270), thumbnail=thumb("videolibrary_movie")),
+                Item(channel=item.channel, action="list_tvshows", title=config.get_localized_string(60600),
+                     category=config.get_localized_string(70271), thumbnail=thumb("videolibrary_tvshow"),
                      context=[{"channel":"videolibrary", "action":"update_videolibrary", "title":config.get_localized_string(70269)}]),
-                Item(channel='shortcuts', action="SettingOnPosition",
-                     category=2, setting=1, title=typo(config.get_localized_string(70287),'bold color kod'),
-                     thumbnail = get_thumb("setting_0.png"))]
+                Item(channel='shortcuts', action="SettingOnPosition", category=2, setting=1,
+                     title=typo(config.get_localized_string(70287),'bold color kod'), thumbnail=thumb("setting_0"))]
     return itemlist
@@ -42,7 +37,7 @@ def channel_config(item):

 def list_movies(item, silent=False):
-    logger.info()
+    logger.log()
     itemlist = []
     movies_path = []
     for root, folders, files in filetools.walk(videolibrarytools.MOVIES_PATH):
@@ -68,7 +63,7 @@ def list_movies(item, silent=False):

 def list_tvshows(item):
     from time import time
     start = time()
-    logger.info()
+    logger.log()
     itemlist = []
     lista = []
     tvshows_path = []
@@ -93,7 +88,7 @@ def list_tvshows(item):
                  title=typo(config.get_localized_string(70269), 'bold color kod'), folder=False),
             Item(channel=item.channel, action="configure_update_videolibrary", thumbnail=item.thumbnail,
                  title=typo(config.get_localized_string(60599), 'bold color kod'), lista=lista, folder=False)]
-    logger.info('TEMPO= ' + str(time() - start))
+    logger.log('TEMPO= ' + str(time() - start))
     return itemlist
@@ -192,7 +187,7 @@ def get_results(nfo_path, root, Type, local=False):
             # Contextual menu: Mark as seen / not seen
             visto = item.library_playcounts.get(item.contentTitle, 0)
             item.infoLabels["playcount"] = visto
-            logger.info('item\n' + str(item))
+            logger.log('item\n' + str(item))
             if visto > 0:
                 seen_text = config.get_localized_string(60020)
                 counter = 0
@@ -264,7 +259,7 @@ def configure_update_videolibrary(item):

 def get_seasons(item):
-    logger.info()
+    logger.log()
     # logger.debug("item:\n" + item.tostring('\n'))
     itemlist = []
     dict_temp = {}
@@ -323,7 +318,7 @@ def get_seasons(item):

 def get_episodes(item):
-    logger.info()
+    logger.log()
     # logger.debug("item:\n" + item.tostring('\n'))

     itemlist = []
@@ -387,7 +382,7 @@ def get_episodes(item):

 def findvideos(item):
     from specials import autoplay
-    logger.info()
+    logger.log()
     # logger.debug("item:\n" + item.tostring('\n'))
     videolibrarytools.check_renumber_options(item)
     itemlist = []
@@ -540,7 +535,7 @@ def findvideos(item):

 def play(item):
-    logger.info()
+    logger.log()
     # logger.debug("item:\n" + item.tostring('\n'))

     if not item.contentChannel == "local":
@@ -580,7 +575,7 @@ def play(item):
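list_movies near the top of this file builds its menu by walking videolibrarytools.MOVIES_PATH and collecting paths before items are built. A self-contained approximation of that scan, with os.walk standing in for filetools.walk; the .nfo filter is an assumption about what the elided loop body keeps.

```python
import os

# Hedged sketch of the library scan: visit every folder under the movies
# path and collect the .nfo files that describe each stored movie.
def collect_movie_nfos(movies_path):
    nfo_paths = []
    for root, folders, files in os.walk(movies_path):
        for name in files:
            if name.endswith('.nfo'):
                nfo_paths.append(os.path.join(root, name))
    return nfo_paths
```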
 def update_videolibrary(item=''):
-    logger.info()
+    logger.log()

     # Update active series by overwriting
     import service
@@ -600,17 +595,17 @@ def update_videolibrary(item=''):

 def move_videolibrary(current_path, new_path, current_movies_folder, new_movies_folder, current_tvshows_folder, new_tvshows_folder):
-    logger.info()
+    logger.log()

     backup_current_path = current_path
     backup_new_path = new_path

-    logger.info('current_path: ' + current_path)
-    logger.info('new_path: ' + new_path)
-    logger.info('current_movies_folder: ' + current_movies_folder)
-    logger.info('new_movies_folder: ' + new_movies_folder)
-    logger.info('current_tvshows_folder: ' + current_tvshows_folder)
-    logger.info('new_tvshows_folder: ' + new_tvshows_folder)
+    logger.log('current_path: ' + current_path)
+    logger.log('new_path: ' + new_path)
+    logger.log('current_movies_folder: ' + current_movies_folder)
+    logger.log('new_movies_folder: ' + new_movies_folder)
+    logger.log('current_tvshows_folder: ' + current_tvshows_folder)
+    logger.log('new_tvshows_folder: ' + new_tvshows_folder)

     notify = False
     progress = platformtools.dialog_progress_bg(config.get_localized_string(20000), config.get_localized_string(80011))
@@ -621,14 +616,14 @@ def move_videolibrary(current_path, new_path, current_movies_folder, new_movies_
     current_tvshows_path = u'' + filetools.join(current_path, current_tvshows_folder)
     new_tvshows_path = u'' + filetools.join(new_path, new_tvshows_folder)

-    logger.info('current_movies_path: ' + current_movies_path)
-    logger.info('new_movies_path: ' + new_movies_path)
-    logger.info('current_tvshows_path: ' + current_tvshows_path)
-    logger.info('new_tvshows_path: ' + new_tvshows_path)
+    logger.log('current_movies_path: ' + current_movies_path)
+    logger.log('new_movies_path: ' + new_movies_path)
+    logger.log('current_tvshows_path: ' + current_tvshows_path)
+    logger.log('new_tvshows_path: ' + new_tvshows_path)

     from platformcode import xbmc_videolibrary
     movies_path, tvshows_path = xbmc_videolibrary.check_sources(new_movies_path, new_tvshows_path)
-    logger.info('check_sources: ' + str(movies_path) + ', ' + str(tvshows_path))
+    logger.log('check_sources: ' + str(movies_path) + ', ' + str(tvshows_path))
     if movies_path or tvshows_path:
         if not movies_path:
             filetools.rmdir(new_movies_path)
@@ -673,7 +668,7 @@ def move_videolibrary(current_path, new_path, current_movies_folder, new_movies_

 def delete_videolibrary(item):
-    logger.info()
+    logger.log()

     if not platformtools.dialog_yesno(config.get_localized_string(20000), config.get_localized_string(80037)):
         return
@@ -699,7 +694,7 @@ def delete_videolibrary(item):

 # context menu methods
 def update_tvshow(item):
-    logger.info()
+    logger.log()
     # logger.debug("item:\n" + item.tostring('\n'))

     heading = config.get_localized_string(60037)
@@ -725,11 +720,11 @@ def update_tvshow(item):

 def add_local_episodes(item):
-    logger.info()
+    logger.log()

     done, local_episodes_path = videolibrarytools.config_local_episodes_path(item.path, item, silent=True)
     if done < 0:
-        logger.info("An issue has occurred while configuring local episodes")
+        logger.log("An issue has occurred while configuring local episodes")
     elif local_episodes_path:
         nfo_path = filetools.join(item.path, "tvshow.nfo")
         head_nfo, item_nfo = videolibrarytools.read_nfo(nfo_path)
@@ -744,7 +739,7 @@ def add_local_episodes(item):

 def remove_local_episodes(item):
-    logger.info()
+    logger.log()

     nfo_path = filetools.join(item.path, "tvshow.nfo")
     head_nfo, item_nfo = videolibrarytools.read_nfo(nfo_path)
@@ -762,7 +757,7 @@ def remove_local_episodes(item):
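The local-episodes hunks above repeat one round-trip: read_nfo() splits tvshow.nfo into a head line plus a serialized Item, the Item is tweaked, and head + json are written back (filetools.write(item.nfo, head_nfo + it.tojson())). A schematic version, assuming the json payload sits after a single head line; the real parsing lives in core/videolibrarytools.py.

```python
import json

# Hedged sketch of the nfo round-trip; the one-head-line layout and the
# plain-dict payload are assumptions, not the actual Item serialization.
def update_nfo(nfo_path, **changes):
    with open(nfo_path) as f:
        head, _, payload = f.read().partition('\n')
    data = json.loads(payload)
    data.update(changes)  # e.g. library_playcounts, active, ...
    with open(nfo_path, 'w') as f:
        f.write(head + '\n' + json.dumps(data))
```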
 def verify_playcount_series(item, path):
-    logger.info()
+    logger.log()
     """
     This method reviews and repairs the playcount of a series that has gone out of sync with the actual list of
     episodes in its folder. Entries for missing episodes, seasons, or series are created with the "not seen" mark,
     and the Season and Series counters are then verified.
@@ -825,7 +820,7 @@ def verify_playcount_series(item, path):

 def mark_content_as_watched2(item):
-    logger.info()
+    logger.log()
     # logger.debug("item:\n" + item.tostring('\n'))

     if filetools.isfile(item.nfo):
         head_nfo, it = videolibrarytools.read_nfo(item.nfo)
@@ -863,7 +858,7 @@ def mark_content_as_watched2(item):

 def mark_content_as_watched(item):
-    logger.info()
+    logger.log()
     # logger.debug("item:\n" + item.tostring('\n'))

     if filetools.exists(item.nfo):
@@ -901,7 +896,7 @@ def mark_content_as_watched(item):

 def mark_season_as_watched(item):
-    logger.info()
+    logger.log()
     # logger.debug("item:\n" + item.tostring('\n'))

     # Get dictionary of marked episodes
@@ -954,7 +949,7 @@ def mark_season_as_watched(item):

 def mark_tvshow_as_updatable(item, silent=False):
-    logger.info()
+    logger.log()
     head_nfo, it = videolibrarytools.read_nfo(item.nfo)
     it.active = item.active
     filetools.write(item.nfo, head_nfo + it.tojson())
@@ -987,7 +982,7 @@ def delete(item):
         elif platformtools.dialog_yesno(heading, config.get_localized_string(70081) % filetools.basename(_item.path)):
             filetools.rmdirtree(_item.path)
-            logger.info("All links removed")
+            logger.log("All links removed")
             xbmc.sleep(1000)
             platformtools.itemlist_refresh()
@@ -1058,7 +1053,7 @@ def delete(item):
             filetools.write(item.nfo, head_nfo + item_nfo.tojson())

             msg_txt = config.get_localized_string(70087) % (num_enlaces, canal)
-            logger.info(msg_txt)
+            logger.log(msg_txt)
             platformtools.dialog_notification(heading, msg_txt)
             platformtools.itemlist_refresh()
@@ -1068,7 +1063,7 @@ def delete(item):

 def check_season_playcount(item, season):
-    logger.info()
+    logger.log()

     if season:
         episodios_temporada = 0
@@ -1090,7 +1085,7 @@ def check_season_playcount(item, season):

 def check_tvshow_playcount(item, season):
-    logger.info()
+    logger.log()
     if season:
         temporadas_serie = 0
         temporadas_vistas_serie = 0
diff --git a/tests/test_generic.py b/tests/test_generic.py
index bcba9c69..43a5fef3 100644
--- a/tests/test_generic.py
+++ b/tests/test_generic.py
@@ -224,7 +224,7 @@ class GenericChannelMenuItemTest(unittest.TestCase):
                 break

         for resIt in self.itemlist:
-            logger.info(resIt.title + ' -> ' + resIt.url)
+            logger.log(resIt.title + ' -> ' + resIt.url)
             self.assertLess(len(resIt.fulltitle), 110,
                             'channel ' + self.ch + ' -> ' + self.title + ' might contain wrong titles<br>' + resIt.fulltitle)
             if resIt.url:
diff --git a/updatetvshow.py b/updatetvshow.py
index 7227ec72..6ab933c9 100644
--- a/updatetvshow.py
+++ b/updatetvshow.py
@@ -49,7 +49,7 @@ def search_paths(Id):

 def execute_sql(sql):
-    logger.info()
+    logger.log()

     file_db = ""
     records = None
@@ -69,14 +69,14 @@ def execute_sql(sql):
             break

     if file_db:
-        logger.info("DB file: %s" % file_db)
+        logger.log("DB file: %s" % file_db)
         conn = None
         try:
             import sqlite3
             conn = sqlite3.connect(file_db)
             cursor = conn.cursor()
-            logger.info("Running sql: %s" % sql)
+            logger.log("Running sql: %s" % sql)
             cursor.execute(sql)
             conn.commit()
@@ -86,7 +86,7 @@ def execute_sql(sql):
                 records = []

             conn.close()
-            logger.info("Query executed. Records: %s" % nun_records)
+            logger.log("Query executed. Records: %s" % nun_records)
         except:
             logger.error("Error executing sql query")
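As a companion to execute_sql above, the same connect/execute/commit/fetch cycle in isolation. The database path is illustrative; execute_sql locates Kodi's database file itself before running the query.

```python
import sqlite3

# Minimal sketch of the execute_sql cycle: open, run, commit, and return
# any rows a SELECT produced, mirroring the records handling in the diff.
def run_query(file_db, sql):
    conn = sqlite3.connect(file_db)
    cursor = conn.cursor()
    cursor.execute(sql)
    conn.commit()
    records = cursor.fetchall() if sql.lstrip().lower().startswith('select') else []
    conn.close()
    return records
```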