diff --git a/channels/altadefinizione01_link.py b/channels/altadefinizione01_link.py
index 17f2a409..e76ca1e0 100644
--- a/channels/altadefinizione01_link.py
+++ b/channels/altadefinizione01_link.py
@@ -73,7 +73,7 @@ def search(item, text):
except:
import sys
for line in sys.exc_info():
- logger.info("%s mainlist search log: %s" % (__channel__, line))
+ logger.error("%s" % line)
return []
# =========== def per le novità nel menu principale =============
diff --git a/channels/cineblog01.py b/channels/cineblog01.py
index 8ddea810..3137da60 100644
--- a/channels/cineblog01.py
+++ b/channels/cineblog01.py
@@ -76,7 +76,7 @@ def newest(categoria):
def search(item, text):
- logger.info(item, "search", text)
+ logger.info("search", text)
if item.contentType == 'tvshow': item.url = host + '/serietv/'
else: item.url = host
try:
@@ -189,14 +189,14 @@ def findvideos(item):
def load_links(itemlist, re_txt, desc_txt, quality=""):
streaming = scrapertools.find_single_match(data, re_txt).replace('"', '')
- support.info('STREAMING', streaming)
- support.info('STREAMING=', streaming)
+ logger.debug('STREAMING', streaming)
+ logger.debug('STREAMING=', streaming)
matches = support.match(streaming, patron = r'
|)?[^\d]*\d*(?:×|Ã)[0-9]+[^<]+)', '', item.other)
return support.server(item, data=data)
def play(item):
- support.info()
+ logger.debug()
return servertools.find_video_items(item, data=item.url)
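Note: several rewritten calls pass more than one positional argument (`logger.info('search', text)`, `logger.debug('STREAMING=', streaming)`) or none at all (`logger.debug()`). That only works if `platformcode.logger` exposes variadic helpers that join their arguments and derive context from the call stack. The sketch below is an assumption about that interface — names match the calls above, but the formatting and stack handling are illustrative, not the repo's actual implementation.

import inspect

def _log(level, *args):
    # Identify the module/function that issued the call (two frames up).
    caller = inspect.stack()[2]
    prefix = "[%s %s]" % (caller[1], caller[3])
    # Empty argument lists are legal: logger.debug() just logs the location.
    message = " ".join(str(a) for a in args)
    print("%s %s %s" % (level, prefix, message))

def info(*args): _log("INFO", *args)
def debug(*args): _log("DEBUG", *args)
def error(*args): _log("ERROR", *args)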
diff --git a/channels/film4k.py b/channels/film4k.py
index cc605718..3e82f3b3 100644
--- a/channels/film4k.py
+++ b/channels/film4k.py
@@ -29,7 +29,7 @@ def mainlist(item):
def search(item, text):
- logger.info()
+ logger.info('search', text)
item.url = item.url + "/?s=" + text
try:
return support.dooplay_search(item)
diff --git a/channels/guardaseriecam.py b/channels/guardaseriecam.py
index 1dc671de..cdab2da3 100644
--- a/channels/guardaseriecam.py
+++ b/channels/guardaseriecam.py
@@ -51,7 +51,7 @@ def episodios(item):
def search(item, text):
- support.info('search', item)
+ support.info('search', text)
item.contentType = 'tvshow'
itemlist = []
text = text.replace(' ', '+')
@@ -66,5 +66,5 @@ def search(item, text):
def findvideos(item):
- logger.info("[guardaserie_live] findvideos")
+ logger.debug()
return support.server(item, item.url)
\ No newline at end of file
diff --git a/channels/netfreex.py b/channels/netfreex.py
index c40dd8cc..442145e7 100644
--- a/channels/netfreex.py
+++ b/channels/netfreex.py
@@ -29,7 +29,7 @@ def mainlist(item):
def search(item, text):
- logger.info()
+ logger.info('search', text)
item.url = item.url + "/?s=" + text
try:
return support.dooplay_search(item)
diff --git a/channels/vedohd.py b/channels/vedohd.py
index 619135de..ffb81a21 100644
--- a/channels/vedohd.py
+++ b/channels/vedohd.py
@@ -30,7 +30,7 @@ def mainlist(item):
def search(item, text):
- logger.info("[vedohd.py] " + item.url + " search " + text)
+ logger.info("search",text)
item.url = item.url + "/?s=" + text
return support.dooplay_search(item, blacklist)
@@ -44,7 +44,6 @@ def findvideos(item):
itemlist = []
for link in support.dooplay_get_links(item, host):
if link['title'] != 'Trailer':
- logger.info(link['title'])
server, quality = scrapertools.find_single_match(link['title'], '([^ ]+) ?(HD|3D)?')
if quality:
title = server + " [COLOR blue][" + quality + "][/COLOR]"
@@ -63,7 +62,7 @@ def menu(item):
def play(item):
- logger.info("[vedohd.py] play")
+ logger.debug()
data = support.swzz_get_url(item)
diff --git a/channels/youtube_channel.py b/channels/youtube_channel.py
index 66409814..80d57a85 100644
--- a/channels/youtube_channel.py
+++ b/channels/youtube_channel.py
@@ -14,15 +14,15 @@ YOUTUBE_V3_API_KEY = "AIzaSyCjsmBT0JZy1RT-PLwB-Zkfba87sa2inyI"
def youtube_api_call(method, parameters):
- logger.info("method=" + method + ", parameters=" + repr(parameters))
+ logger.debug("method=" + method + ", parameters=" + repr(parameters))
encoded_parameters = urllib.urlencode(parameters)
url = "https://www.googleapis.com/youtube/v3/" + method + "?" + encoded_parameters + "&key=" + YOUTUBE_V3_API_KEY;
- logger.info("url=" + url)
+ logger.debug("url=" + url)
data = httptools.downloadpage(url).data
- logger.info("data=" + data)
+ logger.debug("data=" + data)
json_object = jsontools.load(data)
@@ -51,13 +51,13 @@ def youtube_get_playlist_items(playlist_id, pageToken=""):
# Show all YouTube playlists for the selected channel
def playlists(item, channel_id, pageToken=""):
- logger.info()
+ logger.debug()
itemlist = []
json_object = youtube_get_user_playlists(channel_id, pageToken)
for entry in json_object["items"]:
- logger.info("entry=" + repr(entry))
+ logger.debug("entry=" + repr(entry))
title = entry["snippet"]["title"]
plot = entry["snippet"]["description"]
@@ -85,13 +85,13 @@ def latest_videos(item, channel_id):
# Show all YouTube videos for the selected playlist
def videos(item, pageToken=""):
- logger.info()
+ logger.debug()
itemlist = []
json_object = youtube_get_playlist_items(item.url, pageToken)
for entry in json_object["items"]:
- logger.info("entry=" + repr(entry))
+ logger.debug("entry=" + repr(entry))
title = entry["snippet"]["title"]
plot = entry["snippet"]["description"]
diff --git a/channelselector.py b/channelselector.py
index 9f810eff..649772c5 100644
--- a/channelselector.py
+++ b/channelselector.py
@@ -9,7 +9,7 @@ downloadenabled = addon.getSetting('downloadenabled')
def getmainlist(view="thumb_"):
- logger.info()
+ logger.debug()
itemlist = list()
if config.dev_mode():
@@ -62,14 +62,14 @@ def getmainlist(view="thumb_"):
def getchanneltypes(view="thumb_"):
- logger.info()
+ logger.debug()
# Category List
channel_types = ["movie", "tvshow", "anime", "documentary", "vos", "live", "torrent", "music"] #, "direct"
# Channel Language
channel_language = auto_filter()
- logger.info("channel_language=%s" % channel_language)
+ logger.debug("channel_language=%s" % channel_language)
# Build Itemlist
itemlist = list()
@@ -92,7 +92,7 @@ def getchanneltypes(view="thumb_"):
def filterchannels(category, view="thumb_"):
from core import channeltools
- logger.info('Filter Channels ' + category)
+ logger.debug('Filter Channels ' + category)
channelslist = []
@@ -103,14 +103,14 @@ def filterchannels(category, view="thumb_"):
appenddisabledchannels = True
channel_path = os.path.join(config.get_runtime_path(), 'channels', '*.json')
- logger.info("channel_path = %s" % channel_path)
+ logger.debug("channel_path = %s" % channel_path)
channel_files = glob.glob(channel_path)
- logger.info("channel_files found %s" % (len(channel_files)))
+ logger.debug("channel_files found %s" % (len(channel_files)))
# Channel Language
channel_language = auto_filter()
- logger.info("channel_language=%s" % channel_language)
+ logger.debug("channel_language=%s" % channel_language)
for channel_path in channel_files:
logger.debug("channel in for = %s" % channel_path)
@@ -221,7 +221,7 @@ def get_thumb(thumb_name, view="thumb_"):
def set_channel_info(parameters):
- logger.info()
+ logger.debug()
info = ''
language = ''
diff --git a/core/autoplay.py b/core/autoplay.py
index f6707ffb..a8d141d6 100644
--- a/core/autoplay.py
+++ b/core/autoplay.py
@@ -29,7 +29,7 @@ def start(itemlist, item):
if item.global_search:
return itemlist
- logger.info()
+ logger.debug()
global PLAYED
PLAYED = False
@@ -274,7 +274,7 @@ def start(itemlist, item):
def play_multi_channel(item, itemlist):
- logger.info()
+ logger.debug()
start(itemlist, item)
diff --git a/core/channeltools.py b/core/channeltools.py
index 90a2dd74..ff5a0442 100644
--- a/core/channeltools.py
+++ b/core/channeltools.py
@@ -15,7 +15,7 @@ default_file = dict()
remote_path = 'https://raw.githubusercontent.com/kodiondemand/media/master/'
def is_enabled(channel_name):
- logger.info("channel_name=" + channel_name)
+ logger.debug("channel_name=" + channel_name)
return get_channel_parameters(channel_name)["active"] and get_channel_setting("enabled", channel=channel_name,
default=True)
@@ -27,7 +27,7 @@ def get_channel_parameters(channel_name):
if channel_name not in dict_channels_parameters:
try:
channel_parameters = get_channel_json(channel_name)
- # logger.debug(channel_parameters)
+ logger.debug(channel_parameters)
if channel_parameters:
# name and default changes
channel_parameters["title"] = channel_parameters.pop("name") + (' [DEPRECATED]' if 'deprecated' in channel_parameters and channel_parameters['deprecated'] else '')
@@ -87,7 +87,7 @@ def get_channel_parameters(channel_name):
def get_channel_json(channel_name):
- # logger.info("channel_name=" + channel_name)
+ logger.debug("channel_name=" + channel_name)
from core import filetools
channel_json = None
try:
@@ -101,9 +101,9 @@ def get_channel_json(channel_name):
channel_name + ".json")
if filetools.isfile(channel_path):
- # logger.info("channel_data=" + channel_path)
+ logger.debug("channel_data=" + channel_path)
channel_json = jsontools.load(filetools.read(channel_path))
- # logger.info("channel_json= %s" % channel_json)
+ logger.debug("channel_json= %s" % channel_json)
except Exception as ex:
template = "An exception of type %s occured. Arguments:\n%r"
@@ -114,7 +114,7 @@ def get_channel_json(channel_name):
def get_channel_controls_settings(channel_name):
- # logger.info("channel_name=" + channel_name)
+ logger.debug("channel_name=" + channel_name)
dict_settings = {}
# import web_pdb; web_pdb.set_trace()
# list_controls = get_channel_json(channel_name).get('settings', list())
@@ -137,7 +137,7 @@ def get_lang(channel_name):
if hasattr(channel, 'list_language'):
for language in channel.list_language:
list_language.append(language)
- logger.info(list_language)
+ logger.debug(list_language)
else:
sub = False
langs = []
diff --git a/core/downloader.py b/core/downloader.py
index b714b474..919f9469 100644
--- a/core/downloader.py
+++ b/core/downloader.py
@@ -253,7 +253,7 @@ class Downloader(object):
self.file.seek(2 ** 31, 0)
except OverflowError:
self._seekable = False
- logger.info("Cannot do seek() or tell() in files larger than 2GB")
+ logger.error("Cannot do seek() or tell() in files larger than 2GB")
self.__get_download_info__()
diff --git a/core/filetools.py b/core/filetools.py
index 554a2671..481f015c 100644
--- a/core/filetools.py
+++ b/core/filetools.py
@@ -814,7 +814,7 @@ def remove_tags(title):
@rtype: str
@return: string without tags
"""
- logger.info()
+ logger.debug()
title_without_tags = scrapertools.find_single_match(title, r'\[color .+?\](.+)\[\/color\]')
@@ -832,7 +832,7 @@ def remove_smb_credential(path):
@return: chain without credentials
@rtype: str
"""
- logger.info()
+ logger.debug()
if not scrapertools.find_single_match(path, r'(^\w+:\/\/)'):
return path
diff --git a/core/filtertools.py b/core/filtertools.py
index c03ffe28..09f016db 100644
--- a/core/filtertools.py
+++ b/core/filtertools.py
@@ -234,7 +234,7 @@ def get_link(list_item, item, list_language, list_quality=None, global_filter_la
@return: Item list
@rtype: list[Item]
"""
- logger.info()
+ logger.debug()
# if the required fields are None we leave
if list_item is None or item is None:
@@ -274,7 +274,7 @@ def get_links(list_item, item, list_language, list_quality=None, global_filter_l
@return: lista de Item
@rtype: list[Item]
"""
- logger.info()
+ logger.debug()
# if the required fields are None we leave
@@ -362,7 +362,7 @@ def no_filter(item):
@return: lista de enlaces
@rtype: list[Item]
"""
- logger.info()
+ logger.debug()
itemlist = []
for i in item.list_item_all:
@@ -384,7 +384,7 @@ def mainlist(channel, list_language, list_quality):
@return: Item list
@rtype: list[Item]
"""
- logger.info()
+ logger.debug()
itemlist = []
dict_series = jsontools.get_node_from_file(channel, TAG_TVSHOW_FILTER)
@@ -425,8 +425,8 @@ def config_item(item):
@param item: item
@type item: Item
"""
- logger.info()
- logger.info("item %s" % item.tostring())
+ logger.debug()
+ logger.debug("item %s" % item.tostring())
# WE GET THE JSON DATA
dict_series = jsontools.get_node_from_file(item.from_channel, TAG_TVSHOW_FILTER)
@@ -448,8 +448,8 @@ def config_item(item):
else:
lang_selected = dict_series.get(tvshow, {}).get(TAG_LANGUAGE, default_lang)
list_quality = dict_series.get(tvshow, {}).get(TAG_QUALITY_ALLOWED, [x.lower() for x in item.list_quality])
- # logger.info("lang selected {}".format(lang_selected))
- # logger.info("list quality {}".format(list_quality))
+ # logger.debug("lang selected {}".format(lang_selected))
+ # logger.debug("list quality {}".format(list_quality))
active = True
custom_button = {'visible': False}
@@ -516,7 +516,7 @@ def config_item(item):
def delete(item, dict_values):
- logger.info()
+ logger.debug()
if item:
dict_series = jsontools.get_node_from_file(item.from_channel, TAG_TVSHOW_FILTER)
@@ -554,7 +554,7 @@ def save(item, dict_data_saved):
@param dict_data_saved: dictionary with saved data
@type dict_data_saved: dict
"""
- logger.info()
+ logger.debug()
if item and dict_data_saved:
logger.debug('item: %s\ndatos: %s' % (item.tostring(), dict_data_saved))
@@ -564,7 +564,7 @@ def save(item, dict_data_saved):
dict_series = jsontools.get_node_from_file(item.from_channel, TAG_TVSHOW_FILTER)
tvshow = item.show.strip().lower()
- logger.info("Data is updated")
+ logger.debug("Data is updated")
list_quality = []
for _id, value in list(dict_data_saved.items()):
@@ -599,7 +599,7 @@ def save_from_context(item):
@param item: item
@type item: item
"""
- logger.info()
+ logger.debug()
dict_series = jsontools.get_node_from_file(item.from_channel, TAG_TVSHOW_FILTER)
tvshow = item.show.strip().lower()
@@ -630,7 +630,7 @@ def delete_from_context(item):
@param item: item
@type item: item
"""
- logger.info()
+ logger.debug()
# We come from get_links and no result has been obtained, in context menu and we delete
if item.to_channel != "":
diff --git a/core/httptools.py b/core/httptools.py
index a06eb9ac..ae7de722 100755
--- a/core/httptools.py
+++ b/core/httptools.py
@@ -449,7 +449,7 @@ def downloadpage(url, **opt):
if not 'api.themoviedb' in url and not opt.get('alfa_s', False):
show_infobox(info_dict)
-
+ if not config.get_setting("debug"): logger.info('Page URL:',url)
return type('HTTPResponse', (), response)
def fill_fields_pre(url, opt, proxy_data, file_name):
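The functional addition here: when the global "debug" setting is off, `downloadpage` now still leaves a single info-level trace of the page URL, while the verbose `show_infobox` dump continues to cover the debug case. A runnable sketch of that assumed intent, with stand-ins for the repo's `config`, `logger`, and `show_infobox`:

# Stand-ins for core/httptools collaborators -- assumptions, not the real API.
class config:
    @staticmethod
    def get_setting(name): return False      # pretend the "debug" toggle is off

class logger:
    @staticmethod
    def info(*args): print("INFO", *args)

def show_infobox(info_dict): print("[infobox]", info_dict)

def trace_download(url, info_dict, alfa_s=False):
    if 'api.themoviedb' not in url and not alfa_s:
        show_infobox(info_dict)               # existing verbose path
    if not config.get_setting("debug"):       # the added terse trace
        logger.info('Page URL:', url)

trace_download("https://example.com/serietv/", {"status": 200})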
diff --git a/core/jsontools.py b/core/jsontools.py
index b2fed3a6..6bfe960f 100644
--- a/core/jsontools.py
+++ b/core/jsontools.py
@@ -11,22 +11,22 @@ from inspect import stack
try:
import json
except:
- logger.info("json included in the interpreter **NOT** available")
+ logger.error("json included in the interpreter **NOT** available")
try:
import simplejson as json
except:
- logger.info("simplejson included in the interpreter **NOT** available")
+ logger.error("simplejson included in the interpreter **NOT** available")
try:
from lib import simplejson as json
except:
- logger.info("simplejson in lib directory **NOT** available")
+ logger.error("simplejson in lib directory **NOT** available")
logger.error("A valid JSON parser was not found")
json = None
else:
logger.info("Using simplejson in the lib directory")
else:
- logger.info("Using simplejson included in the interpreter")
+ logger.error("Using simplejson included in the interpreter")
# ~ else:
# ~ logger.info("Usando json incluido en el interprete")
diff --git a/core/scraper.py b/core/scraper.py
index 3fd90d5e..d5aa9ac2 100644
--- a/core/scraper.py
+++ b/core/scraper.py
@@ -62,7 +62,7 @@ def find_and_set_infoLabels(item):
# Check if there is a 'code'
if scraper_result and item.infoLabels['code']:
# correct code
- logger.info("Identificador encontrado: %s" % item.infoLabels['code'])
+ logger.debug("Identificador encontrado: %s" % item.infoLabels['code'])
scraper.completar_codigos(item)
return True
elif scraper_result:
@@ -72,7 +72,7 @@ def find_and_set_infoLabels(item):
# Content not found
msg = config.get_localized_string(60228) % title
- logger.info(msg)
+ logger.debug(msg)
# Show box with other options:
item = platformtools.dialog_info(item, scraper_actual)
if item.exit:
@@ -83,7 +83,7 @@ def find_and_set_infoLabels(item):
def cuadro_completar(item):
- logger.info()
+ logger.debug()
global dict_default
dict_default = {}
@@ -196,7 +196,7 @@ def get_nfo(item):
@rtype: str
@return:
"""
- logger.info()
+ logger.debug()
if "infoLabels" in item and "noscrap_id" in item.infoLabels:
# Create the xml file with the data obtained from the item since there is no active scraper
info_nfo = ''
diff --git a/core/scrapertools.py b/core/scrapertools.py
index 3df0dab8..aa926c8b 100644
--- a/core/scrapertools.py
+++ b/core/scrapertools.py
@@ -34,7 +34,7 @@ from platformcode import logger
def printMatches(matches):
i = 0
for match in matches:
- logger.info("%d %s" % (i, match))
+ logger.debug("%d %s" % (i, match))
i = i + 1
@@ -447,7 +447,7 @@ def get_season_and_episode(title):
except:
pass
- logger.info("'" + title + "' -> '" + filename + "'")
+ logger.debug("'" + title + "' -> '" + filename + "'")
return filename
diff --git a/core/servertools.py b/core/servertools.py
index 19e688f9..8017f6c7 100644
--- a/core/servertools.py
+++ b/core/servertools.py
@@ -47,7 +47,7 @@ def find_video_items(item=None, data=None):
@return: returns the itemlist with the results
@rtype: list
"""
- logger.info()
+ logger.debug()
itemlist = []
# Download the page
@@ -97,7 +97,7 @@ def get_servers_itemlist(itemlist, fnc=None, sort=False):
# Walk the patterns
for pattern in server_parameters.get("find_videos", {}).get("patterns", []):
- logger.info(pattern["pattern"])
+ logger.debug(pattern["pattern"])
# Scroll through the results
for match in re.compile(pattern["pattern"], re.DOTALL).finditer(
"\n".join([item.url.split('|')[0] for item in itemlist if not item.server])):
@@ -144,7 +144,7 @@ def findvideos(data, skip=False):
return some link. It can also be an integer greater than 1, which would represent the maximum number of links to search.
:return:
"""
- logger.info()
+ logger.debug()
devuelve = []
skip = int(skip)
servers_list = list(get_servers_list().keys())
@@ -181,7 +181,7 @@ def findvideosbyserver(data, serverid):
value = translate_server_name(server_parameters["name"]) , url, serverid, server_parameters.get("thumbnail", "")
if value not in devuelve and url not in server_parameters["find_videos"].get("ignore_urls", []):
devuelve.append(value)
- logger.info(msg)
+ logger.debug(msg)
return devuelve
@@ -193,7 +193,7 @@ def guess_server_thumbnail(serverid):
def get_server_from_url(url):
- logger.info()
+ logger.debug()
servers_list = list(get_servers_list().keys())
# Run findvideos on each active server
@@ -211,7 +211,7 @@ def get_server_from_url(url):
for n, pattern in enumerate(server_parameters["find_videos"].get("patterns", [])):
msg = "%s\npattern: %s" % (serverid, pattern["pattern"])
if not "pattern_compiled" in pattern:
- # logger.info('compiled ' + serverid)
+ # logger.debug('compiled ' + serverid)
pattern["pattern_compiled"] = re.compile(pattern["pattern"])
dict_servers_parameters[serverid]["find_videos"]["patterns"][n]["pattern_compiled"] = pattern["pattern_compiled"]
# Scroll through the results
@@ -224,7 +224,7 @@ def get_server_from_url(url):
msg += "\nurl encontrada: %s" % url
value = translate_server_name(server_parameters["name"]), url, serverid, server_parameters.get("thumbnail", "")
if url not in server_parameters["find_videos"].get("ignore_urls", []):
- logger.info(msg)
+ logger.debug(msg)
return value
return None
@@ -616,7 +616,7 @@ def get_server_setting(name, server, default=None):
dict_file['settings'] = dict_settings
# We create the file ../settings/channel_data.json
if not filetools.write(file_settings, jsontools.dump(dict_file)):
- logger.info("ERROR saving file: %s" % file_settings)
+ logger.error("ERROR saving file: %s" % file_settings)
# We return the value of the local parameter 'name' if it exists, if default is not returned
return dict_settings.get(name, default)
@@ -638,7 +638,7 @@ def set_server_setting(name, value, server):
dict_file = jsontools.load(filetools.read(file_settings))
dict_settings = dict_file.get('settings', {})
except EnvironmentError:
- logger.info("ERROR when reading the file: %s" % file_settings)
+ logger.error("ERROR when reading the file: %s" % file_settings)
dict_settings[name] = value
@@ -650,7 +650,7 @@ def set_server_setting(name, value, server):
# We create the file ../settings/channel_data.json
if not filetools.write(file_settings, jsontools.dump(dict_file)):
- logger.info("ERROR saving file: %s" % file_settings)
+ logger.error("ERROR saving file: %s" % file_settings)
return None
return value
@@ -752,7 +752,7 @@ def check_video_link(item, timeout=3):
server_module = __import__('servers.%s' % server, None, None, ["servers.%s" % server])
except:
server_module = None
- logger.info("[check_video_link] Cannot import server! %s" % server)
+ logger.error("[check_video_link] Cannot import server! %s" % server)
return item, NK
if hasattr(server_module, 'test_video_exists'):
@@ -762,20 +762,20 @@ def check_video_link(item, timeout=3):
try:
video_exists, message = server_module.test_video_exists(page_url=url)
if not video_exists:
- logger.info("[check_video_link] Does not exist! %s %s %s" % (message, server, url))
+ logger.error("[check_video_link] Does not exist! %s %s %s" % (message, server, url))
resultado = KO
else:
- logger.info("[check_video_link] check ok %s %s" % (server, url))
+ logger.debug("[check_video_link] check ok %s %s" % (server, url))
resultado = OK
except:
- logger.info("[check_video_link] Can't check now! %s %s" % (server, url))
+ logger.error("[check_video_link] Can't check now! %s %s" % (server, url))
resultado = NK
finally:
httptools.HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT = ant_timeout # Restore download time
return item, resultado
- logger.info("[check_video_link] There is no test_video_exists for server: %s" % server)
+ logger.debug("[check_video_link] There is no test_video_exists for server: %s" % server)
return item, NK
def translate_server_name(name):
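Taken together, the `check_video_link` changes define a level policy: a successful check or an absent `test_video_exists` hook is routine (`debug`), while an unimportable server module, a dead video, or an unverifiable link all surface as `error`. A condensed sketch of the resulting flow; the result markers and exact signature are assumptions drawn from the hunk, not verbatim repo code.

from platformcode import logger   # the repo's logger module

OK, KO, NK = 2, 0, 1              # assumed result markers

def check_video_link_levels(server_module, server, url):
    if server_module is None:
        logger.error("[check_video_link] Cannot import server! %s" % server)
        return NK
    if not hasattr(server_module, 'test_video_exists'):
        logger.debug("[check_video_link] There is no test_video_exists for server: %s" % server)
        return NK
    try:
        video_exists, message = server_module.test_video_exists(page_url=url)
    except Exception:
        logger.error("[check_video_link] Can't check now! %s %s" % (server, url))
        return NK
    if not video_exists:
        logger.error("[check_video_link] Does not exist! %s %s %s" % (message, server, url))
        return KO
    logger.debug("[check_video_link] check ok %s %s" % (server, url))
    return OK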
diff --git a/core/support.py b/core/support.py
index 693c6319..99c83171 100755
--- a/core/support.py
+++ b/core/support.py
@@ -33,7 +33,7 @@ def hdpass_get_servers(item):
for mir_url, srv in scrapertools.find_multiple_matches(mir, patron_option):
mir_url = scrapertools.decodeHtmlentities(mir_url)
- info(mir_url)
+ logger.debug(mir_url)
it = item.clone(action="play", quality=quality, title=srv, server=srv, url= mir_url)
if not servertools.get_server_parameters(srv.lower()): it = hdpass_get_url(it)[0] # do not exists or it's empty
ret.append(it)
@@ -1022,7 +1022,7 @@ def videolibrary(itemlist, item, typography='', function_level=1, function=''):
# Simply add this function to add video library support
# Function_level is useful if the function is called by another function.
# If the call is direct, leave it blank
- info()
+ logger.debug()
if item.contentType == 'movie':
action = 'add_pelicula_to_library'
@@ -1073,7 +1073,7 @@ def videolibrary(itemlist, item, typography='', function_level=1, function=''):
def nextPage(itemlist, item, data='', patron='', function_or_level=1, next_page='', resub=[]):
# Function_level is useful if the function is called by another function.
# If the call is direct, leave it blank
- info()
+ logger.debug()
action = inspect.stack()[function_or_level][3] if type(function_or_level) == int else function_or_level
if next_page == '':
next_page = scrapertools.find_single_match(data, patron)
@@ -1083,7 +1083,7 @@ def nextPage(itemlist, item, data='', patron='', function_or_level=1, next_page=
if 'http' not in next_page:
next_page = scrapertools.find_single_match(item.url, 'https?://[a-z0-9.-]+') + (next_page if next_page.startswith('/') else '/' + next_page)
next_page = next_page.replace('&', '&')
- info('NEXT= ', next_page)
+ logger.debug('NEXT= ', next_page)
itemlist.append(
item.clone(channel=item.channel,
action = action,
@@ -1110,7 +1110,7 @@ def pagination(itemlist, item, page, perpage, function_level=1):
def server(item, data='', itemlist=[], headers='', AutoPlay=True, CheckLinks=True, Download=True, patronTag=None, Videolibrary=True):
- info()
+ logger.debug()
blacklisted_servers = config.get_setting("black_list", server='servers')
if not blacklisted_servers: blacklisted_servers = []
if not data and not itemlist:
@@ -1375,7 +1375,7 @@ def thumb(item_itemlist_string=None, genre=False, live=False):
'_tvshow':['serie','tv', 'fiction']}
def autoselect_thumb(item, genre):
- info('SPLIT',re.split(r'\.|\{|\}|\[|\]|\(|\)|/| ',item.title.lower()))
+ logger.debug('SPLIT', re.split(r'\.|\{|\}|\[|\]|\(|\)|/| ', item.title.lower()))
if genre == False:
for thumb, titles in icon_dict.items():
if any(word in re.split(r'\.|\{|\}|\[|\]|\(|\)|/| ',item.title.lower()) for word in search):
diff --git a/core/tmdb.py b/core/tmdb.py
index e5a6ab5e..0b6c709e 100644
--- a/core/tmdb.py
+++ b/core/tmdb.py
@@ -87,7 +87,7 @@ create_bd()
# The function name is the name of the decorator and receives the function that decorates.
def cache_response(fn):
- logger.info()
+ logger.debug()
# import time
# start_time = time.time()
@@ -495,7 +495,7 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda=def_lang, lock=None
def find_and_set_infoLabels(item):
- logger.info()
+ logger.debug()
global otmdb_global
tmdb_result = None
@@ -902,7 +902,7 @@ class Tmdb(object):
cls.dic_generos[idioma][tipo] = {}
url = ('http://api.themoviedb.org/3/genre/%s/list?api_key=a1ab8b8669da03637a4b98fa39c39228&language=%s' % (tipo, idioma))
try:
- logger.info("[Tmdb.py] Filling in dictionary of genres")
+ logger.debug("[Tmdb.py] Filling in dictionary of genres")
resultado = cls.get_json(url)
if not isinstance(resultado, dict):
@@ -934,7 +934,7 @@ class Tmdb(object):
'&language=%s' % (self.busqueda_id, source, self.busqueda_idioma))
buscando = "%s: %s" % (source.capitalize(), self.busqueda_id)
- logger.info("[Tmdb.py] Searching %s:\n%s" % (buscando, url))
+ logger.debug("[Tmdb.py] Searching %s:\n%s" % (buscando, url))
resultado = self.get_json(url)
if not isinstance(resultado, dict):
resultado = ast.literal_eval(resultado.decode('utf-8'))
@@ -981,7 +981,7 @@ class Tmdb(object):
url += '&year=%s' % self.busqueda_year
buscando = self.busqueda_texto.capitalize()
- logger.info("[Tmdb.py] Searching %s on page %s:\n%s" % (buscando, page, url))
+ logger.debug("[Tmdb.py] Searching %s on page %s:\n%s" % (buscando, page, url))
resultado = self.get_json(url)
if not isinstance(resultado, dict):
resultado = ast.literal_eval(resultado.decode('utf-8'))
@@ -1042,7 +1042,7 @@ class Tmdb(object):
url = ('http://api.themoviedb.org/3/%s?api_key=a1ab8b8669da03637a4b98fa39c39228&%s'
% (type_search, "&".join(params)))
- logger.info("[Tmdb.py] Searcing %s:\n%s" % (type_search, url))
+ logger.debug("[Tmdb.py] Searcing %s:\n%s" % (type_search, url))
resultado = self.get_json(url, cache=False)
if not isinstance(resultado, dict):
resultado = ast.literal_eval(resultado.decode('utf-8'))
@@ -1107,7 +1107,7 @@ class Tmdb(object):
return True
def get_list_resultados(self, num_result=20):
- # logger.info("self %s" % str(self))
+ # logger.debug("self %s" % str(self))
res = []
if num_result <= 0:
@@ -1327,7 +1327,7 @@ class Tmdb(object):
"&append_to_response=credits" % (self.result["id"], numtemporada, self.busqueda_idioma)
buscando = "id_Tmdb: " + str(self.result["id"]) + " season: " + str(numtemporada) + "\nURL: " + url
- logger.info("[Tmdb.py] Searcing " + buscando)
+ logger.debug("[Tmdb.py] Searcing " + buscando)
try:
self.temporada[numtemporada] = self.get_json(url)
if not isinstance(self.temporada[numtemporada], dict):
@@ -1516,7 +1516,7 @@ class Tmdb(object):
items.extend(list(self.get_episodio(ret_infoLabels['season'], episodio).items()))
- # logger.info("ret_infoLabels" % ret_infoLabels)
+ # logger.debug("ret_infoLabels" % ret_infoLabels)
for k, v in items:
if not v:
diff --git a/core/trakt_tools.py b/core/trakt_tools.py
index e5d23e51..639c8803 100644
--- a/core/trakt_tools.py
+++ b/core/trakt_tools.py
@@ -128,7 +128,7 @@ def token_trakt(item):
def set_trakt_info(item):
- logger.info()
+ logger.debug()
import xbmcgui
# Envia los datos a trakt
try:
@@ -139,7 +139,7 @@ def set_trakt_info(item):
pass
def get_trakt_watched(id_type, mediatype, update=False):
- logger.info()
+ logger.debug()
id_list = []
id_dict = dict()
@@ -229,7 +229,7 @@ def trakt_check(itemlist):
def get_sync_from_file():
- logger.info()
+ logger.debug()
sync_path = os.path.join(config.get_data_path(), 'settings_channels', 'trakt_data.json')
trakt_node = {}
if os.path.exists(sync_path):
@@ -241,7 +241,7 @@ def get_sync_from_file():
def update_trakt_data(mediatype, trakt_data):
- logger.info()
+ logger.debug()
sync_path = os.path.join(config.get_data_path(), 'settings_channels', 'trakt_data.json')
if os.path.exists(sync_path):
@@ -251,7 +251,7 @@ def update_trakt_data(mediatype, trakt_data):
def ask_install_script():
- logger.info()
+ logger.debug()
from platformcode import platformtools
@@ -265,7 +265,7 @@ def ask_install_script():
def wait_for_update_trakt():
- logger.info()
+ logger.debug()
t = Thread(update_all)
t.setDaemon(True)
t.start()
@@ -274,7 +274,7 @@ def wait_for_update_trakt():
def update_all():
# from core.support import dbg;dbg()
from time import sleep
- logger.info()
+ logger.debug()
sleep(20)
while xbmc.Player().isPlaying():
sleep(20)
diff --git a/core/tvdb.py b/core/tvdb.py
index 5e5f6f6b..eb635d36 100644
--- a/core/tvdb.py
+++ b/core/tvdb.py
@@ -77,9 +77,9 @@ otvdb_global = None
def find_and_set_infoLabels(item):
- logger.info()
+ logger.debug()
# from core.support import dbg;dbg()
- # logger.info("item es %s" % item)
+ # logger.debug("item es %s" % item)
p_dialog = None
if not item.contentSeason:
@@ -382,7 +382,7 @@ class Tvdb(object):
@classmethod
def __check_token(cls):
- # logger.info()
+ # logger.debug()
if TOKEN == "":
cls.__login()
else:
@@ -397,7 +397,7 @@ class Tvdb(object):
@staticmethod
def __login():
- # logger.info()
+ # logger.debug()
global TOKEN
apikey = "106B699FDC04301C"
@@ -423,7 +423,7 @@ class Tvdb(object):
@classmethod
def __refresh_token(cls):
- # logger.info()
+ # logger.debug()
global TOKEN
is_success = False
@@ -521,7 +521,7 @@ class Tvdb(object):
]
}
"""
- logger.info()
+ logger.debug()
if id_episode and self.episodes.get(id_episode):
return self.episodes.get(id_episode)
@@ -589,7 +589,7 @@ class Tvdb(object):
}
}
"""
- logger.info()
+ logger.debug()
url = HOST + "/series/%s/episodes?page=%s" % (_id, page)
@@ -662,7 +662,7 @@ class Tvdb(object):
"""
if semaforo:
semaforo.acquire()
- logger.info()
+ logger.debug()
url = HOST + "/episodes/%s" % _id
@@ -681,7 +681,7 @@ class Tvdb(object):
else:
dict_html = req.json()
- # logger.info("dict_html %s" % dict_html)
+ # logger.debug("dict_html %s" % dict_html)
self.episodes[_id] = dict_html.pop("data") if 'Error' not in dict_html else {}
if semaforo:
@@ -712,7 +712,7 @@ class Tvdb(object):
"status": "string"
}
"""
- logger.info()
+ logger.debug()
params = {}
if name:
@@ -802,7 +802,7 @@ class Tvdb(object):
}
}
"""
- logger.info()
+ logger.debug()
resultado = {}
url = HOST + "/series/%s" % _id
@@ -855,7 +855,7 @@ class Tvdb(object):
@rtype: dict
"""
- logger.info()
+ logger.debug()
if self.result.get('image_season_%s' % season):
return self.result['image_season_%s' % season]
@@ -909,7 +909,7 @@ class Tvdb(object):
@return: dictionary with actors
@rtype: dict
"""
- logger.info()
+ logger.debug()
url = HOST + "/series/%s/actors" % _id
DEFAULT_HEADERS["Accept-Language"] = lang
@@ -942,7 +942,7 @@ class Tvdb(object):
@rtype: list
@return: list of results
"""
- logger.info()
+ logger.debug()
list_results = []
# if we have a result and it has seriesName, we already have the info of the series, it is not necessary to search again
diff --git a/core/videolibrarytools.py b/core/videolibrarytools.py
index 36ade9ed..a11e75ca 100644
--- a/core/videolibrarytools.py
+++ b/core/videolibrarytools.py
@@ -78,7 +78,7 @@ def save_movie(item, silent=False):
@rtype fallidos: int
@return: the number of failed items or -1 if all failed
"""
- logger.info()
+ logger.debug()
# logger.debug(item.tostring('\n'))
insertados = 0
sobreescritos = 0
@@ -144,7 +144,7 @@ def save_movie(item, silent=False):
if not path:
# Create folder
path = filetools.join(MOVIES_PATH, ("%s [%s]" % (base_name, _id)).strip())
- logger.info("Creating movie directory:" + path)
+ logger.debug("Creating movie directory:" + path)
if not filetools.mkdir(path):
logger.debug("Could not create directory")
return 0, 0, -1, path
@@ -159,7 +159,7 @@ def save_movie(item, silent=False):
if not nfo_exists:
# We create .nfo if it doesn't exist
- logger.info("Creating .nfo: " + nfo_path)
+ logger.debug("Creating .nfo: " + nfo_path)
head_nfo = scraper.get_nfo(item)
item_nfo = Item(title=item.contentTitle, channel="videolibrary", action='findvideos',
@@ -182,7 +182,7 @@ def save_movie(item, silent=False):
if item_nfo and strm_exists:
if json_exists:
- logger.info("The file exists. Is overwritten")
+ logger.debug("The file exists. Is overwritten")
sobreescritos += 1
else:
insertados += 1
@@ -209,7 +209,7 @@ def save_movie(item, silent=False):
item_nfo.library_urls[item.channel] = item.url
if filetools.write(nfo_path, head_nfo + item_nfo.tojson()):
- #logger.info("FOLDER_MOVIES : %s" % FOLDER_MOVIES)
+ #logger.debug("FOLDER_MOVIES : %s" % FOLDER_MOVIES)
# We update the Kodi video library with the movie
if config.is_xbmc() and config.get_setting("videolibrary_kodi") and not silent:
from platformcode import xbmc_videolibrary
@@ -238,7 +238,7 @@ def update_renumber_options(item, head_nfo, path):
json = json_file['TVSHOW_AUTORENUMBER']
if item.fulltitle in json:
item.channel_prefs[channel]['TVSHOW_AUTORENUMBER'] = json[item.fulltitle]
- logger.info('UPDATED=\n' + str(item.channel_prefs))
+ logger.debug('UPDATED=\n' + str(item.channel_prefs))
filetools.write(tvshow_path, head_nfo + item.tojson())
def add_renumber_options(item, head_nfo, path):
@@ -426,7 +426,7 @@ def save_tvshow(item, episodelist, silent=False):
@rtype path: str
@return: serial directory
"""
- logger.info()
+ logger.debug()
# logger.debug(item.tostring('\n'))
path = ""
@@ -486,7 +486,7 @@ def save_tvshow(item, episodelist, silent=False):
if not path:
path = filetools.join(TVSHOWS_PATH, ("%s [%s]" % (base_name, _id)).strip())
- logger.info("Creating series directory: " + path)
+ logger.debug("Creating series directory: " + path)
try:
filetools.mkdir(path)
except OSError as exception:
@@ -496,7 +496,7 @@ def save_tvshow(item, episodelist, silent=False):
tvshow_path = filetools.join(path, "tvshow.nfo")
if not filetools.exists(tvshow_path):
# We create tvshow.nfo, if it does not exist, with the head_nfo, series info and watched episode marks
- logger.info("Creating tvshow.nfo: " + tvshow_path)
+ logger.debug("Creating tvshow.nfo: " + tvshow_path)
head_nfo = scraper.get_nfo(item)
item.infoLabels['mediatype'] = "tvshow"
item.infoLabels['title'] = item.contentSerieName
@@ -570,11 +570,11 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
@rtype fallidos: int
@return: the number of failed episodes
"""
- logger.info()
+ logger.debug()
episodelist = filter_list(episodelist, serie.action, path)
# No episode list, nothing to save
if not len(episodelist):
- logger.info("There is no episode list, we go out without creating strm")
+ logger.debug("There is no episode list, we go out without creating strm")
return 0, 0, 0
# process local episodes
@@ -589,7 +589,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
elif config.get_setting("local_episodes", "videolibrary"):
done, local_episodes_path = config_local_episodes_path(path, serie)
if done < 0:
- logger.info("An issue has occurred while configuring local episodes, going out without creating strm")
+ logger.debug("An issue has occurred while configuring local episodes, going out without creating strm")
return 0, 0, done
item_nfo.local_episodes_path = local_episodes_path
filetools.write(nfo_path, head_nfo + item_nfo.tojson())
@@ -713,7 +713,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
# No episode list, nothing to save
if not len(new_episodelist):
- logger.info("There is no episode list, we go out without creating strm")
+ logger.debug("There is no episode list, we go out without creating strm")
return 0, 0, 0
local_episodelist += get_local_content(path)
@@ -745,12 +745,12 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower())
if season_episode in local_episodelist:
- logger.info('Skipped: Serie ' + serie.contentSerieName + ' ' + season_episode + ' available as local content')
+ logger.debug('Skipped: Serie ' + serie.contentSerieName + ' ' + season_episode + ' available as local content')
continue
# check if the episode has been downloaded
if filetools.join(path, "%s [downloads].json" % season_episode) in ficheros:
- logger.info('INFO: "%s" episode %s has been downloaded, skipping it' % (serie.contentSerieName, season_episode))
+ logger.debug('INFO: "%s" episode %s has been downloaded, skipping it' % (serie.contentSerieName, season_episode))
continue
strm_exists = strm_path in ficheros
@@ -806,7 +806,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
if filetools.write(json_path, e.tojson()):
if not json_exists:
- logger.info("Inserted: %s" % json_path)
+ logger.debug("Inserted: %s" % json_path)
insertados += 1
# We mark episode as unseen
news_in_playcounts[season_episode] = 0
@@ -817,14 +817,14 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
news_in_playcounts[serie.contentSerieName] = 0
else:
- logger.info("Overwritten: %s" % json_path)
+ logger.debug("Overwritten: %s" % json_path)
sobreescritos += 1
else:
- logger.info("Failed: %s" % json_path)
+ logger.debug("Failed: %s" % json_path)
fallidos += 1
else:
- logger.info("Failed: %s" % json_path)
+ logger.debug("Failed: %s" % json_path)
fallidos += 1
if not silent and p_dialog.iscanceled():
@@ -894,7 +894,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
def config_local_episodes_path(path, item, silent=False):
- logger.info(item)
+ logger.debug(item)
from platformcode.xbmc_videolibrary import search_local_path
local_episodes_path=search_local_path(item)
if not local_episodes_path:
@@ -906,11 +906,11 @@ def config_local_episodes_path(path, item, silent=False):
platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(80043))
local_episodes_path = platformtools.dialog_browse(0, config.get_localized_string(80046))
if local_episodes_path == '':
- logger.info("User has canceled the dialog")
+ logger.debug("User has canceled the dialog")
return -2, local_episodes_path
elif path in local_episodes_path:
platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(80045))
- logger.info("Selected folder is the same of the TV show one")
+ logger.debug("Selected folder is the same of the TV show one")
return -2, local_episodes_path
if local_episodes_path:
@@ -925,7 +925,7 @@ def config_local_episodes_path(path, item, silent=False):
def process_local_episodes(local_episodes_path, path):
- logger.info()
+ logger.debug()
sub_extensions = ['.srt', '.sub', '.sbv', '.ass', '.idx', '.ssa', '.smi']
artwork_extensions = ['.jpg', '.jpeg', '.png']
@@ -964,7 +964,7 @@ def process_local_episodes(local_episodes_path, path):
def get_local_content(path):
- logger.info()
+ logger.debug()
local_episodelist = []
for root, folders, files in filetools.walk(path):
@@ -993,7 +993,7 @@ def add_movie(item):
@type item: item
@param item: item to be saved.
"""
- logger.info()
+ logger.debug()
from platformcode.launcher import set_search_temp; set_search_temp(item)
# To disambiguate titles, TMDB is caused to ask for the really desired title
@@ -1040,7 +1040,7 @@ def add_tvshow(item, channel=None):
@param channel: channel from which the series will be saved. By default, item.from_channel or item.channel will be imported.
"""
- logger.info("show=#" + item.show + "#")
+ logger.debug("show=#" + item.show + "#")
from platformcode.launcher import set_search_temp; set_search_temp(item)
if item.channel == "downloads":
@@ -1117,7 +1117,7 @@ def add_tvshow(item, channel=None):
else:
platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(60070) % item.show)
- logger.info("%s episodes of series %s have been added to the video library" % (insertados, item.show))
+ logger.debug("%s episodes of series %s have been added to the video library" % (insertados, item.show))
if config.is_xbmc():
if config.get_setting("sync_trakt_new_tvshow", "videolibrary"):
import xbmc
@@ -1133,7 +1133,7 @@ def add_tvshow(item, channel=None):
def emergency_urls(item, channel=None, path=None, headers={}):
- logger.info()
+ logger.debug()
import re
from servers import torrent
try:
diff --git a/core/ziptools.py b/core/ziptools.py
index 138c4c1b..f7f73db6 100644
--- a/core/ziptools.py
+++ b/core/ziptools.py
@@ -17,8 +17,8 @@ from core import filetools
class ziptools(object):
def extract(self, file, dir, folder_to_extract="", overwrite_question=False, backup=False):
- logger.info("file= %s" % file)
- logger.info("dir= %s" % dir)
+ logger.debug("file= %s" % file)
+ logger.debug("dir= %s" % dir)
if not dir.endswith(':') and not filetools.exists(dir):
filetools.mkdir(dir)
@@ -30,13 +30,13 @@ class ziptools(object):
for nameo in zf.namelist():
name = nameo.replace(':', '_').replace('<', '_').replace('>', '_').replace('|', '_').replace('"', '_').replace('?', '_').replace('*', '_')
- logger.info("name=%s" % nameo)
+ logger.debug("name=%s" % nameo)
if not name.endswith('/'):
- logger.info("it's not a directory")
+ logger.debug("it's not a directory")
try:
(path, filename) = filetools.split(filetools.join(dir, name))
- logger.info("path=%s" % path)
- logger.info("name=%s" % name)
+ logger.debug("path=%s" % path)
+ logger.debug("name=%s" % name)
if folder_to_extract:
if path != filetools.join(dir, folder_to_extract):
break
@@ -49,7 +49,7 @@ class ziptools(object):
else:
outfilename = filetools.join(dir, name)
- logger.info("outfilename=%s" % outfilename)
+ logger.debug("outfilename=%s" % outfilename)
try:
if filetools.exists(outfilename) and overwrite_question:
from platformcode import platformtools
@@ -74,7 +74,7 @@ class ziptools(object):
try:
zf.close()
except:
- logger.info("Error closing .zip " + file)
+ logger.error("Error closing .zip " + file)
def _createstructure(self, file, dir):
self._makedirs(self._listdirs(file), dir)
diff --git a/lib/arm_chromeos.py b/lib/arm_chromeos.py
index d4808402..b33d1dfd 100644
--- a/lib/arm_chromeos.py
+++ b/lib/arm_chromeos.py
@@ -27,7 +27,7 @@ class ChromeOSImage:
"""
def __init__(self, imgpath):
- logger.info('Image Path: ' + imgpath)
+ logger.debug('Image Path: ' + imgpath)
"""Prepares the image"""
self.imgpath = imgpath
self.bstream = self.get_bstream(imgpath)
@@ -59,7 +59,7 @@ class ChromeOSImage:
self.seek_stream(entries_start * lba_size)
if not calcsize(part_format) == entry_size:
- logger.info('Partition table entries are not 128 bytes long')
+ logger.error('Partition table entries are not 128 bytes long')
return 0
for index in range(1, entries_num + 1): # pylint: disable=unused-variable
@@ -71,7 +71,7 @@ class ChromeOSImage:
break
if not offset:
- logger.info('Failed to calculate losetup offset.')
+ logger.error('Failed to calculate losetup offset.')
return 0
return offset
@@ -93,7 +93,7 @@ class ChromeOSImage:
while True:
chunk2 = self.read_stream(chunksize)
if not chunk2:
- logger.info('File %s not found in the ChromeOS image' % filename)
+ logger.error('File %s not found in the ChromeOS image' % filename)
return False
chunk = chunk1 + chunk2
diff --git a/lib/doh.py b/lib/doh.py
index 5375caa5..14ce62a6 100644
--- a/lib/doh.py
+++ b/lib/doh.py
@@ -47,7 +47,7 @@ def query(name, type='A', server=DOH_SERVER, path="/dns-query", fallback=True):
else:
retval = []
except Exception as ex:
- logger.info("Exception occurred: '%s'" % ex)
+ logger.error("Exception occurred: '%s'" % ex)
if retval is None and fallback:
if type == 'A':
diff --git a/lib/generictools.py b/lib/generictools.py
index a4757b59..44789305 100644
--- a/lib/generictools.py
+++ b/lib/generictools.py
@@ -25,7 +25,7 @@ intervenido_sucuri = 'Access Denied - Sucuri Website Firewall'
def update_title(item):
- logger.info()
+ logger.debug()
from core import scraper,support
@@ -41,7 +41,7 @@ def update_title(item):
The channel must add a method to be able to receive the call from Kodi / Alfa, and be able to call this method:
def actualizar_titulos(item):
- logger.info()
+ logger.debug()
itemlist = []
from lib import generictools
from platformcode import launcher
@@ -206,7 +206,7 @@ def update_title(item):
def refresh_screen(item):
- logger.info()
+ logger.debug()
"""
#### Kodi 18 compatibility ####
@@ -240,7 +240,7 @@ def refresh_screen(item):
def post_tmdb_listado(item, itemlist):
- logger.info()
+ logger.debug()
itemlist_fo = []
"""
@@ -485,7 +485,7 @@ def post_tmdb_listado(item, itemlist):
def post_tmdb_seasons(item, itemlist):
- logger.info()
+ logger.debug()
"""
@@ -645,7 +645,7 @@ def post_tmdb_seasons(item, itemlist):
def post_tmdb_episodios(item, itemlist):
- logger.info()
+ logger.debug()
itemlist_fo = []
"""
@@ -996,7 +996,7 @@ def post_tmdb_episodios(item, itemlist):
def post_tmdb_findvideos(item, itemlist):
- logger.info()
+ logger.debug()
"""
@@ -1216,7 +1216,7 @@ def post_tmdb_findvideos(item, itemlist):
def get_field_from_kodi_DB(item, from_fields='*', files='file'):
- logger.info()
+ logger.debug()
"""
Call to read from the Kodi DB the input fields received (from_fields, by default "*") of the video indicated in Item
@@ -1294,7 +1294,7 @@ def get_field_from_kodi_DB(item, from_fields='*', files='file'):
def fail_over_newpct1(item, patron, patron2=None, timeout=None):
- logger.info()
+ logger.debug()
import ast
"""
@@ -1495,7 +1495,7 @@ def fail_over_newpct1(item, patron, patron2=None, timeout=None):
def web_intervenida(item, data, desactivar=True):
- logger.info()
+ logger.debug()
"""
@@ -1578,7 +1578,7 @@ def web_intervenida(item, data, desactivar=True):
def regenerate_clones():
- logger.info()
+ logger.debug()
import json
from core import videolibrarytools
@@ -1592,7 +1592,7 @@ def regenerate_clones():
# Find the paths where to leave the control .json file, and the Video Library
json_path = filetools.exists(filetools.join(config.get_runtime_path(), 'verify_cached_torrents.json'))
if json_path:
- logger.info('Previously repaired video library: WE ARE GOING')
+ logger.debug('Previously repaired video library: exiting')
return False
json_path = filetools.join(config.get_runtime_path(), 'verify_cached_torrents.json')
filetools.write(json_path, json.dumps({"CINE_verify": True})) # Prevents another simultaneous process from being launched
@@ -1632,7 +1632,7 @@ def regenerate_clones():
# Delete the Tvshow.nfo files and check if the .nfo has more than one channel and one is clone Newpct1
for file in files:
- # logger.info('file - nfos: ' + file)
+ # logger.debug('file - nfos: ' + file)
if 'tvshow.nfo' in file:
file_path = filetools.join(root, 'tvshow.nfo')
filetools.remove(file_path)
@@ -1698,7 +1698,7 @@ def regenerate_clones():
for file in files:
file_path = filetools.join(root, file)
if '.json' in file:
- logger.info('** file: ' + file)
+ logger.debug('** file: ' + file)
canal_json = scrapertools.find_single_match(file, r'\[(\w+)\].json')
if canal_json not in nfo.library_urls:
filetools.remove(file_path) # we delete the .json is a zombie
@@ -1741,7 +1741,7 @@ def regenerate_clones():
def dejuice(data):
- logger.info()
+ logger.debug()
# Method to unobtrusive JuicyCodes data
import base64
diff --git a/platformcode/autorenumber.py b/platformcode/autorenumber.py
index 8c530384..cf5b2d27 100644
--- a/platformcode/autorenumber.py
+++ b/platformcode/autorenumber.py
@@ -25,14 +25,14 @@ TYPE = "Type"
# helper Functions
def check(item):
- logger.info()
+ logger.debug()
dict_series = load(item)
title = item.fulltitle.rstrip()
if title in dict_series: title = dict_series[title]
return True if ID in title and EPISODE in title else False
def filename(item):
- logger.info()
+ logger.debug()
name_file = item.channel + "_data.json"
path = filetools.join(config.get_data_path(), "settings_channels")
fname = filetools.join(path, name_file)
@@ -40,7 +40,7 @@ def filename(item):
def load(item):
- logger.info()
+ logger.debug()
try:
json_file = open(filename(item), "r").read()
json = jsontools.load(json_file)[TVSHOW_RENUMERATE]
@@ -52,7 +52,7 @@ def load(item):
def write(item, json):
- logger.info()
+ logger.debug()
json_file = open(filename(item), "r").read()
js = jsontools.load(json_file)
js[TVSHOW_RENUMERATE] = json
@@ -71,7 +71,7 @@ def b64(json, mode = 'encode'):
def RepresentsInt(s):
# Controllo Numro Stagione
- logger.info()
+ logger.debug()
try:
int(s)
return True
@@ -79,7 +79,7 @@ def RepresentsInt(s):
return False
def find_episodes(item):
- logger.info()
+ logger.debug()
ch = __import__('channels.' + item.channel, fromlist=["channels.%s" % item.channel])
itemlist = ch.episodios(item)
return itemlist
@@ -705,7 +705,7 @@ class SelectreNumerationWindow(xbmcgui.WindowXMLDialog):
items.append(item)
self.seasons[item.getLabel()] = '%sx%s' % (item.getProperty('season'), item.getProperty('episode'))
self.items = items
- logger.info('SELF',self.seasons)
+ logger.debug('SELF', self.seasons)
def addseasons(self):
seasonlist = []
diff --git a/platformcode/globalsearch.py b/platformcode/globalsearch.py
index 43aaa72e..bb20e575 100644
--- a/platformcode/globalsearch.py
+++ b/platformcode/globalsearch.py
@@ -59,7 +59,7 @@ SERVERLIST = 300
class SearchWindow(xbmcgui.WindowXML):
def start(self, item):
- logger.info()
+ logger.debug()
self.exit = False
self.item = item
self.lastSearch()
@@ -81,7 +81,7 @@ class SearchWindow(xbmcgui.WindowXML):
self.doModal()
def lastSearch(self):
- logger.info()
+ logger.debug()
if not self.item.text:
if config.get_setting('last_search'): last_search = channeltools.get_channel_setting('Last_searched', 'search', '')
else: last_search = ''
@@ -89,7 +89,7 @@ class SearchWindow(xbmcgui.WindowXML):
if self.item.text: channeltools.set_channel_setting('Last_searched', self.item.text, 'search')
def select(self):
- logger.info()
+ logger.debug()
self.PROGRESS.setVisible(False)
items = []
if self.persons:
@@ -122,7 +122,7 @@ class SearchWindow(xbmcgui.WindowXML):
self.NORESULTS.setVisible(True)
def actors(self):
- logger.info()
+ logger.debug()
self.PROGRESS.setVisible(False)
items = []
@@ -174,7 +174,7 @@ class SearchWindow(xbmcgui.WindowXML):
self.NORESULTS.setVisible(True)
def get_channels(self):
- logger.info()
+ logger.debug()
channels_list = []
all_channels = channelselector.filterchannels('all')
@@ -196,12 +196,12 @@ class SearchWindow(xbmcgui.WindowXML):
if config.get_setting("include_in_global_search", channel) and ch_param.get("active", False):
channels_list.append(channel)
- logger.info('search in channels:',channels_list)
+ logger.debug('search in channels:', channels_list)
return channels_list
def getModule(self, channel):
- logger.info()
+ logger.debug()
try:
module = __import__('channels.%s' % channel, fromlist=["channels.%s" % channel])
mainlist = getattr(module, 'mainlist')(Item(channel=channel, global_search=True))
@@ -233,7 +233,7 @@ class SearchWindow(xbmcgui.WindowXML):
executor.submit(self.get_channel_results, self.item, self.moduleDict, searchAction)
def get_channel_results(self, item, module_dict, search_action):
- logger.info()
+ logger.debug()
channel = search_action.channel
results = []
valid = []
@@ -266,7 +266,7 @@ class SearchWindow(xbmcgui.WindowXML):
else: self.update(channel, valid + other)
def makeItem(self, item):
- logger.info()
+ logger.debug()
thumb = item.thumbnail if item.thumbnail else 'Infoplus/' + item.contentType.replace('show','')
it = xbmcgui.ListItem(item.title)
it.setProperty('thumb', thumb)
@@ -282,7 +282,7 @@ class SearchWindow(xbmcgui.WindowXML):
return it
def update(self, channel, results):
- logger.info('Search on channel', channel)
+ logger.debug('Search on channel', channel)
if results:
channelParams = channeltools.get_channel_parameters(channel)
name = channelParams['title']
diff --git a/platformcode/launcher.py b/platformcode/launcher.py
index bd30e037..6bba9076 100644
--- a/platformcode/launcher.py
+++ b/platformcode/launcher.py
@@ -19,7 +19,7 @@ def start():
Within this function all calls should go to
functions that we want to execute as soon as we open the plugin.
"""
- logger.info()
+ logger.debug()
# config.set_setting('show_once', True)
# Test if all the required directories are created
config.verify_directories_created()
@@ -37,7 +37,7 @@ def start():
updater.showSavedChangelog()
def run(item=None):
- logger.info()
+ logger.debug()
if not item:
# Extract item from sys.argv
if sys.argv[2]:
@@ -94,7 +94,7 @@ def run(item=None):
# If item has no action, stops here
if item.action == "":
- logger.info("Item without action")
+ logger.debug("Item without action")
return
# Action for main menu in channelselector
@@ -193,7 +193,7 @@ def run(item=None):
channel_file = os.path.join(config.get_runtime_path(), CHANNELS, item.channel + ".py")
- logger.info("channel_file= " + channel_file + ' - ' + CHANNELS + ' - ' + item.channel)
+ logger.debug("channel_file= " + channel_file + ' - ' + CHANNELS + ' - ' + item.channel)
channel = None
@@ -213,12 +213,12 @@ def run(item=None):
trakt_tools.set_trakt_info(item)
except:
pass
- logger.info("item.action=%s" % item.action.upper())
+ logger.debug("item.action=%s" % item.action.upper())
# logger.debug("item_toPlay: " + "\n" + item.tostring('\n'))
# First checks if channel has a "play" function
if hasattr(channel, 'play'):
- logger.info("Executing channel 'play' method")
+ logger.debug("Executing channel 'play' method")
itemlist = channel.play(item)
b_favourite = item.isFavourite
# Play should return a list of playable URLS
@@ -239,7 +239,7 @@ def run(item=None):
# If player don't have a "play" function, not uses the standard play from platformtools
else:
- logger.info("Executing core 'play' method")
+ logger.debug("Executing core 'play' method")
platformtools.play_video(item)
# Special action for findvideos, where the plugin looks for known urls
@@ -252,8 +252,7 @@ def run(item=None):
# If not, uses the generic findvideos function
else:
- logger.info("No channel 'findvideos' method, "
- "executing core method")
+ logger.debug("No channel 'findvideos' method, " "executing core method")
itemlist = servertools.find_video_items(item)
if config.get_setting("max_links", "videolibrary") != 0:
@@ -297,7 +296,7 @@ def run(item=None):
else:
filetools.remove(temp_search_file)
- logger.info("item.action=%s" % item.action.upper())
+ logger.debug("item.action=%s" % item.action.upper())
from core import channeltools
if config.get_setting('last_search'):
@@ -318,7 +317,7 @@ def run(item=None):
# For all other actions
else:
# import web_pdb; web_pdb.set_trace()
- logger.info("Executing channel '%s' method" % item.action)
+ logger.debug("Executing channel '%s' method" % item.action)
itemlist = getattr(channel, item.action)(item)
if config.get_setting('trakt_sync'):
from core import trakt_tools
@@ -399,7 +398,7 @@ def set_search_temp(item):
filetools.write(temp_search_file, f)
def reorder_itemlist(itemlist):
- logger.info()
+ logger.debug()
# logger.debug("Inlet itemlist size: %i" % len(itemlist))
new_list = []
@@ -437,7 +436,7 @@ def reorder_itemlist(itemlist):
new_list.extend(mod_list)
new_list.extend(not_mod_list)
- logger.info("Modified Titles:%i |Unmodified:%i" % (modified, not_modified))
+ logger.debug("Modified Titles:%i |Unmodified:%i" % (modified, not_modified))
if len(new_list) == 0:
new_list = itemlist
@@ -447,7 +446,7 @@ def reorder_itemlist(itemlist):
def limit_itemlist(itemlist):
- logger.info()
+ logger.debug()
# logger.debug("Inlet itemlist size: %i" % len(itemlist))
try:
@@ -480,7 +479,7 @@ def play_from_library(item):
itemlist=[]
item.fromLibrary = True
- logger.info()
+ logger.debug()
# logger.debug("item: \n" + item.tostring('\n'))
# Try to reproduce an image (this does nothing and also does not give an error)
diff --git a/platformcode/platformtools.py b/platformcode/platformtools.py
index be2e432a..d70a04ff 100644
--- a/platformcode/platformtools.py
+++ b/platformcode/platformtools.py
@@ -206,7 +206,7 @@ def render_items(itemlist, parent_item):
"""
Function used to render itemlist on kodi
"""
- logger.info('START render_items')
+ logger.debug('START render_items')
thumb_type = config.get_setting('video_thumbnail_type')
from platformcode import shortcuts
# from core import httptools
@@ -291,7 +291,7 @@ def render_items(itemlist, parent_item):
set_view_mode(itemlist[0], parent_item)
xbmcplugin.endOfDirectory(_handle)
- logger.info('END render_items')
+ logger.debug('END render_items')
def getCurrentView(item=None, parent_item=None):
@@ -348,11 +348,11 @@ def set_view_mode(item, parent_item):
if content:
mode = int(config.get_setting('view_mode_%s' % content).split(',')[-1])
if mode == 0:
- logger.info('default mode')
+ logger.debug('default mode')
mode = 55
xbmcplugin.setContent(handle=int(sys.argv[1]), content=Type)
xbmc.executebuiltin('Container.SetViewMode(%s)' % mode)
- logger.info('TYPE: ' + Type + ' - ' + 'CONTENT: ' + content)
+ logger.debug('TYPE: ' + Type + ' - ' + 'CONTENT: ' + content)
def set_infolabels(listitem, item, player=False):
@@ -572,10 +572,10 @@ def is_playing():
def play_video(item, strm=False, force_direct=False, autoplay=False):
- logger.info()
+ logger.debug()
logger.debug(item.tostring('\n'))
if item.channel == 'downloads':
- logger.info("Play local video: %s [%s]" % (item.title, item.url))
+ logger.debug("Play local video: %s [%s]" % (item.title, item.url))
xlistitem = xbmcgui.ListItem(path=item.url)
xlistitem.setArt({"thumb": item.thumbnail})
set_infolabels(xlistitem, item, True)
@@ -583,7 +583,7 @@ def play_video(item, strm=False, force_direct=False, autoplay=False):
return
default_action = config.get_setting("default_action")
- logger.info("default_action=%s" % default_action)
+ logger.debug("default_action=%s" % default_action)
# Open the selection dialog to see the available options
opciones, video_urls, seleccion, salir = get_dialogo_opciones(item, default_action, strm, autoplay)
@@ -593,8 +593,8 @@ def play_video(item, strm=False, force_direct=False, autoplay=False):
seleccion = get_seleccion(default_action, opciones, seleccion, video_urls)
if seleccion < 0: return # Canceled box
- logger.info("selection=%d" % seleccion)
- logger.info("selection=%s" % opciones[seleccion])
+ logger.debug("selection=%d" % seleccion)
+ logger.debug("selection=%s" % opciones[seleccion])
# run the selected option (jdownloader, download, favorites, add to the video library ...) IF IT IS NOT PLAY
salir = set_opcion(item, seleccion, opciones, video_urls)
@@ -755,7 +755,7 @@ def alert_unsopported_server():
def handle_wait(time_to_wait, title, text):
- logger.info("handle_wait(time_to_wait=%d)" % time_to_wait)
+ logger.debug("handle_wait(time_to_wait=%d)" % time_to_wait)
espera = dialog_progress(' ' + title, "")
secs = 0
@@ -774,15 +774,15 @@ def handle_wait(time_to_wait, title, text):
break
if cancelled:
- logger.info('Wait canceled')
+ logger.debug('Wait canceled')
return False
else:
- logger.info('Wait finished')
+ logger.debug('Wait finished')
return True
def get_dialogo_opciones(item, default_action, strm, autoplay):
- logger.info()
+ logger.debug()
# logger.debug(item.tostring('\n'))
from core import servertools
@@ -866,7 +866,7 @@ def get_dialogo_opciones(item, default_action, strm, autoplay):
def set_opcion(item, seleccion, opciones, video_urls):
- logger.info()
+ logger.debug()
# logger.debug(item.tostring('\n'))
salir = False
# Nothing was chosen, most likely because ESC was pressed
@@ -916,7 +916,7 @@ def set_opcion(item, seleccion, opciones, video_urls):
def get_video_seleccionado(item, seleccion, video_urls):
- logger.info()
+ logger.debug()
mediaurl = ""
view = False
wait_time = 0
@@ -942,7 +942,7 @@ def get_video_seleccionado(item, seleccion, video_urls):
mpd = True
# If there is no mediaurl it is because the video is not there :)
- logger.info("mediaurl=" + mediaurl)
+ logger.debug("mediaurl=" + mediaurl)
if mediaurl == "":
if item.server == "unknown":
alert_unsopported_server()
@@ -959,7 +959,7 @@ def get_video_seleccionado(item, seleccion, video_urls):
def set_player(item, xlistitem, mediaurl, view, strm, nfo_path=None, head_nfo=None, item_nfo=None):
- logger.info()
+ logger.debug()
# logger.debug("item:\n" + item.tostring('\n'))
# Moved the "torrent" connector code here
if item.server == "torrent":
@@ -1046,7 +1046,7 @@ def torrent_client_installed(show_tuple=False):
def play_torrent(item, xlistitem, mediaurl):
- logger.info()
+ logger.debug()
import time
from servers import torrent
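
The handle_wait() hunk above is the one loop in this file with timing logic: it ticks once per second, updates a progress dialog, and returns False if the user cancels. The same pattern stripped of Kodi, with the dialog replaced by a plain callback (an illustrative stand-in, not the real dialog_progress API):

    import time

    def handle_wait_sketch(time_to_wait, is_cancelled=lambda: False):
        # tick once per second until the time elapses or the user cancels
        for secs in range(1, time_to_wait + 1):
            time.sleep(1)
            print("waiting... %d%%" % int(100 * secs / time_to_wait))
            if is_cancelled():
                return False  # 'Wait canceled'
        return True  # 'Wait finished'
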
diff --git a/platformcode/recaptcha.py b/platformcode/recaptcha.py
index c4422143..d7265d43 100644
--- a/platformcode/recaptcha.py
+++ b/platformcode/recaptcha.py
@@ -65,7 +65,7 @@ class Recaptcha(xbmcgui.WindowXMLDialog):
data = httptools.downloadpage(self.url, post=post, headers=self.headers).data
from platformcode import logger
- logger.info(data)
+ logger.debug(data)
self.result = scrapertools.find_single_match(data, '
|\s{2,}', "", data)
diff --git a/servers/clicknupload.py b/servers/clicknupload.py
index f54c5d5c..a2f95930 100755
--- a/servers/clicknupload.py
+++ b/servers/clicknupload.py
@@ -22,7 +22,7 @@ excption = False
def test_video_exists(page_url):
- logger.info("(page_url='%s')" % page_url)
+ logger.debug("(page_url='%s')" % page_url)
data = get_data(page_url.replace(".org", ".me"))
if "File Not Found" in data: return False, config.get_localized_string(70449) % "Clicknupload"
@@ -31,7 +31,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
- logger.info("url=" + page_url)
+ logger.debug("url=" + page_url)
data = get_data(page_url.replace(".org", ".me"))
@@ -51,7 +51,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
media_url = media.rsplit('/', 1)[0] + "/" + url_strip
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [clicknupload]", media_url])
for video_url in video_urls:
- logger.info("%s - %s" % (video_url[0], video_url[1]))
+ logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls
diff --git a/servers/clipwatching.py b/servers/clipwatching.py
index 93aa4804..6ac3ec78 100644
--- a/servers/clipwatching.py
+++ b/servers/clipwatching.py
@@ -6,7 +6,7 @@ from lib import jsunpack
from platformcode import logger, config
def test_video_exists(page_url):
- logger.info("(page_url='%s')" % page_url)
+ logger.debug("(page_url='%s')" % page_url)
global data
data = httptools.downloadpage(page_url).data
if "File Not Found" in data or "File was deleted" in data:
@@ -15,7 +15,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, user="", password="", video_password=""):
- logger.info("(page_url='%s')" % page_url)
+ logger.debug("(page_url='%s')" % page_url)
video_urls = []
try:
diff --git a/servers/cloudvideo.py b/servers/cloudvideo.py
index b7885afe..36e299fc 100644
--- a/servers/cloudvideo.py
+++ b/servers/cloudvideo.py
@@ -8,7 +8,7 @@ from lib import jsunpack
def test_video_exists(page_url):
- logger.info("(page_url='%s')" % page_url)
+ logger.debug("(page_url='%s')" % page_url)
html = httptools.downloadpage(page_url)
global data
data = html.data
@@ -18,7 +18,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
- logger.info("url=" + page_url)
+ logger.debug("url=" + page_url)
video_urls = []
global data
# data = httptools.downloadpage(page_url).data
diff --git a/servers/crunchyroll.py b/servers/crunchyroll.py
index 2d252b3a..31384209 100755
--- a/servers/crunchyroll.py
+++ b/servers/crunchyroll.py
@@ -30,7 +30,7 @@ proxy = "https://www.usa-proxy.org/"
def test_video_exists(page_url):
- logger.info("(page_url='%s')" % page_url)
+ logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url, headers=GLOBAL_HEADER).data
if "Este es un clip de muestra" in data:
@@ -44,7 +44,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
#page_url='https://www.crunchyroll.com/es-es/one-piece/episode-891-climbing-up-a-waterfall-a-great-journey-through-the-land-of-wanos-sea-zone-786643'
- logger.info("url=" + page_url)
+ logger.debug("url=" + page_url)
video_urls = []
if "crunchyroll.com" in page_url:
media_id = page_url.rsplit("-", 1)[1]
@@ -94,7 +94,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
file_sub = ""
video_urls.append(["%s %sp [crunchyroll]" % (filename, quality), media_url, 0, file_sub])
for video_url in video_urls:
- logger.info("%s - %s" % (video_url[0], video_url[1]))
+ logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls
diff --git a/servers/dailymotion.py b/servers/dailymotion.py
index 7de4b4fd..b6d3ddbc 100644
--- a/servers/dailymotion.py
+++ b/servers/dailymotion.py
@@ -6,7 +6,7 @@ from platformcode import logger, config
def test_video_exists(page_url):
- logger.info("(page_url='%s')" % page_url)
+ logger.debug("(page_url='%s')" % page_url)
global response
response = httptools.downloadpage(page_url, cookies=False)
@@ -18,7 +18,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
- logger.info("(page_url='%s')" % page_url)
+ logger.debug("(page_url='%s')" % page_url)
video_urls = []
cookie = {'Cookie': response.headers["set-cookie"]}
data = response.data.replace("\\", "")
@@ -40,5 +40,5 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
stream_url = stream_url_http
video_urls.append(["%sp .%s [dailymotion]" % (calidad, stream_type), stream_url, 0, subtitle])
for video_url in video_urls:
- logger.info("%s - %s" % (video_url[0], video_url[1]))
+ logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls
\ No newline at end of file
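
All the servers/*.py modules in this patch share one contract, visible hunk after hunk: test_video_exists(page_url) returns a (bool, user-facing message) tuple, and get_video_url(...) returns a list of [label, media_url] entries for the selection dialog. A condensed template of that contract, assuming a hypothetical "exampleserver" and a made-up extraction regex; the helpers (httptools.downloadpage, scrapertools.find_single_match, scrapertools.get_filename_from_url, config.get_localized_string) are the ones used throughout this diff:

    from core import httptools, scrapertools
    from platformcode import config, logger

    def test_video_exists(page_url):
        logger.debug("(page_url='%s')" % page_url)
        data = httptools.downloadpage(page_url).data
        if "File Not Found" in data or "File was deleted" in data:
            # (False, message) marks the link as dead in the UI
            return False, config.get_localized_string(70449) % "exampleserver"
        return True, ""

    def get_video_url(page_url, premium=False, user="", password="", video_password=""):
        logger.debug("url=" + page_url)
        video_urls = []
        data = httptools.downloadpage(page_url).data
        # hypothetical extraction pattern for this sketch
        media_url = scrapertools.find_single_match(data, r'file:\s*"([^"]+)"')
        if media_url:
            # each entry: [label shown to the user, playable url]
            video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [exampleserver]", media_url])
        return video_urls
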
diff --git a/servers/debriders/alldebrid.py b/servers/debriders/alldebrid.py
index 6f0bd18a..3818fffd 100644
--- a/servers/debriders/alldebrid.py
+++ b/servers/debriders/alldebrid.py
@@ -7,7 +7,7 @@ from platformcode import logger
# Returns an array of possible video URLs from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
- logger.info()
+ logger.debug()
page_url = correct_url(page_url)
dd1 = httptools.downloadpage("https://api.alldebrid.com/user/login?agent=mySoft&username=%s&password=%s" %(user, password)).data
token = scrapertools.find_single_match(dd1, 'token":"([^"]+)')
diff --git a/servers/debriders/realdebrid.py b/servers/debriders/realdebrid.py
index 7d8b855d..d8e6d4f5 100755
--- a/servers/debriders/realdebrid.py
+++ b/servers/debriders/realdebrid.py
@@ -22,7 +22,7 @@ headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:65.0) Gecko/20
# Returns an array of possible video URLs from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
- logger.info("(page_url='%s' , video_password=%s)" % (page_url, video_password))
+ logger.debug("(page_url='%s' , video_password=%s)" % (page_url, video_password))
page_url = page_url.replace(".nz/embed", ".nz/")
# Check whether a stored token exists; if not, run the authentication process
token_auth = config.get_setting("token", server="realdebrid")
@@ -99,7 +99,7 @@ def get_enlaces(data):
def authentication():
- logger.info()
+ logger.debug()
try:
client_id = "YTWNFBIJEEBP6"
diff --git a/servers/decrypters/adfly.py b/servers/decrypters/adfly.py
index 20b34d18..8acac50a 100755
--- a/servers/decrypters/adfly.py
+++ b/servers/decrypters/adfly.py
@@ -8,7 +8,7 @@ from platformcode import logger
def get_long_url(short_url):
- logger.info("short_url = '%s'" % short_url)
+ logger.debug("short_url = '%s'" % short_url)
data = httptools.downloadpage(short_url).data
ysmm = scrapertools.find_single_match(data, "var ysmm = '([^']+)';")
diff --git a/servers/decrypters/linkbucks.py b/servers/decrypters/linkbucks.py
index 5b15ec00..e1a7b521 100755
--- a/servers/decrypters/linkbucks.py
+++ b/servers/decrypters/linkbucks.py
@@ -17,7 +17,7 @@ from platformcode import logger
# Gets the URL behind a linkbucks link
def get_long_url(short_url):
- logger.info("(short_url='%s')" % short_url)
+ logger.debug("(short_url='%s')" % short_url)
request_headers = []
request_headers.append(["User-Agent",
@@ -33,17 +33,17 @@ def get_long_url(short_url):
while True:
for name, value in response_headers:
if name == "set-cookie":
- logger.info("Set-Cookie: " + value)
+ logger.debug("Set-Cookie: " + value)
cookie_name = scrapertools.find_single_match(value, '(.*?)\=.*?\;')
cookie_value = scrapertools.find_single_match(value, '.*?\=(.*?)\;')
request_headers.append(["Cookie", cookie_name + "=" + cookie_value])
body, response_headers = scrapertools.read_body_and_headers(url, headers=request_headers)
- logger.info("body=" + body)
+ logger.debug("body=" + body)
try:
location = scrapertools.scrapertools.find_single_match(body, '
|\s{2,}', "", data)
return data
def test_video_exists(page_url):
- logger.info("(page_url='%s')" % page_url)
+ logger.debug("(page_url='%s')" % page_url)
data = get_source(page_url)
if "File was deleted" in data or "File Not Found" in data:
@@ -40,7 +40,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
- logger.info("url=" + page_url)
+ logger.debug("url=" + page_url)
video_urls = []
referer = ''
diff --git a/servers/samaup.py b/servers/samaup.py
index cc6285c0..5d168a9c 100644
--- a/servers/samaup.py
+++ b/servers/samaup.py
@@ -10,7 +10,7 @@ from platformcode import logger
def test_video_exists(page_url):
- logger.info("(page_url='%s')" % page_url)
+ logger.debug("(page_url='%s')" % page_url)
global data
data = httptools.downloadpage(page_url).data
if "Not Found" in data or "File was deleted" in data:
@@ -19,7 +19,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
- logger.info("url=" + page_url)
+ logger.debug("url=" + page_url)
video_urls = []
ext = 'mp4'
diff --git a/servers/sendvid.py b/servers/sendvid.py
index ed11f426..7cdd94b4 100755
--- a/servers/sendvid.py
+++ b/servers/sendvid.py
@@ -9,7 +9,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
- logger.info("(page_url='%s')" % page_url)
+ logger.debug("(page_url='%s')" % page_url)
video_urls = []
data = scrapertools.httptools.downloadpage(page_url).data
media_url = scrapertools.find_single_match(data, 'var\s+video_source\s+\=\s+"([^"]+)"')
@@ -24,5 +24,5 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
else:
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [sendvid]", media_url])
for video_url in video_urls:
- logger.info("%s - %s" % (video_url[0], video_url[1]))
+ logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls
diff --git a/servers/speedvideo.py b/servers/speedvideo.py
index c9a2e42e..4f4d50a2 100644
--- a/servers/speedvideo.py
+++ b/servers/speedvideo.py
@@ -5,7 +5,7 @@ from core import httptools, scrapertools
from platformcode import config, logger
def test_video_exists(page_url):
- logger.info("(page_url='%s')" % page_url)
+ logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
@@ -15,22 +15,22 @@ def test_video_exists(page_url):
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
- logger.info("url=" + page_url)
+ logger.debug("url=" + page_url)
video_urls = []
quality ={'MOBILE':1,
'NORMAL':2,
'HD':3}
data = httptools.downloadpage(page_url).data
- logger.info('SPEEDVIDEO DATA '+ data)
+ logger.debug('SPEEDVIDEO DATA '+ data)
media_urls = scrapertools.find_multiple_matches(data, r"file:[^']'([^']+)',\s*label:[^\"]\"([^\"]+)\"")
- logger.info("speed video - media urls: %s " % media_urls)
+ logger.debug("speed video - media urls: %s " % media_urls)
for media_url, label in media_urls:
media_url = httptools.downloadpage(media_url, only_headers=True, follow_redirects=False).headers.get("location", "")
if media_url:
video_urls.append([media_url.split('.')[-1] + ' - ' + label + ' - ' + ' [Speedvideo]', media_url])
- logger.info("speed video - media urls: %s " % video_urls)
+ logger.debug("speed video - media urls: %s " % video_urls)
return sorted(video_urls, key=lambda x: quality[x[0].split(' - ')[1]])
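
The return line above sorts by the middle token of each label through the quality dict, so MOBILE entries come before NORMAL before HD. A worked example with made-up URLs:

    quality = {'MOBILE': 1, 'NORMAL': 2, 'HD': 3}
    video_urls = [
        ["mp4 - HD -  [Speedvideo]", "https://example.com/hd"],
        ["mp4 - MOBILE -  [Speedvideo]", "https://example.com/mob"],
        ["mp4 - NORMAL -  [Speedvideo]", "https://example.com/sd"],
    ]
    # x[0].split(' - ')[1] pulls out the quality token, e.g. 'HD'
    for v in sorted(video_urls, key=lambda x: quality[x[0].split(' - ')[1]]):
        print(v[0])   # MOBILE, then NORMAL, then HD
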
diff --git a/servers/streamtape.py b/servers/streamtape.py
index a2b4b4a2..05fbb07b 100644
--- a/servers/streamtape.py
+++ b/servers/streamtape.py
@@ -11,7 +11,7 @@ if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
def test_video_exists(page_url):
- logger.info("(page_url='%s')" % page_url)
+ logger.debug("(page_url='%s')" % page_url)
global data
referer = {"Referer": page_url}
@@ -25,7 +25,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
- logger.info("url=" + page_url)
+ logger.debug("url=" + page_url)
video_urls = []
possible_url = scrapertools.find_single_match(data, 'innerHTML = "([^"]+)')
diff --git a/servers/streamz.py b/servers/streamz.py
index 5540558d..ac0fb568 100644
--- a/servers/streamz.py
+++ b/servers/streamz.py
@@ -8,7 +8,7 @@ from lib import jsunpack
def test_video_exists(page_url):
global data
- logger.info("(page_url='%s')" % page_url)
+ logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "File not found, sorry!" in data:
@@ -17,7 +17,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, video_password):
- logger.info("(page_url='%s')" % page_url)
+ logger.debug("(page_url='%s')" % page_url)
video_urls = []
from core.support import match
matches = match(data, patron=r'(eval\(function\(p,a,c,k,e,d\).*?)\s+').matches
diff --git a/servers/supervideo.py b/servers/supervideo.py
index 1665152d..2ab9ae1a 100644
--- a/servers/supervideo.py
+++ b/servers/supervideo.py
@@ -9,7 +9,7 @@ from platformcode import config, logger
def test_video_exists(page_url):
- logger.info("(page_url='%s')" % page_url)
+ logger.debug("(page_url='%s')" % page_url)
global data
data = httptools.downloadpage(page_url, cookies=False).data
if 'File is no longer available as it expired or has been deleted' in data:
@@ -19,7 +19,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
- logger.info("url=" + page_url)
+ logger.debug("url=" + page_url)
video_urls = []
# data = httptools.downloadpage(page_url).data
global data
diff --git a/servers/thevid.py b/servers/thevid.py
index e061856b..13ee9582 100644
--- a/servers/thevid.py
+++ b/servers/thevid.py
@@ -8,7 +8,7 @@ from platformcode import logger, config
def test_video_exists(page_url):
- logger.info("(page_url='%s')" % page_url)
+ logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "Video not found..." in data or "Video removed due to copyright" in data:
return False, config.get_localized_string(70292) % "Thevid"
@@ -31,5 +31,5 @@ def get_video_url(page_url, user="", password="", video_password=""):
continue
video = "https:" + video
video_urls.append(["mp4 [Thevid]", video])
- logger.info("Url: %s" % videos)
+ logger.debug("Url: %s" % videos)
return video_urls
diff --git a/servers/thevideobee.py b/servers/thevideobee.py
index f319dcfe..001d3471 100644
--- a/servers/thevideobee.py
+++ b/servers/thevideobee.py
@@ -9,7 +9,7 @@ from platformcode import logger
def test_video_exists(page_url):
- logger.info("(page_url='%s')" % page_url)
+ logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "no longer exists" in data or "to copyright issues" in data:
return False, config.get_localized_string(70449) % "thevideobee"
@@ -17,7 +17,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, user="", password="", video_password=""):
- logger.info("(page_url='%s')" % page_url)
+ logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
video_urls = []
videourl = scrapertools.find_single_match(data, 'src: "([^"]+)')
diff --git a/servers/turbobit.py b/servers/turbobit.py
index d370f03f..9f254dc9 100644
--- a/servers/turbobit.py
+++ b/servers/turbobit.py
@@ -4,6 +4,6 @@ from platformcode import logger
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
- logger.info("(page_url='%s')" % page_url)
+ logger.debug("(page_url='%s')" % page_url)
video_urls = []
return video_urls
diff --git a/servers/turbovid.py b/servers/turbovid.py
index 4bb6b993..d1f1e19c 100644
--- a/servers/turbovid.py
+++ b/servers/turbovid.py
@@ -12,7 +12,7 @@ from platformcode import logger, config
def test_video_exists(page_url):
- logger.info("(page_url='%s')" % page_url)
+ logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "Not Found" in data or "File Does not Exist" in data:
return False, config.get_localized_string(70449) % "Turbovid"
@@ -21,7 +21,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password="", server='Turbovid'):
- logger.info("(turbovid page_url='%s')" % page_url)
+ logger.debug("(turbovid page_url='%s')" % page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
data = data.replace('"', "'")
@@ -31,6 +31,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
time.sleep(6)
data = httptools.downloadpage(page_url_post, post=post).data
- logger.info("(data page_url='%s')" % data)
+ logger.debug("(data page_url='%s')" % data)
video_urls = support.get_jwplayer_mediaurl(data, 'Turbovid')
return video_urls
diff --git a/servers/tusfiles.py b/servers/tusfiles.py
index 18cec1ee..6390c208 100644
--- a/servers/tusfiles.py
+++ b/servers/tusfiles.py
@@ -9,7 +9,7 @@ from platformcode import logger
def test_video_exists(page_url):
- logger.info("(page_url='%s')" % page_url)
+ logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "no longer exists" in data or "to copyright issues" in data:
return False, config.get_localized_string(70449) % "tusfiles"
@@ -17,7 +17,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, user="", password="", video_password=""):
- logger.info("(page_url='%s')" % page_url)
+ logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
video_urls = []
videourl = scrapertools.find_single_match(data, 'source src="([^"]+)')
diff --git a/servers/uploadedto.py b/servers/uploadedto.py
index 72b83510..37529ab7 100755
--- a/servers/uploadedto.py
+++ b/servers/uploadedto.py
@@ -6,7 +6,7 @@ from platformcode import logger
def test_video_exists(page_url):
- logger.info("(page_url='%s')" % page_url)
+ logger.debug("(page_url='%s')" % page_url)
real_url = page_url.replace("uploaded.to", "uploaded.net")
code = httptools.downloadpage(real_url, only_headers=True).code
@@ -16,36 +16,36 @@ def test_video_exists(page_url):
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
- logger.info("(page_url='%s')" % page_url)
+ logger.debug("(page_url='%s')" % page_url)
video_urls = []
if premium:
# If no active cookie is stored, log in
if check_cookie("uploaded.net", "login") != True:
# Log in to obtain the cookie
- logger.info("-------------------------------------------")
- logger.info("login")
- logger.info("-------------------------------------------")
+ logger.debug("-------------------------------------------")
+ logger.debug("login")
+ logger.debug("-------------------------------------------")
login_url = "http://uploaded.net/io/login"
post = "id=" + user + "&pw=" + password
setcookie = httptools.downloadpage(login_url, post=post, follow_redirects=False,
only_headers=True).headers.get("set-cookie", "")
- logger.info("-------------------------------------------")
- logger.info("obtiene la url")
- logger.info("-------------------------------------------")
+ logger.debug("-------------------------------------------")
+ logger.debug("obtiene la url")
+ logger.debug("-------------------------------------------")
location = httptools.downloadpage(page_url, follow_redirects=False, only_headers=True).headers.get("location",
"")
- logger.info("location=" + location)
+ logger.debug("location=" + location)
# fix for non-direct downloads
if location == "":
data = httptools.downloadpage(page_url).data
- #logger.info("data: %s" % data)
+ #logger.debug("data: %s" % data)
if "Premium Download
" in data:
location = scrapertools.find_single_match(data, '