rewritten logger
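
This commit swaps every `logger.log(...)` call for `logger.info(...)` across the touched modules, including the bare `logger.info()` calls used purely as function-entry traces. As a rough sketch of what a compatible logger has to support (an assumption for illustration, not the project's actual platformcode/logger.py implementation):

```python
# Hypothetical minimal logger compatible with the calls in this diff:
# info() with no arguments logs the caller as an entry trace,
# info(msg) logs the message. Assumption only; the real module
# shipped with the addon may differ.
import inspect
import logging

logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')
_log = logging.getLogger('plugin')


def info(*args):
    # Identify the calling function so bare info() calls still trace flow.
    frame = inspect.stack()[1]
    caller = '%s:%s' % (frame[1], frame[3])
    message = ' '.join(str(a) for a in args)
    _log.info('[%s] %s' % (caller, message) if message else '[%s]' % caller)
```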
@@ -27,7 +27,7 @@ class ChromeOSImage:
     """

     def __init__(self, imgpath):
-        logger.log('Image Path: ' + imgpath)
+        logger.info('Image Path: ' + imgpath)
         """Prepares the image"""
         self.imgpath = imgpath
         self.bstream = self.get_bstream(imgpath)
@@ -59,7 +59,7 @@ class ChromeOSImage:
         self.seek_stream(entries_start * lba_size)

         if not calcsize(part_format) == entry_size:
-            logger.log('Partition table entries are not 128 bytes long')
+            logger.info('Partition table entries are not 128 bytes long')
             return 0

         for index in range(1, entries_num + 1): # pylint: disable=unused-variable
@@ -71,7 +71,7 @@ class ChromeOSImage:
                 break

         if not offset:
-            logger.log('Failed to calculate losetup offset.')
+            logger.info('Failed to calculate losetup offset.')
             return 0

         return offset
@@ -93,7 +93,7 @@ class ChromeOSImage:
         while True:
             chunk2 = self.read_stream(chunksize)
             if not chunk2:
-                logger.log('File %s not found in the ChromeOS image' % filename)
+                logger.info('File %s not found in the ChromeOS image' % filename)
                 return False

             chunk = chunk1 + chunk2
@@ -25,7 +25,7 @@ intervenido_sucuri = 'Access Denied - Sucuri Website Firewall'


 def update_title(item):
-    logger.log()
+    logger.info()
     from core import scraper,support


@@ -41,7 +41,7 @@ def update_title(item):
     The channel must add a method to be able to receive the call from Kodi / Alfa, and be able to call this method:

         def actualizar_titulos(item):
-            logger.log()
+            logger.info()
             itemlist = []
             from lib import generictools
             from platformcode import launcher
@@ -205,7 +205,7 @@ def update_title(item):


 def refresh_screen(item):
-    logger.log()
+    logger.info()

     """
     #### Kodi 18 compatibility ####
@@ -239,7 +239,7 @@ def refresh_screen(item):


 def post_tmdb_listado(item, itemlist):
-    logger.log()
+    logger.info()
     itemlist_fo = []

     """
@@ -484,7 +484,7 @@ def post_tmdb_listado(item, itemlist):


 def post_tmdb_seasons(item, itemlist):
-    logger.log()
+    logger.info()

     """

@@ -644,7 +644,7 @@ def post_tmdb_seasons(item, itemlist):


 def post_tmdb_episodios(item, itemlist):
-    logger.log()
+    logger.info()
     itemlist_fo = []

     """
@@ -995,7 +995,7 @@ def post_tmdb_episodios(item, itemlist):


 def post_tmdb_findvideos(item, itemlist):
-    logger.log()
+    logger.info()

     """

@@ -1215,7 +1215,7 @@ def post_tmdb_findvideos(item, itemlist):


 def get_field_from_kodi_DB(item, from_fields='*', files='file'):
-    logger.log()
+    logger.info()
     """

     Call to read from the Kodi DB the input fields received (from_fields, by default "*") of the video indicated in Item
@@ -1293,7 +1293,7 @@ def get_field_from_kodi_DB(item, from_fields='*', files='file'):


 def fail_over_newpct1(item, patron, patron2=None, timeout=None):
-    logger.log()
+    logger.info()
     import ast

     """
@@ -1494,7 +1494,7 @@ def fail_over_newpct1(item, patron, patron2=None, timeout=None):


 def web_intervenida(item, data, desactivar=True):
-    logger.log()
+    logger.info()

     """

@@ -1577,7 +1577,7 @@ def web_intervenida(item, data, desactivar=True):


 def regenerate_clones():
-    logger.log()
+    logger.info()
     import json
     from core import videolibrarytools

@@ -1591,7 +1591,7 @@ def regenerate_clones():
     # Find the paths where to leave the control .json file, and the Video Library
     json_path = filetools.exists(filetools.join(config.get_runtime_path(), 'verify_cached_torrents.json'))
     if json_path:
-        logger.log('Previously repaired video library: WE ARE GOING')
+        logger.info('Previously repaired video library: WE ARE GOING')
         return False
     json_path = filetools.join(config.get_runtime_path(), 'verify_cached_torrents.json')
     filetools.write(json_path, json.dumps({"CINE_verify": True}))  # Prevents another simultaneous process from being launched
@@ -1631,7 +1631,7 @@ def regenerate_clones():

         # Delete the Tvshow.nfo files and check if the .nfo has more than one channel and one is clone Newpct1
         for file in files:
-            # logger.log('file - nfos: ' + file)
+            # logger.info('file - nfos: ' + file)
             if 'tvshow.nfo' in file:
                 file_path = filetools.join(root, 'tvshow.nfo')
                 filetools.remove(file_path)
@@ -1697,7 +1697,7 @@ def regenerate_clones():
         for file in files:
             file_path = filetools.join(root, file)
             if '.json' in file:
-                logger.log('** file: ' + file)
+                logger.info('** file: ' + file)
                 canal_json = scrapertools.find_single_match(file, r'\[(\w+)\].json')
                 if canal_json not in nfo.library_urls:
                     filetools.remove(file_path)  # we delete the .json, it is a zombie
@@ -1740,7 +1740,7 @@ def regenerate_clones():


 def dejuice(data):
-    logger.log()
+    logger.info()
     # Method to deobfuscate JuicyCodes data

     import base64
@@ -47,7 +47,7 @@ class Client(object):
         t= Thread(target=self._auto_shutdown)
         t.setDaemon(True)
         t.start()
-        logger.log("MEGA Server Started")
+        logger.info("MEGA Server Started")

     def _auto_shutdown(self):
         while self.running:
@@ -77,7 +77,7 @@ class Client(object):
     def stop(self):
         self.running = False
         self._server.stop()
-        logger.log("MEGA Server Stopped")
+        logger.info("MEGA Server Stopped")

     def get_play_list(self):
         if len(self.files) > 1:
@@ -105,7 +105,7 @@ class Client(object):
                 return files

         except:
-            logger.log(traceback.format_exc())
+            logger.info(traceback.format_exc())
             pass

         return files
@@ -14,7 +14,7 @@ remote = None


 def parse_url(url):
-    # logger.log("Url: %s" % url)
+    # logger.info("Url: %s" % url)
     url = url.strip()
     patron = "^smb://(?:([^;\n]+);)?(?:([^:@\n]+)[:|@])?(?:([^@\n]+)@)?([^/]+)/([^/\n]+)([/]?.*?)$"
     domain, user, password, server_name, share_name, path = re.compile(patron, re.DOTALL).match(url).groups()
@@ -27,7 +27,7 @@ def parse_url(url):
     if path.endswith("/"): path = path[:-1]
     if not path: path = "/"

-    # logger.log("Dominio: '%s' |Usuario: '%s' | Password: '%s' | Servidor: '%s' | IP: '%s' | Share Name: '%s' | Path: '%s'" % (domain, user, password, server_name, server_ip, share_name, path))
+    # logger.info("Dominio: '%s' |Usuario: '%s' | Password: '%s' | Servidor: '%s' | IP: '%s' | Share Name: '%s' | Path: '%s'" % (domain, user, password, server_name, server_ip, share_name, path))
     return server_name, server_ip, share_name, unicode(path, "utf8"), user, password, domain


@@ -46,7 +46,7 @@ def get_server_name_ip(server):


 def connect(url):
-    # logger.log("Url: %s" % url)
+    # logger.info("Url: %s" % url)
     global remote
     server_name, server_ip, share_name, path, user, password, domain = parse_url(url)

@@ -63,7 +63,7 @@ def connect(url):


 def listdir(url):
-    logger.log("Url: %s" % url)
+    logger.info("Url: %s" % url)
     remote, share_name, path = connect(url)
     try:
         files = [f.filename for f in remote.listPath(share_name, path) if not f.filename in [".", ".."]]
@@ -73,7 +73,7 @@ def listdir(url):


 def walk(url, topdown=True, onerror=None):
-    logger.log("Url: %s" % url)
+    logger.info("Url: %s" % url)
     remote, share_name, path = connect(url)

     try:
@@ -103,7 +103,7 @@ def walk(url, topdown=True, onerror=None):


 def get_attributes(url):
-    logger.log("Url: %s" % url)
+    logger.info("Url: %s" % url)
     remote, share_name, path = connect(url)
     try:
         return remote.getAttributes(share_name, path)
@@ -112,7 +112,7 @@ def get_attributes(url):


 def mkdir(url):
-    logger.log("Url: %s" % url)
+    logger.info("Url: %s" % url)
     remote, share_name, path = connect(url)
     try:
         remote.createDirectory(share_name, path)
@@ -121,12 +121,12 @@ def mkdir(url):


 def smb_open(url, mode):
-    logger.log("Url: %s" % url)
+    logger.info("Url: %s" % url)
     return SMBFile(url, mode)


 def isfile(url):
-    logger.log("Url: %s" % url)
+    logger.info("Url: %s" % url)
     remote, share_name, path = connect(url)
     try:
         files = [f.filename for f in remote.listPath(share_name, os.path.dirname(path)) if not f.isDirectory]
@@ -136,7 +136,7 @@ def isfile(url):


 def isdir(url):
-    logger.log("Url: %s" % url)
+    logger.info("Url: %s" % url)
     remote, share_name, path = connect(url)
     try:
         folders = [f.filename for f in remote.listPath(share_name, os.path.dirname(path)) if f.isDirectory]
@@ -146,7 +146,7 @@ def isdir(url):


 def exists(url):
-    logger.log("Url: %s" % url)
+    logger.info("Url: %s" % url)
     remote, share_name, path = connect(url)
     try:
         files = [f.filename for f in remote.listPath(share_name, os.path.dirname(path))]
@@ -156,7 +156,7 @@ def exists(url):


 def remove(url):
-    logger.log("Url: %s" % url)
+    logger.info("Url: %s" % url)
     remote, share_name, path = connect(url)
     try:
         remote.deleteFiles(share_name, path)
@@ -165,7 +165,7 @@ def remove(url):


 def rmdir(url):
-    logger.log("Url: %s" % url)
+    logger.info("Url: %s" % url)
     remote, share_name, path = connect(url)
     try:
         remote.deleteDirectory(share_name, path)
@@ -174,7 +174,7 @@ def rmdir(url):


 def rename(url, new_name):
-    logger.log("Url: %s" % url)
+    logger.info("Url: %s" % url)
     remote, share_name, path = connect(url)
     _, _, _, new_name, _, _, _ = parse_url(new_name)
     try:
@@ -96,7 +96,7 @@ class UnshortenIt(object):
             if oldUri == uri:
                 break

-        logger.log(uri)
+        logger.info(uri)

         return uri, code

@@ -531,12 +531,12 @@ class UnshortenIt(object):
         r = httptools.downloadpage(uri, timeout=self._timeout, headers=headers, follow_redirects=False)
         if 'Wait 1 hour' in r.data:
             uri = ''
-            logger.log('IP bannato da vcrypt, aspetta un ora')
+            logger.info('IP bannato da vcrypt, aspetta un ora')
         else:
             prev_uri = uri
             uri = r.headers['location']
             if uri == prev_uri:
-                logger.log('Use Cloudscraper')
+                logger.info('Use Cloudscraper')
                 uri = httptools.downloadpage(uri, timeout=self._timeout, headers=headers, follow_redirects=False, cf=True).headers['location']

         if "4snip" in uri:
@@ -593,7 +593,7 @@ class UnshortenIt(object):
         r = httptools.downloadpage(uri, follow_redirect=True, timeout=self._timeout, cookies=False)
         if 'get/' in r.url:
             uri = 'https://linkhub.icu/view/' + re.search('\.\./view/([^"]+)', r.data).group(1)
-            logger.log(uri)
+            logger.info(uri)
             r = httptools.downloadpage(uri, follow_redirect=True, timeout=self._timeout, cookies=False)
             uri = re.search('<div id="text-url".*\n\s+<a href="([^"]+)', r.data).group(0)
         return uri, r.code
@@ -683,7 +683,7 @@ def findlinks(text):
     regex = '(?:https?://(?:[\w\d]+\.)?)?(?:' + regex + ')/[a-zA-Z0-9_=/]+'
     for match in re.findall(regex, text):
         matches.append(match)
-    logger.log('matches=' + str(matches))
+    logger.info('matches=' + str(matches))
     if len(matches) == 1:
         text += '\n' + unshorten(matches[0])[0]
     elif matches: