- logger.info -> logger.log

- fix unshortenit kodi 19
Alhaziel01
2020-08-17 11:17:55 +02:00
parent bfb80f6889
commit c7e41f41a2
162 changed files with 1011 additions and 1034 deletions
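
Both changes are mechanical: every logger.info(...) call becomes logger.log(...), and unshortenit.py gains Python 3 imports so it keeps working on Kodi 19. Purely as an illustration (not something shipped in this commit), a repo-wide rename like this can be scripted in a few lines; the addon path below is a placeholder:

import re
from pathlib import Path

def rename_logger_calls(root):
    # Rewrite logger.info(...) calls to logger.log(...) in every .py file under root.
    pattern = re.compile(r'\blogger\.info\(')
    for path in Path(root).rglob('*.py'):
        text = path.read_text(encoding='utf-8')
        new_text = pattern.sub('logger.log(', text)
        if new_text != text:
            path.write_text(new_text, encoding='utf-8')

# rename_logger_calls('plugin.video.alfa')  # placeholder path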

View File

@@ -27,7 +27,7 @@ class ChromeOSImage:
"""
def __init__(self, imgpath):
-logger.info('Image Path: ' + imgpath)
+logger.log('Image Path: ' + imgpath)
"""Prepares the image"""
self.imgpath = imgpath
self.bstream = self.get_bstream(imgpath)
@@ -59,7 +59,7 @@ class ChromeOSImage:
self.seek_stream(entries_start * lba_size)
if not calcsize(part_format) == entry_size:
-logger.info('Partition table entries are not 128 bytes long')
+logger.log('Partition table entries are not 128 bytes long')
return 0
for index in range(1, entries_num + 1): # pylint: disable=unused-variable
@@ -71,7 +71,7 @@ class ChromeOSImage:
break
if not offset:
-logger.info('Failed to calculate losetup offset.')
+logger.log('Failed to calculate losetup offset.')
return 0
return offset
@@ -93,7 +93,7 @@ class ChromeOSImage:
while True:
chunk2 = self.read_stream(chunksize)
if not chunk2:
-logger.info('File %s not found in the ChromeOS image' % filename)
+logger.log('File %s not found in the ChromeOS image' % filename)
return False
chunk = chunk1 + chunk2
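
The hunk above sits inside the chunked scan ChromeOSImage uses to locate a file in the recovery image: two consecutive chunks are concatenated so a match that straddles a chunk boundary is still found. A standalone sketch of that pattern, with read_stream, needle and chunksize as illustrative placeholders rather than the class's real attributes:

def find_in_stream(read_stream, needle, chunksize=1024 * 1024):
    # Return the absolute offset of needle in the stream, or -1 if it is absent.
    offset = 0
    chunk1 = read_stream(chunksize)
    while chunk1:
        chunk2 = read_stream(chunksize)
        chunk = chunk1 + chunk2  # overlap the chunks, as in the diff above
        pos = chunk.find(needle)
        if pos != -1:
            return offset + pos
        if not chunk2:
            break
        offset += len(chunk1)
        chunk1 = chunk2
    return -1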

View File

@@ -25,7 +25,7 @@ intervenido_sucuri = 'Access Denied - Sucuri Website Firewall'
def update_title(item):
-logger.info()
+logger.log()
from core import scraper,support
@@ -41,7 +41,7 @@ def update_title(item):
The channel must add a method to be able to receive the call from Kodi / Alfa, and be able to call this method:
def actualizar_titulos(item):
-logger.info()
+logger.log()
itemlist = []
from lib import generictools
from platformcode import launcher
@@ -205,7 +205,7 @@ def update_title(item):
def refresh_screen(item):
-logger.info()
+logger.log()
"""
#### Kodi 18 compatibility ####
@@ -239,7 +239,7 @@ def refresh_screen(item):
def post_tmdb_listado(item, itemlist):
-logger.info()
+logger.log()
itemlist_fo = []
"""
@@ -484,7 +484,7 @@ def post_tmdb_listado(item, itemlist):
def post_tmdb_seasons(item, itemlist):
-logger.info()
+logger.log()
"""
@@ -644,7 +644,7 @@ def post_tmdb_seasons(item, itemlist):
def post_tmdb_episodios(item, itemlist):
-logger.info()
+logger.log()
itemlist_fo = []
"""
@@ -995,7 +995,7 @@ def post_tmdb_episodios(item, itemlist):
def post_tmdb_findvideos(item, itemlist):
-logger.info()
+logger.log()
"""
@@ -1215,7 +1215,7 @@ def post_tmdb_findvideos(item, itemlist):
def get_field_from_kodi_DB(item, from_fields='*', files='file'):
-logger.info()
+logger.log()
"""
Call to read from the Kodi DB the input fields received (from_fields, by default "*") of the video indicated in Item
@@ -1293,7 +1293,7 @@ def get_field_from_kodi_DB(item, from_fields='*', files='file'):
def fail_over_newpct1(item, patron, patron2=None, timeout=None):
-logger.info()
+logger.log()
import ast
"""
@@ -1494,7 +1494,7 @@ def fail_over_newpct1(item, patron, patron2=None, timeout=None):
def web_intervenida(item, data, desactivar=True):
-logger.info()
+logger.log()
"""
@@ -1577,7 +1577,7 @@ def web_intervenida(item, data, desactivar=True):
def regenerate_clones():
-logger.info()
+logger.log()
import json
from core import videolibrarytools
@@ -1591,7 +1591,7 @@ def regenerate_clones():
# Find the paths where to leave the control .json file, and the Video Library
json_path = filetools.exists(filetools.join(config.get_runtime_path(), 'verify_cached_torrents.json'))
if json_path:
-logger.info('Previously repaired video library: WE ARE GOING')
+logger.log('Previously repaired video library: WE ARE GOING')
return False
json_path = filetools.join(config.get_runtime_path(), 'verify_cached_torrents.json')
filetools.write(json_path, json.dumps({"CINE_verify": True})) # Prevents another simultaneous process from being launched
@@ -1631,7 +1631,7 @@ def regenerate_clones():
# Delete the Tvshow.nfo files and check if the .nfo has more than one channel and one is clone Newpct1
for file in files:
-# logger.info('file - nfos: ' + file)
+# logger.log('file - nfos: ' + file)
if 'tvshow.nfo' in file:
file_path = filetools.join(root, 'tvshow.nfo')
filetools.remove(file_path)
@@ -1697,7 +1697,7 @@ def regenerate_clones():
for file in files:
file_path = filetools.join(root, file)
if '.json' in file:
-logger.info('** file: ' + file)
+logger.log('** file: ' + file)
canal_json = scrapertools.find_single_match(file, r'\[(\w+)\].json')
if canal_json not in nfo.library_urls:
filetools.remove(file_path) # we delete the .json is a zombie
@@ -1740,7 +1740,7 @@ def regenerate_clones():
def dejuice(data):
-logger.info()
+logger.log()
# Method to unobtrusive JuicyCodes data
import base64
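
Most of the calls touched in this file are bare logger.log() with no message: the logger is left to work out where it was called from. A sketch of how a logger can do that with inspect (an illustration of the pattern only, not the actual platformcode.logger implementation):

import inspect

def log(message=''):
    # Look one frame up the stack to find the module and function that called log().
    caller = inspect.stack()[1]
    module = inspect.getmodule(caller[0])
    module_name = module.__name__ if module else '<unknown>'
    print('[%s] %s %s' % (module_name, caller[3], message))  # caller[3] is the function name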

View File

@@ -45,7 +45,7 @@ class Client(object):
t= Thread(target=self._auto_shutdown)
t.setDaemon(True)
t.start()
-logger.info("MEGA Server Started")
+logger.log("MEGA Server Started")
def _auto_shutdown(self):
while self.running:
@@ -75,7 +75,7 @@ class Client(object):
def stop(self):
self.running = False
self._server.stop()
-logger.info("MEGA Server Stopped")
+logger.log("MEGA Server Stopped")
def get_play_list(self):
if len(self.files) > 1:
@@ -103,7 +103,7 @@ class Client(object):
return files
except:
-logger.info(traceback.format_exc())
+logger.log(traceback.format_exc())
pass
return files
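
The MEGA client starts a daemon watchdog thread next to its local server and stops the server once it goes idle. A toy version of that auto-shutdown pattern (names and timing are illustrative, not the client's real fields); note that t.daemon = True is the modern spelling of setDaemon(True), which is deprecated since Python 3.9:

import threading
import time

class AutoShutdownServer(object):
    def __init__(self, idle_timeout=60):
        self.running = True
        self.last_activity = time.time()
        self.idle_timeout = idle_timeout
        t = threading.Thread(target=self._auto_shutdown)
        t.daemon = True  # do not keep the process alive just for the watchdog
        t.start()

    def touch(self):
        # Call on every request so the watchdog knows the server is still in use.
        self.last_activity = time.time()

    def stop(self):
        self.running = False

    def _auto_shutdown(self):
        while self.running:
            time.sleep(1)
            if time.time() - self.last_activity > self.idle_timeout:
                self.stop()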

View File

@@ -14,7 +14,7 @@ remote = None
def parse_url(url):
-# logger.info("Url: %s" % url)
+# logger.log("Url: %s" % url)
url = url.strip()
patron = "^smb://(?:([^;\n]+);)?(?:([^:@\n]+)[:|@])?(?:([^@\n]+)@)?([^/]+)/([^/\n]+)([/]?.*?)$"
domain, user, password, server_name, share_name, path = re.compile(patron, re.DOTALL).match(url).groups()
@@ -27,7 +27,7 @@ def parse_url(url):
if path.endswith("/"): path = path[:-1]
if not path: path = "/"
-# logger.info("Dominio: '%s' |Usuario: '%s' | Password: '%s' | Servidor: '%s' | IP: '%s' | Share Name: '%s' | Path: '%s'" % (domain, user, password, server_name, server_ip, share_name, path))
+# logger.log("Dominio: '%s' |Usuario: '%s' | Password: '%s' | Servidor: '%s' | IP: '%s' | Share Name: '%s' | Path: '%s'" % (domain, user, password, server_name, server_ip, share_name, path))
return server_name, server_ip, share_name, unicode(path, "utf8"), user, password, domain
@@ -46,7 +46,7 @@ def get_server_name_ip(server):
def connect(url):
-# logger.info("Url: %s" % url)
+# logger.log("Url: %s" % url)
global remote
server_name, server_ip, share_name, path, user, password, domain = parse_url(url)
@@ -63,7 +63,7 @@ def connect(url):
def listdir(url):
-logger.info("Url: %s" % url)
+logger.log("Url: %s" % url)
remote, share_name, path = connect(url)
try:
files = [f.filename for f in remote.listPath(share_name, path) if not f.filename in [".", ".."]]
@@ -73,7 +73,7 @@ def listdir(url):
def walk(url, topdown=True, onerror=None):
-logger.info("Url: %s" % url)
+logger.log("Url: %s" % url)
remote, share_name, path = connect(url)
try:
@@ -103,7 +103,7 @@ def walk(url, topdown=True, onerror=None):
def get_attributes(url):
-logger.info("Url: %s" % url)
+logger.log("Url: %s" % url)
remote, share_name, path = connect(url)
try:
return remote.getAttributes(share_name, path)
@@ -112,7 +112,7 @@ def get_attributes(url):
def mkdir(url):
-logger.info("Url: %s" % url)
+logger.log("Url: %s" % url)
remote, share_name, path = connect(url)
try:
remote.createDirectory(share_name, path)
@@ -121,12 +121,12 @@ def mkdir(url):
def smb_open(url, mode):
-logger.info("Url: %s" % url)
+logger.log("Url: %s" % url)
return SMBFile(url, mode)
def isfile(url):
-logger.info("Url: %s" % url)
+logger.log("Url: %s" % url)
remote, share_name, path = connect(url)
try:
files = [f.filename for f in remote.listPath(share_name, os.path.dirname(path)) if not f.isDirectory]
@@ -136,7 +136,7 @@ def isfile(url):
def isdir(url):
-logger.info("Url: %s" % url)
+logger.log("Url: %s" % url)
remote, share_name, path = connect(url)
try:
folders = [f.filename for f in remote.listPath(share_name, os.path.dirname(path)) if f.isDirectory]
@@ -146,7 +146,7 @@ def isdir(url):
def exists(url):
-logger.info("Url: %s" % url)
+logger.log("Url: %s" % url)
remote, share_name, path = connect(url)
try:
files = [f.filename for f in remote.listPath(share_name, os.path.dirname(path))]
@@ -156,7 +156,7 @@ def exists(url):
def remove(url):
-logger.info("Url: %s" % url)
+logger.log("Url: %s" % url)
remote, share_name, path = connect(url)
try:
remote.deleteFiles(share_name, path)
@@ -165,7 +165,7 @@ def remove(url):
def rmdir(url):
-logger.info("Url: %s" % url)
+logger.log("Url: %s" % url)
remote, share_name, path = connect(url)
try:
remote.deleteDirectory(share_name, path)
@@ -174,7 +174,7 @@ def rmdir(url):
def rename(url, new_name):
-logger.info("Url: %s" % url)
+logger.log("Url: %s" % url)
remote, share_name, path = connect(url)
_, _, _, new_name, _, _, _ = parse_url(new_name)
try:
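
Every helper in this module goes through parse_url(), which splits an smb:// URL into domain, credentials, server, share and path with a single regular expression. A standalone demonstration of that pattern; the URL and credentials below are made up:

import re

patron = r"^smb://(?:([^;\n]+);)?(?:([^:@\n]+)[:|@])?(?:([^@\n]+)@)?([^/]+)/([^/\n]+)([/]?.*?)$"
url = "smb://WORKGROUP;user:secret@fileserver/share/folder/file.mkv"
domain, user, password, server, share, path = re.compile(patron, re.DOTALL).match(url).groups()
print(domain, user, password, server, share, path)
# WORKGROUP user secret fileserver share /folder/file.mkv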

View File

@@ -1,16 +1,15 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-try:
-from urllib.parse import urlsplit, urlparse, parse_qs, urljoin
-except:
+import os, re, sys, json, time
+if sys.version_info[0] >= 3:
+from urllib.parse import urlsplit, urlparse, parse_qs, urljoin, urlencode
+from urllib.request import urlopen
+else:
+from urllib import urlencode, urlopen
+from urlparse import urlsplit, urlparse, parse_qs, urljoin
-import json
-import os
-import re
-import time
-import urllib
from base64 import b64decode
from core import httptools, scrapertools
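
The replacement import block is the usual single-codebase shim for Kodi 18 (Python 2) and Kodi 19 (Python 3): test sys.version_info once and import urlencode/urlopen from wherever the running interpreter keeps them. Reduced to its essentials (the query value is only an example):

import sys

if sys.version_info[0] >= 3:
    from urllib.parse import urlencode
    from urllib.request import urlopen
else:
    from urllib import urlencode, urlopen  # Python 2 locations

query = urlencode({'id': '12345'})  # behaves the same on either interpreter
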
@@ -61,17 +60,13 @@ class UnshortenIt(object):
return uri, "No domain found in URI!"
had_google_outbound, uri = self._clear_google_outbound_proxy(uri)
-if re.search(self._adfly_regex, domain,
-re.IGNORECASE) or type == 'adfly':
+if re.search(self._adfly_regex, domain, re.IGNORECASE) or type == 'adfly':
uri, code = self._unshorten_adfly(uri)
-if re.search(self._adfocus_regex, domain,
-re.IGNORECASE) or type == 'adfocus':
+if re.search(self._adfocus_regex, domain, re.IGNORECASE) or type == 'adfocus':
uri, code = self._unshorten_adfocus(uri)
-if re.search(self._linkbucks_regex, domain,
-re.IGNORECASE) or type == 'linkbucks':
+if re.search(self._linkbucks_regex, domain, re.IGNORECASE) or type == 'linkbucks':
uri, code = self._unshorten_linkbucks(uri)
-if re.search(self._lnxlu_regex, domain,
-re.IGNORECASE) or type == 'lnxlu':
+if re.search(self._lnxlu_regex, domain, re.IGNORECASE) or type == 'lnxlu':
uri, code = self._unshorten_lnxlu(uri)
if re.search(self._shrink_service_regex, domain, re.IGNORECASE):
uri, code = self._unshorten_shrink_service(uri)
@@ -99,7 +94,7 @@ class UnshortenIt(object):
if oldUri == uri:
break
-logger.info(uri)
+logger.log(uri)
return uri, code
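
The context around this hunk shows how unshorten() terminates: it keeps resolving until the URI stops changing. The same fixed-point loop in isolation, where unshorten_once and the max_hops bound are illustrative stand-ins for the per-service handlers and the loop in the real method:

def unshorten_fully(uri, unshorten_once, max_hops=10):
    code = 200
    for _ in range(max_hops):
        old_uri = uri
        uri, code = unshorten_once(uri)
        if uri == old_uri:  # nothing changed, so the link is fully resolved
            break
    return uri, code
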
@@ -368,7 +363,7 @@ class UnshortenIt(object):
if len(code) > 0:
payload = {'click': code[0]}
r = httptools.downloadpage(
-'http://lnx.lu?' + urllib.urlencode(payload),
+'http://lnx.lu?' + urlencode(payload),
timeout=self._timeout)
return r.url, r.code
else:
@@ -400,7 +395,7 @@ class UnshortenIt(object):
payload = {'adSessionId': session_id, 'callback': 'c'}
r = httptools.downloadpage(
'http://sh.st/shortest-url/end-adsession?' +
-urllib.urlencode(payload),
+urlencode(payload),
headers=http_header,
timeout=self._timeout)
response = r.data[6:-2].decode('utf-8')
@@ -519,7 +514,7 @@ class UnshortenIt(object):
else:
if 'sb/' in uri or 'akv/' in uri or 'wss/' in uri or 'wsd/' in uri:
import datetime, hashlib
-ip = urllib.urlopen('https://api.ipify.org/').read()
+ip = urlopen('https://api.ipify.org/').read()
day = datetime.date.today().strftime('%Y%m%d')
headers = {
"Cookie": hashlib.md5(ip+day).hexdigest() + "=1"
@@ -531,12 +526,12 @@ class UnshortenIt(object):
r = httptools.downloadpage(uri, timeout=self._timeout, headers=headers, follow_redirects=False)
if 'Wait 1 hour' in r.data:
uri = ''
-logger.info('IP bannato da vcrypt, aspetta un ora')
+logger.log('IP bannato da vcrypt, aspetta un ora')
else:
prev_uri = uri
uri = r.headers['location']
if uri == prev_uri:
-logger.info('Use Cloudscraper')
+logger.log('Use Cloudscraper')
uri = httptools.downloadpage(uri, timeout=self._timeout, headers=headers, follow_redirects=False, cf=True).headers['location']
if "4snip" in uri:
@@ -593,7 +588,7 @@ class UnshortenIt(object):
r = httptools.downloadpage(uri, follow_redirect=True, timeout=self._timeout, cookies=False)
if 'get/' in r.url:
uri = 'https://linkhub.icu/view/' + re.search('\.\./view/([^"]+)', r.data).group(1)
-logger.info(uri)
+logger.log(uri)
r = httptools.downloadpage(uri, follow_redirect=True, timeout=self._timeout, cookies=False)
uri = re.search('<div id="text-url".*\n\s+<a href="([^"]+)', r.data).group(0)
return uri, r.code
@@ -641,7 +636,7 @@ class UnshortenIt(object):
try:
id = uri.split('/')[-2]
reqUrl = 'https://stayonline.pro/ajax/linkView.php'
-p = urllib.urlencode({"id": id})
+p = urlencode({"id": id})
r = httptools.downloadpage(reqUrl, post=p)
data = r.data
try:
@@ -683,7 +678,7 @@ def findlinks(text):
regex = '(?:https?://(?:[\w\d]+\.)?)?(?:' + regex + ')/[a-zA-Z0-9_=/]+'
for match in re.findall(regex, text):
matches.append(match)
-logger.info('matches=' + str(matches))
+logger.log('matches=' + str(matches))
if len(matches) == 1:
text += '\n' + unshorten(matches[0])[0]
elif matches:
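
findlinks() builds one alternation out of all known shortener domains and scans free text with it. A self-contained illustration of that regex construction; the domain list here is a made-up stand-in for the real one:

import re

domains = [r'vcrypt\.net', r'linkup\.pro', r'buckler\.link']  # illustrative list
regex = r'(?:https?://(?:[\w\d]+\.)?)?(?:' + '|'.join(domains) + r')/[a-zA-Z0-9_=/]+'
text = 'mirror 1: https://vcrypt.net/abc123 mirror 2: https://buckler.link/xYz9'
print(re.findall(regex, text))
# ['https://vcrypt.net/abc123', 'https://buckler.link/xYz9']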