Less information when logging is disabled

Author: Alhaziel01
Date: 2020-11-25 17:54:25 +01:00
parent 9f11eac225
commit 4f498a05f8
157 changed files with 720 additions and 726 deletions
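
The pattern applied across all 157 files: logger.info calls that were purely diagnostic become logger.debug, while calls that report failures become logger.error, so a run with debugging disabled records only real problems. A minimal sketch of that policy using Python's standard logging module (the addon has its own platformcode logger; the names below are illustrative, not KOD's actual API):

import logging

logger = logging.getLogger("plugin.video.kod")

def configure(debug_enabled):
    # With debugging off, only errors reach the log; diagnostic
    # debug messages are suppressed, which is the point of this commit.
    logging.basicConfig(level=logging.DEBUG if debug_enabled else logging.ERROR)

def search(text):
    logger.debug("search %s", text)  # diagnostic: visible only when debugging
    try:
        return [t for t in ("foo", "bar") if text in t]  # placeholder search
    except Exception as ex:
        logger.error("search failed: %s", ex)  # a real problem: always visible
        return []

configure(debug_enabled=False)
search("fo")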

View File

@@ -73,7 +73,7 @@ def search(item, text):
except:
import sys
for line in sys.exc_info():
logger.info("%s mainlist search log: %s" % (__channel__, line))
logger.error("%s" % line)
return []
# =========== defs for the new releases in the main menu =============

View File

@@ -76,7 +76,7 @@ def newest(categoria):
def search(item, text):
logger.info(item, "search", text)
logger.info("search", text)
if item.contentType == 'tvshow': item.url = host + '/serietv/'
else: item.url = host
try:
@@ -189,14 +189,14 @@ def findvideos(item):
def load_links(itemlist, re_txt, desc_txt, quality=""):
streaming = scrapertools.find_single_match(data, re_txt).replace('"', '')
support.info('STREAMING', streaming)
support.info('STREAMING=', streaming)
logger.debug('STREAMING', streaming)
logger.debug('STREAMING=', streaming)
matches = support.match(streaming, patron = r'<td><a.*?href=([^ ]+) [^>]+>([^<]+)<').matches
for scrapedurl, scrapedtitle in matches:
logger.debug("##### findvideos %s ## %s ## %s ##" % (desc_txt, scrapedurl, scrapedtitle))
itemlist.append(item.clone(action="play", title=scrapedtitle, url=scrapedurl, server=scrapedtitle, quality=quality))
support.info()
logger.debug()
itemlist = []
@@ -228,12 +228,12 @@ def findvideos(item):
def findvid_serie(item):
support.info()
logger.debug()
data = re.sub(r'((?:<p>|<strong>)?[^\d]*\d*(?:&#215;|×)[0-9]+[^<]+)', '', item.other)
return support.server(item, data=data)
def play(item):
support.info()
logger.debug()
return servertools.find_video_items(item, data=item.url)
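
Many of the rewritten calls are bare logger.debug() / support.info() with no arguments; in the Alfa/KOD codebase the logger inspects the call stack to report which function was entered. A minimal sketch of how such a no-argument helper can work, assuming inspect-based caller lookup (illustrative, not the addon's exact implementation):

import inspect
import logging

logging.basicConfig(level=logging.DEBUG)

def debug(*args):
    # Identify the caller so debug() with no arguments still records
    # which function was entered.
    frame = inspect.stack()[1]
    caller = "%s.%s" % (inspect.getmodulename(frame[1]), frame[3])
    logging.debug("[%s] %s", caller, " ".join(str(a) for a in args))

def findvid_serie():
    debug()                                   # logs "[<module>.findvid_serie] "
    debug("STREAMING=", "http://example.com")

findvid_serie()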

View File

@@ -29,7 +29,7 @@ def mainlist(item):
def search(item, text):
logger.info()
logger.info('search', text)
item.url = item.url + "/?s=" + text
try:
return support.dooplay_search(item)

View File

@@ -51,7 +51,7 @@ def episodios(item):
def search(item, text):
support.info('search', item)
support.info('search', text)
item.contentType = 'tvshow'
itemlist = []
text = text.replace(' ', '+')
@@ -66,5 +66,5 @@ def search(item, text):
def findvideos(item):
logger.info("[guardaserie_live] findvideos")
logger.debug()
return support.server(item, item.url)

View File

@@ -29,7 +29,7 @@ def mainlist(item):
def search(item, text):
logger.info()
logger.info('search', text)
item.url = item.url + "/?s=" + text
try:
return support.dooplay_search(item)

View File

@@ -30,7 +30,7 @@ def mainlist(item):
def search(item, text):
logger.info("[vedohd.py] " + item.url + " search " + text)
logger.info("search",text)
item.url = item.url + "/?s=" + text
return support.dooplay_search(item, blacklist)
@@ -44,7 +44,6 @@ def findvideos(item):
itemlist = []
for link in support.dooplay_get_links(item, host):
if link['title'] != 'Trailer':
logger.info(link['title'])
server, quality = scrapertools.find_single_match(link['title'], '([^ ]+) ?(HD|3D)?')
if quality:
title = server + " [COLOR blue][" + quality + "][/COLOR]"
@@ -63,7 +62,7 @@ def menu(item):
def play(item):
logger.info("[vedohd.py] play")
logger.debug()
data = support.swzz_get_url(item)

View File

@@ -14,15 +14,15 @@ YOUTUBE_V3_API_KEY = "AIzaSyCjsmBT0JZy1RT-PLwB-Zkfba87sa2inyI"
def youtube_api_call(method, parameters):
logger.info("method=" + method + ", parameters=" + repr(parameters))
logger.debug("method=" + method + ", parameters=" + repr(parameters))
encoded_parameters = urllib.urlencode(parameters)
url = "https://www.googleapis.com/youtube/v3/" + method + "?" + encoded_parameters + "&key=" + YOUTUBE_V3_API_KEY;
logger.info("url=" + url)
logger.debug("url=" + url)
data = httptools.downloadpage(url).data
logger.info("data=" + data)
logger.debug("data=" + data)
json_object = jsontools.load(data)
@@ -51,13 +51,13 @@ def youtube_get_playlist_items(playlist_id, pageToken=""):
# Show all YouTube playlists for the selected channel
def playlists(item, channel_id, pageToken=""):
logger.info()
logger.debug()
itemlist = []
json_object = youtube_get_user_playlists(channel_id, pageToken)
for entry in json_object["items"]:
logger.info("entry=" + repr(entry))
logger.debug("entry=" + repr(entry))
title = entry["snippet"]["title"]
plot = entry["snippet"]["description"]
@@ -85,13 +85,13 @@ def latest_videos(item, channel_id):
# Show all YouTube videos for the selected playlist
def videos(item, pageToken=""):
logger.info()
logger.debug()
itemlist = []
json_object = youtube_get_playlist_items(item.url, pageToken)
for entry in json_object["items"]:
logger.info("entry=" + repr(entry))
logger.debug("entry=" + repr(entry))
title = entry["snippet"]["title"]
plot = entry["snippet"]["description"]
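
A note on this file: urllib.urlencode is Python 2 only. A hedged Python 3 sketch of the same YouTube Data API v3 call, with a placeholder key rather than the addon's, in case the code is ever ported:

import json
from urllib.parse import urlencode
from urllib.request import urlopen

API_KEY = "YOUR_KEY"  # placeholder, not the addon's real key

def youtube_api_call(method, parameters):
    # Build the Data API v3 URL and decode the JSON response.
    url = "https://www.googleapis.com/youtube/v3/%s?%s&key=%s" % (
        method, urlencode(parameters), API_KEY)
    with urlopen(url) as response:
        return json.load(response)

# Example (requires a valid key):
# youtube_api_call("playlists", {"part": "snippet", "channelId": "UC...", "maxResults": 50})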

View File

@@ -9,7 +9,7 @@ downloadenabled = addon.getSetting('downloadenabled')
def getmainlist(view="thumb_"):
logger.info()
logger.debug()
itemlist = list()
if config.dev_mode():
@@ -62,14 +62,14 @@ def getmainlist(view="thumb_"):
def getchanneltypes(view="thumb_"):
logger.info()
logger.debug()
# Category List
channel_types = ["movie", "tvshow", "anime", "documentary", "vos", "live", "torrent", "music"] #, "direct"
# Channel Language
channel_language = auto_filter()
logger.info("channel_language=%s" % channel_language)
logger.debug("channel_language=%s" % channel_language)
# Build Itemlist
itemlist = list()
@@ -92,7 +92,7 @@ def getchanneltypes(view="thumb_"):
def filterchannels(category, view="thumb_"):
from core import channeltools
logger.info('Filter Channels ' + category)
logger.debug('Filter Channels ' + category)
channelslist = []
@@ -103,14 +103,14 @@ def filterchannels(category, view="thumb_"):
appenddisabledchannels = True
channel_path = os.path.join(config.get_runtime_path(), 'channels', '*.json')
logger.info("channel_path = %s" % channel_path)
logger.debug("channel_path = %s" % channel_path)
channel_files = glob.glob(channel_path)
logger.info("channel_files found %s" % (len(channel_files)))
logger.debug("channel_files found %s" % (len(channel_files)))
# Channel Language
channel_language = auto_filter()
logger.info("channel_language=%s" % channel_language)
logger.debug("channel_language=%s" % channel_language)
for channel_path in channel_files:
logger.debug("channel in for = %s" % channel_path)
@@ -221,7 +221,7 @@ def get_thumb(thumb_name, view="thumb_"):
def set_channel_info(parameters):
logger.info()
logger.debug()
info = ''
language = ''

View File

@@ -29,7 +29,7 @@ def start(itemlist, item):
if item.global_search:
return itemlist
logger.info()
logger.debug()
global PLAYED
PLAYED = False
@@ -274,7 +274,7 @@ def start(itemlist, item):
def play_multi_channel(item, itemlist):
logger.info()
logger.debug()
start(itemlist, item)

View File

@@ -15,7 +15,7 @@ default_file = dict()
remote_path = 'https://raw.githubusercontent.com/kodiondemand/media/master/'
def is_enabled(channel_name):
logger.info("channel_name=" + channel_name)
logger.debug("channel_name=" + channel_name)
return get_channel_parameters(channel_name)["active"] and get_channel_setting("enabled", channel=channel_name,
default=True)
@@ -27,7 +27,7 @@ def get_channel_parameters(channel_name):
if channel_name not in dict_channels_parameters:
try:
channel_parameters = get_channel_json(channel_name)
# logger.debug(channel_parameters)
logger.debug(channel_parameters)
if channel_parameters:
# name and default changes
channel_parameters["title"] = channel_parameters.pop("name") + (' [DEPRECATED]' if 'deprecated' in channel_parameters and channel_parameters['deprecated'] else '')
@@ -87,7 +87,7 @@ def get_channel_parameters(channel_name):
def get_channel_json(channel_name):
# logger.info("channel_name=" + channel_name)
logger.debug("channel_name=" + channel_name)
from core import filetools
channel_json = None
try:
@@ -101,9 +101,9 @@ def get_channel_json(channel_name):
channel_name + ".json")
if filetools.isfile(channel_path):
# logger.info("channel_data=" + channel_path)
logger.debug("channel_data=" + channel_path)
channel_json = jsontools.load(filetools.read(channel_path))
# logger.info("channel_json= %s" % channel_json)
logger.debug("channel_json= %s" % channel_json)
except Exception as ex:
template = "An exception of type %s occured. Arguments:\n%r"
@@ -114,7 +114,7 @@ def get_channel_json(channel_name):
def get_channel_controls_settings(channel_name):
# logger.info("channel_name=" + channel_name)
logger.debug("channel_name=" + channel_name)
dict_settings = {}
# import web_pdb; web_pdb.set_trace()
# list_controls = get_channel_json(channel_name).get('settings', list())
@@ -137,7 +137,7 @@ def get_lang(channel_name):
if hasattr(channel, 'list_language'):
for language in channel.list_language:
list_language.append(language)
logger.info(list_language)
logger.debug(list_language)
else:
sub = False
langs = []

View File

@@ -253,7 +253,7 @@ class Downloader(object):
self.file.seek(2 ** 31, 0)
except OverflowError:
self._seekable = False
logger.info("Cannot do seek() or tell() in files larger than 2GB")
logger.error("Cannot do seek() or tell() in files larger than 2GB")
self.__get_download_info__()

View File

@@ -814,7 +814,7 @@ def remove_tags(title):
@rtype: str
@return: string without tags
"""
logger.info()
logger.debug()
title_without_tags = scrapertools.find_single_match(title, r'\[color .+?\](.+)\[\/color\]')
@@ -832,7 +832,7 @@ def remove_smb_credential(path):
@return: chain without credentials
@rtype: str
"""
logger.info()
logger.debug()
if not scrapertools.find_single_match(path, r'(^\w+:\/\/)'):
return path

View File

@@ -234,7 +234,7 @@ def get_link(list_item, item, list_language, list_quality=None, global_filter_la
@return: Item list
@rtype: list[Item]
"""
logger.info()
logger.debug()
# if the required fields are None we leave
if list_item is None or item is None:
@@ -274,7 +274,7 @@ def get_links(list_item, item, list_language, list_quality=None, global_filter_l
@return: list of Item
@rtype: list[Item]
"""
logger.info()
logger.debug()
# if the required fields are None we leave
@@ -362,7 +362,7 @@ def no_filter(item):
@return: list of links
@rtype: list[Item]
"""
logger.info()
logger.debug()
itemlist = []
for i in item.list_item_all:
@@ -384,7 +384,7 @@ def mainlist(channel, list_language, list_quality):
@return: Item list
@rtype: list[Item]
"""
logger.info()
logger.debug()
itemlist = []
dict_series = jsontools.get_node_from_file(channel, TAG_TVSHOW_FILTER)
@@ -425,8 +425,8 @@ def config_item(item):
@param item: item
@type item: Item
"""
logger.info()
logger.info("item %s" % item.tostring())
logger.debug()
logger.debug("item %s" % item.tostring())
# WE GET THE JSON DATA
dict_series = jsontools.get_node_from_file(item.from_channel, TAG_TVSHOW_FILTER)
@@ -448,8 +448,8 @@ def config_item(item):
else:
lang_selected = dict_series.get(tvshow, {}).get(TAG_LANGUAGE, default_lang)
list_quality = dict_series.get(tvshow, {}).get(TAG_QUALITY_ALLOWED, [x.lower() for x in item.list_quality])
# logger.info("lang selected {}".format(lang_selected))
# logger.info("list quality {}".format(list_quality))
# logger.debug("lang selected {}".format(lang_selected))
# logger.debug("list quality {}".format(list_quality))
active = True
custom_button = {'visible': False}
@@ -516,7 +516,7 @@ def config_item(item):
def delete(item, dict_values):
logger.info()
logger.debug()
if item:
dict_series = jsontools.get_node_from_file(item.from_channel, TAG_TVSHOW_FILTER)
@@ -554,7 +554,7 @@ def save(item, dict_data_saved):
@param dict_data_saved: dictionary with saved data
@type dict_data_saved: dict
"""
logger.info()
logger.debug()
if item and dict_data_saved:
logger.debug('item: %s\ndatos: %s' % (item.tostring(), dict_data_saved))
@@ -564,7 +564,7 @@ def save(item, dict_data_saved):
dict_series = jsontools.get_node_from_file(item.from_channel, TAG_TVSHOW_FILTER)
tvshow = item.show.strip().lower()
logger.info("Data is updated")
logger.debug("Data is updated")
list_quality = []
for _id, value in list(dict_data_saved.items()):
@@ -599,7 +599,7 @@ def save_from_context(item):
@param item: item
@type item: item
"""
logger.info()
logger.debug()
dict_series = jsontools.get_node_from_file(item.from_channel, TAG_TVSHOW_FILTER)
tvshow = item.show.strip().lower()
@@ -630,7 +630,7 @@ def delete_from_context(item):
@param item: item
@type item: item
"""
logger.info()
logger.debug()
# We come from get_links and no result has been obtained, in context menu and we delete
if item.to_channel != "":

View File

@@ -449,7 +449,7 @@ def downloadpage(url, **opt):
if not 'api.themoviedb' in url and not opt.get('alfa_s', False):
show_infobox(info_dict)
if not config.get_setting("debug"): logger.info('Page URL:',url)
return type('HTTPResponse', (), response)
def fill_fields_pre(url, opt, proxy_data, file_name):

View File

@@ -11,22 +11,22 @@ from inspect import stack
try:
import json
except:
logger.info("json included in the interpreter **NOT** available")
logger.error("json included in the interpreter **NOT** available")
try:
import simplejson as json
except:
logger.info("simplejson included in the interpreter **NOT** available")
logger.error("simplejson included in the interpreter **NOT** available")
try:
from lib import simplejson as json
except:
logger.info("simplejson in lib directory **NOT** available")
logger.error("simplejson in lib directory **NOT** available")
logger.error("A valid JSON parser was not found")
json = None
else:
logger.info("Using simplejson in the lib directory")
else:
logger.info("Using simplejson included in the interpreter")
logger.error("Using simplejson included in the interpreter")
# ~ else:
# ~ logger.info("Usando json incluido en el interprete")

View File

@@ -62,7 +62,7 @@ def find_and_set_infoLabels(item):
# Check if there is a 'code'
if scraper_result and item.infoLabels['code']:
# correct code
logger.info("Identificador encontrado: %s" % item.infoLabels['code'])
logger.debug("Identificador encontrado: %s" % item.infoLabels['code'])
scraper.completar_codigos(item)
return True
elif scraper_result:
@@ -72,7 +72,7 @@ def find_and_set_infoLabels(item):
# Content not found
msg = config.get_localized_string(60228) % title
logger.info(msg)
logger.debug(msg)
# Show box with other options:
item = platformtools.dialog_info(item, scraper_actual)
if item.exit:
@@ -83,7 +83,7 @@ def find_and_set_infoLabels(item):
def cuadro_completar(item):
logger.info()
logger.debug()
global dict_default
dict_default = {}
@@ -196,7 +196,7 @@ def get_nfo(item):
@rtype: str
@return:
"""
logger.info()
logger.debug()
if "infoLabels" in item and "noscrap_id" in item.infoLabels:
# Create the xml file with the data obtained from the item since there is no active scraper
info_nfo = '<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>'

View File

@@ -34,7 +34,7 @@ from platformcode import logger
def printMatches(matches):
i = 0
for match in matches:
logger.info("%d %s" % (i, match))
logger.debug("%d %s" % (i, match))
i = i + 1
@@ -447,7 +447,7 @@ def get_season_and_episode(title):
except:
pass
logger.info("'" + title + "' -> '" + filename + "'")
logger.debug("'" + title + "' -> '" + filename + "'")
return filename

View File

@@ -47,7 +47,7 @@ def find_video_items(item=None, data=None):
@return: returns the itemlist with the results
@rtype: list
"""
logger.info()
logger.debug()
itemlist = []
# Download the page
@@ -97,7 +97,7 @@ def get_servers_itemlist(itemlist, fnc=None, sort=False):
# Walk the patterns
for pattern in server_parameters.get("find_videos", {}).get("patterns", []):
logger.info(pattern["pattern"])
logger.debug(pattern["pattern"])
# Scroll through the results
for match in re.compile(pattern["pattern"], re.DOTALL).finditer(
"\n".join([item.url.split('|')[0] for item in itemlist if not item.server])):
@@ -144,7 +144,7 @@ def findvideos(data, skip=False):
return some link. It can also be an integer greater than 1, which would represent the maximum number of links to search.
:return:
"""
logger.info()
logger.debug()
devuelve = []
skip = int(skip)
servers_list = list(get_servers_list().keys())
@@ -181,7 +181,7 @@ def findvideosbyserver(data, serverid):
value = translate_server_name(server_parameters["name"]) , url, serverid, server_parameters.get("thumbnail", "")
if value not in devuelve and url not in server_parameters["find_videos"].get("ignore_urls", []):
devuelve.append(value)
logger.info(msg)
logger.debug(msg)
return devuelve
@@ -193,7 +193,7 @@ def guess_server_thumbnail(serverid):
def get_server_from_url(url):
logger.info()
logger.debug()
servers_list = list(get_servers_list().keys())
# Run findvideos on each active server
@@ -211,7 +211,7 @@ def get_server_from_url(url):
for n, pattern in enumerate(server_parameters["find_videos"].get("patterns", [])):
msg = "%s\npattern: %s" % (serverid, pattern["pattern"])
if not "pattern_compiled" in pattern:
# logger.info('compiled ' + serverid)
# logger.debug('compiled ' + serverid)
pattern["pattern_compiled"] = re.compile(pattern["pattern"])
dict_servers_parameters[serverid]["find_videos"]["patterns"][n]["pattern_compiled"] = pattern["pattern_compiled"]
# Scroll through the results
@@ -224,7 +224,7 @@ def get_server_from_url(url):
msg += "\nurl encontrada: %s" % url
value = translate_server_name(server_parameters["name"]), url, serverid, server_parameters.get("thumbnail", "")
if url not in server_parameters["find_videos"].get("ignore_urls", []):
logger.info(msg)
logger.debug(msg)
return value
return None
@@ -616,7 +616,7 @@ def get_server_setting(name, server, default=None):
dict_file['settings'] = dict_settings
# We create the file ../settings/channel_data.json
if not filetools.write(file_settings, jsontools.dump(dict_file)):
logger.info("ERROR saving file: %s" % file_settings)
logger.error("ERROR saving file: %s" % file_settings)
# We return the value of the local parameter 'name' if it exists, if default is not returned
return dict_settings.get(name, default)
@@ -638,7 +638,7 @@ def set_server_setting(name, value, server):
dict_file = jsontools.load(filetools.read(file_settings))
dict_settings = dict_file.get('settings', {})
except EnvironmentError:
logger.info("ERROR when reading the file: %s" % file_settings)
logger.error("ERROR when reading the file: %s" % file_settings)
dict_settings[name] = value
@@ -650,7 +650,7 @@ def set_server_setting(name, value, server):
# We create the file ../settings/channel_data.json
if not filetools.write(file_settings, jsontools.dump(dict_file)):
logger.info("ERROR saving file: %s" % file_settings)
logger.error("ERROR saving file: %s" % file_settings)
return None
return value
@@ -752,7 +752,7 @@ def check_video_link(item, timeout=3):
server_module = __import__('servers.%s' % server, None, None, ["servers.%s" % server])
except:
server_module = None
logger.info("[check_video_link] Cannot import server! %s" % server)
logger.error("[check_video_link] Cannot import server! %s" % server)
return item, NK
if hasattr(server_module, 'test_video_exists'):
@@ -762,20 +762,20 @@ def check_video_link(item, timeout=3):
try:
video_exists, message = server_module.test_video_exists(page_url=url)
if not video_exists:
logger.info("[check_video_link] Does not exist! %s %s %s" % (message, server, url))
logger.error("[check_video_link] Does not exist! %s %s %s" % (message, server, url))
resultado = KO
else:
logger.info("[check_video_link] check ok %s %s" % (server, url))
logger.debug("[check_video_link] check ok %s %s" % (server, url))
resultado = OK
except:
logger.info("[check_video_link] Can't check now! %s %s" % (server, url))
logger.error("[check_video_link] Can't check now! %s %s" % (server, url))
resultado = NK
finally:
httptools.HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT = ant_timeout # Restore download time
return item, resultado
logger.info("[check_video_link] There is no test_video_exists for server: %s" % server)
logger.debug("[check_video_link] There is no test_video_exists for server: %s" % server)
return item, NK
def translate_server_name(name):

View File

@@ -33,7 +33,7 @@ def hdpass_get_servers(item):
for mir_url, srv in scrapertools.find_multiple_matches(mir, patron_option):
mir_url = scrapertools.decodeHtmlentities(mir_url)
info(mir_url)
logger.debug(mir_url)
it = item.clone(action="play", quality=quality, title=srv, server=srv, url= mir_url)
if not servertools.get_server_parameters(srv.lower()): it = hdpass_get_url(it)[0] # do not exists or it's empty
ret.append(it)
@@ -1022,7 +1022,7 @@ def videolibrary(itemlist, item, typography='', function_level=1, function=''):
# Simply add this function to add video library support
# Function_level is useful if the function is called by another function.
# If the call is direct, leave it blank
info()
logger.debug()
if item.contentType == 'movie':
action = 'add_pelicula_to_library'
@@ -1073,7 +1073,7 @@ def videolibrary(itemlist, item, typography='', function_level=1, function=''):
def nextPage(itemlist, item, data='', patron='', function_or_level=1, next_page='', resub=[]):
# Function_level is useful if the function is called by another function.
# If the call is direct, leave it blank
info()
logger.debug()
action = inspect.stack()[function_or_level][3] if type(function_or_level) == int else function_or_level
if next_page == '':
next_page = scrapertools.find_single_match(data, patron)
@@ -1083,7 +1083,7 @@ def nextPage(itemlist, item, data='', patron='', function_or_level=1, next_page=
if 'http' not in next_page:
next_page = scrapertools.find_single_match(item.url, 'https?://[a-z0-9.-]+') + (next_page if next_page.startswith('/') else '/' + next_page)
next_page = next_page.replace('&amp;', '&')
info('NEXT= ', next_page)
logger.debug('NEXT= ', next_page)
itemlist.append(
item.clone(channel=item.channel,
action = action,
@@ -1110,7 +1110,7 @@ def pagination(itemlist, item, page, perpage, function_level=1):
def server(item, data='', itemlist=[], headers='', AutoPlay=True, CheckLinks=True, Download=True, patronTag=None, Videolibrary=True):
info()
logger.debug()
blacklisted_servers = config.get_setting("black_list", server='servers')
if not blacklisted_servers: blacklisted_servers = []
if not data and not itemlist:
@@ -1375,7 +1375,7 @@ def thumb(item_itemlist_string=None, genre=False, live=False):
'_tvshow':['serie','tv', 'fiction']}
def autoselect_thumb(item, genre):
info('SPLIT',re.split(r'\.|\{|\}|\[|\]|\(|\)|/| ',item.title.lower()))
logger.debug('SPLIT',re.split(r'\.|\{|\}|\[|\]|\(|\)|/| ',item.title.lower()))
if genre == False:
for thumb, titles in icon_dict.items():
if any(word in re.split(r'\.|\{|\}|\[|\]|\(|\)|/| ',item.title.lower()) for word in search):

View File

@@ -87,7 +87,7 @@ create_bd()
# The function name is the name of the decorator and receives the function that decorates.
def cache_response(fn):
logger.info()
logger.debug()
# import time
# start_time = time.time()
@@ -495,7 +495,7 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda=def_lang, lock=None
def find_and_set_infoLabels(item):
logger.info()
logger.debug()
global otmdb_global
tmdb_result = None
@@ -902,7 +902,7 @@ class Tmdb(object):
cls.dic_generos[idioma][tipo] = {}
url = ('http://api.themoviedb.org/3/genre/%s/list?api_key=a1ab8b8669da03637a4b98fa39c39228&language=%s' % (tipo, idioma))
try:
logger.info("[Tmdb.py] Filling in dictionary of genres")
logger.debug("[Tmdb.py] Filling in dictionary of genres")
resultado = cls.get_json(url)
if not isinstance(resultado, dict):
@@ -934,7 +934,7 @@ class Tmdb(object):
'&language=%s' % (self.busqueda_id, source, self.busqueda_idioma))
buscando = "%s: %s" % (source.capitalize(), self.busqueda_id)
logger.info("[Tmdb.py] Searching %s:\n%s" % (buscando, url))
logger.debug("[Tmdb.py] Searching %s:\n%s" % (buscando, url))
resultado = self.get_json(url)
if not isinstance(resultado, dict):
resultado = ast.literal_eval(resultado.decode('utf-8'))
@@ -981,7 +981,7 @@ class Tmdb(object):
url += '&year=%s' % self.busqueda_year
buscando = self.busqueda_texto.capitalize()
logger.info("[Tmdb.py] Searching %s on page %s:\n%s" % (buscando, page, url))
logger.debug("[Tmdb.py] Searching %s on page %s:\n%s" % (buscando, page, url))
resultado = self.get_json(url)
if not isinstance(resultado, dict):
resultado = ast.literal_eval(resultado.decode('utf-8'))
@@ -1042,7 +1042,7 @@ class Tmdb(object):
url = ('http://api.themoviedb.org/3/%s?api_key=a1ab8b8669da03637a4b98fa39c39228&%s'
% (type_search, "&".join(params)))
logger.info("[Tmdb.py] Searcing %s:\n%s" % (type_search, url))
logger.debug("[Tmdb.py] Searcing %s:\n%s" % (type_search, url))
resultado = self.get_json(url, cache=False)
if not isinstance(resultado, dict):
resultado = ast.literal_eval(resultado.decode('utf-8'))
@@ -1107,7 +1107,7 @@ class Tmdb(object):
return True
def get_list_resultados(self, num_result=20):
# logger.info("self %s" % str(self))
# logger.debug("self %s" % str(self))
res = []
if num_result <= 0:
@@ -1327,7 +1327,7 @@ class Tmdb(object):
"&append_to_response=credits" % (self.result["id"], numtemporada, self.busqueda_idioma)
buscando = "id_Tmdb: " + str(self.result["id"]) + " season: " + str(numtemporada) + "\nURL: " + url
logger.info("[Tmdb.py] Searcing " + buscando)
logger.debug("[Tmdb.py] Searcing " + buscando)
try:
self.temporada[numtemporada] = self.get_json(url)
if not isinstance(self.temporada[numtemporada], dict):
@@ -1516,7 +1516,7 @@ class Tmdb(object):
items.extend(list(self.get_episodio(ret_infoLabels['season'], episodio).items()))
# logger.info("ret_infoLabels" % ret_infoLabels)
# logger.debug("ret_infoLabels" % ret_infoLabels)
for k, v in items:
if not v:

View File

@@ -128,7 +128,7 @@ def token_trakt(item):
def set_trakt_info(item):
logger.info()
logger.debug()
import xbmcgui
# Send the data to trakt
try:
@@ -139,7 +139,7 @@ def set_trakt_info(item):
pass
def get_trakt_watched(id_type, mediatype, update=False):
logger.info()
logger.debug()
id_list = []
id_dict = dict()
@@ -229,7 +229,7 @@ def trakt_check(itemlist):
def get_sync_from_file():
logger.info()
logger.debug()
sync_path = os.path.join(config.get_data_path(), 'settings_channels', 'trakt_data.json')
trakt_node = {}
if os.path.exists(sync_path):
@@ -241,7 +241,7 @@ def get_sync_from_file():
def update_trakt_data(mediatype, trakt_data):
logger.info()
logger.debug()
sync_path = os.path.join(config.get_data_path(), 'settings_channels', 'trakt_data.json')
if os.path.exists(sync_path):
@@ -251,7 +251,7 @@ def update_trakt_data(mediatype, trakt_data):
def ask_install_script():
logger.info()
logger.debug()
from platformcode import platformtools
@@ -265,7 +265,7 @@ def ask_install_script():
def wait_for_update_trakt():
logger.info()
logger.debug()
t = Thread(update_all)
t.setDaemon(True)
t.start()
@@ -274,7 +274,7 @@ def wait_for_update_trakt():
def update_all():
# from core.support import dbg;dbg()
from time import sleep
logger.info()
logger.debug()
sleep(20)
while xbmc.Player().isPlaying():
sleep(20)

View File

@@ -77,9 +77,9 @@ otvdb_global = None
def find_and_set_infoLabels(item):
logger.info()
logger.debug()
# from core.support import dbg;dbg()
# logger.info("item es %s" % item)
# logger.debug("item es %s" % item)
p_dialog = None
if not item.contentSeason:
@@ -382,7 +382,7 @@ class Tvdb(object):
@classmethod
def __check_token(cls):
# logger.info()
# logger.debug()
if TOKEN == "":
cls.__login()
else:
@@ -397,7 +397,7 @@ class Tvdb(object):
@staticmethod
def __login():
# logger.info()
# logger.debug()
global TOKEN
apikey = "106B699FDC04301C"
@@ -423,7 +423,7 @@ class Tvdb(object):
@classmethod
def __refresh_token(cls):
# logger.info()
# logger.debug()
global TOKEN
is_success = False
@@ -521,7 +521,7 @@ class Tvdb(object):
]
}
"""
logger.info()
logger.debug()
if id_episode and self.episodes.get(id_episode):
return self.episodes.get(id_episode)
@@ -589,7 +589,7 @@ class Tvdb(object):
}
}
"""
logger.info()
logger.debug()
url = HOST + "/series/%s/episodes?page=%s" % (_id, page)
@@ -662,7 +662,7 @@ class Tvdb(object):
"""
if semaforo:
semaforo.acquire()
logger.info()
logger.debug()
url = HOST + "/episodes/%s" % _id
@@ -681,7 +681,7 @@ class Tvdb(object):
else:
dict_html = req.json()
# logger.info("dict_html %s" % dict_html)
# logger.debug("dict_html %s" % dict_html)
self.episodes[_id] = dict_html.pop("data") if 'Error' not in dict_html else {}
if semaforo:
@@ -712,7 +712,7 @@ class Tvdb(object):
"status": "string"
}
"""
logger.info()
logger.debug()
params = {}
if name:
@@ -802,7 +802,7 @@ class Tvdb(object):
}
}
"""
logger.info()
logger.debug()
resultado = {}
url = HOST + "/series/%s" % _id
@@ -855,7 +855,7 @@ class Tvdb(object):
@rtype: dict
"""
logger.info()
logger.debug()
if self.result.get('image_season_%s' % season):
return self.result['image_season_%s' % season]
@@ -909,7 +909,7 @@ class Tvdb(object):
@return: dictionary with actors
@rtype: dict
"""
logger.info()
logger.debug()
url = HOST + "/series/%s/actors" % _id
DEFAULT_HEADERS["Accept-Language"] = lang
@@ -942,7 +942,7 @@ class Tvdb(object):
@rtype: list
@return: list of results
"""
logger.info()
logger.debug()
list_results = []
# if we have a result and it has seriesName, we already have the info of the series, it is not necessary to search again

View File

@@ -78,7 +78,7 @@ def save_movie(item, silent=False):
@rtype fallidos: int
@return: the number of failed items or -1 if all failed
"""
logger.info()
logger.debug()
# logger.debug(item.tostring('\n'))
insertados = 0
sobreescritos = 0
@@ -144,7 +144,7 @@ def save_movie(item, silent=False):
if not path:
# Create folder
path = filetools.join(MOVIES_PATH, ("%s [%s]" % (base_name, _id)).strip())
logger.info("Creating movie directory:" + path)
logger.debug("Creating movie directory:" + path)
if not filetools.mkdir(path):
logger.debug("Could not create directory")
return 0, 0, -1, path
@@ -159,7 +159,7 @@ def save_movie(item, silent=False):
if not nfo_exists:
# We create .nfo if it doesn't exist
logger.info("Creating .nfo: " + nfo_path)
logger.debug("Creating .nfo: " + nfo_path)
head_nfo = scraper.get_nfo(item)
item_nfo = Item(title=item.contentTitle, channel="videolibrary", action='findvideos',
@@ -182,7 +182,7 @@ def save_movie(item, silent=False):
if item_nfo and strm_exists:
if json_exists:
logger.info("The file exists. Is overwritten")
logger.debug("The file exists. Is overwritten")
sobreescritos += 1
else:
insertados += 1
@@ -209,7 +209,7 @@ def save_movie(item, silent=False):
item_nfo.library_urls[item.channel] = item.url
if filetools.write(nfo_path, head_nfo + item_nfo.tojson()):
#logger.info("FOLDER_MOVIES : %s" % FOLDER_MOVIES)
#logger.debug("FOLDER_MOVIES : %s" % FOLDER_MOVIES)
# We update the Kodi video library with the movie
if config.is_xbmc() and config.get_setting("videolibrary_kodi") and not silent:
from platformcode import xbmc_videolibrary
@@ -238,7 +238,7 @@ def update_renumber_options(item, head_nfo, path):
json = json_file['TVSHOW_AUTORENUMBER']
if item.fulltitle in json:
item.channel_prefs[channel]['TVSHOW_AUTORENUMBER'] = json[item.fulltitle]
logger.info('UPDATED=\n' + str(item.channel_prefs))
logger.debug('UPDATED=\n' + str(item.channel_prefs))
filetools.write(tvshow_path, head_nfo + item.tojson())
def add_renumber_options(item, head_nfo, path):
@@ -426,7 +426,7 @@ def save_tvshow(item, episodelist, silent=False):
@rtype path: str
@return: serial directory
"""
logger.info()
logger.debug()
# logger.debug(item.tostring('\n'))
path = ""
@@ -486,7 +486,7 @@ def save_tvshow(item, episodelist, silent=False):
if not path:
path = filetools.join(TVSHOWS_PATH, ("%s [%s]" % (base_name, _id)).strip())
logger.info("Creating series directory: " + path)
logger.debug("Creating series directory: " + path)
try:
filetools.mkdir(path)
except OSError as exception:
@@ -496,7 +496,7 @@ def save_tvshow(item, episodelist, silent=False):
tvshow_path = filetools.join(path, "tvshow.nfo")
if not filetools.exists(tvshow_path):
# We create tvshow.nfo, if it does not exist, with the head_nfo, series info and watched episode marks
logger.info("Creating tvshow.nfo: " + tvshow_path)
logger.debug("Creating tvshow.nfo: " + tvshow_path)
head_nfo = scraper.get_nfo(item)
item.infoLabels['mediatype'] = "tvshow"
item.infoLabels['title'] = item.contentSerieName
@@ -570,11 +570,11 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
@rtype fallidos: int
@return: the number of failed episodes
"""
logger.info()
logger.debug()
episodelist = filter_list(episodelist, serie.action, path)
# No episode list, nothing to save
if not len(episodelist):
logger.info("There is no episode list, we go out without creating strm")
logger.debug("There is no episode list, we go out without creating strm")
return 0, 0, 0
# process local episodes
@@ -589,7 +589,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
elif config.get_setting("local_episodes", "videolibrary"):
done, local_episodes_path = config_local_episodes_path(path, serie)
if done < 0:
logger.info("An issue has occurred while configuring local episodes, going out without creating strm")
logger.debug("An issue has occurred while configuring local episodes, going out without creating strm")
return 0, 0, done
item_nfo.local_episodes_path = local_episodes_path
filetools.write(nfo_path, head_nfo + item_nfo.tojson())
@@ -713,7 +713,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
# No episode list, nothing to save
if not len(new_episodelist):
logger.info("There is no episode list, we go out without creating strm")
logger.debug("There is no episode list, we go out without creating strm")
return 0, 0, 0
local_episodelist += get_local_content(path)
@@ -745,12 +745,12 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower())
if season_episode in local_episodelist:
logger.info('Skipped: Serie ' + serie.contentSerieName + ' ' + season_episode + ' available as local content')
logger.debug('Skipped: Serie ' + serie.contentSerieName + ' ' + season_episode + ' available as local content')
continue
# check if the episode has been downloaded
if filetools.join(path, "%s [downloads].json" % season_episode) in ficheros:
logger.info('INFO: "%s" episode %s has been downloaded, skipping it' % (serie.contentSerieName, season_episode))
logger.debug('INFO: "%s" episode %s has been downloaded, skipping it' % (serie.contentSerieName, season_episode))
continue
strm_exists = strm_path in ficheros
@@ -806,7 +806,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
if filetools.write(json_path, e.tojson()):
if not json_exists:
logger.info("Inserted: %s" % json_path)
logger.debug("Inserted: %s" % json_path)
insertados += 1
# We mark episode as unseen
news_in_playcounts[season_episode] = 0
@@ -817,14 +817,14 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
news_in_playcounts[serie.contentSerieName] = 0
else:
logger.info("Overwritten: %s" % json_path)
logger.debug("Overwritten: %s" % json_path)
sobreescritos += 1
else:
logger.info("Failed: %s" % json_path)
logger.debug("Failed: %s" % json_path)
fallidos += 1
else:
logger.info("Failed: %s" % json_path)
logger.debug("Failed: %s" % json_path)
fallidos += 1
if not silent and p_dialog.iscanceled():
@@ -894,7 +894,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
def config_local_episodes_path(path, item, silent=False):
logger.info(item)
logger.debug(item)
from platformcode.xbmc_videolibrary import search_local_path
local_episodes_path=search_local_path(item)
if not local_episodes_path:
@@ -906,11 +906,11 @@ def config_local_episodes_path(path, item, silent=False):
platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(80043))
local_episodes_path = platformtools.dialog_browse(0, config.get_localized_string(80046))
if local_episodes_path == '':
logger.info("User has canceled the dialog")
logger.debug("User has canceled the dialog")
return -2, local_episodes_path
elif path in local_episodes_path:
platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(80045))
logger.info("Selected folder is the same of the TV show one")
logger.debug("Selected folder is the same of the TV show one")
return -2, local_episodes_path
if local_episodes_path:
@@ -925,7 +925,7 @@ def config_local_episodes_path(path, item, silent=False):
def process_local_episodes(local_episodes_path, path):
logger.info()
logger.debug()
sub_extensions = ['.srt', '.sub', '.sbv', '.ass', '.idx', '.ssa', '.smi']
artwork_extensions = ['.jpg', '.jpeg', '.png']
@@ -964,7 +964,7 @@ def process_local_episodes(local_episodes_path, path):
def get_local_content(path):
logger.info()
logger.debug()
local_episodelist = []
for root, folders, files in filetools.walk(path):
@@ -993,7 +993,7 @@ def add_movie(item):
@type item: item
@param item: item to be saved.
"""
logger.info()
logger.debug()
from platformcode.launcher import set_search_temp; set_search_temp(item)
# To disambiguate titles, TMDB is caused to ask for the really desired title
@@ -1040,7 +1040,7 @@ def add_tvshow(item, channel=None):
@param channel: channel from which the series will be saved. By default, item.from_channel or item.channel will be imported.
"""
logger.info("show=#" + item.show + "#")
logger.debug("show=#" + item.show + "#")
from platformcode.launcher import set_search_temp; set_search_temp(item)
if item.channel == "downloads":
@@ -1117,7 +1117,7 @@ def add_tvshow(item, channel=None):
else:
platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(60070) % item.show)
logger.info("%s episodes of series %s have been added to the video library" % (insertados, item.show))
logger.debug("%s episodes of series %s have been added to the video library" % (insertados, item.show))
if config.is_xbmc():
if config.get_setting("sync_trakt_new_tvshow", "videolibrary"):
import xbmc
@@ -1133,7 +1133,7 @@ def add_tvshow(item, channel=None):
def emergency_urls(item, channel=None, path=None, headers={}):
logger.info()
logger.debug()
import re
from servers import torrent
try:

View File

@@ -17,8 +17,8 @@ from core import filetools
class ziptools(object):
def extract(self, file, dir, folder_to_extract="", overwrite_question=False, backup=False):
logger.info("file= %s" % file)
logger.info("dir= %s" % dir)
logger.debug("file= %s" % file)
logger.debug("dir= %s" % dir)
if not dir.endswith(':') and not filetools.exists(dir):
filetools.mkdir(dir)
@@ -30,13 +30,13 @@ class ziptools(object):
for nameo in zf.namelist():
name = nameo.replace(':', '_').replace('<', '_').replace('>', '_').replace('|', '_').replace('"', '_').replace('?', '_').replace('*', '_')
logger.info("name=%s" % nameo)
logger.debug("name=%s" % nameo)
if not name.endswith('/'):
logger.info("it's not a directory")
logger.debug("it's not a directory")
try:
(path, filename) = filetools.split(filetools.join(dir, name))
logger.info("path=%s" % path)
logger.info("name=%s" % name)
logger.debug("path=%s" % path)
logger.debug("name=%s" % name)
if folder_to_extract:
if path != filetools.join(dir, folder_to_extract):
break
@@ -49,7 +49,7 @@ class ziptools(object):
else:
outfilename = filetools.join(dir, name)
logger.info("outfilename=%s" % outfilename)
logger.debug("outfilename=%s" % outfilename)
try:
if filetools.exists(outfilename) and overwrite_question:
from platformcode import platformtools
@@ -74,7 +74,7 @@ class ziptools(object):
try:
zf.close()
except:
logger.info("Error closing .zip " + file)
logger.error("Error closing .zip " + file)
def _createstructure(self, file, dir):
self._makedirs(self._listdirs(file), dir)

View File

@@ -27,7 +27,7 @@ class ChromeOSImage:
"""
def __init__(self, imgpath):
logger.info('Image Path: ' + imgpath)
logger.debug('Image Path: ' + imgpath)
"""Prepares the image"""
self.imgpath = imgpath
self.bstream = self.get_bstream(imgpath)
@@ -59,7 +59,7 @@ class ChromeOSImage:
self.seek_stream(entries_start * lba_size)
if not calcsize(part_format) == entry_size:
logger.info('Partition table entries are not 128 bytes long')
logger.debug('Partition table entries are not 128 bytes long')
return 0
for index in range(1, entries_num + 1): # pylint: disable=unused-variable
@@ -71,7 +71,7 @@ class ChromeOSImage:
break
if not offset:
logger.info('Failed to calculate losetup offset.')
logger.debug('Failed to calculate losetup offset.')
return 0
return offset
@@ -93,7 +93,7 @@ class ChromeOSImage:
while True:
chunk2 = self.read_stream(chunksize)
if not chunk2:
logger.info('File %s not found in the ChromeOS image' % filename)
logger.debug('File %s not found in the ChromeOS image' % filename)
return False
chunk = chunk1 + chunk2

View File

@@ -47,7 +47,7 @@ def query(name, type='A', server=DOH_SERVER, path="/dns-query", fallback=True):
else:
retval = []
except Exception as ex:
logger.info("Exception occurred: '%s'" % ex)
logger.error("Exception occurred: '%s'" % ex)
if retval is None and fallback:
if type == 'A':

View File

@@ -25,7 +25,7 @@ intervenido_sucuri = 'Access Denied - Sucuri Website Firewall'
def update_title(item):
logger.info()
logger.debug()
from core import scraper,support
@@ -41,7 +41,7 @@ def update_title(item):
The channel must add a method to be able to receive the call from Kodi / Alfa, and be able to call this method:
def actualizar_titulos(item):
logger.info()
logger.debug()
itemlist = []
from lib import generictools
from platformcode import launcher
@@ -206,7 +206,7 @@ def update_title(item):
def refresh_screen(item):
logger.info()
logger.debug()
"""
#### Kodi 18 compatibility ####
@@ -240,7 +240,7 @@ def refresh_screen(item):
def post_tmdb_listado(item, itemlist):
logger.info()
logger.debug()
itemlist_fo = []
"""
@@ -485,7 +485,7 @@ def post_tmdb_listado(item, itemlist):
def post_tmdb_seasons(item, itemlist):
logger.info()
logger.debug()
"""
@@ -645,7 +645,7 @@ def post_tmdb_seasons(item, itemlist):
def post_tmdb_episodios(item, itemlist):
logger.info()
logger.debug()
itemlist_fo = []
"""
@@ -996,7 +996,7 @@ def post_tmdb_episodios(item, itemlist):
def post_tmdb_findvideos(item, itemlist):
logger.info()
logger.debug()
"""
@@ -1216,7 +1216,7 @@ def post_tmdb_findvideos(item, itemlist):
def get_field_from_kodi_DB(item, from_fields='*', files='file'):
logger.info()
logger.debug()
"""
Call to read from the Kodi DB the input fields received (from_fields, by default "*") of the video indicated in Item
@@ -1294,7 +1294,7 @@ def get_field_from_kodi_DB(item, from_fields='*', files='file'):
def fail_over_newpct1(item, patron, patron2=None, timeout=None):
logger.info()
logger.debug()
import ast
"""
@@ -1495,7 +1495,7 @@ def fail_over_newpct1(item, patron, patron2=None, timeout=None):
def web_intervenida(item, data, desactivar=True):
logger.info()
logger.debug()
"""
@@ -1578,7 +1578,7 @@ def web_intervenida(item, data, desactivar=True):
def regenerate_clones():
logger.info()
logger.debug()
import json
from core import videolibrarytools
@@ -1592,7 +1592,7 @@ def regenerate_clones():
# Find the paths where to leave the control .json file, and the Video Library
json_path = filetools.exists(filetools.join(config.get_runtime_path(), 'verify_cached_torrents.json'))
if json_path:
logger.info('Previously repaired video library: WE ARE GOING')
logger.debug('Previously repaired video library: WE ARE GOING')
return False
json_path = filetools.join(config.get_runtime_path(), 'verify_cached_torrents.json')
filetools.write(json_path, json.dumps({"CINE_verify": True})) # Prevents another simultaneous process from being launched
@@ -1632,7 +1632,7 @@ def regenerate_clones():
# Delete the Tvshow.nfo files and check if the .nfo has more than one channel and one is clone Newpct1
for file in files:
# logger.info('file - nfos: ' + file)
# logger.debug('file - nfos: ' + file)
if 'tvshow.nfo' in file:
file_path = filetools.join(root, 'tvshow.nfo')
filetools.remove(file_path)
@@ -1698,7 +1698,7 @@ def regenerate_clones():
for file in files:
file_path = filetools.join(root, file)
if '.json' in file:
logger.info('** file: ' + file)
logger.debug('** file: ' + file)
canal_json = scrapertools.find_single_match(file, r'\[(\w+)\].json')
if canal_json not in nfo.library_urls:
filetools.remove(file_path) # we delete the .json is a zombie
@@ -1741,7 +1741,7 @@ def regenerate_clones():
def dejuice(data):
logger.info()
logger.debug()
# Method to deobfuscate JuicyCodes data
import base64

View File

@@ -25,14 +25,14 @@ TYPE = "Type"
# helper Functions
def check(item):
logger.info()
logger.debug()
dict_series = load(item)
title = item.fulltitle.rstrip()
if title in dict_series: title = dict_series[title]
return True if ID in title and EPISODE in title else False
def filename(item):
logger.info()
logger.debug()
name_file = item.channel + "_data.json"
path = filetools.join(config.get_data_path(), "settings_channels")
fname = filetools.join(path, name_file)
@@ -40,7 +40,7 @@ def filename(item):
def load(item):
logger.info()
logger.debug()
try:
json_file = open(filename(item), "r").read()
json = jsontools.load(json_file)[TVSHOW_RENUMERATE]
@@ -52,7 +52,7 @@ def load(item):
def write(item, json):
logger.info()
logger.debug()
json_file = open(filename(item), "r").read()
js = jsontools.load(json_file)
js[TVSHOW_RENUMERATE] = json
@@ -71,7 +71,7 @@ def b64(json, mode = 'encode'):
def RepresentsInt(s):
# Season number check
logger.info()
logger.debug()
try:
int(s)
return True
@@ -79,7 +79,7 @@ def RepresentsInt(s):
return False
def find_episodes(item):
logger.info()
logger.debug()
ch = __import__('channels.' + item.channel, fromlist=["channels.%s" % item.channel])
itemlist = ch.episodios(item)
return itemlist
@@ -705,7 +705,7 @@ class SelectreNumerationWindow(xbmcgui.WindowXMLDialog):
items.append(item)
self.seasons[item.getLabel()] = '%sx%s' % (item.getProperty('season'), item.getProperty('episode'))
self.items = items
logger.info('SELF',self.seasons)
logger.debug('SELF',self.seasons)
def addseasons(self):
seasonlist = []

View File

@@ -59,7 +59,7 @@ SERVERLIST = 300
class SearchWindow(xbmcgui.WindowXML):
def start(self, item):
logger.info()
logger.debug()
self.exit = False
self.item = item
self.lastSearch()
@@ -81,7 +81,7 @@ class SearchWindow(xbmcgui.WindowXML):
self.doModal()
def lastSearch(self):
logger.info()
logger.debug()
if not self.item.text:
if config.get_setting('last_search'): last_search = channeltools.get_channel_setting('Last_searched', 'search', '')
else: last_search = ''
@@ -89,7 +89,7 @@ class SearchWindow(xbmcgui.WindowXML):
if self.item.text: channeltools.set_channel_setting('Last_searched', self.item.text, 'search')
def select(self):
logger.info()
logger.debug()
self.PROGRESS.setVisible(False)
items = []
if self.persons:
@@ -122,7 +122,7 @@ class SearchWindow(xbmcgui.WindowXML):
self.NORESULTS.setVisible(True)
def actors(self):
logger.info()
logger.debug()
self.PROGRESS.setVisible(False)
items = []
@@ -174,7 +174,7 @@ class SearchWindow(xbmcgui.WindowXML):
self.NORESULTS.setVisible(True)
def get_channels(self):
logger.info()
logger.debug()
channels_list = []
all_channels = channelselector.filterchannels('all')
@@ -196,12 +196,12 @@ class SearchWindow(xbmcgui.WindowXML):
if config.get_setting("include_in_global_search", channel) and ch_param.get("active", False):
channels_list.append(channel)
logger.info('search in channels:',channels_list)
logger.debug('search in channels:',channels_list)
return channels_list
def getModule(self, channel):
logger.info()
logger.debug()
try:
module = __import__('channels.%s' % channel, fromlist=["channels.%s" % channel])
mainlist = getattr(module, 'mainlist')(Item(channel=channel, global_search=True))
@@ -233,7 +233,7 @@ class SearchWindow(xbmcgui.WindowXML):
executor.submit(self.get_channel_results, self.item, self.moduleDict, searchAction)
def get_channel_results(self, item, module_dict, search_action):
logger.info()
logger.debug()
channel = search_action.channel
results = []
valid = []
@@ -266,7 +266,7 @@ class SearchWindow(xbmcgui.WindowXML):
else: self.update(channel, valid + other)
def makeItem(self, item):
logger.info()
logger.debug()
thumb = item.thumbnail if item.thumbnail else 'Infoplus/' + item.contentType.replace('show','')
it = xbmcgui.ListItem(item.title)
it.setProperty('thumb', thumb)
@@ -282,7 +282,7 @@ class SearchWindow(xbmcgui.WindowXML):
return it
def update(self, channel, results):
logger.info('Search on channel', channel)
logger.debug('Search on channel', channel)
if results:
channelParams = channeltools.get_channel_parameters(channel)
name = channelParams['title']

View File

@@ -19,7 +19,7 @@ def start():
Within this function all calls should go to
functions that we want to execute as soon as we open the plugin.
"""
logger.info()
logger.debug()
# config.set_setting('show_once', True)
# Test if all the required directories are created
config.verify_directories_created()
@@ -37,7 +37,7 @@ def start():
updater.showSavedChangelog()
def run(item=None):
logger.info()
logger.debug()
if not item:
# Extract item from sys.argv
if sys.argv[2]:
@@ -94,7 +94,7 @@ def run(item=None):
# If item has no action, stops here
if item.action == "":
logger.info("Item without action")
logger.debug("Item without action")
return
# Action for main menu in channelselector
@@ -193,7 +193,7 @@ def run(item=None):
channel_file = os.path.join(config.get_runtime_path(), CHANNELS, item.channel + ".py")
logger.info("channel_file= " + channel_file + ' - ' + CHANNELS + ' - ' + item.channel)
logger.debug("channel_file= " + channel_file + ' - ' + CHANNELS + ' - ' + item.channel)
channel = None
@@ -213,12 +213,12 @@ def run(item=None):
trakt_tools.set_trakt_info(item)
except:
pass
logger.info("item.action=%s" % item.action.upper())
logger.debug("item.action=%s" % item.action.upper())
# logger.debug("item_toPlay: " + "\n" + item.tostring('\n'))
# First checks if channel has a "play" function
if hasattr(channel, 'play'):
logger.info("Executing channel 'play' method")
logger.debug("Executing channel 'play' method")
itemlist = channel.play(item)
b_favourite = item.isFavourite
# Play should return a list of playable URLS
@@ -239,7 +239,7 @@ def run(item=None):
# If player don't have a "play" function, not uses the standard play from platformtools
else:
logger.info("Executing core 'play' method")
logger.debug("Executing core 'play' method")
platformtools.play_video(item)
# Special action for findvideos, where the plugin looks for known urls
@@ -252,8 +252,7 @@ def run(item=None):
# If not, uses the generic findvideos function
else:
logger.info("No channel 'findvideos' method, "
"executing core method")
logger.debug("No channel 'findvideos' method, " "executing core method")
itemlist = servertools.find_video_items(item)
if config.get_setting("max_links", "videolibrary") != 0:
@@ -297,7 +296,7 @@ def run(item=None):
else:
filetools.remove(temp_search_file)
logger.info("item.action=%s" % item.action.upper())
logger.debug("item.action=%s" % item.action.upper())
from core import channeltools
if config.get_setting('last_search'):
@@ -318,7 +317,7 @@ def run(item=None):
# For all other actions
else:
# import web_pdb; web_pdb.set_trace()
logger.info("Executing channel '%s' method" % item.action)
logger.debug("Executing channel '%s' method" % item.action)
itemlist = getattr(channel, item.action)(item)
if config.get_setting('trakt_sync'):
from core import trakt_tools
@@ -399,7 +398,7 @@ def set_search_temp(item):
filetools.write(temp_search_file, f)
def reorder_itemlist(itemlist):
logger.info()
logger.debug()
# logger.debug("Inlet itemlist size: %i" % len(itemlist))
new_list = []
@@ -437,7 +436,7 @@ def reorder_itemlist(itemlist):
new_list.extend(mod_list)
new_list.extend(not_mod_list)
logger.info("Modified Titles:%i |Unmodified:%i" % (modified, not_modified))
logger.debug("Modified Titles:%i |Unmodified:%i" % (modified, not_modified))
if len(new_list) == 0:
new_list = itemlist
@@ -447,7 +446,7 @@ def reorder_itemlist(itemlist):
def limit_itemlist(itemlist):
logger.info()
logger.debug()
# logger.debug("Inlet itemlist size: %i" % len(itemlist))
try:
@@ -480,7 +479,7 @@ def play_from_library(item):
itemlist=[]
item.fromLibrary = True
logger.info()
logger.debug()
# logger.debug("item: \n" + item.tostring('\n'))
# Try to reproduce an image (this does nothing and also does not give an error)

View File

@@ -206,7 +206,7 @@ def render_items(itemlist, parent_item):
"""
Function used to render itemlist on kodi
"""
logger.info('START render_items')
logger.debug('START render_items')
thumb_type = config.get_setting('video_thumbnail_type')
from platformcode import shortcuts
# from core import httptools
@@ -291,7 +291,7 @@ def render_items(itemlist, parent_item):
set_view_mode(itemlist[0], parent_item)
xbmcplugin.endOfDirectory(_handle)
logger.info('END render_items')
logger.debug('END render_items')
def getCurrentView(item=None, parent_item=None):
@@ -348,11 +348,11 @@ def set_view_mode(item, parent_item):
if content:
mode = int(config.get_setting('view_mode_%s' % content).split(',')[-1])
if mode == 0:
logger.info('default mode')
logger.debug('default mode')
mode = 55
xbmcplugin.setContent(handle=int(sys.argv[1]), content=Type)
xbmc.executebuiltin('Container.SetViewMode(%s)' % mode)
logger.info('TYPE: ' + Type + ' - ' + 'CONTENT: ' + content)
logger.debug('TYPE: ' + Type + ' - ' + 'CONTENT: ' + content)
def set_infolabels(listitem, item, player=False):
@@ -572,10 +572,10 @@ def is_playing():
def play_video(item, strm=False, force_direct=False, autoplay=False):
logger.info()
logger.debug()
logger.debug(item.tostring('\n'))
if item.channel == 'downloads':
logger.info("Play local video: %s [%s]" % (item.title, item.url))
logger.debug("Play local video: %s [%s]" % (item.title, item.url))
xlistitem = xbmcgui.ListItem(path=item.url)
xlistitem.setArt({"thumb": item.thumbnail})
set_infolabels(xlistitem, item, True)
@@ -583,7 +583,7 @@ def play_video(item, strm=False, force_direct=False, autoplay=False):
return
default_action = config.get_setting("default_action")
logger.info("default_action=%s" % default_action)
logger.debug("default_action=%s" % default_action)
# Open the selection dialog to see the available options
opciones, video_urls, seleccion, salir = get_dialogo_opciones(item, default_action, strm, autoplay)
@@ -593,8 +593,8 @@ def play_video(item, strm=False, force_direct=False, autoplay=False):
seleccion = get_seleccion(default_action, opciones, seleccion, video_urls)
if seleccion < 0: return # Canceled box
logger.info("selection=%d" % seleccion)
logger.info("selection=%s" % opciones[seleccion])
logger.debug("selection=%d" % seleccion)
logger.debug("selection=%s" % opciones[seleccion])
# run the available option, jdwonloader, download, favorites, add to the video library ... IF IT IS NOT PLAY
salir = set_opcion(item, seleccion, opciones, video_urls)
@@ -755,7 +755,7 @@ def alert_unsopported_server():
def handle_wait(time_to_wait, title, text):
logger.info("handle_wait(time_to_wait=%d)" % time_to_wait)
logger.debug("handle_wait(time_to_wait=%d)" % time_to_wait)
espera = dialog_progress(' ' + title, "")
secs = 0
@@ -774,15 +774,15 @@ def handle_wait(time_to_wait, title, text):
break
if cancelled:
logger.info('Wait canceled')
logger.debug('Wait canceled')
return False
else:
logger.info('Wait finished')
logger.debug('Wait finished')
return True
def get_dialogo_opciones(item, default_action, strm, autoplay):
logger.info()
logger.debug()
# logger.debug(item.tostring('\n'))
from core import servertools
@@ -866,7 +866,7 @@ def get_dialogo_opciones(item, default_action, strm, autoplay):
def set_opcion(item, seleccion, opciones, video_urls):
logger.info()
logger.debug()
# logger.debug(item.tostring('\n'))
salir = False
# You have not chosen anything, most likely because you have given the ESC
@@ -916,7 +916,7 @@ def set_opcion(item, seleccion, opciones, video_urls):
def get_video_seleccionado(item, seleccion, video_urls):
logger.info()
logger.debug()
mediaurl = ""
view = False
wait_time = 0
@@ -942,7 +942,7 @@ def get_video_seleccionado(item, seleccion, video_urls):
mpd = True
# If there is no mediaurl it is because the video is not there :)
logger.info("mediaurl=" + mediaurl)
logger.debug("mediaurl=" + mediaurl)
if mediaurl == "":
if item.server == "unknown":
alert_unsopported_server()
@@ -959,7 +959,7 @@ def get_video_seleccionado(item, seleccion, video_urls):
def set_player(item, xlistitem, mediaurl, view, strm, nfo_path=None, head_nfo=None, item_nfo=None):
logger.info()
logger.debug()
# logger.debug("item:\n" + item.tostring('\n'))
# Moved del conector "torrent" here
if item.server == "torrent":
@@ -1046,7 +1046,7 @@ def torrent_client_installed(show_tuple=False):
def play_torrent(item, xlistitem, mediaurl):
logger.info()
logger.debug()
import time
from servers import torrent

View File

@@ -65,7 +65,7 @@ class Recaptcha(xbmcgui.WindowXMLDialog):
data = httptools.downloadpage(self.url, post=post, headers=self.headers).data
from platformcode import logger
logger.info(data)
logger.debug(data)
self.result = scrapertools.find_single_match(data, '<div class="fbc-verification-token">.*?>([^<]+)<')
if self.result:
platformtools.dialog_notification("Captcha corretto", "Verifica conclusa")

View File

@@ -126,7 +126,7 @@ def SettingOnPosition(item):
xbmc.executebuiltin('Addon.OpenSettings(plugin.video.kod)')
category = item.category if item.category else 0
setting = item.setting if item.setting else 0
logger.info('SETTING= ' + str(setting))
logger.debug('SETTING= ' + str(setting))
xbmc.executebuiltin('SetFocus(%i)' % (category - 100))
xbmc.executebuiltin('SetFocus(%i)' % (setting - 80))

View File

@@ -43,7 +43,7 @@ def set_menu_settings(item):
jsontools.update_node(menu_node, 'menu_settings_data.json', "menu")
def check_user_home(item):
logger.info()
logger.debug()
if os.path.exists(menu_settings_path):
menu_node = jsontools.get_node_from_file('menu_settings_data.json', 'menu')
if 'user_home' in menu_node:
@@ -55,7 +55,7 @@ def check_user_home(item):
return item
def set_custom_start(item):
logger.info()
logger.debug()
if os.path.exists(menu_settings_path):
menu_node = jsontools.get_node_from_file('menu_settings_data.json', 'menu')
else:
@@ -69,7 +69,7 @@ def set_custom_start(item):
jsontools.update_node(menu_node, 'menu_settings_data.json', "menu")
def get_start_page():
logger.info()
logger.debug()
dictCategory = {
config.get_localized_string(70137): 'peliculas',
@@ -355,7 +355,7 @@ class Main(xbmcgui.WindowXMLDialog):
self.focus -= 1
def run_action(self, item):
logger.info()
logger.debug()
if item.menu != True:
self.close()
xbmc.executebuiltin("Container.update(%s)"%launcher.run(item))

View File

@@ -84,7 +84,7 @@ def regex_tvshow(compare, file, sub=""):
def set_Subtitle():
logger.info()
logger.debug()
exts = [".srt", ".sub", ".txt", ".smi", ".ssa", ".ass"]
subtitle_folder_path = filetools.join(config.get_data_path(), "subtitles")
@@ -216,7 +216,7 @@ def searchSubtitle(item):
filetools.mkdir(full_path_tvshow) # title_new + ".mp4"
full_path_video_new = xbmc.translatePath(
filetools.join(full_path_tvshow, "%s %sx%s.mp4" % (tvshow_title, season, episode)))
logger.info(full_path_video_new)
logger.debug(full_path_video_new)
listitem = xbmcgui.ListItem(title_new, iconImage="DefaultVideo.png", thumbnailImage="")
listitem.setInfo("video", {"Title": title_new, "Genre": "Tv shows", "episode": int(episode), "season": int(season), "tvshowtitle": tvshow_title})
@@ -230,7 +230,7 @@ def searchSubtitle(item):
try:
filetools.copy(path_video_temp, full_path_video_new)
copy = True
logger.info("nuevo path =" + full_path_video_new)
logger.debug("nuevo path =" + full_path_video_new)
time.sleep(2)
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
playlist.clear()
@@ -288,7 +288,7 @@ def get_from_subdivx(sub_url):
:return: The path to the unzipped subtitle
"""
logger.info()
logger.debug()
sub = ''
sub_dir = os.path.join(config.get_data_path(), 'temp_subs')
@@ -312,9 +312,9 @@ def get_from_subdivx(sub_url):
filetools.write(filename, data_dl)
sub = extract_file_online(sub_dir, filename)
except:
logger.info('sub invalid')
logger.debug('sub invalid')
else:
logger.info('sub invalid')
logger.debug('sub invalid')
return sub
@@ -328,7 +328,7 @@ def extract_file_online(path, filename):
:return:
"""
logger.info()
logger.debug()
url = "http://online.b1.org/rest/online/upload"

View File

@@ -32,7 +32,7 @@ class InfoWindow(xbmcgui.WindowXMLDialog):
self.scraper = scraper
self.doModal()
logger.info('RESPONSE',self.response)
logger.debug('RESPONSE',self.response)
return self.response
def make_items(self, i, result):
@@ -52,7 +52,7 @@ class InfoWindow(xbmcgui.WindowXMLDialog):
self.setCoordinateResolution(2)
with futures.ThreadPoolExecutor() as executor:
for i, result in enumerate(self.results):
logger.info(result)
logger.debug(result)
if ('seriesName' in result and result['seriesName']) or ('name' in result and result['name']) or ('title' in result and result['title']):
self.items += [executor.submit(self.make_items, i, result).result()]
self.items.sort(key=lambda it: int(it.getProperty('position')))
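
Worth noting in the hunk above: executor.submit(...).result() inside the loop blocks on each future before the next one is submitted, so the make_items calls effectively run one at a time. A minimal sketch of the overlapped form, with generic names rather than the InfoWindow code:

from concurrent import futures

def build_all(make_item, results):
    # submit every task first, then collect; calling
    # submit(...).result() inside the loop would serialize them
    with futures.ThreadPoolExecutor() as executor:
        pending = [executor.submit(make_item, i, r) for i, r in enumerate(results)]
        return [f.result() for f in pending]

# usage sketch:
items = build_all(lambda i, r: "%d:%s" % (i, r), ["a", "b", "c"])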

View File

@@ -22,7 +22,7 @@ from xml.dom import minidom
def mark_auto_as_watched(item, nfo_path=None, head_nfo=None, item_nfo=None):
def mark_as_watched_subThread(item, nfo_path, head_nfo, item_nfo):
logger.info()
logger.debug()
# logger.debug("item:\n" + item.tostring('\n'))
time_limit = time.time() + 30
@@ -53,7 +53,7 @@ def mark_auto_as_watched(item, nfo_path=None, head_nfo=None, item_nfo=None):
# Mark as Watched
if actual_time > mark_time and not marked:
logger.debug("Marked as Watched")
logger.info("Marked as Watched")
item.playcount = 1
marked = True
show_server = False
@@ -104,7 +104,7 @@ def sync_trakt_addon(path_folder):
"""
Updates the values of episodes seen if
"""
logger.info()
logger.debug()
# if the addon exists we do the search
if xbmc.getCondVisibility('System.HasAddon("script.trakt")'):
# we import dependencies
@@ -230,7 +230,7 @@ def sync_trakt_kodi(silent=True):
notificacion = False
xbmc.executebuiltin('RunScript(script.trakt,action=sync,silent=%s)' % silent)
logger.info("Synchronization with Trakt started")
logger.debug("Synchronization with Trakt started")
if notificacion:
platformtools.dialog_notification(config.get_localized_string(20000), config.get_localized_string(60045), sound=False, time=2000)
@@ -244,7 +244,7 @@ def mark_content_as_watched_on_kodi(item, value=1):
@type value: int
@param value: > 0 for seen, 0 for not seen
"""
logger.info()
logger.debug()
# logger.debug("item:\n" + item.tostring('\n'))
payload_f = ''
@@ -316,7 +316,7 @@ def mark_season_as_watched_on_kodi(item, value=1):
@type value: int
@param value: > 0 for seen, 0 for not seen
"""
logger.info()
logger.debug()
# logger.debug("item:\n" + item.tostring('\n'))
# We can only mark the season as seen in the Kodi database if the database is local; when sharing a database this functionality will not work
@@ -350,7 +350,7 @@ def mark_content_as_watched_on_kod(path):
@type str: path
@param path: content folder to mark
"""
logger.info()
logger.debug()
#logger.debug("path: " + path)
FOLDER_MOVIES = config.get_setting("folder_movies")
@@ -443,7 +443,7 @@ def get_data(payload):
import urllib.request as urllib
except ImportError:
import urllib
logger.info("payload: %s" % payload)
logger.debug("payload: %s" % payload)
# Required header for XBMC JSON-RPC calls, otherwise you'll get a 415 HTTP response code - Unsupported media type
headers = {'content-type': 'application/json'}
@@ -460,7 +460,7 @@ def get_data(payload):
response = f.read()
f.close()
logger.info("get_data: response %s" % response)
logger.debug("get_data: response %s" % response)
data = jsontools.load(response)
except Exception as ex:
template = "An exception of type %s occured. Arguments:\n%r"
@@ -476,7 +476,7 @@ def get_data(payload):
logger.error("error en xbmc.executeJSONRPC: %s" % message)
data = ["error"]
logger.info("data: %s" % data)
logger.debug("data: %s" % data)
return data
@@ -490,7 +490,7 @@ def update(folder_content=config.get_setting("folder_tvshows"), folder=""):
@type folder: str
@param folder: name of the folder to scan.
"""
logger.info(folder)
logger.debug(folder)
payload = {
"jsonrpc": "2.0",
@@ -554,7 +554,7 @@ def set_content(content_type, silent=False, custom=False):
@type content_type: str ('movie' o 'tvshow')
@param content_type: content type to configure, series or movies
"""
logger.info()
logger.debug()
continuar = True
msg_text = ""
videolibrarypath = config.get_setting("videolibrarypath")
@@ -580,7 +580,7 @@ def set_content(content_type, silent=False, custom=False):
try:
# Install metadata.themoviedb.org
xbmc.executebuiltin('InstallAddon(metadata.themoviedb.org)', True)
logger.info("Instalado el Scraper de películas de TheMovieDB")
logger.debug("Instalado el Scraper de películas de TheMovieDB")
except:
pass
@@ -634,7 +634,7 @@ def set_content(content_type, silent=False, custom=False):
try:
# Install metadata.tvdb.com
xbmc.executebuiltin('InstallAddon(metadata.tvdb.com)', True)
logger.info("The TVDB series Scraper installed ")
logger.debug("The TVDB series Scraper installed ")
except:
pass
@@ -729,7 +729,7 @@ def set_content(content_type, silent=False, custom=False):
strScraper = 'metadata.universal'
path_settings = xbmc.translatePath("special://profile/addon_data/metadata.universal/settings.xml")
if not os.path.exists(path_settings):
logger.info("%s: %s" % (content_type, path_settings + " doesn't exist"))
logger.debug("%s: %s" % (content_type, path_settings + " doesn't exist"))
return continuar
settings_data = filetools.read(path_settings)
strSettings = ' '.join(settings_data.split()).replace("> <", "><")
@@ -748,7 +748,7 @@ def set_content(content_type, silent=False, custom=False):
strScraper = 'metadata.tvshows.themoviedb.org'
path_settings = xbmc.translatePath("special://profile/addon_data/metadata.tvshows.themoviedb.org/settings.xml")
if not os.path.exists(path_settings):
logger.info("%s: %s" % (content_type, path_settings + " doesn't exist"))
logger.debug("%s: %s" % (content_type, path_settings + " doesn't exist"))
return continuar
settings_data = filetools.read(path_settings)
strSettings = ' '.join(settings_data.split()).replace("> <", "><")
@@ -758,7 +758,7 @@ def set_content(content_type, silent=False, custom=False):
videolibrarypath += sep
strPath = videolibrarypath + config.get_setting("folder_tvshows") + sep
logger.info("%s: %s" % (content_type, strPath))
logger.debug("%s: %s" % (content_type, strPath))
# We check if strPath already exists in the DB to avoid duplicates
sql = 'SELECT idPath FROM path where strPath="%s"' % strPath
nun_records, records = execute_sql_kodi(sql)
@@ -800,15 +800,15 @@ def set_content(content_type, silent=False, custom=False):
heading = config.get_localized_string(70103) % content_type
msg_text = config.get_localized_string(70104)
logger.info("%s: %s" % (heading, msg_text))
logger.debug("%s: %s" % (heading, msg_text))
return continuar
def update_db(old_path, new_path, old_movies_folder, new_movies_folder, old_tvshows_folder, new_tvshows_folder, progress):
def path_replace(path, old, new):
logger.info()
logger.info('path: ' + path + ', old: ' + old + ', new: ' + new)
logger.debug()
logger.debug('path: ' + path + ', old: ' + old + ', new: ' + new)
if new.startswith("special://") or '://' in new: sep = '/'
else: sep = os.sep
@@ -819,7 +819,7 @@ def update_db(old_path, new_path, old_movies_folder, new_movies_folder, old_tvsh
return path
logger.info()
logger.debug()
sql_old_path = old_path
if sql_old_path.startswith("special://"):
@@ -831,10 +831,10 @@ def update_db(old_path, new_path, old_movies_folder, new_movies_folder, old_tvsh
if not sql_old_path.endswith(sep):
sql_old_path += sep
logger.info('sql_old_path: ' + sql_old_path)
logger.debug('sql_old_path: ' + sql_old_path)
# search MAIN path in the DB
sql = 'SELECT idPath, strPath FROM path where strPath LIKE "%s"' % sql_old_path
logger.info('sql: ' + sql)
logger.debug('sql: ' + sql)
nun_records, records = execute_sql_kodi(sql)
# change main path
@@ -842,7 +842,7 @@ def update_db(old_path, new_path, old_movies_folder, new_movies_folder, old_tvsh
idPath = records[0][0]
strPath = path_replace(records[0][1], old_path, new_path)
sql = 'UPDATE path SET strPath="%s" WHERE idPath=%s' % (strPath, idPath)
logger.info('sql: ' + sql)
logger.debug('sql: ' + sql)
nun_records, records = execute_sql_kodi(sql)
else:
progress.update(100)
@@ -859,7 +859,7 @@ def update_db(old_path, new_path, old_movies_folder, new_movies_folder, old_tvsh
# Search Main Sub Folder
sql = 'SELECT idPath, strPath FROM path where strPath LIKE "%s"' % sql_old_folder
logger.info('sql: ' + sql)
logger.debug('sql: ' + sql)
nun_records, records = execute_sql_kodi(sql)
# Change Main Sub Folder
@@ -868,13 +868,13 @@ def update_db(old_path, new_path, old_movies_folder, new_movies_folder, old_tvsh
idPath = record[0]
strPath = path_replace(record[1], filetools.join(old_path, OldFolder), filetools.join(new_path, NewFolder))
sql = 'UPDATE path SET strPath="%s" WHERE idPath=%s' % (strPath, idPath)
logger.info('sql: ' + sql)
logger.debug('sql: ' + sql)
nun_records, records = execute_sql_kodi(sql)
# Search if Sub Folder exists in all paths
sql_old_folder += '%'
sql = 'SELECT idPath, strPath FROM path where strPath LIKE "%s"' % sql_old_folder
logger.info('sql: ' + sql)
logger.debug('sql: ' + sql)
nun_records, records = execute_sql_kodi(sql)
#Change Sub Folder in all paths
@@ -883,7 +883,7 @@ def update_db(old_path, new_path, old_movies_folder, new_movies_folder, old_tvsh
idPath = record[0]
strPath = path_replace(record[1], filetools.join(old_path, OldFolder), filetools.join(new_path, NewFolder))
sql = 'UPDATE path SET strPath="%s" WHERE idPath=%s' % (strPath, idPath)
logger.info('sql: ' + sql)
logger.debug('sql: ' + sql)
nun_records, records = execute_sql_kodi(sql)
@@ -891,27 +891,27 @@ def update_db(old_path, new_path, old_movies_folder, new_movies_folder, old_tvsh
# if it is the Movie Folder
# search and modify in "movie"
sql = 'SELECT idMovie, c22 FROM movie where c22 LIKE "%s"' % sql_old_folder
logger.info('sql: ' + sql)
logger.debug('sql: ' + sql)
nun_records, records = execute_sql_kodi(sql)
if records:
for record in records:
idMovie = record[0]
strPath = path_replace(record[1], filetools.join(old_path, OldFolder), filetools.join(new_path, NewFolder))
sql = 'UPDATE movie SET c22="%s" WHERE idMovie=%s' % (strPath, idMovie)
logger.info('sql: ' + sql)
logger.debug('sql: ' + sql)
nun_records, records = execute_sql_kodi(sql)
else:
# if it is the TV Show Folder
# search and modify in "episode"
sql = 'SELECT idEpisode, c18 FROM episode where c18 LIKE "%s"' % sql_old_folder
logger.info('sql: ' + sql)
logger.debug('sql: ' + sql)
nun_records, records = execute_sql_kodi(sql)
if records:
for record in records:
idEpisode = record[0]
strPath = path_replace(record[1], filetools.join(old_path, OldFolder), filetools.join(new_path, NewFolder))
sql = 'UPDATE episode SET c18="%s" WHERE idEpisode=%s' % (strPath, idEpisode)
logger.info('sql: ' + sql)
logger.debug('sql: ' + sql)
nun_records, records = execute_sql_kodi(sql)
p += 5
progress.update(p, config.get_localized_string(20000) + '\n' + config.get_localized_string(80013))
@@ -936,26 +936,26 @@ def clean(path_list=[]):
return path, sep
logger.info()
logger.debug()
progress = platformtools.dialog_progress_bg(config.get_localized_string(20000), config.get_localized_string(80025))
progress.update(0)
# if the path list is empty, clean the entire video library
if not path_list:
logger.info('the path list is empty, clean the entire video library')
logger.debug('the path list is empty, clean the entire video library')
if not config.get_setting("videolibrary_kodi"):
sql_path, sep = sql_format(config.get_setting("videolibrarypath"))
if not sql_path.endswith(sep): sql_path += sep
sql = 'SELECT idPath FROM path where strPath LIKE "%s"' % sql_path
logger.info('sql: ' + sql)
logger.debug('sql: ' + sql)
nun_records, records = execute_sql_kodi(sql)
idPath = records[0][0]
sql = 'DELETE from path WHERE idPath=%s' % idPath
logger.info('sql: ' + sql)
logger.debug('sql: ' + sql)
nun_records, records = execute_sql_kodi(sql)
sql = 'DELETE from path WHERE idParentPath=%s' % idPath
logger.info('sql: ' + sql)
logger.debug('sql: ' + sql)
nun_records, records = execute_sql_kodi(sql)
from core import videolibrarytools
@@ -969,7 +969,7 @@ def clean(path_list=[]):
if filetools.exists(tvshow_nfo):
path_list.append(filetools.join(config.get_setting("videolibrarypath"), videolibrarytools.FOLDER_TVSHOWS, folder))
logger.info('path_list: ' + str(path_list))
logger.debug('path_list: ' + str(path_list))
if path_list: t = float(100) / len(path_list)
for i, path in enumerate(path_list):
progress.update(int(math.ceil((i + 1) * t)))
@@ -979,13 +979,13 @@ def clean(path_list=[]):
sql_path, sep = sql_format(path)
if filetools.isdir(path) and not sql_path.endswith(sep): sql_path += sep
logger.info('path: ' + path)
logger.info('sql_path: ' + sql_path)
logger.debug('path: ' + path)
logger.debug('sql_path: ' + sql_path)
if filetools.isdir(path):
# search movie in the DB
sql = 'SELECT idMovie FROM movie where c22 LIKE "%s"' % (sql_path + '%')
logger.info('sql: ' + sql)
logger.debug('sql: ' + sql)
nun_records, records = execute_sql_kodi(sql)
# delete movie
if records:
@@ -994,7 +994,7 @@ def clean(path_list=[]):
continue
# search TV show in the DB
sql = 'SELECT idShow FROM tvshow_view where strPath LIKE "%s"' % sql_path
logger.info('sql: ' + sql)
logger.debug('sql: ' + sql)
nun_records, records = execute_sql_kodi(sql)
# delete TV show
if records:
@@ -1003,7 +1003,7 @@ def clean(path_list=[]):
elif config.get_setting("folder_movies") in sql_path:
# search movie in the DB
sql = 'SELECT idMovie FROM movie where c22 LIKE "%s"' % sql_path
logger.info('sql: ' + sql)
logger.debug('sql: ' + sql)
nun_records, records = execute_sql_kodi(sql)
# delete movie
if records:
@@ -1012,7 +1012,7 @@ def clean(path_list=[]):
else:
# search episode in the DB
sql = 'SELECT idEpisode FROM episode where c18 LIKE "%s"' % sql_path
logger.info('sql: ' + sql)
logger.debug('sql: ' + sql)
nun_records, records = execute_sql_kodi(sql)
# delete episode
if records:
@@ -1031,7 +1031,7 @@ def check_db(path):
ret = False
sql_path = '%' + sep + path.split(sep)[-1] + sep + '%'
sql = 'SELECT idShow FROM tvshow_view where strPath LIKE "%s"' % sql_path
logger.info('sql: ' + sql)
logger.debug('sql: ' + sql)
nun_records, records = execute_sql_kodi(sql)
if records:
ret = True
@@ -1048,7 +1048,7 @@ def execute_sql_kodi(sql):
@return: list with the query result
@rtype records: list of tuples
"""
logger.info()
logger.debug()
file_db = ""
nun_records = 0
records = None
@@ -1069,14 +1069,14 @@ def execute_sql_kodi(sql):
break
if file_db:
logger.info("DB file: %s" % file_db)
logger.debug("DB file: %s" % file_db)
conn = None
try:
import sqlite3
conn = sqlite3.connect(file_db)
cursor = conn.cursor()
logger.info("Running sql: %s" % sql)
logger.debug("Running sql: %s" % sql)
cursor.execute(sql)
conn.commit()
@@ -1090,7 +1090,7 @@ def execute_sql_kodi(sql):
nun_records = conn.total_changes
conn.close()
logger.info("Query executed. Records: %s" % nun_records)
logger.debug("Query executed. Records: %s" % nun_records)
except:
logger.error("Error executing sql query")
@@ -1110,7 +1110,7 @@ def check_sources(new_movies_path='', new_tvshows_path=''):
if not path.endswith(sep): path += sep
return path
logger.info()
logger.debug()
new_movies_path = format_path(new_movies_path)
new_tvshows_path = format_path(new_tvshows_path)
@@ -1140,7 +1140,7 @@ def check_sources(new_movies_path='', new_tvshows_path=''):
def update_sources(new='', old=''):
logger.info()
logger.debug()
if new == old: return
SOURCES_PATH = xbmc.translatePath("special://userdata/sources.xml")
@@ -1182,9 +1182,9 @@ def update_sources(new='', old=''):
# create new path
list_path = [p.firstChild.data for p in paths_node]
if new in list_path:
logger.info("The path %s already exists in sources.xml" % new)
logger.debug("The path %s already exists in sources.xml" % new)
return
logger.info("The path %s does not exist in sources.xml" % new)
logger.debug("The path %s does not exist in sources.xml" % new)
# if the path does not exist we create one
source_node = xmldoc.createElement("source")
@@ -1223,7 +1223,7 @@ def update_sources(new='', old=''):
def ask_set_content(silent=False):
logger.info()
logger.debug()
logger.debug("videolibrary_kodi %s" % config.get_setting("videolibrary_kodi"))
def do_config(custom=False):
@@ -1280,7 +1280,7 @@ def ask_set_content(silent=False):
def next_ep(item):
from core.item import Item
logger.info()
logger.debug()
item.next_ep = False
# check if next file exists
@@ -1296,7 +1296,7 @@ def next_ep(item):
nextIndex = fileList.index(current_filename) + 1
if nextIndex == 0 or nextIndex == len(fileList): next_file = None
else: next_file = fileList[nextIndex]
logger.info('Next File:' + str(next_file))
logger.debug('Next File:' + str(next_file))
# start next episode window after x time
if next_file:

View File

@@ -13,7 +13,7 @@ headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
# page_url = re.sub('akvideo.stream/(?:video/|video\\.php\\?file_code=)?(?:embed-)?([a-zA-Z0-9]+)','akvideo.stream/video/\\1',page_url)
global data
page = httptools.downloadpage(page_url, headers=headers)
@@ -32,18 +32,18 @@ def test_video_exists(page_url):
# ID, code = scrapertools.find_single_match(data, r"""input\D*id=(?:'|")([^'"]+)(?:'|").*?value='([a-z0-9]+)""")
# post = urllib.urlencode({ID: code})
# logger.info('PAGE DATA' + data)
# logger.debug('PAGE DATA' + data)
if "File Not Found" in data:
return False, config.get_localized_string(70449) % "Akvideo"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info(" url=" + page_url)
logger.debug(" url=" + page_url)
video_urls = []
global data
# logger.info('PAGE DATA' + data)
# logger.debug('PAGE DATA' + data)
# sitekey = scrapertools.find_single_match(data, 'data-sitekey="([^"]+)')
# captcha = platformtools.show_recaptcha(sitekey, page_url) if sitekey else ''
#
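
Most of the files from here on are server resolvers that share the same two-function contract seen above: test_video_exists returns a (found, message) pair and get_video_url returns a list of [label, url] entries. A minimal skeleton of that contract, with "examplehost" and its match strings as placeholders rather than a real server:

from core import httptools, scrapertools
from platformcode import config, logger

def test_video_exists(page_url):
    logger.debug("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if "File Not Found" in data:  # placeholder error marker
        return False, config.get_localized_string(70449) % "examplehost"
    return True, ""

def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.debug("(page_url='%s')" % page_url)
    video_urls = []
    data = httptools.downloadpage(page_url).data
    media_url = scrapertools.find_single_match(data, r'file\s*:\s*"([^"]+)"')
    if media_url:
        video_urls.append([media_url[-4:] + " [examplehost]", media_url])
    return video_urls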

View File

@@ -6,7 +6,7 @@ from platformcode import config, logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
global data
data = httptools.downloadpage(page_url, cookies=False).data
if 'File you are looking for is not found.' in data:

View File

@@ -6,7 +6,7 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "no longer exists" in data or "to copyright issues" in data:
return False, config.get_localized_string(70449) % "animeid"
@@ -16,7 +16,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
video_urls = []
label, videourl = scrapertools.find_single_match(data, 'label":"([^"]+)".*?file":"([^"]+)')

View File

@@ -9,7 +9,7 @@ from platformcode import logger, config
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
response = httptools.downloadpage(page_url)
if not response.success or "Not Found" in response.data or "File was deleted" in response.data or "is no longer available" in response.data:
return False, config.get_localized_string(70449) % "anonfile"
@@ -17,7 +17,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
patron = 'download-url.*?href="([^"]+)"'

View File

@@ -9,7 +9,7 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url)
if data.code == 404:
return False, config.get_localized_string(70449) % "ArchiveOrg"
@@ -17,7 +17,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
logger.debug("url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
patron = '<meta property="og:video" content="([^"]+)">'

View File

@@ -9,7 +9,7 @@ except ImportError:
from urllib import urlencode
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
if 'http://' in page_url: # fastids
page_url = httptools.downloadpage(page_url, follow_redirects=False, only_headers=True).headers['location']
@@ -24,7 +24,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("page_url=" + page_url)
logger.debug("page_url=" + page_url)
video_urls = []
@@ -36,18 +36,18 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
if data_pack:
from lib import jsunpack
data = jsunpack.unpack(data_pack)
logger.info("page_url=" + data)
logger.debug("page_url=" + data)
# URL
url = scrapertools.find_single_match(data, r'"src"value="([^"]+)"')
if not url:
url = scrapertools.find_single_match(data, r'file\s*:\s*"([^"]+)"')
logger.info("URL=" + str(url))
logger.debug("URL=" + str(url))
# Video URL
video_urls.append([".mp4" + " [backin]", url])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], httptools.get_url_headers(video_url[1])))
logger.debug("%s - %s" % (video_url[0], httptools.get_url_headers(video_url[1])))
return video_urls

View File

@@ -11,7 +11,7 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
global page
page = httptools.downloadpage(page_url)
if not page.success:
@@ -20,7 +20,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
logger.debug("url=" + page_url)
video_urls = []
ext = '.mp4'

View File

@@ -10,7 +10,7 @@ headers = {'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N)
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "Archive no Encontrado" in data:
return False, config.get_localized_string(70449) % "bdupload"
@@ -19,7 +19,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
post = ""
patron = '(?s)type="hidden" name="([^"]+)".*?value="([^"]*)"'

View File

@@ -11,7 +11,7 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url)
if data.code == 404:
return False, config.get_localized_string(70449) % "CinemaUpload"
@@ -19,7 +19,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
logger.debug("url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)

View File

@@ -22,7 +22,7 @@ excption = False
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
data = get_data(page_url.replace(".org", ".me"))
if "File Not Found" in data: return False, config.get_localized_string(70449) % "Clicknupload"
@@ -31,7 +31,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
logger.debug("url=" + page_url)
data = get_data(page_url.replace(".org", ".me"))
@@ -51,7 +51,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
media_url = media.rsplit('/', 1)[0] + "/" + url_strip
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [clicknupload]", media_url])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -6,7 +6,7 @@ from lib import jsunpack
from platformcode import logger, config
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
global data
data = httptools.downloadpage(page_url).data
if "File Not Found" in data or "File was deleted" in data:
@@ -15,7 +15,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
video_urls = []
try:

View File

@@ -8,7 +8,7 @@ from lib import jsunpack
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
html = httptools.downloadpage(page_url)
global data
data = html.data
@@ -18,7 +18,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
logger.debug("url=" + page_url)
video_urls = []
global data
# data = httptools.downloadpage(page_url).data

View File

@@ -30,7 +30,7 @@ proxy = "https://www.usa-proxy.org/"
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url, headers=GLOBAL_HEADER).data
if "Este es un clip de muestra" in data:
@@ -44,7 +44,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
#page_url='https://www.crunchyroll.com/es-es/one-piece/episode-891-climbing-up-a-waterfall-a-great-journey-through-the-land-of-wanos-sea-zone-786643'
logger.info("url=" + page_url)
logger.debug("url=" + page_url)
video_urls = []
if "crunchyroll.com" in page_url:
media_id = page_url.rsplit("-", 1)[1]
@@ -94,7 +94,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
file_sub = ""
video_urls.append(["%s %sp [crunchyroll]" % (filename, quality), media_url, 0, file_sub])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -6,7 +6,7 @@ from platformcode import logger, config
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
global response
response = httptools.downloadpage(page_url, cookies=False)
@@ -18,7 +18,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
video_urls = []
cookie = {'Cookie': response.headers["set-cookie"]}
data = response.data.replace("\\", "")
@@ -40,5 +40,5 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
stream_url = stream_url_http
video_urls.append(["%sp .%s [dailymotion]" % (calidad, stream_type), stream_url, 0, subtitle])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -7,7 +7,7 @@ from platformcode import logger
# Returns an array of possible video URLs from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info()
logger.debug()
page_url = correct_url(page_url)
dd1 = httptools.downloadpage("https://api.alldebrid.com/user/login?agent=mySoft&username=%s&password=%s" %(user, password)).data
token = scrapertools.find_single_match(dd1, 'token":"([^"]+)')

View File

@@ -22,7 +22,7 @@ headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:65.0) Gecko/20
# Returns an array of possible video URLs from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s' , video_password=%s)" % (page_url, video_password))
logger.debug("(page_url='%s' , video_password=%s)" % (page_url, video_password))
page_url = page_url.replace(".nz/embed", ".nz/")
# Check whether a saved token exists; otherwise run the authentication process
token_auth = config.get_setting("token", server="realdebrid")
@@ -99,7 +99,7 @@ def get_enlaces(data):
def authentication():
logger.info()
logger.debug()
try:
client_id = "YTWNFBIJEEBP6"

View File

@@ -8,7 +8,7 @@ from platformcode import logger
def get_long_url(short_url):
logger.info("short_url = '%s'" % short_url)
logger.debug("short_url = '%s'" % short_url)
data = httptools.downloadpage(short_url).data
ysmm = scrapertools.find_single_match(data, "var ysmm = '([^']+)';")

View File

@@ -17,7 +17,7 @@ from platformcode import logger
# Gets the URL behind a linkbucks link
def get_long_url(short_url):
logger.info("(short_url='%s')" % short_url)
logger.debug("(short_url='%s')" % short_url)
request_headers = []
request_headers.append(["User-Agent",
@@ -33,17 +33,17 @@ def get_long_url(short_url):
while True:
for name, value in response_headers:
if name == "set-cookie":
logger.info("Set-Cookie: " + value)
logger.debug("Set-Cookie: " + value)
cookie_name = scrapertools.find_single_match(value, '(.*?)\=.*?\;')
cookie_value = scrapertools.find_single_match(value, '.*?\=(.*?)\;')
request_headers.append(["Cookie", cookie_name + "=" + cookie_value])
body, response_headers = scrapertools.read_body_and_headers(url, headers=request_headers)
logger.info("body=" + body)
logger.debug("body=" + body)
try:
location = scrapertools.find_single_match(body, '<textarea.*?class="caja_des">([^<]+)</textarea>')
logger.info("location=" + location)
logger.debug("location=" + location)
break
except:
n = n + 1

View File

@@ -38,15 +38,15 @@ servers = get_server_list()
def get_long_urls(data):
logger.info()
logger.debug()
patron = '<a href="http://([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for short_url in matches:
if short_url.startswith(tuple(servers)):
logger.info(": " + short_url)
logger.debug(": " + short_url)
longurl_data = httptools.downloadpage(
"http://api.longurl.org/v2/expand?url=" + urllib.quote_plus(short_url)).data
logger.info(longurl_data)
logger.debug(longurl_data)
try:
long_url = scrapertools.find_single_match(longurl_data, '<long-url><!\[CDATA\[(.*?)\]\]></long-url>')
except:

View File

@@ -5,9 +5,9 @@ from platformcode import logger
def get_long_url(short_url):
logger.info("(short_url='%s')" % short_url)
logger.debug("(short_url='%s')" % short_url)
location = scrapertools.get_header_from_response(short_url, header_to_get="location")
logger.info("location=" + location)
logger.debug("location=" + location)
return location

View File

@@ -8,7 +8,7 @@ from platformcode import logger, config
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
global data, real_url
page = httptools.downloadpage(page_url)
data = page.data.replace('"', "'")
@@ -20,7 +20,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(deltabit page_url='%s')" % page_url)
logger.debug("(deltabit page_url='%s')" % page_url)
global data, real_url
post = {k: v for k, v in scrapertools.find_multiple_matches(data, "name='([^']+)' value='([^']*)'")}
time.sleep(2.5)

View File

@@ -8,7 +8,7 @@ def test_video_exists(page_url):
# Returns an array of possible video URLs from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
video_urls = [["%s %s" % (page_url[-4:], config.get_localized_string(30137)), page_url]]

View File

@@ -7,7 +7,7 @@ from platformcode import logger, config
def test_video_exists(page_url):
global data
logger.info('page url=', page_url)
logger.debug('page url=', page_url)
response = httptools.downloadpage(page_url)
if response.code == 404 or 'File you are looking for is not found' in response.data:
@@ -20,7 +20,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
# from core.support import dbg;dbg()
global data
logger.info("URL", page_url)
logger.debug("URL", page_url)
video_urls = list()
host = "https://dood.to"
@@ -28,7 +28,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
new_url = scrapertools.find_single_match(data, r'<iframe src="([^"]+)"')
if new_url:
data = httptools.downloadpage(host + new_url).data
logger.info('DATA', data)
logger.debug('DATA', data)
label = scrapertools.find_single_match(data, r'type:\s*"video/([^"]+)"')

View File

@@ -9,7 +9,7 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url)
if data.code == 404:
return False, config.get_localized_string(70449) % "Dostream"
@@ -17,7 +17,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
logger.debug("url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url, headers={"Referer":page_url}).data
patron = '"label":"([^"]+)".*?'

View File

@@ -6,7 +6,7 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "no longer exists" in data or "to copyright issues" in data:
return False, "[Downace] El video ha sido borrado"
@@ -18,7 +18,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
video_urls = []
videourl = scrapertools.find_single_match(data, 'controls preload.*?src="([^"]+)')

View File

@@ -19,10 +19,10 @@ from platformcode import logger
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
page_url = page_url.replace("amp;", "")
data = httptools.downloadpage(page_url).data
logger.info("data=" + data)
logger.debug("data=" + data)
video_urls = []
patron = "video_src.*?(http.*?)%22%2C%22video_timestamp"
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -33,5 +33,5 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
videourl = urllib.unquote(videourl)
video_urls.append(["[facebook]", videourl])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -7,7 +7,7 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url)
if "Object not found" in data.data or "longer exists on our servers" in data.data:
@@ -18,7 +18,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "p,a,c,k,e,d" in data:
@@ -38,6 +38,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
except:
pass
for video_url in video_urls:
logger.info(" %s - %s" % (video_url[0], video_url[1]))
logger.debug(" %s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -6,7 +6,7 @@ from core import jsontools
from platformcode import logger, config
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
global data
page_url = re.sub('://[^/]+/', '://feurl.com/', page_url)
@@ -16,14 +16,14 @@ def test_video_exists(page_url):
page_url = page_url.replace("/f/","/v/")
page_url = page_url.replace("/v/","/api/source/")
data = httptools.downloadpage(page_url, post={}).json
logger.info(data)
logger.debug(data)
if "Video not found or" in data or "We are encoding this video" in data:
return False, config.get_localized_string(70449) % "Fembed"
return True, ""
def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
video_urls = []
for file in data['data']:
media_url = file['file']

View File

@@ -8,7 +8,7 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url, follow_redirects=False)
@@ -18,7 +18,7 @@ def test_video_exists(page_url):
return True, ""
def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
video_urls = []
data = httptools.downloadpage(page_url, follow_redirects=False, only_headers=True)
logger.debug(data.headers)

View File

@@ -4,12 +4,12 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
video_urls = []
return video_urls

View File

@@ -9,7 +9,7 @@ from platformcode import logger, config
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
response = httptools.downloadpage(page_url)
if "File was deleted" in response.data or "is no longer available" in response.data:
return False, config.get_localized_string(70449) % "filepup"
@@ -17,7 +17,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
video_urls = []
page_url = page_url.replace("https","http") + "?wmode=transparent"
data = httptools.downloadpage(page_url).data
@@ -36,5 +36,5 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
video_urls.sort(key=lambda x: x[2])
for video_url in video_urls:
video_url[2] = 0
logger.info("%s - %s" % (video_url[0], video_url[1]))
logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -6,7 +6,7 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "File was deleted" in data:
@@ -16,7 +16,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
logger.debug("url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
url = scrapertools.find_single_match(data, '(?i)link:\s*"(https://.*?filescdn\.com.*?mp4)"')

View File

@@ -6,7 +6,7 @@ from platformcode import logger
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("( page_url='%s')")
logger.debug("( page_url='%s')")
video_urls = []
itemlist = []
data1 = ''

View File

@@ -23,7 +23,7 @@ flashx_hash_f = ""
flashx_post = ""
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
global flashx_data
try:
flashx_data = httptools.downloadpage(page_url, cookies="xfsts=pfp5dj3e6go1l2o1").data
@@ -53,7 +53,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
logger.debug("url=" + page_url)
pfxfx = ""
data = flashx_data
data = data.replace("\n", "")
@@ -70,8 +70,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
for f, v in matches:
pfxfx += f + "=" + v + "&"
logger.info("mfxfxfx1= %s" % js_fxfx)
logger.info("mfxfxfx2= %s" % pfxfx)
logger.debug("mfxfxfx1= %s" % js_fxfx)
logger.debug("mfxfxfx2= %s" % pfxfx)
if pfxfx == "":
pfxfx = "f=fail&fxfx=6"
coding_url = 'https://www.flashx.co/flashx.php?%s' % pfxfx
@@ -119,14 +119,14 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
filetools.write(subtitle, data)
except:
import traceback
logger.info("Error al descargar el subtítulo: " + traceback.format_exc())
logger.debug("Error al descargar el subtítulo: " + traceback.format_exc())
for media_url, label in media_urls:
if not media_url.endswith("png") and not media_url.endswith(".srt"):
video_urls.append(["." + media_url.rsplit('.', 1)[1] + " [flashx]", media_url, 0, subtitle])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
logger.debug("%s - %s" % (video_url[0], video_url[1]))
except:
pass

View File

@@ -8,7 +8,7 @@ from platformcode import logger
# Returns an array of possible video URLs from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
video_urls = []
@@ -17,7 +17,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
page_url = scrapertools.get_header_from_response(page_url, header_to_get="location")
# http://www.4shared.com/flash/player.swf?file=http://dc237.4shared.com/img/392975628/ff297d3f/dlink__2Fdownload_2Flj9Qu-tF_3Ftsid_3D20101030-200423-87e3ba9b/preview.flv&d
logger.info("redirect a '%s'" % page_url)
logger.debug("redirect a '%s'" % page_url)
patron = "file\=([^\&]+)\&"
matches = re.compile(patron, re.DOTALL).findall(page_url)
@@ -29,6 +29,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
video_urls.append(["[fourshared]", page_url])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -22,7 +22,7 @@ headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:%s.0) Geck
DATA = ''
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
data = alfaresolver.get_data(page_url, False)
@@ -46,7 +46,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
data = DATA
@@ -87,7 +87,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
video_urls.append([scrapertools.get_filename_from_url(mediaurl)[-4:] + " [gamovideo]", mediaurl])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -5,7 +5,7 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if '<h2 class="error">Download error</h2>' in data:
return False, "El enlace no es válido<br/>o ha sido borrado de gigasize"
@@ -13,7 +13,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
video_urls = []
return video_urls

View File

@@ -7,14 +7,14 @@ from platformcode import logger
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
video_urls = []
# Extracts it via flashvideodownloader.org
if page_url.startswith("http://"):
url = 'http://www.flashvideodownloader.org/download.php?u=' + page_url
else:
url = 'http://www.flashvideodownloader.org/download.php?u=http://video.google.com/videoplay?docid=' + page_url
logger.info("url=" + url)
logger.debug("url=" + url)
data = httptools.downloadpage(url).data
# Extract the video
@@ -24,6 +24,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
video_urls.append(["[googlevideo]", newmatches[0]])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -20,14 +20,14 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
logger.debug("url=" + page_url)
video_urls = []
global data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
# logger.info('GOUN DATA= '+data)
# logger.debug('GOUN DATA= '+data)
packed_data = scrapertools.find_single_match(data, "javascript'>(eval.*?)</script>")
unpacked = jsunpack.unpack(packed_data)
# logger.info('GOUN DATA= '+unpacked)
# logger.debug('GOUN DATA= '+unpacked)
patron = r"sources..([^\]]+)"
matches = re.compile(patron, re.DOTALL).findall(unpacked)
if not matches:

View File

@@ -42,7 +42,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, user="", password="", video_password=""):
logger.info()
logger.debug()
video_urls = []
urls = []
streams =[]

View File

@@ -7,7 +7,7 @@ from platformcode import config, logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url, cookies=False).data
if 'Not found id' in data:
@@ -17,12 +17,12 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info()
logger.debug()
itemlist = []
logger.info(page_url)
logger.debug(page_url)
data = httptools.downloadpage(page_url, post='').data
logger.info(data)
logger.debug(data)
url = base64.b64decode(data)
itemlist.append([".mp4 [HDLoad]", url])

View File

@@ -8,12 +8,12 @@ from lib.fakeMail import Gmailnator
baseUrl = 'https://hdmario.live'
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
global page, data
page = httptools.downloadpage(page_url)
data = page.data
logger.info(page.url)
logger.debug(page.url)
if "the page you are looking for could not be found" in data:
return False, config.get_localized_string(70449) % "HDmario"
@@ -54,12 +54,12 @@ def registerOrLogin(page_url):
else:
import random
import string
logger.info('Automatic registration in progress')
logger.debug('Automatic registration in progress')
mailbox = Gmailnator()
randPsw = ''.join(random.choice(string.ascii_letters + string.digits) for i in range(10))
captcha = httptools.downloadpage(baseUrl + '/captchaInfo').json
logger.info('email: ' + mailbox.address)
logger.info('pass: ' + randPsw)
logger.debug('email: ' + mailbox.address)
logger.debug('pass: ' + randPsw)
reg = platformtools.dialog_register(baseUrl + '/register/', email=True, password=True, email_default=mailbox.address, password_default=randPsw, captcha_img=captcha['captchaUrl'])
if not reg:
return False
@@ -90,7 +90,7 @@ def registerOrLogin(page_url):
else:
platformtools.dialog_ok('HDmario', 'You changed the email, so KoD will not be able to perform the verification on its own; open the mailbox ' + reg['email']
+ ' and click the link. Press OK when done')
logger.info('Registration completed')
logger.debug('Registration completed')
return True
@@ -98,7 +98,7 @@ def registerOrLogin(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
global page, data
page_url = page_url.replace('?', '')
logger.info("url=" + page_url)
logger.debug("url=" + page_url)
if 'unconfirmed' in page.url:
id = page_url.split('/')[-1]
@@ -109,7 +109,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
}
httptools.downloadpage(page.url, post=postData)
mail = mailbox.waitForMail()
logger.info(mail)
logger.debug(mail)
if mail:
code = mail.subject.split(' - ')[0]
page = httptools.downloadpage(page_url + '?code=' + code)
@@ -122,12 +122,12 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
page = httptools.downloadpage(page_url)
data = page.data
logger.info(data)
logger.debug(data)
from lib import jsunpack_js2py
unpacked = jsunpack_js2py.unpack(scrapertools.find_single_match(data, '<script type="text/javascript">\n*\s*\n*(eval.*)'))
# p,a,c,k,e,d data -> xhr.setRequestHeader
secureProof = scrapertools.find_single_match(unpacked, """X-Secure-Proof['"]\s*,\s*['"]([^"']+)""")
logger.info('X-Secure-Proof=' + secureProof)
logger.debug('X-Secure-Proof=' + secureProof)
data = httptools.downloadpage(baseUrl + '/pl/' + page_url.split('/')[-1].replace('?', '') + '.m3u8', headers=[['X-Secure-Proof', secureProof]]).data
filetools.write(xbmc.translatePath('special://temp/hdmario.m3u8'), data, 'w')

View File

@@ -20,7 +20,7 @@ from platformcode import logger
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
post = {}
r = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', data)
@@ -45,6 +45,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
video_urls.append([scrapertools.get_filename_from_url(mediaurl)[-4:] + " [hugefiles]", mediaurl])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -10,7 +10,7 @@ from platformcode import logger
data = ""
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
global data
data = httptools.downloadpage(page_url)
@@ -22,7 +22,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
logger.debug("url=" + page_url)
logger.error(data)
video_urls = []
patron = 'source src="([^"]+)" type="([^"]+)" res=(\d+)'

View File

@@ -6,7 +6,7 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "The file you were looking for could not be found" in data:
return False, config.get_localized_string(70449) % "jawcloud"
@@ -14,7 +14,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
video_urls = []
videourl = scrapertools.find_single_match(data, 'source src="([^"]+)')

View File

@@ -9,7 +9,7 @@ from platformcode import logger
video_urls = []
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
subtitles = ""
response = httptools.downloadpage(page_url)
@@ -21,7 +21,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
video_urls = []
media_url = scrapertools.find_single_match(data, '<video src="([^"]+)"')
if media_url:

View File

@@ -7,7 +7,7 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
page_url = page_url.replace("embed/", "").replace(".html", ".json")
data = httptools.downloadpage(page_url).data
if '"error":"video_not_found"' in data or '"error":"Can\'t find VideoInstance"' in data:
@@ -17,7 +17,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % (page_url))
logger.debug("(page_url='%s')" % (page_url))
video_urls = []
# Load the page to grab the cookies
@@ -45,6 +45,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
pass
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -6,7 +6,7 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "Invalid or Deleted File" in data or "Well, looks like we" in data:
return False, config.get_localized_string(70449) % "Mediafire"
@@ -16,7 +16,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
patron = "DownloadButtonAd-startDownload gbtnSecondary.*?href='([^']+)'"
@@ -27,5 +27,5 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
if len(matches) > 0:
video_urls.append([matches[0][-4:] + " [mediafire]", matches[0]])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls
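The loop above also exposes the return shape every get_video_url in this changeset shares: a list of [label, playable_url] pairs, the label typically the last four characters of the URL (its extension) plus a bracketed server tag. A sketch with placeholder values:

def get_video_url(page_url, premium=False, user="", password="",
                  video_password=""):
    url = "https://cdn.example.com/movie.mp4"  # hypothetical direct link
    return [[url[-4:] + " [exampleserver]", url]]  # ".mp4 [exampleserver]"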

View File

@@ -79,7 +79,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
page_url = page_url.replace('/embed#', '/#')
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
video_urls = []
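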
# if there are more than 5 files, build a playlist with all of them

View File

@@ -10,7 +10,7 @@ from platformcode import logger, config
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
global data
data = httptools.downloadpage(page_url).data
@@ -25,7 +25,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
logger.debug("url=" + page_url)
video_urls = []
ext = '.mp4'

View File

@@ -15,7 +15,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
data = re.sub(r"\n|\r|\t|\s{2}", "", httptools.downloadpage(page_url).data)
match = scrapertools.find_single_match(data, "<script type='text/javascript'>(.*?)</script>")
data = jsunpack.unpack(match)
@@ -23,9 +23,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
media_url = scrapertools.find_single_match(data, '{type:"video/mp4",src:"([^"]+)"}')
if not media_url:
media_url = scrapertools.find_single_match(data, '"file":"([^"]+)')
logger.info("media_url=" + media_url)
logger.debug("media_url=" + media_url)
video_urls = list()
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [mp4upload]", media_url])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls
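This hunk is a compact tour of the packed-JavaScript pattern: flatten the page, cut out the script, feed it to the p,a,c,k,e,d unpacker, then regex the mp4 out of the result. A condensed sketch of that flow, assuming the addon's core helpers are on the path:

import re
from core import httptools, jsunpack, scrapertools

def extract_mp4(page_url):
    data = re.sub(r"\n|\r|\t|\s{2}", "", httptools.downloadpage(page_url).data)
    packed = scrapertools.find_single_match(
        data, "<script type='text/javascript'>(.*?)</script>")
    unpacked = jsunpack.unpack(packed)  # undoes Dean Edwards' packer
    return scrapertools.find_single_match(
        unpacked, '{type:"video/mp4",src:"([^"]+)"}')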

View File

@@ -21,7 +21,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info()
logger.debug()
video_urls = []
data = httptools.downloadpage(page_url).data
data = scrapertools.find_single_match(data, 'var srca = \[(.*?)\]')

View File

@@ -10,7 +10,7 @@ import re
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url)
global page_data
page_data = data.data
@@ -21,7 +21,7 @@ def test_video_exists(page_url):
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
video_urls = []
global page_data
video_url = scrapertools.find_single_match(decode(page_data), r"'src',\s*'([^']+)")

View File

@@ -23,7 +23,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info()
logger.debug()
video_urls = []
data = httptools.downloadpage(page_url).data
matches = scrapertools.find_multiple_matches(data, 'tracker: "([^"]+)"')

View File

@@ -25,7 +25,7 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
# Server disabled until further notice
return False, "[netutv] Server disabled"
# http://netu.tv/watch_video.php=XX only contains a redirect; go straight to http://hqq.tv/player/embed_player.php?vid=XX
@@ -37,7 +37,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
logger.debug("url=" + page_url)
video_urls = []
if "hash=" in page_url:

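The comment above records the shortcut: the watch page is only a redirect, so the video id can be dropped straight into the hqq.tv embed URL. A sketch under that assumption; the exact shape of the watch URL's id parameter is guessed here:

import re

def to_embed(page_url):
    # Hypothetical: assumes the id is the trailing token of the watch URL.
    vid = re.search(r"watch_video\.php\??v?=([A-Za-z0-9]+)", page_url)
    if vid:
        return "http://hqq.tv/player/embed_player.php?vid=%s" % vid.group(1)
    return page_url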
View File

@@ -12,7 +12,7 @@ from platformcode import logger, config
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0']]
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "Not Found" in data or "File was deleted" in data or "The file is being converted" in data or "Please try again later" in data:
return False, config.get_localized_string(70293) % "NowVideo"
@@ -23,7 +23,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
host = 'http://nowvideo.club'
logger.info("(nowvideo page_url='%s')" % page_url)
logger.debug("(nowvideo page_url='%s')" % page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
page_url_post = scrapertools.find_single_match(data, '<Form id="[^"]+" method="POST" action="([^"]+)">')
@@ -31,7 +31,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
imhuman = '&imhuman=' + scrapertools.find_single_match(data, 'name="imhuman" value="([^"]+)"').replace(" ", "+")
post = urllib.urlencode({k: v for k, v in scrapertools.find_multiple_matches(data, 'name="([^"]+)" value="([^"]*)"')}) + imhuman
data = httptools.downloadpage(host + page_url_post, post=post).data
logger.info("nowvideo data page_url2 ='%s'" % data)
logger.debug("nowvideo data page_url2 ='%s'" % data)
headers.append(['Referer', page_url])
post_data = scrapertools.find_single_match(data,"</div>\s*<script>(eval.function.p,a,c,k,e,.*?)\s*</script>")
@@ -46,11 +46,11 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
_headers = urllib.urlencode(dict(headers))
for media_url in media_urls:
#logger.info("nowvideo data page_url2 ='%s'" % media_url)
#logger.debug("nowvideo data page_url2 ='%s'" % media_url)
video_urls.append([" mp4 [nowvideo] ", media_url + '|' + _headers])
for video_url in media_urls:
logger.info("[nowvideo.py] %s - %s" % (video_url[0], video_url[1]))
logger.debug("[nowvideo.py] %s - %s" % (video_url[0], video_url[1]))
return video_urls
@@ -60,7 +60,7 @@ def find_videos(data):
devuelve = []
patronvideos = r"nowvideo.club/(?:play|videos)?([a-z0-9A-Z]+)"
logger.info("[nowvideo.py] find_videos #" + patronvideos + "#")
logger.debug("[nowvideo.py] find_videos #" + patronvideos + "#")
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for match in matches:
@@ -68,10 +68,10 @@ def find_videos(data):
url = 'http://nowvideo.club/%s' % match
if url not in encontrados:
logger.info(" url=" + url)
logger.debug(" url=" + url)
devuelve.append([titulo, url, 'nowvideo'])
encontrados.add(url)
else:
logger.info(" url duplicada=" + url)
logger.debug(" url duplicada=" + url)
return devuelve
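The POST dance earlier in this hunk is a generic interstitial bypass: scrape every name/value input, re-submit them (plus the imhuman field) to the form's action, and only then parse the real player page. A condensed sketch using the same core helpers:

import urllib  # Python 2, as in the surrounding code

from core import httptools, scrapertools

def replay_form(host, data):
    action = scrapertools.find_single_match(
        data, '<Form id="[^"]+" method="POST" action="([^"]+)">')
    fields = scrapertools.find_multiple_matches(
        data, 'name="([^"]+)" value="([^"]*)"')
    post = urllib.urlencode(dict(fields))  # replay every field as-is
    return httptools.downloadpage(host + action, post=post).data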

View File

@@ -8,7 +8,7 @@ from platformcode import logger, config
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "copyrightsRestricted" in data or "COPYRIGHTS_RESTRICTED" in data:
@@ -20,7 +20,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
logger.debug("url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url).data

View File

@@ -21,25 +21,25 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
if config.get_setting("premium", server="onefichier"):
user = config.get_setting("user", server="onefichier")
password = config.get_setting("password", server="onefichier")
url = "https://1fichier.com/login.pl"
logger.info("url=" + url)
logger.debug("url=" + url)
post_parameters = {"mail": user, "pass": password, "lt": "on", "purge": "on", "valider": "Send"}
post = urllib.urlencode(post_parameters)
logger.info("post=" + post)
logger.debug("post=" + post)
data = httptools.downloadpage(url, post=post).data
# logger.info("data="+data)
# logger.debug("data="+data)
cookies = config.get_cookie_data()
logger.info("cookies=" + cookies)
logger.debug("cookies=" + cookies)
# 1fichier.com TRUE / FALSE 1443553315 SID imC3q8MQ7cARw5tkXeWvKyrH493rR=1yvrjhxDAA0T0iEmqRfNF9GXwjrwPHssAQ
sid_cookie_value = scrapertools.find_single_match(cookies, "1fichier.com.*?SID\s+([A-Za-z0-9\+\=]+)")
logger.info("sid_cookie_value=" + sid_cookie_value)
logger.debug("sid_cookie_value=" + sid_cookie_value)
# .1fichier.com TRUE / FALSE 1443553315 SID imC3q8MQ7cARw5tkXeWvKyrH493rR=1yvrjhxDAA0T0iEmqRfNF9GXwjrwPHssAQ
cookie = urllib.urlencode({"SID": sid_cookie_value})
@@ -50,16 +50,16 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12'])
headers.append(['Cookie', cookie])
filename = scrapertools.get_header_from_response(page_url, header_to_get="Content-Disposition")
logger.info("filename=" + filename)
logger.debug("filename=" + filename)
# Build the final URL for Kodi
location = page_url + "|Cookie=" + cookie
logger.info("location=" + location)
logger.debug("location=" + location)
video_urls = []
video_urls.append([filename[-4:] + " (Premium) [1fichier]", location])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
logger.debug("%s - %s" % (video_url[0], video_url[1]))
return video_urls
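The location built above leans on a Kodi convention: urlencoded HTTP headers may ride after a | at the end of a playable URL, and the player forwards them when it fetches the stream. A small sketch with placeholder values:

import urllib  # Python 2, as in the surrounding code

headers = {"Cookie": "SID=abc123", "User-Agent": "Mozilla/5.0"}
playable = "https://1fichier.com/?xyz" + "|" + urllib.urlencode(headers)
# e.g. https://1fichier.com/?xyz|Cookie=SID%3Dabc123&User-Agent=... (order may vary)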

View File

@@ -6,7 +6,7 @@ from platformcode import config, logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
global data
data = httptools.downloadpage(page_url).data
@@ -17,8 +17,8 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
logger.debug("url=" + page_url)
global data
# logger.info(data)
# logger.debug(data)
video_urls = support.get_jwplayer_mediaurl(data, 'Onlystream')
return video_urls

View File

@@ -8,6 +8,6 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
video_urls = []
return video_urls

View File

@@ -10,7 +10,7 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url)
if "Object not found" in data.data or "longer exists on our servers" in data.data:
@@ -21,7 +21,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "p,a,c,k,e,d" in data:
@@ -44,6 +44,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
except:
pass
for video_url in video_urls:
logger.info(" %s - %s" % (video_url[0], video_url[1]))
logger.debug(" %s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -24,13 +24,13 @@ from core import jsontools
def get_source(url):
logger.info()
logger.debug()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
data = get_source(page_url)
if "File was deleted" in data or "File Not Found" in data:
@@ -40,7 +40,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
logger.debug("url=" + page_url)
video_urls = []
referer = ''

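get_source above normalizes the page before any matching; stripping newlines, tabs, <br> and &nbsp; noise is what lets the single-line regexes in these connectors hold. A quick illustration:

import re

html = "File\n\twas   deleted<br>&nbsp;"
re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", html)  # -> 'Filewasdeleted'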
View File

@@ -10,7 +10,7 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
logger.debug("(page_url='%s')" % page_url)
global data
data = httptools.downloadpage(page_url).data
if "Not Found" in data or "File was deleted" in data:
@@ -19,7 +19,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
logger.debug("url=" + page_url)
video_urls = []
ext = 'mp4'

Some files were not shown because too many files have changed in this diff