rebase specials

This commit is contained in:
marco
2020-02-05 20:51:06 +01:00
parent 53b1436e68
commit 1ddf563e0a
17 changed files with 884 additions and 255 deletions

View File

@@ -200,7 +200,7 @@ def run(item=None):
channel = __import__('channels.%s' % item.channel, None,
None, ["channels.%s" % item.channel])
except ImportError:
exec("import channels." + item.channel + " as channel")
exec("import " + CHANNELS + "." + item.channel + " as channel")
logger.info("Running channel %s | %s" % (channel.__name__, channel.__file__))

View File

@@ -1206,12 +1206,6 @@ def set_player(item, xlistitem, mediaurl, view, strm, autoplay):
from platformcode import xbmc_videolibrary
xbmc_videolibrary.mark_auto_as_watched(item)
if is_playing():
xbmc.sleep(2000)
if is_playing():
if not PY3: from lib import alfaresolver
else: from lib import alfaresolver_py3 as alfaresolver
alfaresolver.frequency_count(item)
def torrent_client_installed(show_tuple=False):
# Plugins externos se encuentra en servers/torrent.json nodo clients
@@ -1399,16 +1393,6 @@ def play_torrent(item, xlistitem, mediaurl):
mediaurl = item.url
if seleccion >= 0:
# Si tiene .torrent válido o magnet, lo registramos
if size or item.url.startswith('magnet'):
try:
import threading
if not PY3: from lib import alfaresolver
else: from lib import alfaresolver_py3 as alfaresolver
threading.Thread(target=alfaresolver.frequency_count, args=(item, )).start()
except:
logger.error(traceback.format_exc(1))
# Reproductor propio BT (libtorrent)
if seleccion == 0:

View File

@@ -1,13 +1,20 @@
# -*- coding: utf-8 -*-
#from builtins import str
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
from builtins import range
import os
from time import sleep
from core import channeltools
from core import jsontools
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
from platformcode import launcher
from time import sleep
from platformcode.config import get_setting
__channel__ = "autoplay"
@@ -93,6 +100,7 @@ def start(itemlist, item):
base_item = item
if not config.is_xbmc():
#platformtools.dialog_notification('AutoPlay ERROR', 'Sólo disponible para XBMC/Kodi')
return itemlist
@@ -125,9 +133,14 @@ def start(itemlist, item):
url_list_valid = []
autoplay_list = []
autoplay_b = []
favorite_langs = []
favorite_servers = []
favorite_quality = []
#2nd lang, vemos si se quiere o no filtrar
status_language = config.get_setting("filter_languages", channel_id)
# Guarda el valor actual de "Accion y Player Mode" en preferencias
user_config_setting_action = config.get_setting("default_action")
user_config_setting_player = config.get_setting("player_mode")
@@ -172,18 +185,21 @@ def start(itemlist, item):
favorite_quality.append(channel_node['quality'][settings_node['quality_%s' % num]])
# Se filtran los enlaces de itemlist y que se correspondan con los valores de autoplay
for item in itemlist:
for n, item in enumerate(itemlist):
autoplay_elem = dict()
b_dict = dict()
# Comprobamos q se trata de un item de video
if 'server' not in item:
continue
#2nd lang lista idiomas
if item.language not in favorite_langs:
favorite_langs.append(item.language)
# Agrega la opcion configurar AutoPlay al menu contextual
if 'context' not in item:
item.context = list()
if not filter(lambda x: x['action'] == 'autoplay_config', context):
if not [x for x in context if x['action'] == 'autoplay_config']:
item.context.append({"title": config.get_localized_string(60071),
"action": "autoplay_config",
"channel": "autoplay",
@@ -204,6 +220,7 @@ def start(itemlist, item):
b_dict['videoitem']= item
autoplay_b.append(b_dict)
continue
autoplay_elem["indice_lang"] = favorite_langs.index(item.language)
autoplay_elem["indice_server"] = favorite_servers.index(item.server.lower())
autoplay_elem["indice_quality"] = favorite_quality.index(item.quality)
@@ -216,6 +233,7 @@ def start(itemlist, item):
b_dict['videoitem'] = item
autoplay_b.append(b_dict)
continue
autoplay_elem["indice_lang"] = favorite_langs.index(item.language)
autoplay_elem["indice_server"] = favorite_servers.index(item.server.lower())
elif priority == 3: # Solo calidades
@@ -227,6 +245,7 @@ def start(itemlist, item):
b_dict['videoitem'] = item
autoplay_b.append(b_dict)
continue
autoplay_elem["indice_lang"] = favorite_langs.index(item.language)
autoplay_elem["indice_quality"] = favorite_quality.index(item.quality)
else: # No ordenar
@@ -245,16 +264,16 @@ def start(itemlist, item):
# Ordenamos segun la prioridad
if priority == 0: # Servidores y calidades
autoplay_list.sort(key=lambda orden: (orden['indice_server'], orden['indice_quality']))
autoplay_list.sort(key=lambda orden: (orden['indice_lang'], orden['indice_server'], orden['indice_quality']))
elif priority == 1: # Calidades y servidores
autoplay_list.sort(key=lambda orden: (orden['indice_quality'], orden['indice_server']))
autoplay_list.sort(key=lambda orden: (orden['indice_lang'], orden['indice_quality'], orden['indice_server']))
elif priority == 2: # Solo servidores
autoplay_list.sort(key=lambda orden: orden['indice_server'])
autoplay_list.sort(key=lambda orden: (orden['indice_lang'], orden['indice_server']))
elif priority == 3: # Solo calidades
autoplay_list.sort(key=lambda orden: orden['indice_quality'])
autoplay_list.sort(key=lambda orden: (orden['indice_lang'], orden['indice_quality']))
# Se prepara el plan b, en caso de estar activo se agregan los elementos no favoritos al final
try:
@@ -351,7 +370,7 @@ def start(itemlist, item):
# Si no quedan elementos en la lista se informa
if autoplay_elem == autoplay_list[-1]:
platformtools.dialog_notification('AutoPlay', config.get_localized_string(60072))
platformtools.dialog_notification('AutoPlay', config.get_localized_string(60072) % videoitem.server.upper())
else:
platformtools.dialog_notification(config.get_localized_string(60074), config.get_localized_string(60075))

View File

@@ -27,4 +27,4 @@
"visible": true
}
]
}
}

View File

@@ -176,14 +176,14 @@ def readbookmark(filepath):
except:
plot = lines[4].strip()
# Campos fulltitle y canal añadidos
# Campos contentTitle y canal añadidos
if len(lines) >= 6:
try:
fulltitle = urllib.unquote_plus(lines[5].strip())
contentTitle = urllib.unquote_plus(lines[5].strip())
except:
fulltitle = lines[5].strip()
contentTitle = lines[5].strip()
else:
fulltitle = titulo
contentTitle = titulo
if len(lines) >= 7:
try:
@@ -195,7 +195,7 @@ def readbookmark(filepath):
bookmarkfile.close()
return canal, titulo, thumbnail, plot, server, url, fulltitle
return canal, titulo, thumbnail, plot, server, url, contentTitle
def check_bookmark(readpath):
@@ -213,11 +213,11 @@ def check_bookmark(readpath):
time.sleep(0.1)
# Obtenemos el item desde el .txt
canal, titulo, thumbnail, plot, server, url, fulltitle = readbookmark(filetools.join(readpath, fichero))
canal, titulo, thumbnail, plot, server, url, contentTitle = readbookmark(filetools.join(readpath, fichero))
if canal == "":
canal = "favorites"
item = Item(channel=canal, action="play", url=url, server=server, title=fulltitle, thumbnail=thumbnail,
plot=plot, fanart=thumbnail, fulltitle=fulltitle, folder=False)
item = Item(channel=canal, action="play", url=url, server=server, title=contentTitle, thumbnail=thumbnail,
plot=plot, fanart=thumbnail, contentTitle=contentTitle, folder=False)
filetools.rename(filetools.join(readpath, fichero), fichero[:-4] + ".old")
itemlist.append(item)

View File

@@ -3,11 +3,13 @@
# filtertools - se encarga de filtrar resultados
# ------------------------------------------------------------
from core import channeltools
from builtins import object
from core import jsontools
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
from core import channeltools
TAG_TVSHOW_FILTER = "TVSHOW_FILTER"
TAG_NAME = "name"
@@ -28,7 +30,7 @@ __channel__ = "filtertools"
# TODO echar un ojo a https://pyformat.info/, se puede formatear el estilo y hacer referencias directamente a elementos
class ResultFilter:
class ResultFilter(object):
def __init__(self, dict_filter):
self.active = dict_filter[TAG_ACTIVE]
self.language = dict_filter[TAG_LANGUAGE]
@@ -39,7 +41,7 @@ class ResultFilter:
(self.active, self.language, self.quality_allowed)
class Filter:
class Filter(object):
def __init__(self, item, global_filter_lang_id):
self.result = None
self.__get_data(item, global_filter_lang_id)
@@ -51,7 +53,7 @@ class Filter:
global_filter_language = config.get_setting(global_filter_lang_id, item.channel)
if tvshow in dict_filtered_shows.keys():
if tvshow in list(dict_filtered_shows.keys()):
self.result = ResultFilter({TAG_ACTIVE: dict_filtered_shows[tvshow][TAG_ACTIVE],
TAG_LANGUAGE: dict_filtered_shows[tvshow][TAG_LANGUAGE],
@@ -112,9 +114,9 @@ def context(item, list_language=None, list_quality=None, exist=False):
"""
# Dependiendo de como sea el contexto lo guardamos y añadimos las opciones de filtertools.
if type(item.context) == str:
if isinstance(item.context, str):
_context = item.context.split("|")
elif type(item.context) == list:
elif isinstance(item.context, list):
_context = item.context
else:
_context = []
@@ -127,9 +129,9 @@ def context(item, list_language=None, list_quality=None, exist=False):
dict_data["list_quality"] = list_quality
added = False
if type(_context) == list:
if isinstance(_context, list):
for x in _context:
if x and type(x) == dict:
if x and isinstance(x, dict):
if x["channel"] == "filtertools":
added = True
break
@@ -163,20 +165,30 @@ def load(item):
def check_conditions(_filter, list_item, item, list_language, list_quality, quality_count=0, language_count=0):
if item.contentLanguage: item.language = item.contentLanguage
is_language_valid = True
if _filter.language:
# logger.debug("title es %s" % item.title)
#2nd lang
from platformcode import unify
_filter.language = unify.set_lang(_filter.language).upper()
# viene de episodios
if isinstance(item.language, list):
#2nd lang
for n, lang in enumerate(item.language):
item.language[n] = unify.set_lang(lang).upper()
if _filter.language in item.language:
language_count += 1
else:
is_language_valid = False
# viene de findvideos
else:
#2nd lang
item.language = unify.set_lang(item.language).upper()
if item.language.lower() == _filter.language.lower():
language_count += 1
else:
@@ -194,6 +206,7 @@ def check_conditions(_filter, list_item, item, list_language, list_quality, qual
is_quality_valid = False
if is_language_valid and is_quality_valid:
#TODO 2nd lang: habría que ver si conviene unificar el idioma aqui o no
item.list_language = list_language
if list_quality:
item.list_quality = list_quality
@@ -208,7 +221,7 @@ def check_conditions(_filter, list_item, item, list_language, list_quality, qual
logger.debug(" calidad valida?: %s, item.quality: %s, filter.quality_allowed: %s"
% (is_quality_valid, quality, _filter.quality_allowed))
return list_item, quality_count, language_count
return list_item, quality_count, language_count, _filter.language
def get_link(list_item, item, list_language, list_quality=None, global_filter_lang_id="filter_languages"):
@@ -244,7 +257,7 @@ def get_link(list_item, item, list_language, list_quality=None, global_filter_la
if filter_global and filter_global.active:
list_item, quality_count, language_count = \
check_conditions(filter_global, list_item, item, list_language, list_quality)
check_conditions(filter_global, list_item, item, list_language, list_quality)[:3]
else:
item.context = context(item)
list_item.append(item)
@@ -271,6 +284,7 @@ def get_links(list_item, item, list_language, list_quality=None, global_filter_l
"""
logger.info()
# si los campos obligatorios son None salimos
if list_item is None or item is None:
return []
@@ -279,6 +293,13 @@ def get_links(list_item, item, list_language, list_quality=None, global_filter_l
if len(list_item) == 0:
return list_item
second_lang = config.get_setting('second_language')
#Ordena segun servidores favoritos, elima servers de blacklist y desactivados
from core import servertools
list_item= servertools.filter_servers(list_item)
logger.debug("total de items : %s" % len(list_item))
new_itemlist = []
@@ -288,13 +309,32 @@ def get_links(list_item, item, list_language, list_quality=None, global_filter_l
_filter = Filter(item, global_filter_lang_id).result
logger.debug("filter: '%s' datos: '%s'" % (item.show, _filter))
if _filter and _filter.active:
for item in list_item:
new_itemlist, quality_count, language_count = check_conditions(_filter, new_itemlist, item, list_language,
new_itemlist, quality_count, language_count, first_lang = check_conditions(_filter, new_itemlist, item, list_language,
list_quality, quality_count, language_count)
logger.info("ITEMS FILTRADOS: %s/%s, idioma [%s]: %s, calidad_permitida %s: %s"
#2nd lang
if second_lang and second_lang != 'No' and first_lang.lower() != second_lang.lower() :
second_list= []
_filter2 = _filter
_filter2.language = second_lang
for it in new_itemlist:
if isinstance(it.language, list):
if not second_lang in it.language:
second_list.append(it)
else:
second_list = new_itemlist
break
for item in list_item:
new_itemlist, quality_count, language_count, second_lang = check_conditions(_filter2, second_list, item, list_language,
list_quality, quality_count, language_count)
logger.debug("ITEMS FILTRADOS: %s/%s, idioma [%s]: %s, calidad_permitida %s: %s"
% (len(new_itemlist), len(list_item), _filter.language, language_count, _filter.quality_allowed,
quality_count))
@@ -303,14 +343,19 @@ def get_links(list_item, item, list_language, list_quality=None, global_filter_l
for i in list_item:
list_item_all.append(i.tourl())
_context = [{"title": config.get_localized_string(60430) % _filter.language, "action": "delete_from_context",
"channel": "filtertools", "to_channel": "seriesdanko"}]
_context = [
{"title": config.get_localized_string(60430) % _filter.language, "action": "delete_from_context",
"channel": "filtertools", "to_channel": item.channel}]
if _filter.quality_allowed:
msg_quality_allowed = " y calidad %s" % _filter.quality_allowed
else:
msg_quality_allowed = ""
msg_lang = ' %s' % first_lang.upper()
if second_lang and second_lang != 'No':
msg_lang = 's %s ni %s' % (first_lang.upper(), second_lang.upper())
new_itemlist.append(Item(channel=__channel__, action="no_filter", list_item_all=list_item_all,
show=item.show,
title=config.get_localized_string(60432) % (_filter.language, msg_quality_allowed),
@@ -541,7 +586,7 @@ def save(item, dict_data_saved):
logger.info("Se actualiza los datos")
list_quality = []
for _id, value in dict_data_saved.items():
for _id, value in list(dict_data_saved.items()):
if _id in item.list_quality and value:
list_quality.append(_id.lower())

View File

@@ -3,6 +3,14 @@
# infoplus ventana con información del Item
# ------------------------------------------------------------
from future import standard_library
standard_library.install_aliases()
#from builtins import str
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
from builtins import range
import re
from threading import Thread
@@ -175,7 +183,7 @@ class main(xbmcgui.WindowDialog):
if self.infoLabels["tmdb_id"]:
otmdb = tmdb.Tmdb(id_Tmdb=self.infoLabels["tmdb_id"], tipo=tipo_busqueda)
self.infoLabels["images"] = otmdb.result.get("images", {})
for key, value in self.infoLabels["images"].items():
for key, value in list(self.infoLabels["images"].items()):
if not value:
self.infoLabels["images"].pop(key)
@@ -1216,22 +1224,31 @@ class related(xbmcgui.WindowDialog):
def busqueda_global(item, infoLabels, org_title=False):
logger.info()
logger.debug(item)
if item.contentType != "movie":
cat = ["serie"]
else:
cat = ["movie"]
cat += ["infoPlus"]
new_item = Item()
new_item.extra = infoLabels.get("title", "")
new_item.extra = re.sub('\[.*?\]', '', new_item.extra)
if org_title:
new_item.extra = infoLabels.get("originaltitle", "")
new_item.category = item.contentType
new_item = Item(title=item.contentTitle, text=item.contentTitle.replace("+", " "), mode=item.contentType,
infoLabels=item.infoLabels)
from specials import search
return search.do_search(new_item, cat)
return search.channel_search(new_item)
# new_item = Item()
# new_item.extra = infoLabels.get("title", "")
# new_item.extra = re.sub('\[.*?\]', '', new_item.extra)
#
# if org_title:
# new_item.extra = infoLabels.get("originaltitle", "")
# new_item.category = item.contentType
#
# from channels import search
# return search.do_search(new_item, cat)
class Busqueda(xbmcgui.WindowXMLDialog):
@@ -1273,7 +1290,8 @@ class Busqueda(xbmcgui.WindowXMLDialog):
dialog = platformtools.dialog_progress_bg(config.get_localized_string(60496), config.get_localized_string(60497))
selectitem = self.getControl(6).getSelectedItem()
item = Item().fromurl(selectitem.getProperty("item_copy"))
exec "import channels." + item.channel + " as channel"
#exec("import channels." + item.channel + " as channel")
channel = __import__('channels.%s' % item.channel, fromlist=["channels.%s" % item.channel])
itemlist = getattr(channel, item.action)(item)
global SearchWindows
window = GlobalSearch('DialogSelect.xml', config.get_runtime_path(), itemlist=itemlist, dialog=dialog)
@@ -1332,7 +1350,8 @@ class GlobalSearch(xbmcgui.WindowXMLDialog):
if (action == ACTION_SELECT_ITEM or action == 100) and self.getFocusId() == 6:
selectitem = self.getControl(6).getSelectedItem()
item = Item().fromurl(selectitem.getProperty("item_copy"))
exec "import channels." + item.channel + " as channel"
#exec("import channels." + item.channel + " as channel")
channel = __import__('channels.%s' % item.channel, fromlist=["channels.%s" % item.channel])
ventana_error = None
if item.action == "play":
if hasattr(channel, 'play'):
@@ -1539,7 +1558,7 @@ class ActorInfo(xbmcgui.WindowDialog):
threads[i] = t
while threads:
for key, t in threads.items():
for key, t in list(threads.items()):
if not t.isAlive():
threads.pop(key)
xbmc.sleep(100)
@@ -1956,10 +1975,10 @@ class images(xbmcgui.WindowDialog):
self.mal = kwargs.get("mal", [])
self.imagenes = []
for key, value in self.tmdb.iteritems():
for key, value in self.tmdb.items():
for detail in value:
self.imagenes.append('http://image.tmdb.org/t/p/w342' + detail["file_path"])
for tipo, child in self.fanartv.iteritems():
for tipo, child in self.fanartv.items():
for imagen in child:
self.imagenes.append(imagen["url"].replace("/fanart/", "/preview/"))
for imagen, title in self.fa:
@@ -2326,7 +2345,7 @@ def fanartv(item, infoLabels, images={}):
url = "http://webservice.fanart.tv/v3/tv/%s?api_key=cab16e262d72fea6a6843d679aa10300" % id_search
data = jsontools.load(httptools.downloadpage(url, headers=headers).data)
if data and not "error message" in data:
for key, value in data.items():
for key, value in list(data.items()):
if key not in ["name", "tmdb_id", "imdb_id", "thetvdb_id"]:
images[key] = value
return images
@@ -2398,15 +2417,14 @@ def translate(to_translate, to_language="auto", language="auto", i=0, bio=[]):
Example:
print(translate("salut tu vas bien?", "en"))
hello you alright?'''
import urllib2
import urllib
import urllib.request, urllib.error, urllib.parse
agents = {
'User-Agent': "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)"}
before_trans = 'class="t0">'
to_translate = urllib.quote(to_translate.replace(" ", "+")).replace("%2B", "+")
to_translate = urllib.parse.quote(to_translate.replace(" ", "+")).replace("%2B", "+")
link = "http://translate.google.com/m?hl=%s&sl=%s&q=%s" % (to_language, language, to_translate)
request = urllib2.Request(link, headers=agents)
page = urllib2.urlopen(request).read()
request = urllib.request.Request(link, headers=agents)
page = urllib.request.urlopen(request).read()
result = page[page.find(before_trans) + len(before_trans):]
result = result.split("<")[0]
result = re.sub(r"d>|nn", "", result)

View File

@@ -4,24 +4,29 @@
# ==============
# - Lista de enlaces guardados como favoritos, solamente en Alfa, no Kodi.
# - Los enlaces se organizan en carpetas (virtuales) que puede definir el usuario.
# - Se utiliza un sólo fichero para guardar todas las carpetas y enlaces: alfavorites-default.json
# - Se puede copiar alfavorites-default.json a otros dispositivos ya que la única dependencia local es el thumbnail asociado a los enlaces,
# - Se utiliza un sólo fichero para guardar todas las carpetas y enlaces: kodfavourites-default.json
# - Se puede copiar kodfavourites-default.json a otros dispositivos ya que la única dependencia local es el thumbnail asociado a los enlaces,
# pero se detecta por código y se ajusta al dispositivo actual.
# - Se pueden tener distintos ficheros de alfavoritos y alternar entre ellos, pero solamente uno de ellos es la "lista activa".
# - Los ficheros deben estar en config.get_data_path() y empezar por alfavorites- y terminar en .json
# - Los ficheros deben estar en config.get_data_path() y empezar por kodfavourites- y terminar en .json
# Requerimientos en otros módulos para ejecutar este canal:
# - Añadir un enlace a este canal en channelselector.py
# - Modificar platformtools.py para controlar el menú contextual y añadir "Guardar enlace" en set_context_commands
# ------------------------------------------------------------
import os
import re
#from builtins import str
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
from builtins import object
import os, re
from datetime import datetime
from core import filetools, jsontools
from core.item import Item
from platformcode import config, logger, platformtools
from core import filetools, jsontools
def fechahora_actual():
@@ -32,15 +37,15 @@ def fechahora_actual():
PREFIJO_LISTA = 'kodfavorites-'
# Devuelve el nombre de la lista activa (Ej: alfavorites-default.json)
# Devuelve el nombre de la lista activa (Ej: kodfavourites-default.json)
def get_lista_activa():
return config.get_setting('lista_activa', default = PREFIJO_LISTA + 'default.json')
# Extrae nombre de la lista del fichero, quitando prefijo y sufijo (Ej: alfavorites-Prueba.json => Prueba)
# Extrae nombre de la lista del fichero, quitando prefijo y sufijo (Ej: kodfavourites-Prueba.json => Prueba)
def get_name_from_filename(filename):
return filename.replace(PREFIJO_LISTA, '').replace('.json', '')
# Componer el fichero de lista a partir de un nombre, añadiendo prefijo y sufijo (Ej: Prueba => alfavorites-Prueba.json)
# Componer el fichero de lista a partir de un nombre, añadiendo prefijo y sufijo (Ej: Prueba => kodfavourites-Prueba.json)
def get_filename_from_name(name):
return PREFIJO_LISTA + name + '.json'
@@ -67,7 +72,7 @@ def text_clean(txt, disallowed_chars = '[^a-zA-Z0-9\-_()\[\]. ]+', blank_char =
# Clase para cargar y guardar en el fichero de Alfavoritos
# --------------------------------------------------------
class kodfavoritesData:
class KodfavouritesData(object):
def __init__(self, filename = None):
@@ -760,9 +765,11 @@ def compartir_lista(item):
progreso.update(10, config.get_localized_string(70645), config.get_localized_string(70646))
# Envío del fichero a tinyupload mediante multipart/form-data
from future import standard_library
standard_library.install_aliases()
from lib import MultipartPostHandler
import urllib2
opener = urllib2.build_opener(MultipartPostHandler.MultipartPostHandler)
import urllib.request, urllib.error
opener = urllib.request.build_opener(MultipartPostHandler.MultipartPostHandler)
params = { 'MAX_FILE_SIZE' : '52428800', 'file_description' : '', 'sessionid' : sessionid, 'uploaded_file' : open(fullfilename, 'rb') }
handle = opener.open(upload_url, params)
data = handle.read()
@@ -856,7 +863,7 @@ def descargar_lista(item, url):
if 'tinyupload.com/' in url:
try:
from urlparse import urlparse
from urllib.parse import urlparse
data = httptools.downloadpage(url).data
logger.debug(data)
down_url, url_name = scrapertools.find_single_match(data, ' href="(download\.php[^"]*)"><b>([^<]*)')
@@ -872,7 +879,7 @@ def descargar_lista(item, url):
if not puedes:
platformtools.dialog_ok('Alfa', config.get_localized_string(70655), motivo)
return False
url_json = video_urls[0][1] # https://www58.zippyshare.com/d/qPzzQ0UM/25460/alfavorites-testeanding.json
url_json = video_urls[0][1] # https://www58.zippyshare.com/d/qPzzQ0UM/25460/kodfavourites-testeanding.json
url_name = url_json[url_json.rfind('/')+1:]
elif 'friendpaste.com/' in url:
@@ -887,7 +894,7 @@ def descargar_lista(item, url):
# Download json
data = httptools.downloadpage(url_json).data
# Verificar formato json de alfavorites y añadir info de la descarga
# Verificar formato json de kodfavourites y añadir info de la descarga
jsondata = jsontools.load(data)
if 'user_favorites' not in jsondata or 'info_lista' not in jsondata:
logger.debug(data)

View File

@@ -3,6 +3,11 @@
# Channel for recent videos on several channels
# ------------------------------------------------------------
#from builtins import str
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
import glob
import os
import re
@@ -358,7 +363,7 @@ def get_newest(channel_id, categoria):
modulo = __import__('channels.%s' % channel_id, fromlist=["channels.%s" % channel_id])
except:
try:
exec "import channels." + channel_id + " as modulo"
exec("import channels." + channel_id + " as modulo")
except:
puede = False
@@ -409,8 +414,8 @@ def get_title(item):
elif item.contentTitle: # Si es una pelicula con el canal adaptado
title = item.contentTitle
elif item.fulltitle: # Si el canal no esta adaptado
title = item.fulltitle
elif item.contentTitle: # Si el canal no esta adaptado
title = item.contentTitle
else: # Como ultimo recurso
title = item.title
@@ -510,7 +515,7 @@ def group_by_content(list_result_canal):
dict_contenidos[new_key] = [i]
# Añadimos el contenido encontrado en la lista list_result
for v in dict_contenidos.values():
for v in list(dict_contenidos.values()):
title = v[0].title
if len(v) > 1:
# Eliminar de la lista de nombres de canales los q esten duplicados
@@ -667,7 +672,7 @@ def cb_custom_button(item, dict_values):
if value == "":
value = False
for v in dict_values.keys():
for v in list(dict_values.keys()):
dict_values[v] = not value
if config.set_setting("custom_button_value_news", not value, item.channel) == True:

View File

@@ -46,7 +46,7 @@ def next_ep(item):
time_over = False
time_limit = time() + 30
time_steps = [20,30,40,50,60,70,80,90,100,110,120]
time_steps = [20,30,40,50,60]
TimeFromEnd = time_steps[config.get_setting('next_ep_seconds')]
# wait until the video plays

View File

@@ -3,6 +3,13 @@
# renumeratetools - se encarga de renumerar episodios
# --------------------------------------------------------------------------------
#from builtins import str
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
from builtins import range
from builtins import object
import os
try:
@@ -44,9 +51,9 @@ def context(item):
"""
# Dependiendo de como sea el contexto lo guardamos y añadimos las opciones de filtertools.
if type(item.context) == str:
if isinstance(item.context, str):
_context = item.context.split("|")
elif type(item.context) == list:
elif isinstance(item.context, list):
_context = item.context
else:
_context = []
@@ -155,7 +162,7 @@ def numbered_for_tratk(channel, show, season, episode):
dict_series = jsontools.get_node_from_file(channel, TAG_TVSHOW_RENUMERATE)
# ponemos en minusculas el key, ya que previamente hemos hecho lo mismo con show.
for key in dict_series.keys():
for key in list(dict_series.keys()):
new_key = key.lower()
if new_key != key:
dict_series[new_key] = dict_series[key]
@@ -548,7 +555,7 @@ if xbmcgui:
if len(self.data) > 5:
self.move_scroll()
except Exception, Ex:
except Exception as Ex:
logger.error("HA HABIDO UNA HOSTIA %s" % Ex)
# def onClick(self, control_id):
@@ -850,7 +857,7 @@ if xbmcgui:
self.move_scroll()
except Exception, Ex:
except Exception as Ex:
logger.error("HA HABIDO UNA HOSTIA %s" % Ex)
def move_scroll(self):
@@ -887,7 +894,7 @@ if xbmcgui:
return TextBox("DialogTextViewer.xml", os.getcwd(), "Default", title=title, text=text)
class ControlGroup:
class ControlGroup(object):
"""
conjunto de controles, son los elementos que se muestra por línea de una lista.
"""

21
specials/search.py Executable file → Normal file
View File

@@ -3,6 +3,14 @@
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
from __future__ import division
from builtins import range
from past.utils import old_div
#from builtins import str
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
import os, json, time, inspect, channelselector
from lib.concurrent import futures
from core.item import Item
@@ -206,7 +214,7 @@ def channel_search(item):
if finished in searching:
searching_titles.remove(searching_titles[searching.index(finished)])
searching.remove(finished)
progress.update((cnt * 100) / len(channel_list), config.get_localized_string(70744) % str(len(channel_list) - cnt),
progress.update(old_div((cnt * 100), len(channel_list)), config.get_localized_string(70744) % str(len(channel_list) - cnt),
str(searching_titles))
progress.close()
@@ -221,7 +229,7 @@ def channel_search(item):
ch_name = channel_titles[channel_list.index(key)]
grouped = list()
cnt += 1
progress.update((cnt * 100) / len(ch_list), config.get_localized_string(60295), config.get_localized_string(60293))
progress.update(old_div((cnt * 100), len(ch_list)), config.get_localized_string(60295), config.get_localized_string(60293))
if len(value) <= max_results and item.mode != 'all':
if len(value) == 1:
if not value[0].action or config.get_localized_string(70006).lower() in value[0].title.lower():
@@ -292,9 +300,8 @@ def get_channel_results(ch, item, session):
ch_params = channeltools.get_channel_parameters(ch)
exec("from channels import " + ch_params["channel"] + " as module")
mainlist = module.mainlist(Item(channel=ch_params["channel"]))
module = __import__('channels.%s' % ch_params["channel"], fromlist=["channels.%s" % ch_params["channel"]])
mainlist = getattr(module, 'mainlist')(Item(channel=ch_params["channel"]))
search_action = [elem for elem in mainlist if elem.action == "search" and (item.mode == 'all' or elem.contentType == item.mode)]
if search_action:
@@ -431,7 +438,7 @@ def setting_channel_new(item):
elif presel_values[ret] == 'none':
preselect = []
elif presel_values[ret] == 'all':
preselect = range(len(ids))
preselect = list(range(len(ids)))
elif presel_values[ret] in ['cast', 'lat']:
preselect = []
for i, lg in enumerate(lista_lang):
@@ -488,7 +495,7 @@ def genres_menu(item):
mode = item.mode.replace('show', '')
genres = tmdb.get_genres(mode)
for key, value in genres[mode].items():
for key, value in list(genres[mode].items()):
discovery = {'url': 'discover/%s' % mode, 'with_genres': key,
'language': def_lang, 'page': '1'}

View File

@@ -3,6 +3,14 @@
# Configuracion
# ------------------------------------------------------------
from __future__ import division
#from builtins import str
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
from builtins import range
from past.utils import old_div
from channelselector import get_thumb
from core import filetools
from core import servertools
@@ -70,6 +78,8 @@ def menu_channels(item):
from core import channeltools
channel_list = channelselector.filterchannels("all")
for channel in channel_list:
if not channel.channel:
continue
channel_parameters = channeltools.get_channel_parameters(channel.channel)
if channel_parameters["has_settings"]:
itemlist.append(Item(channel=CHANNELNAME, title=". " + config.get_localized_string(60547) % channel.title,
@@ -98,12 +108,41 @@ def autostart(item): # item necessario launcher.py linea 265
def setting_torrent(item):
logger.info()
LIBTORRENT_PATH = config.get_setting("libtorrent_path", server="torrent", default="")
LIBTORRENT_ERROR = config.get_setting("libtorrent_error", server="torrent", default="")
default = config.get_setting("torrent_client", server="torrent", default=0)
BUFFER = config.get_setting("mct_buffer", server="torrent", default="50")
DOWNLOAD_PATH = config.get_setting("mct_download_path", server="torrent", default=config.get_setting("downloadpath"))
if not DOWNLOAD_PATH: DOWNLOAD_PATH = filetools.join(config.get_data_path(), 'downloads')
BACKGROUND = config.get_setting("mct_background_download", server="torrent", default=True)
RAR = config.get_setting("mct_rar_unpack", server="torrent", default=True)
DOWNLOAD_LIMIT = config.get_setting("mct_download_limit", server="torrent", default="")
BUFFER_BT = config.get_setting("bt_buffer", server="torrent", default="50")
DOWNLOAD_PATH_BT = config.get_setting("bt_download_path", server="torrent", default=config.get_setting("downloadpath"))
if not DOWNLOAD_PATH_BT: DOWNLOAD_PATH_BT = filetools.join(config.get_data_path(), 'downloads')
MAGNET2TORRENT = config.get_setting("magnet2torrent", server="torrent", default=False)
torrent_options = [config.get_localized_string(30006), config.get_localized_string(70254), config.get_localized_string(70255)]
torrent_options.extend(platformtools.torrent_client_installed())
list_controls = [
{
"id": "libtorrent_path",
"type": "text",
"label": "Libtorrent path",
"default": LIBTORRENT_PATH,
"enabled": True,
"visible": False
},
{
"id": "libtorrent_error",
"type": "text",
"label": "libtorrent error",
"default": LIBTORRENT_ERROR,
"enabled": True,
"visible": False
},
{
"id": "list_torrent",
"type": "list",
@@ -112,6 +151,70 @@ def setting_torrent(item):
"enabled": True,
"visible": True,
"lvalues": torrent_options
},
{
"id": "mct_buffer",
"type": "text",
"label": "MCT - Tamaño del Buffer a descargar antes de la reproducción",
"default": BUFFER,
"enabled": True,
"visible": "eq(-1,%s)" % torrent_options[2]
},
{
"id": "mct_download_path",
"type": "text",
"label": "MCT - Ruta de la carpeta de descarga",
"default": DOWNLOAD_PATH,
"enabled": True,
"visible": "eq(-2,%s)" % torrent_options[2]
},
{
"id": "bt_buffer",
"type": "text",
"label": "BT - Tamaño del Buffer a descargar antes de la reproducción",
"default": BUFFER_BT,
"enabled": True,
"visible": "eq(-3,%s)" % torrent_options[1]
},
{
"id": "bt_download_path",
"type": "text",
"label": "BT - Ruta de la carpeta de descarga",
"default": DOWNLOAD_PATH_BT,
"enabled": True,
"visible": "eq(-4,%s)" % torrent_options[1]
},
{
"id": "mct_download_limit",
"type": "text",
"label": "Límite (en Kb's) de la velocidad de descarga en segundo plano (NO afecta a RAR)",
"default": DOWNLOAD_LIMIT,
"enabled": True,
"visible": "eq(-5,%s) | eq(-5,%s)" % (torrent_options[1], torrent_options[2])
},
{
"id": "mct_rar_unpack",
"type": "bool",
"label": "¿Quiere que se descompriman los archivos RAR y ZIP para su reproducción?",
"default": RAR,
"enabled": True,
"visible": True
},
{
"id": "mct_background_download",
"type": "bool",
"label": "¿Se procesa la descompresión de RARs en segundo plano?",
"default": BACKGROUND,
"enabled": True,
"visible": True
},
{
"id": "magnet2torrent",
"type": "bool",
"label": "¿Quiere convertir los Magnets a Torrents para ver tamaños y almacenarlos?",
"default": MAGNET2TORRENT,
"enabled": True,
"visible": True
}
]
@@ -122,6 +225,22 @@ def setting_torrent(item):
def save_setting_torrent(item, dict_data_saved):
    """Persist the torrent options edited in the settings dialog.

    Callback for the dialog opened by ``setting_torrent``: every control
    value present in *dict_data_saved* is written to the "torrent" server
    settings.  The dialog control ``list_torrent`` is stored under the
    setting name ``torrent_client``; all other controls keep their id.

    Args:
        item: menu Item that triggered the dialog (unused, kept for the
            platformtools callback signature).
        dict_data_saved: dict of values returned by the dialog, or a falsy
            value when the user cancelled (nothing is saved in that case).
    """
    if not dict_data_saved:
        return
    # dialog control id -> setting name in the "torrent" server config
    option_map = {
        "list_torrent": "torrent_client",
        "mct_buffer": "mct_buffer",
        "mct_download_path": "mct_download_path",
        "mct_background_download": "mct_background_download",
        "mct_rar_unpack": "mct_rar_unpack",
        "mct_download_limit": "mct_download_limit",
        "bt_buffer": "bt_buffer",
        "bt_download_path": "bt_download_path",
        "magnet2torrent": "magnet2torrent",
    }
    for control_id, setting_name in option_map.items():
        if control_id in dict_data_saved:
            config.set_setting(setting_name, dict_data_saved[control_id], server="torrent")
def menu_servers(item):
logger.info()
@@ -138,7 +257,7 @@ def menu_servers(item):
# Inicio - Servidores configurables
server_list = servertools.get_debriders_list().keys()
server_list = list(servertools.get_debriders_list().keys())
for server in server_list:
server_parameters = servertools.get_server_parameters(server)
if server_parameters["has_settings"]:
@@ -149,13 +268,12 @@ def menu_servers(item):
itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60554),
action="", folder=False, text_bold = True, thumbnail=get_thumb("setting_0.png")))
server_list = servertools.get_servers_list().keys()
server_list = list(servertools.get_servers_list().keys())
for server in sorted(server_list):
server_parameters = servertools.get_server_parameters(server)
logger.info(server_parameters)
if server_parameters["has_settings"] and filter(lambda x: x["id"] not in ["black_list", "white_list"],
server_parameters["settings"]):
if server_parameters["has_settings"] and [x for x in server_parameters["settings"] if x["id"] not in ["black_list", "white_list"]]:
itemlist.append(
Item(channel=CHANNELNAME, title=". " + config.get_localized_string(60553) % server_parameters["name"],
action="server_config", config=server, folder=False, thumbnail=""))
@@ -207,7 +325,7 @@ def cb_servers_blacklist(item, dict_values):
progreso = platformtools.dialog_progress(config.get_localized_string(60557), config.get_localized_string(60558))
n = len(dict_values)
i = 1
for k, v in dict_values.items():
for k, v in list(dict_values.items()):
if k == 'filter_servers':
config.set_setting('filter_servers', v)
else:
@@ -215,7 +333,7 @@ def cb_servers_blacklist(item, dict_values):
if v: # Si el servidor esta en la lista negra no puede estar en la de favoritos
config.set_setting("favorites_servers_list", 100, server=k)
f = True
progreso.update((i * 100) / n, config.get_localized_string(60559) % k)
progreso.update(old_div((i * 100), n), config.get_localized_string(60559) % k)
i += 1
if not f: # Si no hay ningun servidor en la lista, desactivarla
@@ -269,21 +387,21 @@ def cb_servers_favorites(server_names, dict_values):
dict_name = {}
progreso = platformtools.dialog_progress(config.get_localized_string(60557), config.get_localized_string(60558))
for i, v in dict_values.items():
for i, v in list(dict_values.items()):
if i == "favorites_servers":
config.set_setting("favorites_servers", v)
elif int(v) > 0:
dict_name[server_names[v]] = int(i)
servers_list = servertools.get_servers_list().items()
servers_list = list(servertools.get_servers_list().items())
n = len(servers_list)
i = 1
for server, server_parameters in servers_list:
if server_parameters['name'] in dict_name.keys():
if server_parameters['name'] in list(dict_name.keys()):
config.set_setting("favorites_servers_list", dict_name[server_parameters['name']], server=server)
else:
config.set_setting("favorites_servers_list", 0, server=server)
progreso.update((i * 100) / n, config.get_localized_string(60559) % server_parameters['name'])
progreso.update(old_div((i * 100), n), config.get_localized_string(60559) % server_parameters['name'])
i += 1
if not dict_name: # Si no hay ningun servidor en lalista desactivarla
@@ -310,11 +428,10 @@ def submenu_tools(item):
if filetools.exists(channel_custom):
itemlist.append(Item(channel='custom', action='mainlist', title='Custom Channel'))
itemlist.append(Item(channel=CHANNELNAME, action="check_quickfixes", folder=False,
title=config.get_localized_string(30001), plot=config.get_addon_version(with_fix=True) ))
# itemlist.append(Item(channel=CHANNELNAME, action="update_quasar", folder=False,
# title=config.get_localized_string(70569)))
# title=config.get_localized_string(70569)))
itemlist.append(Item(channel=CHANNELNAME, action="update_quasar", folder=False,
title="Actualizar addon externo Quasar"))
itemlist.append(Item(channel=CHANNELNAME, action="", title="", folder=False,
thumbnail=get_thumb("setting_0.png")))
@@ -477,7 +594,7 @@ def conf_tools(item):
channeljson_exists = True
# Obtenemos configuracion guardada de ../settings/channel_data.json
try:
dict_file = jsontools.load(open(file_settings, "rb").read())
dict_file = jsontools.load(filetools.read(file_settings))
if isinstance(dict_file, dict) and 'settings' in dict_file:
dict_settings = dict_file['settings']
except EnvironmentError:
@@ -517,14 +634,9 @@ def conf_tools(item):
dict_settings = default_settings
dict_file['settings'] = dict_settings
# Creamos el archivo ../settings/channel_data.json
json_data = jsontools.dump(dict_file)
try:
open(file_settings, "wb").write(json_data)
# logger.info(channel.channel + " - Archivo _data.json GUARDADO!")
# El channel_data.json se ha creado/modificado
list_status = config.get_localized_string(60560)
except EnvironmentError:
if not filetools.write(file_settings, jsontools.dump(dict_file), silent=True):
logger.error("ERROR al salvar el archivo: %s" % file_settings)
list_status = config.get_localized_string(60560)
else:
if default_settings is None:
list_status = config.get_localized_string(60571)
@@ -596,7 +708,7 @@ def channels_onoff(item):
ret = platformtools.dialog_select(config.get_localized_string(60545), preselecciones)
if ret == -1: return False # pedido cancel
if ret == 2: preselect = []
elif ret == 1: preselect = range(len(ids))
elif ret == 1: preselect = list(range(len(ids)))
else:
preselect = []
for i, canal in enumerate(ids):
@@ -751,10 +863,394 @@ def overwrite_tools(item):
movie.channel.capitalize()))
# ... y la volvemos a añadir
videolibrarytools.save_movie(movie)
except Exception, ex:
except Exception as ex:
logger.error("Error al crear de nuevo la película")
template = "An exception of type %s occured. Arguments:\n%r"
message = template % (type(ex).__name__, ex.args)
logger.error(message)
p_dialog2.close()
def report_menu(item):
    """Build the step-by-step menu that guides the user through reporting an
    Alfa failure via a "pastebin" upload.

    Shows the numbered steps (enable DEBUG, reproduce the problem, generate
    the report, optionally disable DEBUG).  When ``item.url`` is already set
    (a report was uploaded by ``report_send``), extra entries with the forum
    address and the report URL are appended; for single-use hosts
    (``item.one_use``) a warning not to open the link is added, since the
    paste is deleted on first access.

    Returns:
        list of Item objects for Kodi to render.
    """
    logger.info('URL: ' + item.url)

    from channelselector import get_thumb
    thumb_debug = get_thumb("update.png")
    thumb_error = get_thumb("error.png")
    thumb_next = get_thumb("next.png")
    itemlist = []
    paso = 1  # running step number shown in each menu title

    # Builds an option menu letting the user report an Alfa failure through a
    # "pastebin" server.  For the report to be complete the user must have DEBUG=ON.
    # Free "pastebin" servers have capacity limits, so the log size matters.
    # At the end of the upload, the log address on the server is handed to the
    # user so they can report it.
    itemlist.append(Item(channel=item.channel, action="", title="[COLOR gold]SIGA los siguiente PASOS:[/COLOR]",
                         thumbnail=thumb_next, folder=False))

    #if not config.get_setting('debug'):
    itemlist.append(Item(channel=item.channel, action="activate_debug", extra=True,
                         title="PASO %s: Active DEBUG aquí antes de generar el log" %
                         str(paso), thumbnail=thumb_debug, folder=False))
    paso += 1

    itemlist.append(Item(channel="channelselector", action="getmainlist",
                         title="PASO %s: Reproduzca el problema y vuelva al PASO %s" %
                         (str(paso), str(paso+1)), thumbnail=thumb_debug, folder=False))
    paso += 1

    itemlist.append(Item(channel=item.channel, action="report_send",
                         title="PASO %s: Genere el informe de FALLO desde aquí" %
                         str(paso), thumbnail=thumb_error))
    paso += 1

    #if config.get_setting('debug'):
    itemlist.append(Item(channel=item.channel, action="activate_debug", extra=False,
                         title="PASO %s: Desactive DEBUG aquí -opcional-" % str(paso),
                         thumbnail=thumb_debug, folder=False))
    paso += 1

    # A report URL is present: show forum address and the uploaded-log link
    if item.url:
        itemlist.append(Item(channel=item.channel, action="", title="", folder=False))
        itemlist.append(Item(channel=item.channel, action="",
                             title="[COLOR limegreen]Ha terminado de generar el informe de fallo,[/COLOR]",
                             thumbnail=thumb_next, folder=False))
        itemlist.append(Item(channel=item.channel, action="",
                             title="[COLOR limegreen]Repórtelo en el Foro de Alfa: [/COLOR][COLOR yellow](pinche si Chrome)[/COLOR]",
                             thumbnail=thumb_next,
                             folder=False))
        itemlist.append(Item(channel=item.channel, action="call_chrome",
                             url='https://alfa-addon.com/foros/ayuda.12/',
                             title="**- [COLOR yellow]https://alfa-addon.com/foros/ayuda.12/[/COLOR] -**",
                             thumbnail=thumb_next, unify=False, folder=False))
        # Single-use pastes must not be opened (they are deleted on access),
        # so the link entry is made non-clickable in that case
        if item.one_use:
            action = ''
            url = ''
        else:
            action = 'call_chrome'
            url = item.url
        itemlist.append(Item(channel=item.channel, action=action,
                             title="**- LOG: [COLOR gold]%s[/COLOR] -**" % item.url, url=url,
                             thumbnail=thumb_next, unify=False, folder=False))
        if item.one_use:
            itemlist.append(Item(channel=item.channel, action="",
                                 title="[COLOR orange]NO ACCEDA al INFORME: se BORRARÁ[/COLOR]",
                                 thumbnail=thumb_next, folder=False))
            itemlist.append(Item(channel=item.channel, action="",
                                 title="[COLOR orange]ya que es de un solo uso[/COLOR]",
                                 thumbnail=thumb_next, folder=False))

    return itemlist
def activate_debug(item):
    """Enable or disable the DEBUG option in settings.xml.

    ``item.extra`` carries the desired state as a boolean.  When it arrives
    as a string (the action was re-entered from a folder listing) nothing is
    toggled and the report menu is redrawn instead.
    """
    logger.info(item.extra)
    from platformcode import platformtools

    # Re-entry with a stringified extra: just rebuild the menu.
    if isinstance(item.extra, str):
        return report_menu(item)

    enable = bool(item.extra)
    config.set_setting('debug', enable)
    platformtools.dialog_notification('Modo DEBUG', 'Activado' if enable else 'Desactivado')
def report_send(item, description='', fatal=False):
    """Upload the Kodi log to a "pastebin"-style service and hand the URL to
    the user so they can report the failure on the Alfa forum.

    Args:
        item: menu Item; ``item.url`` receives the uploaded-report URL and
            ``item.one_use`` is set for single-use hosts.
        description: optional failure description; 'OK' prompts the user for
            one, any other non-empty text is prepended to the log.
        fatal: reserved for future logger.crash use — changes the success
            dialog wording and suppresses the failure notification.

    Returns:
        the refreshed report menu (``report_menu(item)``).
    """
    import xbmc
    import xbmcaddon
    import random
    import traceback
    import re
    if PY3:
        #from future import standard_library
        #standard_library.install_aliases()
        import urllib.parse as urlparse                              # Very slow in PY2.  Native in PY3
        import urllib.parse as urllib
    else:
        import urllib                                               # Use the PY2 native module, which is faster
        import urlparse
    try:
        requests_status = True
        import requests
    except:
        requests_status = False
        logger.error(traceback.format_exc())

    from core import jsontools, httptools, proxytools, scrapertools
    from platformcode import envtal

    # This function performs the LOG upload.  The file size matters a lot because
    # free "pastebin" services have limits, sometimes very low ones.
    # One service, File.io, allows direct upload of "binary files" through "requests",
    # which dramatically raises the upload capacity, well beyond what is needed.
    # A list of "pastebin" services able to perform the upload is therefore kept,
    # selected by available capacity and availability.
    # To drive all "pastebin" servers with common code, a dict of servers and their
    # characteristics is used.  Each entry captures the peculiarities of one server,
    # both for building the POST request and for extracting the upload key from the
    # response (json, header, regex over the body, ...).
    # On entry the server list is shuffled so that not all users upload to the same
    # server, which could cause overloads.
    # The log file is read and its size compared with each server's capacity
    # (field 10 of each entry, counting from 0, in MB) until a capable one is found.
    # If the upload fails the remaining servers with enough capacity are tried.
    # If no server is available the user is asked to retry later, or to post the log
    # directly on the forum.  If it is a size problem, they are asked to restart Kodi
    # and reproduce the failure so the LOG comes out smaller.

    # Per-server tuple layout (index): 0 active flag, 1 base URL, 2 API suffix,
    # 3 title mode, 4/5 POST prefix/suffix, 6 request type (data/headers/requests),
    # 7 response type, 8 response key, 9 secondary key / header name,
    # 10 max size MB, 11 timeout, 12 random headers?, 13 return-URL part,
    # 14 return-URL tail, 15 extra headers (json)
    pastebin_list = {
        'hastebin': ('1', 'https://hastebin.com/', 'documents', 'random', '', '',
                     'data', 'json', 'key', '', '0.29', '10', True, 'raw/', '', ''),
        'dpaste': ('1', 'http://dpaste.com/', 'api/v2/', 'random', 'content=',
                   '&syntax=text&title=%s&poster=alfa&expiry_days=7',
                   'headers', '', '', 'location', '0.23', '15', True, '', '.txt', ''),
        'ghostbin': ('1', 'https://ghostbin.com/', 'paste/new', 'random', 'lang=text&text=',
                     '&expire=2d&password=&title=%s',
                     'data', 'regex', '<title>(.*?)\s*-\s*Ghostbin<\/title>', '',
                     '0.49', '15', False, 'paste/', '', ''),
        'write.as': ('1', 'https://write.as/', 'api/posts', 'random', 'body=', '&title=%s',
                     'data', 'json', 'data', 'id', '0.018', '15', True, '', '', ''),
        'oneclickpaste': ('1', 'http://oneclickpaste.com/', 'index.php', 'random', 'paste_data=',
                          '&title=%s&format=text&paste_expire_date=1W&visibility=0&pass=&submit=Submit',
                          'data', 'regex', '<a class="btn btn-primary" href="[^"]+\/(\d+\/)">\s*View\s*Paste\s*<\/a>',
                          '', '0.060', '5', True, '', '', ''),
        'bpaste': ('1', 'https://bpaste.net/', '', 'random', 'code=', '&lexer=text&expiry=1week',
                   'data', 'regex', 'View\s*<a\s*href="[^*]+/(.*?)">raw<\/a>', '',
                   '0.79', '15', True, 'raw/', '', ''),
        'dumpz': ('0', 'http://dumpz.org/', 'api/dump', 'random', 'code=', '&lexer=text&comment=%s&password=',
                  'headers', '', '', 'location', '0.99', '15', False, '', '', ''),
        'file.io': ('1', 'https://file.io/', '', 'random', '', 'expires=1w',
                    'requests', 'json', 'key', '', '99.0', '30', False, '', '.log', ''),
        'uploadfiles': ('1', 'https://up.uploadfiles.io/upload', '', 'random', '', '',
                        'requests', 'json', 'url', '', '99.0', '30', False, None, '', '')
    }
    pastebin_list_last = ['hastebin', 'ghostbin', 'file.io']            # these services are kept for last
    pastebin_one_use = ['file.io']                                      # single-use servers, deleted after reading
    pastebin_dir = []
    paste_file = {}
    paste_params = ()
    paste_post = ''
    status = False
    msg = 'Servicio no disponible. Inténtelo más tarde'

    # Check DEBUG=ON; if not, reject and ask the user to enable it and reproduce the failure
    if not config.get_setting('debug'):
        platformtools.dialog_notification('DEBUG debe estar ACTIVO', 'antes de generar el informe')
        return report_menu(item)

    # For future use: the user can enter a short failure description to be added to the LOG
    if description == 'OK':
        description = platformtools.dialog_input('', 'Introduzca una breve descripción del fallo')

    # Write some Kodi and Alfa variables to the log to help diagnose the failure
    var = proxytools.logger_disp(debugging=True)
    environment = envtal.list_env()
    if not environment['log_path']:
        environment['log_path'] = str(filetools.join(xbmc.translatePath("special://logpath/"), 'kodi.log'))
    # NOTE(review): sizes recomputed unconditionally here — confirm against envtal.list_env()
    environment['log_size_bytes'] = str(filetools.getsize(environment['log_path']))
    environment['log_size'] = str(round(float(environment['log_size_bytes']) / (1024*1024), 3))

    # Read the LOG file
    log_path = environment['log_path']
    if filetools.exists(log_path):
        log_size_bytes = int(environment['log_size_bytes'])             # file size in bytes
        log_size = float(environment['log_size'])                       # file size in MB
        log_data = filetools.read(log_path)                             # file contents
        if not log_data:                                                # read error?
            platformtools.dialog_notification('No puede leer el log de Kodi', 'Comuniquelo directamente en el Foro de Alfa')
            return report_menu(item)
    else:                                                               # log missing or wrong path?
        platformtools.dialog_notification('LOG de Kodi no encontrado', 'Comuniquelo directamente en el Foro de Alfa')
        return report_menu(item)

    # If a failure description was entered, insert it at the beginning of the LOG data
    log_title = '***** DESCRIPCIÓN DEL FALLO *****'
    if description:
        log_data = '%s\n%s\n\n%s' %(log_title, description, log_data)

    # Shuffle the "pastebin" server names
    for label_a, value_a in list(pastebin_list.items()):
        if label_a not in pastebin_list_last:
            pastebin_dir.append(label_a)
    random.shuffle(pastebin_dir)
    pastebin_dir.extend(pastebin_list_last)                             # these services go last
    #pastebin_dir = ['uploadfiles']                                     # for testing a single service
    #log_data = 'TEST PARA PRUEBAS DEL SERVICIO'

    # Walk the "pastebin" server list until one active, capable and available is found
    for paste_name in pastebin_dir:
        if pastebin_list[paste_name][0] != '1':                         # server inactive: skip
            continue
        if pastebin_list[paste_name][6] == 'requests' and not requests_status:  # "requests" unavailable: skip
            continue

        paste_host = pastebin_list[paste_name][1]                       # "pastebin" server URL
        paste_sufix = pastebin_list[paste_name][2]                      # API suffix for the POST
        paste_title = ''
        if pastebin_list[paste_name][3] == 'random':
            paste_title = "LOG" + str(random.randrange(1, 999999999))   # LOG title
        paste_post1 = pastebin_list[paste_name][4]                      # leading part of the POST
        paste_post2 = pastebin_list[paste_name][5]                      # trailing part of the POST
        paste_type = pastebin_list[paste_name][6]                       # downloadpage type: DATA or HEADERS
        paste_resp = pastebin_list[paste_name][7]                       # response type: JSON or body with REGEX
        paste_resp_key = pastebin_list[paste_name][8]                   # for JSON, primary label with the KEY
        paste_url = pastebin_list[paste_name][9]                        # primary label for HEADER, secondary for JSON
        paste_file_size = float(pastebin_list[paste_name][10])          # server capacity in MB
        if paste_file_size > 0:                                         # 0 means unlimited capacity
            if log_size > paste_file_size:                              # capacity vs size check
                msg = 'Archivo de log demasiado grande. Reinicie Kodi y reinténtelo'
                continue
        paste_timeout = int(pastebin_list[paste_name][11])              # server timeout
        paste_random_headers = pastebin_list[paste_name][12]            # use RANDOM headers to fool the server?
        paste_host_return = pastebin_list[paste_name][13]               # URL part used to build the user-facing key
        paste_host_return_tail = pastebin_list[paste_name][14]          # URL suffix used to build the user-facing key
        paste_headers = {}
        if pastebin_list[paste_name][15]:                               # headers required by the server
            paste_headers.update(jsontools.load((pastebin_list[paste_name][15])))
        if paste_name in pastebin_one_use:
            pastebin_one_use_msg = '[COLOR red]NO ACCEDA al INFORME: se BORRARÁ[/COLOR]'
            item.one_use = True
        else:
            pastebin_one_use_msg = ''

        try:
            # Build the POST according to the "pastebin" server options
            # "requests" format is handled here
            if paste_type == 'requests':
                paste_file = {'file': (paste_title+'.log', log_data)}
                if paste_post1:
                    paste_file.update(paste_post1)
                if paste_post2:
                    if '%s' in paste_post2:
                        paste_params = paste_post2 % (paste_title+'.log', log_size_bytes)
                    else:
                        paste_params = paste_post2
            # downloadpage format is handled here
            else:
                #log_data = 'Test de Servidor para ver su viabilidad (áéíóúñ¿?)'
                if paste_name in ['hastebin']:                          # some services do not need "quote"
                    paste_post = log_data
                else:
                    paste_post = urllib.quote_plus(log_data)            # URL-quote the LOG data
                if paste_post1:
                    paste_post = '%s%s' % (paste_post1, paste_post)
                if paste_post2:
                    if '%s' in paste_post2:
                        paste_post += paste_post2 % paste_title
                    else:
                        paste_post += paste_post2

            # Issue the downloadpage request with HEADERS or DATA, using the server parameters
            if paste_type == 'headers':
                data = httptools.downloadpage(paste_host+paste_sufix, post=paste_post,
                                              timeout=paste_timeout, random_headers=paste_random_headers,
                                              headers=paste_headers).headers
            elif paste_type == 'data':
                data = httptools.downloadpage(paste_host+paste_sufix, post=paste_post,
                                              timeout=paste_timeout, random_headers=paste_random_headers,
                                              headers=paste_headers).data
            # REQUESTS-style uploads are performed here
            elif paste_type == 'requests':
                #data = requests.post(paste_host, params=paste_params, files=paste_file,
                #                     timeout=paste_timeout)
                data = httptools.downloadpage(paste_host, params=paste_params, file=log_data,
                                              file_name=paste_title+'.log', timeout=paste_timeout,
                                              random_headers=paste_random_headers, headers=paste_headers)
        except:
            msg = 'Inténtelo más tarde'
            logger.error('Fallo al guardar el informe. ' + msg)
            logger.error(traceback.format_exc())
            continue

        # Parse the server response and extract the upload key to build the URL handed to the user
        if data:
            paste_host_resp = paste_host
            if paste_host_return == None:                               # server returns the full URL: do not compose
                paste_host_resp = ''
                paste_host_return = ''

            # Responses to REQUESTS-style uploads
            if paste_type == 'requests':                                # "requests"-type response?
                if paste_resp == 'json':                                # JSON response?
                    if paste_resp_key in data.data:
                        if not paste_url:
                            key = jsontools.load(data.data)[paste_resp_key]             # one label
                        else:
                            key = jsontools.load(data.data)[paste_resp_key][paste_url]  # two nested labels
                        item.url = "%s%s%s" % (paste_host_resp+paste_host_return, key,
                                               paste_host_return_tail)
                    else:
                        logger.error('ERROR en formato de retorno de datos. data.data=' +
                                     str(data.data))
                        continue
            # Responses to DOWNLOADPAGE uploads
            elif paste_resp == 'json':                                  # JSON response?
                if paste_resp_key in data:
                    if not paste_url:
                        key = jsontools.load(data)[paste_resp_key]                      # one label
                    else:
                        key = jsontools.load(data)[paste_resp_key][paste_url]           # two nested labels
                    item.url = "%s%s%s" % (paste_host_resp+paste_host_return, key,
                                           paste_host_return_tail)
                else:
                    logger.error('ERROR en formato de retorno de datos. data=' + str(data))
                    continue
            elif paste_resp == 'regex':                                 # response in BODY, extract with a REGEX?
                key = scrapertools.find_single_match(data, paste_resp_key)
                if key:
                    item.url = "%s%s%s" % (paste_host_resp+paste_host_return, key,
                                           paste_host_return_tail)
                else:
                    logger.error('ERROR en formato de retorno de datos. data=' + str(data))
                    continue
            elif paste_type == 'headers':                               # response in HEADERS, look in "location"?
                if paste_url in data:
                    item.url = data[paste_url]                          # label carrying the key
                    item.url = urlparse.urljoin(paste_host_resp + paste_host_return,
                                                item.url + paste_host_return_tail)
                else:
                    logger.error('ERROR en formato de retorno de datos. response.headers=' +
                                 str(data))
                    continue
            else:
                logger.error('ERROR en formato de retorno de datos. paste_type=' +
                             str(paste_type) + ' / DATA: ' + data)
                continue

            status = True                                               # upload finished successfully
            logger.info('Informe de Fallo en Alfa CREADO: ' + str(item.url))  # report URL saved for the user
            if fatal:                                                   # future use, for logger.crash
                platformtools.dialog_ok('Informe de ERROR en Alfa CREADO', 'Repórtelo en el foro agregando ERROR FATAL y esta URL: ', '[COLOR gold]%s[/COLOR]' % item.url, pastebin_one_use_msg)
            else:                                                       # hand the report URL to the user
                platformtools.dialog_ok('Informe de Fallo en Alfa CREADO', 'Repórtelo en el foro agregando una descripcion del fallo y esta URL: ', '[COLOR gold]%s[/COLOR]' % item.url, pastebin_one_use_msg)
            break                                                       # done, stop searching

    if not status and not fatal:                                        # upload failed...
        platformtools.dialog_notification('Fallo al guardar el informe', msg)  # ... notify the cause
        logger.error('Fallo al guardar el informe. ' + msg)

    # Return control with item.url updated so the report URL appears in the menu
    return report_menu(item)
def call_chrome(item):
    """Open ``item.url`` with the generictools Chrome helper and return its result."""
    from lib import generictools
    return generictools.call_chrome(item.url)

View File

@@ -1,19 +1,17 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
#from builtins import str
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
import os
import xbmc
import xbmcaddon
import xbmcgui
from core import jsontools
from core.item import Item
from core import jsontools
from platformcode import config, logger
from platformcode import launcher
addon = xbmcaddon.Addon('metadata.themoviedb.org')
def_lang = addon.getSetting('language')
import xbmc, xbmcgui, xbmcplugin, xbmcaddon
media_path = os.path.join(config.get_runtime_path(), "resources/skins/Default/media/side_menu/")
menu_settings_path = os.path.join(config.get_data_path(), "settings_channels", 'menu_settings_data.json')
@@ -24,7 +22,8 @@ else:
menu_node = {'categoria actual':config.get_setting('category')}
jsontools.update_node(menu_node, 'menu_settings_data.json', "menu")
addon = xbmcaddon.Addon('metadata.themoviedb.org')
def_lang = addon.getSetting('language')
ACTION_SHOW_FULLSCREEN = 36
ACTION_GESTURE_SWIPE_LEFT = 511

View File

@@ -3,13 +3,29 @@
# Search trailers from youtube, filmaffinity, abandomoviez, vimeo, etc...
# --------------------------------------------------------------------------------
import re
import urllib
from __future__ import division
#from builtins import str
import sys
import urlparse
import xbmcaddon
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
from past.utils import old_div
if PY3:
#from future import standard_library
#standard_library.install_aliases()
import urllib.parse as urllib # Es muy lento en PY2. En PY3 es nativo
import urllib.parse as urlparse
else:
import urllib # Usamos el nativo de PY2 que es más rápido
import urlparse
import re
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core.item import Item
@@ -39,9 +55,9 @@ def buscartrailer(item, trailers=[]):
itemlist = globals()[item.action](item)
else:
# Se elimina la opción de Buscar Trailer del menú contextual para evitar redundancias
if type(item.context) is str and "buscar_trailer" in item.context:
if isinstance(item.context, str) and "buscar_trailer" in item.context:
item.context = item.context.replace("buscar_trailer", "")
elif type(item.context) is list and "buscar_trailer" in item.context:
elif isinstance(item.context, list) and "buscar_trailer" in item.context:
item.context.remove("buscar_trailer")
item.text_color = ""
@@ -50,15 +66,15 @@ def buscartrailer(item, trailers=[]):
if item.contentTitle != "":
item.contentTitle = item.contentTitle.strip()
elif keyboard:
fulltitle = re.sub('\[\/*(B|I|COLOR)\s*[^\]]*\]', '', item.fulltitle.strip())
item.contentTitle = platformtools.dialog_input(default=fulltitle, heading=config.get_localized_string(70505))
contentTitle = re.sub('\[\/*(B|I|COLOR)\s*[^\]]*\]', '', item.contentTitle.strip())
item.contentTitle = platformtools.dialog_input(default=contentTitle, heading=config.get_localized_string(70505))
if item.contentTitle is None:
item.contentTitle = fulltitle
item.contentTitle = contentTitle
else:
item.contentTitle = item.contentTitle.strip()
else:
fulltitle = re.sub('\[\/*(B|I|COLOR)\s*[^\]]*\]', '', item.fulltitle.strip())
item.contentTitle = fulltitle
contentTitle = re.sub('\[\/*(B|I|COLOR)\s*[^\]]*\]', '', item.contentTitle.strip())
item.contentTitle = contentTitle
item.year = item.infoLabels['year']
@@ -148,7 +164,6 @@ def tmdb_trailers(item, tipo="movie"):
def youtube_search(item):
logger.info()
itemlist = []
titulo = item.contentTitle
if item.extra != "youtube":
titulo += " trailer"
@@ -159,11 +174,10 @@ def youtube_search(item):
titulo = urllib.quote(titulo)
titulo = titulo.replace("%20", "+")
data = httptools.downloadpage("https://www.youtube.com/results?sp=EgIQAQ%253D%253D&q=" + titulo).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = """"thumbnails":\[\{"url":"(https://i.ytimg.com/vi[^"]+).*?"""
patron += """simpleText":"([^"]+).*?"""
patron += """simpleText":"[^"]+.*?simpleText":"([^"]+).*?"""
patron += """url":"([^"]+)"""
patron = 'thumbnails":\[\{"url":"(https://i.ytimg.com/vi[^"]+).*?'
patron += 'text":"([^"]+).*?'
patron += 'simpleText":"[^"]+.*?simpleText":"([^"]+).*?'
patron += 'url":"([^"]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, scrapedtitle, scrapedduration, scrapedurl in matches:
scrapedtitle = scrapedtitle.decode('utf8').encode('utf8')
@@ -173,18 +187,15 @@ def youtube_search(item):
url = urlparse.urljoin('https://www.youtube.com/', scrapedurl)
itemlist.append(item.clone(title=scrapedtitle, action="play", server="youtube", url=url,
thumbnail=scrapedthumbnail, text_color="white"))
next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"[^>]+><span class="yt-uix-button-content">'
'Siguiente')
if next_page != "":
next_page = urlparse.urljoin("https://www.youtube.com", next_page)
itemlist.append(item.clone(title=config.get_localized_string(70502), action="youtube_search", extra="youtube", page=next_page,
thumbnail="", text_color=""))
if not itemlist:
itemlist.append(item.clone(title=config.get_localized_string(70501) % titulo,
action="", thumbnail="", text_color=""))
if keyboard:
if item.contextual:
title = "[COLOR green]%s[/COLOR]"
@@ -192,7 +203,6 @@ def youtube_search(item):
title = "%s"
itemlist.append(item.clone(title=title % config.get_localized_string(70510), action="manual_search",
text_color="green", thumbnail="", extra="youtube"))
return itemlist
@@ -206,11 +216,11 @@ def abandomoviez_search(item):
titulo = item.contentTitle.decode('utf-8').encode('iso-8859-1')
post = urllib.urlencode({'query': titulo, 'searchby': '1', 'posicion': '1', 'orden': '1',
'anioin': item.year, 'anioout': item.year, 'orderby': '1'})
url = "http://www.abandomoviez.net/db/busca_titulo_advance.php"
url = "http://www.abandomoviez.net/db/busca_titulo.php?busco2=%s" %item.contentTitle
item.prefix = "db/"
data = httptools.downloadpage(url, post=post).data
if "No hemos encontrado ninguna" in data:
url = "http://www.abandomoviez.net/indie/busca_titulo_advance.php"
url = "http://www.abandomoviez.net/indie/busca_titulo.php?busco2=%s" %item.contentTitle
item.prefix = "indie/"
data = httptools.downloadpage(url, post=post).data.decode("iso-8859-1").encode('utf-8')
@@ -253,7 +263,6 @@ def abandomoviez_search(item):
def search_links_abando(item):
logger.info()
data = httptools.downloadpage(item.url).data
itemlist = []
if "Lo sentimos, no tenemos trailer" in data:
@@ -286,9 +295,8 @@ def search_links_abando(item):
if item.contextual:
i += 1
message += ".."
progreso.update(10 + (90 * i / len(matches)), message)
progreso.update(10 + (old_div(90 * i, len(matches))), message)
scrapedtitle = "[COLOR white]%s[/COLOR]" % scrapedtitle
data_trailer = httptools.downloadpage(scrapedurl).data
trailer_url = scrapertools.find_single_match(data_trailer, 'iframe.*?src="([^"]+)"')
trailer_url = trailer_url.replace("embed/", "watch?v=")
@@ -296,10 +304,8 @@ def search_links_abando(item):
thumbnail = "https://img.youtube.com/vi/%s/0.jpg" % code
itemlist.append(item.clone(title=scrapedtitle, url=trailer_url, server="youtube", action="play",
thumbnail=thumbnail, text_color="white"))
if item.contextual:
progreso.close()
if keyboard:
if item.contextual:
title = "[COLOR green]%s[/COLOR]"
@@ -342,7 +348,8 @@ def filmaffinity_search(item):
if not scrapedthumbnail.startswith("http"):
scrapedthumbnail = "http://www.filmaffinity.com" + scrapedthumbnail
scrapedurl = "http://www.filmaffinity.com/es/evideos.php?movie_id=%s" % id
scrapedtitle = unicode(scrapedtitle, encoding="utf-8", errors="ignore")
if PY3:
scrapedtitle = unicode(scrapedtitle, encoding="utf-8", errors="ignore")
scrapedtitle = scrapertools.htmlclean(scrapedtitle)
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, text_color="white",
action="search_links_filmaff", thumbnail=scrapedthumbnail))
@@ -389,7 +396,8 @@ def search_links_filmaff(item):
else:
server = ""
thumbnail = item.thumbnail
scrapedtitle = unicode(scrapedtitle, encoding="utf-8", errors="ignore")
if PY3:
scrapedtitle = unicode(scrapedtitle, encoding="utf-8", errors="ignore")
scrapedtitle = scrapertools.htmlclean(scrapedtitle)
scrapedtitle += " [" + server + "]"
if item.contextual:
@@ -413,15 +421,12 @@ def search_links_filmaff(item):
try:
import xbmcgui
import xbmc
class Select(xbmcgui.WindowXMLDialog):
def __init__(self, *args, **kwargs):
self.item = kwargs.get('item')
self.itemlist = kwargs.get('itemlist')
self.caption = kwargs.get('caption')
self.result = None
def onInit(self):
try:
self.control_list = self.getControl(6)
@@ -447,7 +452,6 @@ try:
self.control_list.reset()
self.control_list.addItems(self.items)
self.setFocus(self.control_list)
def onClick(self, id):
# Boton Cancelar y [X]
if id == 5:
@@ -461,7 +465,6 @@ try:
del window_select
else:
window_select[-1].doModal()
def onAction(self, action):
global window_select, result
if action == 92 or action == 110:
@@ -474,7 +477,6 @@ try:
del window_select
else:
window_select[-1].doModal()
try:
if (action == 7 or action == 100) and self.getFocusId() == 6:
selectitem = self.control_list.getSelectedItem()
@@ -489,7 +491,6 @@ try:
else:
result = None
self.result = None
elif item.action == "play" and not self.item.windowed:
for window in window_select:
window.close()

View File

@@ -11,10 +11,10 @@ from core import filetools
from core import httptools
from core import jsontools
from core import scrapertools
from core import trakt_tools
from core.item import Item
from core.support import typo, thumb
from core.support import typo
from core.tmdb import Tmdb
from core import trakt_tools
from platformcode import config, logger
from platformcode import platformtools
@@ -124,13 +124,12 @@ def search_(item):
def busqueda(item):
logger.info()
cat = [item.extra.replace("tv", "serie")]
new_item = Item()
new_item.extra = item.contentTitle.replace("+", " ")
new_item.category = item.extra
new_item = Item(title=item.contentTitle, text=item.contentTitle.replace("+", " "), mode=item.contentType,
infoLabels=item.infoLabels)
from specials import search
return search.do_search(new_item, cat)
return search.channel_search(new_item)
def tmdb(item):
@@ -479,8 +478,10 @@ def detalles(item):
# Si viene de seccion imdb
if not item.infoLabels["tmdb_id"]:
headers = [['Accept-Language', langi]]
data = httptools.downloadpage("http://www.imdb.com/title/" + item.infoLabels['imdb_id'], headers=headers,
replace_headers=True).data
#data = httptools.downloadpage("http://www.imdb.com/title/" + item.infoLabels['imdb_id'], headers=headers,
# replace_headers=True).data
data = httptools.downloadpage("http://www.imdb.com/title/" + item.infoLabels['imdb_id'], headers=headers).data
pics = scrapertools.find_single_match(data, 'showAllVidsAndPics.*?href=".*?(tt\d+)')
# Imágenes imdb
if pics:
@@ -568,7 +569,7 @@ def detalles(item):
post = "u=%s&proxy_formdata_server=nl&allowCookies=1&encodeURL=1&encodePage=0&stripObjects=0&stripJS=0&go=" % urllib.quote(
post_url)
while True:
response = httptools.downloadpage(url, post, follow_redirects=False)
response = httptools.downloadpage(url, post=post, follow_redirects=False)
if response.headers.get("location"):
url = response.headers["location"]
post = ""
@@ -888,10 +889,12 @@ def listado_imdb(item):
headers = [['Accept-Language', langi]]
if "www.imdb.com" in item.url:
data = httptools.downloadpage(item.url, headers=headers, replace_headers=True).data
#data = httptools.downloadpage(item.url, headers=headers, replace_headers=True).data
data = httptools.downloadpage(item.url, headers=headers).data
else:
url = 'http://www.imdb.com/search/title?' + item.url
data = httptools.downloadpage(url, headers=headers, replace_headers=True).data
#data = httptools.downloadpage(url, headers=headers, replace_headers=True).data
data = httptools.downloadpage(url, headers=headers).data
data = re.sub(r"\n|\r|\t|&nbsp;", "", data)
data = re.sub(r"\s{2}", " ", data)
@@ -1155,7 +1158,7 @@ def listado_fa(item):
if item.extra == "top":
if item.page_fa:
post = "from=%s" % item.page_fa
data = httptools.downloadpage(item.url, post).data
data = httptools.downloadpage(item.url, post=post).data
if item.total > item.page_fa:
item.page_fa += 30
else:
@@ -1521,7 +1524,7 @@ def detalles_fa(item):
post = "u=%s&proxy_formdata_server=nl&allowCookies=1&encodeURL=1&encodePage=0&stripObjects=0&stripJS=0&go=" % urllib.quote(
post_url)
while True:
response = httptools.downloadpage(url, post, follow_redirects=False)
response = httptools.downloadpage(url, post=post, follow_redirects=False)
if response.headers.get("location"):
url = response.headers["location"]
post = ""
@@ -1708,7 +1711,7 @@ def login_fa():
return True, ""
post = "postback=1&rp=&username=%s&password=%s&rememberme=on" % (user, password)
data = httptools.downloadpage("https://m.filmaffinity.com/%s/account.ajax.php?action=login" % langf, post).data
data = httptools.downloadpage("https://m.filmaffinity.com/%s/account.ajax.php?action=login" % langf, post=post).data
if "Invalid username" in data:
logger.error("Error en el login")
@@ -1716,7 +1719,7 @@ def login_fa():
else:
post = "name=user-menu&url=http://m.filmaffinity.com/%s/main.php" % langf
data = httptools.downloadpage("http://m.filmaffinity.com/%s/tpl.ajax.php?action=getTemplate" % langf,
post).data
post=post).data
userid = scrapertools.find_single_match(data, 'id-user=(\d+)')
if userid:
config.set_setting("userid", userid, "tvmoviedb")
@@ -1829,7 +1832,7 @@ def acciones_fa(item):
url = "http://filmaffinity.com/%s/movieslist.ajax.php" % langf
movieid = item.url.rsplit("=", 1)[1]
post = "action=%s&listId=%s&movieId=%s&itk=%s" % (item.accion, item.listid, movieid, item.itk)
data = jsontools.load(httptools.downloadpage(url, post).data)
data = jsontools.load(httptools.downloadpage(url, post=post).data)
if not item.folder:
import xbmc
return xbmc.executebuiltin("Container.Refresh")
@@ -1871,7 +1874,7 @@ def callback_voto(item, values):
item.action = "acciones_fa"
movieid = item.url.rsplit("=", 1)[1]
post = "id=%s&rating=%s&itk=%s&action=rate" % (movieid, item.voto, item.itk)
data = jsontools.load(httptools.downloadpage("http://filmaffinity.com/%s/ratingajax.php" % langf, post).data)
data = jsontools.load(httptools.downloadpage("http://filmaffinity.com/%s/ratingajax.php" % langf, post=post).data)
if not item.folder:
import xbmc
@@ -2040,7 +2043,8 @@ def fanartv(item):
% item.infoLabels['tmdb_id']
else:
url = "http://webservice.fanart.tv/v3/tv/%s?api_key=cab16e262d72fea6a6843d679aa10300" % id_search
data = jsontools.load(httptools.downloadpage(url, headers=headers, replace_headers=True).data)
#data = jsontools.load(httptools.downloadpage(url, headers=headers, replace_headers=True).data)
data = jsontools.load(httptools.downloadpage(url, headers=headers).data)
if data and not "error message" in data:
item.images['fanart.tv'] = {}
for key, value in data.items():
@@ -2117,12 +2121,14 @@ def acciones_trakt(item):
post = jsontools.dump(item.post)
url = "http://api-v2launch.trakt.tv/%s" % item.url
data = httptools.downloadpage(url, post, headers=headers, replace_headers=True)
#data = httptools.downloadpage(url, post, headers=headers, replace_headers=True)
data = httptools.downloadpage(url, post=post, headers=headers)
if data.code == "401":
trakt_tools.token_trakt(item.clone(extra="renew"))
token_auth = config.get_setting("token_trakt", "trakt")
headers[3][1] = "Bearer %s" % token_auth
data = httptools.downloadpage(url, post, headers=headers, replace_headers=True)
#data = httptools.downloadpage(url, post, headers=headers, replace_headers=True)
data = httptools.downloadpage(url, post=post, headers=headers)
data = data.data
if data and "sync" in item.url:
@@ -2458,7 +2464,7 @@ def detalles_mal(item):
try:
title_search = re.sub(r'[^0-9A-z]+', ' ', title_mal)
post = "busqueda=%s&button=Search" % urllib.quote(title_search)
data_music = httptools.downloadpage("http://www.freeanimemusic.org/song_search.php", post).data
data_music = httptools.downloadpage("http://www.freeanimemusic.org/song_search.php", post=post).data
if not "NO MATCHES IN YOUR SEARCH" in data_music:
itemlist.append(
item.clone(action="musica_anime", title=config.get_localized_string(70317),
@@ -3265,7 +3271,8 @@ def addlist_mal(item):
url = "https://myanimelist.net/ownlist/anime/add.json"
if item.lista:
url = "https://myanimelist.net/ownlist/anime/edit.json"
data = httptools.downloadpage(url, post=jsontools.dump(post), headers=headers_mal, replace_headers=True).data
#data = httptools.downloadpage(url, post=jsontools.dump(post), headers=headers_mal, replace_headers=True).data
data = httptools.downloadpage(url, post=jsontools.dump(post), headers=headers_mal).data
item.title = "En tu lista"
if config.is_xbmc():
import xbmc

View File

@@ -1,16 +1,20 @@
# -*- coding: utf-8 -*-
import os
import traceback
#from builtins import str
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
import os, traceback
from channelselector import get_thumb
from core import filetools
from core import scrapertools
from core import videolibrarytools
from core.item import Item
from lib import generictools
from platformcode import config, logger
from platformcode import platformtools
from lib import generictools
def mainlist(item):
@@ -50,12 +54,13 @@ def list_movies(item, silent=False):
xbmc_videolibrary.mark_content_as_watched_on_alfa(nfo_path)
except:
logger.error(traceback.format_exc())
head_nfo, new_item = videolibrarytools.read_nfo(nfo_path)
if not new_item: #Si no ha leído bien el .nfo, pasamos a la siguiente
logger.error('.nfo erroneo en ' + str(nfo_path))
continue
if len(new_item.library_urls) > 1:
multicanal = True
else:
@@ -176,8 +181,12 @@ def list_tvshows(item):
xbmc_videolibrary.mark_content_as_watched_on_alfa(tvshow_path)
except:
logger.error(traceback.format_exc())
head_nfo, item_tvshow = videolibrarytools.read_nfo(tvshow_path)
if not item_tvshow: #Si no ha leído bien el .nfo, pasamos a la siguiente
logger.error('.nfo erroneo en ' + str(tvshow_path))
continue
if len(item_tvshow.library_urls) > 1:
multicanal = True
@@ -309,7 +318,7 @@ def get_seasons(item):
itemlist = []
dict_temp = {}
raiz, carpetas_series, ficheros = filetools.walk(item.path).next()
raiz, carpetas_series, ficheros = next(filetools.walk(item.path))
# Menu contextual: Releer tvshow.nfo
head_nfo, item_nfo = videolibrarytools.read_nfo(item.nfo)
@@ -335,7 +344,7 @@ def get_seasons(item):
# if config.get_setting("no_pile_on_seasons", "videolibrary") == 1 and len(dict_temp_Visible) == 1: # Sólo si hay una temporada
# Creamos un item por cada temporada
for season, title in dict_temp.items():
for season, title in list(dict_temp.items()):
new_item = item.clone(action="get_episodes", title=title, contentSeason=season,
filtrar_season=True)
@@ -373,7 +382,7 @@ def get_episodes(item):
itemlist = []
# Obtenemos los archivos de los episodios
raiz, carpetas_series, ficheros = filetools.walk(item.path).next()
raiz, carpetas_series, ficheros = next(filetools.walk(item.path))
# Menu contextual: Releer tvshow.nfo
head_nfo, item_nfo = videolibrarytools.read_nfo(item.nfo)
@@ -444,22 +453,23 @@ def findvideos(item):
logger.debug("Unable to search for videos due to lack of parameters")
return []
content_title = filter(lambda c: c not in ":*?<>|\/", item.contentTitle.strip().lower())
#content_title = [c for c in item.contentTitle.strip().lower() if c not in ":*?<>|\/"]
content_title = "".join(c for c in item.contentTitle.strip().lower() if c not in ":*?<>|\/")
if item.contentType == 'movie':
item.strm_path = filetools.join(videolibrarytools.MOVIES_PATH, item.strm_path)
path_dir = os.path.dirname(item.strm_path)
item.nfo = filetools.join(path_dir, os.path.basename(path_dir) + ".nfo")
path_dir = filetools.dirname(item.strm_path)
item.nfo = filetools.join(path_dir, filetools.basename(path_dir) + ".nfo")
else:
item.strm_path = filetools.join(videolibrarytools.TVSHOWS_PATH, item.strm_path)
path_dir = os.path.dirname(item.strm_path)
path_dir = filetools.dirname(item.strm_path)
item.nfo = filetools.join(path_dir, 'tvshow.nfo')
for fd in filetools.listdir(path_dir):
if fd.endswith('.json'):
contenido, nom_canal = fd[:-6].split('[')
if (contenido.startswith(content_title) or item.contentType == 'movie') and nom_canal not in \
list_canales.keys():
list(list_canales.keys()):
list_canales[nom_canal] = filetools.join(path_dir, fd)
num_canales = len(list_canales)
@@ -467,7 +477,12 @@ def findvideos(item):
if 'downloads' in list_canales:
json_path = list_canales['downloads']
item_json = Item().fromjson(filetools.read(json_path))
###### Redirección al canal NewPct1.py si es un clone, o a otro canal y url si ha intervención judicial
try:
if item_json:
item_json, it, overwrite = generictools.redirect_clone_newpct1(item_json)
except:
logger.error(traceback.format_exc())
item_json.contentChannel = "local"
# Soporte para rutas relativas en descargas
if filetools.is_relative(item_json.url):
@@ -484,7 +499,7 @@ def findvideos(item):
filtro_canal = ''
if num_canales > 1 and config.get_setting("ask_channel", "videolibrary"):
opciones = [config.get_localized_string(70089) % k.capitalize() for k in list_canales.keys()]
opciones = [config.get_localized_string(70089) % k.capitalize() for k in list(list_canales.keys())]
opciones.insert(0, config.get_localized_string(70083))
if item_local:
opciones.append(item_local.title)
@@ -502,26 +517,36 @@ def findvideos(item):
filtro_canal = opciones[index].replace(config.get_localized_string(70078), "").strip()
itemlist = []
for nom_canal, json_path in list_canales.items():
for nom_canal, json_path in list(list_canales.items()):
if filtro_canal and filtro_canal != nom_canal.capitalize():
continue
item_canal = Item()
item_canal.channel = nom_canal
###### Redirección al canal NewPct1.py si es un clone, o a otro canal y url si ha intervención judicial
try:
item_canal, it, overwrite = generictools.redirect_clone_newpct1(item_canal)
except:
logger.error(traceback.format_exc())
nom_canal = item_canal.channel
# Importamos el canal de la parte seleccionada
try:
channel = __import__('channels.%s' % nom_canal, fromlist=["channels.%s" % nom_canal])
if nom_canal == 'community':
channel = __import__('specials.%s' % nom_canal, fromlist=["channels.%s" % nom_canal])
else:
channel = __import__('channels.%s' % nom_canal, fromlist=["channels.%s" % nom_canal])
except ImportError:
exec "import channels." + nom_canal + " as channel"
exec("import channels." + nom_canal + " as channel")
item_json = Item().fromjson(filetools.read(json_path))
###### Redirección al canal NewPct1.py si es un clone, o a otro canal y url si ha intervención judicial
try:
if item_json:
item_json, it, overwrite = generictools.redirect_clone_newpct1(item_json)
except:
logger.error(traceback.format_exc())
list_servers = []
try:
@@ -536,12 +561,21 @@ def findvideos(item):
item_json.contentChannel = 'videolibrary'
if hasattr(channel, 'findvideos'):
from core import servertools
if item_json.videolibray_emergency_urls:
del item_json.videolibray_emergency_urls
list_servers = getattr(channel, 'findvideos')(item_json)
list_servers = servertools.filter_servers(list_servers)
elif item_json.action == 'play':
from platformcode import platformtools
autoplay.set_status(True)
item_json.contentChannel = item_json.channel
item_json.channel = "videolibrary"
platformtools.play_video(item_json)
return ''
else:
from core import servertools
list_servers = servertools.find_video_items(item_json)
except Exception, ex:
except Exception as ex:
logger.error("The findvideos function for the channel %s failed" % nom_canal)
template = "An exception of type %s occured. Arguments:\n%r"
message = template % (type(ex).__name__, ex.args)
@@ -553,7 +587,6 @@ def findvideos(item):
for server in list_servers:
#if not server.action: # Ignorar/PERMITIR las etiquetas
# continue
server.contentChannel = server.channel
server.channel = "videolibrary"
server.nfo = item.nfo
@@ -619,6 +652,7 @@ def play(item):
v.title = config.get_localized_string(60036) % item.contentEpisodeNumber
v.thumbnail = item.thumbnail
v.contentThumbnail = item.thumbnail
v.contentChannel = item.contentChannel
return itemlist
@@ -662,22 +696,22 @@ def update_tvshow(item):
def verify_playcount_series(item, path):
logger.info()
"""
Este método revisa y repara el PlayCount de una serie que se haya desincronizado de la lista real de episodios en su carpeta. Las entradas de episodios, temporadas o serie que falten, son creado con la marca de "no visto". Posteriormente se envia a verificar los contadores de Temporadas y Serie
En el retorno envía de estado de True si se actualizado o False si no, normalmente por error. Con este estado, el caller puede actualizar el estado de la opción "verify_playcount" en "videolibrary.py". La intención de este método es la de dar una pasada que repare todos los errores y luego desactivarse. Se puede volver a activar en el menú de Videoteca de Alfa.
"""
#logger.debug("item:\n" + item.tostring('\n'))
#Si no ha hecho nunca la verificación, lo forzamos
estado = config.get_setting("verify_playcount", "videolibrary")
if not estado or estado == False:
estado = True #Si no ha hecho nunca la verificación, lo forzamos
else:
estado = False
if item.contentType == 'movie': #Esto es solo para Series
return (item, False)
if filetools.exists(path):
@@ -686,9 +720,9 @@ def verify_playcount_series(item, path):
if not hasattr(it, 'library_playcounts') or not it.library_playcounts: #Si el .nfo no tiene library_playcounts se lo creamos
logger.error('** It does not have PlayCount')
it.library_playcounts = {}
# Obtenemos los archivos de los episodios
raiz, carpetas_series, ficheros = filetools.walk(path).next()
raiz, carpetas_series, ficheros = next(filetools.walk(path))
# Crear un item en la lista para cada strm encontrado
estado_update = False
for i in ficheros:
@@ -714,7 +748,7 @@ def verify_playcount_series(item, path):
logger.error('** Estado de actualización: ' + str(estado) + ' / PlayCount: ' + str(it.library_playcounts))
estado = estado_update
# se comprueba que si todos los episodios de una temporada están marcados, se marque tb la temporada
for key, value in it.library_playcounts.iteritems():
for key, value in it.library_playcounts.items():
if key.startswith("season"):
season = scrapertools.find_single_match(key, 'season (\d+)') #Obtenemos en núm. de Temporada
it = check_season_playcount(it, season)
@@ -726,38 +760,38 @@ def verify_playcount_series(item, path):
def mark_content_as_watched2(item):
logger.info()
# logger.debug("item:\n" + item.tostring('\n'))
# logger.debug("item:\n" + item.tostring('\n'))
if filetools.exists(item.nfo):
head_nfo, it = videolibrarytools.read_nfo(item.nfo)
#logger.debug(it)
if item.contentType == 'movie':
name_file = os.path.splitext(os.path.basename(item.nfo))[0]
head_nfo, it = videolibrarytools.read_nfo(item.nfo)
#logger.debug(it)
name_file = ""
if item.contentType == 'movie' or item.contentType == 'tvshow':
name_file = os.path.splitext(filetools.basename(item.nfo))[0]
if name_file != 'tvshow' :
it.library_playcounts.update({name_file: item.playcount})
it.library_playcounts.update({name_file: item.playcount})
if item.contentType == 'episode' or item.contentType == 'tvshow' or item.contentType == 'list' or name_file == 'tvshow':
# elif item.contentType == 'episode':
name_file = os.path.splitext(os.path.basename(item.strm_path))[0]
name_file = os.path.splitext(filetools.basename(item.strm_path))[0]
num_season = name_file [0]
item.__setattr__('contentType', 'episode')
item.__setattr__('contentSeason', num_season)
#logger.debug(name_file)
item.__setattr__('contentType', 'episode')
item.__setattr__('contentSeason', num_season)
#logger.debug(name_file)
else:
name_file = item.contentTitle
# logger.debug(name_file)
# logger.debug(name_file)
if not hasattr(it, 'library_playcounts'):
it.library_playcounts = {}
it.library_playcounts.update({name_file: item.playcount})
it.library_playcounts.update({name_file: item.playcount})
# se comprueba que si todos los episodios de una temporada están marcados, se marque tb la temporada
if item.contentType != 'movie':
it = check_season_playcount(it, item.contentSeason)
#logger.debug(it)
#logger.debug(it)
# Guardamos los cambios en item.nfo
if filetools.write(item.nfo, head_nfo + it.tojson()):
@@ -785,7 +819,7 @@ def mark_content_as_watched(item):
head_nfo, it = videolibrarytools.read_nfo(item.nfo)
if item.contentType == 'movie':
name_file = os.path.splitext(os.path.basename(item.nfo))[0]
name_file = os.path.splitext(filetools.basename(item.nfo))[0]
elif item.contentType == 'episode':
name_file = "%sx%s" % (item.contentSeason, str(item.contentEpisodeNumber).zfill(2))
else:
@@ -826,7 +860,7 @@ def mark_season_as_watched(item):
it.library_playcounts = {}
# Obtenemos los archivos de los episodios
raiz, carpetas_series, ficheros = filetools.walk(item.path).next()
raiz, carpetas_series, ficheros = next(filetools.walk(item.path))
# Marcamos cada uno de los episodios encontrados de esta temporada
episodios_marcados = 0
@@ -839,14 +873,14 @@ def mark_season_as_watched(item):
season, episode = season_episode.split("x")
if int(item.contentSeason) == -1 or int(season) == int(item.contentSeason):
name_file = os.path.splitext(os.path.basename(i))[0]
name_file = os.path.splitext(filetools.basename(i))[0]
it.library_playcounts[name_file] = item.playcount
episodios_marcados += 1
if episodios_marcados:
if int(item.contentSeason) == -1:
# Añadimos todas las temporadas al diccionario item.library_playcounts
for k in it.library_playcounts.keys():
for k in list(it.library_playcounts.keys()):
if k.startswith("season"):
it.library_playcounts[k] = item.playcount
else:
@@ -882,7 +916,7 @@ def delete(item):
for file in filetools.listdir(_item.path):
if file.endswith(".strm") or file.endswith(".nfo") or file.endswith(".json")or file.endswith(".torrent"):
filetools.remove(filetools.join(_item.path, file))
raiz, carpeta_serie, ficheros = filetools.walk(_item.path).next()
raiz, carpeta_serie, ficheros = next(filetools.walk(_item.path))
if ficheros == []:
filetools.rmdir(_item.path)
@@ -908,7 +942,7 @@ def delete(item):
if item.multicanal:
# Obtener listado de canales
if item.dead == '':
opciones = [config.get_localized_string(70086) % k.capitalize() for k in item.library_urls.keys() if
opciones = [config.get_localized_string(70086) % k.capitalize() for k in list(item.library_urls.keys()) if
k != "downloads"]
opciones.insert(0, heading)
@@ -957,7 +991,7 @@ def check_season_playcount(item, season):
if season:
episodios_temporada = 0
episodios_vistos_temporada = 0
for key, value in item.library_playcounts.iteritems():
for key, value in item.library_playcounts.items():
if key.startswith("%sx" % season):
episodios_temporada += 1
if value > 0:
@@ -978,7 +1012,7 @@ def check_tvshow_playcount(item, season):
if season:
temporadas_serie = 0
temporadas_vistas_serie = 0
for key, value in item.library_playcounts.iteritems():
for key, value in item.library_playcounts.items():
#if key.startswith("season %s" % season):
if key.startswith("season" ):
temporadas_serie += 1