Merge pull request #3 from alfa-addon/master

update
alfa-jor committed 2017-08-07 16:36:00 +02:00, committed by GitHub

16 changed files with 103 additions and 476 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<addon id="plugin.video.alfa" name="Alfa" version="0.0.8" provider-name="Alfa Addon">
+<addon id="plugin.video.alfa" name="Alfa" version="0.1.0" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -18,18 +18,9 @@
<screenshot>resources/media/general/ss/4.jpg</screenshot>
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
-[COLOR green][B]Arreglos de canales[/B][/COLOR]
-[I]- Seriespapaya
-- Playmax - Gracias a d3v3l0p1n
-- Seriesblanco - Posible error en versiones anteriores a kodi 16 por https
-- AnimesHD
-- Cinetux[/I]
-[COLOR green][B]Servidor Nuevo[/B][/COLOR]
-[I]- Vidlox
-[COLOR green][B]Arreglos internos[/B][/COLOR]
-[I]- platformtools - posible solución de favoritos
-- videolibrarytools - solución a añadir películas de varios canales[/I]
-[COLOR blue]Gracias a devalls por su cooperación en esta release.[/COLOR]
+[I]- correccion de errores y fix por cambios en web[/I]
+[COLOR blue]Gracias a [COLOR yellow]j2331223[/COLOR] por su colaboración en esta versión.[/COLOR]
</news>
<description lang="es">Descripción en Español</description>
<summary lang="en">English summary</summary>

View File

@@ -5,7 +5,7 @@
"adult": false,
"language": "es",
"thumbnail": "https://s22.postimg.org/irnlwuizh/allcalidad1.png",
"bannermenu": "https://s22.postimg.org/9y1athlep/allcalidad2.png",
"banner": "https://s22.postimg.org/9y1athlep/allcalidad2.png",
"version": 1,
"changes": [
{
@@ -43,4 +43,4 @@
"visible": true
}
]
-}
\ No newline at end of file
+}
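
The rename matters because channel art is read back by key name: channeltools.get_channel_parameters() (used e.g. in yaske.py below) exposes this JSON as a plain dict, so a key the callers never look up is dead data — presumably why bannermenu became banner. A toy illustration, with the dict standing in for the parsed JSON:

    # toy stand-in for the parsed channel JSON
    parameters = {"banner": "https://s22.postimg.org/9y1athlep/allcalidad2.png"}
    print(parameters.get("bannermenu"))  # None - the old key is simply never read
    print(parameters.get("banner"))      # the URL the menu can actually show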

View File

@@ -116,7 +116,7 @@ def findvideos(item):
data = httptools.downloadpage(item.url).data
patron = '(?s)fmi(.*?)thead'
bloque = scrapertools.find_single_match(data, patron)
-match = scrapertools.find_multiple_matches(bloque, '(?is)iframe .*?src="([^"]+)')
+match = scrapertools.find_multiple_matches(bloque, '(?is)(?:iframe|script) .*?src="([^"]+)')
for url in match:
server = servertools.get_server_from_url(url)
titulo = "Ver en: " + server
@@ -126,6 +126,8 @@ def findvideos(item):
titulo = "[COLOR = yellow]Ver trailer: " + server + "[/COLOR]"
elif "directo" in server:
continue
elif "vimeo" in url:
url += "|" + "http://www.allcalidad.com"
itemlist.append(
Item(channel = item.channel,
action = "play",
@@ -139,7 +141,7 @@ def findvideos(item):
itemlist.append(Item(channel = item.channel))
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
text_color="magenta"))
-# Opción "Añadir esta película a la biblioteca de XBMC"
+# Opción "Añadir esta película a la biblioteca de KODI"
if item.extra != "library":
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
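
Two things change in this file: the pattern now also harvests <script ... src=...> embeds, and vimeo URLs get their referer glued on after a "|" (vimeo.py below splits it back off with page_url.split("|", 1) and sends it as a Referer header). A standalone sketch with invented HTML — find_multiple_matches is essentially re.findall:

    import re

    # invented sample markup
    bloque = '''
    <iframe src="https://powvideo.net/abc123"></iframe>
    <script src="https://player.vimeo.com/video/12345"></script>
    '''
    old = re.findall('(?is)iframe .*?src="([^"]+)', bloque)
    new = re.findall('(?is)(?:iframe|script) .*?src="([^"]+)', bloque)
    print(old)  # ['https://powvideo.net/abc123']
    print(new)  # ['https://powvideo.net/abc123', 'https://player.vimeo.com/video/12345']

    for url in new:
        if "vimeo" in url:
            url += "|" + "http://www.allcalidad.com"  # consumed later by the vimeo connector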

View File

@@ -1,48 +1,53 @@
{
    "active": true,
    "changes": [
        {
            "date": "18/07/2017",
            "description": "Versión incial"
        }
    ],
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "(?s)https://youtube.googleapis.com.*?docid=([^(?:&|\")]+)",
                "url": "http://docs.google.com/get_video_info?docid=\\1"
-           }
+           },
+           {
+               "pattern": "(?s)https://drive.google.com/file/d/(.*?)/preview",
+               "url": "http://docs.google.com/get_video_info?docid=\\1"
+           }
        ]
    },
    "free": true,
    "id": "gvideo",
    "name": "gvideo",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "Incluir en lista negra",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "Incluir en lista de favoritos",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "version": 1
}
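
The added pattern lets the generic find_videos machinery turn a Drive embed into a get_video_info request by substituting the captured file id into the url template. Roughly what that substitution does, with an invented file id:

    import re

    pattern = r"(?s)https://drive.google.com/file/d/(.*?)/preview"
    url_template = r"http://docs.google.com/get_video_info?docid=\1"
    sample = '<iframe src="https://drive.google.com/file/d/0B_FAKE_ID/preview">'  # invented
    print(re.sub(pattern, url_template, re.search(pattern, sample).group(0)))
    # -> http://docs.google.com/get_video_info?docid=0B_FAKE_ID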

plugin.video.alfa/channels/search.py (2 changes) Executable file → Normal file
View File

@@ -476,7 +476,7 @@ def clear_saved_searches(item):
def get_saved_searches():
-current_saved_searches_list = config.get_setting("saved_searches_list", "buscador")
+current_saved_searches_list = config.get_setting("saved_searches_list", "search")
if current_saved_searches_list is None:
saved_searches_list = []
else:
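
config.get_setting(name, channel) is namespaced by channel id, and this channel's id is "search"; reading under the old id presumably returned nothing, which would make saved searches come back empty. A toy stand-in for the settings store makes the failure mode obvious:

    # toy settings store keyed by (setting name, channel id)
    settings_store = {("saved_searches_list", "search"): ["matrix", "lost"]}

    def get_setting(name, channel=""):
        return settings_store.get((name, channel))

    print(get_setting("saved_searches_list", "buscador"))  # None - wrong namespace
    print(get_setting("saved_searches_list", "search"))    # ['matrix', 'lost']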

View File

@@ -190,6 +190,7 @@ def episodios(item):
episodes = re.findall("<tr.*?href=['\"](?P<url>[^'\"]+).+?>(?P<title>.+?)</a>.*?<td>(?P<flags>.*?)</td>", data,
re.MULTILINE | re.DOTALL)
for url, title, flags in episodes:
+title = title.replace("<span itemprop='episodeNumber'>", "").replace("</span>", "")
idiomas = " ".join(["[%s]" % IDIOMAS.get(language, "OVOS") for language in
re.findall("banderas/([^\.]+)", flags, re.MULTILINE)])
filter_lang = idiomas.replace("[", "").replace("]", "").split(" ")
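
The added line strips the episodeNumber span the site apparently now wraps around titles. An equivalent, slightly more defensive cleanup with a regex (sample title invented):

    import re

    title = "<span itemprop='episodeNumber'>1x01</span> Capítulo 1"  # invented sample
    print(re.sub(r"</?span[^>]*>", "", title))  # 1x01 Capítulo 1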

View File

@@ -5,7 +5,7 @@
"adult": false,
"language": "es",
"thumbnail": "https://s16.postimg.org/g4lzydrmd/vernovelasonline1.png",
"bannermenu": "https://s16.postimg.org/w44nhxno5/vernovelasonline2.png",
"banner": "https://s16.postimg.org/w44nhxno5/vernovelasonline2.png",
"version": 1,
"changes": [
{

View File

@@ -1,67 +0,0 @@
{
"id": "yaske",
"name": "Yaske",
"active": true,
"adult": false,
"language": "es",
"banner": "yaske.png",
"fanart": "https://github.com/master-1970/resources/raw/master/images/fanart/yaske.png",
"thumbnail": "yaske.png",
"version": 1,
"changes": [
{
"date": "27/06/17",
"description": "Desactivar por falta de contenidos"
},
{
"date": "04/06/17",
"description": "Desactivar por falta de contenidos"
},
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "01/02/17",
"description": "Añadir imagenes, sinopsis, etc..."
},
{
"date": "18/01/17",
"description": "Uso de httptools"
},
{
"date": "12/12/16",
"description": "Cambios en la web"
},
{
"date": "01/07/16",
"description": "Eliminado código innecesario."
},
{
"date": "29/04/16",
"description": "Adaptar a Novedades Peliculas e Infantiles"
}
],
"categories": [
"latino",
"movie"
],
"settings": [
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": false,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,256 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import channeltools
from core import config
from core import httptools
from core import logger
from core import scrapertoolsV2
from core import servertools
from core import tmdb
from core.item import Item
HOST = 'http://www.yaske.ro'
parameters = channeltools.get_channel_parameters('yaske')
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
color1, color2, color3 = ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E']
def mainlist(item):
logger.info()
itemlist = []
item.url = HOST
item.text_color = color2
item.fanart = fanart_host
thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/verdes/%s.png"
itemlist.append(item.clone(title="Novedades", action="peliculas", text_bold=True, viewcontent='movies',
url=HOST + "/ultimas-y-actualizadas",
thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Estrenos", action="peliculas", text_bold=True,
url=HOST + "/genre/premieres", thumbnail=thumbnail % 'estrenos'))
itemlist.append(item.clone(title="", folder=False))
itemlist.append(Item(channel=item.channel, title="Filtrar por:", fanart=fanart_host, folder=False,
text_color=color3, text_bold=True, thumbnail=thumbnail_host))
itemlist.append(item.clone(title=" Género", action="menu_buscar_contenido", text_color=color1, text_italic=True,
extra="genre", thumbnail=thumbnail % 'generos', viewmode="thumbnails"))
itemlist.append(item.clone(title=" Idioma", action="menu_buscar_contenido", text_color=color1, text_italic=True,
extra="audio", thumbnail=thumbnail % 'idiomas'))
itemlist.append(item.clone(title=" Calidad", action="menu_buscar_contenido", text_color=color1, text_italic=True,
extra="quality", thumbnail=thumbnail % 'calidad'))
itemlist.append(item.clone(title=" Año", action="menu_buscar_contenido", text_color=color1, text_italic=True,
extra="year", thumbnail=thumbnail % 'year'))
itemlist.append(item.clone(title="", folder=False))
itemlist.append(item.clone(title="Buscar por título", action="search", thumbnail=thumbnail % 'buscar'))
return itemlist
def search(item, texto):
logger.info()
itemlist = []
try:
# http://www.yaske.ro/search/?q=los+pitufos
item.url = HOST + "/search/?q=" + texto.replace(' ', '+')
item.extra = ""
itemlist.extend(peliculas(item))
if itemlist[-1].title == ">> Página siguiente":
item_pag = itemlist[-1]
itemlist = sorted(itemlist[:-1], key=lambda Item: Item.contentTitle)
itemlist.append(item_pag)
else:
itemlist = sorted(itemlist, key=lambda Item: Item.contentTitle)
return itemlist
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def newest(categoria):
logger.info()
item = Item()
try:
if categoria == 'peliculas':
item.url = HOST + "/ultimas-y-actualizadas"
elif categoria == 'infantiles':
item.url = HOST + "/search/?q=&genre%5B%5D=animation"
else:
return []
itemlist = peliculas(item)
if itemlist[-1].title == ">> Página siguiente":
itemlist.pop()
# Se captura la excepción, para no interrumpir al canal novedades si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def peliculas(item):
logger.info()
itemlist = []
url_next_page = ""
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<article class.*?'
patron += '<a href="([^"]+)">.*?'
patron += '<img src="([^"]+)".*?'
patron += '<aside class="item-control down">(.*?)</aside>.*?'
patron += '<small class="pull-right text-muted">([^<]+)</small>.*?'
patron += '<h2 class.*?>([^<]+)</h2>'
matches = re.compile(patron, re.DOTALL).findall(data)
# Paginacion
if item.next_page != 'b':
if len(matches) > 30:
url_next_page = item.url
matches = matches[:30]
next_page = 'b'
else:
matches = matches[30:]
next_page = 'a'
patron_next_page = 'Anteriores</a> <a href="([^"]+)" class="btn btn-default ".*?Siguiente'
matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
if len(matches_next_page) > 0:
url_next_page = matches_next_page[0]
for scrapedurl, scrapedthumbnail, idiomas, year, scrapedtitle in matches:
patronidiomas = "<img src='([^']+)'"
matchesidiomas = re.compile(patronidiomas, re.DOTALL).findall(idiomas)
idiomas_disponibles = []
for idioma in matchesidiomas:
if idioma.endswith("la_la.png"):
idiomas_disponibles.append("LAT")
elif idioma.endswith("en_en.png"):
idiomas_disponibles.append("VO")
elif idioma.endswith("en_es.png"):
idiomas_disponibles.append("VOSE")
elif idioma.endswith("es_es.png"):
idiomas_disponibles.append("ESP")
if idiomas_disponibles:
idiomas_disponibles = "[" + "/".join(idiomas_disponibles) + "]"
contentTitle = scrapertoolsV2.decodeHtmlentities(scrapedtitle.strip())
title = "%s %s" % (contentTitle, idiomas_disponibles)
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
thumbnail=scrapedthumbnail, contentTitle=contentTitle,
infoLabels={"year": year}, text_color=color1))
# Obtenemos los datos basicos de todas las peliculas mediante multihilos
tmdb.set_infoLabels(itemlist)
# Si es necesario añadir paginacion
if url_next_page:
itemlist.append(
Item(channel=item.channel, action="peliculas", title=">> Página siguiente", thumbnail=thumbnail_host,
url=url_next_page, next_page=next_page, folder=True, text_color=color3, text_bold=True))
return itemlist
def menu_buscar_contenido(item):
logger.info(item)
data = httptools.downloadpage(item.url).data
patron = '<select name="' + item.extra + '(.*?)</select>'
data = scrapertoolsV2.get_match(data, patron)
# Extrae las entradas
patron = "<option value='([^']+)'>([^<]+)</option>"
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
for scrapedvalue, scrapedtitle in matches:
thumbnail = ""
if item.extra == 'genre':
if scrapedtitle.strip() in ['Documental', 'Short', 'News']:
continue
url = HOST + "/search/?q=&genre%5B%5D=" + scrapedvalue
filename = scrapedtitle.lower().replace(' ', '%20')
if filename == "ciencia%20ficción":
filename = "ciencia%20ficcion"
thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/verdes/%s.png" \
% filename
elif item.extra == 'year':
url = HOST + "/search/?q=&year=" + scrapedvalue
thumbnail = item.thumbnail
else:
# http://www.yaske.ro/search/?q=&quality%5B%5D=c9
# http://www.yaske.ro/search/?q=&audio%5B%5D=es
url = HOST + "/search/?q=&" + item.extra + "%5B%5D=" + scrapedvalue
thumbnail = item.thumbnail
itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=url, text_color=color1,
thumbnail=thumbnail, contentType='movie', folder=True, viewmode="movie_with_plot"))
if item.extra in ['genre', 'audio', 'year']:
return sorted(itemlist, key=lambda i: i.title.lower(), reverse=item.extra == 'year')
else:
return itemlist
def findvideos(item):
logger.info()
itemlist = list()
sublist = list()
# Descarga la página
data = httptools.downloadpage(item.url).data
if not item.plot:
item.plot = scrapertoolsV2.find_single_match(data, '>Sinopsis</dt> <dd>([^<]+)</dd>')
item.plot = scrapertoolsV2.decodeHtmlentities(item.plot)
patron = '<option value="([^"]+)"[^>]+'
patron += '>([^<]+).*?</i>([^<]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, idioma, calidad in matches:
sublist.append(item.clone(action="play", url=url, folder=False, text_color=color1, quality=calidad.strip(),
language=idioma.strip()))
sublist = servertools.get_servers_itemlist(sublist, lambda i: "Ver en %s %s" % (i.server, i.quality), True)
# Añadir servidores encontrados, agrupandolos por idioma
for k in ["Español", "Latino", "Subtitulado", "Ingles"]:
lista_idioma = filter(lambda i: i.language == k, sublist)
if lista_idioma:
itemlist.append(Item(channel=item.channel, title=k, fanart=item.fanart, folder=False,
text_color=color2, text_bold=True, thumbnail=thumbnail_host))
itemlist.extend(lista_idioma)
# Insertar items "Buscar trailer" y "Añadir a la videoteca"
if itemlist and item.extra != "library":
title = "%s [Buscar trailer]" % (item.contentTitle)
itemlist.insert(0, item.clone(channel="trailertools", action="buscartrailer",
text_color=color3, title=title, viewmode="list"))
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir película a la videoteca",
action="add_pelicula_to_library", url=item.url, text_color="green",
contentTitle=item.contentTitle, extra="library", thumbnail=thumbnail_host))
return itemlist

View File

@@ -18,6 +18,9 @@ def is_adult(channel_name):
channel_parameters = get_channel_parameters(channel_name)
return channel_parameters["adult"]
+def is_enabled(channel_name):
+logger.info("channel_name=" + channel_name)
+return get_channel_parameters(channel_name)["active"] and get_channel_setting("enabled", channel = channel_name, default = True)
def get_channel_parameters(channel_name):
global dict_channels_parameters
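
The new is_enabled() answers "should this channel be used?" by AND-ing two flags: the static "active" field from the channel JSON and the user's per-channel "enabled" setting, defaulting to True when unset. A toy version with dicts in place of the JSON and the settings store:

    # toy stand-ins for the channel JSON and the user settings store
    channel_parameters = {"yaske": {"active": False}, "allcalidad": {"active": True}}
    user_settings = {}  # no per-channel overrides saved yet

    def is_enabled(channel_name):
        active = channel_parameters[channel_name]["active"]
        enabled = user_settings.get(channel_name, {}).get("enabled", True)
        return active and enabled

    print(is_enabled("yaske"))       # False - deactivated upstream
    print(is_enabled("allcalidad"))  # True - active and not disabled by the user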

View File

@@ -375,31 +375,20 @@ def update(folder_content=config.get_setting("folder_tvshows"), folder=""):
"""
logger.info(folder)
-if not folder:
-# Actualizar toda la coleccion
-while xbmc.getCondVisibility('Library.IsScanningVideo()'):
-xbmc.sleep(500)
-xbmc.executebuiltin('UpdateLibrary(video)')
-else:
-# Actualizar una sola carpeta en un hilo independiente
-def update_multi_threads(update_path, lock):
-lock.acquire()
-# logger.debug("%s\nINICIO" % update_path)
-payload = {"jsonrpc": "2.0",
-"method": "VideoLibrary.Scan",
-"params": {"directory": update_path}, "id": 1}
-data = get_data(payload)
-lock.release()
-# logger.debug("%s\nFIN data: %s" % (update_path, data))
+payload = {
+"jsonrpc": "2.0",
+"method": "VideoLibrary.Scan",
+"id": 1
+}
+if folder:
videolibrarypath = config.get_videolibrary_config_path()
if folder.endswith('/') or folder.endswith('\\'):
folder = folder[:-1]
update_path = None
if videolibrarypath.startswith("special:"):
if videolibrarypath.endswith('/'):
videolibrarypath = videolibrarypath[:-1]
@@ -407,9 +396,12 @@ def update(folder_content=config.get_setting("folder_tvshows"), folder=""):
else:
update_path = filetools.join(videolibrarypath, folder_content, folder) + "/"
-t = threading.Thread(target=update_multi_threads, args=[update_path, threading.Lock()])
-t.setDaemon(True)
-t.start()
+payload["params"] = {"directory": update_path}
+while xbmc.getCondVisibility('Library.IsScanningVideo()'):
+xbmc.sleep(500)
+data = get_data(payload)
def clean(mostrar_dialogo=False):
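
The rewrite drops the thread-per-folder approach: it waits for any scan already in progress, then fires a single VideoLibrary.Scan JSON-RPC call, scoped to one directory when folder is given. Inside Kodi, get_data() presumably boils down to xbmc.executeJSONRPC; a minimal sketch with an invented directory path:

    import json
    import xbmc  # only available inside Kodi

    payload = {"jsonrpc": "2.0", "method": "VideoLibrary.Scan", "id": 1}
    # directory below is an invented example path
    payload["params"] = {"directory": "special://profile/addon_data/plugin.video.alfa/library/CINE/"}

    while xbmc.getCondVisibility('Library.IsScanningVideo()'):
        xbmc.sleep(500)
    response = json.loads(xbmc.executeJSONRPC(json.dumps(payload)))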

View File

@@ -12,9 +12,8 @@ def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "Object not found" in data:
if "Object not found" in data or "longer exists on our servers" in data:
return False, "[Fastplay] El archivo no existe o ha sido borrado"
return True, ""
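
Same check, just with a second death marker. If these keep accruing, a sketch of how a marker list would keep the function to one line per new variant (names here are illustrative, not from the addon):

    NOT_FOUND_MARKERS = ("Object not found", "longer exists on our servers")

    def looks_deleted(data):
        # True if the page carries any known "file gone" marker
        return any(marker in data for marker in NOT_FOUND_MARKERS)

    print(looks_deleted("<h1>Object not found!</h1>"))  # True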

View File

@@ -1,40 +0,0 @@
# -*- coding: utf-8 -*-
import urllib
from core import httptools
from core import logger
from core import scrapertools
def test_video_exists(page_url):
response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url})
if "no+existe" in response.data:
return False, "[gvideo] El video no existe o ha sido borrado"
if "Se+ha+excedido+el" in response.data:
return False, "[gvideo] Se ha excedido el número de reproducciones permitidas"
return True, ""
def get_video_url(page_url, user="", password="", video_password=""):
video_urls = []
urls = []
response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url})
cookies = ""
cookie = response.headers["set-cookie"].split("HttpOnly, ")
for c in cookie:
cookies += c.split(";", 1)[0] + "; "
data = response.data.decode('unicode-escape')
data = urllib.unquote_plus(urllib.unquote_plus(data))
headers_string = "|Cookie=" + cookies
url_streams = scrapertools.find_single_match(data, 'url_encoded_fmt_stream_map=(.*)')
streams = scrapertools.find_multiple_matches(url_streams,
'itag=(\d+)&url=(.*?)(?:;.*?quality=.*?(?:,|&)|&quality=.*?(?:,|&))')
itags = {'18':'360p', '22':'720p', '34':'360p', '35':'480p', '37':'1080p', '43':'360p', '59':'480p'}
for itag, video_url in streams:
if not video_url in urls:
video_url += headers_string
video_urls.append([itags[itag], video_url])
urls.append(video_url)
video_urls.sort(key=lambda video_urls: int(video_urls[0].replace("p", "")))
return video_urls

plugin.video.alfa/servers/torrent.json (6 changes) Executable file → Normal file
View File

@@ -42,12 +42,16 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "(http:\\/\\/(?:.*?)\\.torrent)",
"pattern": "(http:\\/\\/(?:[a-zA-Z0-9]+)\\.torrent)",
"url": "\\1"
},
{
"pattern": "(magnet:\\?xt=urn:[^\"]+)",
"url": "\\1"
-}
+},
+{
+"pattern": "(http://tumejorjuego.com/descargar/index.php\\?link=[^\"]+)",
+"url": "\\1"
+}
]
},
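
The host part of the first pattern was tightened from a lazy wildcard to [a-zA-Z0-9]+ because .*? happily bridges quotes and tag boundaries, stitching a bogus "URL" out of two unrelated attributes. With invented HTML:

    import re

    html = '<a href="http://junk">x</a><a href="foo.torrent">dl</a>'  # invented sample
    print(re.findall(r'(http:\/\/(?:.*?)\.torrent)', html))
    # ['http://junk">x</a><a href="foo.torrent'] - crosses the markup
    print(re.findall(r'(http:\/\/(?:[a-zA-Z0-9]+)\.torrent)', html))
    # [] - only a plain alphanumeric label directly before .torrent matches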

View File

@@ -2,39 +2,31 @@
import re
from core import jsontools
from core import logger
from core import scrapertools
+from core import httptools
# Returns an array of possible video url's from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
headers = [['User-Agent', 'Mozilla/5.0']]
if "|" in page_url:
page_url, referer = page_url.split("|", 1)
headers.append(['Referer', referer])
-if not page_url.endswith("/config"):
-page_url = find_videos(page_url)[0][1]
+page_url = scrapertools.find_single_match(page_url, ".*?video/[0-9]+")
-video_urls = []
-data = scrapertools.downloadpage(page_url, headers=headers)
-json_object = jsontools.load(data)
-url_data = json_object['request']['files']['progressive']
-for data_media in url_data:
-media_url = data_media['url']
-title = "%s (%s) [vimeo]" % (data_media['mime'].replace("video/", "."), data_media['quality'])
-video_urls.append([title, media_url, data_media['height']])
+data = httptools.downloadpage(page_url, headers = headers).data
+logger.info("Intel11 %s" %data)
+patron = 'mime":"([^"]+)"'
+patron += '.*?url":"([^"]+)"'
+patron += '.*?quality":"([^"]+)"'
+match = scrapertools.find_multiple_matches(data, patron)
+for mime, media_url, calidad in match:
+title = "%s (%s) [vimeo]" % (mime.replace("video/", "."), calidad)
+video_urls.append([title, media_url, int(calidad.replace("p",""))])
video_urls.sort(key=lambda x: x[2])
-try:
-video_urls.insert(0, [".m3u8 (SD) [vimeo]", json_object['request']['files']['hls']['cdns']
-["akfire_interconnect"]["url"].replace("master.m3u8", "playlist.m3u8"), 0])
-except:
-pass
for video_url in video_urls:
video_url[2] = 0
logger.info("%s - %s" % (video_url[0], video_url[1]))
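
The new version regex-scrapes the mime/url/quality triples straight out of the player-config JSON text instead of parsing it with jsontools, and derives the sort key from the quality string. Applied to an invented fragment of that config:

    import re

    # invented fragment of vimeo's player config JSON
    data = '{"progressive":[{"mime":"video/mp4","url":"https://vod.example/360.mp4","quality":"360p"}]}'
    patron = 'mime":"([^"]+)".*?url":"([^"]+)".*?quality":"([^"]+)"'
    for mime, media_url, calidad in re.findall(patron, data):
        title = "%s (%s) [vimeo]" % (mime.replace("video/", "."), calidad)
        print(title, media_url, int(calidad.replace("p", "")))
    # .mp4 (360p) [vimeo] https://vod.example/360.mp4 360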

View File

@@ -11,6 +11,7 @@ import threading
from core import config
from core import filetools
from core import logger
+from core import channeltools
from core import videolibrarytools
from platformcode import platformtools
@@ -24,9 +25,9 @@ def update(path, p_dialog, i, t, serie, overwrite):
serie.channel = channel
serie.url = url
-channel_active = config.get_setting("active", channel=channel, default=False)
+channel_enabled = channeltools.is_enabled(channel)
-if channel_active:
+if channel_enabled:
heading = 'Actualizando videoteca....'
p_dialog.update(int(math.ceil((i + 1) * t)), heading, "%s: %s" % (serie.contentSerieName,