This commit is contained in:
alfa_addon_10
2017-08-09 01:26:26 +02:00
26 changed files with 722 additions and 957 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="0.1.1" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="1.5.1" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -18,12 +18,17 @@
<screenshot>resources/media/general/ss/4.jpg</screenshot>
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
[I]- cinetux
- bajui2
- seriesblanco[/I]
[COLOR blue]Gracias a [COLOR yellow]devalls[/COLOR] y [COLOR yellow]j2331223[/COLOR] por su colaboración en esta versión.[/COLOR]
[COLOR green][B]Canales arreglados[/B][/COLOR]
[I]- yaske
- pelifox
- ver-pelis[/I]
[COLOR green][B]Servidor arreglado[/B][/COLOR]
- gvideo
[COLOR green][B]Novedades y mejoras[/B][/COLOR]
[I] - arreglos internos
- Directos - Nueva sección de canales con enlaces directos: Gvideo y vimeo
- videoteca - problemas con unicode fixed
- compatibilidad con la biblioteca de pelisalacarta[/I]
</news>
<description lang="es">Descripción en Español</description>
<summary lang="en">English summary</summary>

View File

@@ -15,6 +15,7 @@
],
"categories": [
"movie",
"direct",
"latino"
],
"settings": [

View File

@@ -28,6 +28,7 @@
],
"categories": [
"latino",
"direct",
"movie"
],
"settings": [

View File

@@ -1,53 +0,0 @@
{
"active": true,
"changes": [
{
"date": "18/07/2017",
"description": "Versión incial"
}
],
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(?s)https://youtube.googleapis.com.*?docid=([^(?:&|\")]+)",
"url": "http://docs.google.com/get_video_info?docid=\\1"
},
{
"pattern": "(?s)https://drive.google.com/file/d/(.*?)/preview",
"url": "http://docs.google.com/get_video_info?docid=\\1"
}
]
},
"free": true,
"id": "gvideo",
"name": "gvideo",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"version": 1
}

View File

@@ -18,6 +18,7 @@
}
],
"categories": [
"direct",
"movie"
]
}
}

View File

@@ -20,6 +20,7 @@
"categories": [
"movie",
"latino",
"direct",
"VOS"
]
}
}

View File

@@ -23,6 +23,7 @@
],
"categories": [
"latino",
"direct",
"movie"
],
"settings": [
@@ -51,4 +52,4 @@
"visible": true
}
]
}
}

39
plugin.video.alfa/channels/pelisfox.py Executable file → Normal file
View File

@@ -9,6 +9,8 @@ from core import logger
from core import scrapertools
from core import tmdb
from core.item import Item
from core import servertools
tgenero = {"Drama": "https://s16.postimg.org/94sia332d/drama.png",
u"Accción": "https://s3.postimg.org/y6o9puflv/accion.png",
@@ -225,6 +227,7 @@ def findvideos(item):
logger.info()
itemlist = []
templist = []
video_list = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = '<li data-quality=(.*?) data-lang=(.*?)><a href=(.*?) title=.*?'
@@ -236,9 +239,9 @@ def findvideos(item):
language=lang,
url=url
))
logger.debug('templist: %s'%templist)
for videoitem in templist:
logger.debug('videoitem.language: %s'%videoitem.language)
data = httptools.downloadpage(videoitem.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
id = scrapertools.find_single_match(data, 'var _SOURCE =.*?source:(.*?),')
@@ -247,24 +250,26 @@ def findvideos(item):
sub = sub.replace('\\', '')
else:
sub = ''
new_url = 'http://iplay.one/api/embed?id=%s&token=8908d9f846&%s' % (id, sub)
new_url = 'https://onevideo.tv/api/player?key=90503e3de26d45e455b55e9dc54f015b3d1d4150&link' \
'=%s&srt=%s' % (id, sub)
data = httptools.downloadpage(new_url).data
patron = 'file":"(.*?)","label":"(.*?)","type":".*?"}'
matches = matches = re.compile(patron, re.DOTALL).findall(data)
url = scrapertools.find_single_match (data, '<iframe src="(.*?preview)"')
title = videoitem.contentTitle + ' (' + audio[videoitem.language] + ')'
logger.debug('url: %s'%url)
video_list.extend(servertools.find_video_items(data=url))
for urls in video_list:
if urls.language=='':
urls.language = videoitem.language
urls.title = item.title+'(%s) (%s)'%(urls.language, urls.server)
logger.debug('video_list: %s'%video_list)
#itemlist.append(item.clone(title= title, url = url, action = 'play', subtitle = sub))
for video_url in video_list:
video_url.channel = item.channel
video_url.action ='play'
for scrapedurl, quality in matches:
title = videoitem.contentTitle + ' (' + quality + ') (' + audio[videoitem.language] + ')'
url = scrapedurl.replace('\\', '')
itemlist.append(item.clone(title=title,
action='play',
url=url,
subtitle=sub,
server='directo',
quality=quality,
language='lang'
))
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
@@ -275,7 +280,7 @@ def findvideos(item):
extra="findvideos",
contentTitle=item.contentTitle
))
return itemlist
return video_list
def newest(categoria):

View File

@@ -17,6 +17,7 @@
"categories": [
"movie",
"tvshow",
"direct",
"vos"
],
"settings": [

5
plugin.video.alfa/channels/setting.py Executable file → Normal file
View File

@@ -873,8 +873,9 @@ def overwrite_tools(item):
p_dialog = platformtools.dialog_progress_bg('alfa', heading)
p_dialog.update(0, '')
import glob
show_list = glob.glob(filetools.join(videolibrarytools.TVSHOWS_PATH, u'/*/tvshow.nfo'))
show_list = []
for path, folders, files in filetools.walk(videolibrarytools.TVSHOWS_PATH):
show_list.extend([filetools.join(path, f) for f in files if f == "tvshow.nfo"])
if show_list:
t = float(100) / len(show_list)

View File

@@ -1,438 +0,0 @@
# -*- coding: utf-8 -*-
import re
import unicodedata
from threading import Thread
from core import config
from core import httptools
from core import logger
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
ACTION_SHOW_FULLSCREEN = 36
ACTION_GESTURE_SWIPE_LEFT = 511
ACTION_SELECT_ITEM = 7
ACTION_PREVIOUS_MENU = 10
ACTION_MOVE_LEFT = 1
ACTION_MOVE_RIGHT = 2
ACTION_MOVE_DOWN = 4
ACTION_MOVE_UP = 3
OPTION_PANEL = 6
OPTIONS_OK = 5
__modo_grafico__ = config.get_setting('modo_grafico', "ver-pelis")
# Para la busqueda en bing evitando baneos
def browser(url):
    """Fetch *url* with mechanize, configured to dodge Bing's bot checks.

    If the response contains the "img,divreturn" marker (Bing's block page),
    the same URL is retried through the ssl-proxy.my-addr.org web proxy.
    Returns the response body as a str (Python 2).
    """
    import mechanize
    # Use a mechanize Browser to work around problems with the Bing search.
    br = mechanize.Browser()
    # Browser options
    br.set_handle_equiv(False)
    br.set_handle_gzip(True)
    br.set_handle_redirect(True)
    br.set_handle_referer(False)
    br.set_handle_robots(False)
    # Follows refresh 0 but not hangs on refresh > 0
    br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
    # Want debugging messages?
    # br.set_debug_http(True)
    # br.set_debug_redirects(True)
    # br.set_debug_responses(True)
    # User-Agent (this is cheating, ok?)
    # br.addheaders = [('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')]
    # br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')]
    # Open some site, let's pick a random one, the first that pops in mind
    r = br.open(url)
    response = r.read()
    print response
    if "img,divreturn" in response:
        # Blocked: retry the request through the web proxy.
        r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url)
        print "prooooxy"
        response = r.read()
    return response
api_key = "2e2160006592024ba87ccdf78c28f49f"
api_fankey = "dffe90fba4d02c199ae7a9e71330c987"
def mainlist(item):
    """Channel main menu: movie listings plus the search entry.

    Returns a list of Item clones, one per menu entry.
    """
    logger.info()
    itemlist = []
    i = 0
    # NOTE(review): `global i` after `i = 0` in the same scope is a
    # SyntaxWarning (SyntaxError on Python 3) and has no useful effect here.
    global i
    itemlist.append(
        item.clone(title="[COLOR oldlace][B]Películas[/B][/COLOR]", action="scraper", url="http://ver-pelis.me/ver/",
                   thumbnail="http://imgur.com/36xALWc.png", fanart="http://imgur.com/53dhEU4.jpg",
                   contentType="movie"))
    itemlist.append(item.clone(title="[COLOR oldlace][B]Películas en Español[/B][/COLOR]", action="scraper",
                               url="http://ver-pelis.me/ver/espanol/", thumbnail="http://imgur.com/36xALWc.png",
                               fanart="http://imgur.com/53dhEU4.jpg", contentType="movie"))
    # The search entry clones the previous item to reuse its artwork.
    itemlist.append(itemlist[-1].clone(title="[COLOR orangered][B]Buscar[/B][/COLOR]", action="search",
                                       thumbnail="http://imgur.com/ebWyuGe.png", fanart="http://imgur.com/53dhEU4.jpg",
                                       contentType="tvshow"))
    return itemlist
def search(item, texto):
    """Run a site search for *texto* and return the scraped results.

    Returns None for an empty query (platform convention).
    """
    logger.info()
    consulta = texto.replace(" ", "+")
    item.url = "http://ver-pelis.me/ver/buscar?s=" + consulta
    item.extra = "search"
    if not consulta:
        return None
    return scraper(item)
def scraper(item):
    """List movies from item.url, 20 per page, with TMDB ratings appended.

    Pagination state is carried on the Item itself: item.i is the index of
    the first result not yet shown and item.next_page flags follow-up pages.
    """
    logger.info()
    itemlist = []
    url_next_page = ""
    # NOTE(review): the module-level `i` is declared but never used here;
    # the per-page cursor actually lives in item.i.
    global i
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron = scrapertools.find_multiple_matches(data,
                                                '<a class="thumb cluetip".*?href="([^"]+)".*?src="([^"]+)" alt="([^"]+)".*?"res">([^"]+)</span>')
    # More than 20 hits: show a 20-item window and queue a "next page" entry.
    if len(patron) > 20:
        if item.next_page != 20:
            # First page: take the first 20 results and reset the cursor.
            url_next_page = item.url
            patron = patron[:20]
            next_page = 20
            item.i = 0
        else:
            # Follow-up page: continue from the saved cursor.
            patron = patron[item.i:][:20]
            next_page = 20
            url_next_page = item.url
    for url, thumb, title, cuality in patron:
        title = re.sub(r"Imagen", "", title)
        # Strip accents/diacritics so titles are plain ASCII (Python 2).
        title = ''.join((c for c in unicodedata.normalize('NFD', unicode(title.decode('utf-8'))) if
                         unicodedata.category(c) != 'Mn')).encode("ascii", "ignore")
        titulo = "[COLOR floralwhite]" + title + "[/COLOR]" + " " + "[COLOR crimson][B]" + cuality + "[/B][/COLOR]"
        title = re.sub(r"!|\/.*", "", title).strip()
        if item.extra != "search":
            item.i += 1
        new_item = item.clone(action="findvideos", title=titulo, url=url, thumbnail=thumb, fulltitle=title,
                              contentTitle=title, contentType="movie", library=True)
        new_item.infoLabels['year'] = get_year(url)
        itemlist.append(new_item)
    ## Paginación (the next-page entry carries the cursor forward via i=item.i)
    if url_next_page:
        itemlist.append(item.clone(title="[COLOR crimson]Siguiente >>[/COLOR]", url=url_next_page, next_page=next_page,
                                   thumbnail="http://imgur.com/w3OMy2f.png", i=item.i))
    try:
        from core import tmdb
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
        # NOTE(review): this loop rebinds the `item` parameter; after it,
        # `item` is the last listed entry.
        for item in itemlist:
            if not "Siguiente >>" in item.title:
                if "0." in str(item.infoLabels['rating']):
                    item.infoLabels['rating'] = "[COLOR indianred]Sin puntuacíon[/COLOR]"
                else:
                    item.infoLabels['rating'] = "[COLOR orange]" + str(item.infoLabels['rating']) + "[/COLOR]"
                item.title = item.title + " " + str(item.infoLabels['rating'])
    except:
        # Best effort: the listing still works without TMDB metadata.
        pass
    for item_tmdb in itemlist:
        logger.info(str(item_tmdb.infoLabels['tmdb_id']))
    return itemlist
def findvideos(item):
    """Resolve the playable links for a movie page.

    Launches get_art() in a daemon thread to fetch artwork in parallel,
    posts the page's id/slug to the ajax endpoint, colour-tags each link's
    language/quality label, and returns one playable Item per link.  When
    the video library is enabled an "add to library" entry is appended; if
    no links are found a single "not found" placeholder is returned.
    """
    logger.info()
    itemlist = []
    # FIX: Thread(target=get_art(item)) ran get_art synchronously and passed
    # its return value (None) as the target; pass the callable plus args.
    th = Thread(target=get_art, args=[item])
    th.setDaemon(True)
    th.start()
    data = httptools.downloadpage(item.url).data
    data_post = scrapertools.find_single_match(data, "type: 'POST'.*?id: (.*?),slug: '(.*?)'")
    if data_post:
        post = 'id=' + data_post[0] + '&slug=' + data_post[1]
        data_info = httptools.downloadpage('http://ver-pelis.me/ajax/cargar_video.php', post=post).data
        enlaces = scrapertools.find_multiple_matches(data_info,
                                                     "</i> (\w+ \w+).*?<a onclick=\"load_player\('([^']+)','([^']+)', ([^']+),.*?REPRODUCIR\">([^']+)</a>")
        for server, id_enlace, name, number, idioma_calidad in enlaces:
            # Colour-tag the language half of the label.
            if "SUBTITULOS" in idioma_calidad and "P" not in idioma_calidad:
                idioma_calidad = idioma_calidad.replace("SUBTITULOS", "VO")
                idioma_calidad = idioma_calidad.replace("VO", "[COLOR orangered] VO[/COLOR]")
            elif "SUBTITULOS" in idioma_calidad and "P" in idioma_calidad:
                idioma_calidad = "[COLOR indianred] " + idioma_calidad + "[/COLOR]"
            elif "LATINO" in idioma_calidad:
                idioma_calidad = idioma_calidad.replace("LATINO", "[COLOR red]LATINO[/COLOR]")
            elif "Español" in idioma_calidad:
                idioma_calidad = idioma_calidad.replace("Español", "[COLOR crimson]ESPAÑOL[/COLOR]")
            # Colour-tag the quality half of the label.
            if "HD" in idioma_calidad:
                idioma_calidad = idioma_calidad.replace("HD", "[COLOR crimson] HD[/COLOR]")
            elif "720" in idioma_calidad:
                idioma_calidad = idioma_calidad.replace("720", "[COLOR firebrick] 720[/COLOR]")
            elif "TS" in idioma_calidad:
                idioma_calidad = idioma_calidad.replace("TS", "[COLOR brown] TS[/COLOR]")
            elif "CAM" in idioma_calidad:
                # FIX: "darkkakhi" is not a valid Kodi colour name; "darkkhaki" is.
                idioma_calidad = idioma_calidad.replace("CAM", "[COLOR darkkhaki] CAM[/COLOR]")
            url = "http://ver-pelis.me/ajax/video.php?id=" + id_enlace + "&slug=" + name + "&quality=" + number
            if "Ultra" not in server:
                server = "[COLOR cyan][B]" + server + "[/B][/COLOR]"
                extra = "yes"
            else:
                server = "[COLOR yellow][B]" + server + "[/B][/COLOR]"
                extra = ""
            title = server.strip() + " " + idioma_calidad
            itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, fanart=item.fanart,
                                 thumbnail=item.thumbnail, fulltitle=item.title, extra=extra, folder=True))
        if item.library and config.get_videolibrary_support() and len(itemlist) > 0:
            infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'],
                          'title': item.infoLabels['title']}
            itemlist.append(Item(channel=item.channel, title="Añadir esta película a la videoteca",
                                 action="add_pelicula_to_library", url=item.url, infoLabels=infoLabels,
                                 text_color="0xFFf7f7f7",
                                 thumbnail='http://imgur.com/gPyN1Tf.png'))
    else:
        itemlist.append(
            Item(channel=item.channel, action="", title="[COLOR red][B]Upps!..Archivo no encontrado...[/B][/COLOR]",
                 thumbnail=item.thumbnail))
    return itemlist
def play(item):
    """Resolve the final stream URL(s) for a chosen link and return playable Items."""
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r'\\', '', data)
    item.url = scrapertools.find_single_match(data, 'src="([^"]+)"')
    data = httptools.downloadpage(item.url).data
    if item.extra != "yes":
        # "Ultra" servers (extra == "" in findvideos): parse the JSON-style
        # source list embedded in the player page.
        patron = '"label":(.*?),.*?"type":"(.*?)",.*?"file":"(.*?)"'
        matches = re.compile(patron, re.DOTALL).findall(data)
        if not matches:
            # NOTE(review): this fallback pattern yields 2-tuples, but the
            # loop below unpacks 3 values — it raises ValueError if ever hit.
            patron = '"label":(.*?),.*?"file":"(.*?)"'
            matches = re.compile(patron, re.DOTALL).findall(data)
        for dato_a, type, dato_b in matches:
            # label/file order is not fixed; whichever field holds the URL wins.
            # NOTE(review): `calidad` is computed but never used; the title
            # always appends dato_a regardless of which field it was.
            if 'http' in dato_a:
                url = dato_a
                calidad = dato_b
            else:
                url = dato_b
                calidad = dato_a
            url = url.replace('\\', '')
            type = type.replace('\\', '')
            itemlist.append(
                Item(channel=item.channel, url=url, action="play", title=item.fulltitle + " (" + dato_a + ")",
                     folder=False))
    else:
        # extra == "yes" (set in findvideos for non-"Ultra" servers): follow
        # the JS redirect and let servertools identify the hoster.
        url = scrapertools.find_single_match(data, 'window.location="([^"]+)"')
        videolist = servertools.find_video_items(data=url)
        for video in videolist:
            itemlist.append(Item(channel=item.channel, url=video.url, server=video.server,
                                 title="[COLOR floralwhite][B]" + video.server + "[/B][/COLOR]", action="play",
                                 folder=False))
    return itemlist
def fanartv(item, id_tvdb, id, images=None):
    """Query fanart.tv for artwork for a movie or TV show.

    Uses the movie endpoint (keyed by TMDB *id*) for movies and the tv
    endpoint (keyed by *id_tvdb*) otherwise.  Returns a dict of artwork
    categories on success, or an empty list on failure.

    FIX: the original declared a mutable default (`images={}`), so results
    accumulated across calls sharing the same dict; a fresh dict is now
    created on each call (backward-compatible — callers passing nothing get
    the behavior the original clearly intended).
    """
    headers = [['Content-Type', 'application/json']]
    from core import jsontools
    if images is None:
        images = {}
    if item.contentType == "movie":
        url = "http://webservice.fanart.tv/v3/movies/%s?api_key=cab16e262d72fea6a6843d679aa10300" \
              % id
    else:
        url = "http://webservice.fanart.tv/v3/tv/%s?api_key=cab16e262d72fea6a6843d679aa10300" % id_tvdb
    try:
        data = jsontools.load(scrapertools.downloadpage(url, headers=headers))
        if data and not "error message" in data:
            # Keep only the artwork categories, dropping identification keys.
            for key, value in data.items():
                if key not in ["name", "tmdb_id", "imdb_id", "thetvdb_id"]:
                    images[key] = value
        else:
            # NOTE(review): failure paths return a list while success returns
            # a dict; callers only truth-test the result, so kept as-is.
            images = []
    except:
        images = []
    return images
def get_art(item):
    """Background worker: enrich *item* with fanart/thumbnail artwork.

    Resolves a TMDB id (searching TMDB by title/year, then falling back to
    scraping IMDb ids out of Bing results via browser()), collects TMDB
    image URLs into item.fanart / item.extra ("url1|url2"), then overlays
    fanart.tv artwork.  Mutates *item* in place; returns nothing.
    """
    logger.info()
    id = item.infoLabels['tmdb_id']
    check_fanart = item.infoLabels['fanart']
    if item.contentType != "movie":
        tipo_ps = "tv"
    else:
        tipo_ps = "movie"
    if not id:
        # No TMDB id yet: try progressively looser lookups.
        year = item.extra
        otmdb = tmdb.Tmdb(texto_buscado=item.fulltitle, year=year, tipo=tipo_ps)
        id = otmdb.result.get("id")
        if id == None:
            # Retry without the year constraint.
            otmdb = tmdb.Tmdb(texto_buscado=item.fulltitle, tipo=tipo_ps)
            id = otmdb.result.get("id")
            if id == None:
                # Fall back to scraping an IMDb id out of Bing search results.
                # NOTE(review): the "movie" branch queries "tv series" and the
                # non-movie branch does not — looks inverted; confirm.
                if item.contentType == "movie":
                    urlbing_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % (
                        item.fulltitle.replace(' ', '+'), year)
                    data = browser(urlbing_imdb)
                    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data)
                    subdata_imdb = scrapertools.find_single_match(data,
                                                                  '<li class="b_algo">(.*?)h="ID.*?<strong>.*?TV Series')
                else:
                    urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % (
                        item.fulltitle.replace(' ', '+'), year)
                    data = browser(urlbing_imdb)
                    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data)
                    subdata_imdb = scrapertools.find_single_match(data, '<li class="b_algo">(.*?)h="ID.*?<strong>')
                try:
                    imdb_id = scrapertools.get_match(subdata_imdb, '<a href=.*?http.*?imdb.com/title/(.*?)/.*?"')
                except:
                    try:
                        imdb_id = scrapertools.get_match(subdata_imdb,
                                                         '<a href=.*?http.*?imdb.com/.*?/title/(.*?)/.*?"')
                    except:
                        imdb_id = ""
                otmdb = tmdb.Tmdb(external_id=imdb_id, external_source="imdb_id", tipo=tipo_ps, idioma_busqueda="es")
                id = otmdb.result.get("id")
                if id == None:
                    # Last resort: retry Bing using only the parenthesised
                    # part of the title (often the original-language title).
                    if "(" in item.fulltitle:
                        title = scrapertools.find_single_match(item.fulltitle, '\(.*?\)')
                        if item.contentType != "movie":
                            urlbing_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % (
                                title.replace(' ', '+'), year)
                            data = browser(urlbing_imdb)
                            data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|http://ssl-proxy.my-addr.org/myaddrproxy.php/", "",
                                          data)
                            subdata_imdb = scrapertools.find_single_match(data,
                                                                          '<li class="b_algo">(.*?)h="ID.*?<strong>.*?TV Series')
                        else:
                            urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % (
                                title.replace(' ', '+'), year)
                            data = browser(urlbing_imdb)
                            data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|http://ssl-proxy.my-addr.org/myaddrproxy.php/", "",
                                          data)
                            subdata_imdb = scrapertools.find_single_match(data,
                                                                          '<li class="b_algo">(.*?)h="ID.*?<strong>')
                        try:
                            imdb_id = scrapertools.get_match(subdata_imdb,
                                                             '<a href=.*?http.*?imdb.com/title/(.*?)/.*?"')
                        except:
                            try:
                                imdb_id = scrapertools.get_match(subdata_imdb,
                                                                 '<a href=.*?http.*?imdb.com/.*?/title/(.*?)/.*?"')
                            except:
                                imdb_id = ""
                        otmdb = tmdb.Tmdb(external_id=imdb_id, external_source="imdb_id", tipo=tipo_ps,
                                          idioma_busqueda="es")
                        id = otmdb.result.get("id")
    if not id:
        fanart = item.fanart
        # NOTE(review): id_tvdb is only assigned on this path but is read
        # unconditionally below — NameError risk when an id was found.
        id_tvdb = ""
    imagenes = []
    itmdb = tmdb.Tmdb(id_Tmdb=id, tipo=tipo_ps)
    images = itmdb.result.get("images")
    if images:
        for key, value in images.iteritems():
            for detail in value:
                imagenes.append('http://image.tmdb.org/t/p/original' + detail["file_path"])
    # Pick a fanart plus two backdrops ("url1|url2" in item.extra) depending
    # on how many images were collected, avoiding the artwork the item
    # already had (check_fanart).
    if len(imagenes) >= 4:
        if imagenes[0] != check_fanart:
            item.fanart = imagenes[0]
        else:
            item.fanart = imagenes[1]
        if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
            item.extra = imagenes[1] + "|" + imagenes[2]
        else:
            if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
                item.extra = imagenes[1] + "|" + imagenes[3]
            elif imagenes[2] != check_fanart:
                item.extra = imagenes[2] + "|" + imagenes[3]
            else:
                item.extra = imagenes[3] + "|" + imagenes[3]
    elif len(imagenes) == 3:
        if imagenes[0] != check_fanart:
            item.fanart = imagenes[0]
        else:
            item.fanart = imagenes[1]
        if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
            item.extra = imagenes[1] + "|" + imagenes[2]
        else:
            if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
                item.extra = imagenes[0] + "|" + imagenes[1]
            elif imagenes[2] != check_fanart:
                item.extra = imagenes[1] + "|" + imagenes[2]
            else:
                item.extra = imagenes[1] + "|" + imagenes[1]
    elif len(imagenes) == 2:
        if imagenes[0] != check_fanart:
            item.fanart = imagenes[0]
        else:
            item.fanart = imagenes[1]
        if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
            item.extra = imagenes[0] + "|" + imagenes[1]
        else:
            item.extra = imagenes[1] + "|" + imagenes[0]
    elif len(imagenes) == 1:
        # NOTE(review): `imagenes` is a list; list + str raises TypeError —
        # probably meant imagenes[0].
        item.extra = imagenes + "|" + imagenes
    else:
        item.extra = item.fanart + "|" + item.fanart
    # Overlay fanart.tv artwork on top of what TMDB provided.
    images_fanarttv = fanartv(item, id_tvdb, id)
    if images_fanarttv:
        if item.contentType == "movie":
            if images_fanarttv.get("moviedisc"):
                item.thumbnail = images_fanarttv.get("moviedisc")[0].get("url")
            elif images_fanarttv.get("hdmovielogo"):
                item.thumbnail = images_fanarttv.get("hdmovielogo")[0].get("url")
            elif images_fanarttv.get("moviethumb"):
                item.thumbnail = images_fanarttv.get("moviethumb")[0].get("url")
            elif images_fanarttv.get("moviebanner"):
                # NOTE(review): assigns `thumbnail_` (trailing underscore) —
                # likely a typo for `thumbnail`.
                item.thumbnail_ = images_fanarttv.get("moviebanner")[0].get("url")
            else:
                item.thumbnail = item.thumbnail
        else:
            if images_fanarttv.get("hdtvlogo"):
                item.thumbnail = images_fanarttv.get("hdtvlogo")[0].get("url")
            elif images_fanarttv.get("clearlogo"):
                # NOTE(review): reads "hdmovielogo" inside the "clearlogo"
                # branch — probably meant "clearlogo".
                item.thumbnail = images_fanarttv.get("hdmovielogo")[0].get("url")
            if images_fanarttv.get("tvbanner"):
                item.extra = item.extra + "|" + images_fanarttv.get("tvbanner")[0].get("url")
            elif images_fanarttv.get("tvthumb"):
                item.extra = item.extra + "|" + images_fanarttv.get("tvthumb")[0].get("url")
            else:
                item.extra = item.extra + "|" + item.thumbnail
    else:
        item.extra = item.extra + "|" + item.thumbnail
def get_year(url):
    """Return the release year scraped from a movie page, or " " when absent."""
    contenido = httptools.downloadpage(url).data
    encontrado = scrapertools.find_single_match(contenido, '<p><strong>Año:</strong>(.*?)</p>')
    return encontrado if encontrado != "" else " "

View File

@@ -1,5 +1,5 @@
{
"id": "ver-pelis",
"id": "verpelis",
"name": "Ver-pelis",
"active": true,
"adult": false,
@@ -19,7 +19,8 @@
"categories": [
"torrent",
"movie",
"tvshow"
"tvshow",
"direct"
],
"settings": [
{

View File

@@ -0,0 +1,213 @@
# -*- coding: utf-8 -*-
import re
from core import config
from core import httptools
from core import logger
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
__modo_grafico__ = config.get_setting('modo_grafico', "ver-pelis")
host = "http://ver-pelis.me"
def mainlist(item):
    """Build the channel's main menu.

    Returns a list of Item clones, one per section, each pointing at the
    listing action ("scraper" / "categoria_anno") and the section URL.
    """
    logger.info()
    itemlist = []
    itemlist.append(
        item.clone(title="[COLOR oldlace]Películas[/COLOR]", action="scraper", url=host + "/ver/",
                   thumbnail="http://imgur.com/36xALWc.png", fanart="http://imgur.com/53dhEU4.jpg",
                   contentType="movie"))
    itemlist.append(item.clone(title="[COLOR oldlace]Películas por año[/COLOR]", action="categoria_anno",
                               url=host, thumbnail="http://imgur.com/36xALWc.png", extra="Por año",
                               fanart="http://imgur.com/53dhEU4.jpg", contentType="movie"))
    itemlist.append(item.clone(title="[COLOR oldlace]Películas en Latino[/COLOR]", action="scraper",
                               url=host + "/ver/latino/", thumbnail="http://imgur.com/36xALWc.png",
                               fanart="http://imgur.com/53dhEU4.jpg", contentType="movie"))
    # FIX: the "Español" and "Subtituladas" entries pointed at each other's
    # URLs ("/ver/espanol/" and "/ver/subtituladas/" were swapped).
    itemlist.append(item.clone(title="[COLOR oldlace]Películas en Español[/COLOR]", action="scraper",
                               url=host + "/ver/espanol/", thumbnail="http://imgur.com/36xALWc.png",
                               fanart="http://imgur.com/53dhEU4.jpg", contentType="movie"))
    itemlist.append(item.clone(title="[COLOR oldlace]Películas Subtituladas[/COLOR]", action="scraper",
                               url=host + "/ver/subtituladas/", thumbnail="http://imgur.com/36xALWc.png",
                               fanart="http://imgur.com/53dhEU4.jpg", contentType="movie"))
    itemlist.append(item.clone(title="[COLOR oldlace]Por Género[/COLOR]", action="categoria_anno",
                               url=host, thumbnail="http://imgur.com/36xALWc.png", extra="Categorias",
                               fanart="http://imgur.com/53dhEU4.jpg", contentType="movie"))
    # The search entry clones the previous item to reuse its artwork.
    itemlist.append(itemlist[-1].clone(title="[COLOR orangered]Buscar[/COLOR]", action="search",
                                       thumbnail="http://imgur.com/ebWyuGe.png", fanart="http://imgur.com/53dhEU4.jpg",
                                       contentType="tvshow"))
    return itemlist
def categoria_anno(item):
    """List the year/genre submenu extracted from the site's mobile menu.

    item.extra names the menu section to scrape ("Por año" or "Categorias");
    each link found becomes an Item that runs the "scraper" action.
    """
    logger.info()
    pagina = httptools.downloadpage(item.url).data
    seccion = scrapertools.find_single_match(pagina, 'mobile_menu.*?(%s.*?)</ul>' % item.extra)
    logger.info("Intel44 %s" % seccion)
    enlaces = scrapertools.find_multiple_matches(
        seccion, '(?is)<li.*?a href="([^"]+).*?title="[^"]+">([^<]+)')
    return [Item(channel=item.channel, action="scraper", title=texto, url=enlace)
            for enlace, texto in enlaces]
def search(item, texto):
    """Run a site search for *texto* and return the scraped results.

    Returns None for an empty query (platform convention).
    """
    logger.info()
    consulta = texto.replace(" ", "+")
    item.url = host + "/ver/buscar?s=" + consulta
    item.extra = "search"
    if not consulta:
        return None
    return scraper(item)
def scraper(item):
    """List movies from item.url, 20 per page, with TMDB ratings appended.

    Pagination state is carried on the Item itself: item.i is the index of
    the first result not yet shown and item.next_page flags follow-up pages.
    """
    logger.info()
    itemlist = []
    url_next_page = ""
    # NOTE(review): the module-level `i` is declared but never used here;
    # the per-page cursor actually lives in item.i.
    global i
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron = scrapertools.find_multiple_matches(data,
                                                '<a class="thumb cluetip".*?href="([^"]+)".*?src="([^"]+)" alt="([^"]+)".*?"res">([^"]+)</span>')
    # More than 20 hits: show a 20-item window and queue a "next page" entry.
    if len(patron) > 20:
        if item.next_page != 20:
            # First page: take the first 20 results and reset the cursor.
            url_next_page = item.url
            patron = patron[:20]
            next_page = 20
            item.i = 0
        else:
            # Follow-up page: continue from the saved cursor.
            patron = patron[item.i:][:20]
            next_page = 20
            url_next_page = item.url
    for url, thumb, title, cuality in patron:
        title = re.sub(r"Imagen", "", title)
        titulo = "[COLOR floralwhite]" + title + "[/COLOR]" + " " + "[COLOR crimson][B]" + cuality + "[/B][/COLOR]"
        title = re.sub(r"!|\/.*", "", title).strip()
        if item.extra != "search":
            item.i += 1
        new_item = item.clone(action="findvideos", title=titulo, url=url, thumbnail=thumb, fulltitle=title,
                              contentTitle=title, contentType="movie", library=True)
        new_item.infoLabels['year'] = get_year(url)
        itemlist.append(new_item)
    ## Paginación (the next-page entry carries the cursor forward via i=item.i)
    if url_next_page:
        itemlist.append(item.clone(title="[COLOR crimson]Siguiente >>[/COLOR]", url=url_next_page, next_page=next_page,
                                   thumbnail="http://imgur.com/w3OMy2f.png", i=item.i))
    try:
        from core import tmdb
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
        # NOTE(review): this loop rebinds the `item` parameter; after it,
        # `item` is the last listed entry.
        for item in itemlist:
            if not "Siguiente >>" in item.title:
                if "0." in str(item.infoLabels['rating']):
                    item.infoLabels['rating'] = "[COLOR indianred]Sin puntuacíon[/COLOR]"
                else:
                    item.infoLabels['rating'] = "[COLOR orange]" + str(item.infoLabels['rating']) + "[/COLOR]"
                item.title = item.title + " " + str(item.infoLabels['rating'])
    except:
        # Best effort: the listing still works without TMDB metadata.
        pass
    for item_tmdb in itemlist:
        logger.info(str(item_tmdb.infoLabels['tmdb_id']))
    return itemlist
def findvideos(item):
    """Resolve the playable links for a movie page.

    Posts the page's id/slug to the ajax endpoint, colour-tags each link's
    language/quality label, and returns one playable Item per link.  When
    the video library is enabled an "add to library" entry is appended; if
    no links are found a single "not found" placeholder is returned.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data_post = scrapertools.find_single_match(data, "type: 'POST'.*?id: (.*?),slug: '(.*?)'")
    if data_post:
        post = 'id=' + data_post[0] + '&slug=' + data_post[1]
        data_info = httptools.downloadpage(host + '/ajax/cargar_video.php', post=post).data
        enlaces = scrapertools.find_multiple_matches(data_info,
                                                     "</i> (\w+ \w+).*?<a onclick=\"load_player\('([^']+)','([^']+)', ([^']+),.*?REPRODUCIR\">([^']+)</a>")
        for server, id_enlace, name, number, idioma_calidad in enlaces:
            # Colour-tag the language half of the label.
            if "SUBTITULOS" in idioma_calidad and "P" not in idioma_calidad:
                idioma_calidad = idioma_calidad.replace("SUBTITULOS", "VO")
                idioma_calidad = idioma_calidad.replace("VO", "[COLOR orangered] VO[/COLOR]")
            elif "SUBTITULOS" in idioma_calidad and "P" in idioma_calidad:
                idioma_calidad = "[COLOR indianred] " + idioma_calidad + "[/COLOR]"
            elif "LATINO" in idioma_calidad:
                idioma_calidad = idioma_calidad.replace("LATINO", "[COLOR red]LATINO[/COLOR]")
            elif "Español" in idioma_calidad:
                idioma_calidad = idioma_calidad.replace("Español", "[COLOR crimson]ESPAÑOL[/COLOR]")
            # Colour-tag the quality half of the label.
            if "HD" in idioma_calidad:
                idioma_calidad = idioma_calidad.replace("HD", "[COLOR crimson] HD[/COLOR]")
            elif "720" in idioma_calidad:
                idioma_calidad = idioma_calidad.replace("720", "[COLOR firebrick] 720[/COLOR]")
            elif "TS" in idioma_calidad:
                idioma_calidad = idioma_calidad.replace("TS", "[COLOR brown] TS[/COLOR]")
            elif "CAM" in idioma_calidad:
                # FIX: "darkkakhi" is not a valid Kodi colour name; "darkkhaki" is.
                idioma_calidad = idioma_calidad.replace("CAM", "[COLOR darkkhaki] CAM[/COLOR]")
            url = host + "/ajax/video.php?id=" + id_enlace + "&slug=" + name + "&quality=" + number
            if "Ultra" not in server:
                server = "[COLOR cyan][B]" + server + "[/B][/COLOR]"
                extra = ""
            else:
                # extra == "yes" tells play() to follow one extra redirect hop.
                server = "[COLOR yellow][B]" + server + "[/B][/COLOR]"
                extra = "yes"
            title = server.strip() + " " + idioma_calidad
            itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, fanart=item.fanart,
                                 thumbnail=item.thumbnail, fulltitle=item.title, extra=extra, folder=True))
        if item.library and config.get_videolibrary_support() and len(itemlist) > 0:
            infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'],
                          'title': item.infoLabels['title']}
            itemlist.append(Item(channel=item.channel, title="Añadir esta película a la videoteca",
                                 action="add_pelicula_to_library", url=item.url, infoLabels=infoLabels,
                                 text_color="0xFFf7f7f7",
                                 thumbnail='http://imgur.com/gPyN1Tf.png'))
    else:
        itemlist.append(
            Item(channel=item.channel, action="", title="[COLOR red][B]Upps!..Archivo no encontrado...[/B][/COLOR]",
                 thumbnail=item.thumbnail))
    return itemlist
def play(item):
    """Follow the intermediate player pages and return the playable Items."""
    raw = httptools.downloadpage(item.url).data
    raw = re.sub(r'\\', '', raw)
    item.url = scrapertools.find_single_match(raw, 'src="([^"]+)"')
    raw = httptools.downloadpage(item.url).data
    destino = scrapertools.find_single_match(raw, 'window.location="([^"]+)"')
    if item.extra == "yes":
        # "Ultra" servers (extra set in findvideos) need one more hop: the
        # redirect page embeds the real player in an iframe.
        raw = httptools.downloadpage(destino).data
        destino = scrapertools.find_single_match(raw, '(?is)iframe src="([^"]+)"')
    return [Item(channel=item.channel, url=video.url, server=video.server,
                 title="[COLOR floralwhite][B]" + video.server + "[/B][/COLOR]", action="play",
                 folder=False)
            for video in servertools.find_video_items(data=destino)]
def get_year(url):
    """Return the release year scraped from a movie page, or " " when absent."""
    pagina = httptools.downloadpage(url).data
    anno = scrapertools.find_single_match(pagina, '<p><strong>Año:</strong>(.*?)</p>')
    return anno if anno != "" else " "

323
plugin.video.alfa/channels/videolibrary.py Executable file → Normal file
View File

@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
import glob
import os
from core import config
@@ -36,53 +35,55 @@ def list_movies(item):
logger.info()
itemlist = []
for f in glob.glob(filetools.join(videolibrarytools.MOVIES_PATH, u'/*/*.nfo')):
nfo_path = f
head_nfo, new_item = videolibrarytools.read_nfo(nfo_path)
for raiz, subcarpetas, ficheros in filetools.walk(videolibrarytools.MOVIES_PATH):
for f in ficheros:
if f.endswith(".nfo"):
nfo_path = filetools.join(raiz, f)
head_nfo, new_item = videolibrarytools.read_nfo(nfo_path)
new_item.nfo = nfo_path
new_item.path = filetools.dirname(f)
new_item.thumbnail = new_item.contentThumbnail
new_item.text_color = "blue"
new_item.nfo = nfo_path
new_item.path = raiz
new_item.thumbnail = new_item.contentThumbnail
new_item.text_color = "blue"
if not filetools.exists(filetools.join(videolibrarytools.MOVIES_PATH, new_item.strm_path)):
# Si se ha eliminado el strm desde la bilbioteca de kodi, no mostrarlo
continue
if not filetools.exists(filetools.join(new_item.path, filetools.basename(new_item.strm_path))):
# Si se ha eliminado el strm desde la bilbioteca de kodi, no mostrarlo
continue
# Menu contextual: Marcar como visto/no visto
visto = new_item.library_playcounts.get(os.path.splitext(f)[0], 0)
new_item.infoLabels["playcount"] = visto
if visto > 0:
texto_visto = "Marcar película como no vista"
contador = 0
else:
texto_visto = "Marcar película como vista"
contador = 1
# Menu contextual: Marcar como visto/no visto
visto = new_item.library_playcounts.get(os.path.splitext(f)[0], 0)
new_item.infoLabels["playcount"] = visto
if visto > 0:
texto_visto = "Marcar película como no vista"
contador = 0
else:
texto_visto = "Marcar película como vista"
contador = 1
# Menu contextual: Eliminar serie/canal
num_canales = len(new_item.library_urls)
if "downloads" in new_item.library_urls:
num_canales -= 1
if num_canales > 1:
texto_eliminar = "Eliminar película/canal"
multicanal = True
else:
texto_eliminar = "Eliminar esta película"
multicanal = False
# Menu contextual: Eliminar serie/canal
num_canales = len(new_item.library_urls)
if "downloads" in new_item.library_urls:
num_canales -= 1
if num_canales > 1:
texto_eliminar = "Eliminar película/canal"
multicanal = True
else:
texto_eliminar = "Eliminar esta película"
multicanal = False
new_item.context = [{"title": texto_visto,
"action": "mark_content_as_watched",
"channel": "videolibrary",
"playcount": contador},
{"title": texto_eliminar,
"action": "delete",
"channel": "videolibrary",
"multicanal": multicanal}]
# ,{"title": "Cambiar contenido (PENDIENTE)",
# "action": "",
# "channel": "videolibrary"}]
# logger.debug("new_item: " + new_item.tostring('\n'))
itemlist.append(new_item)
new_item.context = [{"title": texto_visto,
"action": "mark_content_as_watched",
"channel": "videolibrary",
"playcount": contador},
{"title": texto_eliminar,
"action": "delete",
"channel": "videolibrary",
"multicanal": multicanal}]
# ,{"title": "Cambiar contenido (PENDIENTE)",
# "action": "",
# "channel": "videolibrary"}]
# logger.debug("new_item: " + new_item.tostring('\n'))
itemlist.append(new_item)
return sorted(itemlist, key=lambda it: it.title.lower())
@@ -92,66 +93,68 @@ def list_tvshows(item):
itemlist = []
# Obtenemos todos los tvshow.nfo de la videoteca de SERIES recursivamente
for f in glob.glob(filetools.join(videolibrarytools.TVSHOWS_PATH, u'/*/tvshow.nfo')):
# logger.debug("file es %s" % f)
for raiz, subcarpetas, ficheros in filetools.walk(videolibrarytools.TVSHOWS_PATH):
for f in ficheros:
if f == "tvshow.nfo":
tvshow_path = filetools.join(raiz, f)
# logger.debug(tvshow_path)
head_nfo, item_tvshow = videolibrarytools.read_nfo(tvshow_path)
item_tvshow.title = item_tvshow.contentTitle
item_tvshow.path = raiz
item_tvshow.nfo = tvshow_path
head_nfo, item_tvshow = videolibrarytools.read_nfo(f)
item_tvshow.title = item_tvshow.contentTitle
item_tvshow.path = filetools.join(videolibrarytools.TVSHOWS_PATH, item_tvshow.path)
item_tvshow.nfo = f
# Menu contextual: Marcar como visto/no visto
visto = item_tvshow.library_playcounts.get(item_tvshow.contentTitle, 0)
item_tvshow.infoLabels["playcount"] = visto
if visto > 0:
texto_visto = "Marcar serie como no vista"
contador = 0
else:
texto_visto = "Marcar serie como vista"
contador = 1
# Menu contextual: Marcar como visto/no visto
visto = item_tvshow.library_playcounts.get(item_tvshow.contentTitle, 0)
item_tvshow.infoLabels["playcount"] = visto
if visto > 0:
texto_visto = "Marcar serie como no vista"
contador = 0
else:
texto_visto = "Marcar serie como vista"
contador = 1
# Menu contextual: Buscar automáticamente nuevos episodios o no
if item_tvshow.active and int(item_tvshow.active) > 0:
texto_update = "Buscar automáticamente nuevos episodios: Desactivar"
value = 0
item_tvshow.text_color = "green"
else:
texto_update = "Buscar automáticamente nuevos episodios: Activar"
value = 1
item_tvshow.text_color = "0xFFDF7401"
# Menu contextual: Buscar automáticamente nuevos episodios o no
if item_tvshow.active and int(item_tvshow.active) > 0:
texto_update = "Buscar automáticamente nuevos episodios: Desactivar"
value = 0
item_tvshow.text_color = "green"
else:
texto_update = "Buscar automáticamente nuevos episodios: Activar"
value = 1
item_tvshow.text_color = "0xFFDF7401"
# Menu contextual: Eliminar serie/canal
num_canales = len(item_tvshow.library_urls)
if "downloads" in item_tvshow.library_urls:
num_canales -= 1
if num_canales > 1:
texto_eliminar = "Eliminar serie/canal"
multicanal = True
else:
texto_eliminar = "Eliminar esta serie"
multicanal = False
# Menu contextual: Eliminar serie/canal
num_canales = len(item_tvshow.library_urls)
if "downloads" in item_tvshow.library_urls:
num_canales -= 1
if num_canales > 1:
texto_eliminar = "Eliminar serie/canal"
multicanal = True
else:
texto_eliminar = "Eliminar esta serie"
multicanal = False
item_tvshow.context = [{"title": texto_visto,
"action": "mark_content_as_watched",
"channel": "videolibrary",
"playcount": contador},
{"title": texto_update,
"action": "mark_tvshow_as_updatable",
"channel": "videolibrary",
"active": value},
{"title": texto_eliminar,
"action": "delete",
"channel": "videolibrary",
"multicanal": multicanal},
{"title": "Buscar nuevos episodios ahora",
"action": "update_tvshow",
"channel": "videolibrary"}]
# ,{"title": "Cambiar contenido (PENDIENTE)",
# "action": "",
# "channel": "videolibrary"}]
item_tvshow.context = [{"title": texto_visto,
"action": "mark_content_as_watched",
"channel": "videolibrary",
"playcount": contador},
{"title": texto_update,
"action": "mark_tvshow_as_updatable",
"channel": "videolibrary",
"active": value},
{"title": texto_eliminar,
"action": "delete",
"channel": "videolibrary",
"multicanal": multicanal},
{"title": "Buscar nuevos episodios ahora",
"action": "update_tvshow",
"channel": "videolibrary"}]
# ,{"title": "Cambiar contenido (PENDIENTE)",
# "action": "",
# "channel": "videolibrary"}]
# logger.debug("item_tvshow:\n" + item_tvshow.tostring('\n'))
itemlist.append(item_tvshow)
# logger.debug("item_tvshow:\n" + item_tvshow.tostring('\n'))
itemlist.append(item_tvshow)
if itemlist:
itemlist = sorted(itemlist, key=lambda it: it.title.lower())
@@ -168,19 +171,18 @@ def get_seasons(item):
itemlist = []
dict_temp = {}
raiz, carpetas_series, ficheros = filetools.walk(item.path).next()
# Menu contextual: Releer tvshow.nfo
head_nfo, item_nfo = videolibrarytools.read_nfo(item.nfo)
# Miramos las temporadas que estén marcadas como vistas
if not hasattr(item_nfo, 'library_playcounts'):
item_nfo.library_playcounts = {}
if config.get_setting("no_pile_on_seasons", "videolibrary") == 2: # Siempre
return get_episodes(item)
for f in glob.glob1(item.path, u'*.json'):
season = f.split('x')[0]
dict_temp[season] = "Temporada %s" % season
for f in ficheros:
if f.endswith('.json'):
season = f.split('x')[0]
dict_temp[season] = "Temporada %s" % season
if config.get_setting("no_pile_on_seasons", "videolibrary") == 1 and len(dict_temp) == 1: # Sólo si hay una temporada
return get_episodes(item)
@@ -231,54 +233,58 @@ def get_episodes(item):
# logger.debug("item:\n" + item.tostring('\n'))
itemlist = []
# Obtenemos los archivos de los episodios
raiz, carpetas_series, ficheros = filetools.walk(item.path).next()
# Menu contextual: Releer tvshow.nfo
head_nfo, item_nfo = videolibrarytools.read_nfo(item.nfo)
# Crear un item en la lista para cada strm encontrado
for f in glob.glob1(item.path, u'*.strm'):
season_episode = scrapertools.get_season_and_episode(f)
if not season_episode:
# El fichero no incluye el numero de temporada y episodio
continue
season, episode = season_episode.split("x")
# Si hay q filtrar por temporada, ignoramos los capitulos de otras temporadas
if item.filtrar_season and int(season) != int(item.contentSeason):
continue
for i in ficheros:
if i.endswith('.strm'):
season_episode = scrapertools.get_season_and_episode(i)
if not season_episode:
# El fichero no incluye el numero de temporada y episodio
continue
season, episode = season_episode.split("x")
# Si hay q filtrar por temporada, ignoramos los capitulos de otras temporadas
if item.filtrar_season and int(season) != int(item.contentSeason):
continue
# Obtener los datos del season_episode.nfo
nfo_path = filetools.join(item.path, f).replace('.strm', '.nfo')
head_nfo, epi = videolibrarytools.read_nfo(nfo_path)
# Obtener los datos del season_episode.nfo
nfo_path = filetools.join(raiz, i).replace('.strm', '.nfo')
head_nfo, epi = videolibrarytools.read_nfo(nfo_path)
# Fijar el titulo del capitulo si es posible
if epi.contentTitle:
title_episodie = epi.contentTitle.strip()
else:
title_episodie = "Temporada %s Episodio %s" % \
(epi.contentSeason, str(epi.contentEpisodeNumber).zfill(2))
# Fijar el titulo del capitulo si es posible
if epi.contentTitle:
title_episodie = epi.contentTitle.strip()
else:
title_episodie = "Temporada %s Episodio %s" % \
(epi.contentSeason, str(epi.contentEpisodeNumber).zfill(2))
epi.contentTitle = "%sx%s" % (epi.contentSeason, str(epi.contentEpisodeNumber).zfill(2))
epi.title = "%sx%s - %s" % (epi.contentSeason, str(epi.contentEpisodeNumber).zfill(2), title_episodie)
epi.contentTitle = "%sx%s" % (epi.contentSeason, str(epi.contentEpisodeNumber).zfill(2))
epi.title = "%sx%s - %s" % (epi.contentSeason, str(epi.contentEpisodeNumber).zfill(2), title_episodie)
if item_nfo.library_filter_show:
epi.library_filter_show = item_nfo.library_filter_show
if item_nfo.library_filter_show:
epi.library_filter_show = item_nfo.library_filter_show
# Menu contextual: Marcar episodio como visto o no
visto = item_nfo.library_playcounts.get(season_episode, 0)
epi.infoLabels["playcount"] = visto
if visto > 0:
texto = "Marcar episodio como no visto"
value = 0
else:
texto = "Marcar episodio como visto"
value = 1
epi.context = [{"title": texto,
"action": "mark_content_as_watched",
"channel": "videolibrary",
"playcount": value,
"nfo": item.nfo}]
# Menu contextual: Marcar episodio como visto o no
visto = item_nfo.library_playcounts.get(season_episode, 0)
epi.infoLabels["playcount"] = visto
if visto > 0:
texto = "Marcar episodio como no visto"
value = 0
else:
texto = "Marcar episodio como visto"
value = 1
epi.context = [{"title": texto,
"action": "mark_content_as_watched",
"channel": "videolibrary",
"playcount": value,
"nfo": item.nfo}]
# logger.debug("epi:\n" + epi.tostring('\n'))
itemlist.append(epi)
# logger.debug("epi:\n" + epi.tostring('\n'))
itemlist.append(epi)
return sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))
@@ -530,25 +536,28 @@ def mark_season_as_watched(item):
# logger.debug("item:\n" + item.tostring('\n'))
# Obtener el diccionario de episodios marcados
tvshow_path = filetools.join(item.path, 'tvshow.nfo')
head_nfo, it = videolibrarytools.read_nfo(tvshow_path)
f = filetools.join(item.path, 'tvshow.nfo')
head_nfo, it = videolibrarytools.read_nfo(f)
if not hasattr(it, 'library_playcounts'):
it.library_playcounts = {}
# Obtenemos los archivos de los episodios
raiz, carpetas_series, ficheros = filetools.walk(item.path).next()
# Marcamos cada uno de los episodios encontrados de esta temporada
episodios_marcados = 0
for f in glob.glob1(item.path, u'*.strm'):
# if f.endswith(".strm"):
season_episode = scrapertools.get_season_and_episode(f)
if not season_episode:
# El fichero no incluye el numero de temporada y episodio
continue
season, episode = season_episode.split("x")
for i in ficheros:
if i.endswith(".strm"):
season_episode = scrapertools.get_season_and_episode(i)
if not season_episode:
# El fichero no incluye el numero de temporada y episodio
continue
season, episode = season_episode.split("x")
if int(item.contentSeason) == -1 or int(season) == int(item.contentSeason):
name_file = os.path.splitext(os.path.basename(f))[0]
it.library_playcounts[name_file] = item.playcount
episodios_marcados += 1
if int(item.contentSeason) == -1 or int(season) == int(item.contentSeason):
name_file = os.path.splitext(os.path.basename(f))[0]
it.library_playcounts[name_file] = item.playcount
episodios_marcados += 1
if episodios_marcados:
if int(item.contentSeason) == -1:
@@ -564,7 +573,7 @@ def mark_season_as_watched(item):
it = check_tvshow_playcount(it, item.contentSeason)
# Guardamos los cambios en tvshow.nfo
filetools.write(tvshow_path, head_nfo + it.tojson())
filetools.write(f, head_nfo + it.tojson())
item.infoLabels['playcount'] = item.playcount
if config.is_xbmc():

View File

@@ -44,6 +44,7 @@
],
"categories": [
"latino",
"direct",
"movie"
],
"settings": [

View File

@@ -1,256 +1,263 @@
# -*- coding: utf-8 -*-
import re
from core import channeltools
from core import config
from core import httptools
from core import logger
from core import scrapertoolsV2
from core import servertools
from core import tmdb
from core.item import Item
HOST = 'http://www.yaske.ro'
parameters = channeltools.get_channel_parameters('yaske')
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
color1, color2, color3 = ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E']
def mainlist(item):
logger.info()
itemlist = []
item.url = HOST
item.text_color = color2
item.fanart = fanart_host
thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/verdes/%s.png"
itemlist.append(item.clone(title="Novedades", action="peliculas", text_bold=True, viewcontent='movies',
url=HOST + "/ultimas-y-actualizadas",
thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Estrenos", action="peliculas", text_bold=True,
url=HOST + "/genre/premieres", thumbnail=thumbnail % 'estrenos'))
itemlist.append(item.clone(title="", folder=False))
itemlist.append(Item(channel=item.channel, title="Filtrar por:", fanart=fanart_host, folder=False,
text_color=color3, text_bold=True, thumbnail=thumbnail_host))
itemlist.append(item.clone(title=" Género", action="menu_buscar_contenido", text_color=color1, text_italic=True,
extra="genre", thumbnail=thumbnail % 'generos', viewmode="thumbnails"))
itemlist.append(item.clone(title=" Idioma", action="menu_buscar_contenido", text_color=color1, text_italic=True,
extra="audio", thumbnail=thumbnail % 'idiomas'))
itemlist.append(item.clone(title=" Calidad", action="menu_buscar_contenido", text_color=color1, text_italic=True,
extra="quality", thumbnail=thumbnail % 'calidad'))
itemlist.append(item.clone(title=" Año", action="menu_buscar_contenido", text_color=color1, text_italic=True,
extra="year", thumbnail=thumbnail % 'year'))
itemlist.append(item.clone(title="", folder=False))
itemlist.append(item.clone(title="Buscar por título", action="search", thumbnail=thumbnail % 'buscar'))
return itemlist
def search(item, texto):
logger.info()
itemlist = []
try:
# http://www.yaske.ro/search/?q=los+pitufos
item.url = HOST + "/search/?q=" + texto.replace(' ', '+')
item.extra = ""
itemlist.extend(peliculas(item))
if itemlist[-1].title == ">> Página siguiente":
item_pag = itemlist[-1]
itemlist = sorted(itemlist[:-1], key=lambda Item: Item.contentTitle)
itemlist.append(item_pag)
else:
itemlist = sorted(itemlist, key=lambda Item: Item.contentTitle)
return itemlist
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def newest(categoria):
logger.info()
item = Item()
try:
if categoria == 'peliculas':
item.url = HOST + "/ultimas-y-actualizadas"
elif categoria == 'infantiles':
item.url = HOST + "/search/?q=&genre%5B%5D=animation"
else:
return []
itemlist = peliculas(item)
if itemlist[-1].title == ">> Página siguiente":
itemlist.pop()
# Se captura la excepción, para no interrumpir al canal novedades si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def peliculas(item):
logger.info()
itemlist = []
url_next_page = ""
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<article class.*?'
patron += '<a href="([^"]+)">.*?'
patron += '<img src="([^"]+)".*?'
patron += '<aside class="item-control down">(.*?)</aside>.*?'
patron += '<small class="pull-right text-muted">([^<]+)</small>.*?'
patron += '<h2 class.*?>([^<]+)</h2>'
matches = re.compile(patron, re.DOTALL).findall(data)
# Paginacion
if item.next_page != 'b':
if len(matches) > 30:
url_next_page = item.url
matches = matches[:30]
next_page = 'b'
else:
matches = matches[30:]
next_page = 'a'
patron_next_page = 'Anteriores</a> <a href="([^"]+)" class="btn btn-default ".*?Siguiente'
matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
if len(matches_next_page) > 0:
url_next_page = matches_next_page[0]
for scrapedurl, scrapedthumbnail, idiomas, year, scrapedtitle in matches:
patronidiomas = "<img src='([^']+)'"
matchesidiomas = re.compile(patronidiomas, re.DOTALL).findall(idiomas)
idiomas_disponibles = []
for idioma in matchesidiomas:
if idioma.endswith("la_la.png"):
idiomas_disponibles.append("LAT")
elif idioma.endswith("en_en.png"):
idiomas_disponibles.append("VO")
elif idioma.endswith("en_es.png"):
idiomas_disponibles.append("VOSE")
elif idioma.endswith("es_es.png"):
idiomas_disponibles.append("ESP")
if idiomas_disponibles:
idiomas_disponibles = "[" + "/".join(idiomas_disponibles) + "]"
contentTitle = scrapertoolsV2.decodeHtmlentities(scrapedtitle.strip())
title = "%s %s" % (contentTitle, idiomas_disponibles)
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
thumbnail=scrapedthumbnail, contentTitle=contentTitle,
infoLabels={"year": year}, text_color=color1))
# Obtenemos los datos basicos de todas las peliculas mediante multihilos
tmdb.set_infoLabels(itemlist)
# Si es necesario añadir paginacion
if url_next_page:
itemlist.append(
Item(channel=item.channel, action="peliculas", title=">> Página siguiente", thumbnail=thumbnail_host,
url=url_next_page, next_page=next_page, folder=True, text_color=color3, text_bold=True))
return itemlist
def menu_buscar_contenido(item):
logger.info(item)
data = httptools.downloadpage(item.url).data
patron = '<select name="' + item.extra + '(.*?)</select>'
data = scrapertoolsV2.get_match(data, patron)
# Extrae las entradas
patron = "<option value='([^']+)'>([^<]+)</option>"
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
for scrapedvalue, scrapedtitle in matches:
thumbnail = ""
if item.extra == 'genre':
if scrapedtitle.strip() in ['Documental', 'Short', 'News']:
continue
url = HOST + "/search/?q=&genre%5B%5D=" + scrapedvalue
filename = scrapedtitle.lower().replace(' ', '%20')
if filename == "ciencia%20ficción":
filename = "ciencia%20ficcion"
thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/verdes/%s.png" \
% filename
elif item.extra == 'year':
url = HOST + "/search/?q=&year=" + scrapedvalue
thumbnail = item.thumbnail
else:
# http://www.yaske.ro/search/?q=&quality%5B%5D=c9
# http://www.yaske.ro/search/?q=&audio%5B%5D=es
url = HOST + "/search/?q=&" + item.extra + "%5B%5D=" + scrapedvalue
thumbnail = item.thumbnail
itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=url, text_color=color1,
thumbnail=thumbnail, contentType='movie', folder=True, viewmode="movie_with_plot"))
if item.extra in ['genre', 'audio', 'year']:
return sorted(itemlist, key=lambda i: i.title.lower(), reverse=item.extra == 'year')
else:
return itemlist
def findvideos(item):
logger.info()
itemlist = list()
sublist = list()
# Descarga la página
data = httptools.downloadpage(item.url).data
if not item.plot:
item.plot = scrapertoolsV2.find_single_match(data, '>Sinopsis</dt> <dd>([^<]+)</dd>')
item.plot = scrapertoolsV2.decodeHtmlentities(item.plot)
patron = '<option value="([^"]+)"[^>]+'
patron += '>([^<]+).*?</i>([^<]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, idioma, calidad in matches:
sublist.append(item.clone(action="play", url=url, folder=False, text_color=color1, quality=calidad.strip(),
language=idioma.strip()))
sublist = servertools.get_servers_itemlist(sublist, lambda i: "Ver en %s %s" % (i.server, i.quality), True)
# Añadir servidores encontrados, agrupandolos por idioma
for k in ["Español", "Latino", "Subtitulado", "Ingles"]:
lista_idioma = filter(lambda i: i.language == k, sublist)
if lista_idioma:
itemlist.append(Item(channel=item.channel, title=k, fanart=item.fanart, folder=False,
text_color=color2, text_bold=True, thumbnail=thumbnail_host))
itemlist.extend(lista_idioma)
# Insertar items "Buscar trailer" y "Añadir a la videoteca"
if itemlist and item.extra != "library":
title = "%s [Buscar trailer]" % (item.contentTitle)
itemlist.insert(0, item.clone(channel="trailertools", action="buscartrailer",
text_color=color3, title=title, viewmode="list"))
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir película a la videoteca",
action="add_pelicula_to_library", url=item.url, text_color="green",
contentTitle=item.contentTitle, extra="library", thumbnail=thumbnail_host))
return itemlist
# -*- coding: utf-8 -*-
import re
import base64
from core import channeltools
from core import config
from core import httptools
from core import logger
from core import scrapertoolsV2
from core import servertools
from core import tmdb
from core.item import Item
# Base URL of the scraped site; every relative path below is joined onto it.
HOST = 'http://www.yaske.ro'
# Channel artwork (fanart/thumbnail) comes from the channel's own JSON definition.
parameters = channeltools.get_channel_parameters('yaske')
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
# Kodi ARGB colour codes used to tint menu entries (three shades of green).
color1, color2, color3 = ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E']
def mainlist(item):
    """Build the channel's root menu: listings, filter submenus and search."""
    logger.info()
    itemlist = []
    item.url = HOST
    item.text_color = color2
    item.fanart = fanart_host
    # Template for the per-entry menu icons hosted on GitHub.
    thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/verdes/%s.png"
    itemlist.append(item.clone(title="Novedades", action="peliculas", text_bold=True, viewcontent='movies',
                               url=HOST + "/ultimas-y-actualizadas",
                               thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
    itemlist.append(item.clone(title="Estrenos", action="peliculas", text_bold=True,
                               url=HOST + "/genre/premieres", thumbnail=thumbnail % 'estrenos'))
    # Empty, non-folder items act as visual separators in the Kodi listing.
    itemlist.append(item.clone(title="", folder=False))
    itemlist.append(Item(channel=item.channel, title="Filtrar por:", fanart=fanart_host, folder=False,
                         text_color=color3, text_bold=True, thumbnail=thumbnail_host))
    # The leading spaces in the titles indent these entries under "Filtrar por:".
    itemlist.append(item.clone(title="    Género", action="menu_buscar_contenido", text_color=color1, text_italic=True,
                               extra="genre", thumbnail=thumbnail % 'generos', viewmode="thumbnails"))
    itemlist.append(item.clone(title="    Idioma", action="menu_buscar_contenido", text_color=color1, text_italic=True,
                               extra="audio", thumbnail=thumbnail % 'idiomas'))
    itemlist.append(item.clone(title="    Calidad", action="menu_buscar_contenido", text_color=color1, text_italic=True,
                               extra="quality", thumbnail=thumbnail % 'calidad'))
    itemlist.append(item.clone(title="    Año", action="menu_buscar_contenido", text_color=color1, text_italic=True,
                               extra="year", thumbnail=thumbnail % 'year'))
    itemlist.append(item.clone(title="", folder=False))
    itemlist.append(item.clone(title="Buscar por título", action="search", thumbnail=thumbnail % 'buscar'))
    return itemlist
def search(item, texto):
    """Site search by title.

    Builds the search URL from *texto*, delegates the scraping to
    peliculas() and returns the results sorted alphabetically by
    contentTitle, keeping a trailing pagination item (if any) at the end.
    Returns [] on any scraping error so the global search does not abort.
    """
    logger.info()
    itemlist = []
    try:
        # http://www.yaske.ro/search/?q=los+pitufos
        item.url = HOST + "/search/?q=" + texto.replace(' ', '+')
        item.extra = ""
        itemlist.extend(peliculas(item))
        # Keep the ">> Página siguiente" entry (when present) last and sort
        # only the real results. The emptiness guard avoids the IndexError
        # that the previous bare `except:` silently swallowed.
        if itemlist and itemlist[-1].title == ">> Página siguiente":
            item_pag = itemlist[-1]
            itemlist = sorted(itemlist[:-1], key=lambda it: it.contentTitle)
            itemlist.append(item_pag)
        else:
            itemlist = sorted(itemlist, key=lambda it: it.contentTitle)
        return itemlist
    except Exception:
        # Log the full exception info but never propagate: the caller
        # (global search) expects an empty list on failure.
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def newest(categoria):
    """Entry point for the aggregated "news" section.

    categoria: only 'peliculas' and 'infantiles' are supported here;
    any other category returns [].
    Returns [] on any error so a failing channel cannot interrupt the
    global novelty listing.
    """
    logger.info()
    item = Item()
    try:
        if categoria == 'peliculas':
            item.url = HOST + "/ultimas-y-actualizadas"
        elif categoria == 'infantiles':
            item.url = HOST + "/search/?q=&genre%5B%5D=animation"
        else:
            return []

        itemlist = peliculas(item)
        # Drop the pagination pseudo-item: the novelty view is single-page.
        # The explicit emptiness check replaces the IndexError that the
        # previous bare `except:` silently absorbed on empty results.
        if itemlist and itemlist[-1].title == ">> Página siguiente":
            itemlist.pop()
    # Catch the exception so one broken channel does not interrupt the
    # novelty section shared by all channels.
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
def peliculas(item):
    """Scrape a movie-listing page and return one Item per movie.

    Splits long result pages into two virtual pages of 30 entries each
    (item.next_page 'a'/'b') before following the site's real "Siguiente"
    link. Appends a ">> Página siguiente" pseudo-item when more results
    exist.
    """
    logger.info()
    itemlist = []
    url_next_page = ""

    data = httptools.downloadpage(item.url).data
    # Collapse whitespace/entities so the single-line regexes below match.
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

    # One <article> per movie: link, poster, language flags, year, title.
    patron = '<article class.*?'
    patron += '<a href="([^"]+)">.*?'
    patron += '<img src="([^"]+)".*?'
    patron += '<aside class="item-control down">(.*?)</aside>.*?'
    patron += '<small class="pull-right text-muted">([^<]+)</small>.*?'
    patron += '<h2 class.*?>([^<]+)</h2>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    # Paginacion
    # First visit ('a' or unset): show the first 30 matches and, if there
    # were more, point the pagination item back at the same URL with
    # next_page='b'. Second visit ('b'): show the remainder and follow the
    # site's own "Siguiente" link for the next real page.
    if item.next_page != 'b':
        if len(matches) > 30:
            url_next_page = item.url
            matches = matches[:30]
            next_page = 'b'
    else:
        matches = matches[30:]
        next_page = 'a'
        patron_next_page = 'Anteriores</a> <a href="([^"]+)" class="btn btn-default ".*?Siguiente'
        matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
        if len(matches_next_page) > 0:
            url_next_page = matches_next_page[0]

    for scrapedurl, scrapedthumbnail, idiomas, year, scrapedtitle in matches:
        # Map the flag-image filenames to language tags for the title suffix.
        patronidiomas = "<img src='([^']+)'"
        matchesidiomas = re.compile(patronidiomas, re.DOTALL).findall(idiomas)
        idiomas_disponibles = []
        for idioma in matchesidiomas:
            if idioma.endswith("la_la.png"):
                idiomas_disponibles.append("LAT")
            elif idioma.endswith("en_en.png"):
                idiomas_disponibles.append("VO")
            elif idioma.endswith("en_es.png"):
                idiomas_disponibles.append("VOSE")
            elif idioma.endswith("es_es.png"):
                idiomas_disponibles.append("ESP")
        if idiomas_disponibles:
            idiomas_disponibles = "[" + "/".join(idiomas_disponibles) + "]"
        contentTitle = scrapertoolsV2.decodeHtmlentities(scrapedtitle.strip())
        title = "%s %s" % (contentTitle, idiomas_disponibles)
        itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
                             thumbnail=scrapedthumbnail, contentTitle=contentTitle,
                             infoLabels={"year": year}, text_color=color1))

    # Obtenemos los datos basicos de todas las peliculas mediante multihilos
    tmdb.set_infoLabels(itemlist)

    # Si es necesario añadir paginacion
    # NOTE(review): next_page is only bound on the branches that also set
    # url_next_page, so the reference below is safe — confirm if editing
    # the pagination logic above.
    if url_next_page:
        itemlist.append(
            Item(channel=item.channel, action="peliculas", title=">> Página siguiente", thumbnail=thumbnail_host,
                 url=url_next_page, next_page=next_page, folder=True, text_color=color3, text_bold=True))

    return itemlist
def menu_buscar_contenido(item):
    """Build a filter submenu from one of the site's <select> dropdowns.

    item.extra selects which dropdown is scraped: 'genre', 'audio',
    'quality' or 'year'. Each option becomes an Item pointing at the
    corresponding search URL.
    """
    logger.info(item)
    data = httptools.downloadpage(item.url).data
    # Isolate the <select name="<extra>..."> block for this filter.
    patron = '<select name="' + item.extra + '(.*?)</select>'
    data = scrapertoolsV2.get_match(data, patron)

    # Extrae las entradas
    patron = "<option value='([^']+)'>([^<]+)</option>"
    matches = re.compile(patron, re.DOTALL).findall(data)

    itemlist = []
    for scrapedvalue, scrapedtitle in matches:
        thumbnail = ""
        if item.extra == 'genre':
            # These genres are deliberately hidden from the menu.
            if scrapedtitle.strip() in ['Documental', 'Short', 'News']:
                continue
            url = HOST + "/search/?q=&genre%5B%5D=" + scrapedvalue
            # Icon filenames are URL-encoded lowercase genre names; the
            # accented "ciencia ficción" has an unaccented icon file.
            filename = scrapedtitle.lower().replace(' ', '%20')
            if filename == "ciencia%20ficción":
                filename = "ciencia%20ficcion"
            thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/verdes/%s.png" \
                        % filename
        elif item.extra == 'year':
            url = HOST + "/search/?q=&year=" + scrapedvalue
            thumbnail = item.thumbnail
        else:
            # http://www.yaske.ro/search/?q=&quality%5B%5D=c9
            # http://www.yaske.ro/search/?q=&audio%5B%5D=es
            url = HOST + "/search/?q=&" + item.extra + "%5B%5D=" + scrapedvalue
            thumbnail = item.thumbnail

        itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=url, text_color=color1,
                             thumbnail=thumbnail, contentType='movie', folder=True, viewmode="movie_with_plot"))

    # Years are sorted descending (newest first); the rest ascending.
    if item.extra in ['genre', 'audio', 'year']:
        return sorted(itemlist, key=lambda i: i.title.lower(), reverse=item.extra == 'year')
    else:
        return itemlist
def findvideos(item):
    """List the playable server links for one movie, grouped by language.

    Also prepends a "Buscar trailer" entry and appends an
    "Añadir película a la videoteca" entry when applicable.
    """
    logger.info()
    itemlist = list()
    sublist = list()

    # Descarga la página
    data = httptools.downloadpage(item.url).data
    if not item.plot:
        item.plot = scrapertoolsV2.find_single_match(data, '>Sinopsis</dt> <dd>([^<]+)</dd>')
        item.plot = scrapertoolsV2.decodeHtmlentities(item.plot)

    # Each <option> carries the embed URL, the language and the quality.
    patron = '<option value="([^"]+)"[^>]+'
    patron += '>([^<]+).*?</i>([^<]+)'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for url, idioma, calidad in matches:
        # yaske-hosted links hide the real player URL inside a base64-encoded
        # eval() payload; unwrap it to the inner iframe src.
        # NOTE(review): b64decode raises on malformed payloads — presumably
        # the site always emits valid base64 here; confirm before hardening.
        if 'yaske' in url:
            data = httptools.downloadpage(url).data
            url_enc = scrapertoolsV2.find_single_match(data, "eval.*?'(.*?)'")
            url_dec = base64.b64decode(url_enc)
            url = scrapertoolsV2.find_single_match(url_dec, 'iframe src="(.*?)"')
        sublist.append(item.clone(action="play", url=url, folder=False, text_color=color1, quality=calidad.strip(),
                                  language=idioma.strip()))

    # Resolve the concrete server for each link and set the visible title.
    sublist = servertools.get_servers_itemlist(sublist, lambda i: "Ver en %s %s" % (i.server, i.quality), True)

    # Añadir servidores encontrados, agrupandolos por idioma
    for k in ["Español", "Latino", "Subtitulado", "Ingles"]:
        lista_idioma = filter(lambda i: i.language == k, sublist)
        if lista_idioma:
            # Non-folder header item labels the language group.
            itemlist.append(Item(channel=item.channel, title=k, fanart=item.fanart, folder=False,
                                 text_color=color2, text_bold=True, thumbnail=thumbnail_host))
            itemlist.extend(lista_idioma)

    # Insertar items "Buscar trailer" y "Añadir a la videoteca"
    # (skipped when browsing from the video library itself).
    if itemlist and item.extra != "library":
        title = "%s  [Buscar trailer]" % (item.contentTitle)
        itemlist.insert(0, item.clone(channel="trailertools", action="buscartrailer",
                                      text_color=color3, title=title, viewmode="list"))
        if config.get_videolibrary_support():
            itemlist.append(Item(channel=item.channel, title="Añadir película a la videoteca",
                                 action="add_pelicula_to_library", url=item.url, text_color="green",
                                 contentTitle=item.contentTitle, extra="library", thumbnail=thumbnail_host))

    return itemlist

4
plugin.video.alfa/channelselector.py Executable file → Normal file
View File

@@ -62,11 +62,11 @@ def getchanneltypes():
logger.info()
# Lista de categorias
channel_types = ["movie", "tvshow", "anime", "documentary", "vos", "torrent", "latino"]
channel_types = ["movie", "tvshow", "anime", "documentary", "vos", "direct", "torrent", "latino"]
dict_types_lang = {'movie': config.get_localized_string(30122), 'tvshow': config.get_localized_string(30123),
'anime': config.get_localized_string(30124), 'documentary': config.get_localized_string(30125),
'vos': config.get_localized_string(30136), 'adult': config.get_localized_string(30126),
'latino': config.get_localized_string(30127)}
'latino': config.get_localized_string(30127), 'direct': config.get_localized_string(30137)}
if config.get_setting("adult_mode") != 0:
channel_types.append("adult")

View File

@@ -127,14 +127,13 @@ def save_movie(item):
base_name = unicode(filetools.validate_path(base_name.replace('/', '-')), "utf8").lower().encode("utf8")
subcarpetas = os.listdir(MOVIES_PATH)
for c in subcarpetas:
code = scrapertools.find_single_match(c, '\[(.*?)\]')
if code and code in item.infoLabels['code']:
path = filetools.join(MOVIES_PATH, c)
_id = code
break
for raiz, subcarpetas, ficheros in filetools.walk(MOVIES_PATH):
for c in subcarpetas:
code = scrapertools.find_single_match(c, '\[(.*?)\]')
if code and code in item.infoLabels['code']:
path = filetools.join(raiz, c)
_id = code
break
if not path:
# Crear carpeta
@@ -248,14 +247,13 @@ def save_tvshow(item, episodelist):
base_name = unicode(filetools.validate_path(base_name.replace('/', '-')), "utf8").lower().encode("utf8")
subcarpetas = os.listdir(TVSHOWS_PATH)
for c in subcarpetas:
code = scrapertools.find_single_match(c, '\[(.*?)\]')
if code and code in item.infoLabels['code']:
path = filetools.join(TVSHOWS_PATH, c)
_id = code
break
for raiz, subcarpetas, ficheros in filetools.walk(TVSHOWS_PATH):
for c in subcarpetas:
code = scrapertools.find_single_match(c, '\[(.*?)\]')
if code and code in item.infoLabels['code']:
path = filetools.join(raiz, c)
_id = code
break
if not path:
path = filetools.join(TVSHOWS_PATH, ("%s [%s]" % (base_name, _id)).strip())
@@ -348,7 +346,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
news_in_playcounts = {}
# Listamos todos los ficheros de la serie, asi evitamos tener que comprobar si existe uno por uno
ficheros = os.listdir(path)
raiz, carpetas_series, ficheros = filetools.walk(path).next()
ficheros = [filetools.join(path, f) for f in ficheros]
# Silent es para no mostrar progreso (para videolibrary_service)

View File

@@ -135,6 +135,7 @@
<string id="30124">Anime</string>
<string id="30125">Documentals</string>
<string id="30126">Adults</string>
<string id="30137">Directos</string>
<string id="30127">Llatí</string>
<string id="30128">Cerca de tràilers</string>
<string id="30129">Adult</string>

View File

@@ -138,6 +138,7 @@
<string id="30125">Documentaries</string>
<string id="30136">Original version</string>
<string id="30126">Adult</string>
<string id="30137">Direct</string>
<string id="30127">Latin</string>
<string id="30128">Search Trailers</string>
<string id="30129">Adult</string>

View File

@@ -137,6 +137,7 @@
<string id="30125">Documentari</string>
<string id="30136">Versioni originali</string>
<string id="30126">Adulti</string>
<string id="30137">Direct</string>
<string id="30127">Latino</string>
<string id="30128">Cerca trailer</string>
<string id="30129">Adulto</string>

View File

@@ -137,6 +137,7 @@
<string id="30125">Documentales</string>
<string id="30126">Adultos</string>
<string id="30136">Versión original</string>
<string id="30137">Directos</string>
<string id="30127">Latino</string>
<string id="30128">Buscador de Trailers</string>
<string id="30129">Adultos</string>

View File

@@ -137,6 +137,7 @@
<string id="30125">Documentales</string>
<string id="30126">Adultos</string>
<string id="30136">Versión original</string>
<string id="30137">Directos</string>
<string id="30127">Latino</string>
<string id="30128">Buscador de Trailers</string>
<string id="30129">Adultos</string>

Binary file not shown.

After

Width:  |  Height:  |  Size: 14 KiB

View File

@@ -12,6 +12,10 @@
{
"pattern": "(?s)https://youtube.googleapis.com.*?docid=([^(?:&|\")]+)",
"url": "http://docs.google.com/get_video_info?docid=\\1"
},
{
"pattern": "(?s)https://drive.google.com/file/d/([^/]+)/preview",
"url": "http://docs.google.com/get_video_info?docid=\\1"
}
]
},
@@ -45,4 +49,4 @@
}
],
"version": 1
}
}

View File

@@ -85,9 +85,10 @@ def check_for_update(overwrite=True):
heading = 'Actualizando videoteca....'
p_dialog = platformtools.dialog_progress_bg('alfa', heading)
p_dialog.update(0, '')
show_list = []
import glob
show_list = glob.glob(filetools.join(videolibrarytools.TVSHOWS_PATH, u'/*/tvshow.nfo'))
for path, folders, files in filetools.walk(library.TVSHOWS_PATH):
show_list.extend([filetools.join(path, f) for f in files if f == "tvshow.nfo"])
if show_list:
t = float(100) / len(show_list)