Merge pull request #110 from Alfa-beto/Fixes

Another small adjustment
Authored by Alfa on 2017-09-30 02:37:47 +02:00, committed by GitHub
10 changed files with 35 additions and 30 deletions

View File

@@ -91,7 +91,8 @@ def peliculas(item):
thumbnail = thumbnail,
url = url,
contentTitle = titulo,
contentType="movie"
contentType="movie",
language = idioma
)
if year:
new_item.infoLabels['year'] = int(year)
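
Most hunks in this merge follow one pattern: the scraped language (and, in later files, the quality) is attached to the Item the channel builds instead of living only in the title string. A minimal sketch of that pattern, using a hypothetical simplified Item in place of the addon's real Item class:

    # Sketch only: a stand-in for the addon's Item, accepting arbitrary keyword attributes.
    class Item(object):
        def __init__(self, **kwargs):
            self.infoLabels = {}
            self.__dict__.update(kwargs)

        def clone(self, **kwargs):
            # Copy the current attributes, then apply the overrides, roughly what item.clone() does.
            attrs = dict(self.__dict__)
            attrs.update(kwargs)
            new = Item()
            new.__dict__.update(attrs)
            new.infoLabels = dict(attrs.get('infoLabels', {}))
            return new

    item = Item(channel="example", action="peliculas")
    # Hypothetical scraped values standing in for titulo/idioma/year in the hunk above.
    titulo, idioma, year = "Some Movie", "VOSE", "2017"
    new_item = item.clone(action="findvideos",
                          contentTitle=titulo,
                          contentType="movie",
                          language=idioma)   # the attribute this commit starts to populate
    if year:
        new_item.infoLabels['year'] = int(year)
    print(new_item.language, new_item.infoLabels)

Downstream code can then read new_item.language or new_item.quality directly instead of re-parsing the [COLOR ...] markup embedded in the title.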

View File

@@ -137,18 +137,18 @@ def peliculas(item):
matches = scrapertools.find_multiple_matches(data, patron)
-for scrapedthumbnail, scrapedtitle, rating, calidad, scrapedurl, year in matches[item.page:item.page + 20]:
-if 'Próximamente' not in calidad and '-XXX.jpg' not in scrapedthumbnail:
+for scrapedthumbnail, scrapedtitle, rating, quality, scrapedurl, year in matches[item.page:item.page + 20]:
+if 'Próximamente' not in quality and '-XXX.jpg' not in scrapedthumbnail:
scrapedtitle = scrapedtitle.replace('Ver ', '').strip()
contentTitle = scrapedtitle.partition(':')[0].partition(',')[0]
title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (
-scrapedtitle, year, calidad)
+scrapedtitle, year, quality)
itemlist.append(item.clone(channel=__channel__, action="findvideos", text_color=color3,
url=scrapedurl, infoLabels={'year': year, 'rating': rating},
contentTitle=contentTitle, thumbnail=scrapedthumbnail,
-title=title, context="buscar_trailer"))
+title=title, context="buscar_trailer", quality = quality))
-tmdb.set_infoLabels(itemlist, __modo_grafico__)
+tmdb.set_infoLabels(itemlist, __modo_grafico__)
@@ -367,7 +367,7 @@ def findvideos(item):
server = servertools.get_server_from_url(url)
title = "%s [COLOR yellow](%s) (%s)[/COLOR]" % (item.contentTitle, server.title(), lang)
itemlist.append(item.clone(action='play', url=url, title=title, extra1=title,
-server=server, text_color=color3))
+server=server, language = lang, text_color=color3))
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',

View File

@@ -108,9 +108,9 @@ def peliculas(item):
infolab = {'year': year}
itemlist.append(item.clone(action="findvideos", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, infoLabels=infolab,
-contentTitle=title, contentType="movie"))
+contentTitle=title, contentType="movie", quality=calidad))
-next_page = scrapertools.find_single_match(data, '<a class="nextpostslink" href="([^"]+)"')
+next_page = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)"')
if next_page:
itemlist.append(item.clone(title=">> Página Siguiente", url=next_page))

View File

@@ -364,7 +364,7 @@ def peliculas(item):
itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, extra="media",
thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, fulltitle=scrapedtitle,
-text_color=color2, contentType="movie"))
+text_color=color2, contentType="movie", quality=calidad, language=audios))
next_page = scrapertools.find_single_match(data, 'href="([^"]+)"[^>]+>Siguiente')
if next_page != "" and item.title != "":
@@ -676,7 +676,7 @@ def get_enlaces(item, url, type):
titulo = " [%s/%s]" % (language, scrapedcalidad.strip())
itemlist.append(
item.clone(action="play", url=google_url, title=" Ver en Gvideo" + titulo, text_color=color2,
extra="", server="gvideo"))
extra="", server="gvideo", language=language, quality=scrapedcalidad.strip()))
patron = '<div class="available-source".*?data-url="([^"]+)".*?class="language.*?title="([^"]+)"' \
'.*?class="source-name.*?>\s*([^<]+)<.*?<span class="quality-text">([^<]+)<'
matches = scrapertools.find_multiple_matches(data, patron)
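
The pattern above captures four groups per link (URL, language, server name, quality), which is what lets get_enlaces pass language and quality through to each cloned item. A rough, self-contained illustration with an invented HTML fragment; scrapertools.find_multiple_matches behaves essentially like re.findall with DOTALL:

    import re

    # Hypothetical HTML fragment shaped like the markup the pattern targets.
    data = '''
    <div class="available-source" data-url="http://example/v1">
      <span class="language" title="Latino"></span>
      <span class="source-name">gvideo</span>
      <span class="quality-text">HD</span>
    </div>
    '''

    patron = ('<div class="available-source".*?data-url="([^"]+)".*?class="language.*?title="([^"]+)"'
              r'.*?class="source-name.*?>\s*([^<]+)<.*?<span class="quality-text">([^<]+)<')

    matches = re.findall(patron, data, re.DOTALL)
    for scrapedurl, language, server, quality in matches:
        print(scrapedurl, language, server.strip(), quality)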

View File

@@ -133,8 +133,8 @@ def peliculas(item):
patron += 'href="([^"]+)"'
patron += '.*?(?:<span>|<span class="year">)([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
-for scrapedthumbnail, scrapedtitle, calidad, scrapedurl, scrapedyear in matches:
-calidad = scrapertools.find_single_match(calidad, '.*?quality">([^<]+)')
+for scrapedthumbnail, scrapedtitle, quality, scrapedurl, scrapedyear in matches:
+quality = scrapertools.find_single_match(quality, '.*?quality">([^<]+)')
try:
fulltitle = scrapedtitle
year = scrapedyear.replace("&nbsp;", "")
@@ -143,11 +143,11 @@ def peliculas(item):
scrapedtitle = "%s (%s)" % (fulltitle, year)
except:
fulltitle = scrapedtitle
-if calidad:
-scrapedtitle += " [%s]" % calidad
+if quality:
+scrapedtitle += " [%s]" % quality
new_item = item.clone(action="findvideos", title=scrapedtitle, fulltitle=fulltitle,
url=scrapedurl, thumbnail=scrapedthumbnail,
-contentTitle=fulltitle, contentType="movie")
+contentTitle=fulltitle, contentType="movie", quality=quality)
if year:
new_item.infoLabels['year'] = int(year)
itemlist.append(new_item)
@@ -330,12 +330,14 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
if filtro_idioma == 3 or item.filtro:
lista_enlaces.append(item.clone(title=title, action="play", text_color=color2,
url=scrapedurl, server=scrapedserver, idioma=scrapedlanguage,
-extra=item.url, contentThumbnail = item.thumbnail))
+extra=item.url, contentThumbnail = item.thumbnail,
+language=scrapedlanguage))
else:
idioma = dict_idiomas[language]
if idioma == filtro_idioma:
lista_enlaces.append(item.clone(title=title, text_color=color2, action="play", url=scrapedurl,
-extra=item.url, contentThumbnail = item.thumbnail))
+extra=item.url, contentThumbnail = item.thumbnail,
+language=scrapedlanguage))
else:
if language not in filtrados:
filtrados.append(language)
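
The bloque_enlaces change keeps the existing language filter (compare the scraped language against the user's filtro_idioma through dict_idiomas) and only adds language=scrapedlanguage to the cloned links. A rough sketch of that filter logic, with invented dictionary contents and without the item.filtro shortcut:

    # Hypothetical mapping and settings; the real channel builds these from its configuration.
    dict_idiomas = {"Castellano": 1, "Latino": 2, "VOSE": 3}
    filtro_idioma = 1            # user setting; 3 means "show every language"
    filtrados = []               # languages hidden by the filter
    lista_enlaces = []

    scraped_links = [("http://example/1", "Castellano"), ("http://example/2", "VOSE")]

    for scrapedurl, scrapedlanguage in scraped_links:
        if filtro_idioma == 3:
            # No filtering: keep the link and, after this commit, tag it with its language.
            lista_enlaces.append({"url": scrapedurl, "language": scrapedlanguage})
        else:
            idioma = dict_idiomas[scrapedlanguage]
            if idioma == filtro_idioma:
                lista_enlaces.append({"url": scrapedurl, "language": scrapedlanguage})
            elif scrapedlanguage not in filtrados:
                filtrados.append(scrapedlanguage)

    print(lista_enlaces)   # [{'url': 'http://example/1', 'language': 'Castellano'}]
    print(filtrados)       # ['VOSE']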

View File

@@ -302,7 +302,7 @@ def findvideos(item):
if "partes" in title:
action = "extract_url"
new_item = Item(channel=item.channel, action=action, title=title, fulltitle=title, url=url,
-thumbnail=thumbnail, plot=plot, parentContent=item, server = servername)
+thumbnail=thumbnail, plot=plot, parentContent=item, server = servername, quality=calidad)
if comentarios.startswith("Ver en"):
itemlist_ver.append(new_item)
else:

View File

@@ -203,9 +203,10 @@ def newest(categoria):
# categoria='peliculas'
try:
if categoria == 'peliculas':
-item.url = host
+item.url = host +'peliculas/page/1'
elif categoria == 'infantiles':
-item.url = host + 'category/animacion/'
+item.url = host + 'categoria/animacion/'
itemlist = lista(item)
if itemlist[-1].title == 'Siguiente >>>':
itemlist.pop()

View File

@@ -319,7 +319,7 @@ def fichas(item):
contentTitle = scrapedtitle.strip()
if scrapedlangs != ">":
-textoidiomas = extrae_idiomas(scrapedlangs)
+textoidiomas, language = extrae_idiomas(scrapedlangs)
#Todo Quitar el idioma
title += bbcode_kodi2html(" ( [COLOR teal][B]" + textoidiomas + "[/B][/COLOR])")
@@ -351,7 +351,7 @@ def fichas(item):
itemlist.append(
Item(channel=item.channel, action=action, title=title, url=url, fulltitle=title, thumbnail=thumbnail,
show=show, folder=True, contentType=contentType, contentTitle=contentTitle,
-language =textoidiomas, infoLabels=infoLabels))
+language =language, infoLabels=infoLabels))
## Paginación
next_page_url = scrapertools.find_single_match(data, '<a href="([^"]+)">.raquo;</a>')
@@ -797,16 +797,17 @@ def agrupa_datos(data):
def extrae_idiomas(bloqueidiomas):
logger.info("idiomas=" + bloqueidiomas)
# Todo cambiar por lista
#textoidiomas=[]
+language=[]
textoidiomas = ''
patronidiomas = '([a-z0-9]+).png"'
idiomas = re.compile(patronidiomas, re.DOTALL).findall(bloqueidiomas)
for idioma in idiomas:
# TODO quitar esto
textoidiomas = textoidiomas + idioma +" "
#textoidiomas.append(idioma.upper())
# TODO y dejar esto
+language.append(idioma)
-return textoidiomas
+return textoidiomas, language
def bbcode_kodi2html(text):
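
The two hunks in this file are halves of one change: extrae_idiomas now also collects a list of language codes and returns it alongside the old display string, and fichas() unpacks both values so it can pass language= into the Item. A condensed, runnable sketch of the helper's new shape (simplified input, not the channel's full code):

    import re

    def extrae_idiomas(bloqueidiomas):
        # Returns the old space-separated display string plus the new list of codes.
        language = []
        textoidiomas = ''
        patronidiomas = '([a-z0-9]+).png"'
        idiomas = re.compile(patronidiomas, re.DOTALL).findall(bloqueidiomas)
        for idioma in idiomas:
            textoidiomas = textoidiomas + idioma + " "
            language.append(idioma)
        return textoidiomas, language

    # Caller side, as in the fichas() hunk: unpack both values.
    textoidiomas, language = extrae_idiomas('<img src="es.png"><img src="lat.png">')
    print(textoidiomas)   # "es lat "
    print(language)       # ['es', 'lat']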

View File

@@ -127,9 +127,8 @@ def findvideos(item):
matches = scrapertools.find_multiple_matches(data, patron)
for url, server, calidad, idioma in matches:
-title = item.contentTitle
-server = servertools.get_server_from_url(url)
+title = '%s [%s] [%s] [%s]' % (item.contentTitle, server, calidad, idioma)
itemlist.append(item.clone(action="play", title=title, fulltitle = item.title, url=url, language = idioma,
contentTitle = item.contentTitle, quality = calidad, server = server))

View File

@@ -213,9 +213,10 @@ def findvideos(item):
if 'openload' in url:
url = url + '|' + item.url
+extra_info = title.split(' - ')
title = "%s - %s" % ('%s', title)
-itemlist.append(Item(channel=item.channel, action="play", url=url, title=title, text_color=color3))
+itemlist.append(Item(channel=item.channel, action="play", url=url, title=title, language=extra_info[0],
+quality=extra_info[1],text_color=color3))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
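
In this last hunk the title keeps a literal '%s' slot ('%s - <language> - <quality>') so that servertools.get_servers_itemlist can drop the detected server name in afterwards via the lambda. A toy illustration of that deferred substitution, with a stand-in for get_servers_itemlist since the real helper also resolves each item's server from its URL:

    # Stand-in item and helper, assumptions only; the real servertools does server detection.
    class Item(object):
        def __init__(self, **kwargs):
            self.__dict__.update(kwargs)

    def get_servers_itemlist(itemlist, title_fn):
        for i in itemlist:
            i.server = "openload"          # hypothetical detected server
            i.title = title_fn(i)
        return itemlist

    title = "Spanish - HD"
    extra_info = title.split(' - ')        # ['Spanish', 'HD']
    title = "%s - %s" % ('%s', title)      # keep a literal %s slot for the server name

    itemlist = [Item(action="play", url="http://example", title=title,
                     language=extra_info[0], quality=extra_info[1])]
    itemlist = get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    print(itemlist[0].title)               # "Openload - Spanish - HD"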