corrections

Unknown
2018-09-26 16:18:45 -03:00
parent f8e72cbbef
commit dff5d0e8d7
5 changed files with 535 additions and 178 deletions

View File

@@ -1,249 +1,241 @@
# -*- coding: utf-8 -*-
# -*- Channel Pelisplus -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core import jsontools
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
from core import servertools
host = "http://www.pelisplus.tv/"
IDIOMAS = {'latino': 'Latino'}
list_language = IDIOMAS.values()
IDIOMA = {'latino': 'Latino'}
list_language = IDIOMA.values()
list_quality = []
list_quality = ['1080p',
'720p',
'480p',
'360p',
'240p',
'default'
]
list_servers = [
'gvideo',
'directo',
'openload',
'thevideos'
]
'rapidvideo',
'streamango',
'vidlox',
'vidoza'
]
host = 'https://www.pelisplus.to/'
def get_source(url):
def get_source(url, referer=None):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
if referer == None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
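
The reworked get_source() above adds an optional Referer header and collapses whitespace before any regex runs over the page. A minimal standalone sketch of the same idea, using the requests library in place of Alfa's core.httptools (an assumption for illustration; the function name is made up):

import re
import requests  # assumption: stands in for Alfa's core.httptools

def fetch_clean(url, referer=None):
    """Download a page, optionally sending a Referer, and collapse whitespace."""
    headers = {'Referer': referer} if referer else {}
    data = requests.get(url, headers=headers, timeout=30).text
    # Same clean-up as the channel: drop newlines, tabs, &nbsp;, <br> and runs of spaces
    return re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', '', data)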
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(
item.clone(title="Peliculas",
action="sub_menu",
thumbnail=get_thumb('movies', auto=True),
))
itemlist.append(Item(channel=item.channel,
title="Peliculas",
action="sub_menu",
thumbnail=get_thumb('movies', auto=True),
))
itemlist.append(
item.clone(title="Series",
action="sub_menu",
thumbnail=get_thumb('tvshows', auto=True),
))
itemlist.append(Item(channel=item.channel,
title="Series",
action="sub_menu",
thumbnail=get_thumb('tvshows', auto=True),
))
itemlist.append(
item.clone(title="Buscar", action="search", url=host + 'busqueda/?s=',
thumbnail=get_thumb('search', auto=True),
))
itemlist.append(Item(channel=item.channel,
title="Buscar", action="search", url=host + 'search/?s=',
thumbnail=get_thumb('search', auto=True),
))
autoplay.show_option(item.channel, itemlist)
return itemlist
def sub_menu(item):
logger.info()
itemlist = []
content = item.title.lower()
itemlist.append(item.clone(title="Todas",
action="list_all",
url=host + '%s/ultimas-%s/' % (content, content),
thumbnail=get_thumb('all', auto=True),
))
itemlist.append(item.clone(title="Generos",
action="generos",
url=host + '%s/' % content,
thumbnail=get_thumb('genres', auto=True),
))
itemlist.append(Item(channel=item.channel,
title="Ultimas",
action="list_all",
url=host + '%s/estrenos' % content,
thumbnail=get_thumb('last', auto=True),
type=content
))
itemlist.append(Item(channel=item.channel,title="Todas",
action="list_all",
url=host + '%s' % content,
thumbnail=get_thumb('all', auto=True),
type=content
))
itemlist.append(Item(channel=item.channel,
title="Generos",
action="section",
thumbnail=get_thumb('genres', auto=True),
type=content
))
return itemlist
def list_all(item):
logger.info()
itemlist = []
itemlist=[]
data = get_source(item.url)
patron = '(?:</a>|Posters>)<a href=(.*?) class=Posters.*?data-title=(.*?) data.*?src=(.*?) alt'
full_data = get_source(item.url)
data = scrapertools.find_single_match(full_data, '<div class="Posters">(.*?)</(?:ul|a></div>)')
patron = 'href="([^"]+)".*?src="([^"]+)".*?<p>([^<]+)</p>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
url = scrapedurl
title = scrapertools.decodeHtmlentities(scrapedtitle)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
title = scrapedtitle
thumbnail = scrapedthumbnail
filter_thumb = thumbnail.replace("https://image.tmdb.org/t/p/w300", "")
filter_list = {"poster_path": filter_thumb}
filter_list = filter_list.items()
url = scrapedurl
filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w154", "")
filtro_list = {"poster_path": filtro_thumb}
filtro_list = filtro_list.items()
new_item=(
Item(channel=item.channel,
action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
infoLabels={'filtro': filtro_list},
context=autoplay.context
))
if 'serie' not in url:
new_item.contentTitle = scrapedtitle
new_item = Item(channel=item.channel,
title=title,
url=url,
thumbnail=thumbnail,
infoLabels={'filtro':filter_list})
if item.type == 'peliculas' or 'serie' not in url:
new_item.action = 'findvideos'
new_item.contentTitle = scrapedtitle
else:
new_item.contentSerieName = scrapedtitle
new_item.action = 'seasons'
new_item.contentSerieName = scrapedtitle
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
tmdb.set_infoLabels(itemlist, seekTmdb=True)
# Pagination
next_page_pattern = '<a class="page-link" href="([^"]+)" data-ci-pagination-page="\d+" rel="next">&gt;</a>'
url_next_page = scrapertools.find_single_match(full_data, next_page_pattern)
if url_next_page:
itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all'))
# Pagination
if itemlist != []:
next_page = scrapertools.find_single_match(data, '<li><a href=([^ ]+) rel=next>&raquo;</a>')
if next_page != '':
itemlist.append(item.clone(action="list_all",
title='Siguiente >>>',
url=host+next_page,
thumbnail='https://s32.postimg.cc/4zppxf5j9/siguiente.png'
))
return itemlist
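
The new list_all() first isolates the <div class="Posters"> block, then pulls (url, thumbnail, title) triplets and the rel="next" pagination link with the two patterns above. A self-contained sketch against an illustrative HTML snippet (the sample markup is made up; only its shape matters):

import re

# Illustrative HTML in the shape the new list_all() patterns expect (assumed, not taken from the site)
sample = ('<div class="Posters"><a href="/pelicula/demo">'
          '<img src="https://image.tmdb.org/t/p/w154/demo.jpg"><p>Demo title</p></a></div>'
          '<a class="page-link" href="/peliculas?page=2" data-ci-pagination-page="2" rel="next">&gt;</a>')

block = re.search(r'<div class="Posters">(.*?)</(?:ul|a></div>)', sample, re.DOTALL).group(1)
items = re.findall(r'href="([^"]+)".*?src="([^"]+)".*?<p>([^<]+)</p>', block, re.DOTALL)
next_page = re.search(r'<a class="page-link" href="([^"]+)" data-ci-pagination-page="\d+" rel="next">&gt;</a>', sample)

print(items)               # [('/pelicula/demo', 'https://image.tmdb.org/t/p/w154/demo.jpg', 'Demo title')]
print(next_page.group(1))  # /peliculas?page=2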
def generos(item):
logger.info()
itemlist = []
data = get_source(item.url)
if 'series' not in item.url:
clean_genre = 'PELÍCULAS DE'
else:
clean_genre = 'SERIES DE'
patron = '<h2 class=Heading--carousel> %s(.*?) <a class=Heading-link title=View All href=(.*?)><' % clean_genre
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle, scrapedurl in matches:
url = scrapedurl
title = scrapedtitle
if 'agregadas' not in title.lower():
itemlist.append(
Item(channel=item.channel,
action="list_all",
title=title,
url=url,
))
return itemlist
def seasons(item):
logger.info()
itemlist = []
templist = []
data = get_source(item.url)
serie_id = scrapertools.find_single_match(data, '<div class=owl-carousel data-serieid=(.*?)>')
itemlist=[]
patron = 'class=js-season-item> SEASON<span>(.*?)</span>'
data=get_source(item.url)
patron='data-toggle="tab">TEMPORADA (\d+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for season in matches:
contentSeasonNumber = season
infoLabels['season']=season
itemlist.append(Item(channel=item.channel, action="episodes", title='Temporada %s' % season,
serie_id=serie_id, contentSeasonNumber=contentSeasonNumber,
serie_url=item.url, infoLabels=infoLabels))
if item.extra == 'seasons':
for tempitem in itemlist:
templist += episodes(tempitem)
else:
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
title = 'Temporada %s' % season
itemlist.append(Item(channel=item.channel, title=title, url=item.url, action='episodesxseasons',
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel,
title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
url=item.url,
action="add_serie_to_library",
extra="seasons",
contentSerieName=item.contentSerieName,
contentSeasonNumber=contentSeasonNumber
))
if item.extra == 'seasons':
return templist
else:
return itemlist
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
return itemlist
def episodes(item):
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseasons(tempitem)
return itemlist
def episodesxseasons(item):
logger.info()
itemlist= []
itemlist = []
url = host+'api/episodes?titleId=%s&seasonNumber=%s' % (item.serie_id, item.contentSeasonNumber)
season = item.infoLabels['season']
data=get_source(item.url)
season_data = scrapertools.find_single_match(data, 'id="pills-vertical-%s">(.*?)</div>' % season)
patron='href="([^"]+)".*?block">Capitulo (\d+) - ([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(season_data)
data = jsontools.load(httptools.downloadpage(url).data)
episode_list = data['titles']
infoLabels = item.infoLabels
for episode in episode_list:
url = item.serie_url+episode['friendlyTitle4Url']
thumbnail = episode['url_image']
plot = episode['shortDescription']
contentEpisodeNumber = episode['tvSeasonEpisodeNumber']
title = '%sx%s - %s' % (item.contentSeasonNumber, contentEpisodeNumber, episode['title'])
infoLabels['episode']=contentEpisodeNumber
for scrapedurl, scrapedepisode, scrapedtitle in matches:
infoLabels['episode'] = scrapedepisode
url = scrapedurl
title = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle)
itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', infoLabels=infoLabels))
itemlist.append(Item(channel=item.channel, action='findvideos', title=title, url=url, thumbnail=thumbnail,
plot=plot, infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def section(item):
logger.info()
itemlist=[]
data = get_source(host)
genres_data = scrapertools.find_single_match(data, '>Generos<(.*?)</ul>')
patron = 'href="\/\w+\/([^"]+)">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(genres_data)
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle
url = '%s/%s/%s' % (host, item.type, scrapedurl)
itemlist.append(Item(channel=item.channel, url=url, title=title, action='list_all', type=item.type))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
servers_page = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
data = get_source(servers_page, referer=item.url)
patron = '<a href="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for enc_url in matches:
url_data = get_source(enc_url, referer=item.url)
url = scrapertools.find_single_match(url_data, '<iframe src="([^"]+)"')
language = 'latino'
if not config.get_setting('unify'):
title = ' [%s]' % language.capitalize()
else:
title = ''
itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', language=IDIOMAS[language],
infoLabels=item.infoLabels))
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
videoitem.channel = item.channel
videoitem.language = IDIOMA['latino']
videoitem.title = '[%s] [%s]' % (videoitem.server, videoitem.language)
videoitem.infoLabels = item.infoLabels
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Required for FilterTools
@@ -255,18 +247,18 @@ def findvideos(item):
if item.contentType == 'movie':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url, action="add_pelicula_to_library", extra="findvideos",
contentTitle=item.contentTitle))
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle=item.contentTitle))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + 'busqueda/?s=' + texto
item.url += texto
try:
if texto != '':
@@ -279,26 +271,20 @@ def search(item, texto):
logger.error("%s" % line)
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas','latino']:
item.url = host + 'peliculas/ultimas-peliculas/'
if categoria in ['peliculas', 'latino']:
item.url = host + 'peliculas/estrenos'
elif categoria == 'infantiles':
item.url = host + 'peliculas/animacion/'
item.url = host + 'peliculas/generos/animacion/'
elif categoria == 'terror':
item.url = host + 'peliculas/terror/'
elif categoria == 'documentales':
item.url = host + 'documentales/'
item.url = host + 'peliculas/generos/terror/'
item.type='peliculas'
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>>':
if itemlist[-1].title == 'Siguiente >>':
itemlist.pop()
except:
import sys
@@ -306,4 +292,4 @@ def newest(categoria):
logger.error("{0}".format(line))
return []
return itemlist
return itemlist

View File

@@ -0,0 +1,38 @@
{
"id": "seriesblancoxyz",
"name": "SeriesBlanco.xyz",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "https://s22.postimg.cc/nucz720sx/image.png",
"banner": "",
"categories": [
"tvshow",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Cast",
"Lat",
"VOSE",
"VO"
]
}
]
}
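
The settings block above drives the channel's language filter: "default" is an index into "lvalues". A quick sketch of reading the definition with plain json (the file path is an assumption for illustration):

import json

with open('seriesblancoxyz.json') as fh:   # assumed path to the channel definition above
    channel = json.load(fh)

filter_setting = next(s for s in channel['settings'] if s['id'] == 'filter_languages')
default_label = filter_setting['lvalues'][filter_setting['default']]
print(channel['name'], '->', default_label)   # SeriesBlanco.xyz -> No filtrar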

View File

@@ -0,0 +1,323 @@
# -*- coding: utf-8 -*-
# -*- Channel SeriesBlanco.xyz -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = 'http://seriesblanco.xyz/'
IDIOMAS = {'Esp':'Cast', 'es': 'Cast', 'la': 'Lat', 'Latino':'Lat', 'vos': 'VOSE', 'vo': 'VO'}
list_language = IDIOMAS.values()
list_quality = ['SD', 'Micro-HD-720p', '720p', 'HDitunes', 'Micro-HD-1080p' ]
list_servers = ['powvideo','yourupload', 'openload', 'gamovideo', 'flashx', 'clipwatching', 'streamango', 'streamcloud']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel,
title="Nuevos Capitulos",
action="new_episodes",
thumbnail=get_thumb('new_episodes', auto=True),
url=host))
itemlist.append(Item(channel=item.channel,
title="Todas",
action="list_all",
thumbnail=get_thumb('all', auto=True),
url=host + 'listado/',
))
itemlist.append(Item(channel=item.channel,
title="Generos",
action="section",
thumbnail=get_thumb('genres', auto=True),
url=host,
))
# itemlist.append(Item(channel=item.channel,
# title="A - Z",
# action="section",
# thumbnail=get_thumb('alphabet', auto=True),
# url=host+'listado/', ))
itemlist.append(Item(channel=item.channel,
title="Buscar",
action="search",
thumbnail=get_thumb('search', auto=True)))
itemlist = filtertools.show_option(itemlist, item.channel, list_language, list_quality)
autoplay.show_option(item.channel, itemlist)
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
data = data.replace ("'", '"')
patron = '<li><div style=.*?><a href="([^"]+)"><img.*?src="([^"]+)" title="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedtitle = scrapedtitle.strip()
url = host + scrapedurl
thumbnail = scrapedthumbnail
title = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(Item(channel=item.channel,
action='seasons',
title=title,
url=url,
thumbnail=thumbnail,
contentSerieName=scrapedtitle,
context=filtertools.context(item, list_language, list_quality),
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pagination
if itemlist != []:
base_page = scrapertools.find_single_match(item.url,'(.*?)\?')
next_page = scrapertools.find_single_match(data, '</span><a href=?pagina=2>>></a>')
if next_page != '':
itemlist.append(Item(channel=item.channel,
action="lista",
title='Siguiente >>>',
url=base_page+next_page,
thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
))
return itemlist
def section(item):
logger.info()
itemlist = []
data = get_source(item.url)
if item.title == 'Generos':
patron = '<li><a href="([^"]+)"><i class="fa fa-bookmark-o"></i> (.*?)</a></li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
if item.title == 'Generos':
url = host + scrapedurl
title = scrapedtitle
itemlist.append(Item(channel=item.channel,
action='list_all',
title=title,
url=url
))
return itemlist
def seasons(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = "<p class='panel-primary btn-primary'> Temporada (\d+)</p>"
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels=item.infoLabels
id = scrapertools.find_single_match(data, "onclick='loadSeason\((\d+),\d+\);")
for scrapedseason in matches:
url = item.url
title = 'Temporada %s' % scrapedseason
contentSeasonNumber = scrapedseason
infoLabels['season'] = contentSeasonNumber
thumbnail = item.thumbnail
itemlist.append(Item(channel=item.channel,
action="episodesxseason",
title=title,
url=url,
thumbnail=thumbnail,
id=id,
contentSeasonNumber=contentSeasonNumber,
infoLabels=infoLabels
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
url=item.url,
action="add_serie_to_library",
extra="episodios",
contentSerieName=item.contentSerieName,
))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseason(tempitem)
return itemlist
def episodesxseason(item):
logger.info()
itemlist = []
season = item.contentSeasonNumber
season_url = '%sajax/visto3.php?season_id=%s&season_number=%s' % (host, item.id, season)
data = get_source(season_url)
patron = "<a href='([^ ]+)'.*?>.*?\d+x(\d+).*?-([^<]+)<.*?(/banderas.*?)</td>"
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedurl, scraped_episode, scrapedtitle, lang_data in matches:
url = host + scrapedurl
title = '%sx%s - %s' % (season, scraped_episode, scrapedtitle.strip())
infoLabels['episode'] = scraped_episode
thumbnail = item.thumbnail
title, language = add_language(title, lang_data)
itemlist.append(Item(channel=item.channel,
action="findvideos",
title=title,
url=url,
thumbnail=thumbnail,
language=language,
infoLabels=infoLabels
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def new_episodes(item):
logger.info()
itemlist = []
data = get_source(item.url)
data = data.replace("'", '"')
data = scrapertools.find_single_match(data,
'<center>Series Online : Capítulos estrenados recientemente</center>.*?</ul>')
patron = '<li><h6.*?src="([^"]+)".*?aalt="([^"]+)".*?href="([^"]+)">.*?src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for lang_data, scrapedtitle, scrapedurl, scrapedthumbnail in matches:
url =host+scrapedurl
thumbnail = scrapedthumbnail
season_episode = scrapertools.find_single_match(scrapedtitle, '.*? (\d+x\d+) ')
scrapedtitle= scrapertools.find_single_match(scrapedtitle, '(.*?) \d+x')
title = '%s - %s' % (scrapedtitle, season_episode )
title, language = add_language(title, lang_data)
itemlist.append(Item(channel=item.channel,
action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
language=language,
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def add_language(title, string):
logger.info()
languages = scrapertools.find_multiple_matches(string, '/banderas/(.*?).png')
language = []
for lang in languages:
if 'jap' in lang or lang not in IDIOMAS:
lang = 'vos'
if len(languages) == 1:
language = IDIOMAS[lang]
title = '%s [%s]' % (title, language)
else:
language.append(IDIOMAS[lang])
title = '%s [%s]' % (title, IDIOMAS[lang])
return title, language
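
add_language() turns the /banderas/<code>.png flag images that accompany each link into language labels, returning a plain string for a single flag and a list for several. An equivalent standalone sketch with made-up sample inputs:

import re

IDIOMAS = {'Esp': 'Cast', 'es': 'Cast', 'la': 'Lat', 'Latino': 'Lat', 'vos': 'VOSE', 'vo': 'VO'}

def add_language(title, string):
    """Standalone sketch of the helper above: map /banderas/<code>.png flags to labels."""
    languages = re.findall(r'/banderas/(.*?)\.png', string)
    language = []
    for lang in languages:
        if 'jap' in lang or lang not in IDIOMAS:
            lang = 'vos'                      # unknown flags fall back to VOSE
        if len(languages) == 1:
            language = IDIOMAS[lang]          # single flag: plain string
            title = '%s [%s]' % (title, language)
        else:
            language.append(IDIOMAS[lang])    # several flags: list of labels
            title = '%s [%s]' % (title, IDIOMAS[lang])
    return title, language

print(add_language('Demo 1x01', '<img src="/banderas/es.png">'))
# ('Demo 1x01 [Cast]', 'Cast')
print(add_language('Demo 1x02', '/banderas/es.png /banderas/vose.png'))
# ('Demo 1x02 [Cast] [VOSE]', ['Cast', 'VOSE'])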
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
data = data.replace ("'", '"')
patron = '<a href=([^ ]+) target="_blank"><img src="/servidores/(.*?).(?:png|jpg)".*?sno.*?'
patron += '<span>(.*?)<.*?(/banderas.*?)td'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, server, quality, lang_data in matches:
title = server.capitalize()
if quality == '':
quality = 'SD'
title = '%s [%s]' % (title, quality)
title, language = add_language(title, lang_data)
thumbnail = item.thumbnail
enlace_id, serie_id, se, ep = scrapertools.find_single_match(scrapedurl,'enlace(\d+)/(\d+)/(\d+)/(\d+)/')
url = host + 'ajax/load_enlace.php?serie=%s&temp=%s&cap=%s&id=%s' % (serie_id, se, ep, enlace_id)
itemlist.append(Item(channel=item.channel,
title=title,
url=url,
action="play",
thumbnail=thumbnail,
server=server,
quality=quality,
language=language,
infoLabels=item.infoLabels
))
# Required for FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required for AutoPlay
autoplay.start(itemlist, item)
return sorted(itemlist, key=lambda it: it.language)
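
Each link found by findvideos() is not used directly: its enlace<id>/<serie>/<season>/<episode>/ path is unpacked and rebuilt into the ajax/load_enlace.php URL that play() later downloads. A small sketch of that rewrite (the numeric ids in the example are made up):

import re

host = 'http://seriesblanco.xyz/'

def build_ajax_url(link):
    """Sketch: rebuild the ajax/load_enlace.php URL from an enlace<id>/<serie>/<season>/<episode>/ link."""
    enlace_id, serie_id, se, ep = re.search(r'enlace(\d+)/(\d+)/(\d+)/(\d+)/', link).groups()
    return host + 'ajax/load_enlace.php?serie=%s&temp=%s&cap=%s&id=%s' % (serie_id, se, ep, enlace_id)

print(build_ajax_url('/enlace42/1234/2/5/'))
# http://seriesblanco.xyz/ajax/load_enlace.php?serie=1234&temp=2&cap=5&id=42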
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.infoLabels = item.infoLabels
return itemlist
def search(item, texto):
logger.info()
if texto != '':
item.url = host + 'search.php?q1=%s' % texto
return list_all(item)

View File

@@ -154,7 +154,7 @@ def render_items(itemlist, parent_item):
valid_genre = True
elif anime:
valid_genre = True
elif 'siguiente' in item.title.lower() and '>' in item.title:
elif (('siguiente' in item.title.lower() and '>' in item.title) or ('pagina:' in item.title.lower())):
item.thumbnail = get_thumb("next.png")
elif 'add' in item.action:
if 'pelicula' in item.action:
@@ -1002,6 +1002,12 @@ def set_player(item, xlistitem, mediaurl, view, strm):
# Play
# xbmc_player = xbmc_player
#### Kodi 18 compatibility: avoids hangs/cancellations when the .torrent is launched from a conventional screen
if xbmc.getCondVisibility('Window.IsMedia'):
xbmcplugin.setResolvedUrl(int(sys.argv[1]), False, xlistitem) # Prepare the environment to avoid the Kodi 18 error
xbmc.sleep(100) # Give it time to run
xbmc_player.play(playlist, xlistitem)
if config.get_setting('trakt_sync'):
trakt_tools.wait_for_update_trakt()
@@ -1019,6 +1025,10 @@ def set_player(item, xlistitem, mediaurl, view, strm):
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, xlistitem)
elif config.get_setting("player_mode") == 2:
#### Kodi 18 compatibility: avoids hangs/cancellations when the .torrent is launched from a conventional screen
if xbmc.getCondVisibility('Window.IsMedia'):
xbmcplugin.setResolvedUrl(int(sys.argv[1]), False, xlistitem) # Prepare the environment to avoid the Kodi 18 error
xbmc.sleep(100) # Give it time to run
xbmc.executebuiltin("PlayMedia(" + mediaurl + ")")
# TODO: LOOK INTO REMOVING VIEW
@@ -1079,7 +1089,7 @@ def play_torrent(item, xlistitem, mediaurl):
#### Kodi 18 compatibility: avoids hangs/cancellations when the .torrent is launched from a conventional screen
if xbmc.getCondVisibility('Window.IsMedia'):
xbmcplugin.setResolvedUrl(int(sys.argv[1]), False, xlistitem) # Prepare the environment to avoid the Kodi 18 error
time.sleep(1) # Give it time to run
xbmc.sleep(500) # Give it time to run
mediaurl = urllib.quote_plus(item.url)
if ("quasar" in torrent_options[seleccion][1] or "elementum" in torrent_options[seleccion][1]) and item.infoLabels['tmdb_id']: #Llamada con más parámetros para completar el título

View File

@@ -279,7 +279,7 @@ def title_format(item):
visto = True
# Remove any previous formatting from the title
if item.action != '':
if item.action != '' and item.action !='mainlist':
item.title = remove_format(item.title)
#logger.debug('visto? %s' % visto)