diff --git a/plugin.video.alfa/channels/dilo.json b/plugin.video.alfa/channels/dilo.json
new file mode 100644
index 00000000..1d094d6f
--- /dev/null
+++ b/plugin.video.alfa/channels/dilo.json
@@ -0,0 +1,37 @@
+{
+    "id": "dilo",
+    "name": "Dilo",
+    "active": true,
+    "adult": false,
+    "language": [],
+    "thumbnail": "https://s22.postimg.cc/u6efsniqp/dilo.png",
+    "banner": "",
+    "categories": [
+        "tvshow",
+        "vos"
+    ],
+    "settings": [
+        {
+            "id": "include_in_global_search",
+            "type": "bool",
+            "label": "Incluir en busqueda global",
+            "default": false,
+            "enabled": false,
+            "visible": false
+        },
+        {
+            "id": "filter_languages",
+            "type": "list",
+            "label": "Mostrar enlaces en idioma...",
+            "default": 0,
+            "enabled": true,
+            "visible": true,
+            "lvalues": [
+                "No filtrar",
+                "CAST",
+                "LAT",
+                "VOSE"
+            ]
+        }
+    ]
+}
diff --git a/plugin.video.alfa/channels/dilo.py b/plugin.video.alfa/channels/dilo.py
new file mode 100644
index 00000000..70a52b20
--- /dev/null
+++ b/plugin.video.alfa/channels/dilo.py
@@ -0,0 +1,297 @@
+# -*- coding: utf-8 -*-
+# -*- Channel Dilo -*-
+# -*- Created for Alfa-addon -*-
+# -*- By the Alfa Develop Group -*-
+
+import re
+
+from channels import autoplay
+from channels import filtertools
+from core import httptools
+from core import scrapertools
+from core import servertools
+from core import jsontools
+from core import tmdb
+from core.item import Item
+from platformcode import config, logger
+from channelselector import get_thumb
+
+host = 'https://www.dilo.nu/'
+
+IDIOMAS = {'Español': 'CAST', 'Latino': 'LAT', 'Subtitulado': 'VOSE'}
+list_language = IDIOMAS.values()
+list_quality = []
+list_servers = ['openload', 'streamango', 'powvideo', 'clipwatching', 'streamplay', 'streamcherry', 'gamovideo']
+
+def get_source(url):
+    logger.info()
+    data = httptools.downloadpage(url).data
+    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+    return data
+
+def mainlist(item):
+    logger.info()
+
+    autoplay.init(item.channel, list_servers, list_quality)
+    itemlist = []
+
+    itemlist.append(Item(channel=item.channel, title="Nuevos capitulos", action="latest_episodes", url=host,
+                         thumbnail=get_thumb('new episodes', auto=True)))
+
+    itemlist.append(Item(channel=item.channel, title="Ultimas", action="latest_shows", url=host,
+                         thumbnail=get_thumb('last', auto=True)))
+
+    itemlist.append(Item(channel=item.channel, title="Todas", action="list_all", url=host + 'catalogue',
+                         thumbnail=get_thumb('all', auto=True)))
+
+    itemlist.append(Item(channel=item.channel, title="Generos", action="section",
+                         url=host + 'catalogue', thumbnail=get_thumb('genres', auto=True)))
+
+    itemlist.append(Item(channel=item.channel, title="Por Años", action="section", url=host + 'catalogue',
+                         thumbnail=get_thumb('year', auto=True)))
+
+    itemlist.append(Item(channel=item.channel, title = 'Buscar', action="search", url= host+'search?s=',
+                         thumbnail=get_thumb('search', auto=True)))
+
+    autoplay.show_option(item.channel, itemlist)
+
+    return itemlist
+
+
+def list_all(item):
+    logger.info()
+
+    itemlist = []
+    data = get_source(item.url)
+    patron = '
')
+    if next_page != '':
+        itemlist.append(Item(channel=item.channel, action="list_all", title='Siguiente >>>',
+                             url=page_base+next_page, thumbnail=get_thumb("more.png"),
+                             type=item.type))
+    return itemlist
+
+
+def section(item):
+    logger.info()
+
+    itemlist = []
+    data=get_source(item.url)
+
+    if item.title == 'Generos':
+        data = scrapertools.find_single_match(data, '>Todos los generos.*?.*?
.*?text-uppercase"')
+    patron = '([^<]+)
'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+
+    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
+        title = scrapedtitle
+        contentSerieName = scrapedtitle
+        itemlist.append(Item(channel=item.channel, action='seasons', url=scrapedurl, thumbnail=scrapedthumbnail,
+                             title=title, contentSerieName=contentSerieName))
+
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+
+    return itemlist
+
+
+def seasons(item):
+    from core import jsontools
+    import urllib
+    logger.info()
+
+    itemlist=[]
+
+    data=get_source(item.url)
+    serie_id = scrapertools.find_single_match(data, '{"item_id": (\d+)}')
+    post = {'item_id': serie_id}
+    post = urllib.urlencode(post)
+    seasons_url = '%sapi/web/seasons.php' % host
+    headers = {'Referer':item.url}
+    data = jsontools.load(httptools.downloadpage(seasons_url, post=post, headers=headers).data)
+    infoLabels = item.infoLabels
+    for dict in data:
+        season = dict['number']
+
+        if season != '0':
+            infoLabels['season'] = season
+            title = 'Temporada %s' % season
+            itemlist.append(Item(channel=item.channel, url=item.url, title=title, action='episodesxseason',
+                                 contentSeasonNumber=season, id=serie_id, infoLabels=infoLabels))
+
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+
+    if config.get_videolibrary_support() and len(itemlist) > 0:
+        itemlist.append(
+            Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
+                 action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
+
+    return itemlist
+
+
+def episodesxseason(item):
+    logger.info()
+    from core import jsontools
+    import urllib
+    logger.info()
+
+    itemlist = []
+    season = item.infoLabels['season']
+    post = {'item_id': item.id, 'season_number': season}
+    post = urllib.urlencode(post)
+
+    seasons_url = '%sapi/web/episodes.php' % host
+    headers = {'Referer': item.url}
+    data = jsontools.load(httptools.downloadpage(seasons_url, post=post, headers=headers).data)
+    infoLabels = item.infoLabels
+    for dict in data:
+
+        episode = dict['number']
+        epi_name = dict['name']
+        title = '%sx%s - %s' % (season, episode, epi_name)
+        url = '%s%s/' % (host, dict['permalink'])
+        infoLabels['episode'] = episode
+        itemlist.append(Item(channel=item.channel, title=title, action='findvideos', url=url,
+                             contentEpisodeNumber=season, id=item.id, infoLabels=infoLabels))
+
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+
+    return itemlist
+
+def episodios(item):
+    logger.info()
+    itemlist = []
+    templist = seasons(item)
+    for tempitem in templist:
+        itemlist += episodesxseason(tempitem)
+    return itemlist
+
+
+def findvideos(item):
+    logger.info()
+
+    itemlist = []
+    data = get_source(item.url)
+    patron = 'data-link="([^"]+)">.*?500">([^<]+)<.*?>Reproducir en ([^<]+)'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    for enc_url, server, language in matches:
+        if not config.get_setting('unify'):
+            title = ' [%s]' % language
+        else:
+            title = ''
+
+        itemlist.append(Item(channel=item.channel, title='%s'+title, url=enc_url, action='play',
+                             language=IDIOMAS[language], server=server, infoLabels=item.infoLabels))
+
+    itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
+
+    # Requerido para FilterTools
+
+    itemlist = filtertools.get_links(itemlist, item, list_language)
+
+    # Requerido para AutoPlay
+
+    autoplay.start(itemlist, item)
+
+    return itemlist
+
+
+def decode_link(enc_url):
+    logger.info()
+
+    try:
+        new_data = get_source(enc_url)
+        new_enc_url = scrapertools.find_single_match(new_data, 'src="([^"]+)"')
+        try:
+            url = httptools.downloadpage(new_enc_url, follow_redirects=False).headers['location']
+        except:
+            if not 'jquery' in new_enc_url:
+                url = new_enc_url
+    except:
+        pass
+
+    return url
+
+
+def play(item):
+    logger.info()
+
+    item.url = decode_link(item.url)
+
+    itemlist = [item]
+
+    return itemlist
+
+
+def search(item, texto):
+    logger.info()
+    import urllib
+    itemlist = []
+    texto = texto.replace(" ", "+")
+    item.url = item.url + texto
+    if texto != '':
+        try:
+            return list_all(item)
+        except:
+            itemlist.append(item.clone(url='', title='No hay elementos...', action=''))
+            return itemlist
diff --git a/plugin.video.alfa/channels/goovie.py b/plugin.video.alfa/channels/goovie.py
index 2948cf73..4de00a9c 100644
--- a/plugin.video.alfa/channels/goovie.py
+++ b/plugin.video.alfa/channels/goovie.py
@@ -17,10 +17,10 @@ from channels import autoplay
 from platformcode import config, logger
 
-IDIOMAS = {'1':'Cast', '2':'Lat', '3':'VOSE', '4':'VO'}
+IDIOMAS = {'EspaL':'Cast', 'LatinoL':'Lat', 'SubL':'VOSE', 'OriL':'VO'}
 list_language = IDIOMAS.values()
 
-CALIDADES = {'1':'1080','2':'720','3':'480','4':'360'}
+CALIDADES = {'1080p':'1080','720p':'720','480p':'480','360p':'360'}
 list_quality = ['1080', '720', '480', '360']
 
@@ -89,17 +89,20 @@ def section(item):
     logger.info()
     itemlist=[]
     data = get_source(host+item.type)
-    if 'Genero' in item.title:
-        data = scrapertools.find_single_match(data, 'genero.*?')
+        data = scrapertools.find_single_match(data, 'Generos.*?')
     elif 'Año' in item.title:
-        data = scrapertools.find_single_match(data, 'año.*?')
-    patron = '(.*?)'
+        data = scrapertools.find_single_match(data, 'Años.*?')
+    patron = "
• "
+    matches = re.compile(patron, re.DOTALL).findall(data)
-    for scrapedurl, scrapedtitle in matches:
+    for scrapedtitle in matches:
         title = scrapedtitle
-        itemlist.append(Item(channel=item.channel, url=scrapedurl, title=title, action='list_all',
+        if r'\d+' in scrapedtitle:
+            url = '%s%s/filtro/,/%s,' % (host, item.type, title)
+        else:
+            url = '%s%s/filtro/%s,/,' % (host, item.type, title)
+        itemlist.append(Item(channel=item.channel, url=url, title=title, action='list_all',
                              type=item.type))
     return itemlist
 
@@ -109,46 +112,33 @@ def list_all(item):
     logger.info()
     itemlist = []
     data = get_source(item.url)
-    #logger.debug(data)
-    #return
-    if item.type == 'peliculas':
-        patron = '.*?.*?(.*?).*?'
-        patron += "(.*?)(\d{4}) /.*?.*?'(\d+)'"
-        matches = re.compile(patron, re.DOTALL).findall(data)
+    patron = '
'
+    matches = re.compile(patron, re.DOTALL).findall(data)
-        for scrapedthumbnail, scrapedurl, scrapedtitle, scrapedplot, year, video_id in matches:
+    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
-            title = '%s [%s]' % (scrapedtitle, year)
-            contentTitle = scrapedtitle
-            thumbnail = scrapedthumbnail
-            url = scrapedurl
+        title = scrapedtitle
+        thumbnail = scrapedthumbnail.strip()
+        url = scrapedurl
+        filter_thumb = thumbnail.replace("https://image.tmdb.org/t/p/w154", "")
+        filter_list = {"poster_path": filter_thumb}
+        filter_list = filter_list.items()
+        new_item = Item(channel=item.channel,
+                        title=title,
+                        url=url,
+                        thumbnail=thumbnail,
+                        plot=thumbnail,
+                        infoLabels={'filtro':filter_list})
-            itemlist.append(item.clone(action='findvideos',
-                                       title=title,
-                                       url=url,
-                                       thumbnail=thumbnail,
-                                       contentTitle=contentTitle,
-                                       video_id=video_id,
-                                       infoLabels={'year':year}))
+        if item.type == 'peliculas':
+            new_item.action = 'findvideos'
+            new_item.contentTitle = scrapedtitle
+        else:
+            new_item.action = 'seasons'
+            new_item.contentSerieName = scrapedtitle
-    elif item.type == 'series':
-        patron = '.*?.*?.*?'
-        patron +='(.*?)(.*?)(\d{4}) /'
-        matches = re.compile(patron, re.DOTALL).findall(data)
-
-        for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot, year in matches:
-            title = scrapedtitle
-            contentSerieName = scrapedtitle
-            thumbnail = scrapedthumbnail
-            url = scrapedurl
-
-            itemlist.append(item.clone(action='seasons',
-                                       title=title,
-                                       url=url,
-                                       thumbnail=thumbnail,
-                                       plot=scrapedplot,
-                                       contentSerieName=contentSerieName,
-                                       infoLabels={'year':year}))
+        itemlist.append(new_item)
 
     tmdb.set_infoLabels(itemlist, seekTmdb=True)
     # Paginación
@@ -199,21 +189,18 @@ def episodesxseasons(item):
     itemlist = []
     data=get_source(item.url)
-    logger.debug(data)
-    patron= "ViewEpisode\('(\d+)', this\)>%s - (\d+)" % item.infoLabels['season']
-    patron += ".*?src=(.*?) />.*?namep>(.*?)"
-
+    patron= "• ]+)>%s - (\d+)([^>]+)" % item.infoLabels['season']
     matches = re.compile(patron, re.DOTALL).findall(data)
     infoLabels = item.infoLabels
-    for video_id, scrapedepisode, scrapedthumbnail, scrapedtitle in matches:
+    for url, scrapedepisode, scrapedtitle in matches:
         infoLabels['episode'] = scrapedepisode
         title = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle)
-        itemlist.append(Item(channel=item.channel, title= title, url=item.url, thumbnail=scrapedthumbnail,
-                             action='findvideos', video_id=video_id, infoLabels=infoLabels))
+        itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos',
+                             infoLabels=infoLabels))
 
     tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
 
@@ -224,87 +211,45 @@ def findvideos(item):
     logger.info()
     from lib import jsunpack
     itemlist = []
-    headers = {'referer':item.url}
-    if item.video_id == '':
-        find_id = get_source(item.url)
-        #logger.debug(find_id)
-        #return
-        item.video_id = scrapertools.find_single_match(find_id, 'var centerClick = (\d+);')
-    url = 'https://goovie.co/api/links/%s' % item.video_id
-    data = httptools.downloadpage(url, headers=headers).data
-    video_list = jsontools.load(data)
-    for video_info in video_list:
-        logger.debug(video_info)
-        url = video_info['visor']
-        plot = 'idioma: %s calidad: %s' % (video_info['idioma'], video_info['calidad'])
+    data = get_source(item.url)
+
+    patron = "onclick=clickLink\(this, '([^']+)', '([^']+)', '([^']+)'\);>"
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    headers = {'referer': item.url}
+    for url, quality, language in matches:
+        data = httptools.downloadpage(url, headers=headers, follow_redirects=False).data
         data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
         packed = scrapertools.find_single_match(data, '(eval\(.*?);var')
         unpacked = jsunpack.unpack(packed)
-        logger.debug('unpacked %s' % unpacked)
         server = scrapertools.find_single_match(unpacked, "src:.'(http://\D+)/")
         id = scrapertools.find_single_match(unpacked, "src:.'http://\D+/.*?description:.'(.*?).'")
         if server == '':
            if 'powvideo' in unpacked:
-                id = scrapertools.find_single_match(unpacked ,",description:.'(.*?).'")
-                server= 'https://powvideo.net'
+                id = scrapertools.find_single_match(unpacked, ",description:.'(.*?).'")
+                server = 'https://powvideo.net'
         url = '%s/%s' % (server, id)
         if server != '' and id != '':
-            language = IDIOMAS[video_info['idioma']]
-            quality = CALIDADES[video_info['calidad']]
+            language = IDIOMAS[language]
+            quality = CALIDADES[quality]
             title = ' [%s] [%s]' % (language, quality)
-            itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', language=language,
-                                 quality=quality))
-
-    itmelist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
+            itemlist.append(Item(channel=item.channel, title='%s' + title, url=url, action='play', language=language,
+                                 quality=quality, infoLabels=item.infoLabels))
+    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
     return sorted(itemlist, key=lambda i: i.language)
 
+
 def search(item, texto):
     logger.info()
     texto = texto.replace(" ", "+")
     item.url = item.url + texto
     item.type = 'peliculas'
     if texto != '':
-        return search_results(item)
+        return list_all(item)
     else:
         return []
 
-def search_results(item):
-    logger.info()
-
-    itemlist=[]
-
-    data=get_source(item.url)
-    logger.debug(data)
-    patron = '
.*?href=(.*?)>.*?typeContent>(.*?)<.*?'
-    patron += '.*?(.*?)(.*?)(\d{4})<'
-    matches = re.compile(patron, re.DOTALL).findall(data)
-
-    for scrapedurl, content_type ,scrapedthumb, scrapedtitle, scrapedplot, year in matches:
-
-        title = scrapedtitle
-        url = scrapedurl
-        thumbnail = scrapedthumb
-        plot = scrapedplot
-        if content_type != 'Serie':
-            action = 'findvideos'
-        else:
-            action = 'seasons'
-
-        new_item=Item(channel=item.channel, title=title, url=url, thumbnail=thumbnail, plot=plot,
-                      action=action, type=content_type, infoLabels={'year':year})
-        if new_item.action == 'findvideos':
-            new_item.contentTitle = new_item.title
-        else:
-            new_item.contentSerieName = new_item.title
-
-        itemlist.append(new_item)
-
-    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
-
-    return itemlist
-
 def newest(categoria):
     logger.info()
     itemlist = []
@@ -313,9 +258,9 @@
     if categoria in ['peliculas']:
         item.url = host + 'peliculas'
     elif categoria == 'infantiles':
-        item.url = host + 'peliculas/generos/animación'
+        item.url = host + 'peliculas/filtro/Animación,/,'
     elif categoria == 'terror':
-        item.url = host + 'peliculas/generos/terror'
+        item.url = host + 'peliculas/filtro/Terror,/,'
     item.type='peliculas'
     itemlist = list_all(item)
     if itemlist[-1].title == 'Siguiente >>':
diff --git a/plugin.video.alfa/channels/pelisipad.py b/plugin.video.alfa/channels/pelisipad.py
index 63034e92..2abea066 100644
--- a/plugin.video.alfa/channels/pelisipad.py
+++ b/plugin.video.alfa/channels/pelisipad.py
@@ -77,10 +77,10 @@ def submenu(item):
                              url=host % "list/ultimas-peliculas" + ext, text_color=color2,
                              thumbnail=host % "list/ultimas-peliculas/thumbnail_167x250.jpg",
                              fanart=host % "list/ultimas-peliculas/background_1080.jpg", viewmode="movie_with_plot"))
-    itemlist.append(Item(channel=item.channel, title="Destacados", action="entradas",
-                         url=host % "list/000-novedades" + ext, text_color=color2,
-                         thumbnail=host % "list/screener/thumbnail_167x250.jpg",
-                         fanart=host % "list/screener/background_1080.jpg", viewmode="movie_with_plot"))
+    # itemlist.append(Item(channel=item.channel, title="Destacados", action="entradas",
+    #                      url=host % "list/000-novedades" + ext, text_color=color2,
+    #                      thumbnail=host % "list/screener/thumbnail_167x250.jpg",
+    #                      fanart=host % "list/screener/background_1080.jpg", viewmode="movie_with_plot"))
     itemlist.append(Item(channel=item.channel, title="Más vistas", action="entradas",
                          url=host % "list/peliculas-mas-vistas" + ext, text_color=color2,
                          thumbnail=host % "list/peliculas-mas-vistas/thumbnail_167x250.jpg",
@@ -167,7 +167,7 @@ def entradas(item):
             #if child['year']:
             #    title += " (" + child['year'] + ")"
             #title += quality
-
+            thumbnail += "|User-Agent=%s" % httptools.get_user_agent
             video_urls = []
             for k, v in child.get("video", {}).items():
                 for vid in v:
@@ -232,6 +232,7 @@ def entradasconlistas(item):
                 thumbnail = host % "list/%s/thumbnail_167x250.jpg" % child["id"]
                 fanart = host % "list/%s/background_1080.jpg" % child["id"]
 
+            thumbnail += "|User-Agent=%s" % httptools.get_user_agent
             itemlist.append(Item(channel=item.channel, action=action, title=title, url=url,
                                  thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, show=show,
                                  infoLabels=infolabels, contentTitle=fulltitle, viewmode="movie_with_plot",
@@ -295,7 +296,7 @@ def entradasconlistas(item):
                 for vid in v:
                     video_urls.append(["http://%s.pelisipad.com/s/transcoder/%s" % (vid["server"], vid["url"]) + "?%s",
                                        vid["height"]])
-
+            thumbnail += "|User-Agent=%s" % httptools.get_user_agent
             itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url,
                                 video_urls=video_urls, thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle,
                                 infoLabels=infolabels, contentTitle=fulltitle, viewmode="movie_with_plot",
                                 text_color=color3))
@@ -347,6 +348,7 @@ def series(item):
         if child.get("numberOfSeasons") and "- Temporada" not in title:
             title += " (Temps:%s)" % child['numberOfSeasons']
 
+        thumbnail += "|User-Agent=%s" % httptools.get_user_agent
         itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, text_color=color3,
                              thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, infoLabels=infolabels,
                              contentTitle=fulltitle, viewmode="movie_with_plot", show=fulltitle))
@@ -414,6 +416,7 @@ def episodios(item):
                 title = fulltitle = child['name'].rsplit(" ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1]
             except:
                 title = fulltitle = child['id'].replace("-", " ")
+            thumbnail += "|User-Agent=%s" % httptools.get_user_agent
             itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url,
                                  thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, contentTitle=fulltitle,
                                  viewmode="movie", show=item.show, infoLabels=infoLabels, video_urls=video_urls, extra="episodios",
@@ -491,6 +494,7 @@ def nuevos_cap(item):
             else:
                 title = fulltitle = child['name']
 
+            thumbnail += "|User-Agent=%s" % httptools.get_user_agent
             itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
                                  fanart=fanart, fulltitle=fulltitle, contentTitle=fulltitle, viewmode="movie",
                                  show=item.fulltitle, infoLabels=infoLabels, video_urls=video_urls, extra="nuevos",
@@ -571,6 +575,7 @@ def listas(item):
             infolabels['title'] = title
             try:
                 from core import videolibrarytools
+                thumbnail += "|User-Agent=%s" % httptools.get_user_agent
                 new_item = item.clone(title=title, url=url, fulltitle=title, fanart=fanart, extra="findvideos",
                                       thumbnail=thumbnail, infoLabels=infolabels, category="Cine")
                 videolibrarytools.add_movie(new_item)
diff --git a/plugin.video.alfa/channels/seriesblanco.py b/plugin.video.alfa/channels/seriesblanco.py
index 45a6f607..0eed39d9 100644
--- a/plugin.video.alfa/channels/seriesblanco.py
+++ b/plugin.video.alfa/channels/seriesblanco.py
@@ -212,21 +212,21 @@ def new_episodes(item):
     itemlist = []
     data = get_source(item.url)
-    data = scrapertools.find_single_match(data, 'Series Online : Capítulos estrenados recientemente.*?')
-    patron = '• .*?src="([^"]+)".*? data-original-title=" (\d+x\d+).*?'
+    patron = '• .*?src="([^"]+)"'
     matches = re.compile(patron, re.DOTALL).findall(data)
-    for lang_data, scrapedurl, scrapedthumbnail, scrapedinfo, in matches:
+    for lang_data, scrapedinfo, scrapedurl, scrapedthumbnail in matches:
-        url = host+scrapedurl
+        url =scrapedurl
         thumbnail = scrapedthumbnail
         scrapedinfo = scrapedinfo.split('x')
         season = scrapedinfo[0]
         episode = scrapedinfo[1]
-        scrapedtitle = scrapertools.find_single_match(url, 'capitulo/([^/]+)/').replace("-", " ")
-        title = '%s - %sx%s' % (scrapedtitle, season, episode )
+        scrapedtitle = scrapertools.find_single_match(url, 'capitulo/([^/]+)/')
+        url = '%scapitulos/%s' % (host, scrapedtitle)
+        title = '%s - %sx%s' % (scrapedtitle.replace('-', ' '), season, episode )
         title, language = add_language(title, lang_data)
         itemlist.append(Item(channel=item.channel, action='seasons',
diff --git a/plugin.video.alfa/platformcode/platformtools.py b/plugin.video.alfa/platformcode/platformtools.py
index f1ad9e63..09667a5c 100644
--- a/plugin.video.alfa/platformcode/platformtools.py
+++ b/plugin.video.alfa/platformcode/platformtools.py
@@ -154,6 +154,13 @@ def render_items(itemlist, parent_item):
                 valid_genre = True
             elif anime:
                 valid_genre = True
+            elif 'siguiente' in item.title.lower() and '>' in item.title:
+                item.thumbnail = get_thumb("next.png")
+            elif 'add' in item.action:
+                if 'pelicula' in item.action:
+                    item.thumbnail = get_thumb("videolibrary_movie.png")
+                elif 'serie' in item.action:
+                    item.thumbnail = get_thumb("videolibrary_tvshow.png")
 
         if unify_enabled and parent_item.channel != 'alfavorites':
 
@@ -1071,8 +1078,8 @@ def play_torrent(item, xlistitem, mediaurl):
     #### Compatibilidad con Kodi 18: evita cuelgues/cancelaciones cuando el .torrent se lanza desde pantalla convencional
     if xbmc.getCondVisibility('Window.IsMedia'):
-        xbmcplugin.setResolvedUrl(int(sys.argv[1]), False, xlistitem)    #Preparamos el entorno para evutar error Kod1 18
-        time.sleep(1)    #Dejamos que se ejecute
+        xbmcplugin.setResolvedUrl(int(sys.argv[1]), False, xlistitem)    #Preparamos el entorno para evitar error Kod1 18
+        time.sleep(1)    #Dejamos tiempo para que se ejecute
         mediaurl = urllib.quote_plus(item.url)
     if ("quasar" in torrent_options[seleccion][1] or "elementum" in torrent_options[seleccion][1]) and item.infoLabels['tmdb_id']:    #Llamada con más parámetros para completar el título
@@ -1083,17 +1090,17 @@ def play_torrent(item, xlistitem, mediaurl):
         xbmc.executebuiltin("PlayMedia(" + torrent_options[seleccion][1] % mediaurl + ")")
 
-    #Seleccionamos que clientes torrent soportamos para el marcado de vídeos vistos
-    if "quasar" in torrent_options[seleccion][1] or "elementum" in torrent_options[seleccion][1]:
-        time_limit = time.time() + 150    #Marcamos el timepo máx. de buffering
-        while not is_playing() and time.time() < time_limit:    #Esperamos mientra buffera
-            time.sleep(5)    #Repetimos cada intervalo
-            #logger.debug(str(time_limit))
-
-        if item.strm_path and is_playing():    #Sólo si es de Videoteca
-            from platformcode import xbmc_videolibrary
-            xbmc_videolibrary.mark_auto_as_watched(item)    #Marcamos como visto al terminar
-            #logger.debug("Llamado el marcado")
+    #Seleccionamos que clientes torrent soportamos para el marcado de vídeos vistos: asumimos que todos funcionan
+    #if "quasar" in torrent_options[seleccion][1] or "elementum" in torrent_options[seleccion][1]:
+    time_limit = time.time() + 150    #Marcamos el timepo máx. de buffering
+    while not is_playing() and time.time() < time_limit:    #Esperamos mientra buffera
+        time.sleep(5)    #Repetimos cada intervalo
+        #logger.debug(str(time_limit))
+
+    if item.strm_path and is_playing():    #Sólo si es de Videoteca
+        from platformcode import xbmc_videolibrary
+        xbmc_videolibrary.mark_auto_as_watched(item)    #Marcamos como visto al terminar
+        #logger.debug("Llamado el marcado")
 
     if seleccion == 1:
         from platformcode import mct
diff --git a/plugin.video.alfa/resources/media/themes/default/thumb_next.png b/plugin.video.alfa/resources/media/themes/default/thumb_next.png
index a2035b8e..a2f3b346 100644
Binary files a/plugin.video.alfa/resources/media/themes/default/thumb_next.png and b/plugin.video.alfa/resources/media/themes/default/thumb_next.png differ