diff --git a/channels/animesaturn.py b/channels/animesaturn.py
index c48377c8..560ecedc 100644
--- a/channels/animesaturn.py
+++ b/channels/animesaturn.py
@@ -8,10 +8,10 @@
 import re
 import urlparse
 import channelselector
-from core import httptools, tmdb, scrapertools, support
+from core import httptools, tmdb, support, scrapertools, jsontools
 from core.item import Item
 from platformcode import logger, config
-from specials import autoplay
+from specials import autoplay, autorenumber
 
 __channel__ = "animesaturn"
 host = config.get_setting("channel_host", __channel__)
@@ -22,45 +22,14 @@
 list_language = IDIOMAS.values()
 list_servers = ['openload','fembed']
 list_quality = ['default']
 
-# checklinks = config.get_setting('checklinks', __channel__)
-# checklinks_number = config.get_setting('checklinks_number', __channel__)
-
 
 def mainlist(item):
     support.log(item.channel + 'mainlist')
     itemlist = []
-    support.menu(itemlist, 'Anime bold', 'lista_anime', "%s/animelist?load_all=1" % host,'anime')
-    itemlist.append(
-        Item(channel=item.channel,
-             action="ultimiep",
-             url="%s/fetch_pages.php?request=episodios" % host,
-             title=support.typo("Novità submenu"),
-             extra="",
-             contentType='anime',
-             folder=True,
-             thumbnail=support.thumb())
-    )
-    # itemlist.append(
-    #     Item(channel=item.channel,
-    #          action="lista_anime",
-    #          url="%s/animeincorso" % host,
-    #          title=support.typo("In corso submenu"),
-    #          extra="anime",
-    #          contentType='anime',
-    #          folder=True,
-    #          thumbnail=channelselector.get_thumb('on_the_air.png'))
-    # )
-    itemlist.append(
-        Item(channel=item.channel,
-             action="list_az",
-             url="%s/animelist?load_all=1" % host,
-             title=support.typo("Archivio A-Z submenu"),
-             extra="anime",
-             contentType='anime',
-             folder=True,
-             thumbnail=channelselector.get_thumb('channels_tvshow_az.png'))
-    )
-    support.menu(itemlist, 'Cerca', 'search', host,'anime')
+    support.menu(itemlist, 'Anime bold', 'lista_anime', "%s/animelist?load_all=1" % host)
+    support.menu(itemlist, 'Novità submenu', 'ultimiep', "%s/fetch_pages.php?request=episodes" % host,'tvshow')
+    support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', '%s/animelist?load_all=1' % host,args=['tvshow','alfabetico'])
+    support.menu(itemlist, 'Cerca', 'search', host)
 
     autoplay.init(item.channel, list_servers, list_quality)
@@ -131,9 +100,14 @@ def lista_anime(item):
             title += ' '+support.typo(' (ITA)')
 
         infoLabels = {}
-        # if 'Akira' in title:
-        #     movie = True
-        #     infoLabels['year']= 1988
+        if 'Akira' in title:
+            movie = True
+            infoLabels['year']= 1988
+
+        if 'Dragon Ball Super Movie' in title:
+            movie = True
+            infoLabels['year'] = 2019
+
 
         itemlist.append(
             Item(channel=item.channel,
@@ -213,7 +187,7 @@ def episodios(item):
                  fanart=item.thumbnail,
                  thumbnail=item.thumbnail))
 
-    if((len(itemlist) == 1 and 'Movie' in itemlist[0].title) or movie):
+    if(((len(itemlist) == 1 and 'Movie' in itemlist[0].title) or movie) and item.contentType!='movie'):
         item.url = itemlist[0].url
         item.contentType = 'movie'
         return findvideos(item)
@@ -235,6 +209,7 @@ def findvideos(item):
 
     if(item.contentType == 'movie'):
         episodes = episodios(item)
+
         if(len(episodes)>0):
             item.url = episodes[0].url
 
@@ -245,23 +220,11 @@ def findvideos(item):
     url = scrapertools.find_single_match(data, patron)
     data = httptools.downloadpage(url).data
 
-    # patron = r""""""
-    # matches = re.compile(patron, re.DOTALL).findall(data)
-    # for video in matches:
-    #     itemlist.append(
-    #         Item(
-    #             channel=item.channel,
-    #             action="play",
-    #             fulltitle=item.fulltitle,
-    #             title="".join([item.title, ' ', support.typo(video.title, 'color kod []')]),
-    #             url=video,
-    #             contentType=item.contentType,
-    #             folder=False))
     itemlist = support.server(item, data=data)
 
-    # itemlist = filtertools.get_links(itemlist, item, list_language)
-
+    if item.contentType == 'movie':
+        support.videolibrary(itemlist, item, 'color kod')
 
     # Controlla se i link sono validi
     # if checklinks:
    #     itemlist = servertools.check_list_links(itemlist, checklinks_number)
@@ -279,16 +242,13 @@ def ultimiep(item):
     logger.info(item.channel + "ultimiep")
     itemlist = []
 
-    post = "page=%s" % item.extra if item.extra else None
-    logger.debug(post)
-    logger.debug(item.url)
+    post = "page=%s" % item.args['page'] if item.args and item.args['page'] else None
+
     data = httptools.downloadpage(
         item.url, post=post, headers={
             'X-Requested-With': 'XMLHttpRequest'
         }).data
-    logger.debug(data)
-
 
     patron = r"""[^\s*"""
     patron += r"""(.+?)\s*"""
     patron += r"""(.+?)"""
@@ -298,6 +258,7 @@
         scrapedtitle1 = cleantitle(scrapedtitle1)
         scrapedtitle2 = cleantitle(scrapedtitle2)
         scrapedtitle = scrapedtitle1 + ' - ' + scrapedtitle2 + ''
+
         itemlist.append(
             Item(channel=item.channel,
                  contentType="tvshow",
@@ -313,16 +274,15 @@
     # Pagine
     patronvideos = r'data-page="(\d+)" title="Next">Pagina Successiva'
     next_page = scrapertools.find_single_match(data, patronvideos)
-
     if next_page:
         itemlist.append(
             Item(
                 channel=item.channel,
                 action="ultimiep",
                 title=support.typo(config.get_localized_string(30992), 'color kod bold'),
-                url=host + "/fetch_pages?request=episodios",
+                url=item.url,
                 thumbnail= support.thumb(),
-                extra=next_page,
+                args={'page':next_page},
                 folder=True))
 
     return itemlist
@@ -362,42 +322,73 @@
 # ================================================================================================================
 # ----------------------------------------------------------------------------------------------------------------
-def search_anime(item):
-    logger.info(item.channel + " search_anime")
+def search_anime(item, texto):
+    logger.info(item.channel + " search_anime: "+texto)
     itemlist = []
 
-    data = httptools.downloadpage(host + "/animelist?load_all=1").data
-    data = scrapertools.decodeHtmlentities(data)
+    # data = httptools.downloadpage(host + "/animelist?load_all=1").data
+    # data = scrapertools.decodeHtmlentities(data)
+    #
+    # texto = texto.lower().split('+')
+    #
+    # patron = r']*?>[^>]*?>(.+?)<'
+    # matches = re.compile(patron, re.DOTALL).findall(data)
+    #
+    # for scrapedurl, scrapedtitle in [(scrapedurl, scrapedtitle)
+    #                                  for scrapedurl, scrapedtitle in matches
+    #                                  if all(t in scrapedtitle.lower()
+    #                                         for t in texto)]:
+    #
+    #     title = cleantitle(scrapedtitle).replace('(ita)','(ITA)')
+    #     showtitle = title
+    #     if '(ITA)' in title:
+    #         title = title.replace('(ITA)','').strip()
+    #         showtitle = title
+    #         title += ' '+support.typo(' [ITA] color kod')
+    #
+    #     itemlist.append(
+    #         Item(
+    #             channel=item.channel,
+    #             contentType="episode",
+    #             action="episodios",
+    #             title=title,
+    #             url=scrapedurl,
+    #             fulltitle=title,
+    #             show=showtitle,
+    #             thumbnail=""))
+    #
+    # tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
 
-    texto = item.url.lower().split('+')
+    data = httptools.downloadpage(host + "/index.php?search=1&key=%s" % texto).data
+    jsondata = jsontools.load(data)
 
-    patron = r']*?>[^>]*?>(.+?)<'
-    matches = re.compile(patron, re.DOTALL).findall(data)
+    for title in jsondata:
+        data = str(httptools.downloadpage("%s/templates/header?check=1" % host, post="typeahead=%s" % title).data)
 
-    for scrapedurl, scrapedtitle in [(scrapedurl, scrapedtitle)
-                                     for scrapedurl, scrapedtitle in matches
-                                     if all(t in scrapedtitle.lower()
-                                            for t in texto)]:
-
-        title = cleantitle(scrapedtitle).replace('(ita)','(ITA)')
-        showtitle = title
-        if '(ITA)' in title:
-            title = title.replace('(ITA)','').strip()
+        if 'Anime non esistente' in data:
+            continue
+        else:
+            title = title.replace('(ita)','(ITA)')
             showtitle = title
-            title += ' '+support.typo(' [ITA] color kod')
+            if '(ITA)' in title:
+                title = title.replace('(ITA)', '').strip()
+                showtitle = title
+                title += ' ' + support.typo(' (ITA)')
 
-        itemlist.append(
-            Item(
-                channel=item.channel,
-                contentType="episode",
-                action="episodios",
-                title=title,
-                url=scrapedurl,
-                fulltitle=title,
-                show=showtitle,
-                thumbnail=""))
+            url = "%s/anime/%s" % (host, data)
+            logger.debug(title)
+            logger.debug(url)
 
-    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+            itemlist.append(
+                Item(
+                    channel=item.channel,
+                    contentType="episode",
+                    action="episodios",
+                    title=title,
+                    url=url,
+                    fulltitle=title,
+                    show=showtitle,
+                    thumbnail=""))
 
     return itemlist
@@ -408,10 +399,9 @@ def search_anime(item):
 def search(item, texto):
     logger.info(item.channel + " search")
     itemlist = []
-    item.url = texto
 
     try:
-        return search_anime(item)
+        return search_anime(item, texto)
 
     except:
         import sys
diff --git a/channels/cineblog01.py b/channels/cineblog01.py
index b6859b2d..a2cb2401 100644
--- a/channels/cineblog01.py
+++ b/channels/cineblog01.py
@@ -105,6 +105,7 @@ def newest(categoria):
         findhost()
         itemlist = []
         item = Item()
+        item.contentType = 'movie'
         item.url = host + '/lista-film-ultimi-100-film-aggiunti/'
         return support.scrape(item, r']+)>([^<([]+)(?:\[([A-Z]+)\])?\s\(([0-9]{4})\)<\/a>',
                               ['url', 'title', 'quality', 'year'],
diff --git a/channels/fastsubita.json b/channels/fastsubita.json
index dead1163..2db8f206 100644
--- a/channels/fastsubita.json
+++ b/channels/fastsubita.json
@@ -4,8 +4,8 @@
     "language": ["ita"],
     "active": true,
     "adult": false,
-    "thumbnail": "http://fastsubita.com/wp-content/uploads/2017/10/Untitled-222255xxx.jpg",
-    "banner": "http://fastsubita.com/wp-content/uploads/2017/10/Untitled-222255xxx.jpg",
+    "thumbnail": "fastsubita.png",
+    "banner": "fastsubita.png",
     "categories": ["tvshow", "vosi"],
     "settings": [
         {
diff --git a/channels/fastsubita.py b/channels/fastsubita.py
index 9318a714..c8f575f7 100644
--- a/channels/fastsubita.py
+++ b/channels/fastsubita.py
@@ -6,7 +6,8 @@
 
 import re
 
-from core import scrapertools, httptools, tmdb
+import channelselector
+from core import scrapertools, httptools, tmdb, support
 from core.item import Item
 from platformcode import config, logger
 from specials import autoplay
@@ -44,8 +45,8 @@ def mainlist(item):
     support.menu(itemlist, 'Serie TV bold', 'lista_serie', host,'tvshow')
     support.menu(itemlist, 'Novità submenu', 'pelicuals_tv', host,'tvshow')
     support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', host,'tvshow',args=['serie'])
-
     support.menu(itemlist, 'Cerca', 'search', host,'tvshow')
+
     autoplay.init(item.channel, list_servers, list_quality)
     autoplay.show_option(item.channel, itemlist)
@@ -113,24 +114,31 @@ def pelicuals_tv(item):
         episode = scrapertools.find_multiple_matches(scrapedtitle, r'((\d*)x(\d*))')[0]
         scrapedtitle = scrapedtitle.replace(scraped_1, "")
 
+        infoLabels = {}
+        infoLabels['season'] = episode[1]
+        infoLabels['episode'] = episode[2]
 
         if "http:" in scrapedurl:
             scrapedurl = scrapedurl
         else:
             scrapedurl = "http:" + scrapedurl
 
+        title = scraped_1+" - "+infoLabels['season']+"x"+infoLabels['episode']+" Sub-ITA"
+
         itemlist.append(
             Item(channel=item.channel,
                  action="findvideos",
                  contentTpye="tvshow",
-                 title=scraped_1 + " " + scrapedtitle,
-                 fulltitle=scraped_1 + " " + scrapedtitle,
+                 title=title,
+                 fulltitle=title,
                  url=scrapedurl,
                  thumbnail=scrapedthumbnail,
                  plot=scrapedplot,
                  show=scraped_1,
                  extra=item.extra,
-                 contentSerieName=scraped_1+" ("+episode[0]+" Sub-Ita)",
+                 contentSerieName=scraped_1,
+                 contentLanguage='Sub-ITA',
+                 infoLabels=infoLabels,
                  folder=True))
 
     tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
@@ -380,7 +388,7 @@ def episodios(item,itemlist = []):
         infoLabels = {}
         infoLabels['season'] = season
         infoLabels['episode'] = episode[2]
-        title = infoLabels['season']+'x'+infoLabels['episode']
+        title = infoLabels['season']+'x'+infoLabels['episode']+" Sub-ITA"
 
         if "http:" not in scrapedurl:
             scrapedurl = "http:" + scrapedurl
diff --git a/channels/seriehd.json b/channels/seriehd.json
index 2d5a19b2..ecd59d5c 100644
--- a/channels/seriehd.json
+++ b/channels/seriehd.json
@@ -4,8 +4,8 @@
     "active": true,
     "adult": false,
     "language": ["ita"],
-    "thumbnail": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/seriehd.png",
-    "banner": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/seriehd.png",
+    "thumbnail": "seriehd.png",
+    "banner": "seriehd.png",
     "categories": ["tvshow"],
     "settings": [
         {
diff --git a/channels/serietvsubita.json b/channels/serietvsubita.json
index 2c2daf83..fe08633c 100644
--- a/channels/serietvsubita.json
+++ b/channels/serietvsubita.json
@@ -4,8 +4,8 @@
     "active": true,
     "adult": false,
     "language": ["ita"],
-    "thumbnail": "http://serietvsubita.xyz/wp-content/uploads/2012/07/logo.jpg",
-    "banner": "http://serietvsubita.xyz/wp-content/uploads/2012/07/logo.jpg",
+    "thumbnail": "serietvsubita.png",
+    "banner": "serietvsubita.png",
     "categories": ["tvshow"],
     "settings": [
         {
diff --git a/channels/serietvsubita.py b/channels/serietvsubita.py
index 1b43e617..3ce72d6b 100644
--- a/channels/serietvsubita.py
+++ b/channels/serietvsubita.py
@@ -52,7 +52,7 @@ def mainlist(item):
 # ----------------------------------------------------------------------------------------------------------------
 def cleantitle(scrapedtitle):
     scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
-    scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('×','x').replace('Game of Thrones –','')
+    scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('×','x').replace('Game of Thrones –','').replace('In The Dark 2019','In The Dark (2019)').strip()
     year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
     if year:
         scrapedtitle = scrapedtitle.replace('(' + year + ')', '')
@@ -195,7 +195,7 @@ def episodios(item, itemlist=[]):
         infoLabels = {}
         infoLabels['season'] = season
         infoLabels['episode'] = episode
-
+        fullepisode+=' Sub-ITA'
         itemlist.append(
             Item(channel=item.channel,
                  extra=item.extra,
@@ -293,23 +293,27 @@ def peliculas_tv(item):
         scrapedthumbnail = ""
         scrapedplot = ""
         scrapedtitle = cleantitle(scrapedtitle)
+        infoLabels = {}
         episode = scrapertools.find_multiple_matches(scrapedtitle, r'((\d*)x(\d*))')[0]
         title = scrapedtitle.split(" S0")[0].strip()
         title = title.split(" S1")[0].strip()
         title = title.split(" S2")[0].strip()
-
+        infoLabels['season'] = episode[1]
+        infoLabels['episode'] = episode[2]
         itemlist.append(
             Item(channel=item.channel,
                  action="findvideos",
                  fulltitle=scrapedtitle,
                  show=scrapedtitle,
-                 title=scrapedtitle,
+                 title=title+" - "+episode[0]+" Sub-ITA",
                  url=scrapedurl,
                  thumbnail=scrapedthumbnail,
-                 contentSerieName=title+" ("+episode[0]+" Sub-Ita)",
+                 contentSerieName=title,
+                 contentLanguage='Sub-ITA',
                  plot=scrapedplot,
+                 infoLabels=infoLabels,
                  folder=True))
 
     tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
diff --git a/channels/serietvu.json b/channels/serietvu.json
index a7612fde..abb5dbd5 100644
--- a/channels/serietvu.json
+++ b/channels/serietvu.json
@@ -4,8 +4,8 @@
     "active": true,
     "adult": false,
     "language": ["ita"],
-    "thumbnail": "https://www.serietvu.club/wp-content/themes/gurarjbar/images/logo.png",
-    "banner": "https://www.serietvu.club/wp-content/themes/gurarjbar/images/logo.png",
+    "thumbnail": "serietvu.png",
+    "banner": "serietvu.png",
     "categories": ["tvshow"],
     "settings": [
         {
diff --git a/channels/serietvu.py b/channels/serietvu.py
index b25a7bec..d13b3451 100644
--- a/channels/serietvu.py
+++ b/channels/serietvu.py
@@ -52,7 +52,7 @@ def mainlist(item):
 # ----------------------------------------------------------------------------------------------------------------
 def cleantitle(scrapedtitle):
     scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
-    scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('Game of Thrones –','').replace('Flash 2014','Flash')
+    scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('– Il Trono di Spade','').replace('Flash 2014','Flash')
     year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
     if year:
         scrapedtitle = scrapedtitle.replace('(' + year + ')', '')
@@ -121,10 +121,14 @@ def episodios(item):
         matches = re.compile(patron, re.DOTALL).findall(blocco)
         for scrapedextra, scrapedurl, scrapedimg, scrapedtitle in matches:
             number = scrapertools.decodeHtmlentities(scrapedtitle.replace("Episodio", "")).strip()
+
+            title = value + "x" + number.zfill(2)
+
+
             itemlist.append(
                 Item(channel=item.channel,
                      action="findvideos",
-                     title=value + "x" + number.zfill(2),
+                     title=title,
                      fulltitle=scrapedtitle,
                      contentType="episode",
                      url=scrapedurl,
@@ -145,6 +149,7 @@ def findvideos(item):
     support.log(item.channel + " findvideos")
 
     itemlist = support.server(item, data=item.url)
+
     # itemlist = filtertools.get_links(itemlist, item, list_language)
 
     # Controlla se i link sono validi
@@ -192,6 +197,45 @@ def findepisodevideo(item):
 def latestep(item):
     support.log(item.channel + " latestep")
     itemlist = []
+    titles = []
+
+    #recupero gli episodi in home nella sezione Ultimi episodi aggiunti
+    data = httptools.downloadpage(host, headers=headers).data
+
+    block = scrapertools.find_single_match(data,r"Ultimi episodi aggiunti.*?")
+    regex = r'(.*?)\((\d*?)x(\d*?)\s(Sub-Ita|Ita)'
+    matches = re.compile(regex, re.DOTALL).findall(block)
+
+    for scrapedurl, scrapedimg, scrapedtitle, scrapedseason, scrapedepisode, scrapedlanguage in matches:
+        infoLabels = {}
+        year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
+        if year:
+            infoLabels['year'] = year
+        infoLabels['episode'] = scrapedepisode
+        infoLabels['season'] = scrapedseason
+        episode = scrapedseason+"x"+scrapedepisode
+
+        scrapedtitle = cleantitle(scrapedtitle)
+        title = scrapedtitle+" - "+episode
+        contentlanguage = ""
+        if scrapedlanguage.strip().lower() != 'ita':
+            title +=" Sub-ITA"
+            contentlanguage = 'Sub-ITA'
+
+        titles.append(title)
+        itemlist.append(
+            Item(channel=item.channel,
+                 action="findepisodevideo",
+                 title=title,
+                 fulltitle=title,
+                 url=scrapedurl,
+                 extra=[[scrapedseason,scrapedepisode]],
+                 thumbnail=scrapedimg,
+                 contentSerieName=scrapedtitle,
+                 contentLanguage=contentlanguage,
+                 contentType='episode',
+                 infoLabels=infoLabels,
+                 folder=True))
 
     data = httptools.downloadpage(item.url, headers=headers).data
@@ -209,23 +253,38 @@
         infoLabels['tvshowtitle'] = scrapedtitle
         episodio = re.compile(r'(\d+)x(\d+)', re.DOTALL).findall(scrapedinfo)
 
-        title = "%s %s" % (scrapedtitle, scrapedinfo)
+        infoLabels['episode'] = episodio[0][1]
+        infoLabels['season'] = episodio[0][0]
+
+        episode = infoLabels['season'] + "x" + infoLabels['episode']
+        title = "%s - %s" % (scrapedtitle, episode)
+        title = title.strip()
+        contentlanguage = ""
+        if 'sub-ita' in scrapedinfo.lower():
+            title+=" Sub-ITA"
+            contentlanguage = 'Sub-ITA'
+
+        if title in titles: continue
 
         itemlist.append(
             Item(channel=item.channel,
                  action="findepisodevideo",
                  title=title,
-                 fulltitle=scrapedtitle,
+                 fulltitle=title,
                  url=scrapedurl,
                  extra=episodio,
                  thumbnail=scrapedimg,
-                 show=scrapedtitle,
-                 contentTitle=scrapedtitle,
-                 contentSerieName=title,
+                 contentSerieName=scrapedtitle,
+                 contentLanguage=contentlanguage,
                  infoLabels=infoLabels,
+                 contentType='episode',
                  folder=True))
+
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+    # logger.debug("".join(map(str,itemlist)))
+
     return itemlist
@@ -290,7 +349,7 @@ def categorie(item):
             Item(channel=item.channel,
                  action="lista_serie",
                  title=scrapedtitle,
-                 contentType="tv",
+                 contentType="tvshow",
                  url="%s%s" % (host, scrapedurl),
                  thumbnail=item.thumbnail,
                  folder=True))
diff --git a/core/scrapertools.py b/core/scrapertools.py
index 58dd8ea6..d5691a20 100644
--- a/core/scrapertools.py
+++ b/core/scrapertools.py
@@ -365,6 +365,9 @@ def get_season_and_episode(title):
     @return: Numero de temporada y episodio en formato "1x01" o cadena vacia si no se han encontrado
     """
     filename = ""
+    # 4l3x87 - fix for series example 9-1-1
+    original_title = title
+    title = title.replace('9-1-1','')
 
     patrons = ["(\d+)\s*[x-]\s*(\d+)", "(\d+)\s*×\s*(\d+)", "(?:s|t)(\d+)e(\d+)",
                "(?:season|temp|stagione\w*)\s*(\d+)\s*(?:capitulo|epi|episode|episodio\w*)\s*(\d+)"]
@@ -372,6 +375,7 @@
     for patron in patrons:
         try:
             matches = re.compile(patron, re.I).search(title)
+
             if matches:
                 if len(matches.group(1)) == 1:
                     filename = matches.group(1) + "x" + matches.group(2).zfill(2)
@@ -381,6 +385,6 @@
         except:
             pass
 
-    logger.info("'" + title + "' -> '" + filename + "'")
+    logger.info("'" + original_title + "' -> '" + filename + "'")
 
     return filename
diff --git a/core/tmdb.py b/core/tmdb.py
index d71371b1..c57b1df1 100644
--- a/core/tmdb.py
+++ b/core/tmdb.py
@@ -324,8 +324,9 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda=def_lang, lock=None
             __leer_datos(otmdb_global)
 
-            if lock and lock.locked():
-                lock.release()
+            # 4l3x87 - fix for overlap infoLabels if there is episode or season
+            # if lock and lock.locked():
+            #     lock.release()
 
             if item.infoLabels['episode']:
                 try:
@@ -354,6 +355,10 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda=def_lang, lock=None
                     item.infoLabels['rating'] = episodio['episodio_vote_average']
                     item.infoLabels['votes'] = episodio['episodio_vote_count']
 
+                    # 4l3x87 - fix for overlap infoLabels if there is episode or season
+                    if lock and lock.locked():
+                        lock.release()
+
                     return len(item.infoLabels)
 
             else:
@@ -374,8 +379,17 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda=def_lang, lock=None
                 if temporada['poster_path']:
                     item.infoLabels['poster_path'] = 'http://image.tmdb.org/t/p/original' + temporada['poster_path']
                     item.thumbnail = item.infoLabels['poster_path']
+
+                # 4l3x87 - fix for overlap infoLabels if there is episode or season
+                if lock and lock.locked():
+                    lock.release()
+
                 return len(item.infoLabels)
 
+        # 4l3x87 - fix for overlap infoLabels if there is episode or season
+        if lock and lock.locked():
+            lock.release()
+
     # Buscar...
     else:
         otmdb = copy.copy(otmdb_global)
diff --git a/resources/media/channels/banner/fastsubita.png b/resources/media/channels/banner/fastsubita.png
new file mode 100644
index 00000000..2d226718
Binary files /dev/null and b/resources/media/channels/banner/fastsubita.png differ
diff --git a/resources/media/channels/banner/seriehd.png b/resources/media/channels/banner/seriehd.png
new file mode 100644
index 00000000..027c0a15
Binary files /dev/null and b/resources/media/channels/banner/seriehd.png differ
diff --git a/resources/media/channels/banner/serietvsubita.png b/resources/media/channels/banner/serietvsubita.png
new file mode 100644
index 00000000..62f21d1c
Binary files /dev/null and b/resources/media/channels/banner/serietvsubita.png differ
diff --git a/resources/media/channels/banner/serietvu.png b/resources/media/channels/banner/serietvu.png
new file mode 100644
index 00000000..9e1c0929
Binary files /dev/null and b/resources/media/channels/banner/serietvu.png differ
diff --git a/resources/media/channels/thumb/fastsubita.png b/resources/media/channels/thumb/fastsubita.png
new file mode 100644
index 00000000..d1d7be10
Binary files /dev/null and b/resources/media/channels/thumb/fastsubita.png differ
diff --git a/resources/media/channels/thumb/seriehd.png b/resources/media/channels/thumb/seriehd.png
new file mode 100644
index 00000000..e310b89f
Binary files /dev/null and b/resources/media/channels/thumb/seriehd.png differ
diff --git a/resources/media/channels/thumb/serietvsubita.png b/resources/media/channels/thumb/serietvsubita.png
new file mode 100644
index 00000000..38709e2a
Binary files /dev/null and b/resources/media/channels/thumb/serietvsubita.png differ
diff --git a/resources/media/channels/thumb/serietvu.png b/resources/media/channels/thumb/serietvu.png
new file mode 100644
index 00000000..3835135f
Binary files /dev/null and b/resources/media/channels/thumb/serietvu.png differ
diff --git a/specials/news.py b/specials/news.py
index 179fb422..734ddef1 100644
--- a/specials/news.py
+++ b/specials/news.py
@@ -396,6 +396,10 @@ def get_title(item):
         if not item.contentSeason:
             item.contentSeason = '1'
         title = "%s - %sx%s" % (title, item.contentSeason, str(item.contentEpisodeNumber).zfill(2))
+        #4l3x87 - fix to add Sub-ITA in newest
+        if item.contentLanguage:
+            title+=" "+item.contentLanguage
+
     elif item.contentTitle:  # Si es una pelicula con el canal adaptado
         title = item.contentTitle