diff --git a/channels/altadefinizione01.py b/channels/altadefinizione01.py
index dfce8065..9f92b624 100644
--- a/channels/altadefinizione01.py
+++ b/channels/altadefinizione01.py
@@ -153,7 +153,7 @@ def findvideos(item):
         itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
 
     # Requerido para FilterTools
-    itemlist = filtertools.get_links(itemlist, item, list_language)
+    # itemlist = filtertools.get_links(itemlist, item, list_language)
 
     # Requerido para AutoPlay
     autoplay.start(itemlist, item)
diff --git a/channels/altadefinizione01_link.py b/channels/altadefinizione01_link.py
index f422bcf7..3f832c5e 100644
--- a/channels/altadefinizione01_link.py
+++ b/channels/altadefinizione01_link.py
@@ -257,7 +257,7 @@ def findvideos_film(item):
         itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
 
     # Requerido para FilterTools
-    itemlist = filtertools.get_links(itemlist, item, list_language)
+    # itemlist = filtertools.get_links(itemlist, item, list_language)
 
     # Requerido para AutoPlay
     autoplay.start(itemlist, item)
diff --git a/channels/altadefinizioneclick.py b/channels/altadefinizioneclick.py
index 66373661..d6b570a6 100644
--- a/channels/altadefinizioneclick.py
+++ b/channels/altadefinizioneclick.py
@@ -101,7 +101,7 @@ def findvideos(item):
     if __comprueba_enlaces__:
         itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
 
-    itemlist = filtertools.get_links(itemlist, item, list_language)
+    # itemlist = filtertools.get_links(itemlist, item, list_language)
 
     autoplay.start(itemlist, item)
     support.videolibrary(itemlist, item ,'color kod bold')
diff --git a/channels/animeleggendari.py b/channels/animeleggendari.py
index 11fdfe01..0cd8d2ff 100644
--- a/channels/animeleggendari.py
+++ b/channels/animeleggendari.py
@@ -179,7 +179,7 @@ def findvideos(item):
     if __comprueba_enlaces__:
         itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
 
-    itemlist = filtertools.get_links(itemlist, item, list_language)
+    # itemlist = filtertools.get_links(itemlist, item, list_language)
 
     autoplay.start(itemlist, item)
     return itemlist
diff --git a/channels/animespace.py b/channels/animespace.py
index 783ddf88..a77405b7 100644
--- a/channels/animespace.py
+++ b/channels/animespace.py
@@ -247,7 +247,7 @@ def findvideos(item):
 
 
     # Requerido para FilterTools
-    itemlist = filtertools.get_links(itemlist, item, list_language)
+    # itemlist = filtertools.get_links(itemlist, item, list_language)
 
 
     # Requerido para AutoPlay
diff --git a/channels/animeworld.py b/channels/animeworld.py
index 473fb4dc..f79980a1 100644
--- a/channels/animeworld.py
+++ b/channels/animeworld.py
@@ -404,7 +404,7 @@ def findvideos(item):
 
 
     # Requerido para FilterTools
-    itemlist = filtertools.get_links(itemlist, item, list_language)
+    # itemlist = filtertools.get_links(itemlist, item, list_language)
 
 
     # Requerido para AutoPlay
diff --git a/channels/casacinema.py b/channels/casacinema.py
index 2c299acf..46b06a34 100644
--- a/channels/casacinema.py
+++ b/channels/casacinema.py
@@ -355,7 +355,7 @@ def findvideos(item):
 
 
     # Requerido para FilterTools
-    itemlist = filtertools.get_links(itemlist, item, list_language)
+    # itemlist = filtertools.get_links(itemlist, item, list_language)
 
 
    # Requerido para AutoPlay
diff --git a/channels/cineblog01.py b/channels/cineblog01.py
index ec7f3614..c721a0d2 100644
--- a/channels/cineblog01.py
+++ b/channels/cineblog01.py
@@ -16,6 +16,7 @@ from platformcode import logger, config
 host = ""
 headers = ""
 
+
 def findhost():
     global host, headers
     permUrl = httptools.downloadpage('https://www.cb01.uno/', follow_redirects=False).headers
@@ -30,8 +31,10 @@ list_quality = ['HD', 'default']
 __comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'cineblog01')
 __comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'cineblog01')
 
-#esclusione degli articoli 'di servizio'
-blacklist = ['BENVENUTI', 'Richieste Serie TV', 'CB01.UNO ▶ TROVA L’INDIRIZZO UFFICIALE ', 'Aggiornamento Quotidiano Serie TV', 'OSCAR 2019 â–¶ CB01.UNO: Vota il tuo film preferito! 🎬', 'Openload: la situazione. Benvenuto Verystream']
+# esclusione degli articoli 'di servizio'
+blacklist = ['BENVENUTI', 'Richieste Serie TV', 'CB01.UNO ▶ TROVA L’INDIRIZZO UFFICIALE ',
+             'Aggiornamento Quotidiano Serie TV', 'OSCAR 2019 â–¶ CB01.UNO: Vota il tuo film preferito! 🎬',
+             'Openload: la situazione. Benvenuto Verystream']
 
 
 def mainlist(item):
@@ -152,7 +155,7 @@ def peliculas(item):
         listGroups = ['thumb', 'url', 'title', 'quality', 'year', 'genre', 'duration', 'plot']
         action = 'findvideos'
     else:
- patron = r'div class="card-image">.*?([^<[(]+)<\/a>.*?([^<>0-9(]+)\(([0-9]{4}).*?<\/(p|div)>([^<>]+)' + patron = r'div class="card-image">.*?([^<[(]+)<\/a>.*?([^<>0-9(]+)\(([0-9]{4}).*?(.*?)(.*?)<\/article>', - r'
[^<>]*?
(.*?)
\[riduci\]
'], - patron='

([0-9]+(?:×|×)[0-9]+)(.*?)(?:<\/p>|[^<>]*?.*?)

\[riduci\]
') + + for match in matches: + support.log(match) + blocks = scrapertoolsV2.find_multiple_matches(match, '(?:

)(.*?)(?:

|.*?STAGIONE\s+\d+([^<>]+)').strip() + + for block in blocks: + episode = scrapertoolsV2.find_single_match(block, r'([0-9]+(?:×|×)[0-9]+)').strip() + seasons_n = scrapertoolsV2.find_single_match(block, r'STAGIONE\s+\d+([^<>]+)').strip() + + if seasons_n: + season = seasons_n + + if not episode: continue + + season = re.sub(r'–|–', "-", season) + itemlist.append( + Item(channel=item.channel, + action="findvideos", + contentType=item.contentType, + title="[B]" + episode + "[/B] " + season, + fulltitle=episode + " " + season, + show=episode + " " + season, + url=block, + extra=item.extra, + thumbnail=item.thumbnail, + infoLabels=item.infoLabels + )) + + support.videolibrary(itemlist, item) + + return itemlist def findvideos(item): @@ -177,7 +214,7 @@ def findvideos(item): def load_links(itemlist, re_txt, color, desc_txt, quality=""): streaming = scrapertoolsV2.find_single_match(data, re_txt).replace('"', '') - support.log('STREAMING=',streaming) + support.log('STREAMING=', streaming) patron = ']+>([^<]+)<' matches = re.compile(patron, re.DOTALL).findall(streaming) for scrapedurl, scrapedtitle in matches: @@ -238,7 +275,7 @@ def findvideos(item): # Requerido para FilterTools - itemlist = filtertools.get_links(itemlist, item, list_language) + # itemlist = filtertools.get_links(itemlist, item, list_language) # Requerido para AutoPlay @@ -253,7 +290,7 @@ def findvid_serie(item): def load_vid_series(html, item, itemlist, blktxt): logger.info('HTML' + html) patron = ']+>(.*?)' - # Estrae i contenuti + # Estrae i contenuti matches = re.compile(patron, re.DOTALL).finditer(html) for match in matches: scrapedurl = match.group(1) @@ -310,6 +347,7 @@ def findvid_serie(item): return itemlist + def play(item): support.log() itemlist = [] diff --git a/channels/cinehindi.py b/channels/cinehindi.py index 20f3ac0d..57505b18 100644 --- a/channels/cinehindi.py +++ b/channels/cinehindi.py @@ -144,7 +144,7 @@ def findvideos(item): itemlist.append(itemlist1[i]) tmdb.set_infoLabels(itemlist, True) # Requerido para FilterTools - itemlist = filtertools.get_links(itemlist, item, list_language) + # itemlist = filtertools.get_links(itemlist, item, list_language) # Requerido para AutoPlay diff --git a/channels/cinemalibero.py b/channels/cinemalibero.py index 8a429939..8b18fd60 100644 --- a/channels/cinemalibero.py +++ b/channels/cinemalibero.py @@ -226,7 +226,7 @@ def findvideos(item): # Questa def. 
deve sempre essere nominata findvideos itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__) # Necessario per FilterTools - itemlist = filtertools.get_links(itemlist, item, list_language) + # itemlist = filtertools.get_links(itemlist, item, list_language) # Necessario per AutoPlay autoplay.start(itemlist, item) diff --git a/channels/cinemastreaming.py b/channels/cinemastreaming.py index 84d3df33..ce7834c4 100644 --- a/channels/cinemastreaming.py +++ b/channels/cinemastreaming.py @@ -4,77 +4,187 @@ # ------------------------------------------------------------ import re -from channels import filtertools -from core import scrapertools, servertools, httptools +from channels import filtertools, support, autoplay +from core import scrapertools, servertools, httptools, scrapertoolsV2 from core.item import Item -from platformcode import config -from core import tmdb -host = 'https://cinemastreaming.info' +host = 'https://cinemastreaming.icu' + +IDIOMAS = {'Italiano': 'IT'} +list_language = IDIOMAS.values() +list_servers = ['openload', 'streamango'] +list_quality = ['1080p', '1080p 3D', 'SD', 'CAM', 'default'] headers = [['Referer', host]] + def mainlist(item): - log() + support.log() # Menu Principale + itemlist = [] + support.menu(itemlist, 'Film bold', 'peliculas', host + '/film/') + support.menu(itemlist, 'Per genere submenu', 'menu', host, args="Film per Genere") + support.menu(itemlist, 'Anime bold', 'peliculas', host + '/category/anime/') + support.menu(itemlist, 'Serie TV bold', 'peliculas', host + '/serie-tv/', contentType='episode') + support.menu(itemlist, 'Ultime Uscite submenu', 'peliculas', host + "/stagioni/", "episode", args='latests') + support.menu(itemlist, 'Ultimi Episodi submenu', 'peliculas_latest_ep', host + "/episodi/", "episode", args='lateste') + support.menu(itemlist, '[COLOR blue]Cerca...[/COLOR]', 'search') - itemlist = [Item(channel = item.channel, - contentType = 'movie', - title = 'Film', - url = host + '/film/', - action = 'video', - thumbnail = '', - fanart = '' - ), - ] - - return itemlist - -def video(item): - log() - - itemlist = [] # Creo una lista Vuota - - # Carica la pagina - data = httptools.downloadpage(item.url, headers=headers).data - block = scrapertools.find_single_match(data, r'
(.*?)<\/main>') - block = re.sub('\t|\n', '', block) - - patron = r'.*?.*?src="([^"]+)".*?>.*?

([^<]+)<\/h3>(.*?)<\/article>' - matches = re.compile(patron, re.DOTALL).findall(block) - - for scrapedurl, scrapedthumb, scrapedtitle, scrapedinfo in matches: - log('Info Block', scrapedinfo) - patron = r'(.*?)<\/span>.*?(.*?)<\/span>.*?

(.*?)<\/p>.*?

(.*?)<\/p>

.*?(.*?)<\/a>.*?

(.*?)<\/p>' - info = re.compile(patron, re.DOTALL).findall(scrapedinfo) - for year, rating, plot, genre, director, cast in info: - genre = scrapertools.find_multiple_matches(genre, r'(.*?)<\/a>') - cast = scrapertools.find_multiple_matches(cast, r'(.*?)<\/a>') - - infoLabels = {} - infoLabels['Year'] = year - infoLabels['Rating'] = rating - infoLabels['Plot'] = plot - infoLabels['Genre'] = genre - infoLabels['Director'] = director - infoLabels['Cast'] = cast - - itemlist.append( - Item(channel=item.channel, - action="findvideos", - contentType=item.contentType, - title=scrapedtitle, - fulltitle=scrapedtitle, - url=scrapedurl, - thumbnail=scrapedthumb, - infoLabels = infoLabels, - show=scrapedtitle)) + autoplay.init(item.channel, list_servers, list_quality) + autoplay.show_option(item.channel, itemlist) return itemlist -def log(stringa1="", stringa2=""): - import inspect, os - from platformcode import logger - logger.info("[" + os.path.basename(__file__) + "] - [" + inspect.stack()[1][3] + "] " + str(stringa1) + str(stringa2)) \ No newline at end of file +def peliculas(item): + support.log() + list_groups = ["url", "thumb", "title", "year", "rating", "duration"] + + patron = r'([^<]+).*?Year">' + + if item.args == "latests": + patron += r'([^<]+)' + else: + patron += r'(\d{4}).*?AAIco-star.*?>([^<]+).*?AAIco-access_time">([^<]+).*?Qlty' + + patron_next = r'page-numbers current.*?href="([^"]+)"' + + if item.contentType == "movie": + patron += r'\">([^<]+)' + list_groups.append("quality") + + action = "findvideos" if item.contentType == "movie" else "episodios" + + return support.scrape(item, patron, list_groups, patronNext=patron_next, action=action) + + +def peliculas_latest_ep(item): + + patron = r'([^<]+)<\/span>([^<]+).*?([^<]+)' + + data = httptools.downloadpage(item.url).data + + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + for scrapedurl, scrapedthumbnail, scrapednum, scrapedep, scrapedtitle in matches: + itemlist.append( + Item(channel=item.channel, + action="findvideos", + contentType=item.contentType, + title="[B]" + scrapednum + "[/B]" + scrapedep + " - " + scrapedtitle, + fulltitle=scrapedep + " " + scrapedtitle, + show=scrapedep + " " + scrapedtitle, + url=scrapedurl, + extra=item.extra, + thumbnail="http:" + scrapedthumbnail, + infoLabels=item.infoLabels + )) + + support.nextPage(itemlist, item, data, r'page-numbers current.*?href="([^"]+)"') + + return itemlist + + +def peliculas_menu(item): + itemlist = peliculas(item) + return itemlist[:-1] + + +def episodios(item): + patron = r'(.*?)<\/a>.*?>\d{4}<' + list_groups = ["url", "title", "year"] + + itemlist = support.scrape(item, patron, list_groups) + + for itm in itemlist: + fixedtitle = scrapertools.get_season_and_episode(itm.url) + itm.title = fixedtitle + " - " + itm.title + itm.fulltitle = fixedtitle + " - " + itm.fulltitle + + return itemlist + + +def menu(item): + patron_block = r'

' + patron = r'menu-category-list">([^<]+)<' + list_groups = ["url", "title"] + + return support.scrape(item, patron, list_groups, blacklist="Anime", action="peliculas_menu", patron_block=patron_block) + + +def search(item, texto): + support.log("s=", texto) + item.url = host + "/?s=" + texto + try: + return peliculas(item) + # Continua la ricerca in caso di errore + except Exception, e: + import traceback + traceback.print_stack() + support.log(str(e)) + return [] + + +def newest(categoria): + support.log("newest" + categoria) + itemlist = [] + item = Item() + try: + if categoria == "series": + item.url = host + "/episodi/" + item.action = "peliculas" + item.args = "lateste" + item.contentType = "episode" + itemlist = peliculas(item) + + if itemlist[-1].action == "peliculas": + itemlist.pop() + + # Continua la ricerca in caso di errore + except Exception, e: + import traceback + traceback.print_stack() + support.log(str(e)) + return [] + + return itemlist + + +def findvideos(item): + + if item.quality.lower() in ["ended", "canceled", "returning series"]: + return episodios(item) + + itemlist = [] + data = scrapertoolsV2.decodeHtmlentities(httptools.downloadpage(item.url).data) + btns = re.compile(r'data-tplayernv="Opt.*?>([^<]+)([^<]+)', re.DOTALL).findall(data) + matches = re.compile(r'[^<]+(.*?)<[^<]+<[^<]+<[^<]+<[^>]+>
(.*?)<[^<]+
[^<]+<[^<]+<[^<]+<[^>]+>[^<]+<[^<]+<[^<]+<[^>]+><[^<]+<[^>]+>:\s*([^<]+)[^<]+<[^<]+[^<]+<[^<]+[^<]+<[^<]+[^<]+[^>]+>:\s*([^<]+)' + # patron = r'
[^<]+(.*?)<[^<]+<[^<]+<[^<]+<[^>]+>
(.*?)<[^<]+
' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedthumbnail, scrapedtitle, scrapedtv, scrapedgender, scrapedyear in matches: + # for scrapedthumbnail, scrapedtitle, scrapedtv in matches: + scrapedurl = "" + scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip() + infoLabels = {} + infoLabels["year"] = scrapedyear + itemlist.append( + Item(channel=item.channel, + action="do_search", + extra=urllib.quote_plus(scrapedtitle) + '{}' + 'movie', + title=scrapedtitle + "[COLOR yellow] " + scrapedtv + "[/COLOR]", + fulltitle=scrapedtitle, + url=scrapedurl, + thumbnail=scrapedthumbnail, + contentTitle=scrapedtitle, + contentType='movie', + infoLabels=infoLabels, + folder=True)) + + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + + return itemlist + + +def do_search(item): + from channels import search + return search.do_search(item) \ No newline at end of file diff --git a/channels/filmsenzalimiti.py b/channels/filmsenzalimiti.py index aa024154..6b51f642 100644 --- a/channels/filmsenzalimiti.py +++ b/channels/filmsenzalimiti.py @@ -183,7 +183,7 @@ def findvideos(item): itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__) # Necessario per FilterTools - itemlist = filtertools.get_links(itemlist, item, list_language) + # itemlist = filtertools.get_links(itemlist, item, list_language) # Necessario per AutoPlay autoplay.start(itemlist, item) diff --git a/channels/filmsenzalimiticc.py b/channels/filmsenzalimiticc.py index befd5c30..0fd0b10f 100644 --- a/channels/filmsenzalimiticc.py +++ b/channels/filmsenzalimiticc.py @@ -244,7 +244,7 @@ def findvideos(item): # Questa def. deve sempre essere nominata findvideos itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__) # Necessario per FilterTools - itemlist = filtertools.get_links(itemlist, item, list_language) + # itemlist = filtertools.get_links(itemlist, item, list_language) # Necessario per AutoPlay autoplay.start(itemlist, item) diff --git a/channels/filtertools.py b/channels/filtertools.py index e9a5f2a5..0195e728 100644 --- a/channels/filtertools.py +++ b/channels/filtertools.py @@ -120,7 +120,7 @@ def context(item, list_language=None, list_quality=None, exist=False): _context = [] if access(): - dict_data = {"title": "FILTRO: Configurar", "action": "config_item", "channel": "filtertools"} + dict_data = {"title": config.get_localized_string(60426), "action": "config_item", "channel": "filtertools"} if list_language: dict_data["list_language"] = list_language if list_quality: @@ -139,10 +139,10 @@ def context(item, list_language=None, list_quality=None, exist=False): if item.action == "play": if not exist: - _context.append({"title": "FILTRO: Añadir '%s'" % item.language, "action": "save_from_context", + _context.append({"title": config.get_localized_string(60427) % item.language, "action": "save_from_context", "channel": "filtertools", "from_channel": item.channel}) else: - _context.append({"title": "FILTRO: Borrar '%s'" % item.language, "action": "delete_from_context", + _context.append({"title": config.get_localized_string(60428) % item.language, "action": "delete_from_context", "channel": "filtertools", "from_channel": item.channel}) return _context @@ -150,7 +150,7 @@ def context(item, list_language=None, list_quality=None, exist=False): def show_option(itemlist, channel, list_language, list_quality): if access(): - itemlist.append(Item(channel=__channel__, title="[COLOR %s]Configurar filtro para series...[/COLOR]" % + 
itemlist.append(Item(channel=__channel__, title=config.get_localized_string(60429) % COLOR.get("parent_item", "auto"), action="load", list_language=list_language, list_quality=list_quality, from_channel=channel)) @@ -377,17 +377,16 @@ def mainlist(channel, list_language, list_quality): idx += 1 name = dict_series.get(tvshow, {}).get(TAG_NAME, tvshow) - activo = " (desactivado)" + activo = config.get_localized_string(60433) if dict_series[tvshow][TAG_ACTIVE]: activo = "" - title = "Configurar [COLOR %s][%s][/COLOR]%s" % (tag_color, name, activo) + title = config.get_localized_string(60434) % (tag_color, name, activo) itemlist.append(Item(channel=__channel__, action="config_item", title=title, show=name, list_language=list_language, list_quality=list_quality, from_channel=channel)) if len(itemlist) == 0: - itemlist.append(Item(channel=channel, action="mainlist", title="No existen filtros, busca una serie y " - "pulsa en menú contextual 'FILTRO: Configurar'")) + itemlist.append(Item(channel=channel, action="mainlist", title=config.get_localized_string(60435))) return itemlist diff --git a/channels/ilgeniodellostreaming.py b/channels/ilgeniodellostreaming.py index 187bec65..3a24cc2e 100644 --- a/channels/ilgeniodellostreaming.py +++ b/channels/ilgeniodellostreaming.py @@ -377,7 +377,7 @@ def findvideos(item): # Requerido para FilterTools - itemlist = filtertools.get_links(itemlist, item, list_language) + # itemlist = filtertools.get_links(itemlist, item, list_language) # Requerido para AutoPlay diff --git a/channels/italiafilmhd.py b/channels/italiafilmhd.py index ceb38433..fd15b1c3 100644 --- a/channels/italiafilmhd.py +++ b/channels/italiafilmhd.py @@ -303,7 +303,7 @@ def findvideos(item): # Requerido para FilterTools - itemlist = filtertools.get_links(itemlist, item, list_language) + # itemlist = filtertools.get_links(itemlist, item, list_language) # Requerido para AutoPlay diff --git a/channels/italiaserie.py b/channels/italiaserie.py index 630d89ca..af216988 100644 --- a/channels/italiaserie.py +++ b/channels/italiaserie.py @@ -163,7 +163,7 @@ def findvideos(item): support.log() itemlist = support.server(item, data=item.url) - itemlist = filtertools.get_links(itemlist, item, list_language) + # itemlist = filtertools.get_links(itemlist, item, list_language) autoplay.start(itemlist, item) diff --git a/channels/mondoserietv.py b/channels/mondoserietv.py index 6a8f156a..272e25ac 100644 --- a/channels/mondoserietv.py +++ b/channels/mondoserietv.py @@ -315,7 +315,7 @@ def findvideos(item): # Requerido para FilterTools - itemlist = filtertools.get_links(itemlist, item, list_language) + # itemlist = filtertools.get_links(itemlist, item, list_language) # Requerido para AutoPlay diff --git a/channels/mundopelis.py b/channels/mundopelis.py index 3e3e8fca..ffd7b920 100644 --- a/channels/mundopelis.py +++ b/channels/mundopelis.py @@ -131,7 +131,7 @@ def findvideos(item): if __comprueba_enlaces__: itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__) # Requerido para FilterTools - itemlist = filtertools.get_links(itemlist, item, list_language) + # itemlist = filtertools.get_links(itemlist, item, list_language) # Requerido para AutoPlay autoplay.start(itemlist, item) diff --git a/channels/piratestreaming.py b/channels/piratestreaming.py index 5fddb2df..0c80d520 100644 --- a/channels/piratestreaming.py +++ b/channels/piratestreaming.py @@ -248,7 +248,7 @@ def findvideos(item): # Requerido para FilterTools - itemlist = filtertools.get_links(itemlist, item, list_language) + # 
itemlist = filtertools.get_links(itemlist, item, list_language) # Requerido para AutoPlay diff --git a/channels/playview.py b/channels/playview.py index 9e9c02ef..fc3fd3ac 100644 --- a/channels/playview.py +++ b/channels/playview.py @@ -262,7 +262,7 @@ def findvideos(item): # Requerido para FilterTools - itemlist = filtertools.get_links(itemlist, item, list_language) + # itemlist = filtertools.get_links(itemlist, item, list_language) # Requerido para AutoPlay diff --git a/channels/seriehd.py b/channels/seriehd.py index 440c1651..58efde3d 100644 --- a/channels/seriehd.py +++ b/channels/seriehd.py @@ -142,7 +142,7 @@ def findvideos(item): # Requerido para FilterTools - itemlist = filtertools.get_links(itemlist, item, list_language) + # itemlist = filtertools.get_links(itemlist, item, list_language) # Requerido para AutoPlay diff --git a/channels/serietvsubita.json b/channels/serietvsubita.json new file mode 100644 index 00000000..4dd01090 --- /dev/null +++ b/channels/serietvsubita.json @@ -0,0 +1,44 @@ +{ + "id": "serietvsubita", + "name": "Serie TV Sub ITA", + "active": false, + "adult": false, + "language": ["ita"], + "thumbnail": "http://serietvsubita.xyz/wp-content/uploads/2012/07/logo.jpg", + "banner": "http://serietvsubita.xyz/wp-content/uploads/2012/07/logo.jpg", + "categories": ["tvshow"], + "settings": [ + { + "id": "channel_host", + "type": "text", + "label": "Host del canale", + "default": "http://serietvsubita.xyz/", + "enabled": true, + "visible": true + }, + { + "id": "include_in_global_search", + "type": "bool", + "label": "Includi ricerca globale", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_series", + "type": "bool", + "label": "Includi in Novità - Serie TV", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_italiano", + "type": "bool", + "label": "Includi in Novità - Italiano", + "default": true, + "enabled": true, + "visible": true + } + ] +} diff --git a/channels/serietvsubita.py b/channels/serietvsubita.py new file mode 100644 index 00000000..5edd00c0 --- /dev/null +++ b/channels/serietvsubita.py @@ -0,0 +1,352 @@ +# -*- coding: utf-8 -*- +# ------------------------------------------------------------ +# Canale per Serie Tv Sub ITA +# Ringraziamo Icarus crew +# ---------------------------------------------------------- +import inspect +import re +import time + +import channelselector +from channels import autoplay, support, filtertools +from core import httptools, tmdb, scrapertools +from core.item import Item +from platformcode import logger, config + +host = config.get_setting("channel_host", 'serietvsubita') +headers = [['Referer', host]] + +IDIOMAS = {'Italiano': 'IT'} +list_language = IDIOMAS.values() +list_servers = ['gounlimited','verystream','streamango','openload'] +list_quality = ['default'] + + + +def mainlist(item): + support.log(item.channel + 'mainlist') + itemlist = [] + support.menu(itemlist, 'Serie TV bold', 'lista_serie', host,'tvshow') + support.menu(itemlist, 'Novità submenu', 'peliculas_tv', host,'tvshow') + support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', host,'tvshow') + support.menu(itemlist, 'Cerca', 'search', host,'tvshow') + + + autoplay.init(item.channel, list_servers, list_quality) + autoplay.show_option(item.channel, itemlist) + + itemlist.append( + Item(channel='setting', + action="channel_config", + title=support.typo("Configurazione Canale color lime"), + config=item.channel, + folder=False, + 
thumbnail=channelselector.get_thumb('setting_0.png')) + ) + + return itemlist + + +# ---------------------------------------------------------------------------------------------------------------- +def cleantitle(scrapedtitle): + scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip()) + scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('Game of Thrones –','') + year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)') + if year: + scrapedtitle = scrapedtitle.replace('(' + year + ')', '') + + + return scrapedtitle.strip() + + +# ================================================================================================================ + +# ---------------------------------------------------------------------------------------------------------------- +def lista_serie(item): + support.log(item.channel + " lista_serie") + itemlist = [] + + PERPAGE = 15 + + p = 1 + if '{}' in item.url: + item.url, p = item.url.split('{}') + p = int(p) + + # Descarga la pagina + data = httptools.downloadpage(item.url).data + + # Extrae las entradas + patron = '
  • ([^<]+)' + matches = re.compile(patron, re.DOTALL).findall(data) + + for i, (scrapedurl, scrapedtitle) in enumerate(matches): + scrapedplot = "" + scrapedthumbnail = "" + if (p - 1) * PERPAGE > i: continue + if i >= p * PERPAGE: break + title = cleantitle(scrapedtitle) + itemlist.append( + Item(channel=item.channel, + extra=item.extra, + action="episodes", + title=title, + url=scrapedurl, + thumbnail=scrapedthumbnail, + fulltitle=title, + show=title, + plot=scrapedplot, + folder=True)) + + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + + # Paginazione + if len(matches) >= p * PERPAGE: + scrapedurl = item.url + '{}' + str(p + 1) + itemlist.append( + Item(channel=item.channel, + action='lista_serie', + contentType=item.contentType, + title=support.typo(config.get_localized_string(30992), 'color kod bold'), + url=scrapedurl, + args=item.args, + thumbnail=support.thumb())) + + return itemlist + +# ================================================================================================================ + + +# ---------------------------------------------------------------------------------------------------------------- +def episodes(item): + support.log(item.channel + " episodes") + itemlist = [] + + data = httptools.downloadpage(item.url).data + + patron = '
  • ([^<]+)' + matches = re.compile(patron, re.DOTALL).findall(data) + + for i, (scrapedurl, scrapedtitle) in enumerate(matches): + scrapedplot = "" + scrapedthumbnail = "" + if (p - 1) * PERPAGE > i: continue + if i >= p * PERPAGE: break + title = cleantitle(scrapedtitle) + itemlist.append( + Item(channel=item.channel, + extra=item.extra, + action="episodes", + title=title, + url=scrapedurl, + fulltitle=title, + show=title, + plot=scrapedplot, + folder=True)) + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + + # Paginazione + if len(matches) >= p * PERPAGE: + scrapedurl = item.url + '{}' + str(p + 1) + itemlist.append( + Item(channel=item.channel, + action='list_az', + contentType=item.contentType, + title=support.typo(config.get_localized_string(30992), 'color kod bold'), + url=scrapedurl, + args=item.args, + extra=item.extra, + thumbnail=support.thumb())) + + return itemlist + +# ================================================================================================================ diff --git a/channels/serietvu.json b/channels/serietvu.json new file mode 100644 index 00000000..b598e9ab --- /dev/null +++ b/channels/serietvu.json @@ -0,0 +1,44 @@ +{ + "id": "serietvu", + "name": "SerieTVU", + "active": true, + "adult": false, + "language": ["ita"], + "thumbnail": "https://www.serietvu.club/wp-content/themes/gurarjbar/images/logo.png", + "banner": "https://www.serietvu.club/wp-content/themes/gurarjbar/images/logo.png", + "categories": ["tvshow"], + "settings": [ + { + "id": "channel_host", + "type": "text", + "label": "Host del canale", + "default": "https://www.serietvu.club", + "enabled": true, + "visible": true + }, + { + "id": "include_in_global_search", + "type": "bool", + "label": "Includi ricerca globale", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_series", + "type": "bool", + "label": "Includi in Novità - Serie TV", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_italiano", + "type": "bool", + "label": "Includi in Novità - Italiano", + "default": true, + "enabled": true, + "visible": true + } + ] +} diff --git a/channels/serietvu.py b/channels/serietvu.py new file mode 100644 index 00000000..2a01fb91 --- /dev/null +++ b/channels/serietvu.py @@ -0,0 +1,296 @@ +# -*- coding: utf-8 -*- +# ------------------------------------------------------------ +# Canale per SerieTVU +# Ringraziamo Icarus crew +# ---------------------------------------------------------- +import re + +import channelselector +from channels import autoplay, support, filtertools +from core import httptools, tmdb, scrapertools +from core.item import Item +from platformcode import logger, config + +host = config.get_setting("channel_host", 'serietvu') +headers = [['Referer', host]] + +IDIOMAS = {'Italiano': 'IT'} +list_language = IDIOMAS.values() +list_servers = ['speedvideo'] +list_quality = ['default'] + + + +def mainlist(item): + support.log(item.channel + 'mainlist') + itemlist = [] + support.menu(itemlist, 'Serie TV bold', 'lista_serie', "%s/category/serie-tv" % host,'tvshow') + support.menu(itemlist, 'Novità submenu', 'latestep', "%s/ultimi-episodi" % host,'tvshow') + # support.menu(itemlist, 'Nuove serie color azure', 'lista_serie', "%s/category/serie-tv" % host,'tvshow') + support.menu(itemlist, 'Categorie', 'categorie', host,'tvshow') + support.menu(itemlist, 'Cerca', 'search', host,'tvshow') + + + # autoplay.init(item.channel, list_servers, list_quality) + # autoplay.show_option(item.channel, 
itemlist) + + itemlist.append( + Item(channel='setting', + action="channel_config", + title=support.typo("Configurazione Canale color lime"), + config=item.channel, + folder=False, + thumbnail=channelselector.get_thumb('setting_0.png')) + ) + + return itemlist + + +# ---------------------------------------------------------------------------------------------------------------- +def cleantitle(scrapedtitle): + scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip()) + scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('Game of Thrones –','').replace('Flash 2014','Flash') + year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)') + if year: + scrapedtitle = scrapedtitle.replace('(' + year + ')', '') + + + return scrapedtitle.strip() + + +# ================================================================================================================ + +# ---------------------------------------------------------------------------------------------------------------- +def lista_serie(item): + support.log(item.channel + " lista_serie") + itemlist = [] + + data = httptools.downloadpage(item.url, headers=headers).data + + patron = r'
  • Pagina successiva') + + return itemlist + +# ================================================================================================================ + + +# ---------------------------------------------------------------------------------------------------------------- +def episodios(item): + support.log(item.channel + " episodios") + itemlist = [] + + data = httptools.downloadpage(item.url, headers=headers).data + + patron = r'' + matches = re.compile(patron, re.DOTALL).findall(data) + + for value in matches: + patron = r'
    (.*?)
    \s*' % value + blocco = scrapertools.find_single_match(data, patron) + + patron = r'(
    )[^>]+>[^>]+>([^<]+)<' + matches = re.compile(patron, re.DOTALL).findall(blocco) + for scrapedextra, scrapedurl, scrapedimg, scrapedtitle in matches: + number = scrapertools.decodeHtmlentities(scrapedtitle.replace("Episodio", "")).strip() + itemlist.append( + Item(channel=item.channel, + action="findvideos", + title=value + "x" + number.zfill(2), + fulltitle=scrapedtitle, + contentType="episode", + url=scrapedurl, + thumbnail=scrapedimg, + extra=scrapedextra, + folder=True)) + + if config.get_videolibrary_support() and len(itemlist) != 0: + itemlist.append( + Item(channel=item.channel, + title=support.typo(config.get_localized_string(30161) + ' bold color kod'), + thumbnail=support.thumb(), + url=item.url, + action="add_serie_to_library", + extra="episodios", + contentSerieName=item.fulltitle, + show=item.show)) + + return itemlist + +# ================================================================================================================ + +# ---------------------------------------------------------------------------------------------------------------- +def findvideos(item): + support.log(item.channel + " findvideos") + + itemlist = support.server(item, data=item.url) + # itemlist = filtertools.get_links(itemlist, item, list_language) + + autoplay.start(itemlist, item) + + return itemlist + +# ================================================================================================================ + + +# ---------------------------------------------------------------------------------------------------------------- +def findepisodevideo(item): + support.log(item.channel + " findepisodevideo") + + # Download Pagina + data = httptools.downloadpage(item.url, headers=headers).data + + # Prendo il blocco specifico per la stagione richiesta + patron = r'
    (.*?)
    \s*' % item.extra[0][0] + blocco = scrapertools.find_single_match(data, patron) + + # Estraggo l'episodio + patron = r'
    ' % item.extra[0][1].lstrip("0") + matches = re.compile(patron, re.DOTALL).findall(blocco) + + itemlist = support.server(item, data=matches[0][0]) + # itemlist = filtertools.get_links(itemlist, item, list_language) + + autoplay.start(itemlist, item) + + return itemlist + + +# ================================================================================================================ + + +# ---------------------------------------------------------------------------------------------------------------- +def latestep(item): + support.log(item.channel + " latestep") + itemlist = [] + + data = httptools.downloadpage(item.url, headers=headers).data + + patron = r'
    \s*' + patron += r'[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)([^<]+)<' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedimg, scrapedtitle, scrapedinfo in matches: + infoLabels = {} + year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)') + if year: + infoLabels['year'] = year + scrapedtitle = cleantitle(scrapedtitle) + + infoLabels['tvshowtitle'] = scrapedtitle + + episodio = re.compile(r'(\d+)x(\d+)', re.DOTALL).findall(scrapedinfo) + title = "%s %s" % (scrapedtitle, scrapedinfo) + itemlist.append( + Item(channel=item.channel, + action="findepisodevideo", + title=title, + fulltitle=scrapedtitle, + url=scrapedurl, + extra=episodio, + thumbnail=scrapedimg, + show=scrapedtitle, + contentTitle=scrapedtitle, + contentSerieName=title, + infoLabels=infoLabels, + folder=True)) + + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + + return itemlist + + +# ================================================================================================================ + +# ---------------------------------------------------------------------------------------------------------------- +def newest(categoria): + logger.info('serietvu' + " newest" + categoria) + itemlist = [] + item = Item() + try: + if categoria == "series": + item.url = host + "/ultimi-episodi" + item.action = "latestep" + itemlist = latestep(item) + + if itemlist[-1].action == "latestep": + itemlist.pop() + + # Continua la ricerca in caso di errore + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist + + +# ================================================================================================================ + +# ---------------------------------------------------------------------------------------------------------------- +def search(item, texto): + logger.info(item.channel + " search") + item.url = host + "/?s=" + texto + try: + return lista_serie(item) + # Continua la ricerca in caso di errore + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +# ================================================================================================================ + +# ---------------------------------------------------------------------------------------------------------------- +def categorie(item): + logger.info(item.channel +" categorie") + itemlist = [] + + data = httptools.downloadpage(item.url, headers=headers).data + blocco = scrapertools.find_single_match(data, r'

    Sfoglia

    \s*
      (.*?)
    \s*') + patron = r'
  • ([^<]+)
  • ' + matches = re.compile(patron, re.DOTALL).findall(blocco) + + for scrapedurl, scrapedtitle in matches: + if scrapedtitle == 'Home Page' or scrapedtitle == 'Calendario Aggiornamenti': + continue + itemlist.append( + Item(channel=item.channel, + action="lista_serie", + title=scrapedtitle, + contentType="tv", + url="%s%s" % (host, scrapedurl), + thumbnail=item.thumbnail, + folder=True)) + + return itemlist + +# ================================================================================================================ diff --git a/channels/support.py b/channels/support.py index 03c8ace3..cbbbdf30 100644 --- a/channels/support.py +++ b/channels/support.py @@ -135,25 +135,24 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data="" matches = scrapertoolsV2.find_multiple_matches(block, patron) log('MATCHES =', matches) + known_keys = ['url', 'title', 'thumb', 'quality', 'year', 'plot', 'duration', 'genere', 'rating'] for match in matches: if len(listGroups) > len(match): # to fix a bug match = list(match) - match.extend([''] * (len(listGroups)-len(match))) + match.extend([''] * (len(listGroups) - len(match))) - scrapedurl = url_host+match[listGroups.index('url')] if 'url' in listGroups else '' - scrapedtitle = match[listGroups.index('title')] if 'title' in listGroups else '' - scrapedthumb = match[listGroups.index('thumb')] if 'thumb' in listGroups else '' - scrapedquality = match[listGroups.index('quality')] if 'quality' in listGroups else '' - scrapedyear = match[listGroups.index('year')] if 'year' in listGroups else '' - scrapedplot = match[listGroups.index('plot')] if 'plot' in listGroups else '' - scrapedduration = match[listGroups.index('duration')] if 'duration' in listGroups else '' - scrapedgenre = match[listGroups.index('genre')] if 'genre' in listGroups else '' - scrapedrating = match[listGroups.index('rating')] if 'rating' in listGroups else '' + scraped = {} + for kk in known_keys: + val = match[listGroups.index(kk)] if kk in listGroups else '' + if kk == "url": + val = url_host + val + scraped[kk] = val - title = scrapertoolsV2.decodeHtmlentities(scrapedtitle) - plot = scrapertoolsV2.decodeHtmlentities(scrapedplot) - if scrapedquality: - longtitle = '[B]' + title + '[/B] [COLOR blue][' + scrapedquality + '][/COLOR]' + title = scrapertoolsV2.decodeHtmlentities(scraped["title"]).strip() + plot = scrapertoolsV2.htmlclean(scrapertoolsV2.decodeHtmlentities(scraped["plot"])) + + if scraped["quality"]: + longtitle = '[B]' + title + '[/B] [COLOR blue][' + scraped["quality"] + '][/COLOR]' else: longtitle = '[B]' + title + '[/B]' @@ -161,37 +160,48 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data="" infolabels = item.infoLabels else: infolabels = {} - if scrapedyear: - infolabels['year'] = scrapedyear - if scrapedplot: + if scraped["year"]: + infolabels['year'] = scraped["year"] + if scraped["plot"]: infolabels['plot'] = plot - if scrapedduration: - matches = scrapertoolsV2.find_multiple_matches(scrapedduration, r'([0-9])\s*?(?:[hH]|:|\.|,|\\|\/|\||\s)\s*?([0-9]+)') + if scraped["duration"]: + matches = scrapertoolsV2.find_multiple_matches(scraped["duration"],r'([0-9])\s*?(?:[hH]|:|\.|,|\\|\/|\||\s)\s*?([0-9]+)') for h, m in matches: - scrapedduration = int(h) * 60 + int(m) - infolabels['duration'] = int(scrapedduration) * 60 - if scrapedgenre: - genres = scrapertoolsV2.find_multiple_matches(scrapedgenre, '[A-Za-z]+') - infolabels['genre'] = ", ".join(genres) - if scrapedrating: - infolabels['rating'] = 
scrapertoolsV2.decodeHtmlentities(scrapedrating) + scraped["duration"] = int(h) * 60 + int(m) + if not matches: + scraped["duration"] = scrapertoolsV2.find_single_match(scraped["duration"], r'(\d+)') + infolabels['duration'] = int(scraped["duration"]) * 60 + if scraped["genere"]: + genres = scrapertoolsV2.find_multiple_matches(scraped["genere"], '[A-Za-z]+') + infolabels['genere'] = ", ".join(genres) + if scraped["rating"]: + infolabels['rating'] = scrapertoolsV2.decodeHtmlentities(scraped["rating"]) - if not scrapedtitle in blacklist: - itemlist.append( - Item(channel=item.channel, - action=action, - contentType=item.contentType, - title=longtitle, - fulltitle=title, - show=title, - quality=scrapedquality, - url=scrapedurl, - infoLabels=infolabels, - thumbnail=scrapedthumb - ) + if scraped["title"] not in blacklist: + it = Item( + channel=item.channel, + action=action, + contentType=item.contentType, + title=longtitle, + fulltitle=title, + show=title, + quality=scraped["quality"], + url=scraped["url"], + infoLabels=infolabels, + thumbnail=scraped["thumb"] ) - tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + for lg in list(set(listGroups).difference(known_keys)): + it.__setattr__(lg, match[listGroups.index(lg)]) + + itemlist.append(it) + + if (item.contentType == "episode" and (action != "findvideos" and action != "play")) \ + or (item.contentType == "movie" and action != "play"): + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + else: + for it in itemlist: + it.infoLabels = item.infoLabels if patronNext: nextPage(itemlist, item, data, patronNext, 2) diff --git a/channels/tantifilm.py b/channels/tantifilm.py index 7d84a445..f7443fb9 100644 --- a/channels/tantifilm.py +++ b/channels/tantifilm.py @@ -474,7 +474,7 @@ def findvideos(item): # Requerido para FilterTools - itemlist = filtertools.get_links(itemlist, item, list_language) + # itemlist = filtertools.get_links(itemlist, item, list_language) # Requerido para AutoPlay diff --git a/channelselector.py b/channelselector.py index 65c454e0..3b942e8e 100644 --- a/channelselector.py +++ b/channelselector.py @@ -91,6 +91,11 @@ def getchanneltypes(view="thumb_"): channel_type=channel_type, viewmode="thumbnails", thumbnail=get_thumb("channels_%s.png" % channel_type, view))) + itemlist.append(Item(title='Oggi in TV', channel="filmontv", action="mainlist", view=view, + category=title, channel_type="all", thumbnail=get_thumb("on_the_air.png", view), + viewmode="thumbnails")) + + itemlist.append(Item(title=config.get_localized_string(70685), channel="community", action="mainlist", view=view, category=title, channel_type="all", thumbnail=get_thumb("channels_community.png", view), viewmode="thumbnails")) diff --git a/platformcode/platformtools.py b/platformcode/platformtools.py index 6befc842..3ed3ac7f 100644 --- a/platformcode/platformtools.py +++ b/platformcode/platformtools.py @@ -22,6 +22,9 @@ from core import channeltools from core import trakt_tools, scrapertoolsV2 from core.item import Item from platformcode import logger +import xbmcaddon +addon = xbmcaddon.Addon('plugin.video.kod') +downloadenabled = addon.getSetting('downloadenabled') class XBMCPlayer(xbmc.Player): @@ -591,7 +594,7 @@ def set_context_commands(item, parent_item): (sys.argv[0], item.clone(action="add_pelicula_to_library", from_action=item.action).tourl()))) - if item.channel != "downloads": + if item.channel != "downloads" and downloadenabled != "false": # Descargar pelicula if item.contentType == "movie" and item.contentTitle: 
context_commands.append((config.get_localized_string(60354), "XBMC.RunPlugin(%s?%s)" % diff --git a/resources/language/Italian/strings.po b/resources/language/Italian/strings.po index 58d53ade..565ffe63 100644 --- a/resources/language/Italian/strings.po +++ b/resources/language/Italian/strings.po @@ -5439,11 +5439,11 @@ msgid "Disclaimer" msgstr "Disclaimer" msgctxt "#70691" -msgid "Utilizzando la funzione di download dichiari di essere in possesso di una copia fisica e di utilizzare questa funzione come backup dello stesso." +msgid "Using the download function you declare that you have a physical copy and use this function as a backup of the same." msgstr "Utilizzando la funzione di download dichiari di essere in possesso di una copia fisica e di utilizzare questa funzione come backup dello stesso." msgctxt "#70692" -msgid "Il team di KOD non si assume alcuna responsabilità dell'uso che viene fatto di questa funzione proposta" +msgid "The KOD team assumes no responsibility for the use that is made of this proposed function" msgstr "Il team di KOD non si assume alcuna responsabilità dell'uso che viene fatto di questa funzione proposta" msgctxt "#70693" diff --git a/servers/gounlimited.json b/servers/gounlimited.json index 7b66129f..e45a2872 100644 --- a/servers/gounlimited.json +++ b/servers/gounlimited.json @@ -4,7 +4,7 @@ "ignore_urls": [], "patterns": [ { - "pattern": "https://gounlimited.to/embed-(.*?).html", + "pattern": "https://gounlimited.to/(?:embed-|)([a-z0-9]+)(?:.html|)", "url": "https://gounlimited.to/embed-\\1.html" } ] diff --git a/version.json b/version.json new file mode 100644 index 00000000..eba29b4d --- /dev/null +++ b/version.json @@ -0,0 +1,9 @@ +{ + "update": { + "name": "Kodi on Demand", + "version":"101", + "tag": "1.0.1", + "date": "03/05/2019", + "changes": "Added Updater" + } +} \ No newline at end of file