Merge branch 'master' of https://github.com/kodiondemand/addon into master

This commit is contained in:
Alhaziel01
2020-09-29 19:58:03 +02:00
3 changed files with 201 additions and 306 deletions


@@ -3,7 +3,8 @@
# Channel for Paramount Network
# ------------------------------------------------------------
from core import support, jsontools, autorenumber
from core import support, jsontools
from platformcode import autorenumber
# host = support.config.get_channel_url()
host = 'https://www.paramountnetwork.it'


@@ -39,7 +39,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
# lQuality.reverse()
for source in lSrc:
quality = source['label'] if source.has_key('label') else 'auto'
quality = source['label'] if 'label' in source else 'auto'
video_urls.append(['.' + source['file'].split('.')[-1] + ' [' + quality + '] [SuperVideo]', source['file']])
else:
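The hunk above replaces the Python 2-only dict.has_key() call with the `in` membership test, which behaves the same and also works under Python 3. A minimal standalone sketch of the quality-selection pattern, using a made-up `source` entry rather than a real SuperVideo response:

# Python 3 removed dict.has_key(); membership testing with `in` works on both 2 and 3.
source = {'file': 'https://example.com/video.mp4', 'label': '720p'}  # hypothetical sample entry

quality = source['label'] if 'label' in source else 'auto'
# An equivalent, arguably clearer spelling:
quality = source.get('label', 'auto')

ext = '.' + source['file'].split('.')[-1]
print(ext, quality)  # -> .mp4 720p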


@@ -2,332 +2,226 @@
# ------------------------------------------------------------
# Channel for films on TV
# ------------------------------------------------------------
from datetime import datetime
import glob, time, gzip, xbmc, sys
from core import filetools, downloadtools, support, scrapertools
import re
import urllib
from channelselector import get_thumb
from core import httptools, scrapertools, support, tmdb, filetools
from core.item import Item
from platformcode import logger, config, platformtools
if sys.version_info[0] >= 3: from concurrent import futures
else: from concurrent_py2 import futures
host = "https://www.superguidatv.it"
host = "http://epg-guide.com/kltv.gz"
blacklisted_genres = ['attualita', 'scienza', 'religione', 'cucina', 'notiziario', 'altro', 'soap opera', 'viaggi', 'economia', 'tecnologia', 'magazine', 'show', 'reality show', 'lifestyle', 'societa', 'wrestling', 'azione', 'Musica', 'real life', 'real adventure', 'dplay original', 'natura', 'news', 'food', 'sport', 'moda', 'arte e cultura', 'crime', 'box set e serie tv', 'casa', 'storia', 'talk show', 'motori', 'attualit\xc3\xa0 e inchiesta', 'documentari', 'musica', 'spettacolo', 'medical', 'talent show', 'sex and love', 'beauty and style', 'news/current affairs', "children's/youth programmes", 'leisure hobbies', 'social/political issues/economics', 'education/science/factual topics', 'undefined content', 'show/game show', 'music/ballet/dance', 'sports', 'arts/culture', 'biografico', 'informazione', 'documentario']
TIMEOUT_TOTAL = 60
def mainlist(item):
support.info()
itemlist = [Item(title=support.typo('Film in onda oggi', 'bold'), channel=item.channel, action='category', contentType='movie', thumbnail=support.thumb('movie')),
Item(title=support.typo('Serie Tv in onda oggi', 'bold'), channel=item.channel, action='peliculas', contentType='tvshow', thumbnail=support.thumb('tvshow')),
Item(title=support.typo('Guida tv per canale', 'bold'), channel=item.channel, action='listaCanali', thumbnail=support.thumb('on_the_air')),
Item(title=support.typo('Canali live', 'bold'), channel=item.channel, action='live', thumbnail=support.thumb('tvshow_on_the_air'))]
logger.info(" mainlist")
itemlist = [#Item(channel="search", action='discover_list', title=config.get_localized_string(70309),
#search_type='list', list_type='movie/now_playing',
# thumbnail=get_thumb("now_playing.png")),
#Item(channel="search", action='discover_list', title=config.get_localized_string(70312),
# search_type='list', list_type='tv/on_the_air', thumbnail=get_thumb("on_the_air.png")),
Item(channel=item.channel,
title=config.get_setting("film1", channel="filmontv"),
action="now_on_tv",
url="%s/film-in-tv/" % host,
thumbnail=item.thumbnail),
Item(channel=item.channel,
title=config.get_setting("film2", channel="filmontv"),
action="now_on_tv",
url="%s/film-in-tv/oggi/premium/" % host,
thumbnail=item.thumbnail),
Item(channel=item.channel,
title=config.get_setting("film3", channel="filmontv"),
action="now_on_tv",
url="%s/film-in-tv/oggi/sky-intrattenimento/" % host,
thumbnail=item.thumbnail),
Item(channel=item.channel,
title=config.get_setting("film4", channel="filmontv"),
action="now_on_tv",
url="%s/film-in-tv/oggi/sky-cinema/" % host,
thumbnail=item.thumbnail),
Item(channel=item.channel,
title=config.get_setting("film5", channel="filmontv"),
action="now_on_tv",
url="%s/film-in-tv/oggi/sky-primafila/" % host,
thumbnail=item.thumbnail),
Item(channel=item.channel,
title=config.get_setting("now1", channel="filmontv"),
action="now_on_misc",
url="%s/ora-in-onda/" % host,
thumbnail=item.thumbnail),
Item(channel=item.channel,
title=config.get_setting("now2", channel="filmontv"),
action="now_on_misc",
url="%s/ora-in-onda/premium/" % host,
thumbnail=item.thumbnail),
Item(channel=item.channel,
title=config.get_setting("now3", channel="filmontv"),
action="now_on_misc",
url="%s/ora-in-onda/sky-intrattenimento/" % host,
thumbnail=item.thumbnail),
Item(channel=item.channel,
title=config.get_setting("now4", channel="filmontv"),
action="now_on_misc",
url="%s/ora-in-onda/sky-doc-e-lifestyle/" % host,
thumbnail=item.thumbnail),
Item(channel=item.channel,
title=config.get_setting("now5", channel="filmontv"),
action="now_on_misc_film",
url="%s/ora-in-onda/sky-cinema/" % host,
thumbnail=item.thumbnail),
Item(channel=item.channel,
title="Personalizza Oggi in TV",
action="server_config",
config="filmontv",
folder=False,
thumbnail=item.thumbnail)]
return itemlist
def server_config(item):
return platformtools.show_channel_settings(channelpath=filetools.join(config.get_runtime_path(), "specials", item.config))
def getEpg():
now = datetime.now()
fileName = support.config.get_temp_file('guidatv-') + now.strftime('%Y %m %d')
archiveName = fileName + '.gz'
xmlName = fileName + '.xml'
if not filetools.exists(xmlName):
support.info('downloading epg')
# delete the old ones
for f in glob.glob(support.config.get_temp_file('guidatv-') + '*'):
filetools.remove(f, silent=True)
# inmemory = io.BytesIO(httptools.downloadpage(host).data)
downloadtools.downloadfile(host, archiveName)
support.info('opening gzip and writing xml')
with gzip.GzipFile(fileobj=filetools.file_open(archiveName, mode='rb', vfs=False)) as f:
guide = f.read().decode('utf-8')
guide = guide.replace('\n', ' ').replace('><', '>\n<')
with open(xmlName, 'w') as f:
f.write(guide)
# else:
guide = filetools.file_open(xmlName, vfs=False)
return guide
def category(item):
itemlist = [Item(title="Tutti", all=True, channel=item.channel, action='peliculas', contentType=item.contentType)]
category = ['Animazione', 'Avventura', 'Azione', 'Biografico', 'Brillante', 'Comico', 'Commedia', 'Crime', 'Documentario', 'Documentaristico', 'Drammatico', 'Famiglia', 'Fantascienza', 'Fantastico', 'Giallo', 'Guerra', 'Horror', 'Mistero', 'Musicale', 'Poliziesco', 'Sexy', 'Storico', 'Thriller', 'Western']
for cat in category:
itemlist.append(Item(title=cat, category=cat, channel=item.channel, action='peliculas', contentType=item.contentType))
return support.thumb(itemlist)
def peliculas(item, f=None):
f = getEpg()
titles = [item.lastTitle] if not item.titles else item.titles
def now_on_misc_film(item):
logger.info("filmontv tvoggi")
itemlist = []
pag = item.pag if item.pag else 0
channel = ''
title = ''
episode = ''
plot = ''
thumbnail = ''
actors = []
director = ''
year = ''
genres = []
genre = ''
country = ''
skip = False
# Load the page
data = httptools.downloadpage(item.url).data
#patron = r'spanTitleMovie">([A-Za-z À-ÖØ-öø-ÿ\-\']*)[a-z \n<>\/="_\-:0-9;A-Z.]*GenresMovie">([\-\'A-Za-z À-ÖØ-öø-ÿ\/]*)[a-z \n<>\/="_\-:0-9;A-Z.%]*src="([a-zA-Z:\/\.0-9?]*)[a-z \n<>\/="_\-:0-9;A-Z.%\-\']*Year">([A-Z 0-9a-z]*)'
patron = r'table-cell[;" ]*alt="([^"]+)".*?backdrop" alt="([^"]+)"[ ]*src="([^"]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedchannel, scrapedtitle, scrapedthumbnail in matches:
# for scrapedthumbnail, scrapedtitle, scrapedtv in matches:
scrapedurl = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
infoLabels = {}
#infoLabels["year"] = ""
infoLabels['title'] = "movie"
itemlist.append(
Item(channel=item.channel,
action="new_search",
extra=urllib.quote_plus(scrapedtitle) + '{}' + 'movie',
title="[B]" + scrapedtitle + "[/B] - " + scrapedchannel,
fulltitle=scrapedtitle,
mode='all',
search_text=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail.replace("?width=320", "?width=640"),
contentTitle=scrapedtitle,
contentType='movie',
infoLabels=infoLabels,
folder=True))
f.seek(pag)
line = True
while line:
line = f.readline()
if '<programme' in line:
channel = scrapertools.find_single_match(line, r'channel="([^"]+)"')
elif '<title' in line:
title = scrapertools.find_single_match(line, r'>([^<]+?)(?: - (?:1\s*\^\s*TV|Prima\s*T[Vv]))?<')
if not title or title in titles or title == 'EPG non disponibile':
skip = True
elif not skip and '<desc' in line:
genre, episode, plot = scrapertools.find_single_match(line, r'>(?:\[([^\]]+)\])?(S[0-9]+\s*Ep?[0-9]+)?(?:\s*-\s*)?([^<]+)')
if plot:
CY = scrapertools.find_single_match(plot, r'(\D{3}) (\d{4})')
if CY: country, year = CY
director = scrapertools.find_single_match(plot, r'Regia di ([^;|<]+)')
if episode and item.contentType == 'movie': skip = True
elif not skip and '<category' in line:
genre = scrapertools.find_single_match(line, r'>([^<]+)<')
# elif not skip and '<actor' in line:
# match = scrapertools.find_single_match(line, r'(?:role="([^"]*)")?>([^<]+)<')
# actors.append([match[1], match[0]])
# elif not skip and '<director' in line:
# director = scrapertools.find_single_match(line, r'>([^<]+)<')
# elif not skip and '<date' in line:
# year = scrapertools.find_single_match(line, r'>([^<]+)<')
# elif not skip and '<country' in line:
# country = scrapertools.find_single_match(line, r'>([^<]+)<')
# elif not skip and '<episode-num' in line:
# episode = scrapertools.find_single_match(line, r'>([^<]+)<')
# if item.contentType == 'movie':
# skip = True
# elif not skip and '<icon' in line:
# thumbnail = scrapertools.find_single_match(line, r'src="([^"]+)"')
elif '</programme' in line:
if genre in blacklisted_genres: skip = True
elif genre: genres = genre.split('/')
if not skip:
titles.append(title)
if (item.contentType == 'movie' and genres and (item.category in genres or item.all == True)) or (item.contentType == 'tvshow' and episode):
if episode:
episode = scrapertools.get_season_and_episode(episode)
se, ep = episode.split('x')
else:
se, ep = ('', '')
itemlist.append(Item(
channel=item.channel,
action='new_search',
title=support.typo(title + (' - ' + episode if episode else ''), 'bold'),
contentTitle=title if item.contentType == 'movie' else '',
contentSerieName=title if item.contentType == 'tvshow' else '',
contentSeason=se,
contentEpisodeNumber=ep,
fulltitle=title,
search_text=title,
mode=item.contentType,
thumbnail=thumbnail if thumbnail else item.thumbnail,
contentType=item.contentType,
channel_name=channel,
plot=plot,
infoLabels={
'director': director,
'genre': genres,
'country': country,
'year': year,
'season': se,
'episode': ep
}
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
channel = ''
title = ''
episode = ''
plot = ''
thumbnail = ''
actors = []
director = ''
year = ''
genres = []
genre = ''
country = ''
skip = False
if len(itemlist) >= 40:
itemlist.append(item.clone(title=support.typo(support.config.get_localized_string(30992), 'color kod bold'), pag= f.tell(), thumbnail=support.thumb(), lastTitle=titles[-1]))
break
support.tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
f.close()
return itemlist
def listaCanali(item):
def now_on_misc(item):
logger.info("filmontv tvoggi")
itemlist = []
f = getEpg()
thumbnail = None
skip = False
line = True
while line:
line = f.readline()
if '<channel' in line:
channelID = scrapertools.find_single_match(line, r'id="([^"]+)"')
elif '<display-name' in line:
channelName = scrapertools.find_single_match(line, r'>([^<]+)<')
elif not skip and '<icon' in line:
thumbnail = scrapertools.find_single_match(line, r'src="([^"]+)"')
elif not skip and '<programme' in line:
break
if '</channel' in line:
if not skip and thumbnail: #and 'channel="' + channel + '"' in f:
itemlist.append(Item(
channel=item.channel,
action='guidatv',
title=support.typo(channelName, 'bold'),
channelID=channelID,
thumbnail=thumbnail
))
thumbnail = None
skip = False
# return itemlist
# logger.info([i.title for i in itemlist])
f.close()
return sorted(itemlist, key=lambda x: x.title)
# Load the page
data = httptools.downloadpage(item.url).data
#patron = r'spanTitleMovie">([A-Za-z À-ÖØ-öø-ÿ\-\']*)[a-z \n<>\/="_\-:0-9;A-Z.]*GenresMovie">([\-\'A-Za-z À-ÖØ-öø-ÿ\/]*)[a-z \n<>\/="_\-:0-9;A-Z.%]*src="([a-zA-Z:\/\.0-9?]*)[a-z \n<>\/="_\-:0-9;A-Z.%\-\']*Year">([A-Z 0-9a-z]*)'
patron = r'table-cell[;" ]*alt="([^"]+)".*?backdrop" alt="([^"]+)"[ ]*src="([^"]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedchannel, scrapedtitle, scrapedthumbnail in matches:
# for scrapedthumbnail, scrapedtitle, scrapedtv in matches:
scrapedurl = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
infoLabels = {}
infoLabels["year"] = ""
infoLabels['tvshowtitle'] = scrapedtitle
itemlist.append(
Item(channel=item.channel,
action="new_search",
extra=urllib.quote_plus(scrapedtitle) + '{}' + 'tvshow',
title="[B]" + scrapedtitle + "[/B] - " + scrapedchannel,
fulltitle=scrapedtitle,
mode='all',
search_text=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail.replace("?width=320", "?width=640"),
contentTitle=scrapedtitle,
contentType='tvshow',
infoLabels=infoLabels,
folder=True))
def guidatv(item):
itemlist = []
f = getEpg()
days = []
for day in range(11, 18): days.append(xbmc.getLocalizedString(day))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
channel = ''
title = ''
episode = ''
plot = ''
thumbnail = ''
actors = []
director = ''
year = ''
genres = []
country = ''
start = ''
stop = ''
skip = False
line = True
while line:
line = f.readline()
if '<programme' in line:
start, stop, channel = scrapertools.find_single_match(line,r'start="([^"]*)" stop="([^"]*)" channel="([^"]+)"')
if channel != item.channelID:
skip = True
elif not skip and '<title' in line:
title = scrapertools.find_single_match(line, r'>([^<]+?)<')
if title == 'EPG non disponibile':
skip = True
elif not skip and '<desc' in line:
genre, episode, plot = scrapertools.find_single_match(line, r'>(?:\[([^\]]+)\])?(S[0-9]+\s*Ep?[0-9]+)?(?:\s*-\s*)?([^<]+)')
if plot:
CY = scrapertools.find_single_match(plot, r'(\D{3}) (\d{4})')
if CY: country, year = CY
director = scrapertools.find_single_match(plot, r'Regia di ([^;|<]+)')
if genre: genres.append(genre)
if episode and item.contentType == 'movie': skip = True
elif not skip and '<category' in line:
genre = scrapertools.find_single_match(line, r'>([^<]+)<')
# elif not skip and '<actor' in line:
# match = scrapertools.find_single_match(line, r'(?:role="([^"]*)")?>([^<]+)<')
# actors.append([match[1], match[0]])
# elif not skip and '<director' in line:
# director = scrapertools.find_single_match(line, r'>([^<]+)<')
# elif not skip and '<date' in line:
# year = scrapertools.find_single_match(line, r'>([^<]+)<')
# elif not skip and '<category' in line:
# genres.append(scrapertools.find_single_match(line, r'>([^<]+)<'))
# elif not skip and '<country' in line:
# country = scrapertools.find_single_match(line, r'>([^<]+)<')
# elif not skip and '<episode-num' in line:
# episode = scrapertools.find_single_match(line, r'>([^<]+)<')
# elif not skip and '<icon' in line:
# thumbnail = scrapertools.find_single_match(line, r'src="([^"]+)"')
elif '</programme' in line:
if not skip:
if genre: genres = genre.split('/')
tzHour = int(start.split('+')[1][:2])
start = time.strptime(start.split(' ')[0], '%Y%m%d%H%M%S')
stop = time.strptime(stop.split(' ')[0], '%Y%m%d%H%M%S')
duration = days[start.tm_wday] + ' alle ' + str(start.tm_hour + tzHour).zfill(2) + ':' + str(start.tm_min).zfill(2) + ' - ' + str(stop.tm_hour + tzHour).zfill(2) + ':' + str(stop.tm_min).zfill(2) + ' - '
itemlist.append(Item(
channel=item.channel,
action='new_search',
title=duration + support.typo(title + (' - ' + episode if episode else ''), 'bold'),
fulltitle=title,
search_text=title,
mode=item.contentType,
thumbnail=thumbnail if thumbnail else item.thumbnail,
contentType=item.contentType,
channel_name=channel,
infoLabels={
'title': title,
'plot': plot,
'castandrole': actors,
'director': director,
'genre': genres,
'country': country,
'year': year
}
))
channel = ''
title = ''
episode = ''
plot = ''
thumbnail = ''
actors = []
director = ''
year = ''
genres = []
country = ''
start = ''
stop = ''
skip = False
f.close()
return itemlist
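For reference, the schedule label that guidatv builds from the XMLTV-style start/stop attributes can be reproduced in isolation as below. The sample timestamps are invented, and the assumption (taken from the code above) is that the clock part needs the offset hours added to give local time:

import time

start_raw = '20200929190000 +0200'   # hypothetical start="..." attribute
stop_raw = '20200929210000 +0200'    # hypothetical stop="..." attribute

tz_hours = int(start_raw.split('+')[1][:2])                      # '+0200' -> 2
start = time.strptime(start_raw.split(' ')[0], '%Y%m%d%H%M%S')
stop = time.strptime(stop_raw.split(' ')[0], '%Y%m%d%H%M%S')

label = '%02d:%02d - %02d:%02d' % (start.tm_hour + tz_hours, start.tm_min,
                                   stop.tm_hour + tz_hours, stop.tm_min)
print(label)  # -> 21:00 - 23:00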
def now_on_tv(item):
logger.info("filmontv tvoggi")
itemlist = []
# Load the page
data = httptools.downloadpage(item.url).data.replace('\n','')
#patron = r'spanTitleMovie">([A-Za-z À-ÖØ-öø-ÿ\-\']*)[a-z \n<>\/="_\-:0-9;A-Z.]*GenresMovie">([\-\'A-Za-z À-ÖØ-öø-ÿ\/]*)[a-z \n<>\/="_\-:0-9;A-Z.%]*src="([a-zA-Z:\/\.0-9?]*)[a-z \n<>\/="_\-:0-9;A-Z.%\-\']*Year">([A-Z 0-9a-z]*)'
patron = r'alt="([a-zA-Z 0-9]*)".*?spanMovieDuration">([^<]+).*?spanTitleMovie">([A-Za-z ,0-9\.À-ÖØ-öø-ÿ\-\']*).*?GenresMovie">([\-\'A-Za-z À-ÖØ-öø-ÿ\/]*).*?src="([a-zA-Z:\/\.0-9?]*).*?Year">([A-Z 0-9a-z]*)'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedchannel, scrapedduration, scrapedtitle, scrapedgender, scrapedthumbnail, scrapedyear in matches:
# for scrapedthumbnail, scrapedtitle, scrapedtv in matches:
scrapedurl = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
infoLabels = {}
infoLabels["year"] = scrapedyear
itemlist.append(
Item(channel=item.channel,
action="new_search",
extra=urllib.quote_plus(scrapedtitle) + '{}' + 'movie',
title="[B]" + scrapedtitle + "[/B] - " + scrapedchannel + " - " + scrapedduration,
fulltitle="[B]" + scrapedtitle + "[/B] - " + scrapedchannel+ " - " + scrapedduration,
url=scrapedurl,
mode='all',
search_text=scrapedtitle,
thumbnail=scrapedthumbnail.replace("?width=240", "?width=480"),
contentTitle=scrapedtitle,
contentType='movie',
infoLabels=infoLabels,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def primafila(item):
logger.info("filmontv tvoggi")
itemlist = []
# Load the page
data = httptools.downloadpage(item.url).data
#patron = r'spanTitleMovie">([A-Za-z À-ÖØ-öø-ÿ]*)[a-z \n<>\/="_\-:0-9;A-Z.]*GenresMovie">([A-Za-z À-ÖØ-öø-ÿ\/]*)[a-z \n<>\/="_\-:0-9;A-Z.%]*src="([a-zA-Z:\/\.0-9?=]*)'
patron = r'spanTitleMovie">([A-Za-z À-ÖØ-öø-ÿ\-\']*)[a-z \n<>\/="_\-:0-9;A-Z.]*GenresMovie">([\-\'A-Za-z À-ÖØ-öø-ÿ\/]*)[a-z \n<>\/="_\-:0-9;A-Z.%]*src="([a-zA-Z:\/\.0-9?]*)[a-z \n<>\/="_\-:0-9;A-Z.%\-\']*Year">([A-Z 0-9a-z]*)'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle, scrapedgender, scrapedthumbnail, scrapedyear in matches:
# for scrapedthumbnail, scrapedtitle, scrapedtv in matches:
scrapedurl = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
infoLabels = {}
infoLabels["year"] = scrapedyear
itemlist.append(
Item(channel=item.channel,
action="new_search",
extra=urllib.quote_plus(scrapedtitle) + '{}' + 'movie',
title=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
mode='all',
search_text=scrapedtitle,
thumbnail=scrapedthumbnail.replace("?width=240", "?width=480"),
contentTitle=scrapedtitle,
contentType='movie',
infoLabels=infoLabels,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def new_search(item):
from specials import search
item.channel = 'search'
# if the match is certain, go straight to search by id
if item.infoLabels['tmdb_id']:
return search.channel_search(item)
else:
return search.new_search(item)
def live(item):
itemlist = []
channels_dict = {}
channels = ['raiplay', 'mediasetplay', 'la7']
with futures.ThreadPoolExecutor() as executor:
itlist = [executor.submit(load_live, channel) for channel in channels]
for res in futures.as_completed(itlist):
if res.result():
channel_name, itlist = res.result()
channels_dict[channel_name] = itlist
for channel in channels:
itemlist += channels_dict[channel]
return itemlist
def load_live(channel_name):
channel = __import__('%s.%s' % ('channels', channel_name), None, None, ['%s.%s' % ('channels', channel_name)])
itemlist = channel.live(channel.mainlist(Item())[0])
return channel_name, itemlist
return search.new_search(item)
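The new live() entry point above fans per-channel loading out over a thread pool and then reassembles the results in the declared channel order. A standalone sketch of that pattern with a stub loader in place of the real channels.* modules (names and data here are illustrative only):

from concurrent import futures

channels = ['raiplay', 'mediasetplay', 'la7']

def load_live(channel_name):
    # Stand-in for importing channels.<name> and calling its mainlist()/live() entry points.
    return channel_name, ['%s item %d' % (channel_name, i) for i in range(2)]

results = {}
with futures.ThreadPoolExecutor() as executor:
    pending = [executor.submit(load_live, name) for name in channels]
    for job in futures.as_completed(pending):
        name, items = job.result()
        results[name] = items

itemlist = []
for name in channels:  # keep the declared channel order regardless of completion order
    itemlist += results.get(name, [])
print(itemlist)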