- Aggiunti i canali Mediaset Play e La 7.
- Riscritto Animeunity.
- Le stagioni concluse vengono ora escluse dall'aggiornamento della videoteca.
- Ora è possibile aggiornare gli episodi di Kod dal menu contestuale della Libreria di Kod (se non gestite da Kod verranno cercate)
- Fix Adesso in Onda su ATV
- Fix Vari
This commit is contained in:
marco
2020-07-19 16:05:27 +02:00
parent c93ba1b736
commit 0e1eb946b2
60 changed files with 1736 additions and 794 deletions

View File

@@ -1,10 +1,18 @@
<addon id="plugin.video.kod" name="Kodi on Demand" version="1.2" provider-name="KoD Team">
<addon id="plugin.video.kod" name="Kodi on Demand" version="1.3" provider-name="KoD Team">
<requires>
<!-- <import addon="script.module.libtorrent" optional="true"/> -->
<import addon="metadata.themoviedb.org"/>
<import addon="metadata.tvdb.com"/>
</requires>
<extension point="kodi.context.item">
<menu id="kodi.core.main">
<item library="updatetvshow.py">
<label>70269</label>
<visible>String.IsEqual(ListItem.dbtype,tvshow)</visible>
</item>
</menu>
</extension>
<extension point="xbmc.python.pluginsource" library="default.py">
<provides>video</provides>
</extension>
@@ -18,11 +26,12 @@
<screenshot>resources/media/themes/ss/2.png</screenshot>
<screenshot>resources/media/themes/ss/3.png</screenshot>
</assets>
<news> - aggiunto nuovo canale: guardaserie.cam
- autoplay migliorato, ora i settaggi sono globali e non più per canale
- adesso in onda riscritto, ora usa EPG (ringraziamo epg-guide.com)
- Riprendi la visione di un film o episodio da server diversi (solo videoteca)
- fix e ottimizzazioni varie</news>
<news>- Aggiunti i canali Mediaset Play e La 7.
- Riscritto Animeunity.
- Le stagioni concluse vengono ora escluse dall'aggiornamento della videoteca.
- Ora è possibile aggiornare gli episodi di Kod dal menu contestuale della Libreria di Kod (se non gestite da Kod verranno cercate)
- Fix Adesso in Onda su ATV
- Fix Vari</news>
<description lang="it">Naviga velocemente sul web e guarda i contenuti presenti</description>
<disclaimer>[COLOR red]The owners and submitters to this addon do not host or distribute any of the content displayed by these addons nor do they have any affiliation with the content providers.[/COLOR]
[COLOR yellow]Kodi © is a registered trademark of the XBMC Foundation. We are not connected to or in any other way affiliated with Kodi, Team Kodi, or the XBMC Foundation. Furthermore, any software, addons, or products offered by us will receive no support in official Kodi channels, including the Kodi forums and various social networks.[/COLOR]</disclaimer>

View File

@@ -11,7 +11,6 @@
"animeunity": "https://www.animeunity.it",
"animeworld": "https://www.animeworld.tv",
"casacinema": "https://www.casacinema.rest",
"casacinemaInfo": "https://casacinema.life",
"cb01anime": "https://www.cineblog01.red/",
"cinemalibero": "https://cinemalibero.plus",
"cinetecadibologna": "http://cinestore.cinetecadibologna.it",
@@ -32,7 +31,7 @@
"guardaserieIcu": "https://guardaserie.icu/",
"guardaserieCam": "https://guardaserie.cam",
"piratestreaming": "https://www.piratestreaming.biz",
"polpotv": "https://polpotv.club",
"polpotv": "https://polpotv.live",
"pufimovies": "https://pufimovies.com",
"raiplay": "https://www.raiplay.it",
"seriehd": "https://seriehd.link",

View File

@@ -6,23 +6,35 @@
from core import support
host = support.config.get_channel_url()
headers = [['Referer', host]]
@support.menu
def mainlist(item):
anime = ['/lista-anime/',
('In Corso',['/lista-anime-in-corso/', 'peliculas', 'corso']),
('In Corso',['/anime/anime-status/in-corso/', 'peliculas', 'status']),
('Completi',['/anime/anime-status/completo/', 'peliculas', 'status']),
('Genere',['/anime', 'submenu', 'genre']),
('Anno',['/anime', 'submenu', 'anime-year']),
('Tipologia',['/anime', 'submenu', 'anime-type']),
('Stagione',['/anime', 'submenu', 'anime-season']),
('Ultime Serie',['/category/anime/articoli-principali/','peliculas','last'])
]
return locals()
@support.scrape
def submenu(item):
    """Taxonomy sub-menu (genre / year / type / season).

    Consumed by @support.scrape via locals(): `patronBlock` isolates the
    <select> element for the taxonomy named in item.args, and `patronMenu`
    captures each option's slug (u), label (t) and entry count (num).
    """
    action = 'peliculas'
    patronBlock = r'data-taxonomy="' + item.args + r'"(?P<block>.*?)</select'
    patronMenu = r'<option class="level-\d+ (?P<u>[^"]+)"[^>]+>(?P<t>[^&]+)[^\(]+\((?P<num>\d+)'
    def itemHook(item):
        # Build the listing URL from the taxonomy name and the captured slug.
        item.url += host + '/anime/' + item.args + '/' + item.u
        item.title = support.typo(item.t, 'bold')
        return item
    return locals()
def newest(categoria):
support.log(categoria)
itemlist = []
@@ -60,13 +72,14 @@ def search(item, texto):
@support.scrape
def peliculas(item):
anime = True
action = 'episodios'
if 'movie' in item.url:
action = 'findvideos'
else:
action = 'check'
if not item.args:
pagination = ''
patron = r'<a\s*href="(?P<url>[^"]+)"\s*title="(?P<title>[^"]+)">'
elif item.args == 'corso':
pagination = ''
patron = r'<strong><a href="(?P<url>[^"]+)">(?P<title>.*?) [Ss][Uu][Bb]'
else:
patron = r'<a href="(?P<url>[^"]+)"[^>]+>\s*<img src="(?P<thumb>[^"]+)" alt="(?P<title>.*?)(?: Sub| sub| SUB|")'
@@ -81,10 +94,21 @@ def peliculas(item):
return locals()
def check(item):
    """Inspect the detail page and dispatch: movie -> findvideos, otherwise episodios."""
    m = support.match(item, headers=headers, patron=r'Tipologia[^>]+><a href="([^"]+)"')
    item.data = m.data
    if 'movie' not in m.match:
        return episodios(item)
    item.contentType = 'movie'
    return findvideos(item)
@support.scrape
def episodios(item):
anime = True
data = support.match(item, headers=headers).data
data = item.data
if '<h6>Streaming</h6>' in data:
patron = r'<td style[^>]+>\s*.*?(?:<span[^>]+)?<strong>(?P<title>[^<]+)<\/strong>.*?<td style[^>]+>\s*<a href="(?P<url>[^"]+)"[^>]+>'
else:
@@ -100,51 +124,9 @@ def episodios(item):
def findvideos(item):
support.log(item)
# try:
# from urlparse import urljoin
# except:
# from urllib.parse import urljoin
# support.dbg()
itemlist = []
if 'vvvvid' in item.url:
import requests
from lib import vvvvid_decoder
if support.match(item.url, string=True, patron=r'(\d+/\d+)').match:
item.action = 'play'
itemlist.append(item)
else:
# VVVVID vars
vvvvid_host = 'https://www.vvvvid.it/vvvvid/ondemand/'
vvvvid_headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:62.0) Gecko/20100101 Firefox/62.0'}
# VVVVID session
current_session = requests.Session()
login_page = 'https://www.vvvvid.it/user/login'
conn_id = current_session.get(login_page, headers=vvvvid_headers).json()['data']['conn_id']
payload = {'conn_id': conn_id}
# collect parameters
show_id = support.match(item.url, string=True, patron=r'(\d+)').match
ep_number = support.match(item.title, patron=r'(\d+)').match
json_file = current_session.get(vvvvid_host + show_id + '/seasons/', headers=vvvvid_headers, params=payload).json()
season_id = str(json_file['data'][0]['season_id'])
json_file = current_session.get(vvvvid_host + show_id + '/season/' + season_id +'/', headers=vvvvid_headers, params=payload).json()
# select the correct episode
for episode in json_file['data']:
support.log('Number',int(episode['number']),int(ep_number))
if int(episode['number']) == int(ep_number):
url = vvvvid_decoder.dec_ei(episode['embed_info'] or episode['embed_info'])
if 'youtube' in url: item.url = url
item.url = url.replace('manifest.f4m','master.m3u8').replace('http://','https://').replace('/z/','/i/')
if 'https' not in item.url:
url = support.match(item, url='https://or01.top-ix.org/videomg/_definst_/mp4:' + item.url + '/playlist.m3u')[1]
url = url.split()[-1]
itemlist.append(item.clone(action= 'play', url= 'https://or01.top-ix.org/videomg/_definst_/mp4:' + item.url + '/' + url, server= 'directo'))
elif 'adf.ly' in item.url:
if 'adf.ly' in item.url:
from servers.decrypters import adfly
url = adfly.get_long_url(item.url)
@@ -154,7 +136,6 @@ def findvideos(item):
else:
url = host
for u in item.url.split('/'):
# support.log(i)
if u and 'animeforce' not in u and 'http' not in u:
url += '/' + u
@@ -162,11 +143,12 @@ def findvideos(item):
url = support.httptools.downloadpage(url, only_headers=True, follow_redirects=False).headers.get("location")
url = support.match(url, patron=r'class="button"><a href=(?:")?([^" ]+)', headers=headers).match
else:
url = support.match(url, patron=[r'<source src=(?:")?([^" ]+)',r'name="_wp_http_referer" value="([^"]+)"']).match
if item.data: url = item.data
url = support.match(url, patron=r'data-href="([^"]+)" target').match
if not url: url = support.match(url, patron=[r'<source src=(?:")?([^" ]+)',r'name="_wp_http_referer" value="([^"]+)"']).match
if url.startswith('//'): url = 'https:' + url
elif url.startswith('/'): url = 'https:/' + url
itemlist.append(item.clone(action="play", title='Diretto', url=url, server='directo'))
if 'vvvvid' in url: itemlist.append(item.clone(action="play", title='VVVVID', url=url, server='vvvvid'))
else: itemlist.append(item.clone(action="play", title='Diretto', url=url, server='directo'))
return support.server(item, itemlist=itemlist)

View File

@@ -19,7 +19,7 @@ headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/
def mainlist(item):
anime = [
('Leggendari', ['/category/anime-leggendari/', 'peliculas']),
# ('Leggendari', ['/category/anime-leggendari/', 'peliculas']),
('ITA', ['/category/anime-ita/', 'peliculas']),
('SUB-ITA', ['/category/anime-sub-ita/', 'peliculas']),
('Conclusi', ['/category/serie-anime-concluse/', 'peliculas']),
@@ -59,7 +59,7 @@ def peliculas(item):
anime = True
blacklist = ['top 10 anime da vedere']
if item.url != host: patronBlock = r'<div id="main-content(?P<block>.*?)<aside'
patron = r'<figure class="(?:mh-carousel-thumb|mh-posts-grid-thumb)"> <a class="[^"]+" href="(?P<url>[^"]+)" title="(?P<title>.*?)(?: \((?P<year>\d+)\))? (?:(?P<lang>SUB ITA|ITA))(?: (?P<title2>[Mm][Oo][Vv][Ii][Ee]))?[^"]*"><img[^s]+src="(?P<thumb>[^"]+)"[^>]+'
patron = r'<figure class="(?:mh-carousel-thumb|mh-posts-grid-thumb)"> <a (?:class="[^"]+" )?href="(?P<url>[^"]+)" title="(?P<title>.*?)(?: \((?P<year>\d+)\))? (?:(?P<lang>SUB ITA|ITA))(?: (?P<title2>[Mm][Oo][Vv][Ii][Ee]))?[^"]*"><img (?:class="[^"]+"|width="[^"]+" height="[^"]+") src="(?P<thumb>[^"]+)"[^>]+'
def itemHook(item):
if 'movie' in item.title.lower():
item.title = support.re.sub(' - [Mm][Oo][Vv][Ii][Ee]|[Mm][Oo][Vv][Ii][Ee]','',item.title)
@@ -67,6 +67,13 @@ def peliculas(item):
item.contentType = 'movie'
item.action = 'findvideos'
return item
def itemlistHook(itemlist):
itlist = []
for item in itemlist:
if 'nuovo episodio:' not in item.title.lower():
itlist += [item]
return itlist
patronNext = r'<a class="next page-numbers" href="([^"]+)">'
action = 'episodios'
return locals()
@@ -80,15 +87,16 @@ def episodios(item):
patron = r'<iframe.*?src="(?P<url>[^"]+)"'
title = item.title
def fullItemlistHook(itemlist):
url = ''
for item in itemlist:
url += item.url +'\n'
item = itemlist[0]
item.data = url
item.title = title
item.contentType = 'movie'
itemlist = []
itemlist.append(item)
if len(itemlist) > 0:
url = ''
for item in itemlist:
url += item.url +'\n'
item = itemlist[0]
item.data = url
item.title = title
item.contentType = 'movie'
itemlist = []
itemlist.append(item)
return itemlist
else:
url = item.url

View File

@@ -6,5 +6,15 @@
"thumbnail": "animeunity.png",
"banner": "animeunity.png",
"categories": ["anime"],
"settings": []
"settings": [
{
"id": "order",
"type": "list",
"label": "Ordine di Visualizzazione",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [ "Standard", "Lista A-Z", "Lista Z-A", "Popolarità", "Valutazione" ]
}
]
}

View File

@@ -3,34 +3,86 @@
# Canale per AnimeUnity
# ------------------------------------------------------------
import requests, json, copy
from core import support
from specials import autorenumber
try: from lib import cloudscraper
except: from lib import cloudscraper
host = support.config.get_channel_url()
headers = [['Referer', host]]
response = cloudscraper.create_scraper().get(host + '/archivio')
csrf_token = support.match(response.text, patron= 'name="csrf-token" content="([^"]+)"').match
headers = {'content-type': 'application/json;charset=UTF-8',
'x-csrf-token': csrf_token,
'Cookie' : '; '.join([x.name + '=' + x.value for x in response.cookies])}
@support.menu
def mainlist(item):
anime = ['/anime.php?c=archive&page=*',
('In Corso',['/anime.php?c=onair', 'peliculas']),
('Ultimi Episodi', ['', 'peliculas', 'news']),
('Ultimi Aggiunti', ['', 'peliculas', 'last'])
]
top = [('Ultimi Episodi', ['', 'news'])]
menu = [('Anime {bullet bold}',['', 'menu', {}, 'tvshow']),
('Film {submenu}',['', 'menu', {'type':'Movie'}]),
('TV {submenu}',['', 'menu', {'type':'TV'}, 'tvshow']),
('OVA {submenu} {tv}',['', 'menu', ['type','OVA'], 'tvshow']),
('ONA {submenu} {tv}',['', 'menu', ['type','ONA'], 'tvshow']),
('Special {submenu} {tv}',['', 'menu', ['type','Special'], 'tvshow'])]
search =''
return locals()
@support.scrape
def menu(item):
action = 'peliculas'
patronBlock = item.args + r' Categorie</a>\s*<ul(?P<block>.*?)</ul>'
patronMenu = r'<a href="(?P<url>[^"]+)"[^>]+>(?P<title>[^>]+)<'
return locals()
item.action='peliculas'
ITA = copy.copy(item.args)
ITA['title'] = '(ita)'
InCorso = copy.copy(item.args)
InCorso['status'] = 'In Corso'
Terminato = copy.copy(item.args)
Terminato['status'] = 'Terminato'
itemlist = [item.clone(title=support.typo('Tutti','bold')),
item.clone(title=support.typo('ITA','bold'), args=ITA),
item.clone(title=support.typo('Genere','bold'), action='genres'),
item.clone(title=support.typo('Anno','bold'), action='years')]
if item.contentType == 'tvshow':
itemlist += [item.clone(title=support.typo('In Corso','bold'), args=InCorso),
item.clone(title=support.typo('Terminato','bold'), args=Terminato)]
itemlist +=[item.clone(title=support.typo('Cerca...','bold'), action='search', thumbnail=support.thumb(thumb='search.png'))]
return itemlist
def genres(item):
    """Build one 'peliculas' menu entry per genre parsed from the archive page."""
    support.log()
    raw = support.match(response.text, patron='genres="([^"]+)').match
    entries = []
    for name in json.loads(raw.replace('&quot;', '"')):
        item.args['genres'] = [name]
        entries.append(item.clone(title=support.typo(name, 'bold'), action='peliculas'))
    return support.thumb(entries)
def years(item):
    """Build one 'peliculas' menu entry per release year, newest first."""
    support.log()
    from datetime import datetime
    newest_year = datetime.today().year
    oldest_year = int(support.match(response.text, patron='anime_oldest_date="([^"]+)').match)
    entries = []
    for yr in range(newest_year, oldest_year - 1, -1):
        item.args['year'] = yr
        entries.append(item.clone(title=support.typo(yr, 'bold'), action='peliculas'))
    return entries
def search(item, text):
support.log('search', item)
item.url = host + '/anime.php?c=archive&page=*'
if not item.args:
item.args = {'title':text}
else:
item.args['title'] = text
item.search = text
try:
return peliculas(item)
# Continua la ricerca in caso di errore
@@ -46,12 +98,11 @@ def newest(categoria):
itemlist = []
item = support.Item()
item.url = host
item.args = 'news'
item.action = 'peliculas'
try:
itemlist = peliculas(item)
if itemlist[-1].action == 'peliculas':
try:
itemlist = news(item)
if itemlist[-1].action == 'news':
itemlist.pop()
# Continua la ricerca in caso di errore
except:
@@ -62,47 +113,110 @@ def newest(categoria):
return itemlist
def news(item):
    """Latest-episodes listing built from the JSON embedded in the page markup."""
    support.log()
    item.contentType = 'tvshow'
    raw = support.match(item, headers=headers, patron=r'items-json="([^"]+)"').match
    payload = json.loads(raw.replace('&quot;', '"'))
    entries = []
    for ep in payload['data']:
        show = ep['anime']
        entries.append(support.Item(
            channel=item.channel,
            title=support.typo(show['title'] + ' - EP. ' + ep['number'], 'bold'),
            fulltitle=show['title'],
            server='directo',
            thumbnail=show['imageurl'],
            forcethumb=True,
            url=ep['link'],
            plot=show['plot'],
            action='play'))
    # Append a "next page" entry when the feed reports one.
    if 'next_page_url' in payload:
        entries.append(item.clone(
            title=support.typo(support.config.get_localized_string(30992), 'color kod bold'),
            thumbnail=support.thumb(),
            url=payload['next_page_url']))
    return entries
@support.scrape
def peliculas(item):
# debug = True
pagination = 20
anime = True
if item.args == 'news':
patron = r'col-lg-3 col-md-6 col-sm-6 col-xs-6 mobile-col">\s*<a href="(?P<url>[^"]+)">[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s*<img class="[^"]+" src="(?P<thumb>[^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>(?P<title>[^-]+)\D+Episodio\s*(?P<episode>\d+)'
patronNext = r'page-link" href="([^"]+)">'
elif item.args == 'last':
patronBlock = r'ULTIME AGGIUNTE[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<block>.*?)<div class="row"'
patron = r'<img class="[^"]+" src="(?P<thumb>[^"]+)"[^>]+>[^>]+>[^>]+>\s*<a class="[^"]+" href="(?P<url>[^"]+)"\s*>(?P<title>[^<]+)</a>'
else:
search = item.search
patron = r'<div class="card-img-top archive-card-img"> <a href="(?P<url>[^"]+)"> <img class="[^"]+" src="(?P<thumb>[^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<title>[^<\(]+)(?:\((?P<lang>[^\)]+)\))?'
return locals()
support.log()
itemlist = []
page = item.page if item.page else 0
item.args['offset'] = page * 30
order = support.config.get_setting('order', item.channel)
if order:
order_list = [ "Standard", "Lista A-Z", "Lista Z-A", "Popolarità", "Valutazione" ]
item.args['order'] = order_list[order]
payload = json.dumps(item.args)
records = requests.post(host + '/archivio/get-animes', headers=headers, data=payload).json()['records']
js = []
for record in records:
js += record
for it in js:
lang = support.match(it['title'], patron=r'\(([It][Tt][Aa])\)').match
title = support.re.sub(r'\s*\([^\)]+\)', '', it['title'])
if 'ita' in lang.lower(): language = 'ITA'
else: language = 'Sub-ITA'
itm = item.clone(title=support.typo(title,'bold') + support.typo(language,'_ [] color kod') + (support.typo(it['title_eng'],'_ ()') if it['title_eng'] else ''))
itm.contentLanguage = language
itm.type = it['type']
itm.thumbnail = it['imageurl']
itm.plot = it['plot']
if it['episodes_count'] == 1:
itm.contentType = 'movie'
itm.fulltitle = itm.show = itm.contentTitle = title
itm.contentSerieName = ''
itm.action = 'findvideos'
itm.url = it['episodes'][0]['link']
else:
itm.contentType = 'tvshow'
itm.contentTitle = ''
itm.fulltitle = itm.show = itm.contentSerieName = title
itm.action = 'episodios'
itm.episodes = it['episodes'] if 'episodes' in it else it['link']
itm.url = ''
itemlist.append(itm)
autorenumber.renumber(itemlist)
if len(itemlist) >= 30:
itemlist.append(item.clone(title=support.typo(support.config.get_localized_string(30992), 'color kod bold'), thumbnail=support.thumb(), page=page + 1))
return itemlist
@support.scrape
def episodios(item):
# debug = True
data = item.data
anime = True
pagination = 50
patron = r'<a href="(?P<url>[^"]+)" class="\D+ep-button">(?P<episode>\d+)'
def itemHook(item):
item.title = item.title + support.typo(item.fulltitle,'-- bold')
return item
return locals()
support.log()
itemlist = []
title = 'Parte ' if item.type.lower() == 'movie' else 'Episodio '
for it in item.episodes:
itemlist.append(
support.Item(channel=item.channel,
title=support.typo(title + it['number'], 'bold'),
episode = it['number'],
fulltitle=item.title,
show=item.title,
contentTitle='',
contentSerieName=item.contentSerieName,
thumbnail=item.thumbnail,
plot=item.plot,
action='findvideos',
contentType='episode',
url=it['link']))
autorenumber.renumber(itemlist, item, 'bold')
support.videolibrary(itemlist, item)
support.download(itemlist, item)
return itemlist
def findvideos(item):
support.log()
html = support.match(item, patron=r'TIPO:\s*</b>\s*([A-Za-z]+)')
if html.match == 'TV' and item.contentType != 'episode':
item.contentType = 'tvshow'
item.data = html.data
return episodios(item)
else:
itemlist = []
if item.contentType != 'episode': item.contentType = 'movie'
video = support.match(html.data, patron=r'<source src="([^"]+)"').match
itemlist.append(item.clone(action="play", title='Diretto', url=video, server='directo'))
return support.server(item, itemlist=itemlist)
return support.server(item,itemlist=[item.clone(title='Diretto', server='directo', action='play')])

View File

@@ -1,10 +0,0 @@
{
"id": "casacinemaInfo",
"name": "La Casa del Cinema",
"language": ["ita", "sub-ita"],
"active": false,
"thumbnail": "casacinemainfo.png",
"banner": "casacinemainfo.png",
"categories": ["movie", "vos"],
"settings": []
}

View File

@@ -1,131 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per 'casacinemaInfo'
# ------------------------------------------------------------
"""
Problemi noti che non superano il test del canale:
-
Avvisi:
- Sub-ita non è nel titolo, lascia il puntatore sulla locandina
per visualizzare il titolo completo!
Novità:
- Film
Ulteriori info:
"""
from core import support
from core.item import Item
# def findhost():
# data = httptools.downloadpage('https://casacinema.nuovo.link').data
# host = scrapertools.find_single_match(data, r'<div class="elementor-widget-container"><div class="elementor-button-wrapper"> <a href="([^"]+)"')
# if host.endswith('/'):
# host = host[:-1]
# return host
host = support.config.get_channel_url()
headers = [['Referer', host]]
@support.menu
def mainlist(item):
    """Root menu, handed to @support.menu via locals().

    `film` declares the movie section: the leading '' is the default
    listing path, followed by (label, [path, action, args]) sub-entries.
    """
    support.log(item)
    film = ['',
            ('Al Cinema', ['/category/in-sala/', 'peliculas', '']),
            ('Novità', ['/category/nuove-uscite/', 'peliculas', '']),
            ('Generi', ['', 'genres', 'genres']),
            ('Sub-ITA', ['/category/sub-ita/', 'peliculas', ''])
            ]
    return locals()
@support.scrape
def peliculas(item):
    """Movie listing / search-results scraper (locals() feed @support.scrape).

    Category pages use the poster-grid patterns; when item.args == 'search'
    the richer search-result rows (with year and plot) are matched instead.
    `patronNext` captures the next-page link in both modes.
    """
    support.log(item)
    #support.dbg() # uncomment to enable web_pdb
    #findhost()
    blacklist = ['']
    if item.args != 'search':
        patron = r'<div class="col-mt-5 postsh">[^<>]+<div class="poster-media-card">[^<>]+<a href="(?P<url>[^"]+)" title="(?P<title>.+?)[ ]?(?:\[(?P<lang>Sub-ITA)\])?".*?<img(?:.+?)?src="(?P<thumb>[^"]+)"'
        patronBlock = r'<div class="showpost4 posthome">(?P<block>.*?)</section>'
    else:
        patron = r'<li class="col-md-12 itemlist">.*?<a href="(?P<url>[^"]+)" title="(?P<title>[^"]+)".*?<img src="(?P<thumb>[^"]+)".*?Film dell"anno: (?P<year>\d{4})(?:[\d\-]+)?</p> <p class="text-list">(?P<plot>[^<>]+)</p>'
        patronBlock = r'<ul class="search-results-content infinite">(?P<block>.*?)</section>'
    patronNext = '<a href="([^"]+)"\s+?><i class="glyphicon glyphicon-chevron-right"'
    #support.regexDbg(item, patronBlock, headers)
    # debug = True
    return locals()
@support.scrape
def genres(item):
    """Genre-menu scraper: each link inside the 'Generi' block becomes a
    'peliculas' entry (locals() feed @support.scrape)."""
    support.log(item)
    #support.dbg()
    action = 'peliculas'
    blacklist = ['']
    patron = r'href="(?P<url>[^"]+)">(?P<title>[^<]+)<'
    patronBlock = r'</span>Generi</h3>(?P<block>.*?)<div class="clear"></div>'
    ## debug = True
    return locals()
def search(item, text):
    """Run a site search for *text*.

    Any exception is logged and swallowed so one failing channel does not
    break the global search.
    """
    support.log('search', item)
    query = text.replace(' ', '+')
    item.args = 'search'
    item.url = host + '/?s=%s' % query
    try:
        return peliculas(item)
    except:
        import sys
        for info in sys.exc_info():
            support.log('search log:', info)
        return []
def newest(categoria):
    """Return the newest items for *categoria* (only 'peliculas' is handled).

    On any scraping error the exception info is logged and an empty list is
    returned so the global "newest" aggregator keeps working.
    """
    support.log('newest ->', categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == 'peliculas':
            item.url = host
            item.action = 'peliculas'
            itemlist = peliculas(item)
            # Drop the trailing "next page" entry, if present; guard against
            # an empty result so itemlist[-1] cannot raise IndexError.
            if itemlist and itemlist[-1].action == 'peliculas':
                itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            # Original used `{0}.format(line)` — a set literal, which raised
            # AttributeError inside this handler. Use a format string.
            support.log('newest log: ', '{0}'.format(line))
        return []
    return itemlist
def findvideos(item):
    """Delegate server resolution for *item* to the shared HDpass helper."""
    support.log('findvideos ->', item)
    return support.hdpass_get_servers(item)
def play(item):
    """Delegate final playable-URL resolution to the shared HDpass helper."""
    return support.hdpass_get_url(item)

View File

@@ -168,7 +168,7 @@ def findvideos(item):
if item.contentType == "episode":
return findvid_serie(item)
def load_links(itemlist, re_txt, color, desc_txt, quality=""):
def load_links(itemlist, re_txt, desc_txt, quality=""):
streaming = scrapertools.find_single_match(data, re_txt).replace('"', '')
support.log('STREAMING', streaming)
support.log('STREAMING=', streaming)
@@ -186,13 +186,13 @@ def findvideos(item):
data = re.sub('\n|\t', '', data)
# Estrae i contenuti - Streaming
load_links(itemlist, '<strong>Streamin?g:</strong>(.*?)cbtable', "orange", "Streaming", "SD")
load_links(itemlist, '<strong>Streamin?g:</strong>(.*?)cbtable', "Streaming", "SD")
# Estrae i contenuti - Streaming HD
load_links(itemlist, '<strong>Streamin?g HD[^<]+</strong>(.*?)cbtable', "yellow", "Streaming HD", "HD")
load_links(itemlist, '<strong>Streamin?g HD[^<]+</strong>(.*?)cbtable', "Streaming HD", "HD")
# Estrae i contenuti - Streaming 3D
load_links(itemlist, '<strong>Streamin?g 3D[^<]+</strong>(.*?)cbtable', "pink", "Streaming 3D")
load_links(itemlist, '<strong>Streamin?g 3D[^<]+</strong>(.*?)cbtable', "Streaming 3D")
itemlist = support.server(item, itemlist=itemlist)
# Extract the quality format

13
channels/la7.json Normal file
View File

@@ -0,0 +1,13 @@
{
"id": "la7",
"name": "La7",
"active": true,
"language": ["ita"],
"thumbnail": "la7.png",
"banner": "la7.png",
"categories": ["movie", "tvshow", "documentary", "live"],
"not_active": ["include_in_newest"],
"default_off": ["include_in_global_search"],
"settings": [],
"cloudflare": true
}

160
channels/la7.py Normal file
View File

@@ -0,0 +1,160 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per La7
# ------------------------------------------------------------
import requests
from core import support
DRM = 'com.widevine.alpha'
key_widevine = "https://la7.prod.conax.cloud/widevine/license"
host = 'https://www.la7.it'
headers = {
'host_token': 'pat.la7.it',
'host_license': 'la7.prod.conax.cloud',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36',
'accept': '*/*',
'accept-language': 'en,en-US;q=0.9,it;q=0.8',
'dnt': '1',
'te': 'trailers',
'origin': 'https://www.la7.it',
'referer': 'https://www.la7.it/',
}
icons = {'la7':'https://upload.wikimedia.org/wikipedia/commons/thumb/0/02/LA7_-_Logo_2011.svg/512px-LA7_-_Logo_2011.svg.png',
'la7d': 'https://upload.wikimedia.org/wikipedia/it/e/ea/LA7d_LOGO_.png' }
titles = {'la7': support.typo('La7', 'bold'), 'la7d': support.typo('La7d', 'bold')}
@support.menu
def mainlist(item):
    """Root menu for the La7 channel, handed to @support.menu via locals().

    `top` lists the live-stream and replay entries, `menu` the scraped
    programme sections; the empty `search` enables the search entry.
    """
    top = [('Dirette {bold}', ['', 'live']),
           ('Replay {bold}', ['', 'replay_channels'])]
    menu = [('Programmi TV {bullet bold}', ['/tutti-i-programmi', 'peliculas', '', 'tvshow']),
            ('Teche La7 {bullet bold}', ['/i-protagonisti', 'peliculas', '', 'tvshow'])]
    search = ''
    return locals()
def live(item):
    """List the two live streams (La7 and La7d) as directly playable items."""
    channels = (('la7', '/dirette-tv'), ('la7d', '/live-la7d'))
    return [item.clone(title=titles[ch], url=host + path, action='play',
                       forcethumb=True, thumbnail=icons[ch])
            for ch, path in channels]
def replay_channels(item):
    """List the two channels whose past schedule can be browsed for replay."""
    return [item.clone(title=titles[ch], url=host + '/rivedila7/0/' + ch,
                       action='replay_menu', forcethumb=True, thumbnail=icons[ch])
            for ch in ('la7', 'la7d')]
@support.scrape
def replay_menu(item):
    """Day picker for a channel's replay schedule (locals() feed @support.scrape).

    `patron` captures, for each day link: url, day name, day number and
    month; itemHook joins them into the displayed title.
    """
    action = 'replay'
    patron = r'href="(?P<url>[^"]+)"><div class="giorno-text">\s*(?P<day>[^>]+)</div><[^>]+>\s*(?P<num>[^<]+)</div><[^>]+>\s*(?P<month>[^<]+)<'
    def itemHook(item):
        item.title = support.typo(item.day + ' ' + item.num + ' ' + item.month,'bold')
        return item
    return locals()
@support.scrape
def replay(item):
    """Programmes aired on the selected day; each entry is directly playable.

    `patron` captures air time (hour), page url, thumbnail path (t),
    programme name and plot from the schedule markup.
    """
    action = 'play'
    patron = r'guida-tv"><[^>]+><[^>]+>(?P<hour>[^<]+)<[^>]+><[^>]+><[^>]+>\s*<a href="(?P<url>[^"]+)"><[^>]+><div class="[^"]+" data-background-image="(?P<t>[^"]+)"><[^>]+><[^>]+><[^>]+><[^>]+>\s*(?P<name>[^<]+)<[^>]+><[^>]+><[^>]+>(?P<plot>[^<]+)<'
    def itemHook(item):
        item.title = support.typo(item.hour + ' - ' + item.name,'bold')
        item.contentTitle = item.fulltitle = item.show = item.name
        # data-background-image appears to be scheme-less ("//...") here,
        # so the scheme is prefixed unconditionally — TODO confirm on site.
        item.thumbnail = 'http:' + item.t
        item.fanart = item.thumbnail
        item.forcethumb = True
        return item
    return locals()
def search(item, text):
    """Search the full programme index for *text* via the peliculas scraper."""
    item.search = text
    item.url = host + '/tutti-i-programmi'
    return peliculas(item)
@support.scrape
def peliculas(item):
    """Programme-list scraper, also used for search through item.search
    (locals() feed @support.scrape). `disabletmdb` presumably skips TMDb
    metadata lookups for these entries; each result opens its episode list.
    """
    search = item.search
    disabletmdb = True
    action = 'episodios'
    patron = r'<a href="(?P<url>[^"]+)"[^>]+><div class="[^"]+" data-background-image="(?P<t>[^"]+)"></div><div class="titolo">\s*(?P<title>[^<]+)<'
    def itemHook(item):
        # Thumbnails may be scheme-less ("//host/..."): add "http:" in that case.
        item.thumbnail = 'http:' + item.t if item.t.startswith('//') else item.t if item.t else item.thumbnail
        # NOTE(review): fanart reads item.thumb, which is not set above —
        # possibly meant item.thumbnail; confirm against support.scrape.
        item.fanart = item.thumb
        return item
    return locals()
@support.scrape
def episodios(item):
    """Episode-list scraper for a programme page (locals() feed @support.scrape).

    If the page has a dedicated "puntate" (episodes) tab, scrape that block
    and additionally fetch the tab's own page; otherwise fall back to the
    generic /video listing. The alternated `patron` matches either the
    episode-card markup or the single-video heading/shareUrl/poster form.
    """
    data = support.match(item).data
    action = 'play'
    if '>puntate<' in data:
        patronBlock = r'>puntate<(?P<block>.*?)home-block-outbrain'
        # Follow the "puntate" tab link and append its markup for scraping.
        url = support.match(data, patron=r'>puntate<[^>]+>[^>]+>[^>]+><a href="([^"]+)"').match
        data += support.match(host + url).data
    else:
        item.url += '/video'
        data = support.match(item).data
    patron = r'(?:<a href="(?P<url>[^"]+)">[^>]+><div class="[^"]+" data-background-image="(?P<t>[^"]*)">[^>]+>[^>]+>[^>]+>(?:[^>]+>)?(?:[^>]+>)?\s*(?P<title>[^<]+)<(?:[^>]+>[^>]+>[^>]+><div class="data">(?P<date>[^<]+))?|class="heading">[^>]+>(?P<Title>[^<]+).*?window.shareUrl = "(?P<Url>[^"]+)".*?poster:\s*"(?P<Thumb>[^"]+)", title: "(?P<desc>[^"]+)")'
    patronNext = r'<a href="([^"]+)">'
    addVideolibrary = False
    def itemHook(item):
        if item.Thumb: item.t = item.Thumb
        # Thumbnails may be scheme-less ("//host/..."): add "http:" in that case.
        item.thumbnail = 'http:' + item.t if item.t.startswith('//') else item.t if item.t else item.thumbnail
        if item.Title: item.title = support.typo(item.Title, 'bold')
        if item.date:
            # Strip the redundant "Puntata del dd/mm/yyyy" text, then append
            # the captured date as a bracketed tag.
            item.title = support.re.sub(r'[Pp]untata (?:del )?\d+/\d+/\d+', '', item.title)
            item.title += support.typo(item.date, '_ [] bold')
        if item.desc: item.plot = item.desc
        item.forcethumb = True
        item.fanart = item.thumbnail
        return item
    return locals()
def play(item):
    """Resolve the playable stream for *item*.

    Two cases:
    - the page references an .mp4 asset: build the public HLS
      (master.m3u8) repackage URL and play it directly;
    - otherwise the page exposes a DASH manifest protected by Widevine:
      fetch the pre-auth token, assemble the inputstream license_key
      string (license URL | headers | R{SSM} |) and tag the item with
      DRM info before resolving servers.
    """
    support.log()
    data = support.match(item).data
    match = support.match(data, patron='/content/entry/data/(.*?).mp4').match
    if match:
        # Plain VOD: the CDN exposes an HLS repackage of the mp4 asset.
        url = 'https://awsvodpkg.iltrovatore.it/local/hls/,/content/entry/data/' + support.match(item, patron='/content/entry/data/(.*?).mp4').match + '.mp4.urlset/master.m3u8'
        item = item.clone(title='Direct', url=url, server='directo', action='play')
    else:
        # DRM path: preTokenUrl yields the preAuthToken required by the
        # Widevine license server.
        preurl = support.match(data, patron=r'preTokenUrl = "(.+?)"').match
        url = support.match(data, patron=r'["]?dash["]?\s*:\s*"([^"]+)"').match
        tokenHeader = {
            'host': headers['host_token'],
            'user-agent': headers['user-agent'],
            'accept': headers['accept'],
            'accept-language': headers['accept-language'],
            'dnt': headers['dnt'],
            'te': headers['te'],
            'origin': headers['origin'],
            'referer': headers['referer'],
        }
        preAuthToken = requests.get(preurl, headers=tokenHeader,verify=False).json()['preAuthToken']
        licenseHeader = {
            'host': headers['host_license'],
            'user-agent': headers['user-agent'],
            'accept': headers['accept'],
            'accept-language': headers['accept-language'],
            'preAuthorization': preAuthToken,
            'origin': headers['origin'],
            'referer': headers['referer'],
        }
        # Serialize the license-request headers as k=v pairs for the
        # license_key string expected by the player.
        preLic= '&'.join(['%s=%s' % (name, value) for (name, value) in licenseHeader.items()])
        tsatmp=str(int(support.time()))
        license_url= key_widevine + '?d=%s'%tsatmp
        lic_url='%s|%s|R{SSM}|'%(license_url, preLic)
        item.drm = DRM
        item.license = lic_url
    return support.servertools.find_video_items(item, data=url)

View File

@@ -1,7 +1,7 @@
{
"id": "mediasetplay",
"name": "Mediaset Play",
"active": false,
"active": true,
"language": ["ita"],
"thumbnail": "mediasetplay.png",
"banner": "mediasetplay.png",

View File

@@ -4,13 +4,11 @@
# ------------------------------------------------------------
import requests
from core import support, httptools
from core import support
import sys
if sys.version_info[0] >= 3:
from concurrent import futures
from urllib.parse import urlencode, quote
else:
from concurrent_py2 import futures
from urllib import urlencode, quote
current_session = requests.Session()
@@ -84,7 +82,7 @@ def live(item):
contentTitle=it['title'],
thumbnail=it['thumbnails']['channel_logo-100x100']['url'],
forcethumb = True,
url=urls,
urls=urls,
plot=plot,
action='play'))
return itemlist
@@ -127,16 +125,21 @@ def peliculas(item):
contentType=contentType if contentType else item.contentType,
contentTitle=it['title'] if 'movie' in [contentType, item.contentType] else '',
contentSerieName=it['title'] if 'tvshow' in [contentType, item.contentType] else '',
thumbnail=it['thumbnails']['image_vertical-264x396']['url'],
thumbnail=it['thumbnails']['image_vertical-264x396']['url'] if 'image_vertical-264x396' in it['thumbnails'] else '',
fanart=it['thumbnails']['image_keyframe_poster-1280x720']['url'] if 'image_keyframe_poster-1280x720' in it['thumbnails'] else '',
plot=it['longDescription'] if 'longDescription' in it else it['description'] if 'description' in it else '',
url=urls))
urls=urls,
url=it['mediasetprogram$pageUrl']))
return itemlist
def episodios(item):
support.log()
itemlist = []
subBrandId = current_session.get('https://feed.entertainment.tv.theplatform.eu/f/PR1GhC/mediaset-prod-all-brands?byCustomValue={brandId}{' + item.url + '}').json()['entries'][-1]['mediasetprogram$subBrandId']
subBrandId = current_session.get('https://feed.entertainment.tv.theplatform.eu/f/PR1GhC/mediaset-prod-all-brands?byCustomValue={brandId}{' + item.urls + '}').json()
for entry in subBrandId['entries']:
if 'mediasetprogram$subBrandId' in entry and entry['description'] == 'Episodi':
subBrandId = entry['mediasetprogram$subBrandId']
break
json = current_session.get('https://feed.entertainment.tv.theplatform.eu/f/PR1GhC/mediaset-prod-all-programs?byCustomValue={subBrandId}{' + subBrandId + '}').json()['entries']
for it in json:
urls = []
@@ -157,23 +160,26 @@ def episodios(item):
show=title,
contentType='episode',
contentSerieName = title,
thumbnail=it['thumbnails']['image_vertical-264x396']['url'],
thumbnail=it['thumbnails']['image_vertical-264x396']['url'] if 'image_vertical-264x396' in it['thumbnails'] else '',
fanart=it['thumbnails']['image_keyframe_poster-1280x720']['url'] if 'image_keyframe_poster-1280x720' in it['thumbnails'] else '',
plot=it['longDescription'] if 'longDescription' in it else it['description'],
url=urls))
urls=urls,
url=it['mediasetprogram$pageUrl']))
support.videolibrary(itemlist, item)
return sorted(itemlist, key=lambda it: it.title)
def findvideos(item):
support.log()
itemlist = []
itemlist.append(support.Item(server = 'directo', title = 'Direct', url = item.url, action = 'play'))
itemlist.append(support.Item(server = 'directo', title = 'Direct', url = item.urls, action = 'play'))
return support.server(item, itemlist=itemlist, Download=False)
def play(item):
support.log()
for url in item.url:
url = httptools.downloadpage(url, allow_redirects=True).url
if not item.urls: urls = item.url
else: urls = item.urls
for url in urls:
url = support.httptools.downloadpage(url, allow_redirects=True).url
if '.mpd' in url: data = url
return support.servertools.find_video_items(item, data=data)
@@ -190,7 +196,6 @@ def get_from_id(item):
id = quote(",".join(json["components"]))
json = current_session.get(entries.format(id=id)).json()
if 'entries' in json:
support.log(json['entries'])
return json['entries']
return {}

View File

@@ -57,7 +57,7 @@ def peliculas(item):
item.contentType='movie'
else:
item.contentType='tvshow'
itemlist.extend(get_itemlist_element(element,item))
itemlist.extend(get_itemlist_element(element, item))
try:
if support.inspect.stack()[1][3] not in ['newest']:
@@ -92,7 +92,7 @@ def get_season(item, seas_url, seasonNumber):
itemlist.append(
item.clone(action='findvideos',
contentType='episode',
title=str(seasonNumber)+"x"+str("%02d"%episode['episodeNumber']),
title=support.typo(str(seasonNumber)+"x"+str("%02d"%episode['episodeNumber']), 'bold'),
url=seas_url,
extra=str(len(json_object['hydra:member'])-episode['episodeNumber'])))
return itemlist[::-1]
@@ -173,6 +173,8 @@ def findvideos(item):
def get_itemlist_element(element,item):
support.log()
itemlist=[]
contentSerieName = ''
contentTitle =''
try:
if element['originalLanguage']['id']=='it':
scrapedtitle=element['originalTitle']
@@ -197,11 +199,13 @@ def get_itemlist_element(element,item):
infoLabels = {}
if item.contentType=='movie':
contentTitle = scrapedtitle
next_action='findvideos'
quality=support.typo(element['lastQuality'].upper(), '_ [] color kod bold')
url="%s%s/releases"
infoLabels['tmdb_id']=element['tmdbId']
else:
contentSerieName = scrapedtitle
next_action='episodios'
quality=''
url="%s%s"
@@ -216,7 +220,9 @@ def get_itemlist_element(element,item):
plot=scrapedplot,
fanart=scrapedfanart,
thumbnail=scrapedthumbnail,
contentTitle=scrapedtitle,
contentTitle=contentTitle,
contentSerieName=contentSerieName,
contentType=item.contentType,
url=url % (host, element['@id']),
infoLabels=infoLabels))

View File

@@ -155,12 +155,21 @@ def Type(item):
def live(item):
support.log()
itemlist =[]
info={}
json = current_session.get(item.url).json()['dirette']
onAir = current_session.get(onair).json()['on_air']
support.log(onAir)
for key in onAir:
channel = key['channel']
info[channel] = {}
info[channel]['fanart'] = getUrl(key['currentItem']['image'])
info[channel]['plot'] = support.typo(key['currentItem']['name'],'bold')+ '\n\n' + key['currentItem']['description']
for i, key in enumerate(json):
itemlist.append(item.clone(title = support.typo(key['channel'], 'bold'), fulltitle = key['channel'], show = key['channel'], url = key['video']['contentUrl'],
thumbnail = key['transparent-icon'].replace("[RESOLUTION]", "256x-"), forcethumb = True , fanart = getUrl(onAir[i]['currentItem']['image']),
plot = support.typo(onAir[i]['currentItem']['name'],'bold')+ '\n\n' + onAir[i]['currentItem']['description'], action = 'play'))
channel = key['channel']
itemlist.append(item.clone(title = support.typo(channel, 'bold'), fulltitle = channel, show = channel, url = key['video']['contentUrl'],
thumbnail = key['transparent-icon'].replace("[RESOLUTION]", "256x-"), forcethumb = True , fanart = info[channel]['fanart'],
plot = info[channel]['plot'], action = 'play'))
return itemlist

View File

@@ -122,7 +122,7 @@ def findvideos(item):
itemlist = support.server(item, data=urls_video)
itemlist.append(
item.colone(title=support.typo("Vai alla Serie Completa: " + item.fulltitle, ' bold'),
item.clone(title=support.typo("Vai alla Serie Completa: " + item.fulltitle, ' bold'),
contentType='tvshow',
url=url_serie,
action='episodios',

View File

@@ -50,15 +50,12 @@ def mainlist(item):
@support.scrape
def peliculas(item):
log()
if item.args == 'search':
patron = r'<a href="(?P<url>[^"]+)" title="Permalink to\s(?P<title>[^"]+) \((?P<year>[^<]+)\).*?".*?<img[^s]+src="(?P<thumb>[^"]+)".*?<div class="calitate">\s*<p>(?P<quality>[^<]+)<\/p>'
patron = r'<a href="(?P<url>[^"]+)" title="Permalink to\s*(?P<title>[^"]+) \((?P<year>[^<]+)\)[^"]*"[^>]+>\s*<img[^s]+src="(?P<thumb>[^"]+)".*?<div class="calitate">\s*<p>(?P<quality>[^<]+)<\/p>'
# support.regexDbg(item, patron, headers)
else:
patronNext = r'<a class="nextpostslink" rel="next" href="([^"]+)">'
patron = r'<div class="mediaWrap mediaWrapAlt">\s?<a href="(?P<url>[^"]+)"(?:[^>]+>|)>?\s?<img[^s]+src="([^"]+)"[^>]+>\s?<\/a>[^>]+>[^>]+>[^>]+>(?P<title>.+?)(?P<lang>[sSuUbB\-iItTaA]+)?(?:[ ]?\((?P<year>\d{4})-?(?:\d{4})?)\).[^<]+[^>]+><\/a>.+?<p>\s*(?P<quality>[a-zA-Z-0-9\.]+)\s*<\/p>[^>]+>'
patron = r'<div class="mediaWrap mediaWrapAlt">\s*<a href="(?P<url>[^"]+)"(?:[^>]+>)>?\s*<img[^s]+src="([^"]+)"[^>]+>\s*<\/a>[^>]+>[^>]+>[^>]+>(?P<title>.+?)(?P<lang>[sSuUbB\-iItTaA]+)?(?:[ ]?\((?P<year>\d{4})-?(?:\d{4})?)\).[^<]+[^>]+><\/a>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s*(?P<quality>[a-zA-Z-0-9\.]+)'
patronBlock = r'<div id="main_col">(?P<block>.*?)<!\-\- main_col \-\->'
# if item.args != 'all' and item.args != 'search':

View File

@@ -148,8 +148,10 @@ def episodios(item):
for episode in episodes:
for key in episode:
if 'stagione' in encode(key['title']).lower():
match = support.match(encode(key['title']), patron=r'[Ss]tagione\s*(\d+) - [Ee]pisodio\s*(\d+)').match
title = match[0]+'x'+match[1] + ' - ' + item.fulltitle
season = support.match(encode(key['title']), patron=r'[Ss]tagione\s*(\d+)').match
episode = support.match(encode(key['title']), patron=r'[Ee]pisodio\s*(\d+)').match
if season and episode:
title = season + 'x' + episode + ' - ' + item.fulltitle
make_item = True
elif int(key['season_id']) == int(season_id):
try:

View File

@@ -285,7 +285,7 @@ def thumb(item_or_itemlist=None, genre=False, thumb=''):
'teenager':['ragazzi','teenager', 'teen'],
'learning':['learning'],
'all':['tutti', 'all'],
'news':['novità', "novita'", 'aggiornamenti', 'nuovi', 'nuove', 'new', 'newest', 'news'],
'news':['novità', "novita'", 'aggiornamenti', 'nuovi', 'nuove', 'new', 'newest', 'news', 'ultimi'],
'now_playing':['cinema', 'in sala'],
'anime':['anime'],
'genres':['genere', 'generi', 'categorie', 'categoria', 'category'],
@@ -318,6 +318,7 @@ def thumb(item_or_itemlist=None, genre=False, thumb=''):
'az':['lettera','lista','alfabetico','a-z', 'alphabetical'],
'year':['anno', 'anni', 'year'],
'update':['replay', 'update'],
'videolibrary':['teche'],
'autoplay':[config.get_localized_string(60071)]
}

View File

@@ -216,12 +216,14 @@ def downloadbest(video_urls, title, continuar=False):
return -2
def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False, resumir=True):
def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False, resumir=True, header=''):
logger.info("url= " + url)
logger.info("filename= " + nombrefichero)
if headers is None:
headers = []
if not header:
header = "plugin"
progreso = None
@@ -269,7 +271,7 @@ def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False
# Create the progress dialog
if not silent:
progreso = platformtools.dialog_progress("plugin", "Downloading...", url, nombrefichero)
progreso = platformtools.dialog_progress(header, "Downloading...", url, nombrefichero)
# If the platform does not return a valid dialog box, it assumes silent mode
if progreso is None:

View File

@@ -6,6 +6,8 @@
from __future__ import division
# from builtins import str
import io
from future.builtins import range
from past.utils import old_div
import sys
@@ -43,6 +45,11 @@ if os.name == "nt":
else:
fs_encoding = "utf8"
# per android è necessario, su kodi 18, usare FileIO
# https://forum.kodi.tv/showthread.php?tid=330124
# per xbox invece, è necessario usare open perchè _io è rotto :(
# https://github.com/jellyfin/jellyfin-kodi/issues/115#issuecomment-538811017
fileIo = platformtools.xbmc.getCondVisibility('system.platform.linux') and platformtools.xbmc.getCondVisibility('system.platform.android')
def validate_path(path):
@@ -142,19 +149,20 @@ def read(path, linea_inicio=0, total_lineas=None, whence=0, silent=False, vfs=Tr
total_lineas = None
if xbmc_vfs and vfs:
if not exists(path): return False
f = xbmcvfs.File(path, "rb")
f = xbmcvfs.File(path, "r")
data = f.read()
if total_lineas == None:
total_lineas = 9999999999
if linea_inicio > 0:
if not isinstance(whence, int):
try:
whence = int(whence)
except:
return False
f.seek(linea_inicio, whence)
logger.debug('POSITION of beginning of reading,, tell(): %s' % f.seek(0, 1))
if total_lineas == None:
total_lineas = 0
data = f.read(total_lineas)
return "".join(data)
data = '\n'.join(data.split('\n')[linea_inicio:total_lineas])
return data
elif path.lower().startswith("smb://"):
f = samba.smb_open(path, "rb")
else:
@@ -179,7 +187,7 @@ def read(path, linea_inicio=0, total_lineas=None, whence=0, silent=False, vfs=Tr
return unicode(b"".join(data))
def write(path, data, mode="wb", silent=False, vfs=True):
def write(path, data, mode="w", silent=False, vfs=True):
"""
Save the data to a file
@param path: file path to save
@@ -233,7 +241,11 @@ def file_open(path, mode="r", silent=False, vfs=True):
elif path.lower().startswith("smb://"):
return samba.smb_open(path, mode)
else:
return open(path, mode)
if fileIo:
return io.FileIO(path, mode)
else:
# return io.open(path, mode, decode='utf-8')
return open(path, mode)
except:
logger.error("ERROR when opening file: %s, %s" % (path, mode))
if not silent:

View File

@@ -56,7 +56,7 @@ HTTPTOOLS_DEFAULT_RANDOM_HEADERS = False
# with open(CF_LIST_PATH, "rb") as CF_File:
# CF_LIST = CF_File.read().splitlines()
FORCE_CLOUDSCRAPER_LIST = []
FORCE_CLOUDSCRAPER_LIST = ['akvideo.stream']
def get_user_agent():
# Returns the global user agent to be used when necessary for the url.
@@ -256,7 +256,7 @@ def downloadpage(url, **opt):
Parameter Type Description
-------------------------------------------------- -------------------------------------------------- ------------
HTTPResponse.sucess: bool True: Request successful | False: Error when making the request
HTTPResponse.success: bool True: Request successful | False: Error when making the request
HTTPResponse.code: int Server response code or error code if an error occurs
HTTPResponse.error: str Description of the error in case of an error
HTTPResponse.headers: dict Dictionary with server response headers
@@ -270,7 +270,7 @@ def downloadpage(url, **opt):
# global CF_LIST
CF = False
if domain in FORCE_CLOUDSCRAPER_LIST:
if domain in FORCE_CLOUDSCRAPER_LIST or opt.get('cf', False):
from lib import cloudscraper
session = cloudscraper.create_scraper()
CF = True
@@ -278,10 +278,10 @@ def downloadpage(url, **opt):
from lib import requests
session = requests.session()
# if domain in CF_LIST or opt.get('CF', False):
if opt.get('CF', False):
url = 'https://web.archive.org/save/' + url
CF = True
# if domain in CF_LIST or opt.get('CF', False):
if opt.get('CF', False):
url = 'https://web.archive.org/save/' + url
CF = True
if config.get_setting('resolver_dns') and not opt.get('use_requests', False):
from specials import resolverdns
@@ -380,9 +380,10 @@ def downloadpage(url, **opt):
req = requests.Response()
if not opt.get('ignore_response_code', False) and not proxy_data.get('stat', ''):
response['data'] = ''
response['sucess'] = False
response['success'] = False
info_dict.append(('Success', 'False'))
response['code'] = str(e)
import traceback
response['code'] = traceback.format_exc()
info_dict.append(('Response code', str(e)))
info_dict.append(('Finished in', time.time() - inicio))
if not opt.get('alfa_s', False):
@@ -393,7 +394,7 @@ def downloadpage(url, **opt):
else:
response['data'] = ''
response['sucess'] = False
response['success'] = False
response['code'] = ''
return type('HTTPResponse', (), response)
@@ -476,10 +477,10 @@ def fill_fields_post(info_dict, req, response, req_headers, inicio):
if response['code'] == 200:
info_dict.append(('Success', 'True'))
response['sucess'] = True
response['success'] = True
else:
info_dict.append(('Success', 'False'))
response['sucess'] = False
response['success'] = False
info_dict.append(('Response data length', len(response['data'])))

View File

@@ -352,7 +352,7 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t
quality=quality,
url=scraped["url"],
infoLabels=infolabels,
thumbnail=item.thumbnail if function == 'episodios' or not scraped["thumb"] else scraped["thumb"],
thumbnail=item.thumbnail if not scraped["thumb"] else scraped["thumb"],
args=item.args,
contentSerieName= title if 'movie' not in [contentType] and function != 'episodios' else item.contentSerieName,
contentTitle= title if 'movie' in [contentType] and function == 'peliculas' else item.contentTitle,
@@ -429,6 +429,7 @@ def scrape(func):
typeContentDict = args['typeContentDict'] if 'typeContentDict' in args else {}
debug = args['debug'] if 'debug' in args else False
debugBlock = args['debugBlock'] if 'debugBlock' in args else False
disabletmdb = args['disabletmdb'] if 'disabletmdb' in args else False
if 'pagination' in args and inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes']: pagination = args['pagination'] if args['pagination'] else 20
else: pagination = ''
lang = args['deflang'] if 'deflang' in args else ''
@@ -506,7 +507,7 @@ def scrape(func):
page=pag + 1,
thumbnail=thumb()))
if action != 'play' and function != 'episodios' and 'patronMenu' not in args and item.contentType in ['movie', 'tvshow', 'episode']:
if action != 'play' and function != 'episodios' and 'patronMenu' not in args and item.contentType in ['movie', 'tvshow', 'episode'] and not disabletmdb:
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if anime:
@@ -1095,6 +1096,7 @@ def videolibrary(itemlist, item, typography='', function_level=1, function=''):
contentSerieName=contentSerieName,
url=item.url,
action=action,
from_action=item.action,
extra=extra,
path=item.path,
thumbnail=get_thumb('add_to_videolibrary.png')
@@ -1334,9 +1336,9 @@ def addQualityTag(item, itemlist, data, patron):
else:
log('nessun tag qualità trovato')
def get_jwplayer_mediaurl(data, srvName):
def get_jwplayer_mediaurl(data, srvName, onlyHttp=False):
video_urls = []
block = scrapertools.find_single_match(data, r'sources: \[([^\]]+)\]')
block = scrapertools.find_single_match(data, r'sources:\s*\[([^\]]+)\]')
if 'file:' in block:
sources = scrapertools.find_multiple_matches(block, r'file:\s*"([^"]+)"(?:,label:\s*"([^"]+)")?')
elif 'src:' in block:
@@ -1346,7 +1348,7 @@ def get_jwplayer_mediaurl(data, srvName):
for url, quality in sources:
quality = 'auto' if not quality else quality
if url.split('.')[-1] != 'mpd':
video_urls.append(['.' + url.split('.')[-1] + ' [' + quality + '] [' + srvName + ']', url])
video_urls.append(['.' + url.split('.')[-1] + ' [' + quality + '] [' + srvName + ']', url if not onlyHttp else url.replace('https://', 'http://')])
video_urls.sort(key=lambda x: x[0].split()[1])
return video_urls

View File

@@ -435,7 +435,9 @@ def save_tvshow(item, episodelist, silent=False):
logger.debug("NOT FOUND contentSerieName or code")
return 0, 0, -1, path # Salimos sin guardar
contentTypeBackup = item.contentType # Fix errors in some channels
scraper_return = scraper.find_and_set_infoLabels(item)
item.contentType = contentTypeBackup # Fix errors in some channels
# At this point we can have:
# scraper_return = True: An item with infoLabels with the updated information of the series
# scraper_return = False: An item without movie information (it has been canceled in the window)
@@ -574,19 +576,63 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
# process local episodes
local_episodes_path = ''
local_episodelist = []
update = False
nfo_path = filetools.join(path, "tvshow.nfo")
head_nfo, item_nfo = read_nfo(nfo_path)
if item_nfo.update_last:
local_episodes_path = item_nfo.local_episodes_path
elif config.get_setting("local_episodes", "videolibrary"):
done, local_episodes_path = config_local_episodes_path(path, serie.show)
done, local_episodes_path = config_local_episodes_path(path, serie)
if done < 0:
logger.info("An issue has occurred while configuring local episodes, going out without creating strm")
return 0, 0, done
item_nfo.local_episodes_path = local_episodes_path
filetools.write(nfo_path, head_nfo + item_nfo.tojson())
if local_episodes_path:
process_local_episodes(local_episodes_path, path)
from platformcode.xbmc_videolibrary import check_db, clean
# check if the local episodes are in the Kodi video library
if check_db(local_episodes_path):
local_episodelist += get_local_content(local_episodes_path)
clean_list = []
for f in filetools.listdir(path):
match = scrapertools.find_single_match(f, r'[S]?(\d+)(?:x|_|\.)?[E]?(\d+)')
if match:
ep = '%dx%02d' % (int(match[0]), int(match[1]))
if ep in local_episodelist:
del_file = filetools.join(path, f)
filetools.remove(del_file)
if f.endswith('strm'):
sep = '\\' if '\\' in path else '/'
clean_path = path[:-len(sep)] if path.endswith(sep) else path
clean_path = '%/' + clean_path.split(sep)[-1] + '/' + f
clean_list.append(clean_path)
clean_list.append(clean_path.replace('/','\\'))
if clean_list:
clean(clean_list)
update = True
if item_nfo.local_episodes_list:
difference = [x for x in item_nfo.local_episodes_list if (x not in local_episodelist)]
if len(difference) > 0:
clean_list = []
for f in difference:
sep = '\\' if '\\' in local_episodes_path else '/'
clean_path = local_episodes_path[:-len(sep)] if local_episodes_path.endswith(sep) else local_episodes_path
clean_path = '%/' + clean_path.split(sep)[-1] + '/%' + f.replace('x','%') + '%'
clean_list.append(clean_path)
clean_list.append(clean_path.replace('/','\\'))
clean(clean_list)
update = True
item_nfo.local_episodes_list = sorted(local_episodelist)
filetools.write(nfo_path, head_nfo + item_nfo.tojson())
# the local episodes are not in the Kodi video library
else:
process_local_episodes(local_episodes_path, path)
insertados = 0
sobreescritos = 0
@@ -667,12 +713,13 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
logger.info("There is no episode list, we go out without creating strm")
return 0, 0, 0
local_episodelist += get_local_content(path)
# fix float because division is done poorly in python 2.x
try:
t = float(100) / len(new_episodelist)
except:
t = 0
for i, e in enumerate(scraper.sort_episode_list(new_episodelist)):
if not silent:
p_dialog.update(int(math.ceil((i + 1) * t)), config.get_localized_string(60064), e.title)
@@ -694,6 +741,10 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
nfo_path = filetools.join(path, "%s.nfo" % season_episode)
json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower())
if season_episode in local_episodelist:
logger.info('Skipped: Serie ' + serie.contentSerieName + ' ' + season_episode + ' available as local content')
continue
# check if the episode has been downloaded
if filetools.join(path, "%s [downloads].json" % season_episode) in ficheros:
logger.info('INFO: "%s" episode %s has been downloaded, skipping it' % (serie.contentSerieName, season_episode))
@@ -817,14 +868,17 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
filetools.write(tvshow_path, head_nfo + tvshow_item.tojson())
except:
logger.error("Error updating tvshow.nfo")
logger.error("Unable to save %s emergency urls in the video library" % tvshow_item.contentSerieName)
logger.error("Unable to save %s emergency urls in the video library" % serie.contentSerieName)
logger.error(traceback.format_exc())
fallidos = -1
else:
# ... if it was correct we update the Kodi video library
if config.is_xbmc() and config.get_setting("videolibrary_kodi") and not silent:
from platformcode import xbmc_videolibrary
xbmc_videolibrary.update()
update = True
if update:
from platformcode import xbmc_videolibrary
xbmc_videolibrary.update()
if fallidos == len(episodelist):
fallidos = -1
@@ -833,23 +887,25 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
return insertados, sobreescritos, fallidos
def config_local_episodes_path(path, title, silent=False):
logger.info()
local_episodes_path = ''
if not silent:
silent = platformtools.dialog_yesno(config.get_localized_string(30131), config.get_localized_string(80044) % title)
if silent:
if config.is_xbmc() and not config.get_setting("videolibrary_kodi"):
platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(80043))
local_episodes_path = platformtools.dialog_browse(0, config.get_localized_string(80046))
if local_episodes_path == '':
logger.info("User has canceled the dialog")
return -2, local_episodes_path
elif path in local_episodes_path:
platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(80045))
logger.info("Selected folder is the same of the TV show one")
return -2, local_episodes_path
def config_local_episodes_path(path, item, silent=False):
logger.info(item)
from platformcode.xbmc_videolibrary import search_local_path
local_episodes_path=search_local_path(item)
if not local_episodes_path:
title = item.contentSerieName if item.contentSerieName else item.show
if not silent:
silent = platformtools.dialog_yesno(config.get_localized_string(30131), config.get_localized_string(80044) % title)
if silent:
if config.is_xbmc() and not config.get_setting("videolibrary_kodi"):
platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(80043))
local_episodes_path = platformtools.dialog_browse(0, config.get_localized_string(80046))
if local_episodes_path == '':
logger.info("User has canceled the dialog")
return -2, local_episodes_path
elif path in local_episodes_path:
platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(80045))
logger.info("Selected folder is the same of the TV show one")
return -2, local_episodes_path
if local_episodes_path:
# import artwork
@@ -901,6 +957,21 @@ def process_local_episodes(local_episodes_path, path):
filetools.write(nfo_path, head_nfo + item_nfo.tojson())
def get_local_content(path):
logger.info()
local_episodelist = []
for root, folders, files in filetools.walk(path):
for file in files:
season_episode = scrapertools.get_season_and_episode(file)
if season_episode == "" or filetools.exists(filetools.join(path, "%s.strm" % season_episode)):
continue
local_episodelist.append(season_episode)
local_episodelist = sorted(set(local_episodelist))
return local_episodelist
def add_movie(item):
"""
Keep a movie at the movie library. The movie can be a link within a channel or a previously downloaded video.
@@ -969,7 +1040,7 @@ def add_tvshow(item, channel=None):
else:
# This mark is because the item has something else apart in the "extra" attribute
item.action = item.extra if item.extra else item.action
# item.action = item.extra if item.extra else item.action
if isinstance(item.extra, str) and "###" in item.extra:
item.action = item.extra.split("###")[0]
item.extra = item.extra.split("###")[1]

View File

@@ -23,7 +23,7 @@ class ziptools(object):
if not dir.endswith(':') and not filetools.exists(dir):
filetools.mkdir(dir)
zf = zipfile.ZipFile(file)
zf = zipfile.ZipFile(filetools.file_open(file, vfs=False))
if not folder_to_extract:
self._createstructure(file, dir)
num_files = len(zf.namelist())
@@ -93,7 +93,7 @@ class ziptools(object):
filetools.mkdir(curdir)
def _listdirs(self, file):
zf = zipfile.ZipFile(file)
zf = zipfile.ZipFile(filetools.file_open(file, vfs=False))
dirs = []
for name in zf.namelist():
if name.endswith('/'):
@@ -104,7 +104,7 @@ class ziptools(object):
def zip(self, dir, file):
import os
zf = zipfile.ZipFile(file, "w", zipfile.ZIP_DEFLATED)
zf = zipfile.ZipFile(filetools.file_open(file, "w", vfs=False), "w", zipfile.ZIP_DEFLATED)
abs_src = os.path.abspath(dir)
for dirname, subdirs, files in os.walk(dir):
for filename in files:

333
lib/arm_chromeos.py Normal file
View File

@@ -0,0 +1,333 @@
# -*- coding: utf-8 -*-
# MIT License (see LICENSE.txt or https://opensource.org/licenses/MIT)
# From inputstreamhelper
"""Implements a class with methods related to the Chrome OS image"""
from __future__ import absolute_import, division, unicode_literals
import os
from struct import calcsize, unpack
from zipfile import ZipFile
from io import UnsupportedOperation
from platformcode import logger, config
def compat_path(path, encoding='utf-8', errors='strict'):
    """Convert unicode path to bytestring if needed"""
    import sys
    # Only Python 2 on non-Windows platforms needs byte paths.
    if sys.version_info.major != 2:
        return path
    if sys.platform.startswith('win'):
        return path
    if isinstance(path, unicode):  # noqa: F821; pylint: disable=undefined-variable,useless-suppression
        return path.encode(encoding, errors)
    return path
class ChromeOSImage:
"""
The main class handling a Chrome OS image
Information related to ext2 is sourced from here: https://www.nongnu.org/ext2-doc/ext2.html
"""
def __init__(self, imgpath):
    """Prepares the image"""
    # NOTE: the docstring used to sit after the first statement, which made it
    # a no-op string literal instead of the method docstring; it now leads.
    logger.info('Image Path: ' + imgpath)
    self.imgpath = imgpath
    # Byte stream over the image, as returned by get_bstream (defined later in
    # this class) — presumably a file-like handle; confirm against the helper.
    self.bstream = self.get_bstream(imgpath)
    # Parsing state, filled in lazily by extract_file:
    self.part_offset = None  # byte offset of the ROOT-A partition
    self.sb_dict = None      # ext2 superblock fields (see superblock())
    self.blocksize = None    # ext2 block size in bytes (set by superblock())
    self.blk_groups = None   # block-group descriptors (see block_groups())
    self.progress = None     # progress dialog used by extract_file
def gpt_header(self):
"""Returns the needed parts of the GPT header, can be easily expanded if necessary"""
header_fmt = '<8s4sII4x4Q16sQ3I'
header_size = calcsize(header_fmt)
lba_size = 512 # assuming LBA size
self.seek_stream(lba_size)
# GPT Header entries: signature, revision, header_size, header_crc32, (reserved 4x skipped,) current_lba, backup_lba,
# first_usable_lba, last_usable_lba, disk_guid, start_lba_part_entries, num_part_entries,
# size_part_entry, crc32_part_entries
_, _, _, _, _, _, _, _, _, start_lba_part_entries, num_part_entries, size_part_entry, _ = unpack(header_fmt, self.read_stream(header_size))
return (start_lba_part_entries, num_part_entries, size_part_entry)
def chromeos_offset(self):
"""Calculate the Chrome OS losetup start offset"""
part_format = '<16s16sQQQ72s'
entries_start, entries_num, entry_size = self.gpt_header() # assuming partition table is GPT
lba_size = 512 # assuming LBA size
self.seek_stream(entries_start * lba_size)
if not calcsize(part_format) == entry_size:
logger.info('Partition table entries are not 128 bytes long')
return 0
for index in range(1, entries_num + 1): # pylint: disable=unused-variable
# Entry: type_guid, unique_guid, first_lba, last_lba, attr_flags, part_name
_, _, first_lba, _, _, part_name = unpack(part_format, self.read_stream(entry_size))
part_name = part_name.decode('utf-16').strip('\x00')
if part_name == 'ROOT-A': # assuming partition name is ROOT-A
offset = first_lba * lba_size
break
if not offset:
logger.info('Failed to calculate losetup offset.')
return 0
return offset
def extract_file(self, filename, extract_path, progress):
    """Extract `filename` from the ext2 ROOT-A partition of the image.

    Scans the raw stream for a directory entry whose name matches
    `filename`, resolves its inode, and hands it to write_file (defined
    later in this class) to materialize it under `extract_path`.
    Returns False when the name is never found; otherwise returns
    whatever write_file returns. `progress` is a dialog object exposing
    update(percent[, message]).
    """
    self.progress = progress
    self.progress.update(2, config.get_localized_string(70813))
    # Parse the filesystem layout first: partition offset, superblock,
    # and block-group descriptors.
    self.part_offset = self.chromeos_offset()
    self.sb_dict = self.superblock()
    self.blk_groups = self.block_groups()
    bin_filename = filename.encode('ascii')
    chunksize = 4 * 1024**2  # scan 4 MiB at a time
    percent8 = 40  # progress in 1/8-percent units (40/8 = 5% shown)
    self.progress.update(int(percent8 / 8), config.get_localized_string(70814))
    # Keep two consecutive chunks in memory so a directory entry that
    # straddles a chunk boundary is still matched.
    chunk1 = self.read_stream(chunksize)
    while True:
        chunk2 = self.read_stream(chunksize)
        if not chunk2:
            # Stream exhausted without a valid match.
            logger.info('File %s not found in the ChromeOS image' % filename)
            return False
        chunk = chunk1 + chunk2
        if bin_filename in chunk:
            # A directory entry carries 8 bytes of metadata (inode,
            # rec_len, name_len, file_type) before the name — back up
            # to include them in the slice handed to dir_entry.
            i_index_pos = chunk.index(bin_filename) - 8
            dir_dict = self.dir_entry(chunk[i_index_pos:i_index_pos + len(filename) + 8])
            # Filter out random byte matches: the inode must be in range
            # and the recorded name length must match exactly.
            if dir_dict['inode'] < self.sb_dict['s_inodes_count'] and dir_dict['name_len'] == len(filename):
                break
        chunk1 = chunk2
        if percent8 < 240:  # cap the scan phase at 240/8 = 30% on the dialog
            percent8 += 1
            self.progress.update(int(percent8 / 8))
    self.progress.update(32, config.get_localized_string(70815))
    # Map the (1-based) inode number to its on-disk position via its
    # block group's inode table.
    blk_group_num = (dir_dict['inode'] - 1) // self.sb_dict['s_inodes_per_group']
    blk_group = self.blk_groups[blk_group_num]
    i_index_in_group = (dir_dict['inode'] - 1) % self.sb_dict['s_inodes_per_group']
    inode_pos = self.part_offset + self.blocksize * blk_group['bg_inode_table'] + self.sb_dict['s_inode_size'] * i_index_in_group
    inode_dict, _ = self.inode_table(inode_pos)
    return self.write_file(inode_dict, os.path.join(extract_path, filename))
def superblock(self):
"""Get relevant info from the superblock, assert it's an ext2 fs"""
names = ('s_inodes_count', 's_blocks_count', 's_r_blocks_count', 's_free_blocks_count', 's_free_inodes_count', 's_first_data_block',
's_log_block_size', 's_log_frag_size', 's_blocks_per_group', 's_frags_per_group', 's_inodes_per_group', 's_mtime', 's_wtime',
's_mnt_count', 's_max_mnt_count', 's_magic', 's_state', 's_errors', 's_minor_rev_level', 's_lastcheck', 's_checkinterval',
's_creator_os', 's_rev_level', 's_def_resuid', 's_def_resgid', 's_first_ino', 's_inode_size', 's_block_group_nr',
's_feature_compat', 's_feature_incompat', 's_feature_ro_compat', 's_uuid', 's_volume_name', 's_last_mounted',
's_algorithm_usage_bitmap', 's_prealloc_block', 's_prealloc_dir_blocks')
fmt = '<13I6H4I2HI2H3I16s16s64sI2B818x'
fmt_len = calcsize(fmt)
self.seek_stream(self.part_offset + 1024) # superblock starts after 1024 byte
pack = self.read_stream(fmt_len)
sb_dict = dict(zip(names, unpack(fmt, pack)))
sb_dict['s_magic'] = hex(sb_dict['s_magic'])
assert sb_dict['s_magic'] == '0xef53' # assuming/checking this is an ext2 fs
block_groups_count1 = sb_dict['s_blocks_count'] / sb_dict['s_blocks_per_group']
block_groups_count1 = int(block_groups_count1) if float(int(block_groups_count1)) == block_groups_count1 else int(block_groups_count1) + 1
block_groups_count2 = sb_dict['s_inodes_count'] / sb_dict['s_inodes_per_group']
block_groups_count2 = int(block_groups_count2) if float(int(block_groups_count2)) == block_groups_count2 else int(block_groups_count2) + 1
assert block_groups_count1 == block_groups_count2
sb_dict['block_groups_count'] = block_groups_count1
self.blocksize = 1024 << sb_dict['s_log_block_size']
return sb_dict
def block_group(self):
"""Get info about a block group"""
names = ('bg_block_bitmap', 'bg_inode_bitmap', 'bg_inode_table', 'bg_free_blocks_count', 'bg_free_inodes_count', 'bg_used_dirs_count', 'bg_pad')
fmt = '<3I4H12x'
fmt_len = calcsize(fmt)
pack = self.read_stream(fmt_len)
blk = unpack(fmt, pack)
blk_dict = dict(zip(names, blk))
return blk_dict
def block_groups(self):
"""Get info about all block groups"""
if self.blocksize == 1024:
self.seek_stream(self.part_offset + 2 * self.blocksize)
else:
self.seek_stream(self.part_offset + self.blocksize)
blk_groups = []
for i in range(self.sb_dict['block_groups_count']): # pylint: disable=unused-variable
blk_group = self.block_group()
blk_groups.append(blk_group)
return blk_groups
def inode_table(self, inode_pos):
"""Reads and returns an inode table and inode size"""
names = ('i_mode', 'i_uid', 'i_size', 'i_atime', 'i_ctime', 'i_mtime', 'i_dtime', 'i_gid', 'i_links_count', 'i_blocks', 'i_flags',
'i_osd1', 'i_block0', 'i_block1', 'i_block2', 'i_block3', 'i_block4', 'i_block5', 'i_block6', 'i_block7', 'i_block8',
'i_block9', 'i_block10', 'i_block11', 'i_blocki', 'i_blockii', 'i_blockiii', 'i_generation', 'i_file_acl', 'i_dir_acl', 'i_faddr')
fmt = '<2Hi4I2H3I15I4I12x'
fmt_len = calcsize(fmt)
inode_size = self.sb_dict['s_inode_size']
self.seek_stream(inode_pos)
pack = self.read_stream(fmt_len)
inode = unpack(fmt, pack)
inode_dict = dict(zip(names, inode))
inode_dict['i_mode'] = hex(inode_dict['i_mode'])
blocks = inode_dict['i_size'] / self.blocksize
inode_dict['blocks'] = int(blocks) if float(int(blocks)) == blocks else int(blocks) + 1
self.read_stream(inode_size - fmt_len)
return inode_dict, inode_size
@staticmethod
def dir_entry(chunk):
"""Returns the directory entry found in chunk"""
dir_names = ('inode', 'rec_len', 'name_len', 'file_type', 'name')
dir_fmt = '<IHBB' + str(len(chunk) - 8) + 's'
dir_dict = dict(zip(dir_names, unpack(dir_fmt, chunk)))
return dir_dict
def iblock_ids(self, blk_id, ids_to_read):
"""Reads the block indices/IDs from an indirect block"""
seek_pos = self.part_offset + self.blocksize * blk_id
self.seek_stream(seek_pos)
fmt = '<' + str(int(self.blocksize / 4)) + 'I'
ids = list(unpack(fmt, self.read_stream(self.blocksize)))
ids_to_read -= len(ids)
return ids, ids_to_read
def iiblock_ids(self, blk_id, ids_to_read):
"""Reads the block indices/IDs from a doubly-indirect block"""
seek_pos = self.part_offset + self.blocksize * blk_id
self.seek_stream(seek_pos)
fmt = '<' + str(int(self.blocksize / 4)) + 'I'
iids = unpack(fmt, self.read_stream(self.blocksize))
ids = []
for iid in iids:
if ids_to_read <= 0:
break
ind_block_ids, ids_to_read = self.iblock_ids(iid, ids_to_read)
ids += ind_block_ids
return ids, ids_to_read
def seek_stream(self, seek_pos):
    """Move position of bstream to seek_pos.

    Fast path: seek the underlying stream directly and record the new
    position in self.bstream[1]. Streams opened from inside a zip archive
    cannot seek and raise io.UnsupportedOperation; for those, the position
    is advanced by reading and discarding data. Seeking backwards on such
    a stream is only possible by reopening the image from the start.
    """
    try:
        self.bstream[0].seek(seek_pos)
        self.bstream[1] = seek_pos
        return
    except UnsupportedOperation:
        # Non-seekable stream (e.g. a zip member): emulate seeking by reading.
        chunksize = 4 * 1024**2  # discard in 4 MiB chunks
        if seek_pos >= self.bstream[1]:
            # Forward seek: consume bytes until the target offset is reached
            # (read_stream keeps self.bstream[1] up to date).
            while seek_pos - self.bstream[1] > chunksize:
                self.read_stream(chunksize)
            self.read_stream(seek_pos - self.bstream[1])
            return
        # Backward seek: reopen the image and fast-forward from offset 0.
        self.bstream[0].close()
        self.bstream[1] = 0
        self.bstream = self.get_bstream(self.imgpath)
        while seek_pos - self.bstream[1] > chunksize:
            self.read_stream(chunksize)
        self.read_stream(seek_pos - self.bstream[1])
        return
def read_stream(self, num_of_bytes):
"""Read and return a chunk of the bytestream"""
self.bstream[1] += num_of_bytes
return self.bstream[0].read(num_of_bytes)
def get_block_ids(self, inode_dict):
    """Collect every data block ID belonging to the file of *inode_dict*."""
    remaining = inode_dict['blocks'] - 12  # the 12 direct pointers come first
    block_ids = [inode_dict['i_block%d' % index] for index in range(12)]
    if inode_dict['i_blocki'] != 0:
        indirect, remaining = self.iblock_ids(inode_dict['i_blocki'], remaining)
        block_ids.extend(indirect)
    if inode_dict['i_blockii'] != 0:
        doubly_indirect, remaining = self.iiblock_ids(inode_dict['i_blockii'], remaining)
        block_ids.extend(doubly_indirect)
    # Drop trailing IDs of partially used direct/indirect pointer slots.
    return block_ids[:inode_dict['blocks']]
def read_file(self, block_ids):
    """Read the blocks listed in *block_ids* into a dict keyed by block ID.

    Also advances the extraction progress dialog from 35% to 95% while
    reading.
    """
    block_dict = {}
    total = len(block_ids)
    # enumerate() instead of block_ids.index(block_id): index() rescans the
    # list on every iteration (O(n^2) overall) and always returns the first
    # occurrence, so duplicate IDs would stall the reported progress.
    for position, block_id in enumerate(block_ids):
        percent = int(35 + 60 * position / total)
        self.progress.update(percent, config.get_localized_string(70816))
        seek_pos = self.part_offset + self.blocksize * block_id
        self.seek_stream(seek_pos)
        block_dict[block_id] = self.read_stream(self.blocksize)
    return block_dict
@staticmethod
def write_file_chunk(opened_file, chunk, bytes_to_write):
"""Writes bytes to file in chunks"""
if len(chunk) > bytes_to_write:
opened_file.write(chunk[:bytes_to_write])
return 0
opened_file.write(chunk)
return bytes_to_write - len(chunk)
def write_file(self, inode_dict, filepath):
    """Write the file described by *inode_dict* to *filepath*.

    Returns True once all i_size bytes have been written, False otherwise.
    """
    remaining = inode_dict['i_size']
    block_ids = self.get_block_ids(inode_dict)
    # Read the blocks in ascending order (sequential image access), but
    # write them back in the inode's logical order.
    block_dict = self.read_file(sorted(block_ids))
    target_dir = os.path.join(os.path.dirname(filepath), '')
    if not os.path.exists(target_dir):
        os.mkdir(target_dir)
    with open(compat_path(filepath), 'wb') as out_file:
        for block_id in block_ids:
            remaining = self.write_file_chunk(out_file, block_dict[block_id], remaining)
            if remaining == 0:
                return True
    return False
@staticmethod
def get_bstream(imgpath):
"""Get a bytestream of the image"""
if imgpath.endswith('.zip'):
bstream = ZipFile(imgpath, 'r').open(os.path.basename(imgpath).strip('.zip'), 'r')
else:
bstream = open(imgpath, 'rb')
return [bstream, 0]

View File

@@ -127,7 +127,7 @@ class UnshortenIt(object):
timeout=self._timeout,
cookies=False,
follow_redirects=False)
if not r.sucess:
if not r.success:
return uri, -1
if '4snip' not in r.url and 'location' in r.headers and retries < self._maxretries:

View File

@@ -52,7 +52,7 @@ def dialog_notification(heading, message, icon=3, time=5000, sound=True):
dialog_ok(heading, message)
def dialog_yesno(heading, line1, line2="", line3="", nolabel="No", yeslabel="Si", autoclose=0, customlabel=None):
def dialog_yesno(heading, line1, line2="", line3="", nolabel=config.get_localized_string(70170), yeslabel=config.get_localized_string(30022), autoclose=0, customlabel=None):
# customlabel only on kodi 19
dialog = xbmcgui.Dialog()
if config.get_platform() == 'kodi-matrix':
@@ -109,9 +109,9 @@ def dialog_textviewer(heading, text): # available from kodi 16
return xbmcgui.Dialog().textviewer(heading, text)
def dialog_browse(_type, heading, default=""):
def dialog_browse(_type, heading, shares="files", mask="", useThumbs=False, treatAsFolder=False, defaultt="", enableMultiple=False):
dialog = xbmcgui.Dialog()
d = dialog.browse(_type, heading, 'files')
d = dialog.browse(_type, heading, shares, mask, useThumbs, treatAsFolder, defaultt, enableMultiple)
return d
@@ -179,13 +179,13 @@ def render_items(itemlist, parent_item):
# if cloudflare and cloudscraper is used, cookies are needed to display images taken from site
# before checking domain (time consuming), checking if tmdb failed (so, images scraped from website are used)
if item.action in ['findvideos'] and not item.infoLabels['tmdb_id']:
# if item.action in ['findvideos'] and not item.infoLabels['tmdb_id']:
# faster but ugly way of checking
for d in httptools.FORCE_CLOUDSCRAPER_LIST:
if d + '/' in item.url:
item.thumbnail = httptools.get_url_headers(item.thumbnail)
item.fanart = httptools.get_url_headers(item.fanart)
break
# for d in httptools.FORCE_CLOUDSCRAPER_LIST:
# if d + '/' in item.url:
# item.thumbnail = httptools.get_url_headers(item.thumbnail)
# item.fanart = httptools.get_url_headers(item.fanart)
# break
icon_image = "DefaultFolder.png" if item.folder else "DefaultVideo.png"
listitem = xbmcgui.ListItem(item.title)
@@ -248,7 +248,7 @@ def getCurrentView(item=None, parent_item=None):
elif (item.contentType in ['movie'] and parent_item.action in parent_actions) \
or (item.channel in ['videolibrary'] and parent_item.action in ['list_movies']) \
or (parent_item.channel in ['favorites'] and parent_item.action in ['mainlist']) \
or parent_item.action in ['now_on_tv', 'now_on_misc', 'now_on_misc_film', 'mostrar_perfil', 'live']:
or parent_item.action in ['now_on_tv', 'now_on_misc', 'now_on_misc_film', 'mostrar_perfil', 'live', 'replay', 'news']:
return 'movie', 'movies'
elif (item.contentType in ['tvshow'] and parent_item.action in parent_actions) \
@@ -505,7 +505,7 @@ def is_playing():
def play_video(item, strm=False, force_direct=False, autoplay=False):
logger.info()
# logger.debug(item.tostring('\n'))
logger.debug(item.tostring('\n'))
if item.channel == 'downloads':
logger.info("Play local video: %s [%s]" % (item.title, item.url))
xlistitem = xbmcgui.ListItem(path=item.url)
@@ -545,9 +545,15 @@ def play_video(item, strm=False, force_direct=False, autoplay=False):
# if it is a video in mpd format, the listitem is configured to play it ith the inpustreamaddon addon implemented in Kodi 17
# from core.support import dbg;dbg()
if mpd:
if not os.path.exists(os.path.join(xbmc.translatePath('special://home/addons/'),'inputstream.adaptive')): install_inputstream()
if not install_inputstream():
return
xlistitem.setProperty('inputstreamaddon', 'inputstream.adaptive')
xlistitem.setProperty('inputstream.adaptive.manifest_type', 'mpd')
if item.drm and item.license:
install_widevine()
xlistitem.setProperty("inputstream.adaptive.license_type", item.drm)
xlistitem.setProperty("inputstream.adaptive.license_key", item.license)
xlistitem.setMimeType('application/dash+xml')
if force_direct: item.play_from = 'window'
@@ -1067,17 +1073,201 @@ def resume_playback(item, return_played_time=False):
item.nfo = item.strm_path = ""
return item, None, None, None
##### INPUTSTREM #####
def install_inputstream():
from xbmcaddon import Addon
try:
# See if there's an installed repo that has it
xbmc.executebuiltin('InstallAddon(inputstream.adaptive)', wait=True)
if not os.path.exists(os.path.join(xbmc.translatePath('special://home/addons/'),'inputstream.adaptive')) and not os.path.exists(os.path.join(xbmc.translatePath('special://xbmcbinaddons/'),'inputstream.adaptive')):
try:
# See if there's an installed repo that has it
xbmc.executebuiltin('InstallAddon(inputstream.adaptive)', wait=True)
# Check if InputStream add-on exists!
Addon('inputstream.adaptive')
# Check if InputStream add-on exists!
Addon('inputstream.adaptive')
logger.info('InputStream add-on installed from repo.')
return True
except RuntimeError:
logger.info('InputStream add-on not installed.')
return False
logger.info('InputStream add-on installed from repo.')
except RuntimeError:
logger.info('InputStream add-on not installed.')
dialog_ok(config.get_localized_string(20000), config.get_localized_string(30126))
return False
else:
try:
Addon('inputstream.adaptive')
logger.info('InputStream add-on is installed and enabled')
except:
logger.info('enabling InputStream add-on')
xbmc.executebuiltin('UpdateLocalAddons')
xbmc.executeJSONRPC('{"jsonrpc": "2.0", "id":1, "method": "Addons.SetAddonEnabled", "params": { "addonid": "inputstream.adaptive", "enabled": true }}')
return True
def install_widevine():
platform = get_platform()
if platform['os'] != 'android':
from core.httptools import downloadpage
from xbmcaddon import Addon
from core import jsontools
from distutils.version import LooseVersion
path = xbmc.translatePath(Addon('inputstream.adaptive').getSetting('DECRYPTERPATH'))
# if Widevine CDM is not installed
if not os.path.exists(path):
select = dialog_yesno('Widevine CDM', config.get_localized_string(70808))
if select > 0:
if not 'arm' in platform['arch']:
last_version = downloadpage('https://dl.google.com/widevine-cdm/versions.txt').data.split()[-1]
download_widevine(last_version, platform, path)
else:
json = downloadpage('https://dl.google.com/dl/edgedl/chromeos/recovery/recovery.json').data
devices = jsontools.load(json)
download_chromeos_image(devices, platform, path)
# if Widevine CDM is outdated
elif platform['os'] != 'android':
if not 'arm' in platform['arch']:
last_version = downloadpage('https://dl.google.com/widevine-cdm/versions.txt').data.split()[-1]
current_version = jsontools.load(open(os.path.join(path, 'manifest.json')).read())['version']
if LooseVersion(last_version) > LooseVersion(current_version):
select = dialog_yesno(config.get_localized_string(70810),config.get_localized_string(70809))
if select > 0: download_widevine(last_version, platform, path)
else:
devices = jsontools.load(downloadpage('https://dl.google.com/dl/edgedl/chromeos/recovery/recovery.json').data)
current_version = jsontools.load(open(os.path.join(path, 'config.json')).read())['version']
last_version = best_chromeos_image(devices)['version']
if LooseVersion(last_version) > LooseVersion(current_version):
select = dialog_yesno(config.get_localized_string(70810),config.get_localized_string(70809))
if select > 0:download_chromeos_image(devices, platform, path)
def download_widevine(version, platform, path):
# for x86 architectures
from zipfile import ZipFile
from xbmcaddon import Addon
from core import downloadtools
archiveName = 'https://dl.google.com/widevine-cdm/' + version + '-' + platform['os'] + '-' + platform['arch'] + '.zip'
fileName = config.get_temp_file('widevine.zip')
if not os.path.exists(archiveName):
if not os.path.exists(fileName):
downloadtools.downloadfile(archiveName, fileName, header='Download Widevine CDM')
zip_obj = ZipFile(fileName)
for filename in zip_obj.namelist():
zip_obj.extract(filename, path)
zip_obj.close()
os.remove(fileName)
def download_chromeos_image(devices, platform, path):
# for arm architectures
from core import downloadtools
from zipfile import ZipFile
from core import jsontools
best = best_chromeos_image(devices)
archiveName = best['url']
version = best['version']
fileName = config.get_temp_file(archiveName.split('/')[-1])
if not os.path.exists(fileName):
downloadtools.downloadfile(archiveName, fileName, header='Download Widevine CDM')
from lib.arm_chromeos import ChromeOSImage
ChromeOSImage(fileName).extract_file(
filename='libwidevinecdm.so',
extract_path=os.path.join(path),
progress=dialog_progress(config.get_localized_string(70811),config.get_localized_string(70812)))
recovery_file = os.path.join(path, os.path.basename('https://dl.google.com/dl/edgedl/chromeos/recovery/recovery.json'))
config_file = os.path.join(path, 'config.json')
if not os.path.exists(path):
os.mkdir(path)
with open(recovery_file, 'w') as reco_file:
reco_file.write(jsontools.dump(devices, indent=4))
reco_file.close()
with open(config_file, 'w') as conf_file:
conf_file.write(jsontools.dump(best))
conf_file.close()
os.remove(fileName)
def best_chromeos_image(devices):
best = None
for device in devices:
# Select ARM hardware only
for arm_hwid in ['BIG','BLAZE','BOB','DRUWL','DUMO','ELM','EXPRESSO','FIEVEL','HANA','JAQ','JERRY','KEVIN','KITTY','MICKEY','MIGHTY','MINNIE','PHASER','PHASER360','PI','PIT','RELM','SCARLET','SKATE','SNOW','SPEEDY','SPRING','TIGER']:
if arm_hwid in device['hwidmatch']:
hwid = arm_hwid
break # We found an ARM device, rejoice !
else:
continue # Not ARM, skip this device
device['hwid'] = hwid
# Select the first ARM device
if best is None:
best = device
continue # Go to the next device
# Skip identical hwid
if hwid == best['hwid']:
continue
# Select the newest version
from distutils.version import LooseVersion # pylint: disable=import-error,no-name-in-module,useless-suppression
if LooseVersion(device['version']) > LooseVersion(best['version']):
logger.info('%s (%s) is newer than %s (%s)' % (device['hwid'], device['version'], best['hwid'], best['version']))
best = device
# Select the smallest image (disk space requirement)
elif LooseVersion(device['version']) == LooseVersion(best['version']):
if int(device['filesize']) + int(device['zipfilesize']) < int(best['filesize']) + int(best['zipfilesize']):
logger.info('%s (%d) is smaller than %s (%d)' % (device['hwid'], int(device['filesize']) + int(device['zipfilesize']), best['hwid'], int(best['filesize']) + int(best['zipfilesize'])))
best = device
return best
def get_platform():
import platform
build = xbmc.getInfoLabel("System.BuildVersion")
kodi_version = int(build.split()[0][:2])
ret = {
"auto_arch": sys.maxsize > 2 ** 32 and "64-bit" or "32-bit",
"arch": sys.maxsize > 2 ** 32 and "x64" or "ia32",
"os": "",
"version": platform.release(),
"kodi": kodi_version,
"build": build
}
if xbmc.getCondVisibility("system.platform.android"):
ret["os"] = "android"
if "arm" in platform.machine() or "aarch" in platform.machine():
ret["arch"] = "arm"
if "64" in platform.machine() and ret["auto_arch"] == "64-bit":
ret["arch"] = "arm64"
elif xbmc.getCondVisibility("system.platform.linux"):
ret["os"] = "linux"
if "aarch" in platform.machine() or "arm64" in platform.machine():
if xbmc.getCondVisibility("system.platform.linux.raspberrypi"):
ret["arch"] = "armv7"
elif ret["auto_arch"] == "32-bit":
ret["arch"] = "armv7"
elif ret["auto_arch"] == "64-bit":
ret["arch"] = "arm64"
elif platform.architecture()[0].startswith("32"):
ret["arch"] = "arm"
else:
ret["arch"] = "arm64"
elif "armv7" in platform.machine():
ret["arch"] = "armv7"
elif "arm" in platform.machine():
ret["arch"] = "arm"
elif xbmc.getCondVisibility("system.platform.xbox"):
ret["os"] = "win"
ret["arch"] = "x64"
elif xbmc.getCondVisibility("system.platform.windows"):
ret["os"] = "win"
if platform.machine().endswith('64'):
ret["arch"] = "x64"
elif xbmc.getCondVisibility("system.platform.osx"):
ret["os"] = "mac"
ret["arch"] = "x64"
elif xbmc.getCondVisibility("system.platform.ios"):
ret["os"] = "ios"
ret["arch"] = "arm"
return ret

View File

@@ -326,7 +326,7 @@ def updateFromZip(message=config.get_localized_string(80050)):
hash = fixZipGetHash(localfilename)
logger.info(hash)
with zipfile.ZipFile(fOpen(localfilename, 'rb')) as zip:
with zipfile.ZipFile(filetools.file_open(localfilename, 'rb', vfs=False)) as zip:
size = sum([zinfo.file_size for zinfo in zip.filelist])
cur_size = 0
for member in zip.infolist():
@@ -425,7 +425,7 @@ def rename(dir1, dir2):
# https://stackoverflow.com/questions/3083235/unzipping-file-results-in-badzipfile-file-is-not-a-zip-file
def fixZipGetHash(zipFile):
hash = ''
with fOpen(zipFile, 'r+b') as f:
with filetools.file_open(zipFile, 'r+b', vfs=False) as f:
data = f.read()
pos = data.find(b'\x50\x4b\x05\x06') # End of central directory signature
if pos > 0:
@@ -438,17 +438,6 @@ def fixZipGetHash(zipFile):
return str(hash)
def fOpen(file, mode = 'r'):
# per android è necessario, su kodi 18, usare FileIO
# https://forum.kodi.tv/showthread.php?tid=330124
# per xbox invece, è necessario usare open perchè _io è rotto :(
# https://github.com/jellyfin/jellyfin-kodi/issues/115#issuecomment-538811017
if xbmc.getCondVisibility('system.platform.linux') and xbmc.getCondVisibility('system.platform.android'):
logger.info('android, uso FileIO per leggere')
return io.FileIO(file, mode)
else:
return io.open(file, mode)
def _pbhook(numblocks, blocksize, filesize, url, dp):
try:

View File

@@ -525,6 +525,21 @@ def search_library_path():
return None
def search_local_path(item):
ids = [item.infoLabels['imdb_id'], item.infoLabels['tmdb_id'], item.infoLabels['tvdb_id']]
for Id in ids:
nun_ids, ids = execute_sql_kodi('SELECT idShow FROM tvshow_view WHERE uniqueid_value LIKE "%s"' % Id)
if nun_ids >= 1:
nun_records, records = execute_sql_kodi('SELECT idPath FROM tvshowlinkpath WHERE idShow LIKE "%s"' % ids[0][0])
if nun_records >= 1:
for record in records:
num_path, path_records = execute_sql_kodi('SELECT strPath FROM path WHERE idPath LIKE "%s"' % record[0])
for path in path_records:
if config.get_setting('videolibrarypath') not in path[0]:
return path[0]
return ''
def set_content(content_type, silent=False, custom=False):
"""
Procedure to auto-configure the kodi video library with the default values
@@ -904,7 +919,7 @@ def clean(path_list=[]):
if path.startswith("special://"):
path = path.replace('/profile/', '/%/').replace('/home/userdata/', '/%/')
sep = '/'
elif '://' in path:
elif '://' in path or path.startswith('/') or path.startswith('%/'):
sep = '/'
else: sep = os.sep
@@ -1001,6 +1016,20 @@ def clean(path_list=[]):
progress.close()
def check_db(path):
if '\\' in path: sep = '\\'
else: sep = '/'
if path.endswith(sep): path = path[:-len(sep)]
ret = False
sql_path = '%' + sep + path.split(sep)[-1] + sep + '%'
sql = 'SELECT idShow FROM tvshow_view where strPath LIKE "%s"' % sql_path
logger.info('sql: ' + sql)
nun_records, records = execute_sql_kodi(sql)
if records:
ret = True
return ret
def execute_sql_kodi(sql):
"""
Run sql query against kodi database

View File

@@ -304,7 +304,7 @@ msgid "Documentaries"
msgstr ""
msgctxt "#30126"
msgid ""
msgid "This video needs inputstream.adaptive addon, but automatic install failed, install it manually"
msgstr ""
msgctxt "#30130"
@@ -3801,7 +3801,7 @@ msgid "Reorder"
msgstr ""
msgctxt "#70246"
msgid " - Preferred language"
msgid "Preferred language"
msgstr ""
msgctxt "#70247"
@@ -6029,7 +6029,47 @@ msgstr ""
msgctxt "#70807"
msgid "Elementum does not support network folder downloads, do you want to change the download location?"
msgstr "Elementum non supporta i download su cartella di rete, vuoi cambiare il percorso di download?"
msgstr ""
msgctxt "#70808"
msgid "In order to view this content you need to install [B]Widevine CDM[/B]. Do you want to install it?"
msgstr ""
msgctxt "#70809"
msgid "An update of [B]Widevine CDM[/B] is available. Do you want to install it?"
msgstr ""
msgctxt "#70810"
msgid "Update available"
msgstr "Aggiornamento disponibile"
msgctxt "#70811"
msgid "Extracting Widevine CDM"
msgstr ""
msgctxt "#70812"
msgid "Preparing downloaded image..."
msgstr ""
msgctxt "#70813"
msgid "Identifying wanted partition..."
msgstr ""
msgctxt "#70814"
msgid "Scanning the filesystem for the Widevine CDM..."
msgstr ""
msgctxt "#70815"
msgid "Widevine CDM found, analyzing..."
msgstr ""
msgctxt "#70816"
msgid "Extracting Widevine CDM from image..."
msgstr ""
msgctxt "#70817"
msgid "La Serie \"%s\" non è gestita da KoD, Vuoi cercarla?"
msgstr ""
# DNS start [ settings and declaration ]
msgctxt "#707401"

View File

@@ -304,8 +304,8 @@ msgid "Documentaries"
msgstr "Documentari"
msgctxt "#30126"
msgid ""
msgstr ""
msgid "This video needs inputstream.adaptive addon, but automatic install failed, install it manually"
msgstr "Questo video necessita dell'addon inputstream.adaptive ma l'installazione automatica è fallita, installalo manualmente."
msgctxt "#30130"
msgid "News"
@@ -3800,8 +3800,8 @@ msgid "Reorder"
msgstr "Riordina"
msgctxt "#70246"
msgid " - Preferred language"
msgstr " - Lingua preferita"
msgid "Preferred language"
msgstr "Lingua preferita"
msgctxt "#70247"
msgid "Home page"
@@ -6031,6 +6031,46 @@ msgctxt "#70807"
msgid "Elementum does not support network folder downloads, do you want to change the download location?"
msgstr "Elementum non supporta i download su cartella di rete, vuoi cambiare il percorso di download?"
msgctxt "#70808"
msgid "In order to view this content you need to install [B]Widevine CDM[/B]. Do you want to install it?"
msgstr "Per poter visionare questo contenuto devi installare [B]Widevine CDM[/B]. Vuoi installarlo?"
msgctxt "#70809"
msgid "An update of [B]Widevine CDM[/B] is available. Do you want to install it?"
msgstr "È disponibile un aggiornamento di [B]Widevine CDM[/B]. Vuoi installarlo?"
msgctxt "#70810"
msgid "Update available"
msgstr "Aggiornamento disponibile"
msgctxt "#70811"
msgid "Extracting Widevine CDM"
msgstr "Estrazione di Widevine CDM"
msgctxt "#70812"
msgid "Preparing downloaded image..."
msgstr "Preparazione dell'immagine scaricata..."
msgctxt "#70813"
msgid "Identifying wanted partition..."
msgstr "Identificazione della partizione desiderata..."
msgctxt "#70814"
msgid "Scanning the filesystem for the Widevine CDM..."
msgstr "Scansione del filesystem per Widevine CDM..."
msgctxt "#70815"
msgid "Widevine CDM found, analyzing..."
msgstr "Widevine CDM trovato, analisi..."
msgctxt "#70816"
msgid "Extracting Widevine CDM from image..."
msgstr "Estrazione di Widevine CDM dall'immagine..."
msgctxt "#70817"
msgid "La Serie \"%s\" non è gestita da KoD, Vuoi cercarla?"
msgstr "La Serie \"%s\" non è gestita da KoD, Vuoi cercarla?"
# DNS start [ settings and declaration ]
msgctxt "#707401"
msgid "Enable DNS check alert"
@@ -6370,4 +6410,4 @@ msgstr "Rimuovi episodi in locale"
msgctxt "#80050"
msgid "Downloading..."
msgstr "Download in corso..."
msgstr "Download in corso..."

View File

@@ -106,7 +106,8 @@
<setting id="library_move" type="bool" label="70231" visible="eq(-3,true)" default="true"/>
<setting id="browser" type="bool" label="70232" visible="eq(-4,true)" default="true"/>
<setting id="server_speed" type="bool" label="70242" visible="eq(-5,true)" default="true"/>
<setting id="quality" type="select" label="70240" lvalues="70241|70763|70764|70765" visible="eq(-6,true)" default="0"/>
<setting id="language" type="select" label="70246" values="ITA|Sub-ITA" visible="eq(-6,true)" default="0"/>
<setting id="quality" type="select" label="70240" lvalues="70241|70763|70764|70765" visible="eq(-7,true)" default="0"/>
<setting id="download_adv" type="action" label="30030" visible="eq(-7,true)" action="RunPlugin(plugin://plugin.video.kod/?ew0KCSJhY3Rpb24iOiJjaGFubmVsX2NvbmZpZyIsDQoJImNvbmZpZyI6ImRvd25sb2FkcyIsDQogICAgImNoYW5uZWwiOiJzZXR0aW5nIg0KfQ==)"/>
</category>

View File

@@ -1,9 +1,8 @@
# -*- coding: utf-8 -*-
# by DrZ3r0
import urllib
from core import httptools
from core import httptools, support
from core import scrapertools
from platformcode import logger, config
@@ -54,28 +53,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
from lib import jsunpack
data = jsunpack.unpack(data_pack)
block = scrapertools.find_single_match(data, "sources:\s\[([^\]]+)\]")
data = block if block else data
# URL
# logger.info(data)
if vres:
matches = scrapertools.find_multiple_matches(data, '''src:\s*["']?(http.*?\.mp4)''')
else:
matches = scrapertools.find_multiple_matches(data, '''src:\s*["']?(http.*?\.mp4)(?:[^,]+,[^,]+,res:([^,]+))?''')
if matches:
if len(matches[0])==2:
i=0
for m in matches:
vres.append("%sx" % m[1])
matches[i]=m[0]
i+=1
_headers = urllib.urlencode(httptools.default_headers)
video_urls = support.get_jwplayer_mediaurl(data, 'akvideo', onlyHttp=True)
i = 0
for media_url in matches:
# URL del vídeo
video_urls.append([vres[i] if i<len(vres) else "" + " mp4 [Akvideo] ", media_url.replace('https://', 'http://') + '|' + _headers])
i = i + 1
return sorted(video_urls, key=lambda x: int(x[0].split('x')[0])) if vres else video_urls

View File

@@ -11,7 +11,7 @@ from platformcode import logger, config
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
response = httptools.downloadpage(page_url)
if not response.sucess or "Not Found" in response.data or "File was deleted" in response.data or "is no longer available" in response.data:
if not response.success or "Not Found" in response.data or "File was deleted" in response.data or "is no longer available" in response.data:
return False, config.get_localized_string(70449) % "anonfile"
return True, ""

View File

@@ -14,7 +14,7 @@ def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
global page
page = httptools.downloadpage(page_url)
if not page.sucess:
if not page.success:
return False, config.get_localized_string(70449) % "Badshare"
return True, ""

View File

@@ -12,8 +12,8 @@ def test_video_exists(page_url):
html = httptools.downloadpage(page_url)
global data
data = html.data
if html.code == 404:
return False, config.get_localized_string(70292) % "CloudVideo"
if html.code == 404 or 'No Signal 404 Error Page' in data:
return False, config.get_localized_string(70449) % "CloudVideo"
return True, ""

View File

@@ -3,6 +3,9 @@
from platformcode import logger, config
def test_video_exists(page_url):
return True, ""
# Returns an array of possible video url's from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)

View File

@@ -14,7 +14,7 @@ def test_video_exists(page_url):
global data
data = httptools.downloadpage(page_url)
if not data.sucess or "Not Found" in data.data or "File was deleted" in data.data or "is no longer available" in data.data:
if not data.success or "Not Found" in data.data or "File was deleted" in data.data or "is no longer available" in data.data:
return False, config.get_localized_string(70449) % "Idtbox"
data = data.data

View File

@@ -15,7 +15,7 @@ def test_video_exists(page_url):
response = httptools.downloadpage(page_url)
global data
data = response.data
if not response.sucess or "Not Found" in data or "File was deleted" in data or "is no longer available" in data:
if not response.success or "Not Found" in data or "File was deleted" in data or "is no longer available" in data:
return False, config.get_localized_string(70449) % "jetload"
return True, ""

View File

@@ -3,7 +3,7 @@
# Conector Mixdrop By Alfa development Group
# --------------------------------------------------------
from core import httptools
from core import httptools, servertools
from core import scrapertools
from lib import jsunpack
from platformcode import logger, config
@@ -13,6 +13,12 @@ def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
global data
data = httptools.downloadpage(page_url).data
if 'window.location' in data:
domain = 'https://' + servertools.get_server_host('mixdrop')[0]
url = domain + scrapertools.find_single_match(data, "window\.location\s*=\s*[\"']([^\"']+)")
data = httptools.downloadpage(url).data
if "<h2>WE ARE SORRY</h2>" in data or '<title>404 Not Found</title>' in data:
return False, config.get_localized_string(70449) % "MixDrop"
return True, ""
@@ -23,6 +29,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
video_urls = []
ext = '.mp4'
global data
packed = scrapertools.find_single_match(data, r'(eval.*?)</script>')
unpacked = jsunpack.unpack(packed)

View File

@@ -12,7 +12,7 @@ def test_video_exists(page_url):
response = httptools.downloadpage(page_url)
if not response.sucess or \
if not response.success or \
"Not Found" in response.data \
or "File was deleted" in response.data \
or "is no longer available" in response.data:

View File

@@ -14,7 +14,7 @@ def test_video_exists(page_url):
response = httptools.downloadpage(page_url)
if not response.sucess or \
if not response.success or \
"Not Found" in response.data \
or "File was deleted" in response.data \
or "is no longer available" in response.data:

View File

@@ -17,6 +17,9 @@ monitor = filetools.join(config.get_data_path(), 'elementum_monitor.json')
extensions_list = ['.aaf', '.3gp', '.asf', '.avi', '.flv', '.mpeg', '.m1v', '.m2v', '.m4v', '.mkv', '.mov', '.mpg', '.mpe', '.mp4', '.ogg', '.wmv']
def test_video_exists(page_url):
return True, ""
# Returns an array of possible video url's from the page_url
def get_video_url(page_url, premium=False, user='', password='', video_password=''):

View File

@@ -11,7 +11,7 @@ def test_video_exists(page_url):
response = httptools.downloadpage(page_url)
if not response.sucess or "Not Found" in response.data or "File was deleted" in response.data or "is no longer available" in response.data:
if not response.success or "Not Found" in response.data or "File was deleted" in response.data or "is no longer available" in response.data:
return False, config.get_localized_string(70449) % "Userscloud"
return True, ""

View File

@@ -15,7 +15,7 @@ def test_video_exists(page_url):
response = httptools.downloadpage(page_url)
global data
data = response.data
if not response.sucess or "Not Found" in data or "File was deleted" in data or "is no longer available" in data:
if not response.success or "Not Found" in data or "File was deleted" in data or "is no longer available" in data:
return False, config.get_localized_string(70449) % "vidfast"
return True, ""

View File

@@ -11,9 +11,9 @@ def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
global response
response = httptools.downloadpage(page_url)
if not response.sucess or "Not Found" in response.data:
if not response.success or "Not Found" in response.data:
return False, "[%s] El fichero no existe o ha sido borrado" %id_server
if not response.sucess or "Video is processing now." in response.data:
if not response.success or "Video is processing now." in response.data:
return False, "[%s] El video se está procesando." %id_server
return True, ""

View File

@@ -6,10 +6,10 @@ from platformcode import logger, config
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url)
page = httptools.downloadpage(page_url)
global data
data = data.data
if data.code == 404:
data = page.data
if page.code == 404:
return False, config.get_localized_string(70449)
return True, ""

View File

@@ -53,7 +53,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
ep_title = '[B]' + episode['title'] + '[/B]'
embed_info = vvvvid_decoder.dec_ei(episode['embed_info'])
embed_info = embed_info.replace('manifest.f4m','master.m3u8').replace('http://','https://').replace('/z/','/i/')
key_url = 'https://www.vvvvid.it/kenc?action=kt&conn_id=' + conn_id + '&url=' + embed_info.replace(':','%3A').replace('/','%2F')
key = vvvvid_decoder.dec_ei(current_session.get(key_url, headers=headers, params=payload).json()['message'])
video_urls.append([ep_title, str(embed_info)])
video_urls.append([ep_title, str(embed_info) + '?' + key])
return video_urls

View File

@@ -92,7 +92,7 @@ def test_video_exists(page_url):
data = httptools.downloadpage(page_url).data
if "File was deleted" in data:
if "File was deleted" in data or "Video non disponibile" in data:
return False, config.get_localized_string(70449) % "Youtube"
return True, ""
@@ -107,7 +107,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
video_id = scrapertools.find_single_match(page_url, '(?:v=|embed/)([A-z0-9_-]{11})')
video_urls = extract_videos(video_id)
return video_urls
return sorted(video_urls, reverse=True)
def remove_additional_ending_delimiter(data):
@@ -215,8 +215,8 @@ def extract_videos(video_id):
url = re.search('url=(.*)', opt["cipher"]).group(1)
s = cipher.get('s')
url = "%s&sig=%s" % (urllib.unquote(url), signature([s]))
video_urls.append(["%s" % itag_list.get(opt["itag"], "audio"), url])
video_urls.append(["%s" % itag_list.get(opt["itag"], "video"), url])
elif opt["itag"] in itag_list:
video_urls.append(["%s" % itag_list.get(opt["itag"], "video"), opt["url"]])
return video_urls
return video_urls

View File

@@ -21,10 +21,12 @@ from servers import torrent
def update(path, p_dialog, i, t, serie, overwrite):
logger.info("Updating " + path)
insertados_total = 0
nfo_file = xbmc.translatePath(filetools.join(path, 'tvshow.nfo'))
head_nfo, it = videolibrarytools.read_nfo(path + '/tvshow.nfo')
# videolibrarytools.check_renumber_options(it)
head_nfo, it = videolibrarytools.read_nfo(nfo_file)
videolibrarytools.update_renumber_options(it, head_nfo, path)
if not serie.library_url: serie = it
category = serie.category
# logger.debug("%s: %s" %(serie.contentSerieName,str(list_canales) ))
@@ -34,7 +36,7 @@ def update(path, p_dialog, i, t, serie, overwrite):
###### Redirección al canal NewPct1.py si es un clone, o a otro canal y url si ha intervención judicial
try:
head_nfo, it = videolibrarytools.read_nfo(path + '/tvshow.nfo') #Refresca el .nfo para recoger actualizaciones
head_nfo, it = videolibrarytools.read_nfo(nfo_file) #Refresca el .nfo para recoger actualizaciones
if it.emergency_urls:
serie.emergency_urls = it.emergency_urls
serie.category = category
@@ -109,6 +111,7 @@ def check_for_update(overwrite=True):
update_when_finished = False
hoy = datetime.date.today()
estado_verify_playcount_series = False
local_ended = True
try:
if config.get_setting("update", "videolibrary") != 0 or overwrite:
@@ -127,6 +130,11 @@ def check_for_update(overwrite=True):
for i, tvshow_file in enumerate(show_list):
head_nfo, serie = videolibrarytools.read_nfo(tvshow_file)
if serie.local_episodes_path:
local_ended = True if serie.infoLabels['number_of_episodes'] == len(serie.local_episodes_list) else False
if serie.infoLabels['status'].lower() == 'ended' and local_ended:
serie.active = 0
filetools.write(tvshow_file, head_nfo + serie.tojson())
path = filetools.dirname(tvshow_file)
logger.info("serie=" + serie.contentSerieName)

View File

@@ -53,7 +53,7 @@ def export_videolibrary(item):
def import_videolibrary(item):
logger.info()
zip_file = u'' + platformtools.dialog_browse(1, config.get_localized_string(80005))
zip_file = u'' + platformtools.dialog_browse(1, config.get_localized_string(80005), mask=".zip")
if zip_file == "":
return
if not platformtools.dialog_yesno(config.get_localized_string(20000), config.get_localized_string(80006)):

View File

@@ -634,7 +634,7 @@ def load_and_check(item):
for key, channel in json['channels'].items():
if not 'checked' in channel:
response = httptools.downloadpage(channel['path'], follow_redirects=True, timeout=5)
if response.sucess:
if response.success:
channel['path'] = response.url
channel['channel_name'] = re.sub(r'\[[^\]]+\]', '', channel['channel_name'])
channel['check'] = True

View File

@@ -546,10 +546,8 @@ def sort_method(item):
@rtype: int
"""
lang_orders = {}
lang_orders[0] = ["IT", "SUB", "VOSI", "ENG"]
lang_orders[1] = ["IT", "ENG", "VOSI", "SUB"]
lang_orders[2] = ["ENG", "SUB", "IT", "VOSI"]
lang_orders[3] = ["ENG", "SUB", "VOSI", "IT"]
lang_orders[0] = ["ITA", "SUB"]
lang_orders[1] = ["SUB", "ITA"]
quality_orders = {}
quality_orders[0] = ["BLURAY", "FULLHD", "HD", "480P", "360P", "240P"]
@@ -557,11 +555,9 @@ def sort_method(item):
quality_orders[2] = ["HD", "480P", "360P", "240P", "FULLHD", "BLURAY"]
quality_orders[3] = ["480P", "360P", "240P", "BLURAY", "FULLHD", "HD"]
order_list_idiomas = lang_orders[int(config.get_setting("language", "downloads"))]
match_list_idimas = {"IT": ["ITA", "IT", "Italiano", "italiano", "ITALIANO"],
"SUB": ["Sottotitolato", "SUB", "sub-ita", "SUB-ITA", "Sub-ITA", "Sub-Ita"],
"ENG": ["EN", "ENG", "Inglés", "Ingles", "English"],
"VOSI": ["VOSI"]}
order_list_idiomas = lang_orders[int(lang_orders[0].index(config.get_setting("language")))]
match_list_idimas = {"ITA": ["ITA", "IT", "Italiano", "italiano", "ITALIANO"],
"SUB": ["Sottotitolato", "SUB", "sub-ita", "SUB-ITA", "Sub-ITA", "Sub-Ita"]}
order_list_calidad = ["BLURAY", "FULLHD", "HD", "480P", "360P", "240P"]
order_list_calidad = quality_orders[int(config.get_setting("quality"))]
@@ -844,7 +840,7 @@ def start_download(item):
def get_episodes(item):
log("contentAction: %s | contentChannel: %s | contentType: %s" % (item.contentAction, item.contentChannel, item.contentType))
if 'dlseason' in item:
season = True
season_number = item.dlseason
@@ -868,8 +864,7 @@ def get_episodes(item):
episodes = getattr(channel, item.contentAction)(item)
itemlist = []
if episodes and not scrapertools.find_single_match(episodes[0].title, r'(\d+.\d+)'):
if episodes and not scrapertools.find_single_match(episodes[0].title, r'(\d+.\d+)') and item.channel not in ['videolibrary']:
from specials.autorenumber import select_type, renumber, check
if not check(item):
select_type(item)
@@ -912,7 +907,7 @@ def get_episodes(item):
episode.downloadFilename = filetools.validate_path(filetools.join(item.downloadFilename, "%dx%0.2d - %s" % (episode.contentSeason, episode.contentEpisodeNumber, episode.contentTitle.strip())))
if season:
if int(scrapertools.find_single_match(episode.title, r'(\d+)x')) == int(season_number):
if episode.contentSeason == int(season_number):
itemlist.append(episode)
else:
itemlist.append(episode)
@@ -1082,11 +1077,11 @@ def save_download_movie(item):
def save_download_tvshow(item):
log("contentAction: %s | contentChannel: %s | contentType: %s | contentSerieName: %s" % (item.contentAction, item.contentChannel, item.contentType, item.contentSerieName))
progreso = platformtools.dialog_progress_bg(config.get_localized_string(30101), config.get_localized_string(70188))
try:
item.show = item.fulltitle
scraper.find_and_set_infoLabels(item)
if item.channel not in ['videolibrary']:
scraper.find_and_set_infoLabels(item)
if not item.contentSerieName: item.contentSerieName = item.fulltitle

View File

@@ -37,13 +37,13 @@ def download(item=None):
def extract():
import zipfile
from platformcode.updater import fixZipGetHash, fOpen
from platformcode.updater import fixZipGetHash
support.log('Estraggo Elementum in:', elementum_path)
try:
hash = fixZipGetHash(filename)
support.log(hash)
with zipfile.ZipFile(fOpen(filename, 'rb')) as zip_ref:
with zipfile.ZipFile(filetools.file_open(filename, 'rb', vfs=False)) as zip_ref:
zip_ref.extractall(xbmc.translatePath(addon_path))
except Exception as e:

View File

@@ -3,10 +3,12 @@
# Canale film in tv
# ------------------------------------------------------------
from datetime import datetime
import glob, time, gzip, xbmc
import glob, time, gzip, xbmc, sys
from core import filetools, downloadtools, support, scrapertools
from core.item import Item
from platformcode import logger
if sys.version_info[0] >= 3: from concurrent import futures
else: from concurrent_py2 import futures
host = "http://epg-guide.com/kltv.gz"
blacklisted_genres = ['attualita', 'scienza', 'religione', 'cucina', 'notiziario', 'altro', 'soap opera', 'viaggi', 'economia', 'tecnologia', 'magazine', 'show', 'reality show', 'lifestyle', 'societa', 'wrestling', 'azione', 'Musica', 'real life', 'real adventure', 'dplay original', 'natura', 'news', 'food', 'sport', 'moda', 'arte e cultura', 'crime', 'box set e serie tv', 'casa', 'storia', 'talk show', 'motori', 'attualit\xc3\xa0 e inchiesta', 'documentari', 'musica', 'spettacolo', 'medical', 'talent show', 'sex and love', 'beauty and style', 'news/current affairs', "children's/youth programmes", 'leisure hobbies', 'social/political issues/economics', 'education/science/factual topics', 'undefined content', 'show/game show', 'music/ballet/dance', 'sports', 'arts/culture', 'biografico', 'informazione', 'documentario']
@@ -28,7 +30,7 @@ def getEpg():
fileName = support.config.get_temp_file('guidatv-') + now.strftime('%Y %m %d')
archiveName = fileName + '.gz'
xmlName = fileName + '.xml'
if not filetools.exists(archiveName):
if not filetools.exists(xmlName):
support.log('downloading epg')
# cancello quelli vecchi
for f in glob.glob(support.config.get_temp_file('guidatv-') + '*'):
@@ -36,12 +38,12 @@ def getEpg():
# inmemory = io.BytesIO(httptools.downloadpage(host).data)
downloadtools.downloadfile(host, archiveName)
support.log('opening gzip and writing xml')
fStream = gzip.GzipFile(archiveName, mode='rb')
guide = fStream.read().replace('\n', ' ').replace('><', '>\n<')
with gzip.GzipFile(fileobj=filetools.file_open(archiveName, mode='rb', vfs=False)) as f:
guide = f.read().replace('\n', ' ').replace('><', '>\n<')
with open(xmlName, 'w') as f:
f.write(guide)
# else:
guide = open(xmlName)
guide = filetools.file_open(xmlName, vfs=False)
return guide
@@ -309,7 +311,22 @@ def new_search(item):
def live(item):
itemlist = []
from channels import raiplay#, mediasetplay
itemlist += raiplay.live(raiplay.mainlist(Item())[0])
# itemlist += mediasetplay.live(mediasetplay.mainlist(Item())[0])
channels_dict = {}
channels = ['raiplay', 'mediasetplay', 'la7']
with futures.ThreadPoolExecutor() as executor:
itlist = [executor.submit(load_live, channel) for channel in channels]
for res in futures.as_completed(itlist):
if res.result():
channel_name, itlist = res.result()
channels_dict[channel_name] = itlist
for channel in channels:
itemlist += channels_dict[channel]
return itemlist
def load_live(channel_name):
channel = __import__('%s.%s' % ('channels', channel_name), None, None, ['%s.%s' % ('channels', channel_name)])
itemlist = channel.live(channel.mainlist(Item())[0])
return channel_name, itemlist

View File

@@ -169,8 +169,13 @@ def channel_search(item):
ch_list = dict()
mode = item.mode
max_results = 10
if item.infoLabels['title']:
if item.infoLabels['tvshowtitle']:
item.text = item.infoLabels['tvshowtitle']
item.title = item.text
elif item.infoLabels['title']:
item.text = item.infoLabels['title']
item.title = item.text
searched_id = item.infoLabels['tmdb_id']

View File

@@ -15,6 +15,10 @@ from core.item import Item
from platformcode import config, logger, platformtools
from lib import generictools
from distutils import dir_util
if PY3:
from concurrent import futures
else:
from concurrent_py2 import futures
def mainlist(item):
@@ -25,11 +29,11 @@ def mainlist(item):
thumbnail=get_thumb("videolibrary_movie.png")),
Item(channel=item.channel, action="list_tvshows", title=config.get_localized_string(60600),
category=config.get_localized_string(70271),
thumbnail=get_thumb("videolibrary_tvshow.png")),
thumbnail=get_thumb("videolibrary_tvshow.png"),
context=[{"channel":"videolibrary", "action":"update_videolibrary", "title":config.get_localized_string(70269)}]),
Item(channel='shortcuts', action="SettingOnPosition",
category=2, setting=1, title=typo(config.get_localized_string(70287),'bold color kod'),
thumbnail = get_thumb("setting_0.png"))]
return itemlist
@@ -40,288 +44,192 @@ def channel_config(item):
def list_movies(item, silent=False):
logger.info()
itemlist = []
dead_list = []
zombie_list = []
for raiz, subcarpetas, ficheros in filetools.walk(videolibrarytools.MOVIES_PATH):
for s in subcarpetas:
nfo_path = filetools.join(raiz, s, s + ".nfo")
logger.debug(nfo_path)
local_movie = False
for f in filetools.listdir(filetools.join(raiz, s)):
movies_path = []
for root, folders, files in filetools.walk(videolibrarytools.MOVIES_PATH):
for f in folders:
movies_path += [filetools.join(root, f, f + ".nfo")]
local = False
for f in filetools.listdir(filetools.join(root, f)):
if f.split('.')[-1] not in ['nfo','json','strm']:
local_movie = True
local= True
break
if filetools.exists(nfo_path):
# We synchronize the movies seen from the Kodi video library with that of KoD
try:
# If it's Kodi, we do it
if config.is_xbmc():
from platformcode import xbmc_videolibrary
xbmc_videolibrary.mark_content_as_watched_on_kod(nfo_path)
except:
logger.error(traceback.format_exc())
with futures.ThreadPoolExecutor() as executor:
for movie_path in movies_path:
item_movie, value = executor.submit(get_results, movie_path, root, 'movie', local).result()
# verify the existence of the channels
if item_movie.library_urls and len(item_movie.library_urls) > 0:
itemlist += [item_movie]
head_nfo, new_item = videolibrarytools.read_nfo(nfo_path)
# If you have not read the .nfo well, we will proceed to the next
if not new_item:
logger.error('.nfo erroneous in ' + str(nfo_path))
continue
if len(new_item.library_urls) > 1:
multicanal = True
else:
multicanal = False
# Verify the existence of the channels. If the channel does not exist, ask yourself if you want to remove the links from that channel.
for canal_org in new_item.library_urls:
canal = canal_org
try:
if canal in ['community', 'downloads']:
channel_verify = __import__('specials.%s' % canal, fromlist=["channels.%s" % canal])
else:
channel_verify = __import__('channels.%s' % canal, fromlist=["channels.%s" % canal])
logger.debug('Channel %s seems correct' % channel_verify)
except:
dead_item = Item(multicanal=multicanal,
contentType='movie',
dead=canal,
path=filetools.join(raiz, s),
nfo=nfo_path,
library_urls=new_item.library_urls,
infoLabels={'title': new_item.contentTitle})
if canal not in dead_list and canal not in zombie_list:
confirm = platformtools.dialog_yesno(config.get_localized_string(30131),
config.get_localized_string(30132) % canal.upper(),
config.get_localized_string(30133))
elif canal in zombie_list:
confirm = False
else:
confirm = True
if confirm:
delete(dead_item)
if canal not in dead_list:
dead_list.append(canal)
continue
else:
if canal not in zombie_list:
zombie_list.append(canal)
if len(dead_list) > 0:
for canal in dead_list:
if canal in new_item.library_urls:
del new_item.library_urls[canal]
new_item.nfo = nfo_path
new_item.path = filetools.join(raiz, s)
new_item.thumbnail = new_item.contentThumbnail
new_item.extra = filetools.join(config.get_setting("videolibrarypath"), config.get_setting("folder_movies"), s)
strm_path = new_item.strm_path.replace("\\", "/").rstrip("/")
if '/' in new_item.path:
new_item.strm_path = strm_path
logger.info('EXIST'+ str(local_movie))
if not filetools.exists(filetools.join(new_item.path, filetools.basename(strm_path))) and local_movie == False:
# If strm has been removed from kodi library, don't show it
continue
# Contextual menu: Mark as seen / not seen
visto = new_item.library_playcounts.get(os.path.splitext(f)[0], 0)
new_item.infoLabels["playcount"] = visto
if visto > 0:
texto_visto = config.get_localized_string(60016)
contador = 0
else:
texto_visto = config.get_localized_string(60017)
contador = 1
# Context menu: Delete series / channel
num_canales = len(new_item.library_urls)
if "downloads" in new_item.library_urls:
num_canales -= 1
if num_canales > 1:
texto_eliminar = config.get_localized_string(60018)
else:
texto_eliminar = config.get_localized_string(60019)
new_item.context = [{"title": texto_visto,
"action": "mark_content_as_watched",
"channel": "videolibrary",
"playcount": contador},
{"title": texto_eliminar,
"action": "delete",
"channel": "videolibrary",
"multicanal": multicanal}]
itemlist.append(new_item)
if silent == False:
return sorted(itemlist, key=lambda it: it.title.lower())
else:
return
if silent == False: return sorted(itemlist, key=lambda it: it.title.lower())
else: return
def list_tvshows(item):
from time import time
start = time()
logger.info()
itemlist = []
dead_list = []
zombie_list = []
lista = []
tvshows_path = []
# We get all the tvshow.nfo from the SERIES video library recursively
for raiz, subcarpetas, ficheros in filetools.walk(videolibrarytools.TVSHOWS_PATH):
for s in subcarpetas:
tvshow_path = filetools.join(raiz, s, "tvshow.nfo")
logger.debug(tvshow_path)
for root, folders, files in filetools.walk(videolibrarytools.TVSHOWS_PATH):
for f in folders:
tvshows_path += [filetools.join(root, f, "tvshow.nfo")]
if filetools.exists(tvshow_path):
# We synchronize the episodes seen from the Kodi video library with that of KoD
try:
# If it's Kodi, we do it
if config.is_xbmc():
from platformcode import xbmc_videolibrary
xbmc_videolibrary.mark_content_as_watched_on_kod(tvshow_path)
except:
logger.error(traceback.format_exc())
head_nfo, item_tvshow = videolibrarytools.read_nfo(tvshow_path)
# If you have not read the .nfo well, we will proceed to the next
if not item_tvshow:
logger.error('.nfo erroneous in ' + str(tvshow_path))
continue
if len(item_tvshow.library_urls) > 1:
multicanal = True
else:
multicanal = False
# Verify the existence of the channels. If the channel does not exist, ask yourself if you want to remove the links from that channel.
for canal in item_tvshow.library_urls:
try:
if canal in ['community', 'downloads']:
channel_verify = __import__('specials.%s' % canal, fromlist=["channels.%s" % canal])
else:
channel_verify = __import__('channels.%s' % canal, fromlist=["channels.%s" % canal])
logger.debug('Channel %s seems correct' % channel_verify)
except:
dead_item = Item(multicanal=multicanal,
contentType='tvshow',
dead=canal,
path=filetools.join(raiz, s),
nfo=tvshow_path,
library_urls=item_tvshow.library_urls,
infoLabels={'title': item_tvshow.contentTitle})
if canal not in dead_list and canal not in zombie_list:
confirm = platformtools.dialog_yesno(config.get_localized_string(30131),
config.get_localized_string(30132) % canal.upper(),
config.get_localized_string(30133))
elif canal in zombie_list:
confirm = False
else:
confirm = True
if confirm:
delete(dead_item)
if canal not in dead_list:
dead_list.append(canal)
continue
else:
if canal not in zombie_list:
zombie_list.append(canal)
if len(dead_list) > 0:
for canal in dead_list:
if canal in item_tvshow.library_urls:
del item_tvshow.library_urls[canal]
# continue loading the elements of the video library
# Sometimes it gives random errors, for not finding the .nfo. Probably timing issues
try:
item_tvshow.title = item_tvshow.contentTitle
item_tvshow.path = filetools.join(raiz, s)
item_tvshow.nfo = tvshow_path
item_tvshow.extra = filetools.join(config.get_setting("videolibrarypath"), config.get_setting("folder_tvshows"), s)
# Contextual menu: Mark as seen / not seen
visto = item_tvshow.library_playcounts.get(item_tvshow.contentTitle, 0)
item_tvshow.infoLabels["playcount"] = visto
if visto > 0:
texto_visto = config.get_localized_string(60020)
contador = 0
else:
texto_visto = config.get_localized_string(60021)
contador = 1
except:
logger.error('Not find: ' + str(tvshow_path))
logger.error(traceback.format_exc())
continue
# Context menu: Automatically search for new episodes or not
if item_tvshow.active and int(item_tvshow.active) > 0:
texto_update = config.get_localized_string(60022)
value = 0
else:
texto_update = config.get_localized_string(60023)
value = 1
item_tvshow.title += " [B]" + u"\u2022".encode('utf-8') + "[/B]"
# Context menu: Delete series / channel
num_canales = len(item_tvshow.library_urls)
if "downloads" in item_tvshow.library_urls:
num_canales -= 1
if num_canales > 1:
texto_eliminar = config.get_localized_string(60024)
else:
texto_eliminar = config.get_localized_string(60025)
item_tvshow.context = [{"title": texto_visto,
"action": "mark_content_as_watched",
"channel": "videolibrary",
"playcount": contador},
{"title": texto_update,
"action": "mark_tvshow_as_updatable",
"channel": "videolibrary",
"active": value},
{"title": texto_eliminar,
"action": "delete",
"channel": "videolibrary",
"multicanal": multicanal},
{"title": config.get_localized_string(70269),
"action": "update_tvshow",
"channel": "videolibrary"}]
if item_tvshow.local_episodes_path == "":
item_tvshow.context.append({"title": config.get_localized_string(80048),
"action": "add_local_episodes",
"channel": "videolibrary"})
else:
item_tvshow.context.append({"title": config.get_localized_string(80049),
"action": "remove_local_episodes",
"channel": "videolibrary"})
# verify the existence of the channels
if len(item_tvshow.library_urls) > 0:
itemlist.append(item_tvshow)
lista.append({'title':item_tvshow.contentTitle,'thumbnail':item_tvshow.thumbnail,'fanart':item_tvshow.fanart, 'active': value, 'nfo':tvshow_path})
with futures.ThreadPoolExecutor() as executor:
for tvshow_path in tvshows_path:
item_tvshow, value = executor.submit(get_results, tvshow_path, root, 'tvshow').result()
# verify the existence of the channels
if item_tvshow.library_urls and len(item_tvshow.library_urls) > 0:
itemlist += [item_tvshow]
lista += [{'title':item_tvshow.contentTitle,'thumbnail':item_tvshow.thumbnail,'fanart':item_tvshow.fanart, 'active': value, 'nfo':tvshow_path}]
if itemlist:
itemlist = sorted(itemlist, key=lambda it: it.title.lower())
itemlist.append(Item(channel=item.channel, action="update_videolibrary", thumbnail=item.thumbnail,
title=typo(config.get_localized_string(70269), 'bold color kod'), folder=False))
itemlist.append(Item(channel=item.channel, action="configure_update_videolibrary", thumbnail=item.thumbnail,
title=typo(config.get_localized_string(60599), 'bold color kod'), lista=lista, folder=False))
itemlist += [Item(channel=item.channel, action="update_videolibrary", thumbnail=item.thumbnail,
title=typo(config.get_localized_string(70269), 'bold color kod'), folder=False),
Item(channel=item.channel, action="configure_update_videolibrary", thumbnail=item.thumbnail,
title=typo(config.get_localized_string(60599), 'bold color kod'), lista=lista, folder=False)]
logger.info('TEMPO= ' + str(time() - start))
return itemlist
def get_results(nfo_path, root, Type, local=False):
dead_list = []
zombie_list = []
value = 0
if Type == 'movie': folder = "folder_movies"
else: folder = "folder_tvshows"
if filetools.exists(nfo_path):
# We synchronize the episodes seen from the Kodi video library with that of KoD
from platformcode import xbmc_videolibrary
xbmc_videolibrary.mark_content_as_watched_on_kod(nfo_path)
head_nfo, item = videolibrarytools.read_nfo(nfo_path)
# If you have not read the .nfo well, we will proceed to the next
if not item:
logger.error('.nfo erroneous in ' + str(nfo_path))
return Item(), 0
if len(item.library_urls) > 1: multichannel = True
else: multichannel = False
# Verify the existence of the channels. If the channel does not exist, ask yourself if you want to remove the links from that channel.
for canal in item.library_urls:
try:
if canal in ['community', 'downloads']: channel_verify = __import__('specials.%s' % canal, fromlist=["channels.%s" % canal])
else: channel_verify = __import__('channels.%s' % canal, fromlist=["channels.%s" % canal])
logger.debug('Channel %s seems correct' % channel_verify)
except:
dead_item = Item(multichannel=multichannel,
contentType='tvshow',
dead=canal,
path=filetools.split(nfo_path)[0],
nfo=nfo_path,
library_urls=item.library_urls,
infoLabels={'title': item.contentTitle})
if canal not in dead_list and canal not in zombie_list: confirm = platformtools.dialog_yesno(config.get_localized_string(30131), config.get_localized_string(30132) % canal.upper(), config.get_localized_string(30133))
elif canal in zombie_list: confirm = False
else: confirm = True
if confirm:
delete(dead_item)
if canal not in dead_list:
dead_list.append(canal)
continue
else:
if canal not in zombie_list:
zombie_list.append(canal)
if len(dead_list) > 0:
for canal in dead_list:
if canal in item.library_urls:
del item.library_urls[canal]
# continue loading the elements of the video library
if Type == 'movie':
item.path = filetools.split(nfo_path)[0]
item.nfo = nfo_path
sep = '/' if '/' in nfo_path else '\\'
item.extra = filetools.join(config.get_setting("videolibrarypath"), config.get_setting(folder), item.path.split(sep)[-1])
strm_path = item.strm_path.replace("\\", "/").rstrip("/")
if '/' in item.path: item.strm_path = strm_path
# If strm has been removed from kodi library, don't show it
if not filetools.exists(filetools.join(item.path, filetools.basename(strm_path))) and not local: return Item(), 0
# Contextual menu: Mark as seen / not seen
visto = item.library_playcounts.get(item.path.split(sep)[0], 0)
item.infoLabels["playcount"] = visto
if visto > 0:
seen_text = config.get_localized_string(60016)
counter = 0
else:
seen_text = config.get_localized_string(60017)
counter = 1
# Context menu: Delete series / channel
channels_num = len(item.library_urls)
if "downloads" in item.library_urls: channels_num -= 1
if channels_num > 1: delete_text = config.get_localized_string(60018)
else: delete_text = config.get_localized_string(60019)
item.context = [{"title": seen_text, "action": "mark_content_as_watched", "channel": "videolibrary", "playcount": counter},
{"title": delete_text, "action": "delete", "channel": "videolibrary", "multichannel": multichannel}]
else:
# Sometimes it gives random errors, for not finding the .nfo. Probably timing issues
try:
item.title = item.contentTitle
item.path = filetools.split(nfo_path)[0]
item.nfo = nfo_path
sep = '/' if '/' in nfo_path else '\\'
item.extra = filetools.join(config.get_setting("videolibrarypath"), config.get_setting(folder), item.path.split(sep)[-1])
# Contextual menu: Mark as seen / not seen
visto = item.library_playcounts.get(item.contentTitle, 0)
item.infoLabels["playcount"] = visto
logger.info('item\n' + str(item))
if visto > 0:
seen_text = config.get_localized_string(60020)
counter = 0
else:
seen_text = config.get_localized_string(60021)
counter = 1
except:
logger.error('Not find: ' + str(nfo_path))
logger.error(traceback.format_exc())
return Item(), 0
# Context menu: Automatically search for new episodes or not
if item.active and int(item.active) > 0:
update_text = config.get_localized_string(60022)
value = 0
else:
update_text = config.get_localized_string(60023)
value = 1
item.title += " [B]" + u"\u2022".encode('utf-8') + "[/B]"
# Context menu: Delete series / channel
channels_num = len(item.library_urls)
if "downloads" in item.library_urls: channels_num -= 1
if channels_num > 1: delete_text = config.get_localized_string(60024)
else: delete_text = config.get_localized_string(60025)
item.context = [{"title": seen_text, "action": "mark_content_as_watched", "channel": "videolibrary", "playcount": counter},
{"title": update_text, "action": "mark_tvshow_as_updatable", "channel": "videolibrary", "active": value},
{"title": delete_text, "action": "delete", "channel": "videolibrary", "multichannel": multichannel},
{"title": config.get_localized_string(70269), "action": "update_tvshow", "channel": "videolibrary"}]
if item.local_episodes_path == "": item.context.append({"title": config.get_localized_string(80048), "action": "add_local_episodes", "channel": "videolibrary"})
else: item.context.append({"title": config.get_localized_string(80049), "action": "remove_local_episodes", "channel": "videolibrary"})
else: item = Item()
return item, value
def configure_update_videolibrary(item):
import xbmcgui
# Load list of options (active user channels that allow global search)
@@ -355,7 +263,6 @@ def configure_update_videolibrary(item):
return True
def get_seasons(item):
logger.info()
# logger.debug("item:\n" + item.tostring('\n'))
@@ -810,9 +717,9 @@ def update_tvshow(item):
nfo_path = filetools.join(item.path, "tvshow.nfo")
head_nfo, item_nfo = videolibrarytools.read_nfo(nfo_path)
if item.active and not item_nfo.active:
if not platformtools.dialog_yesno(config.get_localized_string(60037).replace('...',''), config.get_localized_string(70268) % item.contentSerieName):
item_nfo.active = 1
filetools.write(nfo_path, head_nfo + item_nfo.tojson())
# if not platformtools.dialog_yesno(config.get_localized_string(60037).replace('...',''), config.get_localized_string(70268) % item.contentSerieName):
item_nfo.active = 1
filetools.write(nfo_path, head_nfo + item_nfo.tojson())
platformtools.itemlist_refresh()
@@ -820,7 +727,7 @@ def update_tvshow(item):
def add_local_episodes(item):
logger.info()
done, local_episodes_path = videolibrarytools.config_local_episodes_path(item.path, item.contentSerieName, silent=True)
done, local_episodes_path = videolibrarytools.config_local_episodes_path(item.path, item, silent=True)
if done < 0:
logger.info("An issue has occurred while configuring local episodes")
elif local_episodes_path:
@@ -1090,7 +997,7 @@ def delete(item):
heading = config.get_localized_string(70084)
else:
heading = config.get_localized_string(70085)
if item.multicanal:
if item.multichannel:
# Get channel list
if item.dead == '':
opciones = []

View File

@@ -6,12 +6,15 @@ import parameterized
from platformcode import config
config.set_setting('tmdb_active', False)
librerias = os.path.join(config.get_runtime_path(), 'lib')
sys.path.insert(0, librerias)
from core.support import typo
from core.item import Item
import channelselector
from core.httptools import downloadpage
from core import servertools
import channelselector
import re
validUrlRegex = re.compile(
@@ -117,18 +120,13 @@ from specials import news
dictNewsChannels, any_active = news.get_channels_list()
srvLinkDict = {
"wstream": ["https://wstream.video/video6zvimpy52/dvvwxyfs32ab"],
"akvideo": ["https://akvideo.stream/video.php?file_code=23god95lrtqv"]
}
servers_found = []
def getServers():
server_list = servertools.get_servers_list()
ret = []
for srv in server_list:
if srv in srvLinkDict:
ret.append({'srv': srv})
for srv in servers_found:
ret.append({'item': srv})
return ret
@@ -144,26 +142,27 @@ class GenericChannelTest(unittest.TestCase):
self.assertTrue(mainlist, 'channel ' + self.ch + ' has no menu')
for it in mainlist:
it.title = it.title.decode('ascii', 'ignore')
print 'testing ' + self.ch + ' -> ' + it.title
if it.action == 'channel_config':
hasChannelConfig = True
continue
if it.action == 'search': # channel specific
if it.action == 'search': # channel-specific
continue
itemlist = getattr(self.module, it.action)(it)
self.assertTrue(itemlist, 'channel ' + self.ch + ' -> ' + it.title + ' is empty')
if self.ch in chNumRis: # so a priori quanti risultati dovrebbe dare
if self.ch in chNumRis: # i know how much results should be
for content in chNumRis[self.ch]:
if content in it.title:
risNum = len([it for it in itemlist if not it.nextPage]) # not count nextpage
risNum = len([i for i in itemlist if not i.nextPage]) # not count nextpage
self.assertEqual(chNumRis[self.ch][content], risNum,
'channel ' + self.ch + ' -> ' + it.title + ' returned wrong number of results')
break
for resIt in itemlist:
self.assertLess(len(resIt.fulltitle), 100,
self.assertLess(len(resIt.fulltitle), 110,
'channel ' + self.ch + ' -> ' + it.title + ' might contain wrong titles\n' + resIt.fulltitle)
if resIt.url:
self.assertIsInstance(resIt.url, str, 'channel ' + self.ch + ' -> ' + it.title + ' -> ' + resIt.title + ' contain non-string url')
self.assertIsNotNone(re.match(validUrlRegex, resIt.url),
'channel ' + self.ch + ' -> ' + it.title + ' -> ' + resIt.title + ' might contain wrong url\n' + resIt.url)
if 'year' in resIt.infoLabels and resIt.infoLabels['year']:
@@ -176,6 +175,53 @@ class GenericChannelTest(unittest.TestCase):
nextPageItemlist = getattr(self.module, resIt.action)(resIt)
self.assertTrue(nextPageItemlist,
'channel ' + self.ch + ' -> ' + it.title + ' has nextpage not working')
# some sites might have no link inside, but if all results are without servers, there's something wrong
servers = []
for resIt in itemlist:
if hasattr(self.module, resIt.action):
servers = getattr(self.module, resIt.action)(resIt)
else:
servers = [resIt]
if servers:
break
self.assertTrue(servers, 'channel ' + self.ch + ' -> ' + it.title + ' has no servers on all results')
for server in servers:
srv = server.server.lower()
if not srv:
continue
module = __import__('servers.%s' % srv, fromlist=["servers.%s" % srv])
page_url = server.url
print 'testing ' + page_url
self.assert_(hasattr(module, 'test_video_exists'), srv + ' has no test_video_exists')
if module.test_video_exists(page_url)[0]:
urls = module.get_video_url(page_url)
server_parameters = servertools.get_server_parameters(srv)
self.assertTrue(urls or server_parameters.get("premium"), srv + ' scraper did not return direct urls for ' + page_url)
print urls
for u in urls:
spl = u[1].split('|')
if len(spl) == 2:
directUrl, headersUrl = spl
else:
directUrl, headersUrl = spl[0], ''
headers = {}
if headersUrl:
for name in headersUrl.split('&'):
h, v = name.split('=')
h = str(h)
headers[h] = str(v)
print headers
if 'magnet:?' in directUrl: # check of magnet links not supported
continue
page = downloadpage(directUrl, headers=headers, only_headers=True, use_requests=True)
self.assertTrue(page.success, srv + ' scraper returned an invalid link')
self.assertLess(page.code, 400, srv + ' scraper returned a ' + str(page.code) + ' link')
contentType = page.headers['Content-Type']
self.assert_(contentType.startswith('video') or 'mpegurl' in contentType or 'octet-stream' in contentType or 'dash+xml' in contentType,
srv + ' scraper did not return valid url for link ' + page_url + '\nDirect url: ' + directUrl + '\nContent-Type: ' + contentType)
self.assertTrue(hasChannelConfig, 'channel ' + self.ch + ' has no channel config')
def test_newest(self):
@@ -187,30 +233,5 @@ class GenericChannelTest(unittest.TestCase):
break
#
# @parameterized.parameterized_class(getServers())
# class GenericServerTest(unittest.TestCase):
# def __init__(self, *args):
# self.module = __import__('servers.%s' % self.srv, fromlist=["servers.%s" % self.srv])
# super(GenericServerTest, self).__init__(*args)
#
# def test_resolve(self):
# for link in srvLinkDict[self.srv]:
# find = servertools.findvideosbyserver(link, self.srv)
# self.assertTrue(find, 'link ' + link + ' not recognised')
# page_url = find[0][1]
# if self.module.test_video_exists(page_url)[0]:
# urls = self.module.get_video_url(page_url)
# print urls
# for u in urls:
# directUrl, headersUrl = u[1].split('|')
# headers = {}
# for name in headersUrl.split('&'):
# h, v = name.split('=')
# headers[h] = v
# print headers
# self.assertEqual(requests.head(directUrl, headers=headers, timeout=15).status_code, 200, self.srv + ' scraper did not return valid url for link ' + link)
if __name__ == '__main__':
    # Disable TMDB metadata lookups so the tests hit only the channel scrapers.
    config.set_setting('tmdb_active', False)
    unittest.main()

111
updatetvshow.py Normal file
View File

@@ -0,0 +1,111 @@
# -*- coding: utf-8 -*-
import xbmc, sys, base64, json, xbmcgui, os, xbmcvfs, traceback
from platformcode import config, logger
from lib.sambatools import libsmb as samba
from core import scrapertools
def exists(path, silent=False, vfs=True):
    """Return True if *path* exists, after resolving Kodi special:// paths.

    With ``vfs=True`` the check goes through Kodi's virtual file system
    (handles any protocol Kodi supports). Otherwise smb:// paths are checked
    via the samba helper and everything else via ``os.path``. Any error is
    logged (with a traceback unless *silent*) and reported as False.
    """
    path = xbmc.translatePath(path)
    try:
        if not vfs:
            if path.lower().startswith("smb://"):
                return samba.exists(path)
            return os.path.exists(path)
        found = bool(xbmcvfs.exists(path))
        # xbmcvfs can miss directories when the path lacks a trailing
        # separator: retry with a normalized separator appended.
        if not found and not (path.endswith('/') or path.endswith('\\')):
            found = bool(xbmcvfs.exists(join(path, ' ').rstrip()))
        return found
    except:
        logger.error("ERROR when checking the path: %s" % path)
        if not silent:
            logger.error(traceback.format_exc())
        return False
def join(*paths):
    """Join path fragments with the appropriate separator.

    Remote paths (anything starting with a protocol such as ``smb://``)
    always use forward slashes; purely local paths use the platform
    separator. A leading "/" on the first fragment is preserved.
    """
    parts = [""] if paths[0].startswith("/") else []
    for fragment in paths:
        if fragment:
            parts.extend(fragment.replace("\\", "/").strip("/").split("/"))
    # Protocol prefix on the first fragment means a remote/VFS path.
    if scrapertools.find_single_match(paths[0], r'(^\w+:\/\/)'):
        separator = "/"
    else:
        separator = os.sep
    return str(separator.join(parts))
def search_paths(Id):
    """Return the videolibrary folder of the tv show with Kodi db id *Id*.

    Looks up every path linked to the show in Kodi's video database and
    returns the first one that lives inside KoD's videolibrary folder and
    contains a ``tvshow.nfo`` (i.e. the show is managed by KoD).
    Returns '' when no such path is found.
    """
    library_path = config.get_setting('videolibrarypath')
    # execute_sql returns None when the video DB cannot be located or the
    # query fails — normalize to an empty list so we never crash here.
    records = execute_sql('SELECT idPath FROM tvshowlinkpath WHERE idShow LIKE "%s"' % Id) or []
    for record in records:
        path_records = execute_sql('SELECT strPath FROM path WHERE idPath LIKE "%s"' % record[0]) or []
        for path in path_records:
            if library_path in path[0] and exists(join(path[0], 'tvshow.nfo')):
                return path[0]
    return ''
def execute_sql(sql):
    """Run *sql* against Kodi's MyVideos database and return the fetched rows.

    Returns the list of rows from ``cursor.fetchall()``, or None when the
    database file cannot be located or the query fails. For SELECTs, a
    single all-NULL row is normalized to an empty list.
    """
    logger.info()
    file_db = ""
    records = None

    # We look for the archive of the video database according to the version of kodi
    video_db = config.get_platform(True)['video_db']
    if video_db:
        file_db = os.path.join(xbmc.translatePath("special://userdata/Database"), video_db)

    # alternative method to locate the database: first MyVideos*.db found
    if not file_db or not os.path.exists(file_db):
        file_db = ""
        db_dir = xbmc.translatePath("special://userdata/Database")
        # fixed: was os.path.listdir, which does not exist (AttributeError)
        for f in os.listdir(db_dir):
            path_f = os.path.join(db_dir, f)
            # fixed: was a garbled os.path.pathoos.pathols.isfile
            if os.path.isfile(path_f) and f.lower().startswith('myvideos') and f.lower().endswith('.db'):
                file_db = path_f
                break

    if file_db:
        logger.info("DB file: %s" % file_db)
        conn = None
        try:
            import sqlite3
            conn = sqlite3.connect(file_db)
            cursor = conn.cursor()
            logger.info("Running sql: %s" % sql)
            cursor.execute(sql)
            conn.commit()
            records = cursor.fetchall()
            if sql.lower().startswith("select"):
                # A single NULL cell means the query matched nothing real.
                if len(records) == 1 and records[0][0] is None:
                    records = []
            conn.close()
            # fixed: logged undefined name nun_records (NameError)
            logger.info("Query executed. Records: %s" % len(records))
        except:
            logger.error("Error executing sql query")
            logger.error(traceback.format_exc())
            if conn:
                conn.close()
    else:
        logger.debug("Database not found")

    return records
if __name__ == '__main__':
    # Context-menu entry point: runs on the tvshow currently selected in
    # Kodi's library (sys.listitem is provided by the kodi.context.item hook).
    path = search_paths(sys.listitem.getVideoInfoTag().getDbId())
    if path:
        # Show is managed by KoD: trigger a videolibrary episode update.
        item = {"action": "update_tvshow", "channel": "videolibrary", 'path':path}
        xbmc.executebuiltin("RunPlugin(plugin://plugin.video.kod/?" + base64.b64encode(json.dumps(item).encode()) + ")")
    else:
        # Not managed by KoD: offer to search for the show instead.
        dialog = xbmcgui.Dialog()
        title = sys.listitem.getVideoInfoTag().getTitle()
        if dialog.yesno(title, config.get_localized_string(70817) % title, nolabel=config.get_localized_string(70170), yeslabel=config.get_localized_string(30022)):
            item = {"action": "new_search", "channel": "search", "mode":"tvshow", "search_text": sys.listitem.getVideoInfoTag().getTitle()}
            xbmc.executebuiltin("ActivateWindow(10025,plugin://plugin.video.kod/?" + base64.b64encode(json.dumps(item).encode()) + ")")