* fix next page

* testing new filmontv

* Wstream quick fix, no resolution displayed :(

* new filmontv

* now regex is ok

* fix .po files

* +netlovers

* working on filmontv

* fix debriders

* new updater

* updater

* fix crash

* fix updater and re-add dev mode

* new url eurostreaming

* Delete netlovers.py

* Delete netlovers.json

* -net from menù

* fix eurostreaming: numero stagione e newest (#50)

* fix canale

* fix newest

* fix numero puntata

* cleanup

* cleanup 2

* fix updater crash on windows

* Fix Animeworld

* Nuovo Autorenumber

* initial background downloader support

* ops

* Update channels.json

* Update channels.json

* fix openload

* move json update to cohesist with updater

* disable json url updates

* fix typo

* fix typo 2

* Add files via upload

* Add files via upload

* fix autoplay in community channels

* fix toonitalia

* Fix Toonitalia

* workaround serietvsubita

* Nuova Rinumerazione Automatica

* Fix per Rinumerazione Automatica

* workaround updater

* Fix on air

* ops

* Personalizzazione sezione "Oggi in TV"

* Aggiunto orario sezione Oggi in TV

* aggiunto bit.ly (#56)

* aggiunto bit.ly

* Aggiunta personalizzazione homepage

* Revert "initial background downloader support"

This reverts commit f676ab0f

* KoD 0.4
This commit is contained in:
mac12m99
2019-06-30 10:35:48 +02:00
committed by GitHub
parent 7551aed8ba
commit 3fb9b068d9
47 changed files with 1340 additions and 664 deletions

View File

@@ -1,6 +0,0 @@
[Dolphin]
Timestamp=2019,4,23,18,58,8
Version=4
[Settings]
HiddenFilesShown=true

2
.gitignore vendored
View File

@@ -3,4 +3,6 @@
.DS_Store
.idea/
.directory
custom_code.json
last_commit.txt
__pycache__/

View File

@@ -1 +0,0 @@
theme: jekyll-theme-midnight

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.kod" name="Kodi on Demand" version="0.3.1" provider-name="KOD Team">
<addon id="plugin.video.kod" name="Kodi on Demand BETA" version="0.4" provider-name="KOD Team">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>

View File

@@ -16,18 +16,18 @@
"casacinema": "https://www.casacinema.site",
"casacinemainfo": "https://www.casacinema.info",
"cb01anime": "http://www.cineblog01.ink",
"cinemalibero": "https://cinemalibero.icu",
"cinemalibero": "https://cinemalibero.best",
"cinemastreaming": "https://cinemastreaming.icu",
"documentaristreamingda": "https://documentari-streaming-da.com",
"dreamsub": "https://www.dreamsub.stream",
"eurostreaming": "https://eurostreaming.gratis",
"eurostreaming": "https://eurostreaming.pink",
"eurostreaming_video": "https://www.eurostreaming.best",
"fastsubita": "http://fastsubita.com",
"ffilms":"https://ffilms.org",
"filmigratis": "https://filmigratis.net",
"filmgratis": "https://www.filmaltadefinizione.net",
"filmontv": "https://www.comingsoon.it",
"filmpertutti": "https://www.filmpertutti.tube",
"filmpertutti": "https://www.filmpertutti.media",
"filmsenzalimiti": "https://filmsenzalimiti.best",
"filmsenzalimiticc": "https://www.filmsenzalimiti.host",
"filmsenzalimiti_blue": "https://filmsenzalimiti.best",
@@ -51,8 +51,8 @@
"mondolunatico": "http://mondolunatico.org",
"mondolunatico2": "http://mondolunatico.org/stream/",
"mondoserietv": "https://mondoserietv.com",
"piratestreaming": "https://www.piratestreaming.watch",
"seriehd": "https://www.seriehd.info",
"piratestreaming": "https://www.piratestreaming.media",
"seriehd": "https://www.seriehd.pink",
"serietvonline": "https://serietvonline.xyz",
"serietvsubita": "http://serietvsubita.xyz",
"serietvu": "https://www.serietvu.club",

View File

@@ -57,6 +57,22 @@
"enabled": true,
"visible": true,
"lvalues": ["Non filtrare", "IT"]
},
{
"id": "autorenumber",
"type": "bool",
"label": "@70712",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "autorenumber_mode",
"type": "bool",
"label": "@70688",
"default": false,
"enabled": true,
"visible": "eq(-1,true)"
}
]
}

View File

@@ -38,6 +38,7 @@ def mainlist(item):
autoplay.init(item.channel, list_servers, list_quality)
autoplay.show_option(item.channel, itemlist)
support.channel_config(item, itemlist)
return itemlist

View File

@@ -65,6 +65,22 @@
"enabled": true,
"visible": true,
"lvalues": ["Non filtrare","IT"]
},
{
"id": "autorenumber",
"type": "bool",
"label": "@70712",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "autorenumber_mode",
"type": "bool",
"label": "@70688",
"default": false,
"enabled": true,
"visible": "eq(-1,true)"
}
]
}

View File

@@ -65,6 +65,22 @@
"enabled": true,
"visible": true,
"lvalues": ["No filtrar","Italiano"]
}
},
{
"id": "autorenumber",
"type": "bool",
"label": "@70712",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "autorenumber_mode",
"type": "bool",
"label": "@70688",
"default": false,
"enabled": true,
"visible": "eq(-1,true)"
}
]
}

View File

@@ -172,7 +172,7 @@ def video(item):
log()
itemlist = []
matches, data = support.match(item, r'<a href="([^"]+)" class[^>]+><img src="([^"]+)"(.*?)data-jtitle="([^"]+)" .*?>(.*?)<\/a>', headers=headers)
matches, data = support.match(item, r'<a href="([^"]+)" class[^>]+><img src="([^"]+)"(.*?)data-jtitle="([^"]+)" .*?>(.*?)<\/a>', '<div class="widget-body">(.*?)<div id="sidebar"', headers=headers)
for scrapedurl, scrapedthumb ,scrapedinfo, scrapedoriginal, scrapedtitle in matches:
# Cerca Info come anno o lingua nel Titolo
@@ -245,21 +245,21 @@ def video(item):
fulltitle=title,
show=title,
thumbnail=scrapedthumb,
context = autoplay.context))
context = autoplay.context,
number= '1'))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
autorenumber.renumber(itemlist)
# Next page
support.nextPage(itemlist, item, data, r'<a\sclass="page-link"\shref="([^"]+)"\srel="next"\saria-label="Successiva')
support.nextPage(itemlist, item, data, r'href="([^"]+)" rel="next"', resub=['&amp;','&'])
return itemlist
def episodios(item):
log()
itemlist = []
patron_block = r'<div class="widget servers".*?>(.*?)<div id="download"'
patron_block = r'server active(.*?)server hidden '
patron = r'<li><a [^=]+="[^"]+"[^=]+="[^"]+"[^=]+="[^"]+"[^=]+="[^"]+"[^=]+="[^"]+" href="([^"]+)"[^>]+>([^<]+)<'
matches = support.match(item, patron, patron_block)[0]
@@ -275,9 +275,10 @@ def episodios(item):
show=scrapedtitle,
plot=item.plot,
fanart=item.thumbnail,
thumbnail=item.thumbnail))
thumbnail=item.thumbnail,
number=scrapedtitle))
autorenumber.renumber(itemlist, item,'bold')
autorenumber.renumber(itemlist, item, 'bold')
support.videolibrary(itemlist, item)
return itemlist
@@ -285,31 +286,31 @@ def episodios(item):
def findvideos(item):
log()
itemlist = []
matches, data = support.match(item, r'class="tab.*?data-name="([0-9]+)">([^<]+)</span', headers=headers)
videoData = ''
for serverid, servername in matches:
block = scrapertoolsV2.find_multiple_matches(data,'data-id="'+serverid+'">(.*?)<div class="server')
id = scrapertoolsV2.find_single_match(str(block),r'<a data-id="([^"]+)" data-base="'+item.fulltitle+'"')
dataJson = httptools.downloadpage('%s/ajax/episode/info?id=%s&server=%s&ts=%s' % (host, id, serverid, int(time.time())), headers=[['x-requested-with', 'XMLHttpRequest']]).data
json = jsontools.load(dataJson)
log('JSON= ',json)
log('ITEM= ',item)
id = scrapertoolsV2.find_single_match(str(block),r'<a data-id="([^"]+)" data-base="'+item.number+'"')
if id:
dataJson = httptools.downloadpage('%s/ajax/episode/info?id=%s&server=%s&ts=%s' % (host, id, serverid, int(time.time())), headers=[['x-requested-with', 'XMLHttpRequest']]).data
json = jsontools.load(dataJson)
videoData +='\n'+json['grabber']
videoData +='\n'+json['grabber']
if serverid == '28':
itemlist.append(
Item(
channel=item.channel,
action="play",
title='diretto',
quality='',
url=json['grabber'],
server='directo',
show=item.show,
contentType=item.contentType,
folder=False))
if serverid == '28':
itemlist.append(
Item(
channel=item.channel,
action="play",
title='diretto',
quality='',
url=json['grabber'],
server='directo',
show=item.show,
contentType=item.contentType,
folder=False))
return support.server(item, videoData, itemlist)

View File

@@ -31,6 +31,22 @@
"default": true,
"enabled": true,
"visible": true
},
{
"id": "autorenumber",
"type": "bool",
"label": "@70712",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "autorenumber_mode",
"type": "bool",
"label": "@70688",
"default": false,
"enabled": true,
"visible": "eq(-1,true)"
}
]
}

View File

@@ -14,8 +14,8 @@ from platformcode import logger, config
__channel__ = "dreamsub"
host = config.get_channel_url(__channel__)
list_servers = ['animeworld', 'verystream', 'streamango', 'openload', 'directo']
list_quality = ['default', '480p', '720p', '1080p']
list_servers = ['verystream', 'streamango', 'openload']
list_quality = ['default']
def mainlist(item):
@@ -25,7 +25,7 @@ def mainlist(item):
menu(itemlist, 'Anime / Cartoni', 'peliculas', host + '/anime', 'tvshow')
menu(itemlist, 'Categorie', 'categorie', host + '/filter?genere=', 'tvshow')
menu(itemlist, 'Ultimi Episodi', 'last', host, 'episode')
menu(itemlist, 'Cerca', 'search')
menu(itemlist, 'Cerca...', 'search')
support.aplay(item, itemlist, list_servers, list_quality)
support.channel_config(item, itemlist)
@@ -69,7 +69,8 @@ def newest(categoria):
def peliculas(item):
itemlist = scrape(item, r'Lingua[^<]+<br>\s*<a href="(?:Lista episodi )?([^"]+)" title="(?:Lista episodi )?(.*?)(?: \(([0-9]+)\))?(?: Streaming)?">', ['url', 'title', 'year'], action='episodios', patron_block='<input type="submit" value="Vai!" class="blueButton">(.*?)<div class="footer">', patronNext='<li class="currentPage">[^>]+><li[^<]+<a href="([^"]+)">')
return renumber(itemlist)
renumber(itemlist)
return itemlist
def last(item):
@@ -94,7 +95,8 @@ def categorie(item):
def episodios(item):
itemlist = scrape(item, r'<li><a href="([^"]+)"[^<]+<b>(.*?)<\/b>[^>]+>([^<]+)<\/i>', ['url','title','title2'], patron_block='<div class="seasonEp">(.*?)<div class="footer">')
return renumber(itemlist, item, 'bold')
renumber(itemlist, item, 'bold')
return itemlist
def findvideos(item):
log()

View File

@@ -12,7 +12,7 @@
"id": "channel_host",
"type": "text",
"label": "Host del canale",
"default": "https://eurostreaming.cafe/",
"default": "https://eurostreaming.cafe",
"enabled": true,
"visible": true
},

View File

@@ -96,7 +96,7 @@ def episodios(item):
#=========
patron = r'(?:<\/span>\w+ STAGIONE\s\d+ (?:\()?(ITA|SUB ITA)(?:\))?<\/div>'\
'<div class="su-spoiler-content su-clearfix" style="display:none">|'\
'(?:\s|\Wn)?(?:<strong>)?(\d&#.*?)(?:|)?<a\s(.*?)<\/a><br\s\/>)'
'(?:\s|\Wn)?(?:<strong>)?(\d+&#.*?)(?:|)?<a\s(.*?)<\/a><br\s\/>)'
## '(?:<\/span>\w+ STAGIONE\s\d+ (?:\()?(ITA|SUB ITA)(?:\))?'\
## '<\/div><div class="su-spoiler-content su-clearfix" style="display:none">|'\
## '(?:\s|\Wn)?(?:<strong>)?(\d[&#].*?)(?:|\W)?<a\s(.*?)<\/a><br\s\/>)'
@@ -141,7 +141,7 @@ def findvideos(item):
# =========== def ricerca =============
def search(item, texto):
support.log()
item.url = "%s?s=%s" % (host, texto)
item.url = "%s/?s=%s" % (host, texto)
try:
return serietv(item)
# Continua la ricerca in caso di errore
@@ -159,7 +159,7 @@ def newest(categoria):
item.contentType= 'episode'
item.args= 'True'
try:
item.url = "%saggiornamento-episodi/" % host
item.url = "%s/aggiornamento-episodi/" % host
item.action = "serietv"
itemlist = serietv(item)

View File

@@ -117,7 +117,7 @@ def categorias_film(item):
for scrapedurl, scrapedtitle in matches:
itemlist.append(
Item(channel=channel,
Item(channel=__channel__,
action="peliculas_categorias",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
@@ -139,7 +139,7 @@ def categorias_serie(item):
for scrapedurl, scrapedtitle in matches:
itemlist.append(
Item(channel=channel,
Item(channel=__channel__,
contentType='tvshow',
action="peliculas_serie",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",

View File

@@ -57,7 +57,7 @@ def carousel(item, regex=r'<h2>Ultime Richieste Inserite</h2>(.*?)<header>', con
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedtitle = re.sub(r'[0-9]{4}', "", scrapedtitle)
itemlist.append(
Item(channel=channel,
Item(channel=__channel__,
action="findvideos",
contentType=contentType,
title=scrapedtitle + " " + "[COLOR orange][" + year + "][/COLOR]",
@@ -113,7 +113,7 @@ def top_imdb(item, contentType='movie', regex=r'<h1.*?TOP IMDb.*?<h3>(.*?)<h3>')
scrapedtitle = re.sub(r'[0-9]{4}', "", scrapedtitle)
scrapedthumbnail = scrapedthumbnail.replace ("-90x135","").replace("/w92/", "/w600_and_h900_bestv2/")
itemlist.append(
Item(channel=channel,
Item(channel=__channel__,
action="findvideos" if "movie" in contentType else "episodios",
contentType=item.contentType,
contentTitle=scrapedtitle,
@@ -128,7 +128,7 @@ def top_imdb(item, contentType='movie', regex=r'<h1.*?TOP IMDb.*?<h3>(.*?)<h3>')
thumbnail = thumb(itemlist=[])
scrapedurl = item.url + '{}' + str(p + 1)
itemlist.append(
Item(channel=channel,
Item(channel=__channel__,
contentType=item.contentType,
action="top_imdb",
title="[COLOR blue][B]Successivo >[/B][/COLOR]",
@@ -173,7 +173,7 @@ def peliculas(item):
scrapedtitle = re.sub(r'[0-9]{4}', "", scrapedtitle)
type = "[COLOR aqua][Serie][/COLOR]" if "tvshows" in item.args else "[COLOR aqua][Film][/COLOR]"
itemlist.append(
Item(channel=channel,
Item(channel=__channel__,
action="episodios" if "tvshows" in item.args else "findvideos",
contentType="episode" if "tvshows" in item.args else "movie",
title=scrapedtitle + " " + "[COLOR orange][" + year + "][/COLOR]" + " " + type,
@@ -223,7 +223,7 @@ def episodios(item):
matches = re.compile(patron, re.DOTALL).findall(block)
for scrapedurl, scrapedtitle in matches:
itemlist.append(
Item(channel=channel,
Item(channel=__channel__,
action="videoplayer",
contentType=item.contentType,
title=scrapedtitle,
@@ -250,7 +250,7 @@ def episodios(item):
if len(matches) > 1:
for scrapedtitle, scrapedurl in matches:
itemlist.append(
Item(channel=channel,
Item(channel=__channel__,
action="player_list",
contentType=item.contentType,
title=scrapedtitle,
@@ -310,7 +310,7 @@ def player(item):
for scrapedurl in matches:
scrapedurl = "https://fvs.io/" + scrapedurl
itemlist.append(
Item(channel=channel,
Item(channel=__channel__,
action="play",
contentType=item.contentType,
title=item.title,
@@ -345,7 +345,7 @@ def player_list(item):
scrapedtitle = re.sub('Pir8|UBi|M L|BEDLAM|REPACK|DD5.1|bloody|SVU', '', scrapedtitle)
scrapedtitle = scrapedtitle.replace(".", " ").replace(" - ", " ").replace(" -", "").replace(" ", "")
itemlist.append(
Item(channel=channel,
Item(channel=__channel__,
action="halfplayer",
contentType=item.contentType,
title=scrapedtitle,
@@ -393,7 +393,7 @@ def dooplayer(item):
for scrapedurl in matches:
scrapedurl = "https://fvs.io/" + scrapedurl
itemlist.append(
Item(channel=channel,
Item(channel=__channel__,
action="play",
contentType=item.contentType,
title=item.title,

View File

@@ -232,27 +232,29 @@ def peliculas_tv(item):
scrapedplot = ""
scrapedtitle = cleantitle(scrapedtitle)
infoLabels = {}
episode = scrapertools.find_multiple_matches(scrapedtitle, r'((\d*)x(\d*))')[0]
title = scrapedtitle.split(" S0")[0].strip()
title = title.split(" S1")[0].strip()
title = title.split(" S2")[0].strip()
episode = scrapertools.find_multiple_matches(scrapedtitle, r'((\d*)x(\d*))')
if episode: # workaround per quando mettono le serie intere o altra roba, sarebbero da intercettare TODO
episode = episode[0]
title = scrapedtitle.split(" S0")[0].strip()
title = title.split(" S1")[0].strip()
title = title.split(" S2")[0].strip()
infoLabels['season'] = episode[1]
infoLabels['episode'] = episode[2].zfill(2)
infoLabels['season'] = episode[1]
infoLabels['episode'] = episode[2].zfill(2)
itemlist.append(
Item(channel=item.channel,
action="findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title=title + " - " + episode[0] + " " + support.typo("Sub-ITA", '_ [] color kod'),
url=scrapedurl,
thumbnail=scrapedthumbnail,
contentSerieName=title,
contentLanguage='Sub-ITA',
plot=scrapedplot,
infoLabels=infoLabels,
folder=True))
itemlist.append(
Item(channel=item.channel,
action="findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title=title + " - " + episode[0] + " " + support.typo("Sub-ITA", '_ [] color kod'),
url=scrapedurl,
thumbnail=scrapedthumbnail,
contentSerieName=title,
contentLanguage='Sub-ITA',
plot=scrapedplot,
infoLabels=infoLabels,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

View File

@@ -58,6 +58,22 @@
"enabled": true,
"visible": true,
"lvalues": ["Non filtrare","IT"]
}
},
{
"id": "autorenumber",
"type": "bool",
"label": "@70712",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "autorenumber_mode",
"type": "bool",
"label": "@70688",
"default": false,
"enabled": true,
"visible": "eq(-1,true)"
}
]
}

View File

@@ -136,7 +136,8 @@ def peliculas(item):
else:
patron = r'<div class="media3">[^>]+><a href="([^"]+)"><img[^s]+src="([^"]+)"[^>]+><\/a><[^>]+><a[^<]+><p>([^<]+) \(([^\)]+)[^<]+<\/p>.*?<p>\s*([a-zA-Z-0-9]+)\s*<\/p>'
itemlist = support.scrape(item, patron, ['url', 'thumb', 'title', 'year', 'quality'], headers, action=action, patronNext='<a class="nextpostslink" rel="next" href="([^"]+)">')
return autorenumber.renumber(itemlist) if item.args == 'anime' else itemlist
if item.args == 'anime': autorenumber.renumber(itemlist)
return itemlist
def episodios(item):

View File

@@ -63,7 +63,7 @@ def insert(item):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(
Item(channel=channel,
Item(channel=__channel__,
action="episodios",
contentType="episode",
title=scrapedtitle,
@@ -77,7 +77,7 @@ def insert(item):
if len(matches) >= p * minpage:
scrapedurl = item.url + '{}' + str(p + 1)
itemlist.append(
Item(channel=channel,
Item(channel=__channel__,
args=item.args,
action="insert",
title="[COLOR blue][B]Successivo >[/B][/COLOR]",
@@ -109,7 +109,7 @@ def updates(item):
scrapedplot = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(
Item(channel=channel,
Item(channel=__channel__,
action="episodios",
contentType="episode",
title=scrapedtitle,
@@ -141,7 +141,7 @@ def most_view(item):
scrapedplot = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(
Item(channel=channel,
Item(channel=__channel__,
action="episodios",
contentType="episode",
title=scrapedtitle,
@@ -177,7 +177,7 @@ def list(item):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedplot = ""
itemlist.append(
Item(channel=channel,
Item(channel=__channel__,
action = 'episodios' if not 'film' in item.args else 'findvideos',
contentType=item.contentType,
title=scrapedtitle,
@@ -190,7 +190,7 @@ def list(item):
if len(matches) >= p * minpage:
scrapedurl = item.url + '{}' + str(p + 1)
itemlist.append(
Item(channel=channel,
Item(channel=__channel__,
args=item.args,
contentType=item.contentType,
action="list",
@@ -224,7 +224,7 @@ def peliculas(item):
if i >= p * minpage: break
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(
Item(channel=channel,
Item(channel=__channel__,
action="episodios",
contentType="episode",
title=scrapedtitle,
@@ -236,7 +236,7 @@ def peliculas(item):
if len(matches) >= p * minpage:
scrapedurl = item.url + '{}' + str(p + 1)
itemlist.append(
Item(channel=channel,
Item(channel=__channel__,
extra=item.extra,
action="peliculas",
title="[COLOR blue][B]Successivo >[/B][/COLOR]",
@@ -257,7 +257,7 @@ def episodios(item):
matches = re.compile(patron, re.DOTALL).findall(data)
if "https://vcrypt.net" in data:
patron = r'(?:<p>|<br />)([^<]+)<a href="([^"]+)'
patron = r'(?:<p>|<br /> )([^<]+) &#8211; <a href="([^"]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle, scrapedurl in matches:
@@ -273,7 +273,7 @@ def episodios(item):
else:
itemlist.append(
Item(channel=channel,
Item(channel=__channel__,
action="findvideos",
contentType=item.contentType,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
@@ -298,7 +298,7 @@ def episodios(item):
else:
itemlist.append(
Item(channel=channel,
Item(channel=__channel__,
action="findvideos",
contentType=item.contentType,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
@@ -337,7 +337,7 @@ def findvideos(item):
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.channel = channel
videoitem.channel = __channel__
server = re.sub(r'[-\[\]\s]+', '', videoitem.title)
videoitem.title = "".join(['[COLOR blue] ' + "[[B]" + server + "[/B]][/COLOR] " + item.title])
videoitem.thumbnail = item.thumbnail
@@ -349,7 +349,7 @@ def findvideos(item):
itemlist = servertools.find_video_items(data=item.url)
for videoitem in itemlist:
videoitem.channel = channel
videoitem.channel = __channel__
server = re.sub(r'[-\[\]\s]+', '', videoitem.title)
videoitem.title = "".join(['[COLOR blue] ' + "[[B]" + server + "[/B]] " + item.title + '[/COLOR]'])
videoitem.thumbnail = item.thumbnail

View File

@@ -17,31 +17,40 @@ def getmainlist(view="thumb_"):
itemlist = list()
# Añade los canales que forman el menú principal
itemlist.append(Item(title=config.get_localized_string(30130), channel="news", action="mainlist",
thumbnail=get_thumb("news.png", view),
category=config.get_localized_string(30119), viewmode="thumbnails",
context=[{"title": config.get_localized_string(70285), "channel": "news", "action": "menu_opciones",
"goto": True}]))
if addon.getSetting('enable_news_menu') == "true":
itemlist.append(Item(title=config.get_localized_string(30130), channel="news", action="mainlist",
thumbnail=get_thumb("news.png", view),
category=config.get_localized_string(30119), viewmode="thumbnails",
context=[{"title": config.get_localized_string(70285), "channel": "news", "action": "menu_opciones",
"goto": True}]))
itemlist.append(Item(title=config.get_localized_string(30118), channel="channelselector", action="getchanneltypes",
thumbnail=get_thumb("channels.png", view), view=view,
category=config.get_localized_string(30119), viewmode="thumbnails"))
if addon.getSetting('enable_channels_menu') == "true":
itemlist.append(Item(title=config.get_localized_string(30118), channel="channelselector", action="getchanneltypes",
thumbnail=get_thumb("channels.png", view), view=view,
category=config.get_localized_string(30119), viewmode="thumbnails"))
itemlist.append(Item(title=config.get_localized_string(70527), channel="kodfavorites", action="mainlist",
thumbnail=get_thumb("mylink.png", view), view=view,
category=config.get_localized_string(70527), viewmode="thumbnails"))
if addon.getSetting('enable_search_menu') == "true":
itemlist.append(Item(title=config.get_localized_string(30103), channel="search", path='special', action="mainlist",
thumbnail=get_thumb("search.png", view),
category=config.get_localized_string(30119), viewmode="list",
context=[{"title": config.get_localized_string(70286), "channel": "search", "action": "opciones",
"goto": True}]))
itemlist.append(Item(title=config.get_localized_string(30103), channel="search", path='special', action="mainlist",
thumbnail=get_thumb("search.png", view),
category=config.get_localized_string(30119), viewmode="list",
context=[{"title": config.get_localized_string(70286), "channel": "search", "action": "opciones",
"goto": True}]))
if addon.getSetting('enable_onair_menu') == "true":
itemlist.append(Item(channel="filmontv", action="mainlist", title=config.get_localized_string(50001),
thumbnail=get_thumb("on_the_air.png"), viewmode="thumbnails"))
itemlist.append(Item(title=config.get_localized_string(30102), channel="favorites", action="mainlist",
thumbnail=get_thumb("favorites.png", view),
category=config.get_localized_string(30102), viewmode="thumbnails"))
if addon.getSetting('enable_link_menu') == "true":
itemlist.append(Item(title=config.get_localized_string(70527), channel="kodfavorites", action="mainlist",
thumbnail=get_thumb("mylink.png", view), view=view,
category=config.get_localized_string(70527), viewmode="thumbnails"))
if config.get_videolibrary_support():
if addon.getSetting('enable_fav_menu') == "true":
itemlist.append(Item(title=config.get_localized_string(30102), channel="favorites", action="mainlist",
thumbnail=get_thumb("favorites.png", view),
category=config.get_localized_string(30102), viewmode="thumbnails"))
if config.get_videolibrary_support() and addon.getSetting('enable_library_menu') == "true":
itemlist.append(Item(title=config.get_localized_string(30131), channel="videolibrary", action="mainlist",
thumbnail=get_thumb("videolibrary.png", view),
category=config.get_localized_string(30119), viewmode="thumbnails",

View File

@@ -136,6 +136,8 @@ def get_channel_json(channel_name):
channel_path = filetools.join(config.get_runtime_path(), "specials", channel_name + ".json")
if not os.path.isfile(channel_path):
channel_path = filetools.join(config.get_runtime_path(), "servers", channel_name + ".json")
if not os.path.isfile(channel_path):
channel_path = filetools.join(config.get_runtime_path(), "servers", "debriders", channel_name + ".json")
if filetools.isfile(channel_path):
# logger.info("channel_data=" + channel_path)
channel_json = jsontools.load(filetools.read(channel_path))

View File

@@ -953,7 +953,7 @@ def downloadfileGzipped(url, pathfichero):
# print data
progreso.close()
logger.info("End download of the file)
logger.info("End download of the file")
return nombrefichero

View File

@@ -506,6 +506,11 @@ def get_server_json(server_name):
return server_json
def get_server_host(server_name):
from core import scrapertoolsV2
return [scrapertoolsV2.get_domain_from_url(pattern['url']) for pattern in get_server_json(server_name)['find_videos']['patterns']]
def get_server_controls_settings(server_name):
dict_settings = {}

View File

@@ -531,13 +531,14 @@ def videolibrary(itemlist, item, typography='', function_level=1):
return itemlist
def nextPage(itemlist, item, data='', patron='', function_level=1, next_page=''):
def nextPage(itemlist, item, data='', patron='', function_level=1, next_page='', resub=[]):
# Function_level is useful if the function is called by another function.
# If the call is direct, leave it blank
if next_page == '':
next_page = scrapertoolsV2.find_single_match(data, patron)
if next_page != "":
if resub: next_page = re.sub(resub[0], resub[1], next_page)
if 'http' not in next_page:
next_page = scrapertoolsV2.find_single_match(item.url, 'https?://[a-z0-9.-]+') + next_page
log('NEXT= ', next_page)

View File

@@ -1,3 +0,0 @@
{
"addon_version": "0.3"
}

167
lib/githash.py Normal file
View File

@@ -0,0 +1,167 @@
# https://github.com/chris3torek/scripts/blob/master/githash.py
#! /usr/bin/env python
"""
Compute git hash values.
This is meant to work with both Python2 and Python3; it
has been tested with Python2.7 and Python 3.4.
"""
from __future__ import print_function
import argparse
import os
import stat
import sys
from hashlib import sha1
if sys.version_info[0] >= 3:
# Python3 encodes "impossible" strings using UTF-8 and
# surrogate escapes. For instance, a file named <\300><\300>eek
# (where \300 is octal 300, 0xc0 hex) turns into '\udcc0\udcc0eek'.
# This is how we can losslessly re-encode this as a byte string:
path_to_bytes = lambda path: path.encode('utf8', 'surrogateescape')
# If we wish to print one of these byte strings, we have a
# problem, because they're not valid UTF-8. This method
# treats the encoded bytes as pass-through, which is
# probably the best we can do.
bpath_to_str = lambda path: path.decode('unicode_escape')
else:
# Python2 just uses byte strings, so OS paths are already
# byte strings and we return them unmodified.
path_to_bytes = lambda path: path
bpath_to_str = lambda path: path
def strmode(mode):
"""
Turn internal mode (octal with leading 0s suppressed) into
print form (i.e., left pad => right justify with 0s as needed).
"""
return mode.rjust(6, '0')
#
def classify(path):
"""
Return git classification of a path (as both mode,
100644/100755 etc, and git object type, i.e., blob vs tree).
Also throw in st_size field since we want it for file blobs.
"""
# We need the X bit of regular files for the mode, so
# might as well just use lstat rather than os.isdir().
st = os.lstat(path)
if stat.S_ISLNK(st.st_mode):
gitclass = 'blob'
mode = '120000'
elif stat.S_ISDIR(st.st_mode):
gitclass = 'tree'
mode = '40000' # note: no leading 0!
elif stat.S_ISREG(st.st_mode):
# 100755 if any execute permission bit set, else 100644
gitclass = 'blob'
mode = '100755' if (st.st_mode & 0o111) != 0 else '100644'
else:
raise ValueError('un-git-able file system entity %s' % fullpath)
return mode, gitclass, st.st_size
#
def blob_hash(stream, size):
"""
Return (as hash instance) the hash of a blob,
as read from the given stream.
"""
hasher = sha1()
hasher.update(('blob %u\0' % size).encode('ascii'))
nread = 0
while True:
# We read just 64K at a time to be kind to
# runtime storage requirements.
data = stream.read(65536)
if data == b'':
break
nread += len(data)
hasher.update(data)
if nread != size:
raise ValueError('%s: expected %u bytes, found %u bytes' %
(stream.name, size, nread))
return hasher
def symlink_hash(path):
"""
Return (as hash instance) the hash of a symlink.
Caller must use hexdigest() or digest() as needed on
the result.
"""
hasher = sha1()
data = path_to_bytes(os.readlink(path))
hasher.update(('blob %u\0' % len(data)).encode('ascii'))
hasher.update(data)
return hasher
def tree_hash(path):
"""
Return the hash of a tree. We need to know all
files and sub-trees. Since order matters, we must
walk the sub-trees and files in their natural (byte) order,
so we cannot use os.walk.
This is also slightly defective in that it does not know
about .gitignore files (we can't just read them since git
retains files that are in the index, even if they would be
ignored by a .gitignore directive).
We also do not (cannot) deal with submodules here.
"""
# Annoyingly, the tree object encodes its size, which requires
# two passes, one to find the size and one to compute the hash.
contents = os.listdir(path)
tsize = 0
to_skip = ('.', '..', '.git', '.DS_Store', '.idea', '.directory')
to_skip_ext = ('pyo', 'pyc')
pass1 = []
for entry in contents:
if entry not in to_skip and (entry.split('.')[1] not in to_skip_ext if '.' in entry else True):
fullpath = os.path.join(path, entry)
mode, gitclass, esize = classify(fullpath)
# git stores as mode<sp><entry-name>\0<digest-bytes>
encoded_form = path_to_bytes(entry)
tsize += len(mode) + 1 + len(encoded_form) + 1 + 20
pass1.append((fullpath, mode, gitclass, esize, encoded_form))
# Git's cache sorts foo/bar before fooXbar but after foo-bar,
# because it actually stores foo/bar as the literal string
# "foo/bar" in the index, rather than using recursion. That is,
# a directory name should sort as if it ends with '/' rather than
# with '\0'. Sort pass1 contents with funky sorting.
#
# (i[4] is the utf-8 encoded form of the name, i[1] is the
# mode which is '40000' for directories.)
pass1.sort(key = lambda i: i[4] + b'/' if i[1] == '40000' else i[4])
hasher = sha1()
hasher.update(('tree %u\0' % tsize).encode('ascii'))
for (fullpath, mode, gitclass, esize, encoded_form) in pass1:
sub_hash = generic_hash(fullpath, mode, esize)
# Annoyingly, git stores the tree hash as 20 bytes, rather
# than 40 ASCII characters. This is why we return the
# hash instance (so we can use .digest() directly).
# The format here is <mode><sp><path>\0<raw-hash>.
hasher.update(mode.encode('ascii'))
hasher.update(b' ')
hasher.update(encoded_form)
hasher.update(b'\0')
hasher.update(sub_hash.digest())
return hasher
def generic_hash(path, mode, size):
"""
Hash an object based on its mode.
"""
if mode == '120000':
hasher = symlink_hash(path)
elif mode == '40000':
hasher = tree_hash(path)
else:
# 100755 if any execute permission bit set, else 100644
with open(path, 'rb') as stream:
hasher = blob_hash(stream, size)
return hasher

View File

@@ -64,6 +64,10 @@ def run(item=None):
else:
item = Item(channel="channelselector", action="getmainlist", viewmode="movie")
if not config.get_setting('show_once'):
if not os.path.isdir(config.get_runtime_path() + '/.git'):
logger.info("DEV MODE OFF")
from platformcode import updater
updater.calcCurrHash()
from platformcode import xbmc_videolibrary
xbmc_videolibrary.ask_set_content(1, config.get_setting('videolibrary_kodi_force'))
config.set_setting('show_once', True)
@@ -75,9 +79,6 @@ def run(item=None):
if item.action == "":
logger.info("Item sin accion")
return
if item.action == "update":
updater.update()
# Action for main menu in channelselector
elif item.action == "getmainlist":

View File

@@ -1,314 +1,314 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------------
# Updater (kodi)
# --------------------------------------------------------------------------------
import json
import hashlib
import os
import sys
import threading
import time
import urllib
import shutil
import zipfile
from core import httptools, filetools, downloadtools
from platformcode import logger, platformtools, config
import json
import xbmc
import re
import xbmcaddon
from core import ziptools
from platformcode import config, logger
addon = xbmcaddon.Addon('plugin.video.kod')
_hdr_pat = re.compile("^@@ -(\d+),?(\d+)? \+(\d+),?(\d+)? @@.*")
branch = 'master'
user = 'kodiondemand'
repo = 'addon'
addonDir = xbmc.translatePath("special://home/addons/") + "plugin.video.kod/"
maxPage = 5 # the API returns 30 commits per page, so if the local copy is far behind we must walk forward through pages
trackingFile = "last_commit.txt"
def loadCommits(page=1):
    """
    Fetch one page of commits for the tracked branch from the GitHub API.

    The API returns 30 commits per page; `page` selects which batch.
    Returns the decoded JSON payload (a list of commit dicts).
    """
    api_url = ('https://api.github.com/repos/%s/%s/commits?sha=%s&page=%s'
               % (user, repo, branch, str(page)))
    raw = httptools.downloadpage(api_url).data
    logger.info(api_url)
    return json.loads(raw)
REMOTE_FILE = "https://github.com/kodiondemand/addon/archive/master.zip"
DESTINATION_FOLDER = xbmc.translatePath("special://home/addons") + "/plugin.video.kod"
REMOTE_VERSION_FILE = "https://raw.githubusercontent.com/kodiondemand/addon/master/version.json"
def check_addon_init():
    """
    No-op kept for call-site compatibility.

    The old Alfa-style periodic fix-checker that used to live here
    (a threaded monitor polling for addon fixes) was left behind as a
    large commented-out block; it has been removed as dead code.
    Updates are handled by the commit-based updater instead
    (presumably checkforupdates / calcCurrHash in this module —
    confirm against the service entry point).
    """
    logger.info()
    return
def checkforupdates(plugin_mode=True):
logger.info("kodiondemand.core.updater checkforupdates")
response = urllib.urlopen(REMOTE_VERSION_FILE)
data = json.loads(response.read())
'''
{
"update": {
"name": "Kodi on Demand",
"tag": "1.0.0",
"version": "1000",
"date": "03/05/2019",
"changes": "Added Updater"
}
}
'''
# remote is addon version without dots
remote_version = data["update"]["version"]
# tag version is version with dots used to a betterview gui
tag_version = data["update"]["tag"]
logger.info("kodiondemand.core.updater version remota="+tag_version+" "+remote_version)
'''
# Lee el fichero con la versión instalada
localFileName = LOCAL_VERSION_FILE
logger.info("kodiondemand.core.updater fichero local version: "+localFileName)
infile = open( localFileName )
data = infile.read()
infile.close()
#logger.info("xml local="+data)
'''
path_local = xbmc.translatePath("special://home/addons/") + "plugin.video.kod/version.json"
data = json.loads(open(path_local).read())
version_local = data["update"]["version"]
tag_local = data["update"]["tag"]
logger.info("kodiondemand.core.updater version local="+tag_local+" "+version_local)
if not addon.getSetting('addon_update_enabled'):
return False
logger.info('Cerco aggiornamenti..')
commits = loadCommits()
try:
numero_remote_version = int(remote_version)
numero_version_local = int(version_local)
localCommitFile = open(addonDir+trackingFile, 'r+')
except:
import traceback
logger.info(traceback.format_exc())
remote_version = ""
version_local = ""
calcCurrHash()
localCommitFile = open(addonDir + trackingFile, 'r+')
localCommitSha = localCommitFile.read()
localCommitSha = localCommitSha.replace('\n', '') # da testare
logger.info('Commit locale: ' + localCommitSha)
updated = False
if remote_version=="" or version_local=="":
arraydescargada = tag_version.split(".")
arraylocal = tag_local.split(".")
# local 2.8.0 - descargada 2.8.0 -> no descargar
# local 2.9.0 - descargada 2.8.0 -> no descargar
# local 2.8.0 - descargada 2.9.0 -> descargar
if len(arraylocal) == len(arraydescargada):
logger.info("caso 1")
hayqueactualizar = False
for i in range(0, len(arraylocal)):
print arraylocal[i], arraydescargada[i], int(arraydescargada[i]) > int(arraylocal[i])
if int(arraydescargada[i]) > int(arraylocal[i]):
hayqueactualizar = True
# local 2.8.0 - descargada 2.8 -> no descargar
# local 2.9.0 - descargada 2.8 -> no descargar
# local 2.8.0 - descargada 2.9 -> descargar
if len(arraylocal) > len(arraydescargada):
logger.info("caso 2")
hayqueactualizar = False
for i in range(0, len(arraydescargada)):
#print arraylocal[i], arraydescargada[i], int(arraydescargada[i]) > int(arraylocal[i])
if int(arraydescargada[i]) > int(arraylocal[i]):
hayqueactualizar = True
# local 2.8 - descargada 2.8.8 -> descargar
# local 2.9 - descargada 2.8.8 -> no descargar
# local 2.10 - descargada 2.9.9 -> no descargar
# local 2.5 - descargada 3.0.0
if len(arraylocal) < len(arraydescargada):
logger.info("caso 3")
hayqueactualizar = True
for i in range(0, len(arraylocal)):
#print arraylocal[i], arraydescargada[i], int(arraylocal[i])>int(arraydescargada[i])
if int(arraylocal[i]) > int(arraydescargada[i]):
hayqueactualizar = False
elif int(arraylocal[i]) < int(arraydescargada[i]):
hayqueactualizar = True
break
pos = None
for n, c in enumerate(commits):
if c['sha'] == localCommitSha:
pos = n
break
else:
hayqueactualizar = (numero_remote_version > numero_version_local)
# evitiamo che dia errore perchè il file è già in uso
localCommitFile.close()
updateFromZip()
return True
if hayqueactualizar:
if plugin_mode:
logger.info("kodiondemand.core.updater actualizacion disponible")
# Añade al listado de XBMC
import xbmcgui
#thumbnail = IMAGES_PATH+"Crystal_Clear_action_info.png"
thumbnail = os.path.join(config.get_runtime_path() , "resources" , "images", "service_update.png")
logger.info("thumbnail="+thumbnail)
listitem = xbmcgui.ListItem( "Scarica la versione "+tag_version, thumbnailImage=thumbnail )
itemurl = '%s?action=update&version=%s' % ( sys.argv[ 0 ] , tag_version )
import xbmcplugin
xbmcplugin.addDirectoryItem( handle = int(sys.argv[ 1 ]), url = itemurl , listitem=listitem, isFolder=True)
# Avisa con un popup
dialog = xbmcgui.Dialog()
dialog.ok("Versione "+tag_version+" disponibile","E' possibile scaricare la nuova versione del plugin\nattraverso l'opzione nel menù principale.")
if pos > 0:
changelog = ''
nCommitApplied = 0
for c in reversed(commits[:pos]):
commit = httptools.downloadpage(c['url']).data
commitJson = json.loads(commit)
logger.info('aggiornando a' + commitJson['sha'])
alreadyApplied = True
for file in commitJson['files']:
if file["filename"] == trackingFile: # il file di tracking non si modifica
continue
else:
logger.info(file["filename"])
if file['status'] == 'modified' or file['status'] == 'added':
if 'patch' in file:
text = ""
try:
localFile = open(addonDir + file["filename"], 'r+')
for line in localFile:
text += line
except IOError: # nuovo file
localFile = open(addonDir + file["filename"], 'w')
patched = apply_patch(text, (file['patch']+'\n').encode('utf-8'))
if patched != text: # non eseguo se già applicata (es. scaricato zip da github)
if getSha(patched) == file['sha']:
localFile.seek(0)
localFile.truncate()
localFile.writelines(patched)
localFile.close()
alreadyApplied = False
else: # nel caso ci siano stati problemi
logger.info('lo sha non corrisponde, scarico il file')
downloadtools.downloadfile(file['raw_url'], addonDir + file['filename'],
silent=True, continuar=True)
else: # è un file NON testuale, lo devo scaricare
# se non è già applicato
if not (filetools.isfile(addonDir + file['filename']) and getSha(
filetools.read(addonDir + file['filename']) == file['sha'])):
downloadtools.downloadfile(file['raw_url'], addonDir + file['filename'], silent=True, continuar=True)
alreadyApplied = False
elif file['status'] == 'removed':
try:
filetools.remove(addonDir+file["filename"])
alreadyApplied = False
except:
pass
elif file['status'] == 'renamed':
# se non è già applicato
if not (filetools.isfile(addonDir + file['filename']) and getSha(
filetools.read(addonDir + file['filename']) == file['sha'])):
dirs = file['filename'].split('/')
for d in dirs[:-1]:
if not filetools.isdir(addonDir + d):
filetools.mkdir(addonDir + d)
filetools.move(addonDir + file['previous_filename'], addonDir + file['filename'])
alreadyApplied = False
if not alreadyApplied: # non mando notifica se già applicata (es. scaricato zip da github)
changelog += commitJson['commit']['message'] + " | "
nCommitApplied += 1
if addon.getSetting("addon_update_message"):
time = nCommitApplied * 2000 if nCommitApplied < 10 else 20000
platformtools.dialog_notification('Kodi on Demand', changelog, time)
localCommitFile.seek(0)
localCommitFile.truncate()
localCommitFile.writelines(c['sha'])
localCommitFile.close()
else:
logger.info('Nessun nuovo aggiornamento')
return updated
def calcCurrHash():
from lib import githash
treeHash = githash.tree_hash(addonDir).hexdigest()
logger.info('tree hash: ' + treeHash)
commits = loadCommits()
lastCommitSha = commits[0]['sha']
page = 1
while commits and page <= maxPage:
found = False
for n, c in enumerate(commits):
if c['commit']['tree']['sha'] == treeHash:
localCommitFile = open(addonDir + trackingFile, 'w')
localCommitFile.write(c['sha'])
localCommitFile.close()
found = True
break
else:
page += 1
commits = loadCommits(page)
import xbmcgui
yes_pressed = xbmcgui.Dialog().yesno( "Versione "+tag_version+" disponibile" , "Installarla?" )
if yes_pressed:
params = {"version":tag_version}
update(params)
if found:
break
else:
logger.info('Non sono riuscito a trovare il commit attuale, scarico lo zip')
updateFromZip()
# se ha scaricato lo zip si trova di sicuro all'ultimo commit
localCommitFile = open(addonDir + trackingFile, 'w')
localCommitFile.write(lastCommitSha)
localCommitFile.close()
def update():
# Descarga el ZIP
logger.info("kodiondemand.core.updater update")
remotefilename = REMOTE_FILE
# https://gist.github.com/noporpoise/16e731849eb1231e86d78f9dfeca3abc Grazie!
def apply_patch(s,patch,revert=False):
    """
    Apply unified diff patch to string s to recover newer string.
    If revert is True, treat s as the newer string, recover older string.

    s      -- full file contents as a single string (the "old" side,
              or the "new" side when revert=True).
    patch  -- unified-diff text for one file (e.g. the GitHub API
              "patch" field).
    Returns the patched contents as a single string.
    Raises Exception when a hunk header cannot be parsed.
    """
    s = s.splitlines(True)
    p = patch.splitlines(True)
    t = ''
    i = sl = 0
    # Forward apply takes '+' lines from hunk column 1; revert takes
    # '-' lines from hunk column 3 of the @@ header.
    (midx,sign) = (1,'+') if not revert else (3,'-')
    while i < len(p) and p[i].startswith(("---","+++")): i += 1 # skip the ---/+++ file header lines
    while i < len(p):
        m = _hdr_pat.match(p[i])  # hunk header: @@ -a,b +c,d @@
        if not m: raise Exception("Cannot process diff")
        i += 1
        # 0-based source line where the hunk starts; a zero-length range
        # (",0") means the hunk inserts *after* the stated line.
        l = int(m.group(midx))-1 + (m.group(midx+1) == '0')
        t += ''.join(s[sl:l])  # copy untouched lines up to this hunk
        sl = l
        while i < len(p) and p[i][0] != '@':
            # A following "\ No newline at end of file" marker means the
            # current line must lose its trailing newline.
            if i+1 < len(p) and p[i+1][0] == '\\': line = p[i][:-1]; i += 2
            else: line = p[i]; i += 1
            if len(line) > 0:
                # Emit context lines and lines added in the chosen direction.
                if line[0] == sign or line[0] == ' ': t += line[1:]
                # Advance the source cursor past context/removed lines only.
                sl += (line[0] != sign)
    t += ''.join(s[sl:])  # copy everything after the last hunk
    return t
def getSha(fileText):
    # Git blob object id: sha1("blob <byte-length>\0<content>").
    # NOTE(review): Python 2 only — hashing a str built by concatenation
    # works there because str is bytes; under Python 3 this would need
    # explicit encoding. Length must be the *byte* length for the hash
    # to match git's, which holds as long as fileText is a byte string.
    return hashlib.sha1("blob " + str(len(fileText)) + "\0" + fileText).hexdigest()
def updateFromZip():
platformtools.dialog_notification('Kodi on Demand', 'Aggiornamento in corso...')
remotefilename = 'https://github.com/' + user + "/" + repo + "/archive/" + branch + ".zip"
localfilename = xbmc.translatePath("special://home/addons/") + "plugin.video.kod.update.zip"
logger.info("kodiondemand.core.updater remotefilename=%s" % remotefilename)
logger.info("kodiondemand.core.updater localfilename=%s" % localfilename)
logger.info("kodiondemand.core.updater descarga fichero...")
urllib.urlretrieve(remotefilename,localfilename)
#from core import downloadtools
#downloadtools.downloadfile(remotefilename, localfilename, continuar=False)
import urllib
urllib.urlretrieve(remotefilename, localfilename)
# Lo descomprime
logger.info("kodiondemand.core.updater descomprime fichero...")
unzipper = ziptools.ziptools()
destpathname = xbmc.translatePath("special://home/addons/")
destpathname = xbmc.translatePath("special://home/addons/")
logger.info("kodiondemand.core.updater destpathname=%s" % destpathname)
unzipper.extract(localfilename,destpathname, os.path.join(xbmc.translatePath("special://home/addons/"), "plugin.video.kod/"))
unzipper = ziptools()
unzipper.extract(localfilename, destpathname)
# puliamo tutto
shutil.rmtree(addonDir)
filetools.rename(destpathname + "addon-" + branch, addonDir)
temp_dir = os.path.join(destpathname,"addon-master")
files = os.listdir(temp_dir)
#for f in files:
# shutil.move(os.path.join(temp_dir, f), os.path.join(xbmc.translatePath("special://home/addons/"), "plugin.video.kod/", f))
# Borra el zip descargado
logger.info("kodiondemand.core.updater borra fichero...")
os.remove(localfilename)
#os.remove(temp_dir)
logger.info("kodiondemand.core.updater ...fichero borrado")
# os.remove(temp_dir)
platformtools.dialog_notification('Kodi on Demand', 'Aggiornamento completato!')
'''
def check_addon_updates(verbose=False):
logger.info()
ADDON_UPDATES_JSON = 'https://extra.alfa-addon.com/addon_updates/updates.json'
ADDON_UPDATES_ZIP = 'https://extra.alfa-addon.com/addon_updates/updates.zip'
class ziptools:
def extract(self, file, dir, folder_to_extract="", overwrite_question=False, backup=False):
logger.info("file=%s" % file)
logger.info("dir=%s" % dir)
try:
last_fix_json = os.path.join(config.get_runtime_path(), 'last_fix.json') # información de la versión fixeada del usuario
# Se guarda en get_runtime_path en lugar de get_data_path para que se elimine al cambiar de versión
if not dir.endswith(':') and not os.path.exists(dir):
os.mkdir(dir)
# Descargar json con las posibles actualizaciones
# -----------------------------------------------
data = httptools.downloadpage(ADDON_UPDATES_JSON, timeout=2).data
if data == '':
logger.info('No se encuentran actualizaciones del addon')
if verbose:
platformtools.dialog_notification(config.get_localized_string(70667), config.get_localized_string(70668))
return False
zf = zipfile.ZipFile(file)
if not folder_to_extract:
self._createstructure(file, dir)
num_files = len(zf.namelist())
data = jsontools.load(data)
if 'addon_version' not in data or 'fix_version' not in data:
logger.info('No hay actualizaciones del addon')
if verbose:
platformtools.dialog_notification(config.get_localized_string(70667), config.get_localized_string(70668))
return False
for nameo in zf.namelist():
name = nameo.replace(':', '_').replace('<', '_').replace('>', '_').replace('|', '_').replace('"', '_').replace('?', '_').replace('*', '_')
logger.info("name=%s" % nameo)
if not name.endswith('/'):
logger.info("no es un directorio")
try:
(path, filename) = os.path.split(os.path.join(dir, name))
logger.info("path=%s" % path)
logger.info("name=%s" % name)
if folder_to_extract:
if path != os.path.join(dir, folder_to_extract):
break
else:
os.makedirs(path)
except:
pass
if folder_to_extract:
outfilename = os.path.join(dir, filename)
# Comprobar versión que tiene instalada el usuario con versión de la actualización
# --------------------------------------------------------------------------------
current_version = config.get_addon_version(with_fix=False)
if current_version != data['addon_version']:
logger.info('No hay actualizaciones para la versión %s del addon' % current_version)
if verbose:
platformtools.dialog_notification(config.get_localized_string(70667), config.get_localized_string(70668))
return False
if os.path.exists(last_fix_json):
try:
lastfix = {}
lastfix = jsontools.load(filetools.read(last_fix_json))
if lastfix['addon_version'] == data['addon_version'] and lastfix['fix_version'] == data['fix_version']:
logger.info(config.get_localized_string(70670) % (data['addon_version'], data['fix_version']))
if verbose:
platformtools.dialog_notification(config.get_localized_string(70667), config.get_localized_string(70671) % (data['addon_version'], data['fix_version']))
return False
except:
if lastfix:
logger.error('last_fix.json: ERROR en: ' + str(lastfix))
else:
logger.error('last_fix.json: ERROR desconocido')
lastfix = {}
outfilename = os.path.join(dir, name)
logger.info("outfilename=%s" % outfilename)
try:
if os.path.exists(outfilename) and overwrite_question:
from platformcode import platformtools
dyesno = platformtools.dialog_yesno("El archivo ya existe",
"El archivo %s a descomprimir ya existe" \
", ¿desea sobrescribirlo?" \
% os.path.basename(outfilename))
if not dyesno:
break
if backup:
import time
import shutil
hora_folder = "Copia seguridad [%s]" % time.strftime("%d-%m_%H-%M", time.localtime())
backup = os.path.join(config.get_data_path(), 'backups', hora_folder, folder_to_extract)
if not os.path.exists(backup):
os.makedirs(backup)
shutil.copy2(outfilename, os.path.join(backup, os.path.basename(outfilename)))
# Descargar zip con las actualizaciones
# -------------------------------------
localfilename = os.path.join(config.get_data_path(), 'temp_updates.zip')
if os.path.exists(localfilename): os.remove(localfilename)
outfile = open(outfilename, 'wb')
outfile.write(zf.read(nameo))
except:
logger.error("Error en fichero " + nameo)
downloadtools.downloadfile(ADDON_UPDATES_ZIP, localfilename, silent=True)
# Descomprimir zip dentro del addon
# ---------------------------------
def _createstructure(self, file, dir):
self._makedirs(self._listdirs(file), dir)
def create_necessary_paths(filename):
try:
unzipper = ziptools.ziptools()
unzipper.extract(localfilename, config.get_runtime_path())
(path, name) = os.path.split(filename)
os.makedirs(path)
except:
import xbmc
xbmc.executebuiltin('XBMC.Extract("%s", "%s")' % (localfilename, config.get_runtime_path()))
time.sleep(1)
# Borrar el zip descargado
# ------------------------
os.remove(localfilename)
# Guardar información de la versión fixeada
# -----------------------------------------
if 'files' in data: data.pop('files', None)
filetools.write(last_fix_json, jsontools.dump(data))
logger.info(config.get_localized_string(70672) % (data['addon_version'], data['fix_version']))
if verbose:
platformtools.dialog_notification(config.get_localized_string(70673), config.get_localized_string(70671) % (data['addon_version'], data['fix_version']))
return True
pass
except:
logger.error('Error al comprobar actualizaciones del addon!')
logger.error(traceback.format_exc())
if verbose:
platformtools.dialog_notification(config.get_localized_string(70674), config.get_localized_string(70675))
return False
'''
def _makedirs(self, directories, basedir):
for dir in directories:
curdir = os.path.join(basedir, dir)
if not os.path.exists(curdir):
os.mkdir(curdir)
def _listdirs(self, file):
zf = zipfile.ZipFile(file)
dirs = []
for name in zf.namelist():
if name.endswith('/'):
dirs.append(name)
dirs.sort()
return dirs

View File

@@ -93,6 +93,14 @@ msgctxt "#30021"
msgid "Number of links to check"
msgstr ""
msgctxt "#30022"
msgid "YES"
msgstr ""
msgctxt "#30023"
msgid "NO"
msgstr ""
msgctxt "#30043"
msgid "Force view mode:"
msgstr ""
@@ -4079,7 +4087,7 @@ msgid "[Trakt] Remove %s from your watchlist"
msgstr ""
msgctxt "#70344"
msgid "Add to %s your watchlist""
msgid "Add to %s your watchlist"
msgstr ""
msgctxt "#70345"
@@ -4703,7 +4711,7 @@ msgid "Genre: "
msgstr ""
msgctxt "#70500"
msgid "Notification([COLOR red][B]Update Kodi to its latest version[/B][/COLOR], [COLOR skyblue]for best info[/COLOR],8000, "http://i.imgur.com/mHgwcn3.png")"
msgid "Notification([COLOR red][B]Update Kodi to its latest version[/B][/COLOR], [COLOR skyblue]for best info[/COLOR],8000, 'http://i.imgur.com/mHgwcn3.png')"
msgstr ""
msgctxt "#70501"
@@ -5019,7 +5027,7 @@ msgid "Timeout (maximum waiting time)"
msgstr ""
msgctxt "#70581"
msgid "Interval between automatic updates (hours)"
msgid "Check for updates when kodi starts"
msgstr ""
msgctxt "#70582"
@@ -5035,7 +5043,7 @@ msgid "Do you want to update Quasar to avoid errors?"
msgstr ""
msgctxt "#70585"
msgid "[B]Renumbering[/B] (beta)"
msgid "Renumbering"
msgstr ""
msgctxt "#70586"
@@ -5454,11 +5462,11 @@ msgid "Enter the number of the starting season"
msgstr ""
msgctxt "#70687"
msgid "Enter the number of the starting episode"
msgid "Special Episodes"
msgstr ""
msgctxt "#70688"
msgid "Episode 0 is a special episode, enter the number of this episode (0 to skip it)"
msgid "Are there special episodes in the series \n(Episode 0 Excluded)?"
msgstr ""
msgctxt "#70689"
@@ -5543,4 +5551,24 @@ msgstr ""
msgctxt "#70711"
msgid "Ops! There's been a problem during saving your setting"
msgstr ""
msgctxt "#70712"
msgid "Enable Automatic Renumbering"
msgstr ""
msgctxt "#70713"
msgid "There is not enough information on TVDB for renumbering"
msgstr ""
msgctxt "#70714"
msgid "Modify "
msgstr ""
msgctxt "#70715"
msgid "Customize Homepage"
msgstr ""
msgctxt "#70716"
msgid "Choose menus you wanna in homepage"
msgstr ""

View File

@@ -93,6 +93,14 @@ msgctxt "#30021"
msgid "Number of links to check"
msgstr "Numero di link da verificare"
msgctxt "#30022"
msgid "YES"
msgstr "SÌ"
msgctxt "#30023"
msgid "NO"
msgstr "NO"
msgctxt "#30043"
msgid "Force view mode:"
msgstr "Forza modalità di visualizzazione:"
@@ -383,7 +391,7 @@ msgstr "Saghe"
msgctxt "#50001"
msgid "On Air"
msgstr "Adesso in onda"
msgstr "Adesso in Onda"
msgctxt "#50002"
msgid "Latest News"
@@ -4078,7 +4086,7 @@ msgid "[Trakt] Remove %s from your watchlist"
msgstr "[Trakt] Rimuovi %s dalla tua watchlist"
msgctxt "#70344"
msgid "Add to %s your watchlist""
msgid "Add to %s your watchlist"
msgstr "[Trakt] Aggiungi %s alla tua watchlist"
msgctxt "#70345"
@@ -4702,8 +4710,8 @@ msgid "Genre: "
msgstr "Genere: "
msgctxt "#70500"
msgid "Notification([COLOR red][B]Update Kodi to its latest version[/B][/COLOR], [COLOR skyblue]for best info[/COLOR],8000, "http://i.imgur.com/mHgwcn3.png")"
msgstr "Notifica([COLOR red][B]Aggiorna Kodi alla sua ultima versione[/B][/COLOR], [COLOR skyblue]per migliori info[/COLOR],8000, "http://i.imgur.com/mHgwcn3.png")"
msgid "Notification([COLOR red][B]Update Kodi to its latest version[/B][/COLOR], [COLOR skyblue]for best info[/COLOR],8000, 'http://i.imgur.com/mHgwcn3.png')"
msgstr "Notification([COLOR red][B]Aggiorna Kodi alla sua ultima versione[/B][/COLOR], [COLOR skyblue]per migliori info[/COLOR],8000, 'http://i.imgur.com/mHgwcn3.png')"
msgctxt "#70501"
msgid "Search did not match (%s)"
@@ -5018,8 +5026,8 @@ msgid "Timeout (maximum waiting time)"
msgstr "Timeout (tempo massimo da aspettare)"
msgctxt "#70581"
msgid "Interval between automatic updates (hours)"
msgstr "Intervallo tra gli aggiornamenti automatici (ore)"
msgid "Check for updates when kodi starts"
msgstr "Controlla gli aggiornamenti all'apertura di kodi"
msgctxt "#70582"
msgid "Do you want to see messages about the updates?"
@@ -5034,8 +5042,8 @@ msgid "Do you want to update Quasar to avoid errors?"
msgstr "Vuoi aggiornare Quasar per evitare errori?"
msgctxt "#70585"
msgid "[B]Renumbering[/B] (beta)"
msgstr "[B]Rinumerazione[/B] (beta)"
msgid "Renumbering"
msgstr "Rinumerazione"
msgctxt "#70586"
msgid "Set up series number..."
@@ -5453,12 +5461,12 @@ msgid "Enter the number of the starting season"
msgstr "Inserisci il numero della stagione di partenza"
msgctxt "#70687"
msgid "Enter the number of the starting episode"
msgstr "Inserisci il numero dell'episodio di partenza"
msgid "Special Episodes"
msgstr "Episodi Speciali"
msgctxt "#70688"
msgid "Episode 0 is a special episode, enter the number of this episode (0 to skip it)"
msgstr "L'episodio 0 è un episodio speciale, inserisci il numero di questo episodio (0 per saltare)"
msgid "Are there special episodes in the series \n(Episode 0 Excluded)?"
msgstr "Sono presenti episodi speciali nella serie \n(Episodio 0 Escluso)?"
msgctxt "#70689"
msgid "Enable Download"
@@ -5542,4 +5550,24 @@ msgstr "Le nuove impostazioni sono state salvate"
msgctxt "#70710"
msgid "Ops! There's been a problem during saving your setting"
msgstr "Ops! C'è stato un problema durante il salvataggio delle impostazioni"
msgstr "Ops! C'è stato un problema durante il salvataggio delle impostazioni"
msgctxt "#70712"
msgid "Enable Automatic Renumbering"
msgstr "Abilita Rinumerazione Automatica"
msgctxt "#70713"
msgid "There is not enough information on TVDB for renumbering"
msgstr "Non ci sono Abbastanza informazioni su TVDB per la Rinumerazione"
msgctxt "#70714"
msgid "Modify "
msgstr "Modifica "
msgctxt "#70715"
msgid "Customize Homepage"
msgstr "Personalizza Homepage"
msgctxt "#70716"
msgid "Choose menus you wanna in homepage"
msgstr "Seleziona gli elementi che vuoi visualizzare nella homepage"

View File

@@ -33,6 +33,17 @@
<setting id="downloadlistpath" type="folder" label="30018" visible="eq(-2,true)" default=""/>
</category>
<category label="70715">
<setting label="70716" type="lsep"/>
<setting id="enable_news_menu" label="30130" type="bool" default="true"/>
<setting id="enable_channels_menu" label="30118" type="bool" default="true"/>
<setting id="enable_search_menu" label="30103" type="bool" default="true"/>
<setting id="enable_onair_menu" label="50001" type="bool" default="true"/>
<setting id="enable_link_menu" label="70527" type="bool" default="true"/>
<setting id="enable_fav_menu" label="30102" type="bool" default="true"/>
<setting id="enable_library_menu" label="30131" type="bool" default="true"/>
</category>
<category label="30501">
<!-- <setting id="downloadpath" type="folder" label="30017" default=""/>
<setting id="downloadlistpath" type="folder" label="30018" default=""/> -->
@@ -138,8 +149,8 @@
<setting type="sep"/>
<setting label="70579" type="lsep"/>
<setting id="addon_update_timer" type="labelenum" values="0|6|12|24" label="70581" default="12"/>
<setting id="addon_update_message" type="bool" label="70582" default="false"/>
<setting id="addon_update_enabled" type="bool" label="70581" default="true"/>
<setting id="addon_update_message" type="bool" label="70582" default="true"/>
<setting label="Lista activa" type="text" id="lista_activa" default="alfavorites-default.json" visible="false"/>

View File

@@ -44,7 +44,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
from lib import unshortenit
data, status = unshortenit.unshorten(url)
logger.info("Data - Status zcrypt vcrypt.net: [%s] [%s] " %(data, status))
elif 'linkup' in url:
elif 'linkup' in url or 'bit.ly' in url:
idata = httptools.downloadpage(url).data
data = scrapertoolsV2.find_single_match(idata, "<iframe[^<>]*src=\\'([^'>]*)\\'[^<>]*>")
#fix by greko inizio

View File

@@ -5,7 +5,7 @@
"patterns": [
{
"pattern": "(?:openload|oload|openloads).*?/(?:embed|f|e|f[0-9])/([0-9a-zA-Z-_]+)",
"url": "https://openload.co/embed/\\1/"
"url": "https://oload.stream/embed/\\1/"
}
]
},

View File

@@ -3,8 +3,11 @@
from core import httptools
from core import jsontools
from core import scrapertools
from core.servertools import get_server_host
from platformcode import config, logger
host = "https://" + get_server_host('openload')[0]
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
@@ -21,6 +24,7 @@ def test_video_exists(page_url):
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info()
itemlist = []
@@ -104,14 +108,13 @@ def decode(code, parseInt, _0x59ce16, _1x4bfb36):
_0x145894 += 1
url = "https://openload.co/stream/%s?mime=true" % _0x1bf6e5
url = host + "/stream/%s?mime=true" % _0x1bf6e5
return url
def login():
logger.info()
data = httptools.downloadpage('https://openload.co').data
data = httptools.downloadpage(host).data
_csrf = scrapertools.find_single_match(data, '<input type="hidden" name="_csrf" value="([^"]+)">')
post = {
@@ -120,7 +123,7 @@ def login():
'LoginForm[rememberMe]' : 1,
'_csrf' : _csrf
}
data = httptools.downloadpage('https://openload.co/login', post = post).data
data = httptools.downloadpage(host + '/login', post = post).data
if 'Login key has already been sent.' in data:
while True :
@@ -134,7 +137,7 @@ def login():
break
else:
post['LoginForm[loginkey]'] = code
data = httptools.downloadpage('https://openload.co/login', post = post).data
data = httptools.downloadpage(host + '/login', post = post).data
if 'Welcome back,' in data: break
@@ -145,14 +148,14 @@ def get_api_keys():
api_key = config.get_setting('api_key', __file__)
if not api_key or not api_login:
login()
data = httptools.downloadpage('https://openload.co/account').data
data = httptools.downloadpage(host + '/account').data
post = {
'FTPKey[password]' : config.get_setting('password', __file__),
'_csrf' : scrapertools.find_single_match(data, '<input type="hidden" name="_csrf" value="([^"]+)">')
}
data = httptools.downloadpage('https://openload.co/account', post = post).data
data = httptools.downloadpage(host + '/account', post = post).data
api_login = scrapertools.find_single_match(data, '<tr><td>ID:</td><td>([^<]+)</td></tr>')
api_key = scrapertools.find_single_match(data, 'Your FTP Password/API Key is: ([^<]+) </div>')
config.set_setting('api_login', api_login, __file__)
@@ -168,12 +171,12 @@ def get_link_api(page_url):
file_id = scrapertools.find_single_match(page_url, '(?:embed|f)/([0-9a-zA-Z-_]+)')
data = httptools.downloadpage("https://api.openload.co/1/file/dlticket?file=%s&login=%s&key=%s" % (file_id, api_login, api_key)).data
data = httptools.downloadpage(host + "/api/1/file/dlticket?file=%s&login=%s&key=%s" % (file_id, api_login, api_key)).data
data = jsontools.load_json(data)
# logger.info(data)
if data["status"] == 200:
ticket = data["result"]["ticket"]
data = httptools.downloadpage("https://api.openload.co/1/file/dl?file=%s&ticket=%s" % (file_id, ticket)).data
data = httptools.downloadpage(host + "/api/1/file/dl?file=%s&ticket=%s" % (file_id, ticket)).data
data = jsontools.load(data)
return data['result']['url'].replace("https", "http")

View File

@@ -9,11 +9,7 @@
{
"pattern": "wstream.video/(?:embed-|videos/|video/)?([a-z0-9A-Z]+)",
"url": "http:\/\/wstream.video\/\\1"
},
{
"pattern": "wstream.video/(?:embed-|videos/|video/)?([a-z0-9A-Z]+)",
"url": "http:\/\/wstream.video\/video\/\\1"
}
}
],
"ignore_urls": [ ]
},

View File

@@ -24,15 +24,14 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
video_urls = []
data = httptools.downloadpage(page_url, headers=headers).data.replace('https', 'http')
# logger.info("[wstream.py] data=" + data)
logger.info("[wstream.py] data=" + data)
vid = scrapertools.find_multiple_matches(data, 'download_video.*?>.*?<.*?<td>([^\,,\s]+)')
headers.append(['Referer', page_url])
post_data = scrapertools.find_single_match(data,"</div>\s*<script type='text/javascript'>(eval.function.p,a,c,k,e,.*?)\s*</script>")
if post_data != "":
from lib import jsunpack
data = jsunpack.unpack(post_data)
logger.info("[wstream.py] data=" + data)
block = scrapertools.find_single_match(data, 'sources:\s*\[[^\]]+\]')
if block: data = block
@@ -41,7 +40,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
i = 0
for media_url in media_urls:
video_urls.append([vid[i] + " mp4 [wstream] ", media_url + '|' + _headers])
video_urls.append([vid[i] if vid else 'video' + " mp4 [wstream] ", media_url + '|' + _headers])
i = i + 1
for video_url in video_urls:
@@ -54,7 +53,7 @@ def find_videos(data):
encontrados = set()
devuelve = []
patronvideos = r"wstream.video/(?:embed-)?([a-z0-9A-Z]+)"
patronvideos = r"wstream.video/(?:embed-|videos/|video/)?([a-z0-9A-Z]+)"
logger.info("[wstream.py] find_videos #" + patronvideos + "#")
matches = re.compile(patronvideos, re.DOTALL).findall(data)
@@ -69,4 +68,4 @@ def find_videos(data):
else:
logger.info(" url duplicada=" + url)
return devuelve
return devuelve

View File

@@ -299,7 +299,10 @@ def start(itemlist, item):
# Intenta reproducir los enlaces
# Si el canal tiene metodo play propio lo utiliza
channel = __import__('channels.%s' % channel_id, None, None, ["channels.%s" % channel_id])
try:
channel = __import__('channels.%s' % channel_id, None, None, ["channels.%s" % channel_id])
except:
channel = __import__('specials.%s' % channel_id, None, None, ["specials.%s" % channel_id])
if hasattr(channel, 'play'):
resolved_item = getattr(channel, 'play')(videoitem)
if len(resolved_item) > 0:

View File

@@ -3,20 +3,43 @@
# autorenumber - Rinomina Automaticamente gli Episodi
# --------------------------------------------------------------------------------
try:
import xbmcgui
except:
xbmcgui = None
'''
USO:
1) utilizzare autorenumber.renumber(itemlist) nelle funzioni peliculas e similari per aggiungere il menu contestuale
2) utilizzare autorenumber.renumber(itemlist, item, typography) nella funzione episodios
from core import jsontools, tvdb
3) Aggiungere le seguenti stringhe nel json del canale (per attivare la configurazione di autonumerazione del canale)
{
"id": "autorenumber",
"type": "bool",
"label": "Abilita Rinumerazione Automatica",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "autorenumber_mode",
"type": "bool",
"label": "Sono presenti episodi speciali nella serie (Episodio 0 Escluso)?",
"default": false,
"enabled": true,
"visible": "eq(-1,true)"
}
'''
import re, base64, json
from core import jsontools, tvdb, scrapertoolsV2
from core.support import typo, log
from platformcode import config
from platformcode import platformtools
from platformcode import config, platformtools
from platformcode.config import get_setting
TAG_TVSHOW_RENUMERATE = "TVSHOW_AUTORENUMBER"
TAG_SEASON_EPISODE = "season_episode"
__channel__ = "autorenumber"
TAG_ID = "ID"
TAG_SEASON = "Season"
TAG_EPISODE = "Episode"
TAG_MODE = "Mode"
__channel__ = "autorenumber"
def access():
allow = False
@@ -27,137 +50,290 @@ def access():
return allow
def context():
def context(exist):
if access():
_context = [{"title": config.get_localized_string(70585),
"action": "config_item",
modify = config.get_localized_string(70714) if exist else ''
_context = [{"title": typo(modify + config.get_localized_string(70585), 'bold'),
"action": "manual_config_item",
"channel": "autorenumber"}]
return _context
def config_item(item):
def manual_config_item(item):
# Configurazione Semi Automatica, utile in caso la numerazione automatica fallisca
log(item)
tvdb.find_and_set_infoLabels(item)
data = ''
data = add_season(data)
item.channel = item.from_channel
dict_series = jsontools.get_node_from_file(item.channel, TAG_TVSHOW_RENUMERATE)
title = item.show
count = 0
# Trova l'ID dellla serie
while not item.infoLabels['tvdb_id']:
try:
item.show = platformtools.dialog_input(default=item.show, heading=config.get_localized_string(30112))
tvdb.find_and_set_infoLabels(item)
count = count + 1
except:
heading = config.get_localized_string(70704)
item.infoLabels['tvdb_id'] = platformtools.dialog_numeric(0, heading)
data.append(item.infoLabels['tvdb_id'])
if item.infoLabels['tvdb_id'] != 0:
write_data(item.from_channel, title, data)
if item.infoLabels['tvdb_id']:
ID = item.infoLabels['tvdb_id']
dict_renumerate = {TAG_ID: ID}
dict_series[title] = dict_renumerate
# Trova la Stagione
if any( word in title.lower() for word in ['specials', 'speciali'] ):
heading = config.get_localized_string(70686)
season = platformtools.dialog_numeric(0, heading, '0')
dict_renumerate[TAG_SEASON] = season
elif RepresentsInt(title.split()[-1]):
heading = config.get_localized_string(70686)
season = platformtools.dialog_numeric(0, heading, title.split()[-1])
dict_renumerate[TAG_SEASON] = season
else:
heading = config.get_localized_string(70686)
season = platformtools.dialog_numeric(0, heading, '1')
dict_renumerate[TAG_SEASON] = season
# Richiede se ci sono speciali nella stagione
mode = platformtools.dialog_yesno(config.get_localized_string(70687), config.get_localized_string(70688), nolabel=config.get_localized_string(30023), yeslabel=config.get_localized_string(30022))
if mode == 0: dict_renumerate[TAG_MODE] = False
else: dict_renumerate[TAG_MODE] = True
# Imposta la voce Episode
dict_renumerate[TAG_EPISODE] = []
# Scrive nel json
jsontools.update_node(dict_series, item.channel, TAG_TVSHOW_RENUMERATE)[0]
else:
message = config.get_localized_string(60444)
heading = item.show.strip()
platformtools.dialog_notification(heading, message)
def add_season(data=None):
log("data= ", data)
heading = config.get_localized_string(70686)
season = platformtools.dialog_numeric(0, heading)
if season != "":
heading = config.get_localized_string(70687)
episode = platformtools.dialog_numeric(0, heading)
if episode == "0":
heading = config.get_localized_string(70688)
special = platformtools.dialog_numeric(0, heading)
return [int(season), int(episode), int(special)]
elif episode != '':
return [int(season), int(episode), '']
def write_data(channel, show, data):
def config_item(item, itemlist=[], typography='', active=False):
# Configurazione Automatica, Tenta la numerazione Automatica degli episodi
log()
dict_series = jsontools.get_node_from_file(channel, TAG_TVSHOW_RENUMERATE)
tvshow = show.strip()
# list_season_episode = dict_series.get(tvshow, {}).get(TAG_SEASON_EPISODE, [])
title = item.fulltitle
if data:
dict_renumerate = {TAG_SEASON_EPISODE: data}
dict_series[tvshow] = dict_renumerate
else:
dict_series.pop(tvshow, None)
dict_series = jsontools.get_node_from_file(item.channel, TAG_TVSHOW_RENUMERATE)
try:
ID = dict_series[item.show.rstrip()][TAG_ID]
except:
ID = ''
# Pulizia del Titolo
if any( word in title.lower() for word in ['specials', 'speciali']):
item.show = re.sub(r'\sspecials|\sspeciali', '', item.show.lower())
log('ITEM SHOW= ',item.show)
tvdb.find_and_set_infoLabels(item)
elif not item.infoLabels['tvdb_id']:
item.show = title.rstrip('123456789 ')
tvdb.find_and_set_infoLabels(item)
result = jsontools.update_node(dict_series, channel, TAG_TVSHOW_RENUMERATE)[0]
if result:
if data:
message = config.get_localized_string(60446)
if not ID and active:
if item.infoLabels['tvdb_id']:
ID = item.infoLabels['tvdb_id']
dict_renumerate = {TAG_ID: ID}
dict_series[title] = dict_renumerate
# Trova La Stagione
if any( word in title.lower() for word in ['specials', 'speciali']):
dict_renumerate[TAG_SEASON] = '0'
elif RepresentsInt(title.split()[-1]):
dict_renumerate[TAG_SEASON] = title.split()[-1]
else: dict_renumerate[TAG_SEASON] = '1'
dict_renumerate[TAG_EPISODE] = []
settings_node = jsontools.get_node_from_file(item.channel, 'settings')
dict_renumerate[TAG_MODE] = settings_node['autorenumber_mode']
jsontools.update_node(dict_series, item.channel, TAG_TVSHOW_RENUMERATE)[0]
return renumber(itemlist, item, typography)
else:
message = config.get_localized_string(60444)
else:
message = config.get_localized_string(70593)
heading = show.strip()
platformtools.dialog_notification(heading, message)
return itemlist
else:
return renumber(itemlist, item, typography)
def renumber(itemlist, item='', typography=''):
log()
# Seleziona la funzione Adatta, Menu Contestuale o Rinumerazione
# import web_pdb; web_pdb.set_trace()
if item:
try:
settings_node = jsontools.get_node_from_file(item.channel, 'settings')
# Controlla se la Serie è già stata rinumerata
try:
dict_series = jsontools.get_node_from_file(item.channel, TAG_TVSHOW_RENUMERATE)
SERIES = dict_series[item.show.rstrip()]['season_episode']
S = SERIES[0]
E = SERIES[1]
SP = SERIES[2]
ID = SERIES[3]
page = 1
epList = []
exist = True
item.infoLabels['tvdb_id'] = ID
tvdb.set_infoLabels_item(item)
TITLE = item.fulltitle.rstrip()
ID = dict_series[TITLE][TAG_ID]
while exist:
data = tvdb.otvdb_global.get_list_episodes(ID,page)
if data:
for episodes in data['data']:
if episodes['airedSeason'] >= S:
if E == 0:
epList.append([0, SP])
E = 1
if episodes['airedEpisodeNumber'] >= E or episodes['airedSeason'] > S:
epList.append([episodes['airedSeason'], episodes['airedEpisodeNumber']])
page = page + 1
else:
exist = False
epList.sort()
ep = 0
for item in itemlist:
s = str(epList[ep][0])
e = str(epList[ep][1])
item.title = typo(s + 'x'+ e + ' - ', typography) + item.title
ep = ep + 1
exist = True
except:
return itemlist
exist = False
if exist:
ID = dict_series[TITLE][TAG_ID]
SEASON = dict_series[TITLE][TAG_SEASON]
EPISODE = dict_series[TITLE][TAG_EPISODE]
MODE = dict_series[TITLE][TAG_MODE]
return renumeration(itemlist, item, typography, dict_series, ID, SEASON, EPISODE, MODE, TITLE)
else:
# se non è stata rinumerata controlla se è attiva la rinumerazione automatica
if 'autorenumber' not in settings_node: return itemlist
if settings_node['autorenumber'] == True:
config_item(item, itemlist, typography, True)
else:
for item in itemlist:
try:
dict_series = jsontools.get_node_from_file(itemlist[0].channel, TAG_TVSHOW_RENUMERATE)
TITLE = item.show.rstrip()
ID = dict_series[TITLE][TAG_ID]
exist = True
except:
exist = False
if item.contentType != 'movie':
if item.context:
context2 = item.context
item.context = context() + context2
item.context = context(exist) + context2
else:
item.context = context()
item.context = context(exist)
def renumeration (itemlist, item, typography, dict_series, ID, SEASON, EPISODE, MODE, TITLE):
# import web_pdb; web_pdb.set_trace()
# Se ID è 0 salta la rinumerazione
if ID == '0':
return itemlist
# Numerazione per gli Speciali
elif SEASON == '0':
EpisodeDict = {}
for item in itemlist:
number = scrapertoolsV2.find_single_match(item.title, r'\d+')
item.title = typo('0x' + number + ' - ', typography) + item.title
# Usa la lista degli Episodi se esiste nel Json
elif EPISODE:
log('EPISODE')
EpisodeDict = json.loads(base64.b64decode(EPISODE))
# Controlla che la lista degli Episodi sia della stessa lunghezza di Itemlist
if EpisodeDict == 'none':
return error(itemlist)
log(len(EpisodeDict))
log(len(itemlist))
if len(EpisodeDict) == len(itemlist):
for item in itemlist:
number = scrapertoolsV2.find_single_match(item.title, r'\d+')
item.title = typo(EpisodeDict[str(number)] + ' - ', typography) + item.title
else:
make_list(itemlist, item, typography, dict_series, ID, SEASON, EPISODE, MODE, TITLE)
else:
make_list(itemlist, item, typography, dict_series, ID, SEASON, EPISODE, MODE, TITLE)
def make_list(itemlist, item, typography, dict_series, ID, SEASON, EPISODE, MODE, TITLE):
log('RINUMERAZIONE')
page = 1
EpDict = {}
EpDateList = []
EpList = []
EpisodeDict = {}
exist = True
item.infoLabels['tvdb_id'] = ID
tvdb.set_infoLabels_item(item)
ABS = 0
ep = 1
# Ricava Informazioni da TVDB
while exist:
data = tvdb.otvdb_global.get_list_episodes(ID,page)
log('DATA= ',data)
if data: page = page + 1
else: exist = False
if data:
for episodes in data['data']:
log(episodes)
try: ABS = int(episodes['absoluteNumber'])
except: ABS = ep
EpDict[str(ABS)] = [str(episodes['airedSeason']) + 'x' + str(episodes['airedEpisodeNumber']), episodes['firstAired']]
EpDateList.append(episodes['firstAired'])
EpList.append([int(ABS), episodes['airedSeason'], episodes['airedEpisodeNumber']])
ep = ep + 1
EpDateList.sort()
EpList.sort()
log(EpDateList)
log(EpDict)
log(EpList)
# seleziona l'Episodio di partenza
if int(SEASON) > 1:
for name, episode in EpDict.items():
if episode[0] == SEASON + 'x1':
ep = int(name)-1
else:
ep = 0
# rinumera gli episodi
Break = False
for item in itemlist:
number = int(scrapertoolsV2.find_single_match(item.title, r'\d+'))
episode = ep + number - 1
if len(EpList) < episode: return error(itemlist)
# Crea una lista di Episodi in base alla modalità di rinumerazione
if MODE == False and number != 0:
while Break:
log('Long= ',len(EpList))
log('NUMBER= ',EpList[episode][1])
log('Eisode= ',episode)
episode = episode + 1
if EpList[episode][1] == 0 or len(EpList) <= episode: Break = True
ep = ep + 1
elif number == 0:
episode = previous(EpDateList, EpDict, ep + 1)
if config.get_localized_string(30161) not in item.title:
EpisodeDict[str(number)] = (str(EpList[episode][1]) + 'x' + str(EpList[episode][2]))
item.title = typo(str(EpList[episode][1]) + 'x' + str(EpList[episode][2]) + ' - ', typography) + item.title
# Scrive sul json
EpisodeDict = base64.b64encode(json.dumps(EpisodeDict))
dict_series[TITLE][TAG_EPISODE] = EpisodeDict
jsontools.update_node(dict_series, item.channel, TAG_TVSHOW_RENUMERATE)[0]
return itemlist
def RepresentsInt(s):
    """Return True if *s* can be parsed as an integer, else False.

    Used to decide whether the last token of a show title is a season
    number. The bare ``log()`` call follows the project-wide tracing
    convention.
    """
    log()
    try:
        int(s)
        return True
    except (ValueError, TypeError):
        # TypeError covers non-string/non-number inputs (e.g. None),
        # which previously escaped the ValueError-only handler and
        # crashed the caller instead of answering "not an int".
        return False
def previous(date_list, Dict, search):
    """Locate the episode that airs just before a special (episode 0).

    *Dict* maps absolute episode number -> [SxE string, air date] and
    *date_list* is the sorted list of air dates. The function finds the
    air date immediately preceding that of episode *search* and returns
    the matching episode's absolute number minus one (0 when nothing
    precedes it).
    """
    log()
    prev_date = None
    result = 0
    # Find the date slot right before the searched episode's air date.
    for _abs_no, info in Dict.items():
        if info[1] != Dict[str(search)][1]:
            continue
        for pos, aired in enumerate(date_list):
            if aired == info[1] and pos > 0:
                prev_date = date_list[pos - 1]
    # Map that earlier date back to an episode number.
    for abs_no, info in Dict.items():
        log(info[1], ' = ', prev_date)
        if info[1] == prev_date:
            result = int(abs_no) - 1
    return result
def error(itemlist):
    """Show a "renumbering failed" notification and return the item
    list unchanged so the caller can still display it."""
    message = config.get_localized_string(70713)
    platformtools.dialog_notification(itemlist[0].fulltitle.strip(), message)
    return itemlist

View File

@@ -20,8 +20,8 @@ from specials import filtertools
list_data = {}
list_language = ['LAT', 'CAST', 'VO', 'VOSE']
list_servers = ['directo']
list_language = ['ITA', 'SUB-ITA']
list_servers = ['directo', 'akvideo', 'verystream', 'openload']
list_quality = ['SD', '720', '1080', '4k']
def mainlist(item):
@@ -45,7 +45,6 @@ def show_channels(item):
"action": "remove_channel",
"channel": "community"}]
path = os.path.join(config.get_data_path(), 'community_channels.json')
file = open(path, "r")
json = jsontools.load(file.read())
@@ -53,18 +52,20 @@ def show_channels(item):
itemlist.append(Item(channel=item.channel, title=config.get_localized_string(70676), action='add_channel', thumbnail=get_thumb('add.png')))
for key, channel in json['channels'].items():
if 'thumbnail' in channel:
thumbnail = channel['thumbnail']
else:
thumbnail = ''
file_path = channel ['path']
file_url = httptools.downloadpage(file_path, follow_redirects=True).data
json_url = jsontools.load(file_url)
thumbnail = json_url['thumbnail'] if 'thumbnail' in json_url else ''
fanart = json_url['fanart'] if 'fanart' in json_url else ''
if 'fanart' in channel:
fanart = channel['fanart']
else:
fanart = ''
itemlist.append(Item(channel=item.channel, title=channel['channel_name'], url=channel['path'],
thumbnail=thumbnail, fanart=fanart, action='show_menu', channel_id = key, context=context))
itemlist.append(Item(channel=item.channel,
title=channel['channel_name'],
url=file_path,
thumbnail=thumbnail,
fanart=fanart,
action='show_menu',
channel_id = key,
context=context))
return itemlist
def load_json(item):
@@ -127,6 +128,8 @@ def list_all(item):
new_item = Item(channel=item.channel, title=title, quality=quality,
language=language, plot=plot, thumbnail=poster)
new_item.infoLabels['year'] = media['year'] if 'year' in media else ''
new_item.infoLabels['tmdb_id'] = media['tmdb_id'] if 'tmdb_id' in media else ''
if 'movies_list' in json_data:
new_item.url = media
@@ -134,10 +137,16 @@ def list_all(item):
new_item.action = 'findvideos'
if 'year' in media:
new_item.infoLabels['year'] = media['year']
if 'tmdb_id' in media:
new_item.infoLabels['tmdb_id'] = media['tmdb_id']
else:
new_item.url = media['seasons_list']
new_item.contentSerieName = media['title']
new_item.action = 'seasons'
if 'year' in media:
new_item.infoLabels['year'] = media['year']
if 'tmdb_id' in media:
new_item.infoLabels['tmdb_id'] = media['tmdb_id']
itemlist.append(new_item)

View File

@@ -7,6 +7,87 @@
"thumbnail": null,
"banner": null,
"categories": [],
"settings": [],
"settings": [
{
"id": "film1",
"type": "text",
"label": "Inserisci nome Film #1",
"default": "[Oggi in TV] Film #1",
"enabled": true,
"visible": true
},
{
"id": "film2",
"type": "text",
"label": "Inserisci nome Film #2",
"default": "[Oggi in TV] Film #2",
"enabled": true,
"visible": true
},
{
"id": "film3",
"type": "text",
"label": "Inserisci nome Film #3",
"default": "[Oggi in TV] Film #3",
"enabled": true,
"visible": true
},
{
"id": "film4",
"type": "text",
"label": "Inserisci nome Film #4",
"default": "[Oggi in TV] Film #4",
"enabled": true,
"visible": true
},
{
"id": "film5",
"type": "text",
"label": "Inserisci nome Film #5",
"default": "[Oggi in TV] Film #5",
"enabled": true,
"visible": true
},
{
"id": "now1",
"type": "text",
"label": "Inserisci nome adesso in onda #1",
"default": "[Adesso in onda] Tutte le trasmissioni #1",
"enabled": true,
"visible": true
},
{
"id": "now2",
"type": "text",
"label": "Inserisci nome adesso in onda #2",
"default": "[Adesso in onda] Tutte le trasmissioni #2",
"enabled": true,
"visible": true
},
{
"id": "now3",
"type": "text",
"label": "Inserisci nome adesso in onda #3",
"default": "[Adesso in onda] Tutte le trasmissioni #3",
"enabled": true,
"visible": true
},
{
"id": "now4",
"type": "text",
"label": "Inserisci nome adesso in onda #4",
"default": "[Adesso in onda] Tutte le trasmissioni #4",
"enabled": true,
"visible": true
},
{
"id": "now5",
"type": "text",
"label": "Inserisci nome adesso in onda #5",
"default": "[Adesso in onda] Tutte le trasmissioni #5",
"enabled": true,
"visible": true
}
],
"channel": false
}

View File

@@ -1,61 +1,186 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale film in tv
# Ringraziamo Icarus crew
# ------------------------------------------------------------
import re
import urllib
from channelselector import get_thumb
from core import httptools, scrapertools, tmdb, support
from core import httptools, scrapertools, support, tmdb, filetools
from core.item import Item
from platformcode import logger, config
from platformcode import logger, config, platformtools
host = "https://www.comingsoon.it"
host = "https://www.superguidatv.it"
TIMEOUT_TOTAL = 60
def mainlist(item):
logger.info(" mainlist")
itemlist = [Item(channel="search", action='discover_list', title=config.get_localized_string(70309),
search_type='list', list_type='movie/now_playing',
thumbnail=get_thumb("now_playing.png")),
Item(channel="search", action='discover_list', title=config.get_localized_string(70312),
search_type='list', list_type='tv/on_the_air', thumbnail=get_thumb("on_the_air.png")),
Item(channel=item.channel,
title="[Oggi in TV] [B]Adesso in onda[/B]",
action="tvoggi",
url="%s/filmtv/" % host,
itemlist = [#Item(channel="search", action='discover_list', title=config.get_localized_string(70309),
#search_type='list', list_type='movie/now_playing',
# thumbnail=get_thumb("now_playing.png")),
#Item(channel="search", action='discover_list', title=config.get_localized_string(70312),
# search_type='list', list_type='tv/on_the_air', thumbnail=get_thumb("on_the_air.png")),
Item(channel=item.channel,
title=config.get_setting("film1", channel="filmontv"),
action="now_on_tv",
url="%s/film-in-tv/" % host,
thumbnail=item.thumbnail),
Item(channel=item.channel,
title="[Oggi in TV] [B]Primafila[/B]",
action="primafila",
url="https://www.superguidatv.it/film-in-tv/oggi/sky-primafila/",
Item(channel=item.channel,
title=config.get_setting("film2", channel="filmontv"),
action="now_on_tv",
url="%s/film-in-tv/oggi/premium/" % host,
thumbnail=item.thumbnail),
Item(channel=item.channel,
title="[Oggi in TV] Mattina",
action="tvoggi",
url="%s/filmtv/oggi/mattina/" % host,
Item(channel=item.channel,
title=config.get_setting("film3", channel="filmontv"),
action="now_on_tv",
url="%s/film-in-tv/oggi/sky-intrattenimento/" % host,
thumbnail=item.thumbnail),
Item(channel=item.channel,
title="[Oggi in TV] Pomeriggio",
action="tvoggi",
url="%s/filmtv/oggi/pomeriggio/" % host,
Item(channel=item.channel,
title=config.get_setting("film4", channel="filmontv"),
action="now_on_tv",
url="%s/film-in-tv/oggi/sky-cinema/" % host,
thumbnail=item.thumbnail),
Item(channel=item.channel,
title="[Oggi in TV] Sera",
action="tvoggi",
url="%s/filmtv/oggi/sera/" % host,
Item(channel=item.channel,
title=config.get_setting("film5", channel="filmontv"),
action="now_on_tv",
url="%s/film-in-tv/oggi/sky-primafila/" % host,
thumbnail=item.thumbnail),
Item(channel=item.channel,
title="[Oggi in TV] Notte",
action="tvoggi",
url="%s/filmtv/oggi/notte/" % host,
thumbnail=item.thumbnail)]
Item(channel=item.channel,
title=config.get_setting("now1", channel="filmontv"),
action="now_on_misc",
url="%s/ora-in-onda/" % host,
thumbnail=item.thumbnail),
Item(channel=item.channel,
title=config.get_setting("now2", channel="filmontv"),
action="now_on_misc",
url="%s/ora-in-onda/premium/" % host,
thumbnail=item.thumbnail),
Item(channel=item.channel,
title=config.get_setting("now3", channel="filmontv"),
action="now_on_misc",
url="%s/ora-in-onda/sky-intrattenimento/" % host,
thumbnail=item.thumbnail),
Item(channel=item.channel,
title=config.get_setting("now4", channel="filmontv"),
action="now_on_misc",
url="%s/ora-in-onda/sky-doc-e-lifestyle/" % host,
thumbnail=item.thumbnail),
Item(channel=item.channel,
title=config.get_setting("now5", channel="filmontv"),
action="now_on_misc_film",
url="%s/ora-in-onda/sky-cinema/" % host,
thumbnail=item.thumbnail),
Item(channel=item.channel,
title="Personalizza Oggi in TV",
action="server_config",
config="filmontv",
folder=False,
thumbnail=item.thumbnail)]
return itemlist
def server_config(item):
    # Open Kodi's settings dialog for this special channel: item.config
    # names the channel json under the addon's "specials" directory.
    return platformtools.show_channel_settings(channelpath=filetools.join(config.get_runtime_path(), "specials", item.config))
def now_on_misc_film(item):
    """Build the "now airing films" list from the guide page at item.url.

    Each scraped entry becomes a do_search item so the title can be
    resolved on any enabled movie channel; TMDB then enriches the
    metadata for the whole list.
    """
    logger.info("filmontv tvoggi")
    itemlist = []

    html = httptools.downloadpage(item.url).data
    # Groups: channel logo alt text, programme title alt text, thumbnail src.
    patron = r'table-cell[;" ]*alt="([^"]+)".*?backdrop" alt="([^"]+)"[ ]*src="([^"]+)'
    for channel_name, raw_title, thumb in re.compile(patron, re.DOTALL).findall(html):
        title = scrapertools.decodeHtmlentities(raw_title).strip()
        # NOTE(review): infoLabels 'title' is the literal string "movie"
        # here, while now_on_misc stores the real title — confirm intent.
        labels = {'title': "movie"}
        itemlist.append(
            Item(channel=item.channel,
                 action="do_search",
                 extra=urllib.quote_plus(title) + '{}' + 'movie',
                 title="[B]" + title + "[/B] - " + channel_name,
                 fulltitle=title,
                 url="",
                 thumbnail=thumb.replace("?width=320", "?width=640"),
                 contentTitle=title,
                 contentType='movie',
                 infoLabels=labels,
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def now_on_misc(item):
    """Build the "now airing programmes" list from the page at item.url.

    Entries are typed as tvshows and routed through do_search so the
    title is looked up across the enabled series channels; TMDB fills
    in artwork and plot afterwards.
    """
    logger.info("filmontv tvoggi")
    itemlist = []

    html = httptools.downloadpage(item.url).data
    # Groups: channel logo alt text, programme title alt text, thumbnail src.
    patron = r'table-cell[;" ]*alt="([^"]+)".*?backdrop" alt="([^"]+)"[ ]*src="([^"]+)'
    for channel_name, raw_title, thumb in re.compile(patron, re.DOTALL).findall(html):
        title = scrapertools.decodeHtmlentities(raw_title).strip()
        labels = {"year": "", 'tvshowtitle': title}
        itemlist.append(
            Item(channel=item.channel,
                 action="do_search",
                 extra=urllib.quote_plus(title) + '{}' + 'tvshow',
                 title="[B]" + title + "[/B] - " + channel_name,
                 fulltitle=title,
                 url="",
                 thumbnail=thumb.replace("?width=320", "?width=640"),
                 contentTitle=title,
                 contentType='tvshow',
                 infoLabels=labels,
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def now_on_tv(item):
    """Build the "films on TV today" list from the schedule at item.url.

    Scrapes channel, duration, title, genre, thumbnail and year for each
    film, emits do_search items typed as movies, then lets TMDB enrich
    the metadata for the whole list.
    """
    logger.info("filmontv tvoggi")
    itemlist = []

    html = httptools.downloadpage(item.url).data
    # Groups: channel, duration, title, genre, thumbnail, year.
    patron = (r'view_logo" alt="([a-zA-Z 0-9]*)".*?spanMovieDuration">([^<]+)'
              r'.*?spanTitleMovie">([A-Za-z ,0-9\.À-ÖØ-öø-ÿ\-\']*)'
              r'.*?GenresMovie">([\-\'A-Za-z À-ÖØ-öø-ÿ\/]*)'
              r'.*?src="([a-zA-Z:\/\.0-9?]*).*?Year">([A-Z 0-9a-z]*)')
    for ch, duration, raw_title, _genre, thumb, year in re.compile(patron, re.DOTALL).findall(html):
        title = scrapertools.decodeHtmlentities(raw_title).strip()
        decorated = "[B]" + title + "[/B] - " + ch + " - " + duration
        itemlist.append(
            Item(channel=item.channel,
                 action="do_search",
                 extra=urllib.quote_plus(title) + '{}' + 'movie',
                 title=decorated,
                 fulltitle=decorated,
                 url="",
                 thumbnail=thumb.replace("?width=240", "?width=480"),
                 contentTitle=title,
                 contentType='movie',
                 infoLabels={"year": year},
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def primafila(item):
logger.info("filmontv tvoggi")
@@ -64,7 +189,7 @@ def primafila(item):
# Carica la pagina
data = httptools.downloadpage(item.url).data
#patron = r'spanTitleMovie">([A-Za-z À-ÖØ-öø-ÿ]*)[a-z \n<>\/="_\-:0-9;A-Z.]*GenresMovie">([A-Za-z À-ÖØ-öø-ÿ\/]*)[a-z \n<>\/="_\-:0-9;A-Z.%]*src="([a-zA-Z:\/\.0-9?=]*)'
patron = r'spanTitleMovie">([A-Za-z À-ÖØ-öø-ÿ]*)[a-z \n<>\/="_\-:0-9;A-Z.]*GenresMovie">([A-Za-z À-ÖØ-öø-ÿ\/]*)[a-z \n<>\/="_\-:0-9;A-Z.%]*src="([a-zA-Z:\/\.0-9?]*)[a-z \n<>\/="_\-:0-9;A-Z.%]*Year">([A-Z 0-9a-z]*)'
patron = r'spanTitleMovie">([A-Za-z À-ÖØ-öø-ÿ\-\']*)[a-z \n<>\/="_\-:0-9;A-Z.]*GenresMovie">([\-\'A-Za-z À-ÖØ-öø-ÿ\/]*)[a-z \n<>\/="_\-:0-9;A-Z.%]*src="([a-zA-Z:\/\.0-9?]*)[a-z \n<>\/="_\-:0-9;A-Z.%\-\']*Year">([A-Z 0-9a-z]*)'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle, scrapedgender, scrapedthumbnail, scrapedyear in matches:
# for scrapedthumbnail, scrapedtitle, scrapedtv in matches:
@@ -89,42 +214,6 @@ def primafila(item):
return itemlist
def tvoggi(item):
logger.info("filmontv tvoggi")
itemlist = []
# Carica la pagina
data = httptools.downloadpage(item.url).data
# Estrae i contenuti
patron = r'<div class="col-xs-5 box-immagine">[^<]+<img src="([^"]+)[^<]+<[^<]+<[^<]+<[^<]+<[^<]+<.*?titolo">(.*?)<[^<]+<[^<]+<[^<]+<[^>]+><br />(.*?)<[^<]+</div>[^<]+<[^<]+<[^<]+<[^>]+>[^<]+<[^<]+<[^<]+<[^>]+><[^<]+<[^>]+>:\s*([^<]+)[^<]+<[^<]+[^<]+<[^<]+[^<]+<[^<]+[^<]+[^>]+>:\s*([^<]+)'
# patron = r'<div class="col-xs-5 box-immagine">[^<]+<img src="([^"]+)[^<]+<[^<]+<[^<]+<[^<]+<[^<]+<.*?titolo">(.*?)<[^<]+<[^<]+<[^<]+<[^>]+><br />(.*?)<[^<]+</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedtitle, scrapedtv, scrapedgender, scrapedyear in matches:
# for scrapedthumbnail, scrapedtitle, scrapedtv in matches:
scrapedurl = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
infoLabels = {}
infoLabels["year"] = scrapedyear
itemlist.append(
Item(channel=item.channel,
action="do_search",
extra=urllib.quote_plus(scrapedtitle) + '{}' + 'movie',
title=scrapedtitle + "[COLOR yellow] " + scrapedtv + "[/COLOR]",
fulltitle=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail,
contentTitle=scrapedtitle,
contentType='movie',
infoLabels=infoLabels,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def do_search(item):
    # Delegate to the global search special, which looks the scraped
    # title up across all enabled channels. Import is kept local,
    # presumably to avoid a circular import — confirm against specials/search.
    from specials import search
    return search.do_search(item)

View File

@@ -31,8 +31,6 @@ def mainlist(item):
context = [{"title": config.get_localized_string(60412), "action": "setting_channel", "channel": item.channel}]
itemlist.append(Item(channel=item.channel, action="sub_menu", title="[B]" + config.get_localized_string(70305)+ "[/B]", context=context,
thumbnail=get_thumb("search.png")))
itemlist.append(Item(channel="filmontv", action="mainlist", title=config.get_localized_string(50001),
thumbnail=get_thumb("on_the_air.png"), viewmode="thumbnails"))
itemlist.append(Item(channel=item.channel, action='genres_menu', title=config.get_localized_string(70306), type='movie',
thumbnail=get_thumb("genres.png")))
itemlist.append (Item(channel=item.channel, action='discover_list', title=config.get_localized_string(70307),

View File

@@ -85,8 +85,7 @@ def menu_channels(item):
def channel_config(item):
return platformtools.show_channel_settings(channelpath=filetools.join(config.get_runtime_path(), "channels",
item.config))
return platformtools.show_channel_settings(channelpath=filetools.join(config.get_runtime_path(), "channels", item.config))
def autostart(item): # item necessario launcher.py linea 265
@@ -145,7 +144,7 @@ def menu_servers(item):
if server_parameters["has_settings"]:
itemlist.append(
Item(channel=CHANNELNAME, title = ". " + config.get_localized_string(60553) % server_parameters["name"],
action="server_config", config=server, folder=False, thumbnail=""))
action="server_debrid_config", config=server, folder=False, thumbnail=""))
itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60554),
action="", folder=False, text_bold = True, thumbnail=get_thumb("setting_0.png")))
@@ -167,8 +166,10 @@ def menu_servers(item):
def server_config(item):
return platformtools.show_channel_settings(channelpath=filetools.join(config.get_runtime_path(), "servers",
item.config))
return platformtools.show_channel_settings(channelpath=filetools.join(config.get_runtime_path(), "servers", item.config))
def server_debrid_config(item):
return platformtools.show_channel_settings(channelpath=filetools.join(config.get_runtime_path(), "servers", "debriders", item.config))
def servers_blacklist(item):

View File

@@ -69,7 +69,7 @@ def buscartrailer(item, trailers=[]):
if "youtube" in url:
url = url.replace("embed/", "watch?v=")
titulo, url, server = servertools.findvideos(url)[0]
title = "Trailer por defecto [" + server + "]"
title = "Trailer [" + server + "]"
itemlist.append(item.clone(title=title, url=url, server=server, action="play"))
if item.show or item.infoLabels['tvshowtitle'] or item.contentType != "movie":
tipo = "tv"
@@ -81,7 +81,7 @@ def buscartrailer(item, trailers=[]):
else:
for trailer in trailers:
title = trailer['name'] + " [" + trailer['size'] + "p] (" + trailer['language'].replace("en", "ING") \
.replace("es", "ESP") + ") [tmdb/youtube]"
.replace("it", "ITA") + ") [tmdb/youtube]"
itemlist.append(item.clone(action="play", title=title, url=trailer['url'], server="youtube"))
except:
import traceback
@@ -278,7 +278,7 @@ def search_links_abando(item):
else:
for scrapedurl, language, scrapedtitle in matches:
if language == "1":
idioma = " (ESP)"
idioma = " (ITA)"
else:
idioma = " (V.O)"
scrapedurl = urlparse.urljoin("http://www.abandomoviez.net/%s" % item.prefix, scrapedurl)

View File

@@ -1,9 +0,0 @@
{
"update": {
"name": "Kodi on Demand",
"version":"101",
"tag": "1.0.1",
"date": "03/05/2019",
"changes": "Added Updater"
}
}

View File

@@ -309,37 +309,35 @@ def monitor_update():
logger.info("Inicio actualizacion programada para las %s h.: %s" % (update_start, datetime.datetime.now()))
check_for_update(overwrite=False)
def get_channel_json():
import urllib, os, xbmc
addon = config.get_addon_core()
ROOT_DIR = config.get_runtime_path()
LOCAL_FILE = os.path.join(ROOT_DIR, "channels.json")
if os.path.exists(LOCAL_FILE):
os.remove(LOCAL_FILE)
urllib.urlretrieve("https://raw.githubusercontent.com/kodiondemand/addon/master/channels.json", LOCAL_FILE)
if addon.getSetting("use_custom_url") != "true":
channels_path = os.path.join(ROOT_DIR, "channels", '*.json')
channel_files = sorted(glob.glob(channels_path), key=lambda x: os.path.basename(x))
for channel_file in channel_files:
if channel_file:
try: import json
except: import simplejson as json
with open(LOCAL_FILE) as f:
data = json.load(f)
try:
if data[channel_file]:
config.set_setting(name=data[channel_file], value="value", channel=channel_file)
except: pass #channel not in json
# def get_channel_json():
# import urllib, os, xbmc
# addon = config.get_addon_core()
# ROOT_DIR = config.get_runtime_path()
# LOCAL_FILE = os.path.join(ROOT_DIR, "channels.json")
#
# if os.path.exists(LOCAL_FILE):
# os.remove(LOCAL_FILE)
# urllib.urlretrieve("https://raw.githubusercontent.com/kodiondemand/addon/master/channels.json", LOCAL_FILE)
#
# if addon.getSetting("use_custom_url") != "true":
# channels_path = os.path.join(ROOT_DIR, "channels", '*.json')
# channel_files = sorted(glob.glob(channels_path), key=lambda x: os.path.basename(x))
# for channel_file in channel_files:
# if channel_file:
# try: import json
# except: import simplejson as json
# with open(LOCAL_FILE) as f:
# data = json.load(f)
# try:
# if data[channel_file]:
# config.set_setting(name=data[channel_file], value="value", channel=channel_file)
# except: pass #channel not in json
if __name__ == "__main__":
# Se ejecuta en cada inicio
import xbmc
import time
get_channel_json()
# modo adulto:
# sistema actual 0: Nunca, 1:Siempre, 2:Solo hasta que se reinicie Kodi
# si es == 2 lo desactivamos.
@@ -361,6 +359,7 @@ if __name__ == "__main__":
logger.info("DEV MODE OFF")
from platformcode import updater
updater.check_addon_init()
# get_channel_json() -> disabilitato, lo si fa con l'updater
else:
logger.info("DEV MODE ON")