KoD 1.6.2

- Improved the "search trailer" function
- Next episode: playlist mode is now available (you can use Kodi's "play next" button)
- Added www.accuradio.com
- Various improvements
mac12m99
2021-04-07 20:16:21 +02:00
parent 8441b1e28a
commit b2bd3e61d1
30 changed files with 457 additions and 391 deletions
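
The playlist mode mentioned in the commit message is wired up in the platformtools.py, xbmc_videolibrary and settings hunks further down (a new "Playlist" value for the next_ep setting and an add_next_to_playlist() helper). A minimal sketch of the underlying Kodi mechanism, assuming next_ep == 3 selects the new mode as in those hunks; queue_next_episode and its arguments are illustrative, only the xbmc/xbmcgui calls are real:

import xbmc
import xbmcgui

def queue_next_episode(url, title, thumb):
    # Build a ListItem for the following episode and append it to Kodi's
    # video playlist; the player's "play next" control then advances to
    # the queued entry instead of showing the "next episode" dialog.
    li = xbmcgui.ListItem(title, path=url)
    li.setArt({'thumb': thumb, 'poster': thumb})
    playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
    playlist.add(url, li)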

View File

@@ -1,4 +1,4 @@
<addon id="plugin.video.kod" name="Kodi on Demand" version="1.6.1" provider-name="KoD Team">
<addon id="plugin.video.kod" name="Kodi on Demand" version="1.6.2" provider-name="KoD Team">
<requires>
<!-- <import addon="script.module.libtorrent" optional="true"/> -->
<import addon="metadata.themoviedb.org"/>
@@ -23,12 +23,14 @@
<assets>
<icon>resources/media/logo.png</icon>
<fanart>resources/media/fanart.jpg</fanart>
<screenshot>resources/media/themes/ss/1.png</screenshot>
<screenshot>resources/media/themes/ss/2.png</screenshot>
<screenshot>resources/media/themes/ss/3.png</screenshot>
<screenshot>resources/media/screenshot-1.png</screenshot>
<screenshot>resources/media/screenshot-2.png</screenshot>
<screenshot>resources/media/screenshot-3.png</screenshot>
</assets>
<news>-Migliorata l'efficacia del riconoscimento dei contenuti in ricerca film/serie
- corretti alcuni bug e fatti alcuni fix per i soliti cambi di struttura</news>
<news>- Migliorata funzione &quot;cerca trailer&quot;
- Episodio successivo: è ora disponibile la modalità playlist (puoi usare il tasto riproduci successivo di kodi)
- aggiunto www.accuradio.com
- migliorie varie</news>
<description lang="it">Naviga velocemente sul web e guarda i contenuti presenti</description>
<disclaimer>[COLOR red]The owners and submitters to this addon do not host or distribute any of the content displayed by these addons nor do they have any affiliation with the content providers.[/COLOR]
[COLOR yellow]Kodi © is a registered trademark of the XBMC Foundation. We are not connected to or in any other way affiliated with Kodi, Team Kodi, or the XBMC Foundation. Furthermore, any software, addons, or products offered by us will receive no support in official Kodi channels, including the Kodi forums and various social networks.[/COLOR]</disclaimer>

View File

@@ -14,7 +14,7 @@
"casacinema": "https://www.casacinema.page",
"cb01anime": "https://www.cineblog01.red",
"cineblog01": "https://cb01.uno",
"cinemalibero": "https://cinemalibero.website",
"cinemalibero": "https://cinemalibero.pink",
"cinetecadibologna": "http://cinestore.cinetecadibologna.it",
"discoveryplus": "https://www.discoveryplus.com",
"dreamsub": "https://dreamsub.stream",
@@ -22,24 +22,24 @@
"eurostreaming": "https://eurostreaming.team",
"filmgratis": "https://www.filmaltadefinizione.me",
"filmigratis": "https://filmigratis.org",
"filmsenzalimiticc": "https://www.filmsenzalimiti01.xyz",
"filmsenzalimiticc": "https://www.filmsenzalimiti01.website",
"filmstreaming01": "https://filmstreaming01.com",
"guardaserie_stream": "https://guardaserie.yoga",
"guardaseriecam": "https://guardaserie.cam",
"guardaserieclick": "https://www.guardaserie.plus",
"guardaserieicu": "https://guardaserie.cloud",
"guardaserieclick": "https://www.guardaserie.vision",
"guardaserieicu": "https://guardaserie.world",
"hd4me": "https://hd4me.net",
"ilcorsaronero": "https://ilcorsaronero.link",
"ilgeniodellostreaming": "https://ilgeniodellostreaming.soy",
"ilgeniodellostreaming_cam": "https://ilgeniodellostreaming.gold",
"italiaserie": "https://italiaserie.fit",
"ilgeniodellostreaming": "https://ilgeniodellostreaming.moe",
"ilgeniodellostreaming_cam": "https://ilgeniodellostreaming.photo",
"italiaserie": "https://italiaserie.casa",
"mediasetplay": "https://www.mediasetplay.mediaset.it",
"mondoserietv": "https://mondoserietv.fun",
"paramount": "https://www.paramountnetwork.it",
"piratestreaming": "https://www.piratestreaming.guru",
"piratestreaming": "https://www.piratestreaming.codes",
"polpotv": "https://roma.polpo.tv",
"raiplay": "https://www.raiplay.it",
"serietvonline": "https://serietvonline.pink",
"serietvonline": "https://serietvonline.blue",
"serietvsubita": "http://serietvsubita.xyz",
"serietvu": "https://www.serietvu.link",
"streamingcommunity": "https://streamingcommunity.co",

channels/accuradio.json (Normal file, 11 lines)
View File

@@ -0,0 +1,11 @@
{
"id": "accuradio",
"name": "AccuRadio",
"active": true,
"language": ["*"],
"thumbnail": "accuradio.png",
"banner": "accuradio.png",
"categories": ["music"],
"not_active":["include_in_global_search"],
"settings" :[]
}

channels/accuradio.py (Normal file, 87 lines)
View File

@@ -0,0 +1,87 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per accuradio
# ------------------------------------------------------------
import random
from core import httptools, support
from platformcode import logger
host = 'https://www.accuradio.com'
api_url = host + '/c/m/json/{}/'
headers = [['Referer', host]]
def mainlist(item):
itemlist = []
item.action = 'peliculas'
js = httptools.downloadpage(api_url.format('brands')).json
for it in js.get('features',[]):
itemlist.append(
item.clone(url= '{}/{}'.format(host,it.get('canonical_url','')),
title=support.typo(it['name'],'italic') + support.typo(it.get('channels',''),'_ [] color kod')
))
for it in js.get('brands',[]):
itemlist.append(
item.clone(url= '{}/{}'.format(host,it.get('canonical_url','')),
title=support.typo(it['name'],'bullet bold') + support.typo(it.get('channels',''),'_ [] color kod')
))
itemlist.append(item.clone(title=support.typo('Cerca...', 'bold color kod'), action='search', thumbnail=support.thumb('search')))
support.channel_config(item, itemlist)
return itemlist
@support.scrape
def peliculas(item):
action = 'playradio'
patron = r'data-id="(?P<id>[^"]+)"\s*data-oldid="(?P<oldid>[^"]+)".*?data-name="(?P<title>[^"]+)(?:[^>]+>){5}<img class="[^"]+"\s*src="(?P<thumb>[^"]+)(?:[^>]+>){6}\s*(?P<plot>[^<]+)'
return locals()
def playradio(item):
import xbmcgui, xbmc
items = httptools.downloadpage('{}/playlist/json/{}/?ando={}&rand={}'.format(host, item.id, item.oldid, random.random())).json
playlist = xbmc.PlayList(xbmc.PLAYLIST_MUSIC)
playlist.clear()
for i in items:
if 'id' in i:
url = i['primary'] + i['fn'] + '.m4a'
title = i['title']
artist = i['track_artist']
album = i['album']['title']
year = i['album']['year']
thumb = 'https://www.accuradio.com/static/images/covers300' + i['album']['cdcover']
duration = i.get('duration',0)
info = {'duration':duration,
'album':album,
'artist':artist,
'title':title,
'year':year,
'mediatype':'music'}
item = xbmcgui.ListItem(title, path=url)
item.setArt({'thumb':thumb, 'poster':thumb, 'icon':thumb})
item.setInfo('music',info)
playlist.add(url, item)
xbmc.Player().play(playlist)
def search(item, text):
support.info(text)
item.url = host + '/search/' + text
itemlist = []
try:
data = support.match(item.url).data
artists = support.match(data, patronBlock=r'artistResults(.*?)</ul', patron=r'href="(?P<url>[^"]+)"\s*>(?P<title>[^<]+)').matches
if artists:
for url, artist in artists:
itemlist.append(item.clone(title=support.typo(artist,'bullet bold'), thumbnail=support.thumb('music'), url=host+url, action='peliculas'))
item.data = data
itemlist += peliculas(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return itemlist

View File

@@ -77,8 +77,8 @@ def peliculas(item):
item.title += support.typo(item.lang2, '_ [] color kod')
if item.args == 'update':
item.title = item.title.replace('-', ' ')
if item.args == 'search':
item.contentType = 'tvshow' if 'serie-' in item.url else 'movie'
# if item.args == 'search':
# item.contentType = 'tvshow' if 'serie-' in item.url else 'movie'
return item

View File

@@ -186,9 +186,10 @@ def play(item):
if item.contentType == 'episode': data = session.get('{}/playback/v2/videoPlaybackInfo/{}?usePreAuth=true'.format(api, item.id), headers=headers).json().get('data',{}).get('attributes',{})
else: data = session.get('{}/playback/v2/channelPlaybackInfo/{}?usePreAuth=true'.format(api, item.id), headers=headers).json().get('data',{}).get('attributes',{})
if data.get('protection', {}).get('drm_enabled',True):
url = data['streaming']['dash']['url']
item.url = data['streaming']['dash']['url']
item.drm = 'com.widevine.alpha'
item.license = data['protection']['schemes']['widevine']['licenseUrl'] + '|PreAuthorization=' + data['protection']['drmToken'] + '|R{SSM}|'
else:
url = data['streaming']['hls']['url']
return support.servertools.find_video_items(item, data=url)
item.url = data['streaming']['hls']['url']
item.manifest = 'hls'
return [item]

View File

@@ -115,7 +115,6 @@ def select(item):
def search(item, texto):
support.info()
item.url = host + "/?s=" + texto
item.contentType = 'episode'
item.args = 'search'
try:
return peliculas(item)

View File

@@ -157,12 +157,17 @@ def episodios(item):
def findvideos(item):
logger.debug()
return support.server(item, item.url, Download=False)
return support.server(item, itemlist=[item.clone(title='Paramount', server='directo', action='play')], Download=False)
def play(item):
logger.debug()
item.server = 'paramount_server'
item.manifest = 'hls'
mgid = support.match(item.url, patron=r'uri":"([^"]+)"').match
url = 'https://media.mtvnservices.com/pmt/e1/access/index.html?uri=' + mgid + '&configtype=edge&ref=' + item.url
ID, rootUrl = support.match(url, patron=[r'"id":"([^"]+)",',r'brightcove_mediagenRootURL":"([^"]+)"']).matches
item.url = jsontools.load(support.match(rootUrl.replace('&device={device}','').format(uri = ID)).data)['package']['video']['item'][0]['rendition'][0]['src']
if item.livefilter:
d = liveDict()[item.livefilter]
item = item.clone(title=support.typo(item.livefilter, 'bold'), fulltitle=item.livefilter, url=d['url'], plot=d['plot'], action='play', forcethumb=True, no_return=True)

View File

@@ -1,9 +1,10 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per altadefinizione01
# Canale per tunein
# ------------------------------------------------------------
from core import support
from core import scrapertools, support
from platformcode import logger
host = 'http://api.radiotime.com'
headers = [['Referer', host]]
@@ -32,6 +33,7 @@ def radio(item):
data = support.match(item, patron= r'text="(?P<title>[^\("]+)(?:\((?P<location>[^\)]+)\))?" URL="(?P<url>[^"]+)" bitrate="(?P<quality>[^"]+)" reliability="[^"]+" guide_id="[^"]+" subtext="(?P<song>[^"]+)" genre_id="[^"]+" formats="(?P<type>[^"]+)" (?:playing="[^"]+" )?(?:playing_image="[^"]+" )?(?:show_id="[^"]+" )?(?:item="[^"]+" )?image="(?P<thumb>[^"]+)"')
if data.matches:
for title, location, url, quality, song, type, thumbnail in data.matches:
title = scrapertools.unescape(title)
itemlist.append(
item.clone(title = support.typo(title, 'bold') + support.typo(quality + ' kbps','_ [] bold color kod'),
thumbnail = thumbnail,
@@ -43,6 +45,7 @@ def radio(item):
matches = support.match(data.data, patron= r'text="(?P<title>[^\("]+)(?:\([^\)]+\))?" URL="(?P<url>[^"]+)" (?:guide_id="[^"]+" )?(?:stream_type="[^"]+" )?topic_duration="(?P<duration>[^"]+)" subtext="(?P<plot>[^"]+)" item="[^"]+" image="(?P<thumb>[^"]+)"').matches
if matches:
for title, url, duration, plot, thumbnail in matches:
title = scrapertools.unescape(title)
infoLabels={}
infoLabels['duration'] = duration
itemlist.append(
@@ -56,12 +59,14 @@ def radio(item):
else:
matches = support.match(data.data, patron= r'text="(?P<title>[^"]+)" URL="(?P<url>[^"]+)"').matches
for title, url in matches:
title = scrapertools.unescape(title)
itemlist.append(
item.clone(channel = item.channel,
title = support.typo(title, 'bold'),
thumbnail = item.thumbnail,
url = url,
action = 'radio'))
support.nextPage(itemlist, item, data.data, r'(?P<url>[^"]+)" key="nextStations')
return itemlist
@@ -86,5 +91,5 @@ def search(item, text):
except:
import sys
for line in sys.exc_info():
support.logger.error("%s" % line)
logger.error("%s" % line)
return []

View File

@@ -23,7 +23,7 @@ except:
conn_id = ''
main_host = host + '/vvvvid/ondemand/'
main_host = host + '/vvvvid/ondemand/'
@support.menu

View File

@@ -104,6 +104,7 @@ def unescape(text):
from Fredrik Lundh
http://effbot.org/zone/re-sub.htm#unescape-html
"""
if not ('&' in text and ';' in text):
return text
@@ -129,13 +130,16 @@ def unescape(text):
import html.entities as htmlentitydefs
else:
import htmlentitydefs
text = unichr(htmlentitydefs.name2codepoint[text[1:-1]]).encode("utf-8")
ret = unichr(htmlentitydefs.name2codepoint[text[1:-1]]).encode("utf-8")
except KeyError:
logger.error("keyerror")
pass
except:
pass
return text # leave as is
# from core.support import dbg;dbg()
if type(ret) != str:
ret = ret.decode()
return ret # leave as is
return re.sub("&#?\w+;", fixup, str(text))

View File

@@ -327,8 +327,8 @@ def filter_list(episodelist, action=None, path=None):
# Make Language List
for episode in episodelist:
if not episode.contentLanguage: episode.contentLanguage = 'ITA'
if type(episode.contentLanguage) == list and episode.contentLanguage not in lang_list:
#lang_list = episode.contentLanguage
pass
else:
if episode.contentLanguage and episode.contentLanguage not in lang_list:
@@ -338,6 +338,7 @@ def filter_list(episodelist, action=None, path=None):
if sub not in sub_list: sub_list.append(sub)
else:
lang_list.append(episode.contentLanguage)
# add to Language List subtitled languages
if sub_list:
for sub in sub_list:

View File

@@ -119,7 +119,7 @@ def get_channel_url(findhostMethod=None, name=None, forceFindhost=False):
name = os.path.basename(frame[0].f_code.co_filename).replace('.py', '')
if findhostMethod:
url = jsontools.get_node_from_file(name, 'url')
if not url or 'web.archive.org' in url or forceFindhost: # per eliminare tutti i webarchive salvati causa bug httptools CF, eliminare in futuro
if not url or forceFindhost:
url = findhostMethod(channels_data['findhost'][name])
jsontools.update_node(url, name, 'url')
return url
@@ -139,38 +139,6 @@ def get_system_platform():
return platform
def is_autorun_enabled():
try:
if "xbmc.executebuiltin('RunAddon(plugin.video.kod)')" in open(os.path.join(xbmc.translatePath('special://userdata'),'autoexec.py')).read():
return True
else:
return False
except:
# if error in reading from file autoexec doesnt exists
return False
def enable_disable_autorun(is_enabled):
# old method, now using service.py
path = os.path.join(xbmc.translatePath('special://userdata'),'autoexec.py')
append_write = 'a' if os.path.exists(path) else 'w'
if is_enabled is False:
with open(path, append_write) as file:
file.write("import xbmc\nxbmc.executebuiltin('RunAddon(plugin.video.kod)')")
set_setting('autostart', 'On')
else:
file = open(path, "r")
old_content = file.read()
new_content = old_content.replace("xbmc.executebuiltin('RunAddon(plugin.video.kod)')", "")
file.close()
with open(path, "w") as file:
file.write(new_content)
set_setting('autostart', True)
return True
def get_all_settings_addon():
# Read the settings.xml file and return a dictionary with {id: value}
from core import scrapertools

View File

@@ -151,10 +151,6 @@ def run(item=None):
import urllib
short = urllib.urlopen('https://u.nu/api.php?action=shorturl&format=simple&url=' + item.url).read().decode('utf-8')
platformtools.dialog_ok(config.get_localized_string(20000), config.get_localized_string(70740) % short)
# Action in certain channel specified in "action" and "channel" parameters
elif item.action == "check_channels":
from platformcode import checkhost
checkhost.check_channels()
else:
# Checks if channel exists
if os.path.isfile(os.path.join(config.get_runtime_path(), 'channels', item.channel + ".py")):
@@ -461,7 +457,8 @@ def play_from_library(item):
# Modify the action (currently the video library needs "findvideos" since this is where the sources are searched
item.action = "findvideos"
window_type = config.get_setting("window_type", "videolibrary")
window_type = config.get_setting("window_type", "videolibrary") if config.get_setting('next_ep') < 3 and item.contentType != 'movie' else 1
# and launch kodi again
if xbmc.getCondVisibility('Window.IsMedia') and not window_type == 1:
xbmc.executebuiltin("Container.Update(" + sys.argv[0] + "?" + item.tourl() + ")")

View File

@@ -294,6 +294,10 @@ def render_items(itemlist, parent_item):
"""
Function used to render itemlist on kodi
"""
# if it's not a list, do nothing
if not isinstance(itemlist, list):
return
logger.debug('START render_items')
thumb_type = config.get_setting('video_thumbnail_type')
from platformcode import shortcuts
@@ -309,9 +313,6 @@ def render_items(itemlist, parent_item):
check_sf = os.path.exists(sf_file_path)
superfavourites = check_sf and xbmc.getCondVisibility('System.HasAddon("plugin.program.super.favourites")')
# if it's not a list, do nothing
if not isinstance(itemlist, list):
return
# if there's no item, add "no elements" item
if not len(itemlist):
itemlist.append(Item(title=config.get_localized_string(60347), thumbnail=get_thumb('nofolder.png')))
@@ -583,7 +584,7 @@ def set_context_commands(item, item_url, parent_item, **kwargs):
if item.infoLabels['tmdb_id'] or item.infoLabels['imdb_id'] or item.infoLabels['tvdb_id']:
context_commands.append(("InfoPlus", "RunPlugin(%s?%s&%s)" % (sys.argv[0], item_url, 'channel=infoplus&action=Main&from_channel=' + item.channel)))
# Go to the Main Menu (channel.mainlist)
# Open in browser and previous menu
if parent_item.channel not in ["news", "channelselector", "downloads", "search"] and item.action != "mainlist" and not parent_item.noMainMenu:
if parent_item.action != "mainlist":
context_commands.insert(0, (config.get_localized_string(60349), "Container.Refresh (%s?%s)" % (sys.argv[0], Item(channel=item.channel, action="mainlist").tourl())))
@@ -609,8 +610,10 @@ def set_context_commands(item, item_url, parent_item, **kwargs):
else:
mediatype = item.contentType
context_commands.append((config.get_localized_string(60350), "Container.Update (%s?%s&%s)" % (sys.argv[0], item_url, urllib.urlencode({'channel': 'search', 'action': "from_context", 'from_channel': item.channel, 'contextual': True, 'text': item.wanted}))))
if config.get_setting('new_search'):
context_commands.append((config.get_localized_string(60350), "RunPlugin (%s?%s&%s)" % (sys.argv[0], item_url, urllib.urlencode({'channel': 'search', 'action': "from_context", 'from_channel': item.channel, 'contextual': True}))))
else:
context_commands.append((config.get_localized_string(60350), "Container.Refresh (%s?%s&%s)" % (sys.argv[0], item_url, urllib.urlencode({'channel': 'search', 'action': "from_context", 'from_channel': item.channel, 'contextual': True, 'text': item.wanted}))))
context_commands.append( (config.get_localized_string(70561), "Container.Update (%s?%s&%s)" % (sys.argv[0], item_url, 'channel=search&action=from_context&search_type=list&page=1&list_type=%s/%s/similar' % (mediatype, item.infoLabels['tmdb_id']))))
if item.channel != "videolibrary" and item.videolibrary != False:
@@ -642,7 +645,7 @@ def set_context_commands(item, item_url, parent_item, **kwargs):
# Search trailer...
if (item.contentTitle and item.contentType in ['movie', 'tvshow']) or "buscar_trailer" in context:
context_commands.append((config.get_localized_string(60359), "RunPlugin(%s?%s)" % (sys.argv[0], urllib.urlencode({ 'channel': "trailertools", 'action': "buscartrailer", 'search_title': item.contentTitle if item.contentTitle else item.fulltitle, 'contextual': True}))))
context_commands.append((config.get_localized_string(60359), "RunPlugin(%s?%s&%s)" % (sys.argv[0], item_url, urllib.urlencode({ 'channel': "trailertools", 'action': "buscartrailer", 'search_title': item.contentTitle if item.contentTitle else item.fulltitle, 'contextual': True}))))
if kwargs.get('superfavourites'):
context_commands.append((config.get_localized_string(60361), "RunScript(special://home/addons/plugin.program.super.favourites/LaunchSFMenu.py)"))
@@ -936,8 +939,8 @@ def get_dialogo_opciones(item, default_action, strm, autoplay):
if default_action == 3:
seleccion = len(opciones) - 1
# Search for trailers on youtube
if item.channel not in ["Trailer", "ecarteleratrailers"]:
# Search for trailers
if item.channel not in ["trailertools"]:
# "Search Trailer"
opciones.append(config.get_localized_string(30162))
@@ -1094,6 +1097,7 @@ def set_player(item, xlistitem, mediaurl, view, strm):
playlist.add(mediaurl, xlistitem)
# Reproduce
xbmc_player.play(playlist, xlistitem)
add_next_to_playlist(item)
if config.get_setting('trakt_sync'):
from core import trakt_tools
@@ -1129,6 +1133,29 @@ def set_player(item, xlistitem, mediaurl, view, strm):
xbmcgui.Window(12005).show()
def add_next_to_playlist(item):
import threading
from core import filetools, videolibrarytools
from platformcode import xbmc_videolibrary
def add_to_playlist(item):
if item.contentType != 'movie' and item.strm_path:
next= xbmc_videolibrary.next_ep(item)
if next:
next.back = True
nfo_path = filetools.join(config.get_videolibrary_path(), config.get_setting("folder_tvshows"), next.strm_path.replace('strm','nfo'))
if nfo_path and filetools.isfile(nfo_path):
head_nfo, item_nfo = videolibrarytools.read_nfo(nfo_path)
nextItem = xbmcgui.ListItem(path=item_nfo.url)
nextItem.setArt({"thumb": item_nfo.contentThumbnail if item_nfo.contentThumbnail else item_nfo.thumbnail})
set_infolabels(nextItem, item_nfo, True)
nexturl = "plugin://plugin.video.kod/?" + next.tourl()
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
playlist.add(nexturl, nextItem)
add_to_playlist(next)
if item.contentType != 'movie' and config.get_setting('next_ep') == 3:
threading.Thread(target=add_to_playlist, args=[item]).start()
def torrent_client_installed(show_tuple=False):
# External plugins found in servers / torrent.json node clients
from core import filetools
@@ -1216,7 +1243,8 @@ def resume_playback(played_time):
if action in self.action_exitkeys_id:
self.set_values(False)
self.close()
if played_time:
if played_time and played_time > 30:
Dialog = ResumePlayback('ResumePlayback.xml', config.get_runtime_path(), played_time=played_time)
Dialog.show()
t = 0

View File

@@ -25,8 +25,6 @@ def mark_auto_as_watched(item):
logger.debug()
actual_time = 0
total_time = 0
# logger.debug("item:\n" + item.tostring('\n'))
# if item.options['continue']: item.played_time = platformtools.resume_playback(platformtools.get_played_time(item))
time_limit = time.time() + 30
while not platformtools.is_playing() and time.time() < time_limit:
@@ -40,7 +38,7 @@ def mark_auto_as_watched(item):
percentage = float(config.get_setting("watched_setting")) / 100
time_from_end = config.get_setting('next_ep_seconds')
if item.contentType != 'movie' and config.get_setting('next_ep'):
if item.contentType != 'movie' and config.get_setting('next_ep') < 3:
next_dialogs = ['NextDialog.xml', 'NextDialogExtended.xml', 'NextDialogCompact.xml']
next_ep_type = config.get_setting('next_ep_type')
ND = next_dialogs[next_ep_type]
@@ -49,8 +47,10 @@ def mark_auto_as_watched(item):
logger.debug(next_episode)
while platformtools.is_playing():
actual_time = xbmc.Player().getTime()
total_time = xbmc.Player().getTotalTime()
try: actual_time = xbmc.Player().getTime()
except: pass
try: total_time = xbmc.Player().getTotalTime()
except: pass
if item.played_time and xbmcgui.getCurrentWindowId() == 12005:
xbmc.Player().seekTime(item.played_time)
item.played_time = 0 # Fix for Slow Devices
@@ -75,7 +75,6 @@ def mark_auto_as_watched(item):
# check for next Episode
if next_episode and sync and time_from_end >= difference:
nextdialog = NextDialog(ND, config.get_runtime_path())
nextdialog.show()
while platformtools.is_playing() and not nextdialog.is_exit():
xbmc.sleep(100)
if nextdialog.continuewatching:
@@ -83,10 +82,9 @@ def mark_auto_as_watched(item):
xbmc.Player().stop()
nextdialog.close()
break
xbmc.sleep(1000)
# if item.options['continue']:
if 10 < actual_time < mark_time:
if actual_time < mark_time:
item.played_time = actual_time
else: item.played_time = 0
platformtools.set_played_time(item)
@@ -96,13 +94,16 @@ def mark_auto_as_watched(item):
while platformtools.is_playing():
xbmc.sleep(100)
if not show_server and item.play_from != 'window' and not item.no_return:
xbmc.sleep(700)
xbmc.executebuiltin('Action(ParentDir)')
xbmc.sleep(500)
if next_episode and next_episode.next_ep and config.get_setting('next_ep') == 1:
from platformcode.launcher import play_from_library
play_from_library(next_episode)
# db need to be closed when not used, it will cause freezes
from core import db
db.close()
@@ -1368,7 +1369,6 @@ def next_ep(item):
return item
class NextDialog(xbmcgui.WindowXMLDialog):
item = None
cancel = False
@@ -1393,6 +1393,7 @@ class NextDialog(xbmcgui.WindowXMLDialog):
self.setProperty("next_img", img)
self.setProperty("title", info["tvshowtitle"])
self.setProperty("ep_title", "%dx%02d - %s" % (info["season"], info["episode"], info["title"]))
self.doModal()
def set_exit(self, EXIT):
self.EXIT = EXIT

View File

@@ -1425,7 +1425,7 @@ msgid "The unzipped %s file already exists, or you want to overwrite it.?"
msgstr ""
msgctxt "#60305"
msgid ""
msgid "[COLOR red]Do not open this link[/COLOR]"
msgstr ""
msgctxt "#60306"
@@ -1437,7 +1437,7 @@ msgid "Use 'Preferences' to change your password"
msgstr ""
msgctxt "#60308"
msgid ""
msgid "[COLOR red]It's one use, so write it[/COLOR]"
msgstr ""
msgctxt "#60309"
@@ -1541,7 +1541,7 @@ msgid "Configuration"
msgstr ""
msgctxt "#60334"
msgid ""
msgid "Log file too large. Restart Kodi and retry"
msgstr ""
msgctxt "#60335"
@@ -4809,7 +4809,7 @@ msgid "Notification(Update Kodi to its latest version, for best info,8000, 'http
msgstr ""
msgctxt "#70501"
msgid "Search did not match (%s)"
msgid "Search did not match"
msgstr ""
msgctxt "#70502"
@@ -4841,11 +4841,11 @@ msgid "Search in Mymovies"
msgstr ""
msgctxt "#70510"
msgid "Manual Search in Youtube"
msgid "Manual Search"
msgstr ""
msgctxt "#70511"
msgid "Manual Search in Mymovies"
msgid ""
msgstr ""
msgctxt "#70512"
@@ -4853,11 +4853,11 @@ msgid "Searching in Mymovies"
msgstr ""
msgctxt "#70513"
msgid "Manual Searching in Filmaffinity"
msgid ""
msgstr ""
msgctxt "#70514"
msgid "Manual Search in Jayhap"
msgid ""
msgstr ""
msgctxt "#70515"
@@ -6129,11 +6129,15 @@ msgstr ""
msgctxt "#70832"
msgid "Disabled"
msgstr "Disabilitato"
msgstr ""
msgctxt "#70833"
msgid "Automatic"
msgstr "Automatico"
msgstr ""
msgctxt "#70834"
msgid "Playlist"
msgstr ""
# DNS start [ settings and declaration ]
msgctxt "#707401"

View File

@@ -1424,8 +1424,8 @@ msgid "The unzipped %s file already exists, or you want to overwrite it.?"
msgstr "il file %s da decomprimere esiste già, vuoi sovrascriverlo?"
msgctxt "#60305"
msgid ""
msgstr ""
msgid "[COLOR red]Do not open this link[/COLOR]"
msgstr "[COLOR red]NON aprire questo link[/COLOR]"
msgctxt "#60306"
msgid "The fields 'New password' and 'Confirm new password' do not match"
@@ -1436,8 +1436,8 @@ msgid "Use 'Preferences' to change your password"
msgstr "Entra in 'Preferenze' per cambiare la password"
msgctxt "#60308"
msgid ""
msgstr ""
msgid "[COLOR red]It's one use, so write it[/COLOR]"
msgstr "[COLOR red]Funziona una volta sola, quindi scrivilo[/COLOR]"
msgctxt "#60309"
msgid "The password is not correct."
@@ -1540,8 +1540,8 @@ msgid "Configuration"
msgstr "Configurazione"
msgctxt "#60334"
msgid ""
msgstr ""
msgid "Log file too large. Restart Kodi and retry"
msgstr "File di log troppo pesante. Riapri kodi e riprova"
msgctxt "#60335"
msgid "Watch in..."
@@ -4808,8 +4808,8 @@ msgid "Notification(Update Kodi to its latest version, for best info,8000, 'http
msgstr "Notification(Aggiorna Kodi alla sua ultima versione, per migliori info,8000, 'http://i.imgur.com/mHgwcn3.png')"
msgctxt "#70501"
msgid "Search did not match (%s)"
msgstr "La ricerca non ha dato risultati (%s)"
msgid "Search did not match"
msgstr "La ricerca non ha dato risultati"
msgctxt "#70502"
msgid ">> Next"
@@ -4840,24 +4840,24 @@ msgid "Search in Mymovies"
msgstr "Ricerca in Mymovies"
msgctxt "#70510"
msgid "Manual Search in Youtube"
msgstr "Ricerca Manuale in Youtube"
msgid "Manual Search"
msgstr "Ricerca Manuale"
msgctxt "#70511"
msgid "Manual Search in Mymovies"
msgstr "Ricerca Manuale in Mymovies"
msgid ""
msgstr ""
msgctxt "#70512"
msgid "Searching in Mymovies"
msgstr "Ricerca in Mymovies"
msgctxt "#70513"
msgid "Manual Searching in Filmaffinity"
msgstr "Ricerca Manuale in Filmaffinity"
msgid ""
msgstr ""
msgctxt "#70514"
msgid "Manual Search in Jayhap"
msgstr "Ricerca Manuale in Jayhap"
msgid ""
msgstr ""
msgctxt "#70515"
msgid "Completed %s"
@@ -6136,6 +6136,10 @@ msgctxt "#70833"
msgid "Automatic"
msgstr "Automatico"
msgctxt "#70834"
msgid "Playlist"
msgstr "Playlist"
# DNS start [ settings and declaration ]
msgctxt "#707401"
msgid "Enable DNS check alert"

View File

@@ -39,7 +39,7 @@
<setting id="videolibrary_kodi" type="bool" label="70120" default="false"/>
<setting label="59997" type="lsep"/>
<setting id="videolibrary_max_quality" type="bool" label="70729" default="false" visible="true"/>
<setting id="next_ep" type="select" label="70748" lvalues="70832|70833|70732" default="1"/>
<setting id="next_ep" type="select" label="70748" lvalues="70832|70833|70732|70834" default="1"/>
<setting id="next_ep_type" type="select" label="70754" lvalues="70755|70756|70757" default="0" visible="!eq(-1,0)" subsetting="true"/>
<setting id="next_ep_seconds" type="slider" option="int" range="20,10,240" label="70749" default="40" visible="!eq(-2,0)" subsetting="true"/>
<setting id="watched_setting" type="slider" option="int" range="20,5,90" label="60634" default="80"/>

View File

@@ -1,5 +1,5 @@
{
"active": true,
"active": false,
"find_videos": {
"ignore_urls": [],
"patterns": [

View File

@@ -12,7 +12,7 @@ def test_video_exists(page_url):
logger.debug("(page_url='%s')" % page_url)
global data
data = httptools.downloadpage(page_url).data
if "<h2>WE ARE SORRY</h2>" in data or '<title>404 Not Found</title>' in data:
if "as it expired or has been deleted" in data:
return False, config.get_localized_string(70449) % "UPstream"
return True, ""

View File

@@ -16,24 +16,14 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.debug("url=" + page_url)
video_urls = []
global data
return support.get_jwplayer_mediaurl(data, 'VUP')
# patron = r'sources:\s*\[\{src:\s*"([^"]+)"'
# matches = scrapertools.find_multiple_matches(data, patron)
# if not matches:
# data = scrapertools.find_single_match(data, r"<script type='text/javascript'>(eval.function.p,a,c,k,e,.*?)\s*</script>")
# if data:
# from lib import jsunpack
# data = jsunpack.unpack(data)
# matches = scrapertools.find_multiple_matches(data, patron)
# for url in matches:
# quality = 'm3u8'
# video_url = url
# if 'label' in url:
# url = url.split(',')
# video_url = url[0]
# quality = url[1].replace('label:','')
# video_urls.append(['VUP Player [%s]' % quality, video_url.replace(',','')])
# return video_urls
matches = support.get_jwplayer_mediaurl(data, 'VUP')
if not matches:
data = scrapertools.find_single_match(data, r"<script type='text/javascript'>(eval.function.p,a,c,k,e,.*?)\s*</script>")
if data:
from lib import jsunpack
data = jsunpack.unpack(data)
matches = support.get_jwplayer_mediaurl(data, 'VUP')
return matches

View File

@@ -458,9 +458,6 @@ if __name__ == "__main__":
if config.get_setting('autostart'):
xbmc.executebuiltin('RunAddon(plugin.video.' + config.PLUGIN_NAME + ')')
# handling old autoexec method
if config.is_autorun_enabled():
config.enable_disable_autorun(True)
# port old db to new
old_db_name = filetools.join(config.get_data_path(), "kod_db.sqlite")
if filetools.isfile(old_db_name):

View File

@@ -343,6 +343,11 @@ class SearchWindow(xbmcgui.WindowXML):
and self.item.infoLabels['year']:
logger.debug('retring adding year on channel ' + channel)
dummy, valid, dummy = search(self.item.text + " " + str(self.item.infoLabels['year']))
# some channels may use original title
if self.item.mode != 'all' and not valid and self.item.infoLabels.get('originaltitle'):
logger.debug('retring with original title on channel ' + channel)
dummy, valid, dummy = search(self.item.infoLabels.get('originaltitle'))
except:
pass

View File

@@ -749,6 +749,10 @@ def from_context(item):
else:
return
if config.get_setting('new_search'):
from specials import globalsearch
return globalsearch.Search(item)
if 'list_type' not in item:
if 'wanted' in item:
item.title = item.wanted

View File

@@ -836,9 +836,15 @@ def report_menu(item):
action = 'call_browser'
url = item.url
itemlist.append(Item(channel=item.channel, action=action,
title="**- LOG: [COLOR gold]%s[/COLOR] -**" % item.url, url=url,
title="LOG: [COLOR gold]%s[/COLOR]" % item.url, url=url,
thumbnail=thumb_next, unify=False, folder=False))
if item.one_use:
itemlist.append(Item(channel=item.channel, action="",
title=config.get_localized_string(60305),
thumbnail=thumb_next, folder=False))
itemlist.append(Item(channel=item.channel, action="",
title=config.get_localized_string(60308),
thumbnail=thumb_next, folder=False))
itemlist.append(Item(channel=item.channel, action="call_browser",
title="su Github (raccomandato)", url='https://github.com/kodiondemand/addon/issues',
thumbnail=thumb_next,
@@ -847,14 +853,6 @@ def report_menu(item):
url='https://t.me/kodiondemand', title="Su telegram",
thumbnail=thumb_next, unify=False, folder=False))
if item.one_use:
itemlist.append(Item(channel=item.channel, action="",
title="[COLOR orange]NO ACCEDA al INFORME: se BORRARÁ[/COLOR]",
thumbnail=thumb_next, folder=False))
itemlist.append(Item(channel=item.channel, action="",
title="[COLOR orange]ya que es de un solo uso[/COLOR]",
thumbnail=thumb_next, folder=False))
return itemlist
@@ -917,33 +915,34 @@ def report_send(item, description='', fatal=False):
# directly on the forum. If it is a size problem, you are asked to reset Kodi and redo the fault, to
# that the LOG is smaller.
pastebin_list = {
'hastebin': ('1', 'https://hastebin.com/', 'documents', 'random', '', '',
'data', 'json', 'key', '', '0.29', '10', True, 'raw/', '', ''),
'dpaste': ('1', 'http://dpaste.com/', 'api/v2/', 'random', 'content=',
'&syntax=text&title=%s&poster=alfa&expiry_days=7',
'headers', '', '', 'location', '0.23', '15', True, '', '.txt', ''),
'ghostbin': ('1', 'https://ghostbin.com/', 'paste/new', 'random', 'lang=text&text=',
'&expire=2d&password=&title=%s',
'data', 'regex', '<title>(.*?)\s*-\s*Ghostbin<\/title>', '',
'0.49', '15', False, 'paste/', '', ''),
'write.as': ('1', 'https://write.as/', 'api/posts', 'random', 'body=', '&title=%s',
'data', 'json', 'data', 'id', '0.018', '15', True, '', '', ''),
'oneclickpaste': ('1', 'http://oneclickpaste.com/', 'index.php', 'random', 'paste_data=',
'&title=%s&format=text&paste_expire_date=1W&visibility=0&pass=&submit=Submit',
'data', 'regex', '<a class="btn btn-primary" href="[^"]+\/(\d+\/)">\s*View\s*Paste\s*<\/a>',
'', '0.060', '5', True, '', '', ''),
'bpaste': ('1', 'https://bpaste.net/', '', 'random', 'code=', '&lexer=text&expiry=1week',
'data', 'regex', 'View\s*<a\s*href="[^*]+/(.*?)">raw<\/a>', '',
'0.79', '15', True, 'raw/', '', ''),
'dumpz': ('0', 'http://dumpz.org/', 'api/dump', 'random', 'code=', '&lexer=text&comment=%s&password=',
'headers', '', '', 'location', '0.99', '15', False, '', '', ''),
'file.io': ('1', 'https://file.io/', '', 'random', '', 'expires=1w',
'requests', 'json', 'key', '', '99.0', '30', False, '', '.log', ''),
'uploadfiles': ('1', 'https://up.uploadfiles.io/upload', '', 'random', '', '',
'requests', 'json', 'url', '', '99.0', '30', False, None, '', '')
}
'hastebin': ('1', 'https://hastebin.com/', 'documents', 'random', '', '',
'data', 'json', 'key', '', '0.29', '10', True, 'raw/', '', ''),
'dpaste': ('1', 'http://dpaste.com/', 'api/v2/', 'random', 'content=',
'&syntax=text&title=%s&poster=alfa&expiry_days=7',
'headers', '', '', 'location', '0.23', '15', True, '', '.txt', ''),
'ghostbin': ('1', 'https://ghostbin.com/', 'paste/new', 'random', 'lang=text&text=',
'&expire=2d&password=&title=%s',
'data', 'regex', '<title>(.*?)\s*-\s*Ghostbin<\/title>', '',
'0.49', '15', False, 'paste/', '', ''),
'write.as': ('1', 'https://write.as/', 'api/posts', 'random', 'body=', '&title=%s',
'data', 'json', 'data', 'id', '0.018', '15', True, '', '', ''),
'oneclickpaste': ('1', 'http://oneclickpaste.com/', 'index.php', 'random', 'paste_data=',
'&title=%s&format=text&paste_expire_date=1W&visibility=0&pass=&submit=Submit',
'data', 'regex', '<a class="btn btn-primary" href="[^"]+\/(\d+\/)">\s*View\s*Paste\s*<\/a>',
'', '0.060', '5', True, '', '', ''),
'bpaste': ('1', 'https://bpaste.net/', '', 'random', 'code=', '&lexer=text&expiry=1week',
'data', 'regex', 'View\s*<a\s*href="[^*]+/(.*?)">raw<\/a>', '',
'0.79', '15', True, 'raw/', '', ''),
'dumpz': ('0', 'http://dumpz.org/', 'api/dump', 'random', 'code=', '&lexer=text&comment=%s&password=',
'headers', '', '', 'location', '0.99', '15', False, '', '', ''),
'file.io': ('1', 'https://file.io/', '', 'random', '', 'expires=1w',
'requests', 'json', 'key', '', '99.0', '30', False, '', '', ''),
'uploadfiles': ('0', 'https://up.ufile.io/v1/upload', '', 'random', '', '',
'curl', 'json', 'url', '', '99.0', '30', False, None, '', {'Referer': 'https://ufile.io/'}),
'anonfiles': ('1', 'https://api.anonfiles.com/upload', 'upload', 'random', '', '',
'requests', 'json', 'data', 'file,url,short', '99.0', '30', False, None, '', '')
}
pastebin_list_last = ['hastebin', 'ghostbin', 'file.io'] # We leave these services the last
pastebin_one_use = ['file.io'] # Single-use servers and deletes
pastebin_dir = []
@@ -994,7 +993,7 @@ def report_send(item, description='', fatal=False):
random.shuffle(pastebin_dir)
pastebin_dir.extend(pastebin_list_last) # We leave these services the last
#pastebin_dir = ['uploadfiles'] # For testing a service
# pastebin_dir = ['file.io'] # For testing a service
#log_data = 'TEST FOR SERVICE TESTS'
# The list of "pastebin" servers is scrolled to locate an active one, with capacity and availability
@@ -1018,7 +1017,7 @@ def report_send(item, description='', fatal=False):
paste_file_size = float(pastebin_list[paste_name][10]) # Server capacity in MB
if paste_file_size > 0: # If it is 0, the capacity is unlimited
if log_size > paste_file_size: # Capacity and size verification
msg = 'Log file too large. Restart Kodi and retry'
msg = config.get_localized_string(60334)
continue
paste_timeout = int(pastebin_list[paste_name][11]) # Timeout for the server
paste_random_headers = pastebin_list[paste_name][12] # Do you use RAMDOM headers to mislead the serv?
@@ -1029,15 +1028,12 @@ def report_send(item, description='', fatal=False):
paste_headers.update(jsontools.load((pastebin_list[paste_name][15])))
if paste_name in pastebin_one_use:
pastebin_one_use_msg = 'DO NOT ACCESS THE REPORT: it will be DELETED'
item.one_use = True
else:
pastebin_one_use_msg = ''
try:
# POST is created with server options "pastebin"
# This is the "requests" format
if paste_type == 'requests':
if paste_type in ['requests', 'curl']:
paste_file = {'file': (paste_title+'.log', log_data)}
if paste_post1:
paste_file.update(paste_post1)
@@ -1079,8 +1075,50 @@ def report_send(item, description='', fatal=False):
data = httptools.downloadpage(paste_host, params=paste_params, file=log_data,
file_name=paste_title+'.log', timeout=paste_timeout,
random_headers=paste_random_headers, headers=paste_headers)
elif paste_type == 'curl':
paste_sufix = '/create_session'
data_post = {'file_size': len(log_data)}
logger.error(data_post)
data = httptools.downloadpage(paste_host+paste_sufix, params=paste_params,
ignore_response_code=True, post=data_post, timeout=paste_timeout, alfa_s=True,
random_headers=paste_random_headers, headers=paste_headers).data
data = jsontools.load(data)
if not data.get("fuid", ""):
logger.error("fuid: %s" % str(data))
raise
fuid = data["fuid"]
paste_sufix = '/chunk'
log_data_chunks = log_data
i = 0
chunk_len = 1024
while len(log_data_chunks) > 0:
i += 1
chunk = log_data_chunks[:chunk_len]
log_data_chunks = log_data_chunks[chunk_len:]
data_post = {'fuid': fuid, 'chunk_index': i}
data = httptools.downloadpage(paste_host+paste_sufix, params=paste_params, file=chunk, alfa_s=True,
ignore_response_code=True, post=data_post, timeout=paste_timeout, CF_test=False,
random_headers=paste_random_headers, headers=paste_headers).data
if not 'successful' in data:
logger.error("successful: %s" % str(data))
raise
data = {}
paste_sufix = '/finalise'
data_post = {'fuid': fuid, 'total_chunks': i, 'file_name': paste_title+'.log', 'file_type': 'doc'}
resp = httptools.downloadpage(paste_host+paste_sufix, params=paste_params,
ignore_response_code=True, post=data_post, timeout=paste_timeout,
random_headers=paste_random_headers, headers=paste_headers)
if not resp.data:
logger.error("resp.content: %s" % str(resp.data))
raise
data['data'] = resp.data
data = type('HTTPResponse', (), data)
except:
msg = 'Inténtelo más tarde'
msg = 'Try later'
logger.error('Failed to save report. ' + msg)
logger.error(traceback.format_exc())
continue
@@ -1093,16 +1131,20 @@ def report_send(item, description='', fatal=False):
paste_host_return = ''
# Responses to REQUESTS requests
if paste_type == 'requests': # Response of request type "requests"?
if paste_type in ['requests', 'curl']: # Response of request type "requests"?
if paste_resp == 'json': # Answer in JSON format?
if paste_resp_key in data.data:
if not paste_url:
key = jsontools.load(data.data)[paste_resp_key] # with a label
else:
key = jsontools.load(data.data)[paste_resp_key][paste_url] # with two nested tags
item.url = "%s%s%s" % (paste_host_resp+paste_host_return, key,
paste_host_return_tail)
else:
key = jsontools.load(data.data)[paste_resp_key]
if paste_url and key: # hay etiquetas adicionales?
try:
for key_part in paste_url.split(','):
key = key[key_part] # por cada etiqueta adicional
except:
key = ''
if key:
item.url = "%s%s%s" % (paste_host_resp+paste_host_return, key,
paste_host_return_tail)
if not key:
logger.error('ERROR in data return format. data.data=' + str(data.data))
continue

View File

@@ -1,36 +1,40 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------------
# Search trailers from youtube, filmaffinity, mymovies, vimeo, etc...
# Search trailers from tmdb, youtube and mymovies...
# --------------------------------------------------------------------------------
from __future__ import division
#from builtins import str
# from builtins import str
import sys
import xbmcaddon
from channelselector import get_thumb
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
from past.utils import old_div
if PY3:
#from future import standard_library
#standard_library.install_aliases()
import urllib.parse as urllib # It is very slow in PY2. In PY3 it is native
# from future import standard_library
# standard_library.install_aliases()
import urllib.parse as urllib # It is very slow in PY2. In PY3 it is native
import urllib.parse as urlparse
from concurrent import futures
else:
import urllib # We use the native of PY2 which is faster
import urllib # We use the native of PY2 which is faster
import urlparse
from concurrent_py2 import futures
import re
from core import httptools, jsontools, scrapertools, servertools
from core import httptools, scrapertools, servertools
from core.support import match, thumb
from core.item import Item
from platformcode import config, logger
from platformcode import config, logger, launcher
from platformcode import platformtools
info_language = ["it", "en", "es", "fr", "de", "pt"] # from videolibrary.json
info_language = ["de", "en", "es", "fr", "it", "pt"] # from videolibrary.json
def_lang = info_language[config.get_setting("info_language", "videolibrary")]
result = None
@@ -44,10 +48,14 @@ else:
def buscartrailer(item, trailers=[]):
logger.debug()
if item.contentType != "movie":
tipo = "tv"
else:
tipo = "movie"
# List of actions if run from context menu
if item.action == "manual_search" and item.contextual:
itemlist = manual_search(item)
itemlist = manual_search(item, tipo)
item.contentTitle = itemlist[0].contentTitle
elif 'search' in item.action and item.contextual:
itemlist = globals()[item.action](item)
@@ -67,7 +75,8 @@ def buscartrailer(item, trailers=[]):
item.contentTitle = item.contentTitle.strip()
elif keyboard:
contentTitle = re.sub(r'\[\/*(B|I|COLOR)\s*[^\]]*\]', '', item.contentTitle.strip())
item.contentTitle = platformtools.dialog_input(default=contentTitle, heading=config.get_localized_string(70505))
item.contentTitle = platformtools.dialog_input(default=contentTitle,
heading=config.get_localized_string(70505))
if item.contentTitle is None:
item.contentTitle = contentTitle
else:
@@ -87,32 +96,28 @@ def buscartrailer(item, trailers=[]):
title, url, server = servertools.findvideos(url)[0]
title = "Trailer [" + server + "]"
itemlist.append(item.clone(title=title, url=url, server=server, action="play"))
if item.show or item.infoLabels['tvshowtitle'] or item.contentType != "movie":
tipo = "tv"
else:
tipo = "movie"
try:
if not trailers:
itemlist.extend(tmdb_trailers(item, tipo))
else:
for trailer in trailers:
title = trailer['name'] + " [" + trailer['size'] + "p] (" + trailer['language'].replace("en", "ING").replace("it", "ITA") + ") [tmdb/youtube]"
itemlist.append(item.clone(action="play", title=title, url=trailer['url'], server="youtube"))
for trailer in trailers:
title = trailer['name'] + " [" + trailer['size'] + "p] (" + trailer['language'].replace("en",
"ING").replace(
"it", "ITA") + ") [tmdb/youtube]"
itemlist.append(item.clone(action="play", title=title, url=trailer['url'], server="youtube"))
except:
import traceback
logger.error(traceback.format_exc())
if item.contextual: title = "%s"
else: title = "%s"
itemlist.append(item.clone(title=title % config.get_localized_string(70507), action="youtube_search", thumbnail=thumb('search')))
itemlist.append(item.clone(title=title % config.get_localized_string(70508), action="mymovies_search", thumbnail=thumb('search')))
itemlist.append(item.clone(title=title % config.get_localized_string(70024), action="filmaffinity_search", thumbnail=thumb('search')))
multi_search(item, itemlist, tipo)
if not itemlist:
itemlist.append(item.clone(title=config.get_localized_string(70501), title2=item.contentTitle,
action="", thumbnail=get_thumb('nofolder.png'), text_color=""))
from lib.fuzzy_match import algorithims
itemlist.sort(key=lambda r: algorithims.trigram(item.contentTitle + ' trailer', r.title), reverse=True)
if item.contextual:
global window_select, result
select = Select("DialogSelect.xml", config.get_runtime_path(), item=item, itemlist=itemlist, caption=config.get_localized_string(70506) + item.contentTitle)
select = Select("DialogSelect.xml", config.get_runtime_path(), item=item, itemlist=itemlist,
caption=config.get_localized_string(70506) + item.contentTitle)
window_select.append(select)
select.doModal()
@@ -121,16 +126,24 @@ def buscartrailer(item, trailers=[]):
return itemlist
def manual_search(item):
def multi_search(item, itemlist, tipo):
ris = []
with futures.ThreadPoolExecutor() as executor:
ris.append(executor.submit(mymovies_search, item))
ris.append(executor.submit(youtube_search, item))
ris.append(executor.submit(tmdb_trailers, item, tipo))
for r in futures.as_completed(ris):
itemlist.extend(r.result())
def manual_search(item, tipo):
logger.debug()
itemlist = []
texto = platformtools.dialog_input(default=item.contentTitle, heading=config.get_localized_string(30112))
if texto is not None:
if item.extra == "mymovies":
return mymovies_search(item.clone(contentTitle=texto))
elif item.extra == "youtube":
return youtube_search(item.clone(contentTitle=texto, page=""))
elif item.extra == "filmaffinity":
return filmaffinity_search(item.clone(contentTitle=texto, page="", year=""))
multi_search(item.clone(contentTitle=texto), itemlist, tipo)
return itemlist
def tmdb_trailers(item, tipo="movie"):
@@ -145,9 +158,18 @@ def tmdb_trailers(item, tipo="movie"):
tmdb_search = Tmdb(texto_buscado=item.contentTitle, tipo=tipo, year=item.infoLabels['year'])
if tmdb_search:
for result in tmdb_search.get_videos():
title = result['name'] + " [" + result['size'] + "p] (" + result['language'].replace("en", "ING").replace("it", "ITA") + ") [tmdb/youtube]"
itemlist.append(item.clone(action="play", title=title, url=result['url'], server="youtube"))
for vid in tmdb_search.get_videos():
found = False
if vid['type'].lower() == 'trailer':
title = vid['name']
it = item.clone(action="play", title=title, title2="TMDB(youtube) - " + vid['language'].replace("en", "ING").replace("it", "ITA") + " [" + vid['size'] + "p]", url=vid['url'], server="youtube")
itemlist.append(it)
if vid['language'] == def_lang and not found: # play now because lang is correct and TMDB is trusted
found = True
launcher.run(it)
while platformtools.is_playing():
xbmc.sleep(100)
return itemlist
@@ -164,30 +186,25 @@ def youtube_search(item):
else:
title = urllib.quote(title)
title = title.replace("%20", "+")
data = httptools.downloadpage("https://www.youtube.com/results?sp=EgIQAQ%253D%253D&q=" + title).data
patron = r'thumbnails":\[\{"url":"(https://i.ytimg.com/vi[^"]+).*?'
data = httptools.downloadpage("https://www.youtube.com/results?sp=EgIQAQ%253D%253D&search_query=" + title).data
patron = r'thumbnails":\[\{"url":"(https://i.ytimg.com/vi[^"]+).*?'
patron += r'text":"([^"]+).*?'
patron += r'simpleText":"[^"]+.*?simpleText":"([^"]+).*?'
patron += r'url":"([^"]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, scrapedtitle, scrapedduration, scrapedurl in matches:
scrapedtitle = scrapedtitle if PY3 else scrapedtitle.decode('utf8').encode('utf8') + " (" + scrapedduration + ")"
scrapedtitle = scrapedtitle if PY3 else scrapedtitle.decode('utf8').encode('utf8')
if item.contextual:
scrapedtitle = "%s" % scrapedtitle
url = urlparse.urljoin('https://www.youtube.com/', scrapedurl)
itemlist.append(item.clone(title=scrapedtitle, action="play", server="youtube", url=url, thumbnail=scrapedthumbnail))
next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"[^>]+><span class="yt-uix-button-content">')
if next_page != "":
next_page = urlparse.urljoin("https://www.youtube.com", next_page)
itemlist.append(item.clone(title=config.get_localized_string(30992), action="youtube_search", extra="youtube", page=next_page, thumbnail=thumb('search'), text_color=""))
if not itemlist:
itemlist.append(item.clone(title=config.get_localized_string(70501) % title, action="", thumbnail="", text_color=""))
if keyboard:
if item.contextual:
title = "%s"
else:
title = "%s"
itemlist.append(item.clone(title=title % config.get_localized_string(70510), action="manual_search", thumbnail=thumb('search'), extra="youtube"))
itemlist.append(item.clone(title=scrapedtitle, title2='Youtube - ' + scrapedduration, action="play", server="youtube",
url=url, thumbnail=scrapedthumbnail))
# next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"[^>]+><span class="yt-uix-button-content">')
# if next_page != "":
# next_page = urlparse.urljoin("https://www.youtube.com", next_page)
# itemlist.append(item.clone(title=config.get_localized_string(30992), action="youtube_search", extra="youtube",
# page=next_page, thumbnail=thumb('search'), text_color=""))
return itemlist
@@ -197,146 +214,48 @@ def mymovies_search(item):
title = item.contentTitle
url = 'https://www.mymovies.it/ricerca/ricerca.php?limit=true&q=' + title
js = json.loads(httptools.downloadpage(url).data)['risultati']['film']['elenco']
try:
js = json.loads(httptools.downloadpage(url).data)['risultati']['film']['elenco']
except:
return []
itemlist = []
for it in js:
itemlist.append(item.clone(title=it['titolo'], thumbnail=it['immagine'].replace('\\',''), url=it['url'].replace('\\',''), action ='search_links_mymovies'))
if not itemlist:
itemlist.append(item.clone(title=config.get_localized_string(70501), action="", thumbnail="", text_color=""))
if keyboard:
if item.contextual: title = "%s"
else: title = "%s"
itemlist.append(item.clone(title=title % config.get_localized_string(70511), action="manual_search", thumbnail=thumb('search'), extra="mymovies"))
with futures.ThreadPoolExecutor() as executor:
ris = [executor.submit(search_links_mymovies, item.clone(title=it['titolo'], title2='MYmovies', thumbnail=it['immagine'].replace('\\', ''), url=it['url'].replace('\\', ''))) for it in js]
for r in futures.as_completed(ris):
if r.result():
itemlist.append(r.result())
return itemlist
def search_links_mymovies(item):
global result
logger.debug()
trailer_url = match(item, patron=r'<li class="bottone_playlist"[^>]+><a href="([^"]+)"').match
itemlist = []
data = httptools.downloadpage(item.url).data
trailer_url = match(item, patron=r'<source src="([^"]+)').match
if trailer_url:
itemlist.append(item.clone(title=config.get_localized_string(60221) + ' ' + item.title, url=trailer_url, server='directo', action="play"))
itemlist = servertools.get_servers_itemlist(itemlist)
else:
if keyboard:
if item.contextual:
title = "%s"
else:
title = "%s"
itemlist.append(item.clone(title=title % config.get_localized_string(70513), action="manual_search", thumbnail=thumb('search'), extra="filmaffinity"))
return itemlist
def filmaffinity_search(item):
logger.debug()
if item.filmaffinity:
item.url = item.filmaffinity
return search_links_filmaff(item)
# Check if it is a zero search or comes from the Next option
if item.page != "":
data = httptools.downloadpage(item.page).data
else:
params = urllib.urlencode([('stext', item.contentTitle), ('stype%5B%5D', 'title'), ('country', ''), ('genre', ''), ('fromyear', item.year), ('toyear', item.year)])
url = "http://www.filmaffinity.com/es/advsearch.php?%s" % params
data = httptools.downloadpage(url).data
itemlist = []
patron = '<div class="mc-poster">.*?<img.*?src="([^"]+)".*?' \
'<div class="mc-title"><a href="/es/film(\d+).html"[^>]+>(.*?)<img'
matches = scrapertools.find_multiple_matches(data, patron)
# If there is only one result, search directly for the trailers, but list all the results
if len(matches) == 1:
item.url = "http://www.filmaffinity.com/es/evideos.php?movie_id=%s" % matches[0][1]
item.thumbnail = matches[0][0]
if not item.thumbnail.startswith("http"): item.thumbnail = "http://www.filmaffinity.com" + item.thumbnail
itemlist = search_links_filmaff(item)
elif len(matches) > 1:
for scrapedthumbnail, id, scrapedtitle in matches:
if not scrapedthumbnail.startswith("http"): scrapedthumbnail = "http://www.filmaffinity.com" + scrapedthumbnail
scrapedurl = "http://www.filmaffinity.com/es/evideos.php?movie_id=%s" % id
if PY3: scrapedtitle = unicode(scrapedtitle, encoding="utf-8", errors="ignore")
scrapedtitle = scrapertools.htmlclean(scrapedtitle)
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="search_links_filmaff", thumbnail=scrapedthumbnail))
next_page = scrapertools.find_single_match(data, '<a href="([^"]+)">&gt;&gt;</a>')
if next_page != "":
next_page = urlparse.urljoin("http://www.filmaffinity.com/es/", next_page)
itemlist.append(item.clone(title=config.get_localized_string(30992), page=next_page, action="filmaffinity_search", thumbnail=thumb('search'), text_color=""))
if not itemlist: itemlist.append(item.clone(title=config.get_localized_string(70501) % item.contentTitle, action="", thumbnail="", text_color=""))
if keyboard:
if item.contextual: title = "%s"
else: title = "%s"
itemlist.append(item.clone(title=title % config.get_localized_string(70513), action="manual_search", thumbnail=thumb('search'), extra="filmaffinity"))
return itemlist
def search_links_filmaff(item):
logger.debug()
itemlist = []
data = httptools.downloadpage(item.url).data
if not '<a class="lnkvvid"' in data:
itemlist.append(item.clone(title=config.get_localized_string(70503), action="", text_color=""))
else:
patron = '<a class="lnkvvid".*?<b>(.*?)</b>.*?iframe.*?src="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedtitle, scrapedurl in matches:
if not scrapedurl.startswith("http:"):
scrapedurl = urlparse.urljoin("http:", scrapedurl)
trailer_url = scrapedurl.replace("-nocookie.com/embed/", ".com/watch?v=")
if "youtube" in trailer_url:
server = "youtube"
code = scrapertools.find_single_match(trailer_url, 'v=([A-z0-9\-_]+)')
thumbnail = "https://img.youtube.com/vi/%s/0.jpg" % code
else:
server = ""
thumbnail = item.thumbnail
if PY3:
scrapedtitle = unicode(scrapedtitle, encoding="utf-8", errors="ignore")
scrapedtitle = scrapertools.htmlclean(scrapedtitle)
scrapedtitle += " [" + server + "]"
if item.contextual:
scrapedtitle = "%s" % scrapedtitle
itemlist.append(item.clone(title=scrapedtitle, url=trailer_url, server=server, action="play", thumbnail=thumbnail))
itemlist = servertools.get_servers_itemlist(itemlist)
if keyboard:
if item.contextual:
title = "%s"
else:
title = "%s"
itemlist.append(item.clone(title=title % config.get_localized_string(70513), action="manual_search", thumbnail="", extra="filmaffinity"))
return itemlist
it = item.clone(url=trailer_url, server='directo', action="play")
if 'tmdb_id' in it.infoLabels:
del it.infoLabels['tmdb_id'] # for not saving watch time
return it
try:
import xbmcgui
import xbmc
class Select(xbmcgui.WindowXMLDialog):
def __init__(self, *args, **kwargs):
self.item = kwargs.get('item')
self.itemlist = kwargs.get('itemlist')
self.caption = kwargs.get('caption')
self.result = None
def onInit(self):
try:
self.control_list = self.getControl(6)
self.getControl(5).setNavigation(self.control_list, self.control_list, self.control_list, self.control_list)
self.getControl(5).setNavigation(self.control_list, self.control_list, self.control_list,
self.control_list)
self.getControl(3).setEnabled(0)
self.getControl(3).setVisible(0)
except:
@@ -347,35 +266,30 @@ try:
except:
pass
self.getControl(1).setLabel("" + self.caption + "")
self.getControl(5).setLabel(config.get_localized_string(60495))
if keyboard:
self.getControl(5).setLabel(config.get_localized_string(70510))
self.items = []
for item in self.itemlist:
item_l = xbmcgui.ListItem(item.title)
item_l = xbmcgui.ListItem(item.title, item.title2)
item_l.setArt({'thumb': item.thumbnail})
item_l.setProperty('item_copy', item.tourl())
self.items.append(item_l)
self.control_list.reset()
self.control_list.addItems(self.items)
self.setFocus(self.control_list)
def onClick(self, id):
global window_select, result
# Cancel button y [X]
if id == 7:
window_select[-1].close()
if id == 5:
self.result = "_no_video"
result = "no_video"
if id == 5 and keyboard:
self.close()
window_select.pop()
if not window_select:
if not self.item.windowed:
del window_select
else:
window_select[-1].doModal()
buscartrailer(self.item.clone(action="manual_search", extra="youtube"))
def onAction(self, action):
global window_select, result
if action == 92 or action == 110:
self.result = "no_video"
result = "no_video"
self.close()
window_select.pop()
@@ -394,10 +308,8 @@ try:
xbmc.sleep(200)
if puede:
result = video_urls[-1][1]
self.result = video_urls[-1][1]
else:
result = None
self.result = None
elif item.action == "play" and not self.item.windowed:
for window in window_select:
window.close()

View File

@@ -417,6 +417,8 @@ def findvideos(item):
all_videolibrary = []
ch_results = []
list_servers = []
with futures.ThreadPoolExecutor() as executor:
for nom_canal, json_path in list(list_canales.items()):
if filtro_canal and filtro_canal != nom_canal.capitalize():
@@ -476,7 +478,6 @@ def findvideos(item):
del item.library_urls[nom_canal]
item_json = Item().fromjson(filetools.read(json_path))
list_servers = []
# support.dbg()
try: from urllib.parse import urlsplit
except ImportError: from urlparse import urlsplit

View File

@@ -3,9 +3,9 @@ rm tests/home/userdata/addon_data/plugin.video.kod/settings_servers/*.json
rm tests/home/userdata/addon_data/plugin.video.kod/cookies.dat
rm tests/home/userdata/addon_data/plugin.video.kod/kod_db.sqlite
python3 -m pip install --upgrade pip
pip install sakee
pip install html-testRunner
pip install parameterized
pip install -U sakee
pip install -U html-testRunner
pip install -U parameterized
export PYTHONPATH=$PWD
export KODI_INTERACTIVE=0
export KODI_HOME=$PWD/tests/home

View File

@@ -315,6 +315,7 @@ class GenericServerTest(unittest.TestCase):
def test_get_video_url(self):
module = __import__('servers.%s' % self.name, fromlist=["servers.%s" % self.name])
page_url = self.server.url
httptools.default_headers['Referer'] = self.server.referer
print('testing ' + page_url)
print('Found on ' + self.server.foundOn)
print()
@@ -342,10 +343,10 @@ class GenericServerTest(unittest.TestCase):
print(headers)
if 'magnet:?' in directUrl: # check of magnet links not supported
continue
if directUrl.split('.')[-1] == 'm3u8': # m3u8 is a text file and HEAD may be forbidden
page = downloadpage(directUrl, headers=headers, only_headers=True, use_requests=True, verify=False)
if not page.success and directUrl.split('.')[-1] == 'm3u8': # m3u8 is a text file and HEAD may be forbidden
page = downloadpage(directUrl, headers=headers, use_requests=True, verify=False)
else:
page = downloadpage(directUrl, headers=headers, only_headers=True, use_requests=True, verify=False)
self.assertTrue(page.success, self.name + ' scraper returned an invalid link')
self.assertLess(page.code, 400, self.name + ' scraper returned a ' + str(page.code) + ' link')
contentType = page.headers['Content-Type']
@@ -356,10 +357,7 @@ class GenericServerTest(unittest.TestCase):
if __name__ == '__main__':
if 'KOD_TST_CH' not in os.environ:
unittest.main(testRunner=HtmlTestRunner.HTMLTestRunner(report_name='report', add_timestamp=False, combine_reports=True,
report_title='KoD Test Suite', template=os.path.join(config.get_runtime_path(), 'tests', 'template.html')), exit=False)
import webbrowser
webbrowser.open(os.path.join(outDir, 'report.html'))
else:
unittest.main()
unittest.main(testRunner=HtmlTestRunner.HTMLTestRunner(report_name='report', add_timestamp=False, combine_reports=True,
report_title='KoD Test Suite', template=os.path.join(config.get_runtime_path(), 'tests', 'template.html')), exit=False)
import webbrowser
webbrowser.open(os.path.join(outDir, 'report.html'))