From 9fbd01524e567377b6f51a77b8aad4a4d5777322 Mon Sep 17 00:00:00 2001 From: Alhaziel Date: Wed, 9 Oct 2019 12:02:34 +0200 Subject: [PATCH 01/61] - Nascosto Download dai server (Provvisoriamente?) - Forza Riproduzione Direct da Widget (Provvisoriamente?) --- platformcode/launcher.py | 1 + platformcode/platformtools.py | 10 ++++++---- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/platformcode/launcher.py b/platformcode/launcher.py index 059d276e..fdd09864 100644 --- a/platformcode/launcher.py +++ b/platformcode/launcher.py @@ -515,6 +515,7 @@ def play_from_library(item): return else: item = videolibrary.play(itemlist[seleccion])[0] + item.play_from = 'window' platformtools.play_video(item) from specials import autoplay diff --git a/platformcode/platformtools.py b/platformcode/platformtools.py index 62378f9e..ec2adae7 100644 --- a/platformcode/platformtools.py +++ b/platformcode/platformtools.py @@ -673,6 +673,8 @@ def is_playing(): def play_video(item, strm=False, force_direct=False, autoplay=False): logger.info() + if item.play_from == 'window': + force_direct=True # logger.debug(item.tostring('\n')) logger.debug('item play: %s'%item) xbmc_player = XBMCPlayer() @@ -916,10 +918,10 @@ def get_dialogo_opciones(item, default_action, strm, autoplay): # "Descargar" import xbmcaddon addon = xbmcaddon.Addon('plugin.video.kod') - downloadenabled = addon.getSetting('downloadenabled') - if downloadenabled != "false": - opcion = config.get_localized_string(30153) - opciones.append(opcion) + # downloadenabled = addon.getSetting('downloadenabled') + # if downloadenabled != "false": + # opcion = config.get_localized_string(30153) + # opciones.append(opcion) if item.isFavourite: # "Quitar de favoritos" From 7c3de03d22da46fc6a3ee92decb692c9120eb247 Mon Sep 17 00:00:00 2001 From: Alhaziel Date: Wed, 9 Oct 2019 17:28:27 +0200 Subject: [PATCH 02/61] Formatta testo per community channel: {submenu} -> stile sottomenu {italic} -> Corsivo {color red} -> colore testo BOLD di default --- specials/community.py | 33 +++++++++++++++++++++------------ 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/specials/community.py b/specials/community.py index 79b170e2..55d1fa08 100644 --- a/specials/community.py +++ b/specials/community.py @@ -17,6 +17,7 @@ from core.item import Item from platformcode import logger, config, platformtools from specials import autoplay from specials import filtertools +from core.support import typo list_data = {} @@ -49,7 +50,7 @@ def show_channels(item): file = open(path, "r") json = jsontools.load(file.read()) - itemlist.append(Item(channel=item.channel, title=config.get_localized_string(70676), action='add_channel', thumbnail=get_thumb('add.png'))) + itemlist.append(Item(channel=item.channel, title=typo(config.get_localized_string(70676),'bold color kod'), action='add_channel', thumbnail=get_thumb('add.png'))) for key, channel in json['channels'].items(): file_path = channel ['path'] @@ -59,7 +60,7 @@ def show_channels(item): fanart = json_url['fanart'] if 'fanart' in json_url else '' itemlist.append(Item(channel=item.channel, - title=channel['channel_name'], + title=typo(channel['channel_name'],'bold'), url=file_path, thumbnail=thumbnail, fanart=fanart, @@ -101,7 +102,7 @@ def show_menu(item): plot = option['plot'] else: plot = item.plot - itemlist.append(Item(channel=item.channel, title=option['title'], thumbnail=thumbnail, fanart=fanart, plot=plot, action='show_menu', url=option['link'])) + itemlist.append(Item(channel=item.channel, 
title=format_title(option['title']), thumbnail=thumbnail, fanart=fanart, plot=plot, action='show_menu', url=option['link'])) autoplay.show_option(item.channel, itemlist) return itemlist @@ -132,7 +133,7 @@ def list_all(item): title = media['title'] title = set_title(title, language, quality) - new_item = Item(channel=item.channel, title=title, quality=quality, + new_item = Item(channel=item.channel, title=format_title(title), quality=quality, language=language, plot=plot, thumbnail=poster) new_item.infoLabels['year'] = media['year'] if 'year' in media else '' @@ -161,7 +162,7 @@ def seasons(item): for season in list_seasons: infoLabels['season'] = season['season'] title = config.get_localized_string(60027) % season['season'] - itemlist.append(Item(channel=item.channel, title=title, url=season['link'], action='episodesxseason', + itemlist.append(Item(channel=item.channel, title=format_title(title), url=season['link'], action='episodesxseason', contentSeasonNumber=season['season'], infoLabels=infoLabels)) tmdb.set_infoLabels(itemlist, seekTmdb=True) @@ -185,7 +186,7 @@ def episodesxseason(item): title = config.get_localized_string(70677) + ' %s' % (episode_number) - itemlist.append(Item(channel=item.channel, title=title, url=episode, action='findvideos', + itemlist.append(Item(channel=item.channel, title=format_title(title), url=episode, action='findvideos', contentEpisodeNumber=episode_number, infoLabels=infoLabels)) tmdb.set_infoLabels(itemlist, seekTmdb=True) @@ -201,7 +202,7 @@ def findvideos(item): title = '' title = set_title(title, language, quality) - itemlist.append(Item(channel=item.channel, title='%s'+title, url=url['url'], action='play', quality=quality, + itemlist.append(Item(channel=item.channel, title=format_title('%s'+title), url=url['url'], action='play', quality=quality, language=language, infoLabels = item.infoLabels)) itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) @@ -307,13 +308,21 @@ def set_title(title, language, quality): if not config.get_setting('unify'): if quality != '': - title += ' [%s]' % quality + title += typo(quality, '_ [] color kod') if language != '': if not isinstance(language, list): - title += ' [%s]' % language.upper() + title += typo(language.upper(), '_ [] color kod') else: - title += ' ' for lang in language: - title += '[%s]' % lang.upper() + title += typo(lang.upper(), '_ [] color kod') - return title.capitalize() + return title + +def format_title(title): + t = scrapertools.find_single_match(title, r'\{([^\}]+)\}') + logger.info(t) + logger.info(title) + if 'bold' not in t: t += ' bold' + title = re.sub(r'(\{[^\}]+\})','',title) + logger.info(title) + return typo(title,t) \ No newline at end of file From cfd77783cbbe56039b65be35461fc4de3fe3664c Mon Sep 17 00:00:00 2001 From: Alhaziel Date: Wed, 9 Oct 2019 17:42:33 +0200 Subject: [PATCH 03/61] Fix Wstream --- servers/wstream.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/servers/wstream.json b/servers/wstream.json index eea9e36e..62ecc754 100644 --- a/servers/wstream.json +++ b/servers/wstream.json @@ -8,7 +8,7 @@ "patterns": [ { "pattern": "wstream.video/(?:embed-|videos/|video/)?([a-z0-9A-Z]+)", - "url": "http:\/\/wstream.video\/video\/\\1" + "url": "http:\/\/wstream.video\/videow\/\\1" } ], "ignore_urls": [ ] From 0afd723690b808f5fb04c69dc4ec27344699c7a8 Mon Sep 17 00:00:00 2001 From: marco Date: Wed, 9 Oct 2019 18:09:48 +0200 Subject: [PATCH 04/61] fix updater --- platformcode/updater.py | 8 ++++++-- 1 file changed, 6 
insertions(+), 2 deletions(-) diff --git a/platformcode/updater.py b/platformcode/updater.py index 8b13f23f..c10b6836 100644 --- a/platformcode/updater.py +++ b/platformcode/updater.py @@ -3,6 +3,7 @@ import hashlib import io import os import shutil +from cStringIO import StringIO from core import httptools, filetools, downloadtools from core.ziptools import ziptools @@ -100,7 +101,7 @@ def check_addon_init(): patched = apply_patch(text, (file['patch']+'\n').encode('utf-8')) if patched != text: # non eseguo se già applicata (es. scaricato zip da github) - if getSha(patched) == file['sha']: + if getShaStr(patched) == file['sha']: localFile.seek(0) localFile.truncate() localFile.writelines(patched) @@ -210,7 +211,10 @@ def apply_patch(s,patch,revert=False): def getSha(path): f = open(path).read() - return githash.generic_hash(path, '100644', len(f)).hexdigest() + return githash.blob_hash(path, len(f)).hexdigest() + +def getShaStr(str): + return githash.blob_hash(StringIO(str), len(str)).hexdigest() def updateFromZip(): From a06a98d6b48bf990d1b23e085426e192cb50c52f Mon Sep 17 00:00:00 2001 From: Alhaziel Date: Wed, 9 Oct 2019 18:25:46 +0200 Subject: [PATCH 05/61] Fix CB01 --- channels/cineblog01.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/channels/cineblog01.py b/channels/cineblog01.py index 78e858a3..20699aee 100644 --- a/channels/cineblog01.py +++ b/channels/cineblog01.py @@ -18,7 +18,10 @@ headers = "" def findhost(): global host, headers permUrl = httptools.downloadpage('https://www.cb01.uno/', follow_redirects=False).headers - host = 'https://www.'+permUrl['location'].replace('https://www.google.it/search?q=site:', '') + if host[:4] != 'http': + host = 'https://'+permUrl['location'].replace('https://www.google.it/search?q=site:', '') + else: + host = permUrl['location'].replace('https://www.google.it/search?q=site:', '') headers = [['Referer', host]] list_servers = ['verystream', 'openload', 'streamango', 'wstream'] From 56533b5ff653597ac2b85fb7e54536623373fa7d Mon Sep 17 00:00:00 2001 From: Alhaziel Date: Wed, 9 Oct 2019 18:32:00 +0200 Subject: [PATCH 06/61] Fix server CB01 --- channels/cineblog01.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/channels/cineblog01.py b/channels/cineblog01.py index 20699aee..548cc499 100644 --- a/channels/cineblog01.py +++ b/channels/cineblog01.py @@ -163,13 +163,13 @@ def findvideos(item): QualityStr = scrapertoolsV2.decodeHtmlentities(match.group(1))[6:] # Estrae i contenuti - Streaming - load_links(itemlist, 'Streaming:(.*?)', "orange", "Streaming", "SD") + load_links(itemlist, 'Streaming:(.*?)cbtable', "orange", "Streaming", "SD") # Estrae i contenuti - Streaming HD - load_links(itemlist, 'Streaming HD[^<]+(.*?)', "yellow", "Streaming HD", "HD") + load_links(itemlist, 'Streaming HD[^<]+(.*?)cbtable', "yellow", "Streaming HD", "HD") # Estrae i contenuti - Streaming 3D - load_links(itemlist, 'Streaming 3D[^<]+(.*?)', "pink", "Streaming 3D") + load_links(itemlist, 'Streaming 3D[^<]+(.*?)cbtable', "pink", "Streaming 3D") return support.server(item, itemlist=itemlist) From 92d83adabf5e95052d8f370c545e09df732ac962 Mon Sep 17 00:00:00 2001 From: Alhaziel Date: Wed, 9 Oct 2019 18:40:41 +0200 Subject: [PATCH 07/61] Fix Wstream --- servers/wstream.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/servers/wstream.json b/servers/wstream.json index 62ecc754..d32574f1 100644 --- a/servers/wstream.json +++ b/servers/wstream.json @@ -7,7 +7,7 @@ "find_videos": { "patterns": [ { - "pattern": 
"wstream.video/(?:embed-|videos/|video/)?([a-z0-9A-Z]+)", + "pattern": "wstream.video/(?:embed-|videos/|video/|videow/)?([a-z0-9A-Z]+)", "url": "http:\/\/wstream.video\/videow\/\\1" } ], From 0c19c15d38285cd2eeb995d6fe37e3f07870ce2e Mon Sep 17 00:00:00 2001 From: marco Date: Wed, 9 Oct 2019 19:44:02 +0200 Subject: [PATCH 08/61] miglioramenti scraper vcrypt --- lib/unshortenit.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/lib/unshortenit.py b/lib/unshortenit.py index cd1c82bf..f43637b7 100644 --- a/lib/unshortenit.py +++ b/lib/unshortenit.py @@ -491,18 +491,22 @@ class UnshortenIt(object): headers = { "Cookie": hashlib.md5(ip+day).hexdigest() + "=1" } - uri = uri.replace('sb/','sb1/') - uri = uri.replace('akv/','akv1/') - uri = uri.replace('wss/','wss1/') - uri = uri.replace('wsd/','wsd1/') + uri = uri.replace('sb/', 'sb1/') + uri = uri.replace('akv/', 'akv1/') + uri = uri.replace('wss/', 'wss1/') + uri = uri.replace('wsd/', 'wsd1/') r = httptools.downloadpage(uri, timeout=self._timeout, headers=headers, follow_redirects=False) - uri = r.headers['location'] + if 'Wait 1 hour' in r.data: + uri = '' + logger.info('IP bannato da vcrypt, aspetta un ora') + else: + uri = r.headers['location'] if "4snip" in uri: if 'out_generator' in uri: uri = re.findall('url=(.*)$', uri)[0] - else: - uri = decrypt(uri) + elif '/decode/' in uri: + uri = decrypt(uri.split('/')[-1]) return uri, r.code if r else 200 From 94b82a66321225a9db49bfa96eb5a805770c958d Mon Sep 17 00:00:00 2001 From: Alhaziel Date: Wed, 9 Oct 2019 20:42:02 +0200 Subject: [PATCH 09/61] Fix Default_channel_settings --- core/channeltools.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/channeltools.py b/core/channeltools.py index 9f0a4bed..359dd5c7 100644 --- a/core/channeltools.py +++ b/core/channeltools.py @@ -258,7 +258,7 @@ def get_default_settings(channel_name): else: control['label'] = config.get_localized_string(70727) + ' - ' + label.capitalize() - control['default'] = True if control['id'] not in default_off else False + control['default'] = control['default'] if control['id'] not in default_off else False channel_controls.append(control) # elif control['id'] == 'filter_languages': @@ -269,7 +269,7 @@ def get_default_settings(channel_name): elif control['id'] not in not_active and 'include_in_newest' not in control['id']: if type(control['default']) == bool: - control['default'] = True if control['id'] not in default_off else False + control['default'] = control['default'] if control['id'] not in default_off else False channel_controls.append(control) if renumber: From 0ef0ba36fbe4f048daad38be6cf631afb8836ff0 Mon Sep 17 00:00:00 2001 From: Alhaziel Date: Wed, 9 Oct 2019 21:12:15 +0200 Subject: [PATCH 10/61] =?UTF-8?q?Fix=20Novit=C3=A0=20CB01?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- channels/cineblog01.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/channels/cineblog01.py b/channels/cineblog01.py index 548cc499..f0aba4c7 100644 --- a/channels/cineblog01.py +++ b/channels/cineblog01.py @@ -75,7 +75,7 @@ def newest(categoria): else: patronBlock = r'Ultimi 100 film Aggiornati:(?P.*?)<\/td>' item = categoria - patron = "[^>]+)>(?P[^<([]+)(?:\[(?P<lang>Sub-ITA|B/N)\])?\s?(?:\[(?P<quality>HD|SD|HD/3D)\])?\s?\((?P<year>[0-9]{4})\)<\/a>" + patron = r'<a href="(?P<url>[^"]+)"\s*>(?P<title>[^<([]+)(?:\[(?P<lang>Sub-ITA|B/N)\])?\s?(?:\[(?P<quality>HD|SD|HD/3D)\])?\s?\((?P<year>[0-9]{4})\)<\/a>' 
pagination = 20 return locals() From 4a7fe5c7842a4aae0cd0ac2a1c3739a852199265 Mon Sep 17 00:00:00 2001 From: Alhaziel <alhaziel01@gmail.com> Date: Wed, 9 Oct 2019 21:12:35 +0200 Subject: [PATCH 11/61] =?UTF-8?q?Fix=20Pagination=20in=20Novit=C3=A0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- core/support.py | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/core/support.py b/core/support.py index 8a58591c..ad30c339 100644 --- a/core/support.py +++ b/core/support.py @@ -389,17 +389,18 @@ def scrape(func): # next page for pagination if pagination and len(matches) >= pag * pagination: - itemlist.append( - Item(channel=item.channel, - action = item.action, - contentType=item.contentType, - title=typo(config.get_localized_string(30992), 'color kod bold'), - fulltitle= item.fulltitle, - show= item.show, - url=item.url, - args=item.args, - page=pag + 1, - thumbnail=thumb())) + if inspect.stack()[1][3] != 'get_newest': + itemlist.append( + Item(channel=item.channel, + action = item.action, + contentType=item.contentType, + title=typo(config.get_localized_string(30992), 'color kod bold'), + fulltitle= item.fulltitle, + show= item.show, + url=item.url, + args=item.args, + page=pag + 1, + thumbnail=thumb())) if action != 'play' and function != 'episodios' and 'patronMenu' not in args: tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) From 6bc96e98455a46697533749d04ff0f4812a30e15 Mon Sep 17 00:00:00 2001 From: Alhaziel <alhaziel01@gmail.com> Date: Fri, 11 Oct 2019 17:30:42 +0200 Subject: [PATCH 12/61] Fix Server Youtube --- servers/youtube.json | 5 ++ servers/youtube.py | 119 ++++++++++++++++++++++++++----------------- 2 files changed, 78 insertions(+), 46 deletions(-) diff --git a/servers/youtube.json b/servers/youtube.json index c8e0bb0c..39f5872d 100644 --- a/servers/youtube.json +++ b/servers/youtube.json @@ -15,6 +15,11 @@ "pattern": "youtube.com/v/([0-9A-Za-z_-]{11})", "url": "http://www.youtube.com/watch?v=\\1" } + , + { + "pattern": "youtu.be/([0-9A-Za-z_-]{11})", + "url": "http://www.youtube.com/watch?v=\\1" + } ] }, "free": true, diff --git a/servers/youtube.py b/servers/youtube.py index aa28acbe..068bcfb4 100644 --- a/servers/youtube.py +++ b/servers/youtube.py @@ -86,7 +86,7 @@ def extract_flashvars(data): def extract_videos(video_id): fmt_value = { 5: "240p h263 flv", - 6: "240p h263 flv", + 6: "270p h263 flv", 18: "360p h264 mp4", 22: "720p h264 mp4", 26: "???", @@ -108,10 +108,31 @@ def extract_videos(video_id): 85: "1080p h264 3D", 100: "360p vp8 3D", 101: "480p vp8 3D", - 102: "720p vp8 3D" + 102: "720p vp8 3D", + 167: "360p vp8 webm", + 168: "480p vp8 webm", + 169: "720p vp8 webm", + 170: "1080p vp8 webm", + 218: "480p vp8 webm", + 219: "480p vp8 webm", + 278: "144p vp9 webm", + 242: "240p vp9 webm", + 243: "360p vp9 webm", + 244: "480p vp9 webm", + 245: "480p vp9 webm", + 246: "480p vp9 webm", + 247: "720p vp9 webm", + 248: "1080p vp9 webm", + 271: "1440p vp9 webm", + 272: "2160p vp9 webm", + 302: "720p vp9 webm 60fps", + 303: "1080p vp9 webm 60fps", + 308: "1440p vp9 webm 60fps", + 313: "2160p vp9 webm", + 315: "2160p vp9 webm 60fps" } - - url = 'http://www.youtube.com/get_video_info?video_id=%s&eurl=https://youtube.googleapis.com/v/%s&ssl_stream=1' % \ + # from core.support import dbg; dbg() + url = 'https://www.youtube.com/get_video_info?video_id=%s&eurl=https://youtube.googleapis.com/v/%s&ssl_stream=1' % \ (video_id, video_id) data = httptools.downloadpage(url).data @@ -132,53 +153,59 
@@ def extract_videos(video_id): js_signature = "" youtube_page_data = httptools.downloadpage("http://www.youtube.com/watch?v=%s" % video_id).data params = extract_flashvars(youtube_page_data) + data_flashvars =[] + if params.get('adaptive_fmts'): + data_flashvars += scrapertools.find_multiple_matches(params['adaptive_fmts'], '(quality.*?url[^,]+)') if params.get('url_encoded_fmt_stream_map'): - data_flashvars = params["url_encoded_fmt_stream_map"].split(",") - for url_desc in data_flashvars: - url_desc_map = dict(urlparse.parse_qsl(url_desc)) - if not url_desc_map.get("url") and not url_desc_map.get("stream"): + data_flashvars += params["url_encoded_fmt_stream_map"].split(",") + + for url_desc in data_flashvars: + url_desc_map = dict(urlparse.parse_qsl(url_desc)) + if not url_desc_map.get("url") and not url_desc_map.get("stream"): + continue + try: + key = int(url_desc_map["itag"]) + if not fmt_value.get(key): continue - try: - key = int(url_desc_map["itag"]) - if not fmt_value.get(key): - continue + if url_desc_map.get("url"): + url = urllib.unquote(url_desc_map["url"]) + elif url_desc_map.get("conn") and url_desc_map.get("stream"): + url = urllib.unquote(url_desc_map["conn"]) + if url.rfind("/") < len(url) - 1: + url += "/" + url += urllib.unquote(url_desc_map["stream"]) + elif url_desc_map.get("stream") and not url_desc_map.get("conn"): + url = urllib.unquote(url_desc_map["stream"]) - if url_desc_map.get("url"): - url = urllib.unquote(url_desc_map["url"]) - elif url_desc_map.get("conn") and url_desc_map.get("stream"): - url = urllib.unquote(url_desc_map["conn"]) - if url.rfind("/") < len(url) - 1: - url += "/" - url += urllib.unquote(url_desc_map["stream"]) - elif url_desc_map.get("stream") and not url_desc_map.get("conn"): - url = urllib.unquote(url_desc_map["stream"]) + if url_desc_map.get("sig"): + url += "&signature=" + url_desc_map["sig"] + elif url_desc_map.get("s"): + sig = url_desc_map["s"] + if not js_signature: + urljs = scrapertools.find_single_match(youtube_page_data, '"assets":.*?"js":\s*"([^"]+)"') + urljs = urljs.replace("\\", "") + if urljs: + if not re.search(r'https?://', urljs): + urljs = urlparse.urljoin("https://www.youtube.com", urljs) + data_js = httptools.downloadpage(urljs).data + from jsinterpreter import JSInterpreter + funcname = scrapertools.find_single_match(data_js, '\.sig\|\|([A-z0-9$]+)\(') + if not funcname: + funcname = scrapertools.find_single_match(data_js, '["\']signature["\']\s*,\s*' + '([A-z0-9$]+)\(') + if not funcname: + funcname = scrapertools.find_single_match(data_js, r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(') + jsi = JSInterpreter(data_js) + js_signature = jsi.extract_function(funcname) - if url_desc_map.get("sig"): - url += "&signature=" + url_desc_map["sig"] - elif url_desc_map.get("s"): - sig = url_desc_map["s"] - if not js_signature: - urljs = scrapertools.find_single_match(youtube_page_data, '"assets":.*?"js":\s*"([^"]+)"') - urljs = urljs.replace("\\", "") - if urljs: - if not re.search(r'https?://', urljs): - urljs = urlparse.urljoin("https://www.youtube.com", urljs) - data_js = httptools.downloadpage(urljs).data - from jsinterpreter import JSInterpreter - funcname = scrapertools.find_single_match(data_js, '\.sig\|\|([A-z0-9$]+)\(') - if not funcname: - funcname = scrapertools.find_single_match(data_js, '["\']signature["\']\s*,\s*' - '([A-z0-9$]+)\(') - jsi = JSInterpreter(data_js) - js_signature = jsi.extract_function(funcname) - signature = js_signature([sig]) - url += "&signature=" 
+ signature - url = url.replace(",", "%2C") - video_urls.append(["(" + fmt_value[key] + ") [youtube]", url]) - except: - import traceback - logger.info(traceback.format_exc()) + signature = js_signature([sig]) + url += "&sig=" + signature + url = url.replace(",", "%2C") + video_urls.append(["(" + fmt_value[key] + ") [youtube]", url]) + except: + import traceback + logger.info(traceback.format_exc()) return video_urls From d126eab38e4f138519b73ea0adf71bae9812c128 Mon Sep 17 00:00:00 2001 From: greko17 <sex1712@email.it> Date: Sat, 12 Oct 2019 11:48:41 +0200 Subject: [PATCH 13/61] fix: aggiunte/modifiche ai files del canale 0example --- channels/0example.json | 10 ++-- channels/0example.py | 125 ++++++++++++++++++++++++++--------------- 2 files changed, 84 insertions(+), 51 deletions(-) diff --git a/channels/0example.json b/channels/0example.json index 5dc735d4..a66bf1a8 100644 --- a/channels/0example.json +++ b/channels/0example.json @@ -1,5 +1,5 @@ -Rev:0.1 -Update: 18-9-2019 +Rev:0.2 +Update: 03-10-2019 ##################### Promemoria da cancellare pena la non visibilità del canale in KOD!! @@ -11,8 +11,7 @@ le voci in settings sono state inserite per l'unico scopo di velocizzare la scrittura del file Vanno lasciate solo quelle voci il cui funzionamento sul canale non vanno attivate. -Per esempio se il canale non ha: newest() -lasciare le voci dove c'è newest nell'id. Es: include_in_newest_series +"not_active": ["include_in_newest"], VA INSERITO nei canali che NON hanno nessuna voce newest. Ovviamente va mantenuto tutto il codice di quell'id tra le {} se vanno cancellati tutti deve rimanere la voce: "settings": [] @@ -20,12 +19,13 @@ se vanno cancellati tutti deve rimanere la voce: { "id": "nome del file .json", "name": "Nome del canale visualizzato in KOD", - "language": ["ita", "vos"], + "language": ["ita", "vosi"], "active": false, "adult": false, "thumbnail": "", "banner": "", "categories": ["movie", "tvshow", "anime", "vos", "documentary", "adult"], + "not_active": ["include_in_newest"], "settings": [ { "id": "include_in_global_search", diff --git a/channels/0example.py b/channels/0example.py index 3d6eb8a2..f418da4a 100644 --- a/channels/0example.py +++ b/channels/0example.py @@ -1,30 +1,42 @@ # -*- coding: utf-8 -*- # ------------------------------------------------------------ # Canale per 'idcanale nel json' +# By: pincopallo! +# Eventuali crediti se vuoi aggiungerli # ------------------------------------------------------------ # Rev: 0.2 -# Update 18-09-2019 +# Update 12-10-2019 # fix: # 1. aggiunto pagination e sistemate alcune voci +# 2. modificato problemi in eccezioni +# 3. aggiunta la def select +# 4. modifica alla legenda e altre aggiunte # Questo vuole solo essere uno scheletro per velocizzare la scrittura di un canale. +# La maggior parte dei canali può essere scritta con il decoratore. # I commenti sono più un promemoria... che una vera e propria spiegazione! # Niente di più. # Ulteriori informazioni sono reperibili nel wiki: # https://github.com/kodiondemand/addon/wiki/decoratori + """ + Questi sono commenti per i beta-tester. - Problemi noti che non superano il test del canale: - - indicare i problemi + Su questo canale, nella categoria 'Ricerca Globale' + non saranno presenti le voci 'Aggiungi alla Videoteca' + e 'Scarica Film'/'Scarica Serie', dunque, + la loro assenza, nel Test, NON dovrà essere segnalata come ERRORE. + + Novità. Indicare in quale/i sezione/i è presente il canale: + - Nessuna, film, serie, anime... 
Avvisi: - Eventuali avvisi per i tester Ulteriori info: - """ -# CANCELLARE Ciò CHE NON SERVE per il canale, lascia il codice commentato +# CANCELLARE Ciò CHE NON SERVE per il canale, lascia il codice commentato ove occorre, # ma fare PULIZIA quando si è finito di testarlo # Qui gli import @@ -37,7 +49,7 @@ from platformcode import config # in caso di necessità #from core import scrapertoolsV2, httptools, servertools, tmdb -#from core.item import Item +from core.item import Item # per newest #from lib import unshortenit ##### fine import @@ -48,7 +60,7 @@ from platformcode import config # da cancellare se non utilizzata __channel__ = "id nel json" # da cancellare se si utilizza findhost() -host = config.get_channel_url('id nel json OR '__channel__) # <-- ATTENZIONE +host = config.get_channel_url('id nel json' OR __channel__) # <-- ATTENZIONE headers = [['Referer', host]] # Inizio findhost() - da cancellare se usato l'altro metodo @@ -62,7 +74,7 @@ def findhost(): permUrl = httptools.downloadpage('INSERIRE-URL-QUI', follow_redirects=False).headers host = 'https://www.'+permUrl['location'].replace('https://www.google.it/search?q=site:', '') # cancellare host non utilizzato - host = scrapertoolsV2.find_single_match(data, r'<div class="elementor-button-wrapper"> <a href="([^"]+)"') + host = scrapertoolsV2.find_single_match(permUrl, r'<div class="elementor-button-wrapper"> <a href="([^"]+)"') headers = [['Referer', host]] findhost() # così le imposta una volta per tutte @@ -83,8 +95,7 @@ def mainlist(item): # Ordine delle voci # Voce FILM, puoi solo impostare l'url - film = ['', - #'url', # url per la voce FILM, se possibile la pagina principale con le ultime novità + film = ['', # url per la voce FILM, se possibile la pagina principale con le ultime novità #Voce Menu,['url','action','args',contentType] ('Al Cinema', ['', 'peliculas', '']), ('Generi', ['', 'genres', 'genres']), @@ -97,19 +108,17 @@ def mainlist(item): ] # Voce SERIE, puoi solo impostare l'url - tvshow = ['', - #'url', # url per la voce Serie, se possibile la pagina con titoli di serie + tvshow = ['', # url per la voce Serie, se possibile la pagina con titoli di serie #Voce Menu,['url','action','args',contentType] - ('Novità', ['', '', '']) + ('Novità', ['', '', '']), ('Per Lettera', ['', 'genres', 'letters']), ('Per Genere', ['', 'genres', 'genres']), ('Per anno', ['', 'genres', 'years']) - + ] # Voce ANIME, puoi solo impostare l'url - anime = ['', - #'url', # url per la voce Anime, se possibile la pagina con titoli di anime + anime = ['', # url per la voce Anime, se possibile la pagina con titoli di anime #Voce Menu,['url','action','args',contentType] - ('Novità', ['', '', '']) + ('Novità', ['', '', '']), ('In Corso',['', '', '', '']), ('Ultimi Episodi',['', '', '', '']), ('Ultime Serie',['', '', '', '']) @@ -133,52 +142,53 @@ def mainlist(item): nome = [( '' ['', '', '', '']) return locals() -# riepilogo key per il match nei patron -# known_keys = ['url', 'title', 'title2', 'season', 'episode', 'thumb', 'quality', -# 'year', 'plot', 'duration', 'genere', 'rating', 'type', 'lang'] -# url = link relativo o assoluto -# title = titolo Film/Serie/Anime/Altro -# title2 = titolo dell'episodio Serie/Anime/Altro -# season = stagione in formato numerico -# episode = numero episodio, in formato numerico. 
Se il sito ha stagionexepisodio potete omettere season -# thumb = locandina Film/Serie/Anime/Altro -# quality = qualità indicata del video -# year = anno in formato numerico (4 cifre) -# duration = durata del Film/Serie/Anime/Altro -# genere = genere del Film/Serie/Anime/Altro. Es: avventura, commedia -# rating = punteggio/voto in formato numerico -# type = tipo del video. Es. movie per film o tvshow per le serie. Di solito sono discrimanti usati dal sito -# lang = lingua del video. Es: ITA, Sub-ITA, Sub, SUB ITA. Se non appare 'ITA' è di default + # Legenda known_keys per i groups nei patron + # known_keys = ['url', 'title', 'title2', 'season', 'episode', 'thumb', 'quality', + # 'year', 'plot', 'duration', 'genere', 'rating', 'type', 'lang'] + # url = link relativo o assoluto alla pagina titolo film/serie + # title = titolo Film/Serie/Anime/Altro + # title2 = titolo dell'episodio Serie/Anime/Altro + # season = stagione in formato numerico + # episode = numero episodio, in formato numerico. + # thumb = linkrealtivo o assoluto alla locandina Film/Serie/Anime/Altro + # quality = qualità indicata del video + # year = anno in formato numerico (4 cifre) + # duration = durata del Film/Serie/Anime/Altro + # genere = genere del Film/Serie/Anime/Altro. Es: avventura, commedia + # rating = punteggio/voto in formato numerico + # type = tipo del video. Es. movie per film o tvshow per le serie. Di solito sono discrimanti usati dal sito + # lang = lingua del video. Es: ITA, Sub-ITA, Sub, SUB ITA. + # AVVERTENZE: Se il titolo è trovato nella ricerca TMDB/TVDB/Altro allora le locandine e altre info non saranno quelle recuperate nel sito.!!!! @support.scrape def peliculas(item): support.log(item) - #dbg # decommentare per attivare web_pdb + #support.dbg() # decommentare per attivare web_pdb action = '' blacklist = [''] patron = r'' patronBlock = r'' patronNext = '' - pagination = 0 + pagination = '' - debug = False # True per testare le regex sul sito + #debug = True # True per testare le regex sul sito return locals() @support.scrape def episodios(item): support.log(item) - #dbg + #support.dbg() action = '' blacklist = [''] patron = r'' patronBlock = r'' patronNext = '' - pagination = 0 + pagination = '' - debug = False + #debug = True return locals() # Questa def è utilizzata per generare i menu del canale @@ -186,28 +196,50 @@ def episodios(item): @support.scrape def genres(item): support.log(item) - #dbg + #support.dbg() action = '' blacklist = [''] patron = r'' patronBlock = r'' patronNext = '' - pagination = 0 + pagination = '' - debug = False + #debug = True return locals() ############## Fine ordine obbligato ## Def ulteriori +# per quei casi dove il sito non differenzia film e/o serie e/o anime +# e la ricerca porta i titoli mischiati senza poterli distinguere tra loro +# andranno modificate anche le def peliculas e episodios ove occorre +def select(item): + support.log('select --->', item) + #support.dbg() + data = httptools.downloadpage(item.url, headers=headers).data + # pulizia di data, in caso commentare le prossime 2 righe + data = re.sub('\n|\t', ' ', data) + data = re.sub(r'>\s+<', '> <', data) + block = scrapertoolsV2.find_single_match(data, r'') + if re.findall('', data, re.IGNORECASE): + support.log('select = ### è una serie ###') + return episodios(Item(channel=item.channel, + title=item.title, + fulltitle=item.fulltitle, + url=item.url, + args='serie', + contentType='tvshow', + #data1 = data decommentando portiamo data nella def senza doverla riscaricare + )) + ############## Fondo 
Pagina # da adattare al canale def search(item, text): support.log('search', item) itemlist = [] text = text.replace(' ', '+') - item.url = '/index.php?do=search&story=%s&subaction=search' % (text) + item.url = host + '/index.php?do=search&story=%s&subaction=search' % (text) # bisogna inserire item.contentType per la ricerca globale # se il canale è solo film, si può omettere, altrimenti bisgona aggiungerlo e discriminare. item.contentType = item.contentType @@ -240,14 +272,15 @@ def newest(categoria): except: import sys for line in sys.exc_info(): - log('newest log: ', {0}.format(line)) + support.log('newest log: ', {0}.format(line)) return [] return itemlist -# da adattare... ( support.server ha vari parametri ) +# da adattare... +# consultare il wiki sia per support.server che ha vari parametri, +# sia per i siti con hdpass #support.server(item, data='', itemlist=[], headers='', AutoPlay=True, CheckLinks=True) def findvideos(item): support.log('findvideos ->', item) return support.server(item, headers=headers) - From 44bb8fc91ce07eb9723d90b37cce5945abb16624 Mon Sep 17 00:00:00 2001 From: greko17 <sex1712@email.it> Date: Sat, 12 Oct 2019 11:49:29 +0200 Subject: [PATCH 14/61] fix: core/support.py contentSerieName e aggiunta legenda delle key dei groups contentSerieName = scraped['title'] altrimenti non viene trovata nessuna informazione sul sito Tmdb --- core/support.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/core/support.py b/core/support.py index ad30c339..8b21bcdd 100644 --- a/core/support.py +++ b/core/support.py @@ -182,6 +182,24 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t regexDbg(item, patron, headers, block) known_keys = ['url', 'title', 'title2', 'season', 'episode', 'thumb', 'quality', 'year', 'plot', 'duration', 'genere', 'rating', 'type', 'lang'] + # Legenda known_keys per i groups nei patron + # known_keys = ['url', 'title', 'title2', 'season', 'episode', 'thumb', 'quality', + # 'year', 'plot', 'duration', 'genere', 'rating', 'type', 'lang'] + # url = link relativo o assoluto alla pagina titolo film/serie + # title = titolo Film/Serie/Anime/Altro + # title2 = titolo dell'episodio Serie/Anime/Altro + # season = stagione in formato numerico + # episode = numero episodio, in formato numerico. + # thumb = linkrealtivo o assoluto alla locandina Film/Serie/Anime/Altro + # quality = qualità indicata del video + # year = anno in formato numerico (4 cifre) + # duration = durata del Film/Serie/Anime/Altro + # genere = genere del Film/Serie/Anime/Altro. Es: avventura, commedia + # rating = punteggio/voto in formato numerico + # type = tipo del video. Es. movie per film o tvshow per le serie. Di solito sono discrimanti usati dal sito + # lang = lingua del video. Es: ITA, Sub-ITA, Sub, SUB ITA. + # AVVERTENZE: Se il titolo è trovato nella ricerca TMDB/TVDB/Altro allora le locandine e altre info non saranno quelle recuperate nel sito.!!!! 
+ stagione = '' # per quei siti che hanno la stagione nel blocco ma non nelle puntate for i, match in enumerate(matches): if pagination and (pag - 1) * pagination > i: continue # pagination @@ -275,7 +293,7 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t infoLabels=infolabels, thumbnail=item.thumbnail if function == 'episodios' else scraped["thumb"] , args=item.args, - contentSerieName= title if item.contentType or CT != 'movie' and function != 'episodios' else item.fulltitle if function == 'episodios' else '', + contentSerieName= scraped['title'],#title if item.contentType or CT != 'movie' and function != 'episodios' else item.fulltitle if function == 'episodios' else '', contentTitle= title if item.contentType or CT == 'movie' else '', contentLanguage = lang1, contentEpisodeNumber=episode if episode else '' From 7c468bb98419d64c1944f1e6ef83424b7896c857 Mon Sep 17 00:00:00 2001 From: greko17 <sex1712@email.it> Date: Sat, 12 Oct 2019 12:15:53 +0200 Subject: [PATCH 15/61] revert: core/support.py --- core/support.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/support.py b/core/support.py index 8b21bcdd..11e9b197 100644 --- a/core/support.py +++ b/core/support.py @@ -293,7 +293,7 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t infoLabels=infolabels, thumbnail=item.thumbnail if function == 'episodios' else scraped["thumb"] , args=item.args, - contentSerieName= scraped['title'],#title if item.contentType or CT != 'movie' and function != 'episodios' else item.fulltitle if function == 'episodios' else '', + contentSerieName= title if item.contentType or CT != 'movie' and function != 'episodios' else item.fulltitle if function == 'episodios' else '', contentTitle= title if item.contentType or CT == 'movie' else '', contentLanguage = lang1, contentEpisodeNumber=episode if episode else '' From b2915685270503791cdc79c67ca8f353a2b4ee64 Mon Sep 17 00:00:00 2001 From: Alhaziel <alhaziel01@gmail.com> Date: Sat, 12 Oct 2019 12:20:31 +0200 Subject: [PATCH 16/61] Migliorie a Community Channels: - Relative Path - plot anche per il canale - concatena il plot di TMDB con quello del Json --- specials/community.py | 58 ++++++++++++++++++++----------------------- 1 file changed, 27 insertions(+), 31 deletions(-) diff --git a/specials/community.py b/specials/community.py index 55d1fa08..e2510f8e 100644 --- a/specials/community.py +++ b/specials/community.py @@ -3,21 +3,15 @@ # -*- Created for Alfa-addon -*- # -*- By the Alfa Develop Group -*- -import re -import urllib -import os +import re, urllib, os -from core import httptools -from core import scrapertools -from core import servertools -from core import jsontools -from channelselector import get_thumb -from core import tmdb +from core import httptools, scrapertoolsV2, servertools, jsontools, tmdb from core.item import Item +from core.support import typo +from channelselector import get_thumb from platformcode import logger, config, platformtools from specials import autoplay -from specials import filtertools -from core.support import typo + list_data = {} @@ -41,7 +35,7 @@ def mainlist(item): def show_channels(item): logger.info() itemlist = [] - + context = [{"title": config.get_localized_string(50005), "action": "remove_channel", "channel": "community"}] @@ -53,17 +47,23 @@ def show_channels(item): itemlist.append(Item(channel=item.channel, title=typo(config.get_localized_string(70676),'bold color kod'), action='add_channel', thumbnail=get_thumb('add.png'))) for 
key, channel in json['channels'].items(): - file_path = channel ['path'] - file_url = httptools.downloadpage(file_path, follow_redirects=True).data + file_path = channel['path'] + path = os.path.dirname(os.path.abspath(file_path)) + if file_path.startswith('http'): + file_url = httptools.downloadpage(file_path, follow_redirects=True).data + else: + file_url = open(file_path, "r").read() json_url = jsontools.load(file_url) - thumbnail = json_url['thumbnail'] if 'thumbnail' in json_url else '' - fanart = json_url['fanart'] if 'fanart' in json_url else '' + thumbnail = json_url['thumbnail'] if 'thumbnail' in json_url and ':/' in json_url['thumbnail'] else path + json_url['thumbnail'] if 'thumbnail' in json_url and '/' in json_url['thumbnail'] else '' + fanart = json_url['fanart'] if 'fanart' in json_url and ':/' in json_url['fanart'] else path + json_url['fanart'] if 'fanart' in json_url and '/' in json_url['fanart'] else '' + plot = json_url['plot'] if 'plot' in json_url else '' itemlist.append(Item(channel=item.channel, title=typo(channel['channel_name'],'bold'), url=file_path, thumbnail=thumbnail, fanart=fanart, + plot=plot, action='show_menu', channel_id = key, context=context)) @@ -83,26 +83,26 @@ def load_json(item): def show_menu(item): global list_data - logger.info() itemlist = [] json_data = load_json(item) - + path = os.path.dirname(os.path.abspath(item.url)) if "menu" in json_data: for option in json_data['menu']: if 'thumbnail' in json_data: - thumbnail = option['thumbnail'] + thumbnail = option['thumbnail'] if ':/' in option['thumbnail'] else path + option['thumbnail'] if '/' in option['thumbnail'] else get_thumb(option['thumbnail']) else: thumbnail = '' if 'fanart' in option and option['fanart']: - fanart = option['fanart'] + fanart = option['fanart'] if ':/' in option['fanart'] else path + option['fanart'] else: fanart = item.fanart if 'plot' in option and option['plot']: plot = option['plot'] else: plot = item.plot - itemlist.append(Item(channel=item.channel, title=format_title(option['title']), thumbnail=thumbnail, fanart=fanart, plot=plot, action='show_menu', url=option['link'])) + url = option['link'] if ':/' in option['link'] else path + option['link'] + itemlist.append(Item(channel=item.channel, title=format_title(option['title']), thumbnail=thumbnail, fanart=fanart, plot=plot, action='show_menu', url=url)) autoplay.show_option(item.channel, itemlist) return itemlist @@ -134,7 +134,7 @@ def list_all(item): title = set_title(title, language, quality) new_item = Item(channel=item.channel, title=format_title(title), quality=quality, - language=language, plot=plot, thumbnail=poster) + language=language, plot=plot, personal_plot=plot, thumbnail=poster) new_item.infoLabels['year'] = media['year'] if 'year' in media else '' new_item.infoLabels['tmdb_id'] = media['tmdb_id'] if 'tmdb_id' in media else '' @@ -149,9 +149,13 @@ def list_all(item): new_item.action = 'seasons' itemlist.append(new_item) + personal_plot = new_item.plot if not 'generic_list' in json_data: tmdb.set_infoLabels(itemlist, seekTmdb=True) + for item in itemlist: + if item.personal_plot != item.plot and item.personal_plot: + item.plot += '\n\n' + typo('','submenu') + '\n\n' + item.personal_plot return itemlist def seasons(item): @@ -207,11 +211,6 @@ def findvideos(item): itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) - # Requerido para FilterTools - # itemlist = filtertools.get_links(itemlist, item, list_language) - - # Requerido para AutoPlay - 
autoplay.start(itemlist, item) return itemlist @@ -319,10 +318,7 @@ def set_title(title, language, quality): return title def format_title(title): - t = scrapertools.find_single_match(title, r'\{([^\}]+)\}') - logger.info(t) - logger.info(title) + t = scrapertoolsV2.find_single_match(title, r'\{([^\}]+)\}') if 'bold' not in t: t += ' bold' title = re.sub(r'(\{[^\}]+\})','',title) - logger.info(title) return typo(title,t) \ No newline at end of file From 8ed2b17098f65bd67d93c3a808a13ace2558db00 Mon Sep 17 00:00:00 2001 From: greko17 <sex1712@email.it> Date: Sat, 12 Oct 2019 12:24:26 +0200 Subject: [PATCH 17/61] test: cinemalibero --- channels/cinemalibero.json | 82 +------ channels/cinemalibero.py | 426 +++++++++++++++---------------------- 2 files changed, 181 insertions(+), 327 deletions(-) diff --git a/channels/cinemalibero.json b/channels/cinemalibero.json index 0124d1ad..d66af440 100644 --- a/channels/cinemalibero.json +++ b/channels/cinemalibero.json @@ -8,79 +8,13 @@ "banner": "https://www.cinemalibero.center/wp-content/themes/Cinemalibero%202.0/images/logo02.png", "categories": ["tvshow", "movie","anime"], "settings": [ - { - "id": "channel_host", - "type": "text", - "label": "Host del canale", - "default": "https://www.cinemalibero.fun/", - "enabled": true, - "visible": true - }, - { - "id": "include_in_global_search", - "type": "bool", - "label": "Includi ricerca globale", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "include_in_newest_peliculas", - "type": "bool", - "label": "Includi in Novità - Film", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "include_in_newest_series", - "type": "bool", - "label": "Includi in Novità - Serie TV", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "include_in_newest_anime", - "type": "bool", - "label": "Includi in Novità - Anime", - "default": false, - "enabled": false, - "visible": false - }, - { - "id": "include_in_newest_italiano", - "type": "bool", - "label": "Includi in Novità - Italiano", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "checklinks", - "type": "bool", - "label": "Verifica se i link esistono", - "default": false, - "enabled": true, - "visible": true - }, - { - "id": "checklinks_number", - "type": "list", - "label": "Numero de link da verificare", - "default": 1, - "enabled": true, - "visible": "eq(-1,true)", - "lvalues": [ "5", "10", "15", "20" ] - }, - { - "id": "filter_languages", - "type": "list", - "label": "Mostra link in lingua...", - "default": 0, - "enabled": true, - "visible": true, - "lvalues": ["Non filtrare","IT"] - } + { + "id": "include_in_newest_series", + "type": "bool", + "label": "@70727", + "default": false, + "enabled": false, + "visible": false + } ] } diff --git a/channels/cinemalibero.py b/channels/cinemalibero.py index 9d64d536..f9edd8a7 100644 --- a/channels/cinemalibero.py +++ b/channels/cinemalibero.py @@ -1,320 +1,240 @@ # -*- coding: utf-8 -*- # ------------------------------------------------------------ -# Canale per CinemaLibero - First Version +# Canale per cinemalibero # ------------------------------------------------------------ """ - Trasformate le sole def per support.menu e support.scrape - da non inviare nel test. - Test solo a trasformazione completa + + Il canale non permette di filtrare film, serie e altro nella ricerca. + Quindi vengono disabilitate le voci: + - "Aggiungi in videoteca" + - "Scarica film/serie" + per le solo ricerce: nel canale e globale. 
+ + Problemi noti che non superano il test del canale: + - + + Avvisi: + - + + Ulteriori info: + """ import re -from core import scrapertools, servertools, httptools, support -from core import tmdb -from core.item import Item -from lib import unshortenit +# per l'uso dei decoratori, per i log, e funzioni per siti particolari +from core import support +# se non si fa uso di findhost() from platformcode import config -from platformcode import logger -from specials import autoplay -import channelselector -# Necessario per Autoplay -IDIOMAS = {'Italiano': 'IT'} -list_language = IDIOMAS.values() -list_servers = ['akstream', 'wstream', 'openload', 'streamango'] -list_quality = ['default'] - -# Necessario per Verifica Link -checklinks = config.get_setting('checklinks', 'cinemalibero') -checklinks_number = config.get_setting('checklinks_number', 'cinemalibero') +# in caso di necessità +from core import scrapertoolsV2, httptools#, servertools +from core.item import Item # per newest +#from lib import unshortenit __channel__ = "cinemalibero" host = config.get_channel_url(__channel__) - headers = [['Referer', host]] +##headers = [ +## ['Host', host.split("//")[-1].split("/")[0]], +## ['User-Agent', 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0'], +## ['Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'], +## ['Accept-Language', 'en-US,en;q=0.5'], +## ['Accept-Encoding', 'gzip, deflate'], +## ['Referer', host], +## ['DNT', '1'], +## ['Connection', 'keep-alive'], +## ['Upgrade-Insecure-Requests', '1'], +## ['Cache-Control', 'max-age=0'] +## ] + +list_servers = ['akstream', 'wstream', 'openload', 'streamango'] +list_quality = ['default'] @support.menu def mainlist(item): - support.log() - film = '/category/film/' - filmSub = [ - ('Generi', ['', 'genres']), - ('Sport', ['/category/sport/', 'peliculas']), - ] - tvshow = '/category/serie-tv/' - tvshowSub = [ - ('Anime ', ['/category/anime-giapponesi/', 'video']) - ] + support.log(item) + + film = ['/category/film/', + ('Generi', ['', 'genres', 'genres']), + ] + + tvshow = ['/category/serie-tv/', +## ('Novità', ['/aggiornamenti-serie-tv/', 'peliculas', 'update']), + ] + + anime = ['/category/anime-giapponesi/', + ] + + search = '' return locals() @support.scrape -def genres(item): - support.log() - action='video' - patron=r'<a class="dropdown-item" href="(?P<url>[^"]+)" title="(?P<title>[A-z]+)"' -## return support.scrape2(item, patronBlock=r'<div id="bordobar" class="dropdown-menu(?P<block>.*)</li>', -## patron=r'<a class="dropdown-item" href="([^"]+)" title="([A-z]+)"', -## listGroups=['url', 'title'], action='video') +def peliculas(item): + support.log(item) + #support.dbg() # decommentare per attivare web_pdb + debug = True + blacklist = [''] + + if item.args == 'search': + patron = r'href="(?P<url>[^"]+)".+?url\((?P<thumb>[^\)]+)\)">.+?class="titolo">(?P<title>[^<]+)<' + patronBlock = r'style="color: #2C3549 !important;" class="fon my-3"><small>.+?</small></h1>(?P<block>.*?)<div class="bg-dark ">' + action = 'select' + else: + if item.contentType == 'tvshow': + # da sistemare per matchare le ultime serie inserite + if item.args == 'update': + patron = r'<div class="card-body p-0"> <a href="(?P<url>[^"]+)".+?url\((?P<thumb>[^)]+)\)">[^>]+>(?P<title>.+?)(?:[ ]\((?P<lang>SubITA)\))?(?:[ ](?P<year>\d{4}))?</div> <div class="genere">(?:|Ep.)(?:|.+?)?</div>' + action = 'select' + else: +## #patron = r'href="(?P<url>[^"]+)".+?url\((?P<thumb>.+?)\)">[^>]+>[^>]+>[^>]+>(?:[ 
](?P<rating>\d+.\d+))?[^>]+>[^>]+>(?P<title>.+?)<[^>]+>[^>]+>(?:.+?) (?:\()?(?P<lang>ITA|iTA|Sub)(?:\))?' +## #patron = r'<div class="card-body p-0"> <a href="(?P<url>[^"]+)".+?url\((?P<thumb>.+?)\)">[^>]+>[^>]+>[^>]+>(?:[ ](?P<rating>\d+.\d+))?[^>]+>[^>]+>(?P<title>.+?)(?: \(.+?\))?(?: \(\d+\)| \d+)?<[^>]+>(?:<div class="genere">.+?(?:\()?(?P<lang>ITA|iTA|Sub)(?:\))?)?' + patron = r'<div class="card-body p-0"> <a href="(?P<url>[^"]+)".+?url\((?P<thumb>.+?)\)">[^>]+>[^>]+>[^>]+>(?:[ ](?P<rating>\d+.\d+))?[^>]+>[^>]+>(?P<title>.+?)(?: \(.+?\))?(?: \(\d+\)| \d+)?</div><div class="genere">(?:.?(?P<episode>\d+x\d+-\d+|\d+-\d+|\d+x\d+|\d+)[ ]?(?:\()?(?:(?P<lang>ITA|iTA|Sub ITA|Sub iTA|Sub))?[ ]?(?:(?P<quality>HD))?.+?)</div>' + action = 'episodios' + if 'anime' in item.url: + patron = r'<div class="card-body p-0"> <a href="(?P<url>[^"]+)".+?url\((?P<thumb>.+?)\)">[^>]+>[^>]+>[^>]+>(?:[ ](?P<rating>\d+.\d+))?[^>]+>[^>]+>(?P<title>.+?)(?: \(.+?\))?(?: \(\d+\)| \d+)?<[^>]+>(?:<div class="genere">.+?(?:\()?(?P<lang>ITA|iTA|Sub)(?:\))?)?' + action = 'select' + elif item.contentType == 'movie': + action = 'findvideos' + patron = r'href="(?P<url>[^"]+)".+?url\((?P<thumb>.+?)\)">[^>]+>[^>]+>[^>]+>(?:[ ](?P<rating>\d+.\d+))?[^>]+>[^>]+>(?P<title>.+?)(?:\[(?P<lang>Sub-iTA|Sub-ITA|Sub)\])?[ ]\((?P<year>\d+)\)</div>(?:<div class="genere">(?P<quality>[^<]+)<)?' + + patronBlock = r'<h1(?: style="color: #2C3549 !important; text-transform: uppercase;"| style="text-transform: uppercase; color: #2C3549 !important;"| style="color: #2C3549 !important; text-transform: uppercase;" style="text-shadow: 1px 1px 1px #FF8C00; color:#FF8C00;"| style="text-shadow: 1px 1px 1px #0f0f0f;" class="darkorange"| style="color:#2C3549 !important;")>.+?</h1>(?P<block>.*?)<div class=(?:"container"|"bg-dark ")>' + + patronNext = '<a class="next page-numbers".*?href="([^"]+)">' return locals() -def peliculas(item): - logger.info('[cinemalibero.py] video') - itemlist = [] - if host not in item.url: - item.url = host + item.url +@support.scrape +def episodios(item): + support.log(item) - # Carica la pagina - data = httptools.downloadpage(item.url).data.replace('\n','').replace('\t','') - block = scrapertools.find_single_match(data, '<div class="container">.*?class="col-md-12[^"]*?">(.*?)<div class=(?:"container"|"bg-dark ")>') + #dbg +## if item.args == '': +## patron = r'<a target=.+?href="(?P<url>[^"]+)"[^>]+>(?P<title>Epis.+?(\d+)?)(?:\((?P<lang>Sub ITA)\))?</a><br />' +## patronBlock = r'(?:class="txt_dow">Streaming:(?P<block>.*?)at-below-post)' + if item.data1 and 'stagione' not in item.data1.lower(): + # è un movie + item.contentType = 'tvshow' + #patron = r'(?:href="[ ]?(?P<url>[^"]+)"[^>]+>(?P<title>[^<]+)<|(?P<episode>\d+(?:×|×)?\d+\-\d+|\d+(?:×|×)\d+)[;]?(?:(\4[^<]+)(\2.*?)|(\2[ ])(?:<(\3.*?)))(?:</a><br />|</a></p>))' + patron = r'<a target=.+?href="(?P<url>[^"]+)"[^>]+>(?P<title>Epis.+?(?P<episode>\d+)?)(?:\((?P<lang>Sub ITA)\))?</a>(?:<br />)?' 
+ patronBlock = r'(?:class="txt_dow">Streaming:(?P<block>.*?)at-below-post)' + else: + patron = r'(?P<episode>\d+(?:×|×)?\d+\-\d+|\d+(?:×|×)\d+)[;]?[ ]?(?:(?P<title>[^<]+)(?P<url>.*?)|(\2[ ])(?:<(\3.*?)))(?:</a><br />|</a></p>)' +## patron = r'<a target=.+?href="(?P<url>[^"]+)"[^>]+>(?P<title>Epis.+?(\d+)?)(?:\((?P<lang>Sub ITA)\))?</a><br />' + patronBlock = r'<p><strong>(?P<block>(?:.+?[Ss]tagione.+?(?P<lang>iTA|ITA|Sub-ITA|Sub-iTA))?(?:|.+?|</strong>)(/?:</span>)?</p>.*?</p>)' + item.contentType = 'tvshow' - # Estrae i contenuti - matches = re.compile(r'<div class="col-lg-3">(.*?)<\/a><\/div>', re.DOTALL).findall(block) + action = 'findvideos' + blacklist = [''] - for match in matches: - url = scrapertools.find_single_match(match, r'href="([^"]+)"') - long_title = scrapertools.find_single_match(match, r'<div class="titolo">([^<]+)<\/div>') - thumb = scrapertools.find_single_match(match, r'url=\((.*?)\)') - quality = scrapertools.find_single_match(match, r'<div class="voto">([^<]+)<\/div>') - genere = scrapertools.find_single_match(match, r'<div class="genere">([^<]+)<\/div>') +## pagination = '' - year = scrapertools.find_single_match(long_title, r'\(([0-9)]+)') or scrapertools.find_single_match(long_title, r'\) ([0-9)]+)') - lang = scrapertools.find_single_match(long_title, r'\(([a-zA-Z)]+)') - - title = re.sub(r'\(.*','',long_title) - title = re.sub(r'(?:\(|\))','',title) - if genere: - genere = ' - [' + genere + ']' - if year: - long_title = title + ' - ('+ year + ')' + genere - if lang: - long_title = '[B]' + title + '[/B]' + ' - ('+ lang + ')' + genere - else: - long_title = '[B]' + title + '[/B]' - - # Seleziona fra Serie TV e Film - if item.contentType == 'movie': - tipologia = 'movie' - action = 'findvideos' - elif item.contentType == 'episode': - tipologia = 'tv' - action = 'episodios' - else: - tipologia = 'movie' - action = 'select' +## debug = True + return locals() - itemlist.append( - Item(channel=item.channel, - action=action, - contentType=item.contentType, - title=long_title, - fulltitle=title, - quality=quality, - url=url, - thumbnail=thumb, - infoLabels={'year': year}, - show=title)) - # Next page - next_page = scrapertools.find_single_match(data, '<a class="next page-numbers".*?href="([^"]+)">') +@support.scrape +def genres(item): + support.log(item) + #dbg - if next_page != '': - itemlist.append( - Item(channel=item.channel, - action='video', - title='[B]' + config.get_localized_string(30992) + ' »[/B]', - url=next_page, - contentType=item.contentType, - thumbnail='http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png')) + action = 'peliculas' + blacklist = [''] + patron = r'<a class="dropdown-item" href="(?P<url>[^"]+)" title="(?P<title>[A-z]+)"' - tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) - return itemlist + return locals() def select(item): + support.log('select --->', item) + #support.dbg() data = httptools.downloadpage(item.url, headers=headers).data - block = scrapertools.find_single_match(data, r'<div class="col-md-8 bg-white rounded-left p-5"><div>(.*?)<\/div>') + block = scrapertoolsV2.find_single_match(data, r'<div class="col-md-8 bg-white rounded-left p-5"><div>(.*?)<div style="margin-left: 0.5%; color: #FFF;">') if re.findall('rel="category tag">serie', data, re.IGNORECASE): - logger.info('select = ### è una serie ###') + support.log('select = ### è una serie ###') return episodios(Item(channel=item.channel, title=item.title, fulltitle=item.fulltitle, url=item.url, - extra='serie', - 
contentType='episode')) + args='serie', + contentType='tvshow', + data1 = data + )) elif re.findall('rel="category tag">anime', data, re.IGNORECASE): - if re.findall('episodio', block, re.IGNORECASE): - logger.info('select = ### è un anime ###') + if re.findall('episodio', block, re.IGNORECASE) or re.findall('stagione', data, re.IGNORECASE): + support.log('select = ### è un anime ###') return episodios(Item(channel=item.channel, title=item.title, fulltitle=item.fulltitle, url=item.url, - extra='anime', - contentType='episode')) + args='anime', + contentType='tvshow', + data1 = data + )) else: - logger.info('select = ### è un film ###') + support.log('select = ### è un film ###') return findvideos(Item(channel=item.channel, title=item.title, fulltitle=item.fulltitle, url=item.url, - contentType='movie')) + args = '', + contentType='movie', + #data = data + )) else: - logger.info('select = ### è un film ###') + support.log('select = ### è un film ###') return findvideos(Item(channel=item.channel, title=item.title, fulltitle=item.fulltitle, url=item.url, - contentType='movie')) + contentType='movie', + #data = data + )) +def search(item, texto): + support.log("[cinemalibero.py] " + item.url + " search " + texto) + item.url = host + "/?s=" + texto + try: + item.args = 'search' + item.contentType = 'episode' # non fa uscire le voci nel context menu + return peliculas(item) + # Continua la ricerca in caso di errore + except: + import sys + for line in sys.exc_info(): + support.log("%s" % line) + return [] -def findvideos(item): # Questa def. deve sempre essere nominata findvideos - logger.info('[cinemalibero.py] findvideos') +def newest(categoria): + support.log('newest ->', categoria) itemlist = [] + item = Item() + try: + if categoria == 'peliculas': + item.args = 'update' + item.url = host+'/aggiornamenti-serie-tv/' + item.contentType = 'tvshow' + item.action = 'peliculas' + itemlist = peliculas(item) - if item.args == 'direct': - return servertools.find_video_items(item) + if itemlist[-1].action == 'peliculas': + itemlist.pop() + # Continua la ricerca in caso di errore + except: + import sys + for line in sys.exc_info(): + log('newest log: ', {0}.format(line)) + return [] - if item.contentType == 'episode': - data = item.url.lower() - block = scrapertools.find_single_match(data,r'>streaming.*?<\/strong>*?<\/h2>(.*?)<\/div>') - urls = re.findall('<a.*?href="([^"]+)"', block, re.DOTALL) - else: - data = httptools.downloadpage(item.url, headers=headers).data - data = re.sub(r'\n|\t','',data).lower() - block = scrapertools.find_single_match(data,r'>streaming.*?<\/strong>(.*?)<strong>') - urls = re.findall('<a href="([^"]+)".*?class="external"', block, re.DOTALL) - - logger.info('URLS'+ str(urls)) - if urls: - data ='' - for url in urls: - url, c = unshortenit.unshorten(url) - data += url + '\n' - - logger.info('DATA'+ data) - itemlist = servertools.find_video_items(data=data) - - for videoitem in itemlist: - videoitem.title = item.fulltitle + ' - [COLOR limegreen][[/COLOR]'+videoitem.title+' [COLOR limegreen]][/COLOR]' - videoitem.fulltitle = item.fulltitle - videoitem.thumbnail = item.thumbnail - videoitem.show = item.show - videoitem.plot = item.plot - videoitem.channel = item.channel - videoitem.contentType = item.contentType - - # Link Aggiungi alla Libreria - if item.contentType != 'episode': - if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findservers': - itemlist.append( - Item(channel=item.channel, title='[COLOR lightblue][B]Aggiungi alla 
videoteca[/B][/COLOR]', url=item.url, - action='add_pelicula_to_library', extra='findservers', contentTitle=item.contentTitle)) - - # Necessario per filtrare i Link - if checklinks: - itemlist = servertools.check_list_links(itemlist, checklinks_number) - - # Necessario per FilterTools - # itemlist = filtertools.get_links(itemlist, item, list_language) - - # Necessario per AutoPlay - autoplay.start(itemlist, item) - - return itemlist -def episodios(item): # Questa def. deve sempre essere nominata episodios - logger.info('[cinemalibero.py] episodios') - itemlist = [] - extra = '' - - # Carica la pagina - data = httptools.downloadpage(item.url, headers=headers).data - block = scrapertools.find_single_match(data, r'<div class="col-md-8 bg-white rounded-left p-5"><div>(.*?)at-below-post') - if re.findall('rel="category tag">serie', data, re.IGNORECASE): - # logger.info('select = ### è una serie ###') - extra='serie' - elif re.findall('rel="category tag">anime', data, re.IGNORECASE): - if re.findall('episodi', block, re.IGNORECASE): - # logger.info('select = ### è un anime ###') - extra='anime' - - block = re.sub(r'<h2>.*?<\/h2>','',block) - block = block.replace('<p>','').replace('<p style="text-align: left;">','').replace('–<','<').replace('-<','<').replace('–<','<').replace('– <','<').replace('<strong>','<stop><start><strong>')+'<stop>' - block = re.sub(r'stagione completa.*?<\/p>','',block,flags=re.IGNORECASE) - - - if extra == 'serie': - block = block.replace('<br /> <a','<a') - matches = re.compile(r'<start>.*?(?:stagione|Stagione)(.*?)<\/(?:strong|span)><\/p>(.*?)<stop>', re.DOTALL).findall(block) - - if not matches: - matches = scrapertools.find_multiple_matches(block, r'<a href="([^"]+)"[^>]+>(Episodio [0-9]+)</a>') - for scrapedurl, scrapedtitle in matches: - scrapedtitle = re.sub(r'Episodio ([0-9]+)', r'Episodio 1x\1', scrapedtitle) - itemlist.append( - Item(channel=item.channel, - action="findvideos", - contentType='episode', - title=scrapedtitle, - fulltitle=scrapedtitle, - show=item.fulltitle, - url=scrapedurl, - args='direct')) - else: - for lang, html in matches: - lang = re.sub('<.*?>','',lang) - html = html.replace('<br />','\n').replace('</p>', '\n') - - matches = re.compile(r'([^<]+)([^\n]+)\n', re.DOTALL).findall(html) - for scrapedtitle, html in matches: - itemlist.append( - Item(channel=item.channel, - action="findvideos", - contentType='episode', - title=scrapedtitle + ' - (' + lang + ')', - fulltitle=scrapedtitle, - show=item.fulltitle, - url=html)) - - elif extra == 'anime': - block = re.sub(r'<start.*?(?:download:|Download:).*?<stop>','',block) - block = re.sub(r'(?:mirror|Mirror)[^<]+<','',block) - block = block.replace('<br />','\n').replace('/a></p>','\n') - block = re.sub(r'<start.*?(?:download|Download).*?\n','',block) - matches = re.compile('<a(.*?)\n', re.DOTALL).findall(block) - for html in matches: - scrapedtitle = scrapertools.find_single_match(html, r'>(.*?)<\/a>') - itemlist.append( - Item(channel=item.channel, - action="findvideos", - contentType='episode', - title=scrapedtitle, - fulltitle=scrapedtitle, - show=item.fulltitle, - url=html)) - +def findvideos(item): + support.log(item) + if item.contentType == 'movie': + return support.server(item) else: - logger.info('select = ### è un film ###') - return findvideos(Item(channel=item.channel, - title=item.title, - fulltitle=item.fulltitle, - url=item.url, - show=item.fulltitle, - contentType='movie')) - - if config.get_videolibrary_support() and len(itemlist) != 0: - itemlist.append( - 
Item(channel=item.channel, - title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161), - url=item.url, - action="add_serie_to_library", - extra="episodios", - show=item.show)) - - return itemlist + return support.server(item, data= item.url) From 34a6cf6bf35538590117c221e1b273d98e91c060 Mon Sep 17 00:00:00 2001 From: Alhaziel <alhaziel01@gmail.com> Date: Sat, 12 Oct 2019 16:42:11 +0200 Subject: [PATCH 18/61] Fix contentTitle/contentSerieName in support.py --- core/support.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/support.py b/core/support.py index 11e9b197..e0a831ac 100644 --- a/core/support.py +++ b/core/support.py @@ -293,8 +293,8 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t infoLabels=infolabels, thumbnail=item.thumbnail if function == 'episodios' else scraped["thumb"] , args=item.args, - contentSerieName= title if item.contentType or CT != 'movie' and function != 'episodios' else item.fulltitle if function == 'episodios' else '', - contentTitle= title if item.contentType or CT == 'movie' else '', + contentSerieName= scraped['title'] if item.contentType or CT != 'movie' and function != 'episodios' else item.fulltitle if function == 'episodios' else '', + contentTitle= scraped['title'] if item.contentType or CT == 'movie' else '', contentLanguage = lang1, contentEpisodeNumber=episode if episode else '' ) From f77a07f23aa5990377940180568de0bdd1e805ba Mon Sep 17 00:00:00 2001 From: greko17 <sex1712@email.it> Date: Sat, 12 Oct 2019 18:47:49 +0200 Subject: [PATCH 19/61] fix: animeforce.py per newest --- channels/animeforce.py | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/channels/animeforce.py b/channels/animeforce.py index 3aa5109d..43a6d8f8 100644 --- a/channels/animeforce.py +++ b/channels/animeforce.py @@ -29,7 +29,7 @@ def mainlist(item): ] return locals() - + def newest(categoria): support.log(categoria) itemlist = [] @@ -38,11 +38,11 @@ def newest(categoria): if categoria == "anime": item.url = host item.args = 'update' - itemlist = peliculas(item) + itemlist = peliculas(item) if itemlist[-1].action == "peliculas": itemlist.pop() - # Continua la ricerca in caso di errore + # Continua la ricerca in caso di errore except: import sys for line in sys.exc_info(): @@ -56,7 +56,7 @@ def search(item, texto): search = texto item.contentType = 'tvshow' patron = '<strong><a href="(?P<url>[^"]+)">(?P<title>.*?) 
[Ss][Uu][Bb]' - action = 'episodios' + action = 'episodios' return locals() @@ -66,10 +66,11 @@ def peliculas(item): if item.args == 'update': patron = r'src="(?P<thumb>[^"]+)" class="attachment-grid-post[^"]+" alt="[^"]*" title="(?P<title>[^"]+").*?<h2><a href="(?P<url>[^"]+)"' def itemHook(item): + item.contentType = 'episode' delete = support.scrapertoolsV2.find_single_match(item.fulltitle, r'( Episodio.*)') number = support.scrapertoolsV2.find_single_match(item.title, r'Episodio (\d+)') item.title = support.typo(number + ' - ','bold') + item.title.replace(delete,'') - item.fulltitle = item.show = item.fulltitle.replace(delete,'') + item.fulltitle = item.show = item.title.replace(delete,'') item.url = item.url.replace('-episodio-'+ number,'') item.number = number return item @@ -102,16 +103,16 @@ def findvideos(item): support.log(item) itemlist = [] - - if item.number: - item.url = support.match(item, r'<a href="([^"]+)"[^>]*>', patronBlock=r'Episodio %s(.*?)</tr>' % item.number)[0][0] - + + if item.number: + item.url = support.match(item, r'<a href="([^"]+)"[^>]*>', patronBlock=r'Episodio %s(.*?)</tr>' % item.number)[0][0] + if 'http' not in item.url: if '//' in item.url[:2]: item.url = 'http:' + item.url - elif host not in item.url: + elif host not in item.url: item.url = host + item.url - + if 'adf.ly' in item.url: item.url = adfly.get_long_url(item.url) elif 'bit.ly' in item.url: @@ -129,4 +130,4 @@ def findvideos(item): support.server(item, itemlist=itemlist) - return itemlist \ No newline at end of file + return itemlist From 44b682e76cb429ee8b3a1aa04630f34dbf98de0e Mon Sep 17 00:00:00 2001 From: greko17 <sex1712@email.it> Date: Sat, 12 Oct 2019 18:48:41 +0200 Subject: [PATCH 20/61] fix: specials/news.py pulizia titolo --- specials/news.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/specials/news.py b/specials/news.py index 29b2717b..18cac52a 100644 --- a/specials/news.py +++ b/specials/news.py @@ -409,9 +409,9 @@ def get_title(item): title = item.title # Limpiamos el titulo de etiquetas de formato anteriores -## title = re.compile("\[/*COLO.*?\]", re.DOTALL).sub("", title) -## title = re.compile("\[/*B\]", re.DOTALL).sub("", title) -## title = re.compile("\[/*I\]", re.DOTALL).sub("", title) + title = re.compile("\[/*COLO.*?\]", re.DOTALL).sub("", title) + title = re.compile("\[/*B\]", re.DOTALL).sub("", title) + title = re.compile("\[/*I\]", re.DOTALL).sub("", title) title = '[B]'+title+'[/B]' From 3351f6f310a4bc42ee67a7c0b3cc5d30d0dc1720 Mon Sep 17 00:00:00 2001 From: Alhaziel <alhaziel01@gmail.com> Date: Sat, 12 Oct 2019 19:00:35 +0200 Subject: [PATCH 21/61] Fix CB01 Anime --- channels/cb01anime.py | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/channels/cb01anime.py b/channels/cb01anime.py index b6a5dc35..71b32bd8 100644 --- a/channels/cb01anime.py +++ b/channels/cb01anime.py @@ -43,6 +43,7 @@ def search(item, texto): @support.scrape def peliculas(item): blacklist = Blacklist + item.contentType = 'tvshow' patron = r'<div class="span4">\s*<a href="(?P<url>[^"]+)"><img src="(?P<thumb>[^"]+)"[^>]+><\/a>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+> <h1>(?P<title>[^<\[]+)(?:\[(?P<lang>[^\]]+)\])?</h1></a>.*?-->(?:.*?<br />)?\s*(?P<plot>[^<]+)' patronNext = r'<link rel="next" href="([^"]+)"' action = 'check' @@ -65,21 +66,27 @@ def episodios(item): season = 1 s = 1 e = 0 + sp = 0 for match in item.url: if 'stagione' in match.lower(): find_season = support.match(match, r'Stagione\s*(\d+)')[0] season = 
int(find_season[0]) if find_season else season + 1 if 'prima' not in match.lower() else season - else: + else: title = support.match(match,'<a[^>]+>([^<]+)</a>')[0][0] if 'episodio' in title.lower(): - ep = int(support.match(match, r'Episodio (\d+)')[0][0]) - if season > s and ep > 1: - s += 1 - e = ep - 1 - title = str(season) + 'x' + str(ep-e).zfill(2) + ' - ' + title - data += title + '|' + match + '\n' - - + ep = support.match(match, r'Episodio ((?:\d+.\d|\d+))')[0][0] + if '.' in ep: + sp += 1 + title = '0' + 'x' + str(sp).zfill(2) + ' - ' + title + else: + ep = int(ep) + if season > s and ep > 1: + s += 1 + e = ep - 1 + title = str(season) + 'x' + str(ep-e).zfill(2) + ' - ' + title + data += title + '|' + match + '\n' + + patron = r'(?P<title>[^\|]+)\|(?P<url>[^\n]+)\n' action = 'findvideos' return locals() From d74652ad771b17c9a5dfc65e1a864e34d096f715 Mon Sep 17 00:00:00 2001 From: greko <50103632+greko17@users.noreply.github.com> Date: Sun, 13 Oct 2019 12:33:22 +0200 Subject: [PATCH 22/61] Update test-canale.md piccole modifiche per i titoli in grassetto! --- .github/ISSUE_TEMPLATE/test-canale.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/test-canale.md b/.github/ISSUE_TEMPLATE/test-canale.md index 5e1a2f31..74759500 100644 --- a/.github/ISSUE_TEMPLATE/test-canale.md +++ b/.github/ISSUE_TEMPLATE/test-canale.md @@ -93,9 +93,9 @@ Cerca un titolo a caso in KOD e lo stesso titolo sul sito. Confronta i risultati - [ ] OK - indica il tipo di problema -**Sezione FILM (se il sito non ha film elimina questa parte) +**Sezione FILM (se il sito non ha film elimina questa parte)** -**TestN.3: Pagina dei Titoli +**TestN.3: Pagina dei Titoli** *Test da effettuare mentre sei dentro un menu del canale (film, serietv, in corso ecc..)*. Voci nel menu contestuale di KOD. Posizionati su di un titolo e controlla se hai le seguenti voci, nel menu contestuale (tasto c o tenendo enter premuto): @@ -109,9 +109,9 @@ Voci nel menu contestuale di KOD. Posizionati su di un titolo e controlla se hai - [ ] Si - [ ] No -**Fine test menu contestuale +**Fine test menu contestuale** -**Fondo pagina dei titoli +**Fondo pagina dei titoli** **3. Paginazione, controlla ci sia la voce "Successivo" (se non c'è controlla sul sito se è presente)** @@ -128,7 +128,7 @@ Voci nel menu contestuale di KOD. Posizionati su di un titolo e controlla se hai **5. Eventuali problemi riscontrati** - scrivi qui il problema/i -**Sezione Serie TV (se il sito non ha serietv elimina questa parte) +**Sezione Serie TV (se il sito non ha serietv elimina questa parte)** Test da effettuare mentre sei nella pagina dei titoli. Per ogni titolo verifica ci siano le voci nel menu contestuale. @@ -167,7 +167,7 @@ Cerca un titolo a caso in KOD e lo stesso titolo sul sito. Confronta i risultati **7. Eventuali problemi riscontrati** - scrivi qui il problema/i -**Sezione Anime (se il sito non ha anime elimina questa parte) +**Sezione Anime (se il sito non ha anime elimina questa parte)** Test da effettuare mentre sei nella pagina dei titoli. Per ogni titolo verifica ci siano le voci nel menu contestuale. 
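The episodios() rework for cb01anime in the patch above renumbers flat "Episodio N" entries into per-season SxEE labels and routes decimal episode numbers (e.g. "Episodio 4.5") to a season-0 specials slot. A minimal standalone sketch of that idea follows, assuming a plain list of scraped strings; the function name and the absolute-to-relative episode offset handling are illustrative only, not code taken from any patch.

import re

def label_episodes(entries):
    """Turn raw 'Stagione ...' / 'Episodio N' strings into 'SxEE - title' labels."""
    labels = []
    season = 0    # bumped on every 'Stagione' header line
    offset = 0    # absolute number of the first episode seen in the current season
    specials = 0  # running counter for decimal episode numbers (specials)
    for entry in entries:
        if 'stagione' in entry.lower():
            season += 1
            offset = 0
            continue
        match = re.search(r'Episodio\s+(\d+(?:\.\d+)?)', entry, re.IGNORECASE)
        if not match:
            continue
        number = match.group(1)
        if '.' in number:
            # decimal numbering marks a special episode: file it under season 0
            specials += 1
            labels.append('0x%02d - %s' % (specials, entry))
        else:
            ep = int(number)
            if not offset:
                offset = ep  # sites often keep absolute numbering across seasons
            labels.append('%dx%02d - %s' % (max(season, 1), ep - offset + 1, entry))
    return labels

print(label_episodes(['Prima Stagione', 'Episodio 1', 'Episodio 2',
                      'Seconda Stagione', 'Episodio 3', 'Episodio 4.5']))

Run as-is this prints ['1x01 - Episodio 1', '1x02 - Episodio 2', '2x01 - Episodio 3', '0x01 - Episodio 4.5'], i.e. the second season restarts at episode 01 even though the site numbers episodes continuously, which is the behaviour the patch aims for.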
From 443813fa78ce4680404098191e18bba19fcc0c1b Mon Sep 17 00:00:00 2001 From: Alhaziel01 <alhaziel01@gmail.com> Date: Sun, 13 Oct 2019 22:19:22 +0200 Subject: [PATCH 23/61] Community Channel, fix reletive links --- specials/community.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/specials/community.py b/specials/community.py index e2510f8e..3bf91e9c 100644 --- a/specials/community.py +++ b/specials/community.py @@ -49,6 +49,7 @@ def show_channels(item): for key, channel in json['channels'].items(): file_path = channel['path'] path = os.path.dirname(os.path.abspath(file_path)) + if 'http' in path: path = path[path.find('http'):].replace('\\','/').replace(':/','://') if file_path.startswith('http'): file_url = httptools.downloadpage(file_path, follow_redirects=True).data else: @@ -66,7 +67,8 @@ def show_channels(item): plot=plot, action='show_menu', channel_id = key, - context=context)) + context=context, + path=path)) return itemlist def load_json(item): @@ -84,25 +86,26 @@ def load_json(item): def show_menu(item): global list_data itemlist = [] + logger.info(item) - json_data = load_json(item) - path = os.path.dirname(os.path.abspath(item.url)) + json_data = load_json(item) + if "menu" in json_data: for option in json_data['menu']: if 'thumbnail' in json_data: - thumbnail = option['thumbnail'] if ':/' in option['thumbnail'] else path + option['thumbnail'] if '/' in option['thumbnail'] else get_thumb(option['thumbnail']) + thumbnail = option['thumbnail'] if ':/' in option['thumbnail'] else item.path + option['thumbnail'] if '/' in option['thumbnail'] else get_thumb(option['thumbnail']) else: thumbnail = '' if 'fanart' in option and option['fanart']: - fanart = option['fanart'] if ':/' in option['fanart'] else path + option['fanart'] + fanart = option['fanart'] if ':/' in option['fanart'] else item.path + option['fanart'] else: fanart = item.fanart if 'plot' in option and option['plot']: plot = option['plot'] else: plot = item.plot - url = option['link'] if ':/' in option['link'] else path + option['link'] - itemlist.append(Item(channel=item.channel, title=format_title(option['title']), thumbnail=thumbnail, fanart=fanart, plot=plot, action='show_menu', url=url)) + url = option['link'] if ':/' in option['link'] else item.path + option['link'] + itemlist.append(Item(channel=item.channel, title=format_title(option['title']), thumbnail=thumbnail, fanart=fanart, plot=plot, action='show_menu', url=url, path=item.path)) autoplay.show_option(item.channel, itemlist) return itemlist @@ -155,7 +158,7 @@ def list_all(item): tmdb.set_infoLabels(itemlist, seekTmdb=True) for item in itemlist: if item.personal_plot != item.plot and item.personal_plot: - item.plot += '\n\n' + typo('','submenu') + '\n\n' + item.personal_plot + item.plot = item.personal_plot + '\n\n' + typo('','submenu') + '\n\n' + item.plot return itemlist def seasons(item): From d2b486926122f3443941883178b2eac433161ed7 Mon Sep 17 00:00:00 2001 From: greko17 <sex1712@email.it> Date: Mon, 14 Oct 2019 12:08:37 +0200 Subject: [PATCH 24/61] featured: specials/news.py aggiunta dicitura "Serie Completa" per i siti come seriehd --- specials/news.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/specials/news.py b/specials/news.py index 18cac52a..305c0e06 100644 --- a/specials/news.py +++ b/specials/news.py @@ -392,7 +392,7 @@ def get_newest(channel_id, categoria): def get_title(item): - support.log("ITEM NEWEST ->", item) + #support.log("ITEM NEWEST ->", item) if 
item.contentSerieName: # Si es una serie title = item.contentSerieName #title = re.compile("\[.*?\]", re.DOTALL).sub("", item.contentSerieName) @@ -413,6 +413,7 @@ def get_title(item): title = re.compile("\[/*B\]", re.DOTALL).sub("", title) title = re.compile("\[/*I\]", re.DOTALL).sub("", title) + title = '[B]'+title+'[/B]' if item.contentLanguage == '': @@ -426,6 +427,10 @@ def get_title(item): if item.quality: title += support.typo(item.quality, '_ [] color kod') + + season_ = support.typo(config.get_localized_string(70736), '_ [] color white bold') if (type(item.args) != bool and 'season_completed' in item.args) else '' + if season_: + title += season_ return title @@ -434,7 +439,7 @@ def no_group(list_result_canal): global channels_id_name for i in list_result_canal: - support.log("NO GROUP i -> ", i) + #support.log("NO GROUP i -> ", i) canale = channels_id_name[i.channel] canale = canale # per differenziarlo dal colore delle altre voci i.title = get_title(i) + " [" + canale + "]" From b270c10c9ebe6c33fc4a1535a490be0dee7fec41 Mon Sep 17 00:00:00 2001 From: greko17 <sex1712@email.it> Date: Mon, 14 Oct 2019 12:09:23 +0200 Subject: [PATCH 25/61] add: aggiunta voce "Serie Completa" --- resources/language/Italian/strings.po | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/resources/language/Italian/strings.po b/resources/language/Italian/strings.po index 642096e6..1088840b 100644 --- a/resources/language/Italian/strings.po +++ b/resources/language/Italian/strings.po @@ -5660,3 +5660,7 @@ msgstr "Seleziona gli Episodi della Stagione" msgctxt "#70735" msgid "%s Special Episode Number" msgstr "Numero dell'Episodio Speciale %s" + +msgctxt "#70736" +msgid "Completed Serie" +msgstr "Serie Completa" From 89b0e349c3e79729469769f7e4789b99cd371a6e Mon Sep 17 00:00:00 2001 From: greko17 <sex1712@email.it> Date: Mon, 14 Oct 2019 12:09:49 +0200 Subject: [PATCH 26/61] add: "serie completa" --- core/support.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/core/support.py b/core/support.py index e0a831ac..e23cdd75 100644 --- a/core/support.py +++ b/core/support.py @@ -199,7 +199,7 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t # type = tipo del video. Es. movie per film o tvshow per le serie. Di solito sono discrimanti usati dal sito # lang = lingua del video. Es: ITA, Sub-ITA, Sub, SUB ITA. # AVVERTENZE: Se il titolo è trovato nella ricerca TMDB/TVDB/Altro allora le locandine e altre info non saranno quelle recuperate nel sito.!!!! 
- + stagione = '' # per quei siti che hanno la stagione nel blocco ma non nelle puntate for i, match in enumerate(matches): if pagination and (pag - 1) * pagination > i: continue # pagination @@ -222,6 +222,9 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t season = scraped['season'] if stagione: episode = season +'x'+ scraped['episode'] + elif item.contentType == 'tvshow' and (scraped['episode'] == '' and season == ''): + item.args = 'season_completed' + episode = '' else: episode = re.sub(r'\s-\s|-|x|–|×', 'x', scraped['episode']) if scraped['episode'] else '' @@ -234,12 +237,11 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t # make formatted Title [longtitle] s = ' - ' - title = episode + (s if episode and title else '') + title + title = episode + (s if episode and title else '') + title longtitle = title + (s if title and title2 else '') + title2 longtitle = typo(longtitle, 'bold') longtitle += (typo(Type,'_ () bold') if Type else '') + (typo(quality, '_ [] color kod') if quality else '') - lang1, longtitle = scrapeLang(scraped, lang, longtitle) # if title is set, probably this is a list of episodes or video sources From ab687f9786963c9aee73a57f5015cc09c6986cfc Mon Sep 17 00:00:00 2001 From: greko17 <sex1712@email.it> Date: Mon, 14 Oct 2019 12:11:56 +0200 Subject: [PATCH 27/61] fix: seriehd MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit aggiunto in novità --- channels/seriehd.json | 1 - channels/seriehd.py | 25 +++++++++++++++++++------ 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/channels/seriehd.json b/channels/seriehd.json index c6a937e5..b6be2f7b 100644 --- a/channels/seriehd.json +++ b/channels/seriehd.json @@ -7,6 +7,5 @@ "thumbnail": "seriehd.png", "banner": "seriehd.png", "categories": ["tvshow"], - "not_active": ["include_in_newest"], "settings": [] } diff --git a/channels/seriehd.py b/channels/seriehd.py index 98c1ed58..51648003 100644 --- a/channels/seriehd.py +++ b/channels/seriehd.py @@ -7,9 +7,6 @@ from core import scrapertoolsV2, httptools, support from core.item import Item -##__channel__ = 'seriehd' -# host = support.config.get_channel_url(__channel__) - # impostati dinamicamente da findhost() host = '' headers = '' @@ -26,9 +23,6 @@ findhost() list_servers = ['verystream', 'openload', 'streamango', 'thevideome'] list_quality = ['1080p', '720p', '480p', '360'] -##checklinks = support.config.get_setting('checklinks', __channel__) -##checklinks_number = support.config.get_setting('checklinks_number', __channel__) - @support.menu def mainlist(item): @@ -89,6 +83,25 @@ def episodios(item): action = 'findvideos' return locals() +def newest(categoria): + support.log(categoria) + itemlist = [] + item = support.Item() + try: + if categoria == "series": + item.url = host + item.contentType = 'tvshow' + itemlist = peliculas(item) + itemlist.pop() + # Continua la ricerca in caso di errore + except: + import sys + for line in sys.exc_info(): + support.logger.error("{0}".format(line)) + return [] + + return itemlist + def findvideos(item): support.log() From a5159714f9434bd02367be3cb7e6b52a60adcf62 Mon Sep 17 00:00:00 2001 From: Alhaziel01 <alhaziel01@gmail.com> Date: Mon, 14 Oct 2019 12:44:07 +0200 Subject: [PATCH 28/61] Fix Season in Community Channels --- specials/community.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/specials/community.py b/specials/community.py index 3bf91e9c..5b9bb6ff 100644 --- 
a/specials/community.py +++ b/specials/community.py @@ -106,7 +106,7 @@ def show_menu(item): plot = item.plot url = option['link'] if ':/' in option['link'] else item.path + option['link'] itemlist.append(Item(channel=item.channel, title=format_title(option['title']), thumbnail=thumbnail, fanart=fanart, plot=plot, action='show_menu', url=url, path=item.path)) - autoplay.show_option(item.channel, itemlist) + if 'channel_name' in json_data: autoplay.show_option(item.channel, itemlist) return itemlist if "movies_list" in json_data: @@ -129,6 +129,7 @@ def list_all(item): itemlist = [] media_type = item.media_type json_data = load_json(item) + logger.info('JSON= ' + str(json_data)) for media in json_data[media_type]: quality, language, plot, poster = set_extra_values(media) @@ -137,12 +138,12 @@ def list_all(item): title = set_title(title, language, quality) new_item = Item(channel=item.channel, title=format_title(title), quality=quality, - language=language, plot=plot, personal_plot=plot, thumbnail=poster) + language=language, plot=plot, personal_plot=plot, thumbnail=poster, path=item.path) new_item.infoLabels['year'] = media['year'] if 'year' in media else '' new_item.infoLabels['tmdb_id'] = media['tmdb_id'] if 'tmdb_id' in media else '' - if 'movies_list' or 'generic_list' in json_data: + if 'movies_list' in json_data or 'generic_list' in json_data: new_item.url = media new_item.contentTitle = media['title'] new_item.action = 'findvideos' @@ -162,14 +163,15 @@ def list_all(item): return itemlist def seasons(item): - logger.info() + logger.info('PATH= ' + item.path) itemlist = [] infoLabels = item.infoLabels list_seasons = item.url for season in list_seasons: infoLabels['season'] = season['season'] title = config.get_localized_string(60027) % season['season'] - itemlist.append(Item(channel=item.channel, title=format_title(title), url=season['link'], action='episodesxseason', + url = season['link'] if ':/' in season['link'] else item.path + season['link'] + itemlist.append(Item(channel=item.channel, title=format_title(title), url=url, action='episodesxseason', contentSeasonNumber=season['season'], infoLabels=infoLabels)) tmdb.set_infoLabels(itemlist, seekTmdb=True) From ac0d36674b9c38ff12cf511118d5542c755692f1 Mon Sep 17 00:00:00 2001 From: Alhaziel <alhaziel01@gmail.com> Date: Mon, 14 Oct 2019 16:38:01 +0200 Subject: [PATCH 29/61] Fix Relative Path per Short Links --- specials/community.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/specials/community.py b/specials/community.py index 5b9bb6ff..90dd4547 100644 --- a/specials/community.py +++ b/specials/community.py @@ -4,6 +4,7 @@ # -*- By the Alfa Develop Group -*- import re, urllib, os +import requests from core import httptools, scrapertoolsV2, servertools, jsontools, tmdb from core.item import Item @@ -47,7 +48,8 @@ def show_channels(item): itemlist.append(Item(channel=item.channel, title=typo(config.get_localized_string(70676),'bold color kod'), action='add_channel', thumbnail=get_thumb('add.png'))) for key, channel in json['channels'].items(): - file_path = channel['path'] + if 'http' in channel['path']: file_path = requests.get(channel['path']).url + else: file_path = channel['path'] path = os.path.dirname(os.path.abspath(file_path)) if 'http' in path: path = path[path.find('http'):].replace('\\','/').replace(':/','://') if file_path.startswith('http'): @@ -88,8 +90,8 @@ def show_menu(item): itemlist = [] logger.info(item) - json_data = load_json(item) - + json_data = load_json(item) + if "menu" in 
json_data: for option in json_data['menu']: if 'thumbnail' in json_data: From 8c5f47a606d7776db1662c3b3dcab9fbfd4e10c6 Mon Sep 17 00:00:00 2001 From: Alhaziel <alhaziel01@gmail.com> Date: Mon, 14 Oct 2019 17:45:03 +0200 Subject: [PATCH 30/61] =?UTF-8?q?Inibito=20findvideos=20se=20"links"=20non?= =?UTF-8?q?=20=C3=A8=20presente?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- specials/community.py | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/specials/community.py b/specials/community.py index 90dd4547..4e58557e 100644 --- a/specials/community.py +++ b/specials/community.py @@ -155,13 +155,12 @@ def list_all(item): new_item.action = 'seasons' itemlist.append(new_item) - personal_plot = new_item.plot if not 'generic_list' in json_data: tmdb.set_infoLabels(itemlist, seekTmdb=True) for item in itemlist: if item.personal_plot != item.plot and item.personal_plot: - item.plot = item.personal_plot + '\n\n' + typo('','submenu') + '\n\n' + item.plot + item.plot = '\n\n' + typo('','submenu') + '\n' + item.personal_plot + '\n' + typo('','submenu') + '\n\n' + item.plot return itemlist def seasons(item): @@ -207,20 +206,20 @@ def episodesxseason(item): def findvideos(item): logger.info() itemlist = [] + if 'links' in item.url: + for url in item.url['links']: + quality, language, plot, poster = set_extra_values(url) + title = '' + title = set_title(title, language, quality) - for url in item.url['links']: - quality, language, plot, poster = set_extra_values(url) - title = '' - title = set_title(title, language, quality) + itemlist.append(Item(channel=item.channel, title=format_title('%s'+title), url=url['url'], action='play', quality=quality, + language=language, infoLabels = item.infoLabels)) - itemlist.append(Item(channel=item.channel, title=format_title('%s'+title), url=url['url'], action='play', quality=quality, - language=language, infoLabels = item.infoLabels)) + itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) - itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) + autoplay.start(itemlist, item) - autoplay.start(itemlist, item) - - return itemlist + return itemlist def add_channel(item): logger.info() From 2a31dc5ef1bb3ee085e68f6e522f85373aee888f Mon Sep 17 00:00:00 2001 From: greko17 <sex1712@email.it> Date: Mon, 14 Oct 2019 17:53:09 +0200 Subject: [PATCH 31/61] fix: specials/news.py stagione ed episodio nel titolo --- specials/news.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/specials/news.py b/specials/news.py index 305c0e06..3c924531 100644 --- a/specials/news.py +++ b/specials/news.py @@ -392,14 +392,19 @@ def get_newest(channel_id, categoria): def get_title(item): - #support.log("ITEM NEWEST ->", item) - if item.contentSerieName: # Si es una serie + support.log("ITEM NEWEST ->", item) + # item.contentSerieName c'è anche se è un film + if item.contentSerieName and item.contentType != 'movie': # Si es una serie title = item.contentSerieName #title = re.compile("\[.*?\]", re.DOTALL).sub("", item.contentSerieName) if not scrapertools.get_season_and_episode(title) and item.contentEpisodeNumber: + # contentSeason non c'è in support if not item.contentSeason: item.contentSeason = '1' title = "%s - %sx%s" % (title, item.contentSeason, str(item.contentEpisodeNumber).zfill(2)) + else: + seas = scrapertools.get_season_and_episode(item.title) + title = "%s - %s" % (seas, title) elif 
item.contentTitle: # Si es una pelicula con el canal adaptado title = item.contentTitle From fc8c99535fbfd02d47e592b485ad8914606a265c Mon Sep 17 00:00:00 2001 From: Alhaziel <alhaziel01@gmail.com> Date: Mon, 14 Oct 2019 17:58:02 +0200 Subject: [PATCH 32/61] Se link = "" non fare nulla --- specials/community.py | 65 ++++++++++++++++++++++--------------------- 1 file changed, 34 insertions(+), 31 deletions(-) diff --git a/specials/community.py b/specials/community.py index 4e58557e..5eecc378 100644 --- a/specials/community.py +++ b/specials/community.py @@ -76,12 +76,15 @@ def show_channels(item): def load_json(item): logger.info() - if item.url.startswith('http'): - json_file = httptools.downloadpage(item.url).data - else: - json_file = open(item.url, "r").read() + if item.url: + if item.url.startswith('http'): + json_file = httptools.downloadpage(item.url).data + else: + json_file = open(item.url, "r").read() - json_data = jsontools.load(json_file) + json_data = jsontools.load(json_file) + else: + json_data = '' return json_data @@ -106,7 +109,7 @@ def show_menu(item): plot = option['plot'] else: plot = item.plot - url = option['link'] if ':/' in option['link'] else item.path + option['link'] + url = '' if not option['link'] else option['link'] if ':/' in option['link'] else item.path + option['link'] itemlist.append(Item(channel=item.channel, title=format_title(option['title']), thumbnail=thumbnail, fanart=fanart, plot=plot, action='show_menu', url=url, path=item.path)) if 'channel_name' in json_data: autoplay.show_option(item.channel, itemlist) return itemlist @@ -131,37 +134,37 @@ def list_all(item): itemlist = [] media_type = item.media_type json_data = load_json(item) - logger.info('JSON= ' + str(json_data)) - for media in json_data[media_type]: + if json_data: + for media in json_data[media_type]: - quality, language, plot, poster = set_extra_values(media) + quality, language, plot, poster = set_extra_values(media) - title = media['title'] - title = set_title(title, language, quality) + title = media['title'] + title = set_title(title, language, quality) - new_item = Item(channel=item.channel, title=format_title(title), quality=quality, - language=language, plot=plot, personal_plot=plot, thumbnail=poster, path=item.path) + new_item = Item(channel=item.channel, title=format_title(title), quality=quality, + language=language, plot=plot, personal_plot=plot, thumbnail=poster, path=item.path) - new_item.infoLabels['year'] = media['year'] if 'year' in media else '' - new_item.infoLabels['tmdb_id'] = media['tmdb_id'] if 'tmdb_id' in media else '' + new_item.infoLabels['year'] = media['year'] if 'year' in media else '' + new_item.infoLabels['tmdb_id'] = media['tmdb_id'] if 'tmdb_id' in media else '' - if 'movies_list' in json_data or 'generic_list' in json_data: - new_item.url = media - new_item.contentTitle = media['title'] - new_item.action = 'findvideos' - else: - new_item.url = media['seasons_list'] - new_item.contentSerieName = media['title'] - new_item.action = 'seasons' + if 'movies_list' in json_data or 'generic_list' in json_data: + new_item.url = media + new_item.contentTitle = media['title'] + new_item.action = 'findvideos' + else: + new_item.url = media['seasons_list'] + new_item.contentSerieName = media['title'] + new_item.action = 'seasons' - itemlist.append(new_item) + itemlist.append(new_item) - if not 'generic_list' in json_data: - tmdb.set_infoLabels(itemlist, seekTmdb=True) - for item in itemlist: - if item.personal_plot != item.plot and item.personal_plot: - item.plot = 
'\n\n' + typo('','submenu') + '\n' + item.personal_plot + '\n' + typo('','submenu') + '\n\n' + item.plot - return itemlist + if not 'generic_list' in json_data: + tmdb.set_infoLabels(itemlist, seekTmdb=True) + for item in itemlist: + if item.personal_plot != item.plot and item.personal_plot: + item.plot = '\n\n' + typo('','submenu') + '\n' + item.personal_plot + '\n' + typo('','submenu') + '\n\n' + item.plot + return itemlist def seasons(item): logger.info('PATH= ' + item.path) @@ -171,7 +174,7 @@ def seasons(item): for season in list_seasons: infoLabels['season'] = season['season'] title = config.get_localized_string(60027) % season['season'] - url = season['link'] if ':/' in season['link'] else item.path + season['link'] + url = '' if not season['link'] else season['link'] if ':/' in season['link'] else item.path + season['link'] itemlist.append(Item(channel=item.channel, title=format_title(title), url=url, action='episodesxseason', contentSeasonNumber=season['season'], infoLabels=infoLabels)) From 98ef22649e69fb5062fd614fdc688f450ab4f879 Mon Sep 17 00:00:00 2001 From: marco <m.toma99@gmail.com> Date: Mon, 14 Oct 2019 18:08:23 +0200 Subject: [PATCH 33/61] migliorie updater --- platformcode/updater.py | 55 +++++++++++++++++++++++++++++++++++------ 1 file changed, 48 insertions(+), 7 deletions(-) diff --git a/platformcode/updater.py b/platformcode/updater.py index c10b6836..c9a1b95a 100644 --- a/platformcode/updater.py +++ b/platformcode/updater.py @@ -142,7 +142,7 @@ def check_addon_init(): localCommitFile.truncate() localCommitFile.writelines(c['sha']) localCommitFile.close() - + xbmc.executebuiltin("UpdateLocalAddons") else: logger.info('Nessun nuovo aggiornamento') @@ -218,20 +218,26 @@ def getShaStr(str): def updateFromZip(): - dp = platformtools.dialog_progress_bg('Kodi on Demand', 'Aggiornamento in corso...') + dp = platformtools.dialog_progress_bg('Kodi on Demand', 'Installazione in corso...') dp.update(0) remotefilename = 'https://github.com/' + user + "/" + repo + "/archive/" + branch + ".zip" - localfilename = xbmc.translatePath("special://home/addons/") + "plugin.video.kod.update.zip" + localfilename = (xbmc.translatePath("special://home/addons/") + "plugin.video.kod.update.zip").encode('utf-8') + destpathname = xbmc.translatePath("special://home/addons/") + logger.info("remotefilename=%s" % remotefilename) logger.info("localfilename=%s" % localfilename) + # pulizia preliminare + remove(localfilename) + removeTree(destpathname + "addon-" + branch) + import urllib - urllib.urlretrieve(remotefilename, localfilename, lambda nb, bs, fs, url=remotefilename: _pbhook(nb, bs, fs, url, dp)) + urllib.urlretrieve(remotefilename, localfilename, + lambda nb, bs, fs, url=remotefilename: _pbhook(nb, bs, fs, url, dp)) # Lo descomprime logger.info("decompressione...") - destpathname = xbmc.translatePath("special://home/addons/") logger.info("destpathname=%s" % destpathname) try: @@ -246,17 +252,52 @@ def updateFromZip(): dp.update(95) # puliamo tutto - shutil.rmtree(addonDir) + removeTree(addonDir) - filetools.rename(destpathname + "addon-" + branch, addonDir) + rename(destpathname + "addon-" + branch, addonDir) logger.info("Cancellando il file zip...") remove(localfilename) dp.update(100) + dp.close() + xbmc.executebuiltin("UpdateLocalAddons") + return hash +def remove(file): + if os.path.isfile(file): + removed = False + while not removed: + try: + os.remove(file) + removed = True + except: + logger.info('File ' + file + ' NON eliminato') + + +def removeTree(dir): + if os.path.isdir(dir): + 
removed = False + while not removed: + try: + shutil.rmtree(dir) + removed = True + except: + logger.info('Cartella ' + dir + ' NON eliminato') + + +def rename(dir1, dir2): + renamed = False + while not renamed: + try: + filetools.rename(dir1, dir2) + renamed = True + except: + logger.info('cartella ' + dir1 + ' NON rinominata') + + # https://stackoverflow.com/questions/3083235/unzipping-file-results-in-badzipfile-file-is-not-a-zip-file def fixZipGetHash(zipFile): f = io.FileIO(zipFile, 'r+b') From a8d0f11cb1a6069c82148610a7e6e01dadc6958d Mon Sep 17 00:00:00 2001 From: greko17 <sex1712@email.it> Date: Mon, 14 Oct 2019 18:20:26 +0200 Subject: [PATCH 34/61] fix: ../specials/news.py titoli uniformi --- specials/news.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/specials/news.py b/specials/news.py index 3c924531..49b48ad9 100644 --- a/specials/news.py +++ b/specials/news.py @@ -401,10 +401,11 @@ def get_title(item): # contentSeason non c'è in support if not item.contentSeason: item.contentSeason = '1' - title = "%s - %sx%s" % (title, item.contentSeason, str(item.contentEpisodeNumber).zfill(2)) + title = "%sx%s - %s" % (item.contentSeason, str(item.contentEpisodeNumber).zfill(2), title) else: seas = scrapertools.get_season_and_episode(item.title) - title = "%s - %s" % (seas, title) + if seas: + title = "%s - %s" % (seas, title) elif item.contentTitle: # Si es una pelicula con el canal adaptado title = item.contentTitle From 2af6d39c4d6f82066e8a3ba0ce0d0c83a46efddb Mon Sep 17 00:00:00 2001 From: Alhaziel <alhaziel01@gmail.com> Date: Mon, 14 Oct 2019 19:44:23 +0200 Subject: [PATCH 35/61] Fix CB01 Anime --- channels/cb01anime.py | 67 ++++++++++++++++++++++++++++++++----------- 1 file changed, 50 insertions(+), 17 deletions(-) diff --git a/channels/cb01anime.py b/channels/cb01anime.py index 71b32bd8..b996eb58 100644 --- a/channels/cb01anime.py +++ b/channels/cb01anime.py @@ -21,7 +21,8 @@ headers = [['Referer', host]] def mainlist(item): anime = [('Genere',['','menu', '2']), ('Per Lettera',['','menu', '1']), - ('Per Anno',['','menu', '3'])] + ('Per Anno',['','menu', '3']), + ('Ultimi Anime Aggiornati',['','peliculas', 'newest'])] return locals() @@ -39,13 +40,34 @@ def search(item, texto): item.url = host + "/?s=" + texto return peliculas(item) +def newest(categoria): + support.log(categoria) + itemlist = [] + item = support.Item() + try: + if categoria == "anime": + item.url = host + item.args = 'newest' + itemlist = peliculas(item) + # Continua la ricerca in caso di errore + except: + import sys + for line in sys.exc_info(): + support.logger.error("{0}".format(line)) + return [] + + return itemlist @support.scrape def peliculas(item): blacklist = Blacklist item.contentType = 'tvshow' - patron = r'<div class="span4">\s*<a href="(?P<url>[^"]+)"><img src="(?P<thumb>[^"]+)"[^>]+><\/a>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+> <h1>(?P<title>[^<\[]+)(?:\[(?P<lang>[^\]]+)\])?</h1></a>.*?-->(?:.*?<br />)?\s*(?P<plot>[^<]+)' - patronNext = r'<link rel="next" href="([^"]+)"' + if item.args == 'newest': + data = support.match(item)[1] + patron = r'<div id="blockvids"><ul><li><a href="(?P<url>[^"]+)"[^>]+><img src="(?P<thumb>[^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<title>[^\[]+)\[(?P<lang>[^\]]+)\]' + else: + patron = r'<div class="span4">\s*<a href="(?P<url>[^"]+)"><img src="(?P<thumb>[^"]+)"[^>]+><\/a>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+> <h1>(?P<title>[^<\[]+)(?:\[(?P<lang>[^\]]+)\])?</h1></a>.*?-->(?:.*?<br />)?\s*(?P<plot>[^<]+)' + patronNext = r'<link rel="next" 
href="([^"]+)"' action = 'check' return locals() @@ -72,20 +94,31 @@ def episodios(item): find_season = support.match(match, r'Stagione\s*(\d+)')[0] season = int(find_season[0]) if find_season else season + 1 if 'prima' not in match.lower() else season else: - title = support.match(match,'<a[^>]+>([^<]+)</a>')[0][0] - if 'episodio' in title.lower(): - ep = support.match(match, r'Episodio ((?:\d+.\d|\d+))')[0][0] - if '.' in ep: - sp += 1 - title = '0' + 'x' + str(sp).zfill(2) + ' - ' + title - else: - ep = int(ep) - if season > s and ep > 1: - s += 1 - e = ep - 1 - title = str(season) + 'x' + str(ep-e).zfill(2) + ' - ' + title - data += title + '|' + match + '\n' - + try: title = support.match(match,'<a[^>]+>([^<]+)</a>')[0][0] + except: title = '' + if title: + if 'episodio' in title.lower(): + ep = support.match(match, r'Episodio ((?:\d+.\d|\d+|\D+))')[0][0] + check = ep.isdigit() + if check or '.' in ep: + if '.' in ep: + sp += 1 + title = '0' + 'x' + str(sp).zfill(2) + ' - ' + title + else: + ep = int(ep) + if season > s and ep > 1: + s += 1 + e = ep - 1 + title = str(season) + 'x' + str(ep-e).zfill(2) + ' - ' + title + data += title + '|' + match + '\n' + else: + title += ' #movie' + data += title + '|' + match + '\n' + def itemHook(item): + if '#movie' in item.title: + item.contentType='movie' + item.title = item.title.replace(' #movie','') + return item patron = r'(?P<title>[^\|]+)\|(?P<url>[^\n]+)\n' action = 'findvideos' From 5fe0450a1714e8df4f2eb3767d6bce990b9ba974 Mon Sep 17 00:00:00 2001 From: Alhaziel <alhaziel01@gmail.com> Date: Mon, 14 Oct 2019 20:49:30 +0200 Subject: [PATCH 36/61] Fix Animeforce --- channels/animeforce.py | 25 ++++++------------------- 1 file changed, 6 insertions(+), 19 deletions(-) diff --git a/channels/animeforce.py b/channels/animeforce.py index 43a6d8f8..08fad2be 100644 --- a/channels/animeforce.py +++ b/channels/animeforce.py @@ -24,7 +24,7 @@ headers = [['Referer', host]] def mainlist(item): anime = ['/lista-anime/', ('In Corso',['/lista-anime-in-corso/']), - ('Ultimi Episodi',['','peliculas','update']), + ('Ultimei Aggiornamenti',['','peliculas','newest']), ('Ultime Serie',['/category/anime/articoli-principali/','peliculas','last']) ] return locals() @@ -36,12 +36,10 @@ def newest(categoria): item = support.Item() try: if categoria == "anime": + item.contentType = 'tvshow' item.url = host - item.args = 'update' + item.args = 'newest' itemlist = peliculas(item) - - if itemlist[-1].action == "peliculas": - itemlist.pop() # Continua la ricerca in caso di errore except: import sys @@ -63,27 +61,16 @@ def search(item, texto): @support.scrape def peliculas(item): anime = True - if item.args == 'update': - patron = r'src="(?P<thumb>[^"]+)" class="attachment-grid-post[^"]+" alt="[^"]*" title="(?P<title>[^"]+").*?<h2><a href="(?P<url>[^"]+)"' - def itemHook(item): - item.contentType = 'episode' - delete = support.scrapertoolsV2.find_single_match(item.fulltitle, r'( Episodio.*)') - number = support.scrapertoolsV2.find_single_match(item.title, r'Episodio (\d+)') - item.title = support.typo(number + ' - ','bold') + item.title.replace(delete,'') - item.fulltitle = item.show = item.title.replace(delete,'') - item.url = item.url.replace('-episodio-'+ number,'') - item.number = number - return item - action = 'findvideos' + if item.args == 'newest': + patron = r'src="(?P<thumb>[^"]+)" class="attachment-grid-post[^"]+" alt="[^"]*" title="(?P<title>.*?) 
Episodi[^"]+".*?<h2><a href="(?P<url>[^"]+)"' elif item.args == 'last': patron = r'src="(?P<thumb>[^"]+)" class="attachment-grid-post[^"]+" alt="[^"]*" title="(?P<title>.*?)(?: Sub| sub| SUB|").*?<h2><a href="(?P<url>[^"]+)"' - action = 'episodios' else: pagination = '' patron = '<strong><a href="(?P<url>[^"]+)">(?P<title>.*?) [Ss][Uu][Bb]' - action = 'episodios' + action = 'episodios' return locals() From 0aba0a833aa2a459662e2201c05aa655127fbed8 Mon Sep 17 00:00:00 2001 From: Alhaziel <alhaziel01@gmail.com> Date: Tue, 15 Oct 2019 17:40:38 +0200 Subject: [PATCH 37/61] Rimossi Itag con sole tracce video --- servers/youtube.py | 33 ++++++++++----------------------- 1 file changed, 10 insertions(+), 23 deletions(-) diff --git a/servers/youtube.py b/servers/youtube.py index 068bcfb4..b00e94ed 100644 --- a/servers/youtube.py +++ b/servers/youtube.py @@ -109,27 +109,14 @@ def extract_videos(video_id): 100: "360p vp8 3D", 101: "480p vp8 3D", 102: "720p vp8 3D", - 167: "360p vp8 webm", - 168: "480p vp8 webm", - 169: "720p vp8 webm", - 170: "1080p vp8 webm", - 218: "480p vp8 webm", - 219: "480p vp8 webm", - 278: "144p vp9 webm", - 242: "240p vp9 webm", - 243: "360p vp9 webm", - 244: "480p vp9 webm", - 245: "480p vp9 webm", - 246: "480p vp9 webm", - 247: "720p vp9 webm", - 248: "1080p vp9 webm", - 271: "1440p vp9 webm", - 272: "2160p vp9 webm", - 302: "720p vp9 webm 60fps", - 303: "1080p vp9 webm 60fps", - 308: "1440p vp9 webm 60fps", - 313: "2160p vp9 webm", - 315: "2160p vp9 webm 60fps" + 91:"144 h264 mp4", + 92:"240 h264 mp4", + 93:"360 h264 mp4", + 94:"480 h264 mp4", + 95:"720 h264 mp4", + 96:"1080 h264 mp4", + 132:"240 h264 mp4", + 151:"72 h264 mp4" } # from core.support import dbg; dbg() url = 'https://www.youtube.com/get_video_info?video_id=%s&eurl=https://youtube.googleapis.com/v/%s&ssl_stream=1' % \ @@ -155,10 +142,10 @@ def extract_videos(video_id): params = extract_flashvars(youtube_page_data) data_flashvars =[] if params.get('adaptive_fmts'): - data_flashvars += scrapertools.find_multiple_matches(params['adaptive_fmts'], '(quality.*?url[^,]+)') + data_flashvars += scrapertools.find_multiple_matches(params['adaptive_fmts'], '(fps.*?url[^,]+)') if params.get('url_encoded_fmt_stream_map'): data_flashvars += params["url_encoded_fmt_stream_map"].split(",") - + for url_desc in data_flashvars: url_desc_map = dict(urlparse.parse_qsl(url_desc)) if not url_desc_map.get("url") and not url_desc_map.get("stream"): From f50e460e33d2d528557b3649d50c6272cc1170da Mon Sep 17 00:00:00 2001 From: Alhaziel <alhaziel01@gmail.com> Date: Tue, 15 Oct 2019 18:27:27 +0200 Subject: [PATCH 38/61] Fix Mega Server --- core/servertools.py | 2 +- lib/megaserver/client.py | 40 ++++++++++++++++++++-------------------- lib/megaserver/cursor.py | 19 +++++++++++-------- 3 files changed, 32 insertions(+), 29 deletions(-) diff --git a/core/servertools.py b/core/servertools.py index 925dd10d..1b2b3d12 100644 --- a/core/servertools.py +++ b/core/servertools.py @@ -318,7 +318,7 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo video_urls.extend(response) except: logger.error("Error al obtener la url en modo free") - error_messages.append("Se ha producido un error en %s" % server_name) + error_messages.append(config.get_localized_string(60006) % server_name) import traceback logger.error(traceback.format_exc()) diff --git a/lib/megaserver/client.py b/lib/megaserver/client.py index 0afe49b8..239f5158 100644 --- a/lib/megaserver/client.py +++ b/lib/megaserver/client.py @@ -84,17 +84,31 @@ class 
Client(object): def get_files(self): files = [] + enc_url = None if self.files: for file in self.files: n = file.name.encode("utf8") u = "http://" + self.ip + ":" + str(self.port) + "/" + urllib.quote(n) s = file.size file_id = file.file_id + enc_url = file.url files.append({"name":n,"url":u,"size":s, "id": file_id}) + if len(self.files) == 1: + try: + code = httptools.downloadpage(enc_url, only_headers=True).code + if code > 300: + return code + else: + return files + + except: + print(traceback.format_exc()) + pass + return files def add_url(self, url): - url = url.split("/#")[1] + url = url.split("#")[1] id_video = None if "|" in url: url, id_video = url.split("|") @@ -135,7 +149,8 @@ class Client(object): def api_req(self, req, get=""): seqno = random.randint(0, 0xFFFFFFFF) url = 'https://g.api.mega.co.nz/cs?id=%d%s' % (seqno, get) - return json.loads(self.post(url, json.dumps([req])))[0] + page = httptools.downloadpage(url, post=json.dumps([req])).data + return json.loads(page)[0] def base64urldecode(self,data): data += '=='[(2 - len(data) * 3) % 4:] @@ -165,12 +180,11 @@ class Client(object): def aes_cbc_decrypt(self, data, key): try: + from Cryptodome.Cipher import AES + decryptor = AES.new(key, AES.MODE_CBC, '\0' * 16) + except: from Crypto.Cipher import AES decryptor = AES.new(key, AES.MODE_CBC, '\0' * 16) - #decryptor = aes.AESModeOfOperationCBC(key, iv='\0' * 16) - except: - import jscrypto - decryptor = jscrypto.new(key, jscrypto.MODE_CBC, '\0' * 16) return decryptor.decrypt(data) def aes_cbc_decrypt_a32(self,data, key): @@ -179,20 +193,6 @@ class Client(object): def decrypt_key(self,a, key): return sum((self.aes_cbc_decrypt_a32(a[i:i+4], key) for i in xrange(0, len(a), 4)), ()) - def post(self, url, data): - return httptools.downloadpage(url, data).data - import ssl - from functools import wraps - def sslwrap(func): - @wraps(func) - def bar(*args, **kw): - kw['ssl_version'] = ssl.PROTOCOL_TLSv1 - return func(*args, **kw) - return bar - - ssl.wrap_socket = sslwrap(ssl.wrap_socket) - return urllib.urlopen(url, data).read() - def dec_attr(self, attr, key): attr = self.aes_cbc_decrypt(attr, self.a32_to_str(key)).rstrip('\0') if not attr.endswith("}"): diff --git a/lib/megaserver/cursor.py b/lib/megaserver/cursor.py index ec01c2ba..37eeab2b 100644 --- a/lib/megaserver/cursor.py +++ b/lib/megaserver/cursor.py @@ -1,4 +1,5 @@ import urllib2 +import traceback class Cursor(object): def __init__(self, file): @@ -21,9 +22,11 @@ class Cursor(object): req.headers['Range'] = 'bytes=%s-' % (offset) try: self.conn = urllib2.urlopen(req) - self.prepare_decoder(offset) + try: + self.prepare_decoder(offset) + except: + print(traceback.format_exc()) except: - #La url del archivo expira transcurrido un tiempo, si da error 403, reintenta volviendo a solicitar la url mediante la API self.mega_request(offset, True) def read(self,n=None): @@ -35,7 +38,6 @@ class Cursor(object): self.pos+=len(res) return res - def seek(self,n): if n>self._file.size: n=self._file.size @@ -53,20 +55,21 @@ class Cursor(object): def __exit__(self,exc_type, exc_val, exc_tb): self._file.cursors.remove(self) if len(self._file.cursors) == 0: self._file.cursor = False - + def decode(self, data): return self.decryptor.decrypt(data) def prepare_decoder(self,offset): initial_value = self.initial_value + int(offset/16) try: + from Cryptodome.Cipher import AES + from Cryptodome.Util import Counter + self.decryptor = AES.new(self._file._client.a32_to_str(self.k), AES.MODE_CTR, counter = Counter.new(128, initial_value = 
initial_value)) + except: from Crypto.Cipher import AES from Crypto.Util import Counter self.decryptor = AES.new(self._file._client.a32_to_str(self.k), AES.MODE_CTR, counter = Counter.new(128, initial_value = initial_value)) - except: - from pyaes import aes - self.decryptor = aes.AESModeOfOperationCTR(f=self,key=self._client.a32_to_str(self.k),counter=aes.Counter(initial_value=initial_value)) rest = offset - int(offset/16)*16 if rest: - self.decode(str(0)*rest) \ No newline at end of file + self.decode(str(0)*rest) From 11101190ad34f03f052294b354b04bca958b165e Mon Sep 17 00:00:00 2001 From: Alhaziel <alhaziel01@gmail.com> Date: Wed, 16 Oct 2019 19:56:29 +0200 Subject: [PATCH 39/61] Animeforce Fix --- channels/animeforce.py | 17 +++++++++++++---- core/support.py | 3 ++- specials/news.py | 2 +- 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/channels/animeforce.py b/channels/animeforce.py index 08fad2be..26cc1d02 100644 --- a/channels/animeforce.py +++ b/channels/animeforce.py @@ -24,7 +24,6 @@ headers = [['Referer', host]] def mainlist(item): anime = ['/lista-anime/', ('In Corso',['/lista-anime-in-corso/']), - ('Ultimei Aggiornamenti',['','peliculas','newest']), ('Ultime Serie',['/category/anime/articoli-principali/','peliculas','last']) ] return locals() @@ -61,16 +60,26 @@ def search(item, texto): @support.scrape def peliculas(item): anime = True + action = 'episodios' + if item.args == 'newest': - patron = r'src="(?P<thumb>[^"]+)" class="attachment-grid-post[^"]+" alt="[^"]*" title="(?P<title>.*?) Episodi[^"]+".*?<h2><a href="(?P<url>[^"]+)"' + patron = r'src="(?P<thumb>[^"]+)" class="attachment-grid-post[^"]+" alt="[^"]*" title="(?P<title>[^"]+").*?<h2><a href="(?P<url>[^"]+)"' + def itemHook(item): + item.url = support.match(item, '<a href="([^"]+)" class="btn', headers=headers)[0][0] + delete = support.scrapertoolsV2.find_single_match(item.fulltitle, r'( Episodi.*)') + number = support.scrapertoolsV2.find_single_match(item.title, r'Episodi(?:o)? (?:\d+÷)?(\d+)') + item.title = support.typo(number + ' - ','bold') + item.title.replace(delete,'') + item.fulltitle = item.show = item.title.replace(delete,'') + item.number = number + return item + action = 'findvideos' elif item.args == 'last': patron = r'src="(?P<thumb>[^"]+)" class="attachment-grid-post[^"]+" alt="[^"]*" title="(?P<title>.*?)(?: Sub| sub| SUB|").*?<h2><a href="(?P<url>[^"]+)"' else: pagination = '' - patron = '<strong><a href="(?P<url>[^"]+)">(?P<title>.*?) [Ss][Uu][Bb]' - action = 'episodios' + patron = r'<strong><a href="(?P<url>[^"]+)">(?P<title>.*?) 
[Ss][Uu][Bb]' return locals() diff --git a/core/support.py b/core/support.py index e23cdd75..1748daf8 100644 --- a/core/support.py +++ b/core/support.py @@ -223,7 +223,7 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t if stagione: episode = season +'x'+ scraped['episode'] elif item.contentType == 'tvshow' and (scraped['episode'] == '' and season == ''): - item.args = 'season_completed' + item.news = 'season_completed' episode = '' else: episode = re.sub(r'\s-\s|-|x|–|×', 'x', scraped['episode']) if scraped['episode'] else '' @@ -768,6 +768,7 @@ def match(item, patron='', patronBlock='', headers='', url='', post=''): if patron: matches = scrapertoolsV2.find_multiple_matches(block, patron) + if not matches: matches = [''] log('MATCHES= ',matches) return matches, block diff --git a/specials/news.py b/specials/news.py index 49b48ad9..b4d2e734 100644 --- a/specials/news.py +++ b/specials/news.py @@ -434,7 +434,7 @@ def get_title(item): if item.quality: title += support.typo(item.quality, '_ [] color kod') - season_ = support.typo(config.get_localized_string(70736), '_ [] color white bold') if (type(item.args) != bool and 'season_completed' in item.args) else '' + season_ = support.typo(config.get_localized_string(70736), '_ [] color white bold') if (type(item.args) != bool and 'season_completed' in item.news) else '' if season_: title += season_ return title From 258dc1c324925002a1cb1d4e8cd3f1ec6836e368 Mon Sep 17 00:00:00 2001 From: Alhaziel <alhaziel01@gmail.com> Date: Wed, 16 Oct 2019 20:00:36 +0200 Subject: [PATCH 40/61] Fix season_completed --- core/support.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/support.py b/core/support.py index 1748daf8..7961ce0a 100644 --- a/core/support.py +++ b/core/support.py @@ -298,7 +298,8 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t contentSerieName= scraped['title'] if item.contentType or CT != 'movie' and function != 'episodios' else item.fulltitle if function == 'episodios' else '', contentTitle= scraped['title'] if item.contentType or CT == 'movie' else '', contentLanguage = lang1, - contentEpisodeNumber=episode if episode else '' + contentEpisodeNumber=episode if episode else '', + news= item.news if item.news else '' ) for lg in list(set(listGroups).difference(known_keys)): From a8fe4b4a98a6ba78675264236b57c7c9ce64ef20 Mon Sep 17 00:00:00 2001 From: Alhaziel <alhaziel01@gmail.com> Date: Thu, 17 Oct 2019 12:10:55 +0200 Subject: [PATCH 41/61] Ricerca per i Community Channels --- specials/community.py | 68 ++++++++++++++++++++++++++++++++++++++----- 1 file changed, 60 insertions(+), 8 deletions(-) diff --git a/specials/community.py b/specials/community.py index 5eecc378..163ca25b 100644 --- a/specials/community.py +++ b/specials/community.py @@ -74,13 +74,14 @@ def show_channels(item): return itemlist def load_json(item): - logger.info() - if item.url: - if item.url.startswith('http'): - json_file = httptools.downloadpage(item.url).data + url= item if type(item) == str else item.url + + if url: + if url.startswith('http'): + json_file = httptools.downloadpage(url).data else: - json_file = open(item.url, "r").read() + json_file = open(url, "r").read() json_data = jsontools.load(json_file) else: @@ -91,7 +92,7 @@ def load_json(item): def show_menu(item): global list_data itemlist = [] - logger.info(item) + logger.info() json_data = load_json(item) @@ -111,6 +112,7 @@ def show_menu(item): plot = item.plot url = '' if not option['link'] else 
option['link'] if ':/' in option['link'] else item.path + option['link'] itemlist.append(Item(channel=item.channel, title=format_title(option['title']), thumbnail=thumbnail, fanart=fanart, plot=plot, action='show_menu', url=url, path=item.path)) + itemlist.append(Item(channel=item.channel, title=typo('Cerca...','color kod bold'), thumbnail=get_thumb('search.png'), action='search', url=item.url, path=item.path)) if 'channel_name' in json_data: autoplay.show_option(item.channel, itemlist) return itemlist @@ -167,7 +169,7 @@ def list_all(item): return itemlist def seasons(item): - logger.info('PATH= ' + item.path) + logger.info() itemlist = [] infoLabels = item.infoLabels list_seasons = item.url @@ -330,4 +332,54 @@ def format_title(title): t = scrapertoolsV2.find_single_match(title, r'\{([^\}]+)\}') if 'bold' not in t: t += ' bold' title = re.sub(r'(\{[^\}]+\})','',title) - return typo(title,t) \ No newline at end of file + return typo(title,t) + +def search(item, text): + logger.info('Search '+ text) + itemlist = [] + json_data = load_json(item) + + return load_links(item, itemlist, json_data, text) + +def load_links(item, itemlist, json_data, text): + for option in json_data['menu']: + json_data = load_json(option['link'] if option['link'].startswith('http') else item.path+option['link']) + if not 'menu' in json_data: + if "movies_list" in json_data: media_type= 'movies_list' + elif "tvshows_list" in json_data: media_type = 'tvshows_list' + elif "episodes_list" in json_data: media_type = 'episodes_list' + if "generic_list" in json_data: media_type= 'generic_list' + + if json_data: + for media in json_data[media_type]: + if text.lower() in media['title'].lower(): + quality, language, plot, poster = set_extra_values(media) + + title = media['title'] + title = set_title(title, language, quality) + + new_item = Item(channel=item.channel, title=format_title(title), quality=quality, + language=language, plot=plot, personal_plot=plot, thumbnail=poster, path=item.path) + + new_item.infoLabels['year'] = media['year'] if 'year' in media else '' + new_item.infoLabels['tmdb_id'] = media['tmdb_id'] if 'tmdb_id' in media else '' + + if 'movies_list' in json_data or 'generic_list' in json_data: + new_item.url = media + new_item.contentTitle = media['title'] + new_item.action = 'findvideos' + else: + new_item.url = media['seasons_list'] + new_item.contentSerieName = media['title'] + new_item.action = 'seasons' + + itemlist.append(new_item) + + if not 'generic_list' in json_data: + tmdb.set_infoLabels(itemlist, seekTmdb=True) + for item in itemlist: + if item.personal_plot != item.plot and item.personal_plot: + item.plot = '\n\n' + typo('','submenu') + '\n' + item.personal_plot + '\n' + typo('','submenu') + '\n\n' + item.plot + else: + load_links(item, itemlist, json_data, text) + return itemlist \ No newline at end of file From 89f7e48ff4d1de767b09adc1c4dc8150a80f3a90 Mon Sep 17 00:00:00 2001 From: Alhaziel <alhaziel01@gmail.com> Date: Thu, 17 Oct 2019 19:31:33 +0200 Subject: [PATCH 42/61] Community, supporto Libreria e Download --- core/support.py | 9 ++-- core/videolibrarytools.py | 5 +- platformcode/platformtools.py | 16 +++---- specials/community.py | 40 ++++++++++------ specials/downloads.py | 27 ++++++++--- specials/videolibrary.py | 86 ++++++++++++++++++++--------------- 6 files changed, 113 insertions(+), 70 deletions(-) diff --git a/core/support.py b/core/support.py index 7961ce0a..53799944 100644 --- a/core/support.py +++ b/core/support.py @@ -805,7 +805,8 @@ def download(itemlist, item, 
typography='', function_level=1, function=''): url=item.url, action='save_download', from_action=from_action, - contentTitle=contentTitle + contentTitle=contentTitle, + path=item.path )) if from_action == 'episodios': itemlist.append( @@ -850,7 +851,8 @@ def videolibrary(itemlist, item, typography='', function_level=1, function=''): contentTitle=item.contentTitle if item.contentTitle else '' if (function == 'findvideos' and contentType == 'movie') \ - or (function == 'episodios' and contentType != 'movie'): + or (function == 'episodios' and contentType != 'movie') \ + or function == 'get_seasons' and item.channel == 'community': if config.get_videolibrary_support() and len(itemlist) > 0: itemlist.append( Item(channel=item.channel, @@ -862,7 +864,8 @@ def videolibrary(itemlist, item, typography='', function_level=1, function=''): url=item.url, action=action, extra=extra, - contentTitle=contentTitle + contentTitle=contentTitle, + path=item.path )) return itemlist diff --git a/core/videolibrarytools.py b/core/videolibrarytools.py index 0604a45d..a90321dd 100644 --- a/core/videolibrarytools.py +++ b/core/videolibrarytools.py @@ -833,7 +833,10 @@ def add_tvshow(item, channel=None): # del item.tmdb_stat #Limpiamos el status para que no se grabe en la Videoteca # Obtiene el listado de episodios - itemlist = getattr(channel, item.action)(item) + if item.channel == 'community': + itemlist = getattr(channel, 'get_seasons')(item) + else: + itemlist = getattr(channel, item.action)(item) insertados, sobreescritos, fallidos, path = save_tvshow(item, itemlist) diff --git a/platformcode/platformtools.py b/platformcode/platformtools.py index ec2adae7..ca567aee 100644 --- a/platformcode/platformtools.py +++ b/platformcode/platformtools.py @@ -587,7 +587,7 @@ def set_context_commands(item, parent_item): if (item.channel != "videolibrary" and not config.get_localized_string(70585) in str(item.context)) \ or (item.channel != "videolibrary" and config.get_localized_string(70585) in str(item.context) and config.get_localized_string(70714) in str(item.context)): # Añadir Serie a la videoteca - if item.action in ["episodios", "get_episodios"] and item.contentSerieName: + if item.action in ["episodios", "get_episodios", "get_seasons"] and item.contentSerieName: context_commands.append((config.get_localized_string(60352), "XBMC.RunPlugin(%s?%s)" % (sys.argv[0], item.clone(action="add_serie_to_library", from_action=item.action).tourl()))) @@ -608,7 +608,7 @@ def set_context_commands(item, parent_item): # elif item.contentSerieName: # Descargar serie - elif item.contentType == "tvshow" and item.action in ["episodios"]: + elif item.contentType == "tvshow" and item.action in ["episodios", "episodesxseason"]: item.contentType == "tvshow" context_commands.append((config.get_localized_string(60355), "XBMC.RunPlugin(%s?%s)" % (sys.argv[0], item.clone(channel="downloads", action="save_download", @@ -629,12 +629,12 @@ def set_context_commands(item, parent_item): from_action=item.action).tourl()))) # Descargar temporada - # elif item.contentType == "season": - # context_commands.append((config.get_localized_string(60357), "XBMC.RunPlugin(%s?%s)" % - # (sys.argv[0], item.clone(channel="downloads", action="save_download", - # from_channel=item.channel, - # from_action=item.action, - # download='season').tourl()))) + elif item.contentType == "season": + context_commands.append((config.get_localized_string(60357), "XBMC.RunPlugin(%s?%s)" % + (sys.argv[0], item.clone(channel="downloads", action="save_download", + 
from_channel=item.channel, + from_action=item.action, + download='season').tourl()))) # Abrir configuración if parent_item.channel not in ["setting", "news", "search"]: diff --git a/specials/community.py b/specials/community.py index 163ca25b..2d1a4cc9 100644 --- a/specials/community.py +++ b/specials/community.py @@ -3,10 +3,10 @@ # -*- Created for Alfa-addon -*- # -*- By the Alfa Develop Group -*- -import re, urllib, os +import re, urllib, os, inspect import requests -from core import httptools, scrapertoolsV2, servertools, jsontools, tmdb +from core import httptools, scrapertoolsV2, servertools, jsontools, tmdb, support from core.item import Item from core.support import typo from channelselector import get_thumb @@ -74,7 +74,7 @@ def show_channels(item): return itemlist def load_json(item): - + support.log(item) url= item if type(item) == str else item.url if url: @@ -144,7 +144,7 @@ def list_all(item): title = media['title'] title = set_title(title, language, quality) - new_item = Item(channel=item.channel, title=format_title(title), quality=quality, + new_item = Item(channel=item.channel, title=format_title(title), fulltitle=title, show=title, quality=quality, language=language, plot=plot, personal_plot=plot, thumbnail=poster, path=item.path) new_item.infoLabels['year'] = media['year'] if 'year' in media else '' @@ -154,10 +154,11 @@ def list_all(item): new_item.url = media new_item.contentTitle = media['title'] new_item.action = 'findvideos' + if 'movies_list' in json_data: new_item.contentType = 'movie' else: new_item.url = media['seasons_list'] new_item.contentSerieName = media['title'] - new_item.action = 'seasons' + new_item.action = 'get_seasons' itemlist.append(new_item) @@ -168,20 +169,31 @@ def list_all(item): item.plot = '\n\n' + typo('','submenu') + '\n' + item.personal_plot + '\n' + typo('','submenu') + '\n\n' + item.plot return itemlist -def seasons(item): - logger.info() +def get_seasons(item): itemlist = [] - infoLabels = item.infoLabels + infoLabels = item.infoLabels if item.infolabels else {} list_seasons = item.url + for season in list_seasons: + support.log() infoLabels['season'] = season['season'] title = config.get_localized_string(60027) % season['season'] url = '' if not season['link'] else season['link'] if ':/' in season['link'] else item.path + season['link'] - itemlist.append(Item(channel=item.channel, title=format_title(title), url=url, action='episodesxseason', - contentSeasonNumber=season['season'], infoLabels=infoLabels)) + itemlist.append(Item(channel=item.channel, title=format_title(title), fulltitle=item.fulltitle, show=item.show, url=url, action='episodesxseason', + contentSeason=season['season'], infoLabels=infoLabels ,contentType = 'tvshow')) - tmdb.set_infoLabels(itemlist, seekTmdb=True) - itemlist = sorted(itemlist, key=lambda i: i.title) + logger.info('CANALE= '+ str(inspect.stack()[1][3])) + if inspect.stack()[1][3] in ['add_tvshow', "get_seasons"]: + it = [] + for item in itemlist: + logger.info(str(item)) + it += episodesxseason(item) + + itemlist = it + else: + tmdb.set_infoLabels(itemlist, seekTmdb=True) + itemlist = sorted(itemlist, key=lambda i: i.title) + support.videolibrary(itemlist,item) return itemlist @@ -199,10 +211,10 @@ def episodesxseason(item): infoLabels['season'] = season_number infoLabels['episode'] = episode_number - title = config.get_localized_string(70677) + ' %s' % (episode_number) + title = '%sx%s - %s' % (item.contentSeason, episode_number, episode['title']) itemlist.append(Item(channel=item.channel, 
title=format_title(title), url=episode, action='findvideos', - contentEpisodeNumber=episode_number, infoLabels=infoLabels)) + contentSeason= item.contentSeason, contentEpisode=episode_number, infoLabels=infoLabels, contentType = 'episode')) tmdb.set_infoLabels(itemlist, seekTmdb=True) return itemlist diff --git a/specials/downloads.py b/specials/downloads.py index 3577d7f5..68bd197e 100644 --- a/specials/downloads.py +++ b/specials/downloads.py @@ -539,7 +539,10 @@ def download_from_server(item): unsupported_servers = ["torrent"] progreso = platformtools.dialog_progress(config.get_localized_string(30101), config.get_localized_string(70178) % item.server) - channel = __import__('channels.%s' % item.contentChannel, None, None, ['channels.%s' % item.contentChannel]) + if item.contentChannel == 'community': + channel = __import__('specials.%s' % item.contentChannel, None, None, ['specials.%s' % item.contentChannel]) + else: + channel = __import__('channels.%s' % item.contentChannel, None, None, ['channels.%s' % item.contentChannel]) if hasattr(channel, "play") and not item.play_menu: progreso.update(50, config.get_localized_string(70178) % item.server, config.get_localized_string(60003) % item.contentChannel) @@ -606,7 +609,10 @@ def download_from_best_server(item): result = {"downloadStatus": STATUS_CODES.error} progreso = platformtools.dialog_progress(config.get_localized_string(30101), config.get_localized_string(70179)) - channel = __import__('channels.%s' % item.contentChannel, None, None, ['channels.%s' % item.contentChannel]) + if item.contentChannel == 'community': + channel = __import__('specials.%s' % item.contentChannel, None, None, ['specials.%s' % item.contentChannel]) + else: + channel = __import__('channels.%s' % item.contentChannel, None, None, ['channels.%s' % item.contentChannel]) progreso.update(50, config.get_localized_string(70184), config.get_localized_string(70180) % item.contentChannel) @@ -652,7 +658,10 @@ def select_server(item): "contentAction: %s | contentChannel: %s | url: %s" % (item.contentAction, item.contentChannel, item.url)) progreso = platformtools.dialog_progress(config.get_localized_string(30101), config.get_localized_string(70179)) - channel = __import__('channels.%s' % item.contentChannel, None, None, ["channels.%s" % item.contentChannel]) + if item.contentChannel == 'community': + channel = __import__('specials.%s' % item.contentChannel, None, None, ['specials.%s' % item.contentChannel]) + else: + channel = __import__('channels.%s' % item.contentChannel, None, None, ['channels.%s' % item.contentChannel]) progreso.update(50, config.get_localized_string(70184), config.get_localized_string(70180) % item.contentChannel) if hasattr(channel, item.contentAction): @@ -720,9 +729,13 @@ def get_episodes(item): # El item es uma serie o temporada if item.contentType in ["tvshow", "season"]: # importamos el canal - channel = __import__('channels.%s' % item.contentChannel, None, None, ["channels.%s" % item.contentChannel]) - # Obtenemos el listado de episodios - episodes = getattr(channel, item.contentAction)(item) + if item.contentChannel == 'community': + channel = __import__('specials.%s' % item.contentChannel, None, None, ["specials.%s" % item.contentChannel]) + episodes = getattr(channel, 'episodesxseason')(item) + else: + channel = __import__('channels.%s' % item.contentChannel, None, None, ["channels.%s" % item.contentChannel]) + episodes = getattr(channel, item.contentAction)(item) + itemlist = [] @@ -818,7 +831,7 @@ def save_download(item): 
item.contentAction = item.from_action if item.from_action else item.action if item.contentType in ["tvshow", "episode", "season"]: - if 'download' in item: + if 'download' in item and item.channel != 'community': heading = config.get_localized_string(70594) # <- Enter the season number item.dlseason = platformtools.dialog_numeric(0, heading, '') if item.dlseason: diff --git a/specials/videolibrary.py b/specials/videolibrary.py index 48b78cca..55793688 100644 --- a/specials/videolibrary.py +++ b/specials/videolibrary.py @@ -42,7 +42,7 @@ def list_movies(item, silent=False): for f in ficheros: if f.endswith(".nfo"): nfo_path = filetools.join(raiz, f) - + #Sincronizamos las películas vistas desde la videoteca de Kodi con la de Alfa try: if config.is_xbmc(): #Si es Kodi, lo hacemos @@ -50,12 +50,12 @@ def list_movies(item, silent=False): xbmc_videolibrary.mark_content_as_watched_on_alfa(nfo_path) except: logger.error(traceback.format_exc()) - + head_nfo, new_item = videolibrarytools.read_nfo(nfo_path) if not new_item: #Si no ha leído bien el .nfo, pasamos a la siguiente continue - + if len(new_item.library_urls) > 1: multicanal = True else: @@ -67,7 +67,10 @@ def list_movies(item, silent=False): for canal_org in new_item.library_urls: canal = generictools.verify_channel(canal_org) try: - channel_verify = __import__('channels.%s' % canal, fromlist=["channels.%s" % canal]) + if canal == 'community': + channel_verify = __import__('specials.%s' % canal, fromlist=["channels.%s" % canal]) + else: + channel_verify = __import__('channels.%s' % canal, fromlist=["channels.%s" % canal]) logger.debug('Channel %s seems correct' % channel_verify) except: dead_item = Item(multicanal=multicanal, @@ -113,7 +116,7 @@ def list_movies(item, silent=False): if not filetools.exists(filetools.join(new_item.path, filetools.basename(strm_path))): # Si se ha eliminado el strm desde la bilbioteca de kodi, no mostrarlo continue - + # Menu contextual: Marcar como visto/no visto visto = new_item.library_playcounts.get(os.path.splitext(f)[0], 0) new_item.infoLabels["playcount"] = visto @@ -165,7 +168,7 @@ def list_tvshows(item): if f == "tvshow.nfo": tvshow_path = filetools.join(raiz, f) # logger.debug(tvshow_path) - + #Sincronizamos los episodios vistos desde la videoteca de Kodi con la de Alfa try: if config.is_xbmc(): #Si es Kodi, lo hacemos @@ -173,7 +176,7 @@ def list_tvshows(item): xbmc_videolibrary.mark_content_as_watched_on_alfa(tvshow_path) except: logger.error(traceback.format_exc()) - + head_nfo, item_tvshow = videolibrarytools.read_nfo(tvshow_path) if len(item_tvshow.library_urls) > 1: @@ -187,7 +190,10 @@ def list_tvshows(item): for canal in item_tvshow.library_urls: canal = generictools.verify_channel(canal) try: - channel_verify = __import__('channels.%s' % canal, fromlist=["channels.%s" % canal]) + if canal == 'community': + channel_verify = __import__('specials.%s' % canal, fromlist=["channels.%s" % canal]) + else: + channel_verify = __import__('channels.%s' % canal, fromlist=["channels.%s" % canal]) logger.debug('El canal %s parece correcto' % channel_verify) except: dead_item = Item(multicanal=multicanal, @@ -236,7 +242,7 @@ def list_tvshows(item): else: texto_visto = config.get_localized_string(60021) contador = 1 - + except: logger.error('Not find: ' + str(tvshow_path)) logger.error(traceback.format_exc()) @@ -499,15 +505,18 @@ def findvideos(item): for nom_canal, json_path in list_canales.items(): if filtro_canal and filtro_canal != nom_canal.capitalize(): continue - + item_canal = Item() 
item_canal.channel = nom_canal nom_canal = item_canal.channel - + # Importamos el canal de la parte seleccionada try: - channel = __import__('channels.%s' % nom_canal, fromlist=["channels.%s" % nom_canal]) + if nom_canal == 'community': + channel = __import__('specials.%s' % nom_canal, fromlist=["channels.%s" % nom_canal]) + else: + channel = __import__('channels.%s' % nom_canal, fromlist=["channels.%s" % nom_canal]) except ImportError: exec "import channels." + nom_canal + " as channel" @@ -549,7 +558,7 @@ def findvideos(item): server.channel = "videolibrary" server.nfo = item.nfo server.strm_path = item.strm_path - + #### Compatibilidad con Kodi 18: evita que se quede la ruedecedita dando vueltas en enlaces Directos if server.action == 'play': server.folder = False @@ -576,7 +585,10 @@ def play(item): # logger.debug("item:\n" + item.tostring('\n')) if not item.contentChannel == "local": - channel = __import__('channels.%s' % item.contentChannel, fromlist=["channels.%s" % item.contentChannel]) + if item.contentChannel == 'community': + channel = __import__('specials.%s' % item.contentChannel, fromlist=["channels.%s" % item.contentChannel]) + else: + channel = __import__('channels.%s' % item.contentChannel, fromlist=["channels.%s" % item.contentChannel]) if hasattr(channel, "play"): itemlist = getattr(channel, "play")(item) @@ -646,22 +658,22 @@ def update_tvshow(item): def verify_playcount_series(item, path): logger.info() - + """ Este método revisa y repara el PlayCount de una serie que se haya desincronizado de la lista real de episodios en su carpeta. Las entradas de episodios, temporadas o serie que falten, son creado con la marca de "no visto". Posteriormente se envia a verificar los contadores de Temporadas y Serie - + En el retorno envía de estado de True si se actualizado o False si no, normalmente por error. Con este estado, el caller puede actualizar el estado de la opción "verify_playcount" en "videolibrary.py". La intención de este método es la de dar una pasada que repare todos los errores y luego desactivarse. Se puede volver a activar en el menú de Videoteca de Alfa. 
- + """ #logger.debug("item:\n" + item.tostring('\n')) - + #Si no ha hecho nunca la verificación, lo forzamos estado = config.get_setting("verify_playcount", "videolibrary") if not estado or estado == False: estado = True #Si no ha hecho nunca la verificación, lo forzamos else: estado = False - + if item.contentType == 'movie': #Esto es solo para Series return (item, False) if filetools.exists(path): @@ -670,7 +682,7 @@ def verify_playcount_series(item, path): if not hasattr(it, 'library_playcounts') or not it.library_playcounts: #Si el .nfo no tiene library_playcounts se lo creamos logger.error('** It does not have PlayCount') it.library_playcounts = {} - + # Obtenemos los archivos de los episodios raiz, carpetas_series, ficheros = filetools.walk(path).next() # Crear un item en la lista para cada strm encontrado @@ -685,15 +697,15 @@ def verify_playcount_series(item, path): if season_episode not in it.library_playcounts: #No está incluido el episodio it.library_playcounts.update({season_episode: 0}) #actualizamos el playCount del .nfo estado_update = True #Marcamos que hemos actualizado algo - + if 'season %s' % season not in it.library_playcounts: #No está incluida la Temporada it.library_playcounts.update({'season %s' % season: 0}) #actualizamos el playCount del .nfo estado_update = True #Marcamos que hemos actualizado algo - + if it.contentSerieName not in it.library_playcounts: #No está incluida la Serie it.library_playcounts.update({item.contentSerieName: 0}) #actualizamos el playCount del .nfo estado_update = True #Marcamos que hemos actualizado algo - + if estado_update: logger.error('** Estado de actualización: ' + str(estado) + ' / PlayCount: ' + str(it.library_playcounts)) estado = estado_update @@ -713,35 +725,35 @@ def mark_content_as_watched2(item): # logger.debug("item:\n" + item.tostring('\n')) if filetools.exists(item.nfo): - head_nfo, it = videolibrarytools.read_nfo(item.nfo) - #logger.debug(it) + head_nfo, it = videolibrarytools.read_nfo(item.nfo) + #logger.debug(it) if item.contentType == 'movie': name_file = os.path.splitext(os.path.basename(item.nfo))[0] - + if name_file != 'tvshow' : - it.library_playcounts.update({name_file: item.playcount}) + it.library_playcounts.update({name_file: item.playcount}) if item.contentType == 'episode' or item.contentType == 'tvshow' or item.contentType == 'list' or name_file == 'tvshow': # elif item.contentType == 'episode': name_file = os.path.splitext(os.path.basename(item.strm_path))[0] num_season = name_file [0] - item.__setattr__('contentType', 'episode') - item.__setattr__('contentSeason', num_season) - #logger.debug(name_file) - + item.__setattr__('contentType', 'episode') + item.__setattr__('contentSeason', num_season) + #logger.debug(name_file) + else: name_file = item.contentTitle - # logger.debug(name_file) + # logger.debug(name_file) if not hasattr(it, 'library_playcounts'): it.library_playcounts = {} - it.library_playcounts.update({name_file: item.playcount}) + it.library_playcounts.update({name_file: item.playcount}) # se comprueba que si todos los episodios de una temporada están marcados, se marque tb la temporada if item.contentType != 'movie': it = check_season_playcount(it, item.contentSeason) - #logger.debug(it) + #logger.debug(it) # Guardamos los cambios en item.nfo if filetools.write(item.nfo, head_nfo + it.tojson()): @@ -756,9 +768,9 @@ def mark_content_as_watched2(item): if config.is_xbmc(): from platformcode import xbmc_videolibrary xbmc_videolibrary.mark_content_as_watched_on_kodi(item , item.playcount) - # 
logger.debug(item) + # logger.debug(item) - platformtools.itemlist_refresh() + platformtools.itemlist_refresh() def mark_content_as_watched(item): @@ -970,7 +982,7 @@ def check_tvshow_playcount(item, season): temporadas_vistas_serie += 1 #logger.debug(temporadas_serie) - if temporadas_serie == temporadas_vistas_serie: + if temporadas_serie == temporadas_vistas_serie: item.library_playcounts.update({item.title: 1}) else: item.library_playcounts.update({item.title: 0}) From 7ae608f7cce4658b65562bbd9629796921855c8a Mon Sep 17 00:00:00 2001 From: Alhaziel <alhaziel01@gmail.com> Date: Thu, 17 Oct 2019 20:26:45 +0200 Subject: [PATCH 43/61] Menu rapido da Menu Contestuale --- platformcode/platformtools.py | 9 +++++++-- resources/language/English/strings.po | 4 ++++ resources/language/Italian/strings.po | 4 ++++ specials/side_menu.py | 4 ++++ 4 files changed, 19 insertions(+), 2 deletions(-) diff --git a/platformcode/platformtools.py b/platformcode/platformtools.py index ca567aee..52f15ce3 100644 --- a/platformcode/platformtools.py +++ b/platformcode/platformtools.py @@ -22,7 +22,7 @@ from channelselector import get_thumb from core import channeltools from core import trakt_tools, scrapertoolsV2 from core.item import Item -from platformcode import logger +from platformcode import logger, keymaptools from platformcode import unify addon = xbmcaddon.Addon('plugin.video.kod') @@ -656,12 +656,17 @@ def set_context_commands(item, parent_item): context_commands = sorted(context_commands, key=lambda comand: comand[0]) # Menu Rapido context_commands.insert(0, (config.get_localized_string(60360), + "XBMC.RunPlugin(%s?%s)" % (sys.argv[0], Item(channel='side_menu', + action="open_shortcut_menu", + parent=parent_item.tourl()).tourl( + )))) + context_commands.insert(1, (config.get_localized_string(70737), "XBMC.Container.Update (%s?%s)" % (sys.argv[0], Item(channel='side_menu', action="open_menu", parent=parent_item.tourl()).tourl( )))) if config.dev_mode(): - context_commands.insert(1, ("item info", + context_commands.insert(2, ("item info", "XBMC.Container.Update (%s?%s)" % (sys.argv[0], Item(action="itemInfo", parent=item.tojson()).tourl()))) return context_commands diff --git a/resources/language/English/strings.po b/resources/language/English/strings.po index 5a883a25..34aaf082 100644 --- a/resources/language/English/strings.po +++ b/resources/language/English/strings.po @@ -5660,3 +5660,7 @@ msgstr "" msgctxt "#70735" msgid "%s Special Episode Number" msgstr "" + +msgctxt "#70737" +msgid "[B]SIDE MENU[/B]" +msgstr "" \ No newline at end of file diff --git a/resources/language/Italian/strings.po b/resources/language/Italian/strings.po index 1088840b..ccf02711 100644 --- a/resources/language/Italian/strings.po +++ b/resources/language/Italian/strings.po @@ -5664,3 +5664,7 @@ msgstr "Numero dell'Episodio Speciale %s" msgctxt "#70736" msgid "Completed Serie" msgstr "Serie Completa" + +msgctxt "#70737" +msgid "[B]SIDE MENU[/B]" +msgstr "[B]MENU LATERALE[/B]" \ No newline at end of file diff --git a/specials/side_menu.py b/specials/side_menu.py index 1de8ef50..ef2818e7 100644 --- a/specials/side_menu.py +++ b/specials/side_menu.py @@ -104,6 +104,10 @@ def open_menu(item): main.doModal() del main +def open_shortcut_menu(item): + from platformcode import keymaptools + keymaptools.open_shortcut_menu() + class Main(xbmcgui.WindowXMLDialog): def __init__(self, *args, **kwargs): From f60d449c6948c4c006db255936fd8bbaeca2f97f Mon Sep 17 00:00:00 2001 From: marco <m.toma99@gmail.com> Date: Thu, 17 Oct 2019 20:46:15 
+0200 Subject: [PATCH 44/61] inizio fix streamingaltadefinizione e modifiche varie --- channels.json | 10 ++++---- channels/streamingaltadefinizione.json | 16 ++++--------- channels/streamingaltadefinizione.py | 32 +++++++++++--------------- channels/vedohd.py | 2 +- core/support.py | 22 ++++++++++-------- 5 files changed, 36 insertions(+), 46 deletions(-) diff --git a/channels.json b/channels.json index e42f75b6..af2f9220 100644 --- a/channels.json +++ b/channels.json @@ -3,9 +3,9 @@ "altadefinizione01_club": "https://www.altadefinizione01.cc", "altadefinizione01_link": "http://altadefinizione01.town", "altadefinizioneclick": "https://altadefinizione.cloud", - "altadefinizionehd": "https://altadefinizionetv.best", + "altadefinizionehd": "https://altadefinizione.wtf", "animeforce": "https://ww1.animeforce.org", - "animeleggendari": "https://animepertutti.net", + "animeleggendari": "https://animepertutti.com", "animespace": "http://www.animespace.tv", "animestream": "https://www.animeworld.it", "animesubita": "http://www.animesubita.org", @@ -45,10 +45,10 @@ "serietvonline": "https://serietvonline.tech", "serietvsubita": "http://serietvsubita.xyz", "serietvu": "https://www.serietvu.club", - "streamingaltadefinizione": "https://www.streamingaltadefinizione.me", + "streamingaltadefinizione": "https://www.popcornstream.best", "streamtime": "https://t.me/s/StreamTime", "tantifilm": "https://www.tantifilm.eu", "toonitalia": "https://toonitalia.org", - "vedohd": "https://vedohd.icu/", + "vedohd": "https://vedohd.video", "vvvvid": "https://www.vvvvid.it" -} +} \ No newline at end of file diff --git a/channels/streamingaltadefinizione.json b/channels/streamingaltadefinizione.json index 1cca3246..2fbcfdb5 100644 --- a/channels/streamingaltadefinizione.json +++ b/channels/streamingaltadefinizione.json @@ -1,19 +1,11 @@ { "id": "streamingaltadefinizione", - "name": "Streaming Altadefinizione", + "name": "Popcorn Stream", "language": ["ita"], "active": true, "adult": false, - "thumbnail": "https://www.streamingaltadefinizione.world/wp-content/uploads/2018/09/StreamingAltadefinizioneLogo.png", + "thumbnail": "https://www.popcornstream.best/wp-content/uploads/2019/09/PopLogo40.png", + "banner": "https://www.popcornstream.info/media/PopcornStream820x428.png", "categories": ["movie","tvshow","anime"], - "settings": [ - { - "id": "include_in_global_search", - "type": "bool", - "label": "Includi in Ricerca Globale", - "default": true, - "enabled": true, - "visible": true - } - ] + "settings": [] } diff --git a/channels/streamingaltadefinizione.py b/channels/streamingaltadefinizione.py index 12b641a5..41b4b8e3 100644 --- a/channels/streamingaltadefinizione.py +++ b/channels/streamingaltadefinizione.py @@ -11,7 +11,6 @@ from core import support from core.item import Item -from specials import autoplay from platformcode import config __channel__ = "streamingaltadefinizione" @@ -49,7 +48,8 @@ def generos(item): def peliculas(item): - return support.dooplay_films(item) + support.dbg() + return support.dooplay_peliculas(item, True if "/genere/anime/" in item.url else False) def episodios(item): @@ -59,20 +59,16 @@ def episodios(item): def findvideos(item): itemlist = [] for link in support.dooplay_get_links(item, host): - server = link['server'][:link['server'].find(".")] - itemlist.append( - Item(channel=item.channel, - action="play", - title=server + " [COLOR blue][" + link['title'] + "][/COLOR]", - url=link['url'], - server=server, - fulltitle=item.fulltitle, - thumbnail=item.thumbnail, - show=item.show, - 
quality=link['title'], - contentType=item.contentType, - folder=False)) + if link['title'] != 'Guarda il trailer': + itemlist.append( + Item(channel=item.channel, + action="play", + url=link['url'], + fulltitle=item.fulltitle, + thumbnail=item.thumbnail, + show=item.show, + quality=link['title'], + contentType=item.contentType, + folder=False)) - autoplay.start(itemlist, item) - - return itemlist + return support.server(item, itemlist=itemlist) diff --git a/channels/vedohd.py b/channels/vedohd.py index fb7803d8..b5b30e7f 100644 --- a/channels/vedohd.py +++ b/channels/vedohd.py @@ -40,7 +40,7 @@ def search(item, text): def peliculas(item): - return support.dooplay_films(item, blacklist) + return support.dooplay_peliculas(item, blacklist) def findvideos(item): diff --git a/core/support.py b/core/support.py index 7961ce0a..ca833350 100644 --- a/core/support.py +++ b/core/support.py @@ -340,10 +340,10 @@ def scrape(func): # IMPORTANT 'type' is a special key, to work need typeContentDict={} and typeActionDict={} def wrapper(*args): - function = func.__name__ itemlist = [] args = func(*args) + function = func.__name__ if not 'actLike' in args else args['actLike'] # log('STACK= ',inspect.stack()[1][3]) item = args['item'] @@ -497,17 +497,15 @@ def dooplay_get_links(item, host): @scrape def dooplay_get_episodes(item): - item.contentType = "episode" patron = '<li class="mark-[0-9]+">.*?<img.*?(?:data-lazy-)?src="(?P<thumb>[^"]+).*?(?P<episode>[0-9]+ - [0-9]+).*?<a href="(?P<url>[^"]+)">(?P<title>[^<>]+).*?(?P<year>[0-9]{4})' - - def itemlistHook(itemlist): - return videolibrary(itemlist, item, function='episodios') + actLike = 'episodios' return locals() @scrape -def dooplay_films(item, blacklist=""): +def dooplay_peliculas(item, mixed=False, blacklist=""): + actLike = 'peliculas' if item.args == 'searchPage': return dooplay_search_vars(item, blacklist) else: @@ -516,7 +514,7 @@ def dooplay_films(item, blacklist=""): patron = '<article id="post-[0-9]+" class="item movies">.*?<img src="(?!data)(?P<thumb>[^"]+)".*?<span class="quality">(?P<quality>[^<>]+).*?<a href="(?P<url>[^"]+)">(?P<title>[^<>]+)</a></h3>.*?(?:<span>[^<>]*(?P<year>[0-9]{4})</span>|</article>).*?(?:<span>(?P<duration>[0-9]+) min</span>|</article>).*?(?:<div class="texto">(?P<plot>[^<>]+)|</article>).*?(?:genres">(?P<genre>.*?)</div>|</article>)' else: action = 'episodios' - patron = '<article id="post-[0-9]+" class="item tvshows">.*?<img src="(?!data)(?P<thumb>[^"]+)".*?(?:<span class="quality">(?P<quality>[^<>]+))?.*?<a href="(?P<url>[^"]+)">(?P<title>[^<>]+)</a></h3>.*?(?:<span>(?P<year>[0-9]{4})</span>|</article>).*?(?:<div class="texto">(?P<plot>[^<>]+)|</article>).*?(?:genres">(?P<genre>.*?)</div>|</article>)' + patron = '<article id="post-[0-9]+" class="item ' + ('\w+' if mixed else 'tvshow') + '">.*?<img src="(?!data)(?P<thumb>[^"]+)".*?(?:<span class="quality">(?P<quality>[^<>]+))?.*?<a href="(?P<url>[^"]+)">(?P<title>[^<>]+)</a></h3>.*?(?:<span>(?P<year>[0-9]{4})</span>|</article>).*?(?:<div class="texto">(?P<plot>[^<>]+)|</article>).*?(?:genres">(?P<genre>.*?)</div>|</article>)' patronNext = '<div class="pagination">.*?class="current".*?<a href="([^"]+)".*?<div class="resppages">' addVideolibrary = False @@ -536,6 +534,7 @@ def dooplay_search_vars(item, blacklist): action = 'episodios' patron = '<div class="result-item">.*?<img src="(?P<thumb>[^"]+)".*?<span class="' + type + '">(?P<quality>[^<>]+).*?<a href="(?P<url>[^"]+)">(?P<title>[^<>]+)</a>.*?<span class="year">(?P<year>[0-9]{4}).*?<div 
class="contenido"><p>(?P<plot>[^<>]+)' patronNext = '<a class="arrow_pag" href="([^"]+)"><i id="nextpagination"' + def fullItemlistHook(itemlist): # se è una next page if itemlist[-1].title == typo(config.get_localized_string(30992), 'color kod bold'): @@ -905,16 +904,19 @@ def pagination(itemlist, item, page, perpage, function_level=1): def server(item, data='', itemlist=[], headers='', AutoPlay=True, CheckLinks=True, down_load=True): - if not data: + if not (data and itemlist): data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data - itemList = servertools.find_video_items(data=str(data)) itemlist = itemlist + itemList for videoitem in itemlist: + if not videoitem.server: + findS = servertools.findvideos(videoitem.url)[0] + videoitem.server = findS[2] + videoitem.title = findS[0] item.title = item.contentTitle if config.get_localized_string(30161) in item.title else item.title - videoitem.title = item.title + typo(videoitem.title, '_ color kod []') + (typo(videoitem.quality, '_ color kod []') if videoitem.quality else "") + videoitem.title = item.fulltitle + typo(videoitem.title, '_ color kod []') + (typo(videoitem.quality, '_ color kod []') if videoitem.quality else "") videoitem.fulltitle = item.fulltitle videoitem.show = item.show videoitem.thumbnail = item.thumbnail From cc07868d43063354615908ca15ce1686e426aea2 Mon Sep 17 00:00:00 2001 From: greko17 <sex1712@email.it> Date: Fri, 18 Oct 2019 11:56:46 +0200 Subject: [PATCH 45/61] fix: support.server --- core/support.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/support.py b/core/support.py index 90fca88d..b93d023f 100644 --- a/core/support.py +++ b/core/support.py @@ -907,7 +907,7 @@ def pagination(itemlist, item, page, perpage, function_level=1): def server(item, data='', itemlist=[], headers='', AutoPlay=True, CheckLinks=True, down_load=True): - if not (data and itemlist): + if not data and not itemlist: data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data itemList = servertools.find_video_items(data=str(data)) From 4e0c0f5436841b5d177f037eab150dce81439bc2 Mon Sep 17 00:00:00 2001 From: Alhaziel <alhaziel01@gmail.com> Date: Fri, 18 Oct 2019 17:32:39 +0200 Subject: [PATCH 46/61] Fix e migliorie a Community channels --- platformcode/platformtools.py | 2 +- specials/community.py | 261 +++++++++++++++++++++------------- specials/downloads.py | 4 +- 3 files changed, 168 insertions(+), 99 deletions(-) diff --git a/platformcode/platformtools.py b/platformcode/platformtools.py index 52f15ce3..76299a33 100644 --- a/platformcode/platformtools.py +++ b/platformcode/platformtools.py @@ -608,7 +608,7 @@ def set_context_commands(item, parent_item): # elif item.contentSerieName: # Descargar serie - elif item.contentType == "tvshow" and item.action in ["episodios", "episodesxseason"]: + elif item.contentType == "tvshow" and item.action in ["episodios"]: item.contentType == "tvshow" context_commands.append((config.get_localized_string(60355), "XBMC.RunPlugin(%s?%s)" % (sys.argv[0], item.clone(channel="downloads", action="save_download", diff --git a/specials/community.py b/specials/community.py index 2d1a4cc9..ea119e57 100644 --- a/specials/community.py +++ b/specials/community.py @@ -21,7 +21,7 @@ list_servers = ['directo', 'akvideo', 'verystream', 'openload'] list_quality = ['SD', '720', '1080', '4k'] def mainlist(item): - logger.info() + support.log() path = os.path.join(config.get_data_path(), 'community_channels.json') if not os.path.exists(path): 
@@ -34,32 +34,40 @@ def mainlist(item): def show_channels(item): - logger.info() + support.log() itemlist = [] context = [{"title": config.get_localized_string(50005), - "action": "remove_channel", - "channel": "community"}] + "action": "remove_channel", + "channel": "community"}] path = os.path.join(config.get_data_path(), 'community_channels.json') file = open(path, "r") json = jsontools.load(file.read()) - itemlist.append(Item(channel=item.channel, title=typo(config.get_localized_string(70676),'bold color kod'), action='add_channel', thumbnail=get_thumb('add.png'))) + itemlist.append(Item(channel=item.channel, + title=typo(config.get_localized_string(70676),'bold color kod'), + action='add_channel', + thumbnail=get_thumb('add.png') + )) for key, channel in json['channels'].items(): + # Find File Path if 'http' in channel['path']: file_path = requests.get(channel['path']).url else: file_path = channel['path'] + + # make relative path path = os.path.dirname(os.path.abspath(file_path)) if 'http' in path: path = path[path.find('http'):].replace('\\','/').replace(':/','://') - if file_path.startswith('http'): - file_url = httptools.downloadpage(file_path, follow_redirects=True).data - else: - file_url = open(file_path, "r").read() + if file_path.startswith('http'): file_url = httptools.downloadpage(file_path, follow_redirects=True).data + else: file_url = open(file_path, "r").read() + + # loa djson json_url = jsontools.load(file_url) - thumbnail = json_url['thumbnail'] if 'thumbnail' in json_url and ':/' in json_url['thumbnail'] else path + json_url['thumbnail'] if 'thumbnail' in json_url and '/' in json_url['thumbnail'] else '' - fanart = json_url['fanart'] if 'fanart' in json_url and ':/' in json_url['fanart'] else path + json_url['fanart'] if 'fanart' in json_url and '/' in json_url['fanart'] else '' - plot = json_url['plot'] if 'plot' in json_url else '' + + thumbnail = relative('thumbnail', json_url, path) + fanart = relative('fanart', json_url, path) + plot = json_url['plot'] if json_url.has_key('plot') else '' itemlist.append(Item(channel=item.channel, title=typo(channel['channel_name'],'bold'), @@ -73,94 +81,97 @@ def show_channels(item): path=path)) return itemlist -def load_json(item): - support.log(item) - url= item if type(item) == str else item.url - - if url: - if url.startswith('http'): - json_file = httptools.downloadpage(url).data - else: - json_file = open(url, "r").read() - - json_data = jsontools.load(json_file) - else: - json_data = '' - - return json_data - def show_menu(item): global list_data itemlist = [] - logger.info() + support.log() json_data = load_json(item) if "menu" in json_data: for option in json_data['menu']: - if 'thumbnail' in json_data: - thumbnail = option['thumbnail'] if ':/' in option['thumbnail'] else item.path + option['thumbnail'] if '/' in option['thumbnail'] else get_thumb(option['thumbnail']) - else: - thumbnail = '' - if 'fanart' in option and option['fanart']: - fanart = option['fanart'] if ':/' in option['fanart'] else item.path + option['fanart'] - else: - fanart = item.fanart - if 'plot' in option and option['plot']: - plot = option['plot'] - else: - plot = item.plot - url = '' if not option['link'] else option['link'] if ':/' in option['link'] else item.path + option['link'] - itemlist.append(Item(channel=item.channel, title=format_title(option['title']), thumbnail=thumbnail, fanart=fanart, plot=plot, action='show_menu', url=url, path=item.path)) - itemlist.append(Item(channel=item.channel, title=typo('Cerca...','color kod bold'), 
thumbnail=get_thumb('search.png'), action='search', url=item.url, path=item.path)) - if 'channel_name' in json_data: autoplay.show_option(item.channel, itemlist) + thumbnail = relative('thumbnail', option, item.path) + fanart = relative('fanart', option, item.path) + plot = option['plot'] if option.has_key('plot') else item.plot + url = relative('link', option, item.path) + itemlist.append(Item(channel=item.channel, + title=format_title(option['title']), + thumbnail=thumbnail, + fanart=fanart, + plot=plot, + action='show_menu', + url=url, + path=item.path)) + # add Search + itemlist.append(Item(channel=item.channel, + title=typo('Cerca...','color kod bold'), + thumbnail=get_thumb('search.png'), + action='search', + url=item.url, + path=item.path)) + # autoplay config only in main menu + if json_data.has_key('channel_name'): autoplay.show_option(item.channel, itemlist) return itemlist - if "movies_list" in json_data: - item.media_type='movies_list' - - elif "tvshows_list" in json_data: - item.media_type = 'tvshows_list' - - elif "episodes_list" in json_data: - item.media_type = 'episodes_list' - - if "generic_list" in json_data: - item.media_type='generic_list' + # select type of list + if json_data.has_key("movies_list"): item.media_type= 'movies_list' + elif json_data.has_key("tvshows_list"): item.media_type = 'tvshows_list' + elif json_data.has_key("episodes_list"): item.media_type = 'episodes_list' + elif json_data.has_key("generic_list"): item.media_type= 'generic_list' return list_all(item) def list_all(item): - logger.info() + support.log() itemlist = [] media_type = item.media_type json_data = load_json(item) + contentTitle = contentSerieName = '' + infoLabels = item.infoLabels if item.infoLabels else {} + if json_data: for media in json_data[media_type]: quality, language, plot, poster = set_extra_values(media) - title = media['title'] - title = set_title(title, language, quality) + fulltitle = media['title'] + title = set_title(fulltitle, language, quality) - new_item = Item(channel=item.channel, title=format_title(title), fulltitle=title, show=title, quality=quality, - language=language, plot=plot, personal_plot=plot, thumbnail=poster, path=item.path) - - new_item.infoLabels['year'] = media['year'] if 'year' in media else '' - new_item.infoLabels['tmdb_id'] = media['tmdb_id'] if 'tmdb_id' in media else '' + infoLabels['year'] = media['year'] if media.has_key('year')else '' + infoLabels['tmdb_id'] = media['tmdb_id'] if media.has_key('tmdb_id') else '' if 'movies_list' in json_data or 'generic_list' in json_data: - new_item.url = media - new_item.contentTitle = media['title'] - new_item.action = 'findvideos' - if 'movies_list' in json_data: new_item.contentType = 'movie' - else: - new_item.url = media['seasons_list'] - new_item.contentSerieName = media['title'] - new_item.action = 'get_seasons' + url= media + contentTitle = fulltitle + contentType = 'movie' + action='findvideos' - itemlist.append(new_item) + else: + contentSerieName = fulltitle + contentType = 'tvshow' + if media.has_key('seasons_list'): + url = media['seasons_list'] + action = 'get_seasons' + else: + url = relative('link', media, item.path) + action = 'episodios' + + itemlist.append(Item(channel=item.channel, + contentType=contentType, + title=format_title(title), + fulltitle=fulltitle, + show=fulltitle, + quality=quality, + language=language, + plot=plot, + personal_plot=plot, + thumbnail=poster, + path=item.path, + url=url, + contentTitle=contentTitle, + contentSerieName=contentSerieName, + action=action)) if 
not 'generic_list' in json_data: tmdb.set_infoLabels(itemlist, seekTmdb=True) @@ -170,24 +181,32 @@ def list_all(item): return itemlist def get_seasons(item): + support.log(item) itemlist = [] infoLabels = item.infoLabels if item.infolabels else {} list_seasons = item.url for season in list_seasons: - support.log() infoLabels['season'] = season['season'] title = config.get_localized_string(60027) % season['season'] - url = '' if not season['link'] else season['link'] if ':/' in season['link'] else item.path + season['link'] - itemlist.append(Item(channel=item.channel, title=format_title(title), fulltitle=item.fulltitle, show=item.show, url=url, action='episodesxseason', - contentSeason=season['season'], infoLabels=infoLabels ,contentType = 'tvshow')) + url = relative('link', season, item.path) + + itemlist.append(Item(channel=item.channel, + title=format_title(title), + fulltitle=item.fulltitle, + show=item.show, + thumbnails=item.thumbnails, + url=url, + action='episodios', + contentSeason=season['season'], + infoLabels=infoLabels, + contentType='tvshow')) + - logger.info('CANALE= '+ str(inspect.stack()[1][3])) if inspect.stack()[1][3] in ['add_tvshow', "get_seasons"]: it = [] for item in itemlist: - logger.info(str(item)) - it += episodesxseason(item) + it += episodios(item) itemlist = it else: @@ -198,30 +217,55 @@ def get_seasons(item): return itemlist -def episodesxseason(item): - logger.info() +def episodios(item): + support.log(item) itemlist = [] json_data = load_json(item) infoLabels = item.infoLabels - - season_number = infoLabels['season'] + ep = 1 + season_number = infoLabels['season'] if infoLabels.has_key('season') else item.contentSeason if item.contentSeason else 1 for episode in json_data['episodes_list']: - episode_number = episode['number'] + match = support.match(episode['number'], r'(?P<season>\d+)x(?P<episode>\d+)')[0][0] + if not match: match = support.match(episode['title'], r'(?P<season>\d+)x(?P<episode>\d+)')[0][0] + if match: + episode_number = match[1] + season_number = match[0] + else: + season_number = episode['season'] if episode.has_key('season') else 1 + episode_number = episode['number'] if episode.has_key('number') else '' + if not episode_number: + episode_number = str(ep) + ep +=1 + infoLabels['season'] = season_number infoLabels['episode'] = episode_number - title = '%sx%s - %s' % (item.contentSeason, episode_number, episode['title']) + plot = episode['plot'] if episode.has_key('plot') else item.plot + thumbnail = episode['poster'] if episode.has_key('poster') else episode['thumbnail'] if episode.has_key('thumbnail') else item.thumbnail - itemlist.append(Item(channel=item.channel, title=format_title(title), url=episode, action='findvideos', - contentSeason= item.contentSeason, contentEpisode=episode_number, infoLabels=infoLabels, contentType = 'episode')) + title = ' - ' + episode['title'] if episode.has_key('title') else '' + title = '%sx%s%s' % (season_number, episode_number, title) + + itemlist.append(Item(channel= item.channel, + title= format_title(title), + fulltitle = item.fulltitle, + show = item.show, + url= episode, + action= 'findvideos', + plot= plot, + thumbnail= thumbnail, + contentSeason= season_number, + contentEpisode= episode_number, + infoLabels= infoLabels, + contentType= 'episode')) tmdb.set_infoLabels(itemlist, seekTmdb=True) return itemlist def findvideos(item): - logger.info() + support.log() itemlist = [] if 'links' in item.url: for url in item.url['links']: @@ -239,7 +283,7 @@ def findvideos(item): return itemlist def 
add_channel(item): - logger.info() + support.log() import xbmc import xbmcgui channel_to_add = {} @@ -287,7 +331,7 @@ def add_channel(item): return def remove_channel(item): - logger.info() + support.log() import xbmc import xbmcgui path = os.path.join(config.get_data_path(), 'community_channels.json') @@ -308,7 +352,7 @@ def remove_channel(item): def set_extra_values(dict): - logger.info() + support.log() quality = '' language = '' plot = '' @@ -326,7 +370,7 @@ def set_extra_values(dict): return quality, language, plot, poster def set_title(title, language, quality): - logger.info() + support.log() if not config.get_setting('unify'): if quality != '': @@ -347,7 +391,7 @@ def format_title(title): return typo(title,t) def search(item, text): - logger.info('Search '+ text) + support.log('Search ', text) itemlist = [] json_data = load_json(item) @@ -394,4 +438,31 @@ def load_links(item, itemlist, json_data, text): item.plot = '\n\n' + typo('','submenu') + '\n' + item.personal_plot + '\n' + typo('','submenu') + '\n\n' + item.plot else: load_links(item, itemlist, json_data, text) - return itemlist \ No newline at end of file + return itemlist + + +def relative(key, json, path): + if json.has_key(key): + if key == 'thumbnail': + ret = json[key] if ':/' in json[key] else path + json[key] if '/' in json[key] else get_thumb(json[key]) if json[key] else '' + else: + ret = json[key] if ':/' in json[key] else path + json[key] if '/' in json[key] else '' + else: + ret = '' + return ret + +def load_json(item): + support.log(item) + url= item if type(item) == str else item.url + + if url: + if url.startswith('http'): + json_file = httptools.downloadpage(url).data + else: + json_file = open(url, "r").read() + + json_data = jsontools.load(json_file) + else: + json_data = '' + + return json_data \ No newline at end of file diff --git a/specials/downloads.py b/specials/downloads.py index 68bd197e..740c0691 100644 --- a/specials/downloads.py +++ b/specials/downloads.py @@ -731,11 +731,9 @@ def get_episodes(item): # importamos el canal if item.contentChannel == 'community': channel = __import__('specials.%s' % item.contentChannel, None, None, ["specials.%s" % item.contentChannel]) - episodes = getattr(channel, 'episodesxseason')(item) else: channel = __import__('channels.%s' % item.contentChannel, None, None, ["channels.%s" % item.contentChannel]) - episodes = getattr(channel, item.contentAction)(item) - + episodes = getattr(channel, item.contentAction)(item) itemlist = [] From 5fe5d445c36f5f567619e33effd72457e9f31ede Mon Sep 17 00:00:00 2001 From: Alhaziel <alhaziel01@gmail.com> Date: Fri, 18 Oct 2019 17:48:04 +0200 Subject: [PATCH 47/61] Fix Numerazione episodi --- specials/community.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/specials/community.py b/specials/community.py index ea119e57..7aac5509 100644 --- a/specials/community.py +++ b/specials/community.py @@ -226,17 +226,20 @@ def episodios(item): ep = 1 season_number = infoLabels['season'] if infoLabels.has_key('season') else item.contentSeason if item.contentSeason else 1 for episode in json_data['episodes_list']: - match = support.match(episode['number'], r'(?P<season>\d+)x(?P<episode>\d+)')[0][0] - if not match: match = support.match(episode['title'], r'(?P<season>\d+)x(?P<episode>\d+)')[0][0] + match = [] + if episode.has_key('number'): match = support.match(episode['number'], r'(?P<season>\d+)x(?P<episode>\d+)')[0][0] + if not match and episode.has_key('title'): match = support.match(episode['title'], 
r'(?P<season>\d+)x(?P<episode>\d+)')[0][0] if match: episode_number = match[1] + ep = int(match[1]) + 1 season_number = match[0] else: season_number = episode['season'] if episode.has_key('season') else 1 episode_number = episode['number'] if episode.has_key('number') else '' + ep = int(episode_number) if episode_number else ep if not episode_number: - episode_number = str(ep) - ep +=1 + episode_number = str(ep).zfill(2) + ep += 1 infoLabels['season'] = season_number infoLabels['episode'] = episode_number From c80a397d408fc7bff224b66278f969de0582748b Mon Sep 17 00:00:00 2001 From: dentaku65 <dentaku65@gmail.com> Date: Fri, 18 Oct 2019 20:28:44 +0200 Subject: [PATCH 48/61] addedd vcstream server (#116) --- servers/vcstream.json | 46 ++++++++++++++++++++++++++++++++ servers/vcstream.py | 61 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 107 insertions(+) create mode 100644 servers/vcstream.json create mode 100644 servers/vcstream.py diff --git a/servers/vcstream.json b/servers/vcstream.json new file mode 100644 index 00000000..3d51bc1a --- /dev/null +++ b/servers/vcstream.json @@ -0,0 +1,46 @@ +{ + "active": true, + "find_videos": { + "ignore_urls": [], + "patterns": [ + { + "pattern": "vcstream.to/(?:embed|f)/([A-z0-9]+)/([A-z0-9.]+)", + "url": "https://vcstream.to/embed/\\1/\\2" + }, + { + "pattern": "https://vidcloud.co/v/([a-z0-9A-Z]+)", + "url": "https:\/\/vidcloud.co\/v\/\\1" + } + ] + }, + "free": true, + "id": "vcstream", + "name": "vcstream", + "settings": [ + { + "default": false, + "enabled": true, + "id": "black_list", + "label": "@60654", + "type": "bool", + "visible": true + }, + { + "default": 0, + "enabled": true, + "id": "favorites_servers_list", + "label": "@60655", + "lvalues": [ + "No", + "1", + "2", + "3", + "4", + "5" + ], + "type": "list", + "visible": false + } + ], + "thumbnail": "http://i.imgur.com/l45Tk0G.png" +} diff --git a/servers/vcstream.py b/servers/vcstream.py new file mode 100644 index 00000000..d8cd588d --- /dev/null +++ b/servers/vcstream.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Icarus pv7 +# Fix dentaku65 + +import urlparse + +from core import httptools +from core import scrapertools +from platformcode import logger, config + + +def test_video_exists(page_url): + logger.info("(page_url='%s')" % page_url) + data = httptools.downloadpage(page_url).data + if "Not Found" in data or "File was deleted" in data: + return False, config.get_localized_string(70292) % "vcstream" + + return True, "" + + +def get_video_url(page_url, premium=False, user="", password="", video_password=""): + logger.info("url=" + page_url) + + video_urls = [] + + data = httptools.downloadpage(page_url).data + + url = scrapertools.find_single_match(data, "url: '([^']+)',") + + if url: + headers = dict() + headers['X-Requested-With'] = "XMLHttpRequest" + + token = scrapertools.find_single_match(data, 'set-cookie: vidcloud_session=(.*?);') + token = token.replace("%3D", "") + if token: + headers['vidcloud_session'] = token + + referer = scrapertools.find_single_match(data, "pageUrl = '([^']+)'") + if referer: + headers['Referer'] = referer + + page_url = urlparse.urljoin(page_url, url) + data = httptools.downloadpage(page_url, headers=headers, verify=False).data + data = data.replace('\\\\', '\\').replace('\\','') + + media_urls = scrapertools.find_multiple_matches(data, '\{"file"\s*:\s*"([^"]+)"\}') + + for media_url in media_urls: + ext = "mp4" + if "m3u8" in media_url: + ext = "m3u8" + import urllib2 + import ssl + context = ssl._create_unverified_context() + 
video_urls.append(["%s [vcstream]" % ext, media_url, urllib2.HTTPSHandler(context=context)]) + + for video_url in video_urls: + logger.info("%s - %s" % (video_url[0], video_url[1])) + return video_urls + From 165cc0250d5249447040d11da8c810831aa58ce2 Mon Sep 17 00:00:00 2001 From: marco <m.toma99@gmail.com> Date: Fri, 18 Oct 2019 20:55:43 +0200 Subject: [PATCH 49/61] fix vcstream e migliorie a support.server --- core/support.py | 18 ++++--- servers/vcstream.py | 119 ++++++++++++++++++++---------------------- servers/vidcloud.json | 42 --------------- servers/vidcloud.py | 28 ---------- 4 files changed, 70 insertions(+), 137 deletions(-) delete mode 100644 servers/vidcloud.json delete mode 100644 servers/vidcloud.py diff --git a/core/support.py b/core/support.py index b93d023f..b8550c67 100644 --- a/core/support.py +++ b/core/support.py @@ -18,6 +18,7 @@ from specials import autoplay def hdpass_get_servers(item): # Carica la pagina + itemlist = [] data = httptools.downloadpage(item.url).data.replace('\n', '') patron = r'<iframe(?: id="[^"]+")? width="[^"]+" height="[^"]+" src="([^"]+)"[^>]+><\/iframe>' url = scrapertoolsV2.find_single_match(data, patron).replace("?alta", "") @@ -46,23 +47,21 @@ def hdpass_get_servers(item): mir = scrapertoolsV2.find_single_match(data, patron_mir) - for mir_url, server in scrapertoolsV2.find_multiple_matches(mir, '<option.*?value="([^"]+?)">([^<]+?)</value>'): + for mir_url, srv in scrapertoolsV2.find_multiple_matches(mir, '<option.*?value="([^"]+?)">([^<]+?)</value>'): data = httptools.downloadpage(urlparse.urljoin(url, mir_url)).data.replace('\n', '') for media_label, media_url in scrapertoolsV2.find_multiple_matches(data, patron_media): itemlist.append(Item(channel=item.channel, action="play", - title=item.title + typo(server, '-- [] color kod') + typo(res_video, '-- [] color kod'), fulltitle=item.fulltitle, quality=res_video, show=item.show, thumbnail=item.thumbnail, contentType=item.contentType, - server=server, url=url_decode(media_url))) log("video -> ", res_video) - return controls(itemlist, item, AutoPlay=True, CheckLinks=True) + return server(item, itemlist=itemlist) def url_decode(url_enc): @@ -913,9 +912,15 @@ def server(item, data='', itemlist=[], headers='', AutoPlay=True, CheckLinks=Tru itemList = servertools.find_video_items(data=str(data)) itemlist = itemlist + itemList + verifiedItemlist = [] for videoitem in itemlist: if not videoitem.server: - findS = servertools.findvideos(videoitem.url)[0] + findS = servertools.findvideos(videoitem.url) + if findS: + findS = findS[0] + else: + log(videoitem, 'Non supportato') + continue videoitem.server = findS[2] videoitem.title = findS[0] item.title = item.contentTitle if config.get_localized_string(30161) in item.title else item.title @@ -925,8 +930,9 @@ def server(item, data='', itemlist=[], headers='', AutoPlay=True, CheckLinks=Tru videoitem.thumbnail = item.thumbnail videoitem.channel = item.channel videoitem.contentType = item.contentType + verifiedItemlist.append(videoitem) - return controls(itemlist, item, AutoPlay, CheckLinks, down_load) + return controls(verifiedItemlist, item, AutoPlay, CheckLinks, down_load) def controls(itemlist, item, AutoPlay=True, CheckLinks=True, down_load=True): from core import jsontools diff --git a/servers/vcstream.py b/servers/vcstream.py index d8cd588d..ecd1afcb 100644 --- a/servers/vcstream.py +++ b/servers/vcstream.py @@ -1,61 +1,58 @@ -# -*- coding: utf-8 -*- -# Icarus pv7 -# Fix dentaku65 - -import urlparse - -from core import httptools -from core import 
scrapertools -from platformcode import logger, config - - -def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) - data = httptools.downloadpage(page_url).data - if "Not Found" in data or "File was deleted" in data: - return False, config.get_localized_string(70292) % "vcstream" - - return True, "" - - -def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("url=" + page_url) - - video_urls = [] - - data = httptools.downloadpage(page_url).data - - url = scrapertools.find_single_match(data, "url: '([^']+)',") - - if url: - headers = dict() - headers['X-Requested-With'] = "XMLHttpRequest" - - token = scrapertools.find_single_match(data, 'set-cookie: vidcloud_session=(.*?);') - token = token.replace("%3D", "") - if token: - headers['vidcloud_session'] = token - - referer = scrapertools.find_single_match(data, "pageUrl = '([^']+)'") - if referer: - headers['Referer'] = referer - - page_url = urlparse.urljoin(page_url, url) - data = httptools.downloadpage(page_url, headers=headers, verify=False).data - data = data.replace('\\\\', '\\').replace('\\','') - - media_urls = scrapertools.find_multiple_matches(data, '\{"file"\s*:\s*"([^"]+)"\}') - - for media_url in media_urls: - ext = "mp4" - if "m3u8" in media_url: - ext = "m3u8" - import urllib2 - import ssl - context = ssl._create_unverified_context() - video_urls.append(["%s [vcstream]" % ext, media_url, urllib2.HTTPSHandler(context=context)]) - - for video_url in video_urls: - logger.info("%s - %s" % (video_url[0], video_url[1])) - return video_urls - +# -*- coding: utf-8 -*- +# Icarus pv7 +# Fix dentaku65 + +import urlparse + +from core import httptools +from core import scrapertools +from platformcode import logger, config + + +def test_video_exists(page_url): + logger.info("(page_url='%s')" % page_url) + data = httptools.downloadpage(page_url).data + if "We're Sorry" in data: + return False, config.get_localized_string(70292) % "vcstream" + + return True, "" + + +def get_video_url(page_url, premium=False, user="", password="", video_password=""): + logger.info("url=" + page_url) + + video_urls = [] + + data = httptools.downloadpage(page_url).data + + url = scrapertools.find_single_match(data, "url: '([^']+)',") + + if url: + headers = dict() + headers['X-Requested-With'] = "XMLHttpRequest" + + token = scrapertools.find_single_match(data, 'set-cookie: vidcloud_session=(.*?);') + token = token.replace("%3D", "") + if token: + headers['vidcloud_session'] = token + + referer = scrapertools.find_single_match(data, "pageUrl = '([^']+)'") + if referer: + headers['Referer'] = referer + + page_url = urlparse.urljoin(page_url, url) + data = httptools.downloadpage(page_url, headers=headers).data + data = data.replace('\\\\', '\\').replace('\\','') + + media_urls = scrapertools.find_multiple_matches(data, '\{"file"\s*:\s*"([^"]+)"\}') + + for media_url in media_urls: + ext = "mp4" + if "m3u8" in media_url: + ext = "m3u8" + video_urls.append(["%s [vcstream]" % ext, media_url]) + + for video_url in video_urls: + logger.info("%s - %s" % (video_url[0], video_url[1])) + return video_urls + diff --git a/servers/vidcloud.json b/servers/vidcloud.json deleted file mode 100644 index 3b65904e..00000000 --- a/servers/vidcloud.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "active": true, - "find_videos": { - "ignore_urls": [], - "patterns": [ - { - "pattern": "https://(?:vidcloud.co|vcstream.to)/embed/([a-z0-9]+)", - "url": "https://vidcloud.co/player?fid=\\1&page=embed" - } - ] - }, - "free": true, - 
"id": "vidcloud", - "name": "vidcloud", - "settings": [ - { - "default": false, - "enabled": true, - "id": "black_list", - "label": "@60654", - "type": "bool", - "visible": true - }, - { - "default": 0, - "enabled": true, - "id": "favorites_servers_list", - "label": "@60655", - "lvalues": [ - "No", - "1", - "2", - "3", - "4", - "5" - ], - "type": "list", - "visible": false - } - ], - "thumbnail": "https://i.postimg.cc/xjpwG0rK/0a-RVDzlb-400x400.jpg" -} diff --git a/servers/vidcloud.py b/servers/vidcloud.py deleted file mode 100644 index 41d5959d..00000000 --- a/servers/vidcloud.py +++ /dev/null @@ -1,28 +0,0 @@ -# Conector vidcloud By Alfa development Group -# -------------------------------------------------------- - -import re - -from core import httptools -from platformcode import logger - - -def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) - data = httptools.downloadpage(page_url) - if data.code == 404: - return False, "[Cloud] El archivo no existe o ha sido borrado" - return True, "" - - -def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("url=" + page_url) - video_urls = [] - data = httptools.downloadpage(page_url).data - data = data.replace('\\\\', '\\').replace('\\','') - patron = '"file":"([^"]+)"' - matches = re.compile(patron, re.DOTALL).findall(data) - for url in matches: - if not ".vtt" in url: - video_urls.append(['vidcloud', url]) - return video_urls From 0201d471135fcedaac8b38eca68414152aab3b9c Mon Sep 17 00:00:00 2001 From: marco <m.toma99@gmail.com> Date: Fri, 18 Oct 2019 21:08:12 +0200 Subject: [PATCH 50/61] fix cineblog --- channels/cineblog01.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/channels/cineblog01.py b/channels/cineblog01.py index f0aba4c7..028bb818 100644 --- a/channels/cineblog01.py +++ b/channels/cineblog01.py @@ -18,10 +18,13 @@ headers = "" def findhost(): global host, headers permUrl = httptools.downloadpage('https://www.cb01.uno/', follow_redirects=False).headers - if host[:4] != 'http': - host = 'https://'+permUrl['location'].replace('https://www.google.it/search?q=site:', '') + if 'google' in permUrl['location']: + if host[:4] != 'http': + host = 'https://'+permUrl['location'].replace('https://www.google.it/search?q=site:', '') + else: + host = permUrl['location'].replace('https://www.google.it/search?q=site:', '') else: - host = permUrl['location'].replace('https://www.google.it/search?q=site:', '') + host = permUrl['location'] headers = [['Referer', host]] list_servers = ['verystream', 'openload', 'streamango', 'wstream'] From 40531e074c95be12ad30f95eb11d53cf8abfa747 Mon Sep 17 00:00:00 2001 From: marco <m.toma99@gmail.com> Date: Fri, 18 Oct 2019 22:23:44 +0200 Subject: [PATCH 51/61] fix winzzoz per update da file zip --- platformcode/updater.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/platformcode/updater.py b/platformcode/updater.py index c9a1b95a..5a11a75f 100644 --- a/platformcode/updater.py +++ b/platformcode/updater.py @@ -242,8 +242,9 @@ def updateFromZip(): try: hash = fixZipGetHash(localfilename) - unzipper = ziptools() - unzipper.extract(localfilename, destpathname) + import zipfile + with zipfile.ZipFile(localfilename, "r") as zip_ref: + zip_ref.extractall(destpathname) except Exception as e: logger.info('Non sono riuscito ad estrarre il file zip') logger.info(e) From ead0fefd01477746fb79ac1a5db5daf7a5212c41 Mon Sep 17 00:00:00 2001 From: Alhaziel <alhaziel01@gmail.com> Date: Sat, 19 Oct 2019 10:01:07 
+0200 Subject: [PATCH 52/61] Migliorie ad auto_filter, vosi -> sub-ita --- RIAVVIARE AD AGGIORNAMENTO TERMINATO! --- --- .github/ISSUE_TEMPLATE/test-canale.md | 2 +- channels/0example.json | 2 +- channels/altadefinizione01.json | 2 +- channels/altadefinizione01_link.json | 2 +- channels/altadefinizioneclick.json | 2 +- channels/animeleggendari.json | 2 +- channels/animesubita.json | 4 +- channels/animeworld.json | 2 +- channels/casacinema.json | 2 +- channels/casacinemaInfo.json | 2 +- channels/cb01anime.json | 2 +- channels/cineblog01.json | 4 +- channels/dreamsub.json | 2 +- channels/fastsubita.json | 2 +- channels/italiaserie.json | 2 +- channels/mondolunatico2.json | 2 +- channels/toonitalia.json | 2 +- channelselector.py | 58 +++++++++++++++------------ platformcode/config.py | 2 +- platformcode/unify.py | 6 +-- resources/language/English/strings.po | 4 ++ resources/language/Italian/strings.po | 4 ++ resources/settings.xml | 2 +- specials/news.py | 6 +-- specials/search.py | 2 +- 25 files changed, 69 insertions(+), 53 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/test-canale.md b/.github/ISSUE_TEMPLATE/test-canale.md index 74759500..de8af5e7 100644 --- a/.github/ISSUE_TEMPLATE/test-canale.md +++ b/.github/ISSUE_TEMPLATE/test-canale.md @@ -45,7 +45,7 @@ Per aprirli non servono programmi particolari un semplice editor di testo è suf Occorrente: file .json **1. Indica la coerenza delle voci presenti in "language" con i contenuti presenti sul sito:** -valori: ita, vosi (sub-ita) +valori: ita, sub-ita (sub-ita) - [ ] coerenti - [ ] non coerenti diff --git a/channels/0example.json b/channels/0example.json index a66bf1a8..783193c6 100644 --- a/channels/0example.json +++ b/channels/0example.json @@ -19,7 +19,7 @@ se vanno cancellati tutti deve rimanere la voce: { "id": "nome del file .json", "name": "Nome del canale visualizzato in KOD", - "language": ["ita", "vosi"], + "language": ["ita", "sub-ita"], "active": false, "adult": false, "thumbnail": "", diff --git a/channels/altadefinizione01.json b/channels/altadefinizione01.json index a8690559..8f0d698a 100644 --- a/channels/altadefinizione01.json +++ b/channels/altadefinizione01.json @@ -1,7 +1,7 @@ { "id": "altadefinizione01", "name": "Altadefinizione01", - "language": ["ita", "vosi"], + "language": ["ita", "sub-ita"], "active": true, "adult": false, "thumbnail": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/altadefinizione01.png", diff --git a/channels/altadefinizione01_link.json b/channels/altadefinizione01_link.json index 068a1ee4..fe102b43 100644 --- a/channels/altadefinizione01_link.json +++ b/channels/altadefinizione01_link.json @@ -3,7 +3,7 @@ "name": "Altadefinizione01 L", "active": true, "adult": false, - "language": ["ita","vosi"], + "language": ["ita","sub-ita"], "thumbnail": "altadefinizione01_L.png", "banner": "altadefinizione01_L.png", "categories": ["movie","vos"], diff --git a/channels/altadefinizioneclick.json b/channels/altadefinizioneclick.json index dbf3ca0a..d5190e8c 100644 --- a/channels/altadefinizioneclick.json +++ b/channels/altadefinizioneclick.json @@ -3,7 +3,7 @@ "name": "AltadefinizioneClick", "active": true, "adult": false, - "language": ["ita","vosi"], + "language": ["ita","sub-ita"], "thumbnail": "https:\/\/raw.githubusercontent.com\/Zanzibar82\/images\/master\/posters\/altadefinizioneclick.png", "bannermenu": "https:\/\/raw.githubusercontent.com\/Zanzibar82\/images\/master\/posters\/altadefinizioneciclk.png", "categories": ["movie","vos"], diff --git a/channels/animeleggendari.json 
b/channels/animeleggendari.json index 24509255..5fd0cf6b 100644 --- a/channels/animeleggendari.json +++ b/channels/animeleggendari.json @@ -3,7 +3,7 @@ "name": "AnimePerTutti", "active": true, "adult": false, - "language": ["ita", "vosi"], + "language": ["ita", "sub-ita"], "thumbnail": "animepertutti.png", "bannermenu": "animepertutti.png", "categories": ["anime", "vos"], diff --git a/channels/animesubita.json b/channels/animesubita.json index 60140b85..aa00009f 100644 --- a/channels/animesubita.json +++ b/channels/animesubita.json @@ -3,10 +3,10 @@ "name": "AnimeSubIta", "active": true, "adult": false, - "language": ["vosi"], + "language": ["sub-ita"], "thumbnail": "animesubita.png", "bannermenu": "animesubita.png", - "categories": ["anime", "vosi", "movie"], + "categories": ["anime", "vos", "movie"], "settings": [] } diff --git a/channels/animeworld.json b/channels/animeworld.json index 6d619898..0aa9aa1d 100644 --- a/channels/animeworld.json +++ b/channels/animeworld.json @@ -3,7 +3,7 @@ "name": "AnimeWorld", "active": true, "adult": false, - "language": ["ita", "vosi"], + "language": ["ita", "sub-ita"], "thumbnail": "animeworld.png", "banner": "animeworld.png", "categories": ["anime", "vos"], diff --git a/channels/casacinema.json b/channels/casacinema.json index ee2514ad..8f092063 100644 --- a/channels/casacinema.json +++ b/channels/casacinema.json @@ -1,7 +1,7 @@ { "id": "casacinema", "name": "Casacinema", - "language": ["ita", "vosi"], + "language": ["ita", "sub-ita"], "active": true, "adult": false, "thumbnail": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/casacinema.png", diff --git a/channels/casacinemaInfo.json b/channels/casacinemaInfo.json index ff6d339b..f8e17a26 100644 --- a/channels/casacinemaInfo.json +++ b/channels/casacinemaInfo.json @@ -1,7 +1,7 @@ { "id": "casacinemaInfo", "name": "La Casa del Cinema", - "language": ["ita", "vosi"], + "language": ["ita", "sub-ita"], "active": true, "adult": false, "thumbnail": "", diff --git a/channels/cb01anime.json b/channels/cb01anime.json index b985c4e8..24fd0c8d 100644 --- a/channels/cb01anime.json +++ b/channels/cb01anime.json @@ -1,7 +1,7 @@ { "id": "cb01anime", "name": "Cb01anime", - "language": ["ita", "vos", "vosi"], + "language": ["ita", "vos", "sub-ita"], "active": true, "adult": false, "thumbnail": "cb01anime.png", diff --git a/channels/cineblog01.json b/channels/cineblog01.json index ddf2eaca..a758b139 100644 --- a/channels/cineblog01.json +++ b/channels/cineblog01.json @@ -1,11 +1,11 @@ { "id": "cineblog01", "name": "CB01", - "language": ["ita"], + "language": ["ita", "sub-ita"], "active": true, "adult": false, "thumbnail": "cb01.png", "banner": "cb01.png", - "categories": ["tvshow", "movie", "vosi"], + "categories": ["tvshow", "movie", "vos"], "settings": [] } \ No newline at end of file diff --git a/channels/dreamsub.json b/channels/dreamsub.json index b5ed2795..d0d39ea9 100644 --- a/channels/dreamsub.json +++ b/channels/dreamsub.json @@ -1,7 +1,7 @@ { "id": "dreamsub", "name": "DreamSub", - "language": ["ita", "vosi"], + "language": ["ita", "sub-ita"], "active": true, "adult": false, "thumbnail": "dreamsub.png", diff --git a/channels/fastsubita.json b/channels/fastsubita.json index 9c0cb466..aad4d706 100644 --- a/channels/fastsubita.json +++ b/channels/fastsubita.json @@ -1,7 +1,7 @@ { "id": "fastsubita", "name": "Fastsubita", - "language": ["vosi"], + "language": ["sub-ita"], "active": true, "adult": false, "thumbnail": "fastsubita.png", diff --git a/channels/italiaserie.json 
b/channels/italiaserie.json index bb53c218..5d487c0a 100644 --- a/channels/italiaserie.json +++ b/channels/italiaserie.json @@ -3,7 +3,7 @@ "name": "Italia Serie", "active": true, "adult": false, - "language": ["ita","vosi"], + "language": ["ita","sub-ita"], "thumbnail": "https:\/\/raw.githubusercontent.com\/Zanzibar82\/images\/master\/posters\/italiaserie.png", "bannermenu": "https:\/\/raw.githubusercontent.com\/Zanzibar82\/images\/master\/posters\/italiaserie.png", "categories": ["tvshow"], diff --git a/channels/mondolunatico2.json b/channels/mondolunatico2.json index ed77f7a2..c07479c2 100644 --- a/channels/mondolunatico2.json +++ b/channels/mondolunatico2.json @@ -6,7 +6,7 @@ "adult": false, "thumbnail": "mondolunatico2.png", "banner": "mondolunatico2.png", - "categories": ["tvshow", "movie", "vosi", "anime"], + "categories": ["tvshow", "movie", "vos", "anime"], "settings": [ { "id": "include_in_global_search", diff --git a/channels/toonitalia.json b/channels/toonitalia.json index a8cd2928..deaa9b02 100644 --- a/channels/toonitalia.json +++ b/channels/toonitalia.json @@ -6,7 +6,7 @@ "adult": false, "thumbnail": "toonitalia.png", "banner": "toonitalia.png", - "categories": ["tvshow", "movie", "vosi", "anime"], + "categories": ["tvshow", "movie", "vos", "anime"], "settings": [ { "id": "include_in_global_search", diff --git a/channelselector.py b/channelselector.py index d594766d..c3e63042 100644 --- a/channelselector.py +++ b/channelselector.py @@ -88,7 +88,7 @@ def getchanneltypes(view="thumb_"): channel_types.append("adult") # channel_language = config.get_setting("channel_language", default="all") - channel_language = auto_filter()[0] + channel_language = auto_filter() logger.info("channel_language=%s" % channel_language) # Ahora construye el itemlist ordenadamente @@ -138,7 +138,7 @@ def filterchannels(category, view="thumb_"): logger.info("channel_files encontrados %s" % (len(channel_files))) # channel_language = config.get_setting("channel_language", default="all") - channel_language, channel_language_list = auto_filter() + channel_language = auto_filter() logger.info("channel_language=%s" % channel_language) for channel_path in channel_files: @@ -193,15 +193,14 @@ def filterchannels(category, view="thumb_"): # Se muestran sólo los idiomas filtrados, cast o lat # Los canales de adultos se mostrarán siempre que estén activos - for c in channel_language_list: - if c in channel_parameters["language"]: - L = True - else: - L = False + # for channel_language_list in channel_language_list: + # if c in channel_parameters["language"]: + # L = True + # else: + # L = False # logger.info('CCLANG= ' + channel_language + ' ' + str(channel_language_list)) if channel_language != "all" and "*" not in channel_parameters["language"] \ - and L == False and channel_language not in channel_parameters["language"]: - logger.info('STOP!!!!') + and channel_language not in str(channel_parameters["language"]): continue # Se salta el canal si está en una categoria filtrado @@ -291,12 +290,13 @@ def set_channel_info(parameters): content = '' langs = parameters['language'] lang_dict = {'ita':'Italiano', - 'vosi':'Sottotitolato in Italiano', + 'sub-ita':'Sottotitolato in Italiano', '*':'Italiano, Sottotitolato in Italiano'} + for lang in langs: # if 'vos' in parameters['categories']: # lang = '*' - # if 'vosi' in parameters['categories']: + # if 'sub-ita' in parameters['categories']: # lang = 'ita' if lang in lang_dict: @@ -320,25 +320,33 @@ def set_channel_info(parameters): def auto_filter(auto_lang=False): - 
import xbmc, xbmcaddon - - addon = xbmcaddon.Addon('metadata.themoviedb.org') - def_lang = addon.getSetting('language') - lang = 'all' - lang_list = ['all'] - - lang_dict = {'it':'ita'} - lang_list_dict = {'it':['ita','vosi']} - if config.get_setting("channel_language") == 'auto' or auto_lang == True: - lang = lang_dict[def_lang] - lang_list = lang_list_dict[def_lang] + lang = config.get_localized_string(20001) else: lang = config.get_setting("channel_language", default="all") - lang_list = lang_list_dict[def_lang] - return lang, lang_list + return lang + + # import xbmc, xbmcaddon + + # addon = xbmcaddon.Addon('metadata.themoviedb.org') + # def_lang = addon.getSetting('language') + # lang = 'all' + # lang_list = ['all'] + + # lang_dict = {'it':'ita'} + # lang_list_dict = {'it':['ita','vosi']} + + # if config.get_setting("channel_language") == 'auto' or auto_lang == True: + # lang = lang_dict[def_lang] + # lang_list = lang_list_dict[def_lang] + + # else: + # lang = config.get_setting("channel_language", default="all") + # lang_list = lang_list_dict[def_lang] + + # return lang, lang_list def thumb(itemlist=[], genre=False): diff --git a/platformcode/config.py b/platformcode/config.py index a4da4250..87ea5ea1 100644 --- a/platformcode/config.py +++ b/platformcode/config.py @@ -366,7 +366,7 @@ def get_localized_string(code): def get_localized_category(categ): categories = {'movie': get_localized_string(30122), 'tvshow': get_localized_string(30123), 'anime': get_localized_string(30124), 'documentary': get_localized_string(30125), - 'vos': get_localized_string(30136), 'vosi': get_localized_string(70566), 'adult': get_localized_string(30126), + 'vos': get_localized_string(30136), 'sub-ita': get_localized_string(70566), 'adult': get_localized_string(30126), 'direct': get_localized_string(30137), 'torrent': get_localized_string(70015)} return categories[categ] if categ in categories else categ diff --git a/platformcode/unify.py b/platformcode/unify.py index c06f6eb1..0c8c41af 100644 --- a/platformcode/unify.py +++ b/platformcode/unify.py @@ -253,7 +253,7 @@ def set_lang(language): lat=['latino','lat','la', 'espanol latino', 'espaol latino', 'zl', 'mx', 'co', 'vl'] vose=['subtitulado','subtitulada','sub','sub espanol','vose','espsub','su','subs castellano', 'sub: español', 'vs', 'zs', 'vs', 'english-spanish subs', 'ingles sub espanol'] - vosi=['sottotitolato','sottotitolata','sub','sub ita','vosi','sub-ita','subs italiano', + sub_ita=['sottotitolato','sottotitolata','sub','sub ita','subs italiano', 'sub: italiano', 'inglese sottotitolato'] vos=['vos', 'sub ingles', 'engsub','ingles subtitulado', 'sub: ingles'] vo=['ingles', 'en','vo', 'ovos', 'eng','v.o', 'english'] @@ -272,8 +272,8 @@ def set_lang(language): language = 'lat' elif language in ita: language = 'ita' - elif language in vosi: - language = 'vosi' + elif language in sub_ita: + language = 'sub-ita' elif language in vose: language = 'vose' elif language in vos: diff --git a/resources/language/English/strings.po b/resources/language/English/strings.po index 34aaf082..9ec7c01e 100644 --- a/resources/language/English/strings.po +++ b/resources/language/English/strings.po @@ -21,6 +21,10 @@ msgctxt "#20000" msgid "KOD" msgstr "" +msgctxt "#20001" +msgid "eng" +msgstr "" + msgctxt "#30001" msgid "Check for updates:" msgstr "" diff --git a/resources/language/Italian/strings.po b/resources/language/Italian/strings.po index ccf02711..65efc50a 100644 --- a/resources/language/Italian/strings.po +++ b/resources/language/Italian/strings.po @@ -21,6 
+21,10 @@ msgctxt "#20000" msgid "KOD" msgstr "KOD" +msgctxt "#20001" +msgid "eng" +msgstr "ita" + msgctxt "#30001" msgid "Check for updates:" msgstr "Verifica aggiornamenti:" diff --git a/resources/settings.xml b/resources/settings.xml index d5bf0175..89b0e9cb 100644 --- a/resources/settings.xml +++ b/resources/settings.xml @@ -106,7 +106,7 @@ <setting id="vose_color" type="labelenum" label="70142" values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]" default="white" visible="eq(-10,true)+eq(-11,true)"/> - <setting id="vosi_color" type="labelenum" label="70566" + <setting id="sub-ita_color" type="labelenum" label="70566" values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]" default="white" visible="eq(-11,true)+eq(-12,true)"/> <setting id="vos_color" type="labelenum" label="70143" diff --git a/specials/news.py b/specials/news.py index b4d2e734..0099ef87 100644 --- a/specials/news.py +++ b/specials/news.py @@ -46,7 +46,7 @@ def mainlist(item): list_canales, any_active = get_channels_list() channel_language = config.get_setting("channel_language", default="auto") if channel_language == 'auto': - channel_language = auto_filter()[0] + channel_language = auto_filter() #if list_canales['peliculas']: thumbnail = get_thumb("channels_movie.png") @@ -134,7 +134,7 @@ def get_channels_list(): channels_path = os.path.join(config.get_runtime_path(), "channels", '*.json') channel_language = config.get_setting("channel_language", default="all") if channel_language =="auto": - channel_language = auto_filter()[0] + channel_language = auto_filter() for infile in sorted(glob.glob(channels_path)): channel_id = os.path.basename(infile)[:-5] @@ -607,7 +607,7 @@ def setting_channel(item): channels_path = os.path.join(config.get_runtime_path(), "channels", '*.json') channel_language = config.get_setting("channel_language", default="auto") if channel_language == 'auto': - channel_language = auto_filter()[0] + channel_language = auto_filter() list_controls = [] diff --git a/specials/search.py b/specials/search.py index 882f97eb..985e6f0d 100644 --- a/specials/search.py +++ b/specials/search.py @@ -475,7 +475,7 @@ def do_search(item, categories=None): logger.info("channels_path=%s" % channels_path) # channel_language = config.get_setting("channel_language", default="all") - channel_language = auto_filter()[0] + channel_language = auto_filter() logger.info("channel_language=%s" % channel_language) # Para Kodi es necesario esperar antes de cargar el progreso, de lo contrario From 8b14031051ec1f1c0b88abc1d70eaefb000e4912 Mon Sep 17 00:00:00 2001 From: Alhaziel <alhaziel01@gmail.com> Date: Sat, 19 Oct 2019 12:21:01 +0200 Subject: [PATCH 53/61] Filtri Personalizzati per Community Channels --- specials/community.py | 220 +++++++++++++++++++++++++++++++++--------- 1 file changed, 174 insertions(+), 46 deletions(-) diff --git 
a/specials/community.py b/specials/community.py index 7aac5509..aa4b2bc7 100644 --- a/specials/community.py +++ b/specials/community.py @@ -3,14 +3,13 @@ # -*- Created for Alfa-addon -*- # -*- By the Alfa Develop Group -*- -import re, urllib, os, inspect -import requests +import re, os, inspect, requests from core import httptools, scrapertoolsV2, servertools, jsontools, tmdb, support from core.item import Item from core.support import typo from channelselector import get_thumb -from platformcode import logger, config, platformtools +from platformcode import config, platformtools from specials import autoplay @@ -20,6 +19,7 @@ list_language = ['ITA', 'SUB-ITA'] list_servers = ['directo', 'akvideo', 'verystream', 'openload'] list_quality = ['SD', '720', '1080', '4k'] + def mainlist(item): support.log() @@ -48,8 +48,7 @@ def show_channels(item): itemlist.append(Item(channel=item.channel, title=typo(config.get_localized_string(70676),'bold color kod'), action='add_channel', - thumbnail=get_thumb('add.png') - )) + thumbnail=get_thumb('add.png'))) for key, channel in json['channels'].items(): # Find File Path @@ -81,6 +80,7 @@ def show_channels(item): path=path)) return itemlist + def show_menu(item): global list_data itemlist = [] @@ -94,6 +94,7 @@ def show_menu(item): fanart = relative('fanart', option, item.path) plot = option['plot'] if option.has_key('plot') else item.plot url = relative('link', option, item.path) + submenu = option['submenu'] if option.has_key('submenu') else [] itemlist.append(Item(channel=item.channel, title=format_title(option['title']), thumbnail=thumbnail, @@ -102,9 +103,26 @@ def show_menu(item): action='show_menu', url=url, path=item.path)) + if submenu: + for key in submenu: + if key != 'search': + itemlist.append(Item(channel=item.channel, + title=typo(submenu[key],'submenu'), + url=url, + path=item.path, + thumbnail=item.thumbnail, + action='submenu', + filterkey=key)) + if submenu.has_key('search'): + itemlist.append(Item(channel=item.channel, + title=typo('Cerca ' + option['title'] +'...','color kod bold'), + thumbnail=get_thumb('search.png'), + action='search', + url=url, + path=item.path)) # add Search itemlist.append(Item(channel=item.channel, - title=typo('Cerca...','color kod bold'), + title=typo('Cerca nel Canale...','color kod bold'), thumbnail=get_thumb('search.png'), action='search', url=item.url, @@ -121,6 +139,40 @@ def show_menu(item): return list_all(item) + +def submenu(item): + support.log() + + itemlist = [] + filter_list = [] + + json_data = load_json(item) + if json_data.has_key("movies_list"): item.media_type= 'movies_list' + elif json_data.has_key("tvshows_list"): item.media_type = 'tvshows_list' + elif json_data.has_key("episodes_list"): item.media_type = 'episodes_list' + elif json_data.has_key("generic_list"): item.media_type= 'generic_list' + media_type = item.media_type + + for media in json_data[media_type]: + if media.has_key(item.filterkey): + if type(media[item.filterkey]) == str and media[item.filterkey] not in filter_list: + filter_list.append(media[item.filterkey].lower()) + elif type(media[item.filterkey]) == list: + for f in media[item.filterkey]: + if f not in filter_list: + filter_list.append(f.lower()) + filter_list.sort() + for filter in filter_list: + itemlist.append(Item(channel=item.channel, + title=typo(filter, 'bold'), + url=item.url, + media_type=item.media_type, + action='list_filtered', + filterkey=item.filterkey, + filter=filter)) + return itemlist + + def list_all(item): support.log() @@ -180,8 +232,70 @@ def 
list_all(item): item.plot = '\n\n' + typo('','submenu') + '\n' + item.personal_plot + '\n' + typo('','submenu') + '\n\n' + item.plot return itemlist + +def list_filtered(item): + support.log() + + itemlist = [] + media_type = item.media_type + json_data = load_json(item) + contentTitle = contentSerieName = '' + infoLabels = item.infoLabels if item.infoLabels else {} + + if json_data: + for media in json_data[media_type]: + if media.has_key(item.filterkey) and (item.filter.lower() in media[item.filterkey]): + + quality, language, plot, poster = set_extra_values(media) + + fulltitle = media['title'] + title = set_title(fulltitle, language, quality) + + infoLabels['year'] = media['year'] if media.has_key('year')else '' + infoLabels['tmdb_id'] = media['tmdb_id'] if media.has_key('tmdb_id') else '' + + if 'movies_list' in json_data or 'generic_list' in json_data: + url= media + contentTitle = fulltitle + contentType = 'movie' + action='findvideos' + + else: + contentSerieName = fulltitle + contentType = 'tvshow' + if media.has_key('seasons_list'): + url = media['seasons_list'] + action = 'get_seasons' + else: + url = relative('link', media, item.path) + action = 'episodios' + + itemlist.append(Item(channel=item.channel, + contentType=contentType, + title=format_title(title), + fulltitle=fulltitle, + show=fulltitle, + quality=quality, + language=language, + plot=plot, + personal_plot=plot, + thumbnail=poster, + path=item.path, + url=url, + contentTitle=contentTitle, + contentSerieName=contentSerieName, + action=action)) + + if not 'generic_list' in json_data: + tmdb.set_infoLabels(itemlist, seekTmdb=True) + for item in itemlist: + if item.personal_plot != item.plot and item.personal_plot: + item.plot = '\n\n' + typo('','submenu') + '\n' + item.personal_plot + '\n' + typo('','submenu') + '\n\n' + item.plot + return itemlist + + def get_seasons(item): - support.log(item) + support.log() itemlist = [] infoLabels = item.infoLabels if item.infolabels else {} list_seasons = item.url @@ -218,7 +332,7 @@ def get_seasons(item): def episodios(item): - support.log(item) + support.log() itemlist = [] json_data = load_json(item) @@ -285,6 +399,7 @@ def findvideos(item): return itemlist + def add_channel(item): support.log() import xbmc @@ -333,6 +448,7 @@ def add_channel(item): platformtools.dialog_notification(config.get_localized_string(20000), config.get_localized_string(70683) % json_file['channel_name']) return + def remove_channel(item): support.log() import xbmc @@ -387,60 +503,71 @@ def set_title(title, language, quality): return title + def format_title(title): t = scrapertoolsV2.find_single_match(title, r'\{([^\}]+)\}') if 'bold' not in t: t += ' bold' title = re.sub(r'(\{[^\}]+\})','',title) return typo(title,t) + def search(item, text): support.log('Search ', text) itemlist = [] json_data = load_json(item) + support.log('JSON= ', json_data) return load_links(item, itemlist, json_data, text) + def load_links(item, itemlist, json_data, text): - for option in json_data['menu']: - json_data = load_json(option['link'] if option['link'].startswith('http') else item.path+option['link']) - if not 'menu' in json_data: - if "movies_list" in json_data: media_type= 'movies_list' - elif "tvshows_list" in json_data: media_type = 'tvshows_list' - elif "episodes_list" in json_data: media_type = 'episodes_list' - if "generic_list" in json_data: media_type= 'generic_list' + support.log() - if json_data: - for media in json_data[media_type]: - if text.lower() in media['title'].lower(): - quality, language, plot, 
poster = set_extra_values(media) + def links(item, itemlist, json_data, text): + support.log() + if "movies_list" in json_data: media_type= 'movies_list' + elif "tvshows_list" in json_data: media_type = 'tvshows_list' + elif "episodes_list" in json_data: media_type = 'episodes_list' + if "generic_list" in json_data: media_type= 'generic_list' - title = media['title'] - title = set_title(title, language, quality) + if json_data: + for media in json_data[media_type]: + if text.lower() in media['title'].lower(): + quality, language, plot, poster = set_extra_values(media) - new_item = Item(channel=item.channel, title=format_title(title), quality=quality, - language=language, plot=plot, personal_plot=plot, thumbnail=poster, path=item.path) + title = media['title'] + title = set_title(title, language, quality) - new_item.infoLabels['year'] = media['year'] if 'year' in media else '' - new_item.infoLabels['tmdb_id'] = media['tmdb_id'] if 'tmdb_id' in media else '' + new_item = Item(channel=item.channel, title=format_title(title), quality=quality, + language=language, plot=plot, personal_plot=plot, thumbnail=poster, path=item.path) - if 'movies_list' in json_data or 'generic_list' in json_data: - new_item.url = media - new_item.contentTitle = media['title'] - new_item.action = 'findvideos' - else: - new_item.url = media['seasons_list'] - new_item.contentSerieName = media['title'] - new_item.action = 'seasons' + new_item.infoLabels['year'] = media['year'] if 'year' in media else '' + new_item.infoLabels['tmdb_id'] = media['tmdb_id'] if 'tmdb_id' in media else '' - itemlist.append(new_item) + if 'movies_list' in json_data or 'generic_list' in json_data: + new_item.url = media + new_item.contentTitle = media['title'] + new_item.action = 'findvideos' + else: + new_item.url = media['seasons_list'] + new_item.contentSerieName = media['title'] + new_item.action = 'seasons' - if not 'generic_list' in json_data: - tmdb.set_infoLabels(itemlist, seekTmdb=True) - for item in itemlist: - if item.personal_plot != item.plot and item.personal_plot: - item.plot = '\n\n' + typo('','submenu') + '\n' + item.personal_plot + '\n' + typo('','submenu') + '\n\n' + item.plot - else: + itemlist.append(new_item) + + if not 'generic_list' in json_data: + tmdb.set_infoLabels(itemlist, seekTmdb=True) + for item in itemlist: + if item.personal_plot != item.plot and item.personal_plot: + item.plot = '\n\n' + typo('','submenu') + '\n' + item.personal_plot + '\n' + typo('','submenu') + '\n\n' + item.plot + + if json_data.has_key('menu'): + for option in json_data['menu']: + json_data = load_json(option['link'] if option['link'].startswith('http') else item.path+option['link']) load_links(item, itemlist, json_data, text) + else: + links(item, itemlist, json_data, text) + return itemlist @@ -454,18 +581,19 @@ def relative(key, json, path): ret = '' return ret -def load_json(item): - support.log(item) - url= item if type(item) == str else item.url - if url: +def load_json(item): + support.log() + url= item if type(item) == str else item.url + try: if url.startswith('http'): json_file = httptools.downloadpage(url).data else: json_file = open(url, "r").read() json_data = jsontools.load(json_file) - else: - json_data = '' + + except: + json_data = {} return json_data \ No newline at end of file From ca96e110becb1f5401b98f5a94910baa22f9eb3c Mon Sep 17 00:00:00 2001 From: Alhaziel <alhaziel01@gmail.com> Date: Sat, 19 Oct 2019 17:16:42 +0200 Subject: [PATCH 54/61] Community Channels Fix e Migliorie - Ricerca Foto e descrizione in automatico 
per: - "directors":"Nome Regista" - "actors"["Nome 1", "Nome 2"] - "search":"" -> ricerca nella lista --- specials/community.py | 108 +++++++++++++++++++++++++----------------- 1 file changed, 65 insertions(+), 43 deletions(-) diff --git a/specials/community.py b/specials/community.py index aa4b2bc7..8c982f06 100644 --- a/specials/community.py +++ b/specials/community.py @@ -12,13 +12,20 @@ from channelselector import get_thumb from platformcode import config, platformtools from specials import autoplay +import xbmc, xbmcaddon + +addon = xbmcaddon.Addon('metadata.themoviedb.org') +lang = addon.getSetting('language') + list_data = {} -list_language = ['ITA', 'SUB-ITA'] +# list_language = ['ITA', 'SUB-ITA'] list_servers = ['directo', 'akvideo', 'verystream', 'openload'] list_quality = ['SD', '720', '1080', '4k'] +tmdb_api = 'a1ab8b8669da03637a4b98fa39c39228' + def mainlist(item): support.log() @@ -156,18 +163,32 @@ def submenu(item): for media in json_data[media_type]: if media.has_key(item.filterkey): if type(media[item.filterkey]) == str and media[item.filterkey] not in filter_list: - filter_list.append(media[item.filterkey].lower()) + filter_list.append(media[item.filterkey]) elif type(media[item.filterkey]) == list: for f in media[item.filterkey]: if f not in filter_list: - filter_list.append(f.lower()) + filter_list.append(f) filter_list.sort() + for filter in filter_list: + if item.filterkey in ['director','actors']: + load_info = load_json('http://api.themoviedb.org/3/search/person/?api_key=' + tmdb_api + '&language=' + lang + '&query=' + filter) + if load_info: + id = str(load_info['results'][0]['id']) + if id: + info = load_json('http://api.themoviedb.org/3/person/'+ id + '?api_key=' + tmdb_api + '&language=' + lang) + if not info['biography']: + bio = load_json('http://api.themoviedb.org/3/person/'+ id + '?api_key=' + tmdb_api + '&language=en')['biography'] + thumbnail = 'https://image.tmdb.org/t/p/w600_and_h900_bestv2' + info['profile_path'] if info['profile_path'] else item.thumbnail + plot += info['biography'] if info['biography'] else bio if bio else '' + itemlist.append(Item(channel=item.channel, title=typo(filter, 'bold'), url=item.url, media_type=item.media_type, action='list_filtered', + thumbnail=thumbnail, + plot=plot, filterkey=item.filterkey, filter=filter)) return itemlist @@ -185,7 +206,7 @@ def list_all(item): if json_data: for media in json_data[media_type]: - quality, language, plot, poster = set_extra_values(media) + quality, language, plot, poster = set_extra_values(media, item.path) fulltitle = media['title'] title = set_title(fulltitle, language, quality) @@ -244,47 +265,49 @@ def list_filtered(item): if json_data: for media in json_data[media_type]: - if media.has_key(item.filterkey) and (item.filter.lower() in media[item.filterkey]): + if media.has_key(item.filterkey): + filter_keys = [it.lower() for it in media[item.filterkey]] if type(media[item.filterkey]) == list else media[item.filterkey].lower() + if item.filter.lower() in filter_keys: - quality, language, plot, poster = set_extra_values(media) + quality, language, plot, poster = set_extra_values(media, item.path) - fulltitle = media['title'] - title = set_title(fulltitle, language, quality) + fulltitle = media['title'] + title = set_title(fulltitle, language, quality) - infoLabels['year'] = media['year'] if media.has_key('year')else '' - infoLabels['tmdb_id'] = media['tmdb_id'] if media.has_key('tmdb_id') else '' + infoLabels['year'] = media['year'] if media.has_key('year')else '' + 
infoLabels['tmdb_id'] = media['tmdb_id'] if media.has_key('tmdb_id') else '' - if 'movies_list' in json_data or 'generic_list' in json_data: - url= media - contentTitle = fulltitle - contentType = 'movie' - action='findvideos' + if 'movies_list' in json_data or 'generic_list' in json_data: + url= media + contentTitle = fulltitle + contentType = 'movie' + action='findvideos' - else: - contentSerieName = fulltitle - contentType = 'tvshow' - if media.has_key('seasons_list'): - url = media['seasons_list'] - action = 'get_seasons' else: - url = relative('link', media, item.path) - action = 'episodios' + contentSerieName = fulltitle + contentType = 'tvshow' + if media.has_key('seasons_list'): + url = media['seasons_list'] + action = 'get_seasons' + else: + url = relative('link', media, item.path) + action = 'episodios' - itemlist.append(Item(channel=item.channel, - contentType=contentType, - title=format_title(title), - fulltitle=fulltitle, - show=fulltitle, - quality=quality, - language=language, - plot=plot, - personal_plot=plot, - thumbnail=poster, - path=item.path, - url=url, - contentTitle=contentTitle, - contentSerieName=contentSerieName, - action=action)) + itemlist.append(Item(channel=item.channel, + contentType=contentType, + title=format_title(title), + fulltitle=fulltitle, + show=fulltitle, + quality=quality, + language=language, + plot=plot, + personal_plot=plot, + thumbnail=poster, + path=item.path, + url=url, + contentTitle=contentTitle, + contentSerieName=contentSerieName, + action=action)) if not 'generic_list' in json_data: tmdb.set_infoLabels(itemlist, seekTmdb=True) @@ -386,7 +409,7 @@ def findvideos(item): itemlist = [] if 'links' in item.url: for url in item.url['links']: - quality, language, plot, poster = set_extra_values(url) + quality, language, plot, poster = set_extra_values(url, item.path) title = '' title = set_title(title, language, quality) @@ -470,7 +493,7 @@ def remove_channel(item): return -def set_extra_values(dict): +def set_extra_values(dict, path): support.log() quality = '' language = '' @@ -484,7 +507,7 @@ def set_extra_values(dict): if 'plot' in dict and dict['plot'] != '': plot = dict['plot'] if 'poster' in dict and dict['poster'] != '': - poster = dict['poster'] + poster = dict['poster']if ':/' in dict['poster'] else path + dict['poster'] if '/' in dict['poster'] else get_thumb(json[key]) if dict['poster'] else '' return quality, language, plot, poster @@ -515,7 +538,6 @@ def search(item, text): support.log('Search ', text) itemlist = [] json_data = load_json(item) - support.log('JSON= ', json_data) return load_links(item, itemlist, json_data, text) @@ -533,7 +555,7 @@ def load_links(item, itemlist, json_data, text): if json_data: for media in json_data[media_type]: if text.lower() in media['title'].lower(): - quality, language, plot, poster = set_extra_values(media) + quality, language, plot, poster = set_extra_values(media, item.path) title = media['title'] title = set_title(title, language, quality) From 8ec68ac58ecca583988c8a5467226c48d68da031 Mon Sep 17 00:00:00 2001 From: Alhaziel <alhaziel01@gmail.com> Date: Sat, 19 Oct 2019 18:17:27 +0200 Subject: [PATCH 55/61] Fix InfoLabels per Community Channels --- specials/community.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/specials/community.py b/specials/community.py index 8c982f06..bd7b8f32 100644 --- a/specials/community.py +++ b/specials/community.py @@ -205,6 +205,7 @@ def list_all(item): if json_data: for media in json_data[media_type]: + support.log(media) quality, language, plot, 
poster = set_extra_values(media, item.path) @@ -244,6 +245,7 @@ def list_all(item): url=url, contentTitle=contentTitle, contentSerieName=contentSerieName, + infoLabels=infoLabels, action=action)) if not 'generic_list' in json_data: @@ -307,6 +309,7 @@ def list_filtered(item): url=url, contentTitle=contentTitle, contentSerieName=contentSerieName, + infoLabels=infoLabels, action=action)) if not 'generic_list' in json_data: From 2b760459fc211a8709a267233e7e72973a1b8fb7 Mon Sep 17 00:00:00 2001 From: Alhaziel <alhaziel01@gmail.com> Date: Sat, 19 Oct 2019 18:30:31 +0200 Subject: [PATCH 56/61] Fix CC se si usa thumbnail al posto di poster --- specials/community.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/specials/community.py b/specials/community.py index bd7b8f32..83325b26 100644 --- a/specials/community.py +++ b/specials/community.py @@ -510,7 +510,9 @@ def set_extra_values(dict, path): if 'plot' in dict and dict['plot'] != '': plot = dict['plot'] if 'poster' in dict and dict['poster'] != '': - poster = dict['poster']if ':/' in dict['poster'] else path + dict['poster'] if '/' in dict['poster'] else get_thumb(json[key]) if dict['poster'] else '' + poster = dict['poster']if ':/' in dict['poster'] else path + dict['poster'] if '/' in dict['poster'] else '' + elif 'thumbnail' in dict and dict['thumbnail'] != '': + poster = dict['thumbnail']if ':/' in dict['thumbnail'] else path + dict['thumbnail'] if '/' in dict['thumbnail'] else '' return quality, language, plot, poster From c6ec08068ccb12c5463d25a498bc211ede5087c9 Mon Sep 17 00:00:00 2001 From: marco <m.toma99@gmail.com> Date: Sat, 19 Oct 2019 21:06:33 +0200 Subject: [PATCH 57/61] fix streamingaltadefinizione (popcorn stream) e vedohd --- channels/streamingaltadefinizione.py | 41 +++++++++++++++++----------- channels/vedohd.py | 2 +- core/support.py | 25 ++++++++++------- 3 files changed, 41 insertions(+), 27 deletions(-) diff --git a/channels/streamingaltadefinizione.py b/channels/streamingaltadefinizione.py index 41b4b8e3..ffa7eaf9 100644 --- a/channels/streamingaltadefinizione.py +++ b/channels/streamingaltadefinizione.py @@ -1,37 +1,44 @@ # -*- coding: utf-8 -*- # ------------------------------------------------------------ -# Canale per Streaming Altadefinizione +# Canale per Popcorn Stream # ------------------------------------------------------------ -""" - Trasformate le sole def per support.menu e support.scrape - da non inviare nel test. 
- Test solo a trasformazione completa -""" - -from core import support +from core import support, httptools from core.item import Item from platformcode import config -__channel__ = "streamingaltadefinizione" -host = config.get_channel_url(__channel__) +# __channel__ = "streamingaltadefinizione" +# host = config.get_channel_url(__channel__) +host = headers = '' list_servers = ['verystream', 'openload', 'wstream'] list_quality = ['1080p', 'HD', 'DVDRIP', 'SD', 'CAM'] +def findhost(): + global host, headers + permUrl = httptools.downloadpage('https://www.popcornstream.info', follow_redirects=False).headers + if 'google' in permUrl['location']: + if host[:4] != 'http': + host = 'https://'+permUrl['location'].replace('https://www.google.it/search?q=site:', '') + else: + host = permUrl['location'].replace('https://www.google.it/search?q=site:', '') + else: + host = permUrl['location'] + headers = [['Referer', host]] + @support.menu def mainlist(item): + findhost() film = ["/film/"] - anime = ["/genere/anime/", - ('Film Anime', ["/genere/anime/", 'peliculas']), - ('Film Anime per genere', ["/genere/anime/", 'generos']) - ] + anime = ["/genere/anime/"] tvshow = ["/serietv/"] + top = [('Generi',['', 'generos'])] return locals() def search(item, text): + findhost() support.log("[streamingaltadefinizione.py] " + item.url + " search " + text) item.url = item.url + "/?s=" + text @@ -48,15 +55,17 @@ def generos(item): def peliculas(item): - support.dbg() - return support.dooplay_peliculas(item, True if "/genere/anime/" in item.url else False) + findhost() + return support.dooplay_peliculas(item, True if "/genere/" in item.url else False) def episodios(item): + findhost() return support.dooplay_get_episodes(item) def findvideos(item): + findhost() itemlist = [] for link in support.dooplay_get_links(item, host): if link['title'] != 'Guarda il trailer': diff --git a/channels/vedohd.py b/channels/vedohd.py index b5b30e7f..f5a43ca4 100644 --- a/channels/vedohd.py +++ b/channels/vedohd.py @@ -40,7 +40,7 @@ def search(item, text): def peliculas(item): - return support.dooplay_peliculas(item, blacklist) + return support.dooplay_peliculas(item, False, blacklist) def findvideos(item): diff --git a/core/support.py b/core/support.py index b8550c67..1d79d976 100644 --- a/core/support.py +++ b/core/support.py @@ -362,8 +362,8 @@ def scrape(func): headers = '' patronNext = args['patronNext'] if 'patronNext' in args else '' patronBlock = args['patronBlock'] if 'patronBlock' in args else '' - typeActionDict = args['type_action_dict'] if 'type_action_dict' in args else {} - typeContentDict = args['type_content_dict'] if 'type_content_dict' in args else {} + typeActionDict = args['typeActionDict'] if 'typeActionDict' in args else {} + typeContentDict = args['typeContentDict'] if 'typeContentDict' in args else {} debug = args['debug'] if 'debug' in args else False log('STACK= ', inspect.stack()[1][3]) if 'pagination' in args and inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes']: pagination = args['pagination'] if args['pagination'] else 20 @@ -510,13 +510,17 @@ def dooplay_peliculas(item, mixed=False, blacklist=""): else: if item.contentType == 'movie': action = 'findvideos' - patron = '<article id="post-[0-9]+" class="item movies">.*?<img src="(?!data)(?P<thumb>[^"]+)".*?<span class="quality">(?P<quality>[^<>]+).*?<a href="(?P<url>[^"]+)">(?P<title>[^<>]+)</a></h3>.*?(?:<span>[^<>]*(?P<year>[0-9]{4})</span>|</article>).*?(?:<span>(?P<duration>[0-9]+) min</span>|</article>).*?(?:<div 
class="texto">(?P<plot>[^<>]+)|</article>).*?(?:genres">(?P<genre>.*?)</div>|</article>)' + patron = '<article id="post-[0-9]+" class="item movies">.*?<img src="(?!data)(?P<thumb>[^"]+)".*?<span class="quality">(?P<quality>[^<>]+).*?<a href="(?P<url>[^"]+)">(?P<title>[^<>]+)</a></h3>.*?(?:<span>[^<>]*(?P<year>[0-9]{4})</span>|</article>)' else: action = 'episodios' - patron = '<article id="post-[0-9]+" class="item ' + ('\w+' if mixed else 'tvshow') + '">.*?<img src="(?!data)(?P<thumb>[^"]+)".*?(?:<span class="quality">(?P<quality>[^<>]+))?.*?<a href="(?P<url>[^"]+)">(?P<title>[^<>]+)</a></h3>.*?(?:<span>(?P<year>[0-9]{4})</span>|</article>).*?(?:<div class="texto">(?P<plot>[^<>]+)|</article>).*?(?:genres">(?P<genre>.*?)</div>|</article>)' + patron = '<article id="post-[0-9]+" class="item (?P<type>' + ('\w+' if mixed else 'tvshows') + ')">.*?<img src="(?!data)(?P<thumb>[^"]+)".*?(?:<span class="quality">(?P<quality>[^<>]+))?.*?<a href="(?P<url>[^"]+)">(?P<title>[^<>]+)</a></h3>.*?(?:<span>(?P<year>[0-9]{4})</span>|</article>).*?(?:<div class="texto">(?P<plot>[^<>]+)|</article>).*?(?:genres">(?P<genre>.*?)</div>|</article>)' patronNext = '<div class="pagination">.*?class="current".*?<a href="([^"]+)".*?<div class="resppages">' addVideolibrary = False + if mixed: + typeActionDict={'findvideos': ['movies'], 'episodios': ['tvshows']} + typeContentDict={'film': ['movies'], 'serie': ['tvshows']} + return locals() @@ -524,6 +528,7 @@ def dooplay_peliculas(item, mixed=False, blacklist=""): def dooplay_search(item, blacklist=""): return dooplay_search_vars(item, blacklist) + def dooplay_search_vars(item, blacklist): if item.contentType == 'movie': type = 'movies' @@ -534,12 +539,12 @@ def dooplay_search_vars(item, blacklist): patron = '<div class="result-item">.*?<img src="(?P<thumb>[^"]+)".*?<span class="' + type + '">(?P<quality>[^<>]+).*?<a href="(?P<url>[^"]+)">(?P<title>[^<>]+)</a>.*?<span class="year">(?P<year>[0-9]{4}).*?<div class="contenido"><p>(?P<plot>[^<>]+)' patronNext = '<a class="arrow_pag" href="([^"]+)"><i id="nextpagination"' - def fullItemlistHook(itemlist): - # se è una next page - if itemlist[-1].title == typo(config.get_localized_string(30992), 'color kod bold'): - itemlist[-1].action = 'peliculas' - itemlist[-1].args = 'searchPage' - return itemlist + # def fullItemlistHook(itemlist): + # # se è una next page + # if itemlist[-1].title == typo(config.get_localized_string(30992), 'color kod bold'): + # itemlist[-1].action = 'peliculas' + # itemlist[-1].args = 'searchPage' + # return itemlist return locals() def swzz_get_url(item): From 1f46fd27374a71a0644ebb3c1239bc8e9efef076 Mon Sep 17 00:00:00 2001 From: marco <m.toma99@gmail.com> Date: Sun, 20 Oct 2019 20:53:19 +0200 Subject: [PATCH 58/61] fix ricerca streamingaltadefinizione (popcorn stream) --- channels/streamingaltadefinizione.py | 1 - core/support.py | 8 ++++++-- platformcode/updater.py | 3 ++- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/channels/streamingaltadefinizione.py b/channels/streamingaltadefinizione.py index ffa7eaf9..ce616fb3 100644 --- a/channels/streamingaltadefinizione.py +++ b/channels/streamingaltadefinizione.py @@ -38,7 +38,6 @@ def mainlist(item): def search(item, text): - findhost() support.log("[streamingaltadefinizione.py] " + item.url + " search " + text) item.url = item.url + "/?s=" + text diff --git a/core/support.py b/core/support.py index 1d79d976..105463c9 100644 --- a/core/support.py +++ b/core/support.py @@ -239,7 +239,7 @@ def scrapeBlock(item, args, block, patron, headers, 
action, pagination, debug, t title = episode + (s if episode and title else '') + title longtitle = title + (s if title and title2 else '') + title2 longtitle = typo(longtitle, 'bold') - longtitle += (typo(Type,'_ () bold') if Type else '') + (typo(quality, '_ [] color kod') if quality else '') + longtitle += typo(quality, '_ [] color kod') if quality else '' lang1, longtitle = scrapeLang(scraped, lang, longtitle) @@ -530,7 +530,11 @@ def dooplay_search(item, blacklist=""): def dooplay_search_vars(item, blacklist): - if item.contentType == 'movie': + if item.contentType == 'list': # ricerca globale + type = '(?P<type>movies|tvshows)' + typeActionDict = {'findvideos': ['movies'], 'episodios': ['tvshows']} + typeContentDict = {'movie': ['movies'], 'episode': ['tvshows']} + elif item.contentType == 'movie': type = 'movies' action = 'findvideos' else: diff --git a/platformcode/updater.py b/platformcode/updater.py index 5a11a75f..2ecc72fb 100644 --- a/platformcode/updater.py +++ b/platformcode/updater.py @@ -243,11 +243,12 @@ def updateFromZip(): try: hash = fixZipGetHash(localfilename) import zipfile - with zipfile.ZipFile(localfilename, "r") as zip_ref: + with zipfile.ZipFile(io.FileIO(localfilename), "r") as zip_ref: zip_ref.extractall(destpathname) except Exception as e: logger.info('Non sono riuscito ad estrarre il file zip') logger.info(e) + dp.close() return False dp.update(95) From e0b3348d2e26b50ef2148c9de7d50c1d52bde807 Mon Sep 17 00:00:00 2001 From: greko17 <sex1712@email.it> Date: Mon, 21 Oct 2019 14:13:43 +0200 Subject: [PATCH 59/61] fix: wstream --- servers/wstream.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/servers/wstream.json b/servers/wstream.json index d32574f1..8211a23c 100644 --- a/servers/wstream.json +++ b/servers/wstream.json @@ -7,8 +7,8 @@ "find_videos": { "patterns": [ { - "pattern": "wstream.video/(?:embed-|videos/|video/|videow/)?([a-z0-9A-Z]+)", - "url": "http:\/\/wstream.video\/videow\/\\1" + "pattern": "wstream.video/(?:embed-|videos/|video/|videow/|videoj/)?([a-z0-9A-Z]+)", + "url": "http:\/\/wstream.video\/videoj\/\\1" } ], "ignore_urls": [ ] From 899c8bd7542b7e43d8ecc436ab384905562724d1 Mon Sep 17 00:00:00 2001 From: mac12m99 <m.toma99@gmail.com> Date: Mon, 21 Oct 2019 18:07:46 +0200 Subject: [PATCH 60/61] nuovo pattern wstream --- servers/wstream.json | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/servers/wstream.json b/servers/wstream.json index 8211a23c..cadb9a84 100644 --- a/servers/wstream.json +++ b/servers/wstream.json @@ -7,9 +7,13 @@ "find_videos": { "patterns": [ { - "pattern": "wstream.video/(?:embed-|videos/|video/|videow/|videoj/)?([a-z0-9A-Z]+)", + "pattern": "wstream\\.video/video\\.php\\?file_code=([a-z0-9A-Z]+)", "url": "http:\/\/wstream.video\/videoj\/\\1" - } + }, + { + "pattern": "wstream\\.video/(?:embed-|videos/|video/|videow/|videoj/)?([a-z0-9A-Z]+)", + "url": "http:\/\/wstream.video\/videoj\/\\1" + } ], "ignore_urls": [ ] }, From 1043a62d874ea08bc2d864555292c9f497c254df Mon Sep 17 00:00:00 2001 From: Alhaziel <alhaziel01@gmail.com> Date: Mon, 21 Oct 2019 18:21:50 +0200 Subject: [PATCH 61/61] =?UTF-8?q?Community=20Channel=20-=20Supporto=20a=20?= =?UTF-8?q?Pagination=20-=20Visualizzazione=20in=20Stagioni=20-=20Filtri?= =?UTF-8?q?=20nel=20menu=20di=202=C2=B0=20livello=20-=20Thumbnails=20e=20p?= =?UTF-8?q?lot=20nei=20filtri?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- channels/seriehd.py | 3 +- core/videolibrarytools.py | 2 - 
platformcode/platformtools.py | 3 +- specials/community.json | 31 ++-- specials/community.py | 337 +++++++++++++++++++++++++--------- specials/downloads.py | 2 +- 6 files changed, 266 insertions(+), 112 deletions(-) diff --git a/channels/seriehd.py b/channels/seriehd.py index 51648003..030d0c7b 100644 --- a/channels/seriehd.py +++ b/channels/seriehd.py @@ -76,9 +76,8 @@ def episodios(item): episodes = support.match(item, r'<a href="([^"]+)">(\d+)<', '<h3>EPISODIO</h3><ul>(.*?)</ul>', headers, season_url)[0] for episode_url, episode in episodes: episode_url = support.urlparse.urljoin(url, episode_url) - title = season + "x" + episode.zfill(2) + title = season + "x" + episode.zfill(2) + ' - ' + item.fulltitle data += title + '|' + episode_url + '\n' - support.log('DaTa= ',data) patron = r'(?P<title>[^\|]+)\|(?P<url>[^\n]+)\n' action = 'findvideos' return locals() diff --git a/core/videolibrarytools.py b/core/videolibrarytools.py index a90321dd..832c6f87 100644 --- a/core/videolibrarytools.py +++ b/core/videolibrarytools.py @@ -834,8 +834,6 @@ def add_tvshow(item, channel=None): # Obtiene el listado de episodios if item.channel == 'community': - itemlist = getattr(channel, 'get_seasons')(item) - else: itemlist = getattr(channel, item.action)(item) insertados, sobreescritos, fallidos, path = save_tvshow(item, itemlist) diff --git a/platformcode/platformtools.py b/platformcode/platformtools.py index 76299a33..483c9506 100644 --- a/platformcode/platformtools.py +++ b/platformcode/platformtools.py @@ -608,7 +608,8 @@ def set_context_commands(item, parent_item): # elif item.contentSerieName: # Descargar serie - elif item.contentType == "tvshow" and item.action in ["episodios"]: + elif (item.contentType == "tvshow" and item.action in ["episodios"]) or \ + (item.contentType == "tvshow" and item.action in ['get_seasons'] and config.get_setting('show_seasons',item.channel) == False): item.contentType == "tvshow" context_commands.append((config.get_localized_string(60355), "XBMC.RunPlugin(%s?%s)" % (sys.argv[0], item.clone(channel="downloads", action="save_download", diff --git a/specials/community.json b/specials/community.json index f35640d1..ac2ad1f4 100644 --- a/specials/community.json +++ b/specials/community.json @@ -3,31 +3,28 @@ "name": "Community", "active": true, "adult": false, - "language": ["cast", "lat"], + "language": ["*"], "thumbnail": "", "banner": "", "fanart": "", - "categories": [ - "direct", - "movie", - "tvshow", - "vo" - ], + "categories": [], "settings": [ { - "id": "filterlanguages", + "id": "pagination", "type": "list", - "label": "Mostrar enlaces del canal en idioma...", - "default": 3, + "label": "Pagination", + "default": 2, "enabled": true, "visible": true, - "lvalues": [ - "No Filtrar", - "LAT", - "CAST", - "VO", - "VOSE" - ] + "lvalues": ["@70708", "20", "40", "60", "80", "100"] + }, + { + "id": "show_seasons", + "type": "bool", + "label": "Show Seasons", + "default": false, + "enabled": true, + "visible": true } ] } diff --git a/specials/community.py b/specials/community.py index 83325b26..7214a649 100644 --- a/specials/community.py +++ b/specials/community.py @@ -1,9 +1,7 @@ # -*- coding: utf-8 -*- # -*- Channel Community -*- -# -*- Created for Alfa-addon -*- -# -*- By the Alfa Develop Group -*- -import re, os, inspect, requests +import re, os, inspect, requests, xbmc, xbmcaddon from core import httptools, scrapertoolsV2, servertools, jsontools, tmdb, support from core.item import Item @@ -12,15 +10,15 @@ from channelselector import get_thumb from platformcode import 
config, platformtools from specials import autoplay -import xbmc, xbmcaddon - addon = xbmcaddon.Addon('metadata.themoviedb.org') lang = addon.getSetting('language') +defpage = ["", "20", "40", "60", "80", "100"] +defp = defpage[config.get_setting('pagination','community')] +show_seasons = config.get_setting('show_seasons','community') list_data = {} -# list_language = ['ITA', 'SUB-ITA'] list_servers = ['directo', 'akvideo', 'verystream', 'openload'] list_quality = ['SD', '720', '1080', '4k'] @@ -68,7 +66,7 @@ def show_channels(item): if file_path.startswith('http'): file_url = httptools.downloadpage(file_path, follow_redirects=True).data else: file_url = open(file_path, "r").read() - # loa djson + # load json json_url = jsontools.load(file_url) thumbnail = relative('thumbnail', json_url, path) @@ -85,6 +83,9 @@ def show_channels(item): channel_id = key, context=context, path=path)) + + autoplay.show_option(item.channel, itemlist) + support.channel_config(item, itemlist) return itemlist @@ -93,56 +94,111 @@ def show_menu(item): itemlist = [] support.log() - json_data = load_json(item) + # If Second Level Menu + if item.menu: + menu = item.menu + item.menu = None + itemlist.append(item) + for key in menu: + if key != 'search': + if type(menu[key]) == dict: + title = menu[key]['title'] if menu[key].has_key('title') else item.title + thumbnail = relative('thumbnail', menu[key], item.path) + plot = menu[key]['plot'] if menu[key].has_key('plot') else '' + else: + title = menu[key] + thumbnail = item.thumbnail + plot = '' - if "menu" in json_data: - for option in json_data['menu']: - thumbnail = relative('thumbnail', option, item.path) - fanart = relative('fanart', option, item.path) - plot = option['plot'] if option.has_key('plot') else item.plot - url = relative('link', option, item.path) - submenu = option['submenu'] if option.has_key('submenu') else [] + itemlist.append(Item(channel=item.channel, + title=typo(title,'submenu'), + url=item.url, + path=item.path, + thumbnail=thumbnail, + plot=plot, + action='submenu', + filterkey=key)) + + if menu.has_key('search'): itemlist.append(Item(channel=item.channel, - title=format_title(option['title']), - thumbnail=thumbnail, - fanart=fanart, - plot=plot, - action='show_menu', - url=url, - path=item.path)) - if submenu: - for key in submenu: - if key != 'search': - itemlist.append(Item(channel=item.channel, - title=typo(submenu[key],'submenu'), - url=url, - path=item.path, - thumbnail=item.thumbnail, - action='submenu', - filterkey=key)) - if submenu.has_key('search'): - itemlist.append(Item(channel=item.channel, - title=typo('Cerca ' + option['title'] +'...','color kod bold'), - thumbnail=get_thumb('search.png'), - action='search', - url=url, - path=item.path)) - # add Search - itemlist.append(Item(channel=item.channel, - title=typo('Cerca nel Canale...','color kod bold'), - thumbnail=get_thumb('search.png'), - action='search', - url=item.url, - path=item.path)) - # autoplay config only in main menu - if json_data.has_key('channel_name'): autoplay.show_option(item.channel, itemlist) + title=typo('Cerca ' + item.fulltitle +'...','color kod bold'), + thumbnail=get_thumb('search.png'), + action='search', + url=item.url, + path=item.path)) return itemlist - # select type of list - if json_data.has_key("movies_list"): item.media_type= 'movies_list' - elif json_data.has_key("tvshows_list"): item.media_type = 'tvshows_list' - elif json_data.has_key("episodes_list"): item.media_type = 'episodes_list' - elif json_data.has_key("generic_list"): item.media_type= 
'generic_list' + else: + json_data = load_json(item) + + if "menu" in json_data: + for option in json_data['menu']: + thumbnail = relative('thumbnail', option, item.path) + fanart = relative('fanart', option, item.path) + plot = option['plot'] if option.has_key('plot') else item.plot + url = relative('link', option, item.path) + submenu = option['submenu'] if option.has_key('submenu') else [] + level2 = option['level2'] if option.has_key('level2') else [] + itemlist.append(Item(channel=item.channel, + title=format_title(option['title']), + fulltitle=option['title'], + thumbnail=thumbnail, + fanart=fanart, + plot=plot, + action='show_menu', + url=url, + path=item.path, + menu=level2)) + + if submenu: + for key in submenu: + if key != 'search': + if type(submenu[key]) == dict: + title = submenu[key]['title'] if submenu[key].has_key('title') else item.title + thumbnail = relative('thumbnail', submenu[key], item.path) + plot = submenu[key]['plot'] if submenu[key].has_key('plot') else '' + else: + title = submenu[key] + thumbnail = item.thumbnail + plot = '' + + itemlist.append(Item(channel=item.channel, + title=typo(title,'submenu'), + url=url, + path=item.path, + thumbnail=thumbnail, + plot=plot, + action='submenu', + filterkey=key)) + if submenu.has_key('search'): + itemlist.append(Item(channel=item.channel, + title=typo('Cerca ' + option['title'] +'...','color kod bold'), + thumbnail=get_thumb('search.png'), + action='search', + url=url, + path=item.path)) + # add Search + itemlist.append(Item(channel=item.channel, + title=typo('Cerca nel Canale...','color kod bold'), + thumbnail=get_thumb('search.png'), + action='search', + url=item.url, + path=item.path)) + + return itemlist + + # select type of list + if json_data.has_key("movies_list"): + item.media_type = 'movies_list' + item.contentType = 'movie' + elif json_data.has_key("tvshows_list"): + item.media_type = 'tvshows_list' + item.contentType = 'tvshow' + elif json_data.has_key("episodes_list"): + item.media_type = 'episodes_list' + item.contentType = 'episode' + elif json_data.has_key("generic_list"): + item.media_type= 'generic_list' return list_all(item) @@ -152,6 +208,7 @@ def submenu(item): itemlist = [] filter_list = [] + plot = item.plot json_data = load_json(item) if json_data.has_key("movies_list"): item.media_type= 'movies_list' @@ -195,7 +252,12 @@ def submenu(item): def list_all(item): - support.log() + support.log('CONTENT TYPE ', item.contentType) + + if inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes']: + pagination = int(defp) if defp.isdigit() else '' + else: pagination = '' + pag = item.page if item.page else 1 itemlist = [] media_type = item.media_type @@ -204,8 +266,9 @@ def list_all(item): infoLabels = item.infoLabels if item.infoLabels else {} if json_data: - for media in json_data[media_type]: - support.log(media) + for i, media in enumerate(json_data[media_type]): + if pagination and (pag - 1) * pagination > i: continue # pagination + if pagination and i >= pag * pagination: break # pagination quality, language, plot, poster = set_extra_values(media, item.path) @@ -247,6 +310,19 @@ def list_all(item): contentSerieName=contentSerieName, infoLabels=infoLabels, action=action)) + if pagination and len(json_data[media_type]) >= pag * pagination: + if inspect.stack()[1][3] != 'get_newest': + itemlist.append( + Item(channel=item.channel, + action = item.action, + contentType=contentType, + title=typo(config.get_localized_string(30992), 'color kod bold'), + fulltitle= item.fulltitle, 
+ show= item.show, + url=item.url, + args=item.args, + page=pag + 1, + thumbnail=support.thumb())) if not 'generic_list' in json_data: tmdb.set_infoLabels(itemlist, seekTmdb=True) @@ -259,6 +335,11 @@ def list_all(item): def list_filtered(item): support.log() + if inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes']: + pagination = int(defp) if defp.isdigit() else '' + else: pagination = '' + pag = item.page if item.page else 1 + itemlist = [] media_type = item.media_type json_data = load_json(item) @@ -266,7 +347,9 @@ def list_filtered(item): infoLabels = item.infoLabels if item.infoLabels else {} if json_data: - for media in json_data[media_type]: + for i, media in enumerate(json_data[media_type]): + if pagination and (pag - 1) * pagination > i: continue # pagination + if pagination and i >= pag * pagination: break # pagination if media.has_key(item.filterkey): filter_keys = [it.lower() for it in media[item.filterkey]] if type(media[item.filterkey]) == list else media[item.filterkey].lower() if item.filter.lower() in filter_keys: @@ -312,15 +395,30 @@ def list_filtered(item): infoLabels=infoLabels, action=action)) + if pagination and len(json_data[media_type]) >= pag * pagination and len(itemlist) >= pag * pagination: + if inspect.stack()[1][3] != 'get_newest': + itemlist.append( + Item(channel=item.channel, + action = item.action, + contentType=contentType, + title=typo(config.get_localized_string(30992), 'color kod bold'), + fulltitle= item.fulltitle, + show= item.show, + url=item.url, + args=item.args, + page=pag + 1, + thumbnail=support.thumb())) + if not 'generic_list' in json_data: tmdb.set_infoLabels(itemlist, seekTmdb=True) - for item in itemlist: - if item.personal_plot != item.plot and item.personal_plot: - item.plot = '\n\n' + typo('','submenu') + '\n' + item.personal_plot + '\n' + typo('','submenu') + '\n\n' + item.plot + for item in itemlist: + if item.personal_plot != item.plot and item.personal_plot: + item.plot = '\n\n' + typo('','submenu') + '\n' + item.personal_plot + '\n' + typo('','submenu') + '\n\n' + item.plot return itemlist def get_seasons(item): + itm = item support.log() itemlist = [] infoLabels = item.infoLabels if item.infolabels else {} @@ -343,12 +441,27 @@ def get_seasons(item): contentType='tvshow')) - if inspect.stack()[1][3] in ['add_tvshow', "get_seasons"]: + if inspect.stack()[1][3] in ['add_tvshow', "get_seasons"] or show_seasons == False: it = [] for item in itemlist: it += episodios(item) - itemlist = it + + if inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes', 'get_newest']: + pagination = int(defp) if defp.isdigit() else '' + pag = itm.page if itm.page else 1 + it = [] + for i, item in enumerate(itemlist): + if pagination and (pag - 1) * pagination > i: continue # pagination + if pagination and i >= pag * pagination: break # pagination + it.append(item) + + if pagination and len(itemlist) >= pag * pagination: + itm.page = pag + 1 + itm.title=typo(config.get_localized_string(30992), 'color kod bold') + itm.thumbnail=support.thumb() + it.append(itm) + itemlist = it else: tmdb.set_infoLabels(itemlist, seekTmdb=True) itemlist = sorted(itemlist, key=lambda i: i.title) @@ -359,13 +472,22 @@ def get_seasons(item): def episodios(item): support.log() + itm = item + + if inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes']: + pagination = int(defp) if defp.isdigit() else '' + else: pagination = '' + pag = item.page if item.page else 1 itemlist = [] 
json_data = load_json(item) infoLabels = item.infoLabels ep = 1 - season_number = infoLabels['season'] if infoLabels.has_key('season') else item.contentSeason if item.contentSeason else 1 - for episode in json_data['episodes_list']: + season = infoLabels['season'] if infoLabels.has_key('season') else item.contentSeason if item.contentSeason else 1 + + for i, episode in enumerate(json_data['episodes_list']): + if pagination and (pag - 1) * pagination > i: continue # pagination + if pagination and i >= pag * pagination: break # pagination match = [] if episode.has_key('number'): match = support.match(episode['number'], r'(?P<season>\d+)x(?P<episode>\d+)')[0][0] if not match and episode.has_key('title'): match = support.match(episode['title'], r'(?P<season>\d+)x(?P<episode>\d+)')[0][0] @@ -374,34 +496,70 @@ def episodios(item): ep = int(match[1]) + 1 season_number = match[0] else: - season_number = episode['season'] if episode.has_key('season') else 1 - episode_number = episode['number'] if episode.has_key('number') else '' - ep = int(episode_number) if episode_number else ep - if not episode_number: - episode_number = str(ep).zfill(2) - ep += 1 + season_number = episode['season'] if episode.has_key('season') else season if season else 1 + episode_number = episode['number'] if episode.has_key('number') else '' + if not episode_number.isdigit(): + episode_number = support.match(episode['title'], r'(?P<episode>\d+)')[0][0] + ep = int(episode_number) if episode_number else ep + if not episode_number: + episode_number = str(ep).zfill(2) + ep += 1 - infoLabels['season'] = season_number - infoLabels['episode'] = episode_number + infoLabels['season'] = season_number + infoLabels['episode'] = episode_number - plot = episode['plot'] if episode.has_key('plot') else item.plot - thumbnail = episode['poster'] if episode.has_key('poster') else episode['thumbnail'] if episode.has_key('thumbnail') else item.thumbnail + plot = episode['plot'] if episode.has_key('plot') else item.plot + thumbnail = episode['poster'] if episode.has_key('poster') else episode['thumbnail'] if episode.has_key('thumbnail') else item.thumbnail - title = ' - ' + episode['title'] if episode.has_key('title') else '' - title = '%sx%s%s' % (season_number, episode_number, title) + title = ' - ' + episode['title'] if episode.has_key('title') else '' + title = '%sx%s%s' % (season_number, episode_number, title) + if season_number == item.filter or not item.filterseason: + itemlist.append(Item(channel= item.channel, + title= format_title(title), + fulltitle = item.fulltitle, + show = item.show, + url= episode, + action= 'findvideos', + plot= plot, + thumbnail= thumbnail, + contentSeason= season_number, + contentEpisode= episode_number, + infoLabels= infoLabels, + contentType= 'episode')) - itemlist.append(Item(channel= item.channel, - title= format_title(title), - fulltitle = item.fulltitle, - show = item.show, - url= episode, - action= 'findvideos', - plot= plot, - thumbnail= thumbnail, - contentSeason= season_number, - contentEpisode= episode_number, - infoLabels= infoLabels, - contentType= 'episode')) + + if show_seasons == True and inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes'] and not item.filterseason: + itm.contentType='season' + season_list = [] + for item in itemlist: + if item.contentSeason not in season_list: + season_list.append(item.contentSeason) + itemlist = [] + for season in season_list: + itemlist.append(Item(channel=item.channel, + title=format_title(config.get_localized_string(60027) 
% season), + fulltitle=itm.fulltitle, + show=itm.show, + thumbnails=itm.thumbnails, + url=itm.url, + action='episodios', + contentSeason=season, + infoLabels=infoLabels, + filterseason=season)) + + elif pagination and len(json_data['episodes_list']) >= pag * pagination: + if inspect.stack()[1][3] != 'get_newest': + itemlist.append( + Item(channel=item.channel, + action = item.action, + contentType='episode', + title=typo(config.get_localized_string(30992), 'color kod bold'), + fulltitle= item.fulltitle, + show= item.show, + url=item.url, + args=item.args, + page=pag + 1, + thumbnail=support.thumb())) tmdb.set_infoLabels(itemlist, seekTmdb=True) return itemlist @@ -421,7 +579,8 @@ def findvideos(item): itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) - autoplay.start(itemlist, item) + if inspect.stack()[2][3] != 'start_download': + autoplay.start(itemlist, item) return itemlist diff --git a/specials/downloads.py b/specials/downloads.py index 740c0691..6320c6bc 100644 --- a/specials/downloads.py +++ b/specials/downloads.py @@ -829,7 +829,7 @@ def save_download(item): item.contentAction = item.from_action if item.from_action else item.action if item.contentType in ["tvshow", "episode", "season"]: - if 'download' in item and item.channel != 'community': + if 'download' in item and config.get_setting('show_seasons',item.channel) == False: heading = config.get_localized_string(70594) # <- Enter the season number item.dlseason = platformtools.dialog_numeric(0, heading, '') if item.dlseason:
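
Note on the menu structure consumed above: show_menu() now supports an optional second level, where a menu entry may carry a "submenu" dict (rendered as filter items) and a "level2" dict (rendered as a nested menu), each optionally containing a "search" key. The shape below is inferred only from the keys the code looks up, written as the Python dict jsontools.load() would hand back; everything other than those key names (channel name, values, file names) is invented for illustration and is not an official schema.

    channel_json = {
        "channel_name": "Example community channel",
        "menu": [
            {
                "title": "Film",              # passed through format_title() before display
                "link": "movies.json",        # resolved against the channel path by relative()
                "thumbnail": "movies.png",
                "plot": "",
                "submenu": {                  # rendered as 'submenu' filter entries
                    "genres": {"title": "By genre", "plot": ""},
                    "year": "By year",        # a plain string value becomes the title
                    "search": True            # only the key's presence is checked
                },
                "level2": {                   # rendered as a second-level menu page
                    "now_playing": "Now in theatres",
                    "search": True
                }
            }
        ]
    }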
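
The listing functions above (list_all, list_filtered, episodios) all apply the same page-window scheme: entries before the requested window are skipped, entries past it end the loop, and a synthetic "next page" item is appended while more entries remain. A minimal sketch of that logic, with plain dicts standing in for the addon's Item objects and a fixed PAGE_SIZE standing in for the value derived from config.get_setting('pagination', 'community'):

    # Stand-in for the setting-driven page size; the addon maps a setting index
    # onto the list ["", "20", "40", "60", "80", "100"].
    PAGE_SIZE = 20

    def paginate(entries, page=1, page_size=PAGE_SIZE):
        window = []
        for i, entry in enumerate(entries):
            if page_size and (page - 1) * page_size > i:
                continue   # before the requested window
            if page_size and i >= page * page_size:
                break      # past the requested window
            window.append(entry)

        # Offer a "next page" entry only while the source list extends past this window,
        # mirroring the >= check used in the patch.
        if page_size and len(entries) >= page * page_size:
            window.append({'title': 'Next page >>', 'page': page + 1})
        return window

    if __name__ == '__main__':
        items = [{'title': 'media %d' % n} for n in range(45)]
        print(len(paginate(items, page=1)))  # 21 -> 20 media plus the "next page" entry
        print(len(paginate(items, page=3)))  # 5  -> last partial page, no "next page"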
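
Pagination and autoplay are also switched off when these functions are reached from background jobs such as library updates or downloads; the check reads the caller's function name off the interpreter stack (inspect.stack()[1][3], or [2][3] one frame further up in findvideos, which compares against 'start_download'). A standalone sketch of the pattern, with placeholder function names:

    import inspect

    # Callers that need the complete, unpaginated listing (names are placeholders).
    BACKGROUND_CALLERS = ('add_tvshow', 'get_episodes', 'update', 'find_episodes')

    def list_all():
        # Name of the function that invoked list_all(); frame 0 is list_all itself.
        caller = inspect.stack()[1][3]
        paginate = caller not in BACKGROUND_CALLERS
        return 'paginated listing' if paginate else 'full listing'

    def update():
        # A library update walks every entry, so pagination is suppressed.
        return list_all()

    if __name__ == '__main__':
        print(list_all())  # called from <module>  -> 'paginated listing'
        print(update())    # called from update()  -> 'full listing'

Matching on frame names like this is convenient but fragile: renaming any of the listed callers silently re-enables pagination, so the whitelist has to be kept in sync by hand.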
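
With the new show_seasons setting enabled, episodios() first builds the flat episode list and then collapses it into one entry per distinct season, re-entering itself with filterseason set so only that season's episodes are listed. A reduced sketch of the grouping step, with dicts in place of Item objects and a literal in place of config.get_localized_string(60027):

    def group_by_season(episode_items):
        """Collapse a flat episode list into one selectable entry per season."""
        seasons = []
        for ep in episode_items:
            if ep['season'] not in seasons:
                seasons.append(ep['season'])   # keep first-seen order, no duplicates
        return [{'title': 'Season %s' % s, 'action': 'episodios', 'filterseason': s}
                for s in seasons]

    if __name__ == '__main__':
        flat = [{'season': 1, 'episode': 1}, {'season': 1, 'episode': 2},
                {'season': 2, 'episode': 1}]
        for entry in group_by_season(flat):
            print(entry['title'])   # Season 1, Season 2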