From ca6d5eb56d36c161a4ad3be10dff6a350ac55d15 Mon Sep 17 00:00:00 2001
From: marco
Date: Sat, 22 Feb 2020 13:36:58 +0100
Subject: [PATCH] KoD 0.8
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- lots of under-the-hood improvements, initial support for the upcoming Kodi 19
- New display mode for the next episode
- fixed wstream by adding a window to solve the reCaptcha
- added a "report a problem" section in Help
- other fixes and assorted improvements to channels and servers
---
 addon.xml | 13 +-
 channels.json | 25 +-
 channels/0example.py.txt | 75 +-
 channels/cineblog01.py | 136 +-
 channels/cinemalibero.py | 5 +-
 channels/cinetecadibologna.py | 7 -
 channels/dreamsub.py | 51 +-
 channels/dsda.py | 4 +-
 channels/filmsenzalimiticc.py | 5 +-
 channelselector.py | 9 +-
 core/channeltools.py | 165 +-
 core/downloader.py | 8 +-
 core/downloadtools.py | 24 +-
 core/filetools.py | 83 +-
 core/httptools.py | 14 +-
 core/item.py | 80 +-
 core/jsontools.py | 50 +-
 core/scraper.py | 17 +-
 core/scrapertools.py | 85 +-
 core/servertools.py | 161 +-
 core/support.py | 85 +-
 core/tmdb.py | 114 +-
 core/tvdb.py | 86 +-
 core/videolibrarytools.py | 301 +--
 core/ziptools.py | 60 +-
 default.py | 2 +-
 lib/builtins/__init__.py | 12 +
 lib/cloudscraper/__init__.py | 111 +-
 lib/cloudscraper/exceptions/__init__.py | 0
 .../exceptions/cloudflare_exceptions.py | 31 +
 .../exceptions/reCaptcha_exceptions.py | 49 +
 lib/cloudscraper/reCaptcha/2captcha.py | 49 +-
 lib/cloudscraper/reCaptcha/9kw.py | 202 ++
 lib/cloudscraper/reCaptcha/anticaptcha.py | 16 +-
 lib/cloudscraper/reCaptcha/deathbycaptcha.py | 62 +-
 lib/cloudscraper/user_agent/__init__.py | 11 +-
 lib/cloudscraper/user_agent/browsers.json | 10 +
 lib/future/backports/email/base64mime.py | 1 +
 lib/future/backports/test/pystone.py | 0
 lib/future/backports/urllib/error.py | 4 +-
 lib/future/backports/urllib/parse.py | 8 +-
 lib/future/backports/urllib/request.py | 28 +-
 lib/future/backports/urllib/robotparser.py | 6 +-
 lib/future/builtins/newround.py | 17 +-
 lib/future/moves/tkinter/filedialog.py | 6 +
 lib/future/standard_library/__init__.py | 120 +-
 lib/future/utils/__init__.py | 4 +-
 lib/generictools.py | 8 +-
 lib/githash.py | 1 -
 lib/httplib2/__init__.py | 2231 ----------------
 lib/httplib2/py2/__init__.py | 2285 +++++++++++++++++
 lib/httplib2/{ => py2}/cacerts.txt | 0
 lib/httplib2/{ => py2}/certs.py | 0
 lib/httplib2/{ => py2}/iri2uri.py | 0
 lib/httplib2/{ => py2}/socks.py | 10 +-
 lib/httplib2/py3/__init__.py | 2077 +++++++++++++++
 lib/httplib2/py3/cacerts.txt | 2197 ++++++++++++++++
 lib/httplib2/py3/certs.py | 42 +
 lib/httplib2/py3/iri2uri.py | 124 +
 lib/httplib2/py3/socks.py | 518 ++++
 lib/reprlib/__init__.py | 9 +
 platformcode/config.py | 36 +-
 platformcode/custom_code.py | 260 +-
 platformcode/download_and_play.py | 32 +-
 platformcode/envtal.py | 613 +++++
 platformcode/keymaptools.py | 7 +-
 platformcode/launcher.py | 99 +-
 platformcode/logger.py | 26 +-
 platformcode/mct.py | 730 ++++--
 platformcode/platformtools.py | 728 +++---
 platformcode/recaptcha.py | 29 +-
 platformcode/subtitletools.py | 154 +-
 platformcode/unify.py | 579 +++--
 platformcode/updater.py | 37 +-
 platformcode/xbmc_config_menu.py | 72 +-
 platformcode/xbmc_info_window.py | 3 +-
 platformcode/xbmc_videolibrary.py | 58 +-
 resources/language/English/strings.po | 86 +-
 resources/language/Italian/strings.po | 88 +-
 resources/settings.xml | 3 +-
 .../skins/Default/720p/ChannelSettings.xml | 9 +-
 resources/skins/Default/720p/NextDialog.xml | 2 +-
.../skins/Default/720p/NextDialogCompact.xml | 2 +- .../skins/Default/720p/NextDialogExtended.xml | 125 + resources/skins/Default/720p/Recaptcha.xml | 253 +- resources/skins/Default/720p/ShortCutMenu.xml | 22 +- .../media/Controls/background-diffuse.png | Bin 0 -> 1627 bytes .../media/NextDialog/background-play.png | Bin 0 -> 3409 bytes servers/{akstream.json => akvideo.json} | 8 +- servers/{akstream.py => akvideo.py} | 24 +- servers/anonfile.py | 4 +- servers/filepup.py | 4 +- servers/filevideo.py | 4 +- servers/nofile.py | 4 +- servers/userscloud.py | 4 +- servers/{vcstream.json => vidcloud.json} | 6 +- servers/{vcstream.py => vidcloud.py} | 5 +- servers/vidup.py | 4 +- servers/watchvideo.py | 4 +- servers/wstream.py | 33 +- servers/youtube.py | 5 +- specials/autoplay.py | 35 +- specials/checkhost.py | 7 +- specials/community.json | 2 +- specials/community.py | 1251 ++++----- specials/favorites.py | 16 +- specials/filtertools.py | 81 +- specials/help.py | 9 +- specials/infoplus.py | 58 +- specials/kodfavorites.py | 37 +- specials/news.py | 15 +- specials/nextep.py | 38 +- specials/renumbertools.py | 19 +- specials/resolverdns.py | 70 +- specials/search.py | 31 +- specials/setting.py | 553 +++- specials/side_menu.py | 19 +- specials/trailertools.py | 73 +- specials/tvmoviedb.py | 53 +- specials/videolibrary.py | 146 +- videolibrary_service.py | 6 +- 121 files changed, 13147 insertions(+), 5448 deletions(-) create mode 100644 lib/builtins/__init__.py create mode 100644 lib/cloudscraper/exceptions/__init__.py create mode 100644 lib/cloudscraper/exceptions/cloudflare_exceptions.py create mode 100644 lib/cloudscraper/exceptions/reCaptcha_exceptions.py create mode 100644 lib/cloudscraper/reCaptcha/9kw.py mode change 100644 => 100755 lib/future/backports/test/pystone.py create mode 100644 lib/httplib2/py2/__init__.py rename lib/httplib2/{ => py2}/cacerts.txt (100%) rename lib/httplib2/{ => py2}/certs.py (100%) rename lib/httplib2/{ => py2}/iri2uri.py (100%) rename lib/httplib2/{ => py2}/socks.py (98%) create mode 100644 lib/httplib2/py3/__init__.py create mode 100644 lib/httplib2/py3/cacerts.txt create mode 100644 lib/httplib2/py3/certs.py create mode 100644 lib/httplib2/py3/iri2uri.py create mode 100644 lib/httplib2/py3/socks.py create mode 100644 lib/reprlib/__init__.py create mode 100644 platformcode/envtal.py create mode 100644 resources/skins/Default/720p/NextDialogExtended.xml create mode 100644 resources/skins/Default/media/Controls/background-diffuse.png create mode 100644 resources/skins/Default/media/NextDialog/background-play.png rename servers/{akstream.json => akvideo.json} (81%) rename servers/{akstream.py => akvideo.py} (63%) rename servers/{vcstream.json => vidcloud.json} (85%) rename servers/{vcstream.py => vidcloud.py} (96%) mode change 100755 => 100644 specials/search.py diff --git a/addon.xml b/addon.xml index bd9dec94..df9f665d 100644 --- a/addon.xml +++ b/addon.xml @@ -1,4 +1,4 @@ - + @@ -19,12 +19,11 @@ resources/media/themes/ss/2.png resources/media/themes/ss/3.png - - - aggiunto raiplay -- agigunto d.s.d.a (ex documentaristreamingda) -- svariati fix ai canali (eurostreaming, streamtime, piratestreaming, altadefinizioneclick) -- la videoteca ora può essere messa nelle unità di rete -- aggiunto server upstream -- altri piccoli fix vari + - tanti miglioramenti "sotto il cofano", supporto iniziale al futuro kodi 19 +- Nuova modalità di visualizzazione per episodio successivo +- fixato wstream tramite l'aggiunta della finestra per risolvere il reCaptcha +- aggiunta sezione 
"segnala un problema" in Aiuto +- altri fix e migliorie varie a canali e server Naviga velocemente sul web e guarda i contenuti presenti [COLOR red]The owners and submitters to this addon do not host or distribute any of the content displayed by these addons nor do they have any affiliation with the content providers.[/COLOR] [COLOR yellow]Kodi © is a registered trademark of the XBMC Foundation. We are not connected to or in any other way affiliated with Kodi, Team Kodi, or the XBMC Foundation. Furthermore, any software, addons, or products offered by us will receive no support in official Kodi channels, including the Kodi forums and various social networks.[/COLOR] diff --git a/channels.json b/channels.json index 983d9865..7a41019f 100644 --- a/channels.json +++ b/channels.json @@ -1,17 +1,17 @@ { - "altadefinizione01": "https://www.altadefinizione01.tel", - "altadefinizione01_link": "https://altadefinizione01.cam", - "altadefinizioneclick": "https://altadefinizione.style", + "altadefinizione01": "https://altadefinizione01.media", + "altadefinizione01_link": "https://altadefinizione01.kim", + "altadefinizioneclick": "https://altadefinizione.style", "animeforce": "https://ww1.animeforce.org", "animeleggendari": "https://animepertutti.com", - "animesaturn": "https://animesaturn.com", + "animesaturn": "https://www.animesaturn.com", "animestream": "https://www.animeworld.it", "animesubita": "http://www.animesubita.org", "animetubeita": "http://www.animetubeita.com", "animeunity": "https://www.animeunity.it", - "animeworld": "https://www.animeworld.cc", - "casacinema": "https://www.casacinema.biz", - "casacinemaInfo": "https://casacinema.kim", + "animeworld": "https://www.animeworld.tv", + "casacinema": "https://www.casacinema.bid", + "casacinemaInfo": "https://casacinema.blue", "cb01anime": "https://www.cineblog01.ink", "cinetecadibologna": "http://cinestore.cinetecadibologna.it", "dreamsub": "https://dreamsub.stream", @@ -19,13 +19,13 @@ "fastsubita": "https://fastsubita.com", "filmgratis": "https://www.filmaltadefinizione.org", "filmigratis": "https://filmigratis.org", - "filmpertutti": "https://www.filmpertutti.casa", - "filmsenzalimiticc": "https://www.filmsenzalimiti.monster", + "filmpertutti": "https://www.filmpertutti.date", + "filmsenzalimiticc": "https://www.filmsenzalimiti.london", "filmstreaming01": "https://filmstreaming01.com", "guardarefilm": "https://www.guardarefilm.red", "guardaserie_stream": "https://guardaserie.store", "guardaserieclick": "https://www.guardaserie.media", - "ilgeniodellostreaming": "https://ilgeniodellostreaming.si", + "ilgeniodellostreaming": "https://ilgeniodellostreaming.si", "italiaserie": "https://italiaserie.org", "mondoserietv": "https://mondoserietv.com", "netfreex": "https://www.netfreex.pro", @@ -33,12 +33,11 @@ "polpotv": "https://polpo.tv", "pufimovies": "https://pufimovies.com", "raiplay": "https://www.raiplay.it", - "seriehd": "https://www.seriehd.watch", + "seriehd": "https://seriehd.click", "serietvonline": "https://serietvonline.icu", "serietvsubita": "http://serietvsubita.xyz", "serietvu": "https://www.serietvu.link", - "streamingaltadefinizione": "https://www.popcornstream.best", - "streamtime": "https://t.me/s/StreamTime", + "streamtime": "https://t.me/s/StreamTime", "tantifilm": "https://www.tantifilm.eu", "toonitalia": "https://toonitalia.org", "vedohd": "https://vedohd.uno", diff --git a/channels/0example.py.txt b/channels/0example.py.txt index 2d5bb5ea..72c2152d 100644 --- a/channels/0example.py.txt +++ b/channels/0example.py.txt @@ 
-44,8 +44,6 @@ # per l'uso dei decoratori, per i log, e funzioni per siti particolari from core import support -# se non si fa uso di findhost() -from platformcode import config # in caso di necessità #from core import scrapertools, httptools, servertools, tmdb @@ -54,30 +52,22 @@ from core.item import Item # per newest ##### fine import -# impostazioni variabili o def findhost() - -# se necessaria la variabile __channel__ -# da cancellare se non utilizzata -__channel__ = "id nel json" -# da cancellare se si utilizza findhost() -host = config.get_channel_url('id nel json' OR __channel__) # <-- ATTENZIONE -headers = [['Referer', host]] - -# Inizio findhost() - da cancellare se usato l'altro metodo -#impostati dinamicamente da findhost() -host = "" -headers = "" - +# se il sito ha un link per ottenere l'url corretto in caso di oscuramenti +# la funzione deve ritornare l'indirizzo corretto, verrà chiamata solo se necessario (link primario irraggiungibile) def findhost(): - global host, headers - # da adattare alla bisogna... - permUrl = httptools.downloadpage('INSERIRE-URL-QUI', follow_redirects=False).headers - host = 'https://www.'+permUrl['location'].replace('https://www.google.it/search?q=site:', '') - # cancellare host non utilizzato - host = scrapertools.find_single_match(permUrl, r'
]+)"?>(?P[^<([]+)(?:\[(?P<lang>Sub-ITA|B/N|SUB-ITA)\])?\s*(?:\[(?P<quality>HD|SD|HD/3D)\])?\s*\((?P<year>[0-9]{4})\)<\/a>' @@ -93,7 +94,7 @@ def menu(item): def newest(categoria): support.log(categoria) - + item = support.Item() try: if categoria == "series": @@ -170,36 +171,28 @@ def episodios(item): def findvideos(item): - + if item.contentType == "episode": return findvid_serie(item) - def load_links(itemlist, re_txt, color, desc_txt, quality=""): - streaming = scrapertools.find_single_match(data, re_txt).replace('"', '') - support.log('STREAMING',streaming) - support.log('STREAMING=', streaming) - # patron = '<td><a.*?href=(.*?) (?:target|rel)[^>]+>([^<]+)<' - patron = '<td><a.*?href=([^ ]+) [^>]+>([^<]+)<' - matches = re.compile(patron, re.DOTALL).findall(streaming) - for scrapedurl, scrapedtitle in matches: - logger.debug("##### findvideos %s ## %s ## %s ##" % (desc_txt, scrapedurl, scrapedtitle)) - itemlist.append( - Item(channel=item.channel, - action="play", - title=scrapedtitle, - url=scrapedurl, - server=scrapedtitle, - fulltitle=item.fulltitle, - thumbnail=item.thumbnail, - show=item.show, - quality=quality, - contentType=item.contentType, - folder=False)) + def load_links(urls, re_txt, desc_txt, quality=""): + if re_txt: + streaming = scrapertools.find_single_match(data, re_txt).replace('"', '') + support.log('STREAMING',streaming) + matches = support.match(streaming, patron = r'<td><a.*?href=([^ ]+) [^>]+>[^<]+<').matches + with futures.ThreadPoolExecutor() as executor: + u = [executor.submit(final_links, match) for match in matches] + for res in futures.as_completed(u): + if res.result(): + urls.append(res.result()) + # for url in matches: + # # logger.debug("##### findvideos %s ## %s ## %s ##" % (desc_txt, url, server)) + # urls.append(final_links(url)) support.log() - itemlist = [] + itemlist = urls = [] # Carica la pagina data = httptools.downloadpage(item.url).data @@ -213,22 +206,22 @@ def findvideos(item): QualityStr = scrapertools.decodeHtmlentities(match.group(1)) # Estrae i contenuti - Streaming - load_links(itemlist, '<strong>Streamin?g:</strong>(.*?)cbtable', "orange", "Streaming", "SD") + load_links(urls, '<strong>Streamin?g:</strong>(.*?)cbtable', "Streaming", "SD") # Estrae i contenuti - Streaming HD - load_links(itemlist, '<strong>Streamin?g HD[^<]+</strong>(.*?)cbtable', "yellow", "Streaming HD", "HD") + load_links(urls, '<strong>Streamin?g HD[^<]+</strong>(.*?)cbtable', "Streaming HD", "HD") # Estrae i contenuti - Streaming 3D - load_links(itemlist, '<strong>Streamin?g 3D[^<]+</strong>(.*?)cbtable', "pink", "Streaming 3D") - - itemlist=support.server(item, itemlist=itemlist) + load_links(urls, '<strong>Streamin?g 3D[^<]+</strong>(.*?)cbtable', "Streaming 3D") + + itemlist=support.server(item, urls) if itemlist and QualityStr: itemlist.insert(0, Item(channel=item.channel, action="", - title="[COLOR orange]%s[/COLOR]" % QualityStr, + title=support.typo(QualityStr,'[] color kod bold'), folder=False)) - + return itemlist # Estrae i contenuti - Download @@ -239,68 +232,47 @@ def findvideos(item): def findvid_serie(item): - def load_vid_series(html, item, itemlist, blktxt): - logger.info('HTML' + html) - patron = r'<a href="([^"]+)"[^=]+="_blank"[^>]+>(?!<!--)(.*?)</a>' + def load_vid_series(html, item, urls, blktxt=''): + # logger.info('HTML' + html) + # patron = r'<a href="([^"]+)"[^=]+="_blank"[^>]+>(?!<!--)(.*?)</a>' # Estrae i contenuti - matches = re.compile(patron, re.DOTALL).finditer(html) - for match in matches: - scrapedurl = match.group(1) - 
scrapedtitle = match.group(2) - # title = item.title + " [COLOR blue][" + scrapedtitle + "][/COLOR]" - itemlist.append( - Item(channel=item.channel, - action="play", - title=scrapedtitle, - url=scrapedurl, - server=scrapedtitle, - fulltitle=item.fulltitle, - show=item.show, - contentType=item.contentType, - folder=False)) + # matches = re.compile(patron, re.DOTALL).finditer(html) + matches = support.match(html, patron = r'<a href="([^"]+)"[^=]+="_blank"[^>]+>(?!<!--).*?</a>').matches + with futures.ThreadPoolExecutor() as executor: + u = [executor.submit(final_links, match) for match in matches] + for res in futures.as_completed(u): + if res.result(): + urls.append(res.result()) + # for url, server in matches: + # urls.append(final_links(url)) support.log() itemlist = [] lnkblk = [] lnkblkp = [] + urls = [] data = item.url - # First blocks of links - if data[0:data.find('<a')].find(':') > 0: - lnkblk.append(data[data.find(' - ') + 3:data[0:data.find('<a')].find(':') + 1]) - lnkblkp.append(data.find(' - ') + 3) - else: - lnkblk.append(' ') - lnkblkp.append(data.find('<a')) + # Blocks with split + blk=re.split(r"(?:>\s*)?([A-Za-z\s0-9]*):\s*<",data,re.S) + blktxt="" + for b in blk: + if b[0:3]=="a h" or b[0:4]=="<a h": + load_vid_series("<%s>"%b, item, urls, blktxt) + blktxt="" + elif len(b.strip())>1: + blktxt=b.strip() - # Find new blocks of links - patron = r'<a\s[^>]+>[^<]+</a>([^<]+)' - matches = re.compile(patron, re.DOTALL).finditer(data) - for match in matches: - sep = match.group(1) - if sep != ' - ': - lnkblk.append(sep) - - i = 0 - if len(lnkblk) > 1: - for lb in lnkblk[1:]: - lnkblkp.append(data.find(lb, lnkblkp[i] + len(lnkblk[i]))) - i = i + 1 - - for i in range(0, len(lnkblk)): - if i == len(lnkblk) - 1: - load_vid_series(data[lnkblkp[i]:], item, itemlist, lnkblk[i]) - else: - load_vid_series(data[lnkblkp[i]:lnkblkp[i + 1]], item, itemlist, lnkblk[i]) - - return support.server(item, itemlist=itemlist) + return support.server(item, urls) -def play(item): +def final_links(url): support.log() itemlist = [] + item= Item() + item.url = url ### Handling new cb01 wrapper if host[9:] + "/film/" in item.url: iurl = httptools.downloadpage(item.url, only_headers=True, follow_redirects=False).headers.get("location", "") @@ -324,9 +296,9 @@ def play(item): data, c = unshortenit.unwrap_30x_only(data) else: data = scrapertools.find_single_match(data, r'<a href="([^"]+)".*?class="btn-wrapper">.*?licca.*?</a>') - + logger.debug("##### play go.php data ##\n%s\n##" % data) else: data = support.swzz_get_url(item) - return servertools.find_video_items(data=data) + return data diff --git a/channels/cinemalibero.py b/channels/cinemalibero.py index c3329868..a9743158 100644 --- a/channels/cinemalibero.py +++ b/channels/cinemalibero.py @@ -14,7 +14,10 @@ list_quality = ['default'] def findhost(): permUrl = httptools.downloadpage('https://www.cinemalibero.online/', follow_redirects=False).headers - import urlparse + try: + import urlparse + except: + import urllib.parse as urlparse p = list(urlparse.urlparse(permUrl['location'].replace('https://www.google.com/search?q=site:', ''))) if not p[0]: p[0] = 'https' diff --git a/channels/cinetecadibologna.py b/channels/cinetecadibologna.py index 9bf10c08..cc66d5d5 100644 --- a/channels/cinetecadibologna.py +++ b/channels/cinetecadibologna.py @@ -3,14 +3,7 @@ # Ringraziamo Icarus crew # Canale per cinetecadibologna # ------------------------------------------------------------ - -import re - -import urlparse - -from core import httptools, scrapertools from 
core.item import Item
-from platformcode import logger, config

 from core import support
diff --git a/channels/dreamsub.py b/channels/dreamsub.py
index 55fe3bfe..7fdc5c35 100644
--- a/channels/dreamsub.py
+++ b/channels/dreamsub.py
@@ -110,6 +110,7 @@ def episodios(item):
 def findvideos(item):
     itemlist = []
     support.log()
+    # support.dbg()
     matches = support.match(item, patron=r'href="([^"]+)"', patronBlock=r'<div style="white-space: (.*?)<div id="main-content"')
@@ -118,27 +119,37 @@ def findvideos(item):
         item.contentType = 'tvshow'
         return episodios(item)

-    # matches.matches.sort()
-    support.log('VIDEO')
-    for url in matches.matches:
-        lang = url.split('/')[-2]
-        if 'ita' in lang.lower():
-            language = 'ITA'
-        if 'sub' in lang.lower():
-            language = 'Sub-' + language
-        quality = url.split('/')[-1]
-
+    if 'vvvvid' in matches.data:
         itemlist.append(
-            support.Item(channel=item.channel,
-                         action="play",
-                         contentType=item.contentType,
-                         title=language,
-                         url=url,
-                         contentLanguage = language,
-                         quality = quality,
-                         order = quality.replace('p','').zfill(4),
-                         server='directo',
-                         ))
+            support.Item(channel=item.channel,
+                         action="play",
+                         contentType=item.contentType,
+                         title='vvvvid',
+                         url=support.match(matches.data, patron=r'(http://www.vvvvid[^"]+)').match,
+                         server='vvvvid',
+                         ))
+    else:
+        # matches.matches.sort()
+        support.log('VIDEO')
+        for url in matches.matches:
+            lang = url.split('/')[-2]
+            if 'ita' in lang.lower():
+                language = 'ITA'
+            if 'sub' in lang.lower():
+                language = 'Sub-' + language
+            quality = url.split('/')[-1]
+
+            itemlist.append(
+                support.Item(channel=item.channel,
+                             action="play",
+                             contentType=item.contentType,
+                             title=language,
+                             url=url,
+                             contentLanguage = language,
+                             quality = quality,
+                             order = quality.replace('p','').zfill(4),
+                             server='directo',
+                             ))

     itemlist.sort(key=lambda x: (x.title, x.order), reverse=False)
     return support.server(item, itemlist=itemlist)
\ No newline at end of file
diff --git a/channels/dsda.py b/channels/dsda.py
index d1a2a4f3..0586be06 100644
--- a/channels/dsda.py
+++ b/channels/dsda.py
@@ -3,10 +3,8 @@
 # Ringraziamo Icarus crew
 # Canale per documentaristreamingda
 # ------------------------------------------------------------
-import re
-import urlparse
-from core import httptools, scrapertools, servertools, support
+from core import support
 from core.item import Item
 from platformcode import logger, config
diff --git a/channels/filmsenzalimiticc.py b/channels/filmsenzalimiticc.py
index 2f3fd290..689a9a1a 100644
--- a/channels/filmsenzalimiticc.py
+++ b/channels/filmsenzalimiticc.py
@@ -4,7 +4,10 @@
 # ------------------------------------------------------------
 import re
-import urlparse
+try:
+    import urlparse
+except:
+    import urllib.parse as urlparse

 from core import scrapertools, servertools, httptools
 from core import tmdb
diff --git a/channelselector.py b/channelselector.py
index ebafe694..bbc7f200 100644
--- a/channelselector.py
+++ b/channelselector.py
@@ -69,7 +69,6 @@ def getmainlist(view="thumb_"):
     itemlist.append(Item(title=config.get_localized_string(30100), channel="setting", action="mainlist",
                          thumbnail=get_thumb(thumb_setting, view), category=config.get_localized_string(30100),
                          viewmode="list"))
-
     itemlist.append(Item(title=config.get_localized_string(30104) + " (v" + config.get_addon_version(with_fix=True) + ")",
                          channel="help", action="mainlist", thumbnail=get_thumb("help.png", view),
                          category=config.get_localized_string(30104), viewmode="list"))
@@ -109,7 +108,7 @@
itemlist.append(Item(title=config.get_localized_string(70685), channel="community", action="mainlist", view=view, - category=title, channel_type="all", thumbnail=get_thumb("channels_community.png", view), + category=config.get_localized_string(70685), channel_type="all", thumbnail=get_thumb("channels_community.png", view), viewmode="thumbnails")) return itemlist @@ -150,9 +149,9 @@ def filterchannels(category, view="thumb_"): if channel_parameters["channel"] == 'community': continue - # si el canal no es compatible, no se muestra - if not channel_parameters["compatible"]: - continue + # # si el canal no es compatible, no se muestra + # if not channel_parameters["compatible"]: + # continue # Si no es un canal lo saltamos if not channel_parameters["channel"]: diff --git a/core/channeltools.py b/core/channeltools.py index bc4c0f6f..2e9a1ddd 100644 --- a/core/channeltools.py +++ b/core/channeltools.py @@ -3,10 +3,9 @@ # channeltools - Herramientas para trabajar con canales # ------------------------------------------------------------ -import os - -import jsontools +from __future__ import absolute_import +from core import jsontools from platformcode import config, logger DEFAULT_UPDATE_URL = "/channels/" @@ -14,6 +13,7 @@ dict_channels_parameters = dict() remote_path = 'https://raw.githubusercontent.com/kodiondemand/media/master/' + def is_adult(channel_name): logger.info("channel_name=" + channel_name) channel_parameters = get_channel_parameters(channel_name) @@ -27,6 +27,7 @@ def is_enabled(channel_name): def get_channel_parameters(channel_name): + from core import filetools global dict_channels_parameters if channel_name not in dict_channels_parameters: @@ -35,20 +36,22 @@ def get_channel_parameters(channel_name): # logger.debug(channel_parameters) if channel_parameters: # cambios de nombres y valores por defecto - channel_parameters["title"] = channel_parameters.pop("name") + (' [DEPRECATED]' if channel_parameters.has_key('deprecated') and channel_parameters['deprecated'] else '') + channel_parameters["title"] = channel_parameters.pop("name") + (' [DEPRECATED]' if 'deprecated' in channel_parameters and channel_parameters['deprecated'] else '') channel_parameters["channel"] = channel_parameters.pop("id") # si no existe el key se declaran valor por defecto para que no de fallos en las funciones que lo llaman channel_parameters["adult"] = channel_parameters.get("adult", False) logger.info(channel_parameters["adult"]) if channel_parameters["adult"]: - channel_parameters["update_url"] = channel_parameters.get("update_url", DEFAULT_UPDATE_URL+'porn/') + channel_parameters["update_url"] = channel_parameters.get("update_url", + DEFAULT_UPDATE_URL + 'porn/') else: channel_parameters["update_url"] = channel_parameters.get("update_url", DEFAULT_UPDATE_URL) channel_parameters["language"] = channel_parameters.get("language", ["all"]) -## channel_parameters["adult"] = channel_parameters.get("adult", False) + ## channel_parameters["adult"] = channel_parameters.get("adult", False) channel_parameters["active"] = channel_parameters.get("active", False) - channel_parameters["include_in_global_search"] = channel_parameters.get("include_in_global_search", False) + channel_parameters["include_in_global_search"] = channel_parameters.get("include_in_global_search", + False) channel_parameters["categories"] = channel_parameters.get("categories", list()) channel_parameters["thumbnail"] = channel_parameters.get("thumbnail", "") @@ -57,57 +60,27 @@ def get_channel_parameters(channel_name): # Imagenes: se admiten url 
y archivos locales dentro de "resources/images" if channel_parameters.get("thumbnail") and "://" not in channel_parameters["thumbnail"]: - channel_parameters["thumbnail"] = os.path.join(remote_path, 'resources', "thumb", channel_parameters["thumbnail"]) + channel_parameters["thumbnail"] = filetools.join(remote_path, "resources", "thumb", channel_parameters["thumbnail"]) if channel_parameters.get("banner") and "://" not in channel_parameters["banner"]: - channel_parameters["banner"] = os.path.join(remote_path, 'resources', "banner", channel_parameters["banner"]) + channel_parameters["banner"] = filetools.join(remote_path, "resources", "banner", channel_parameters["banner"]) if channel_parameters.get("fanart") and "://" not in channel_parameters["fanart"]: - channel_parameters["fanart"] = os.path.join(remote_path, 'resources', "fanart", channel_parameters["fanart"]) + channel_parameters["fanart"] = filetools.join(remote_path, "resources", channel_parameters["fanart"]) # Obtenemos si el canal tiene opciones de configuración channel_parameters["has_settings"] = False if 'settings' in channel_parameters: - # if not isinstance(channel_parameters['settings'], list): - # channel_parameters['settings'] = [channel_parameters['settings']] - - # if "include_in_global_search" in channel_parameters['settings']: - # channel_parameters["include_in_global_search"] = channel_parameters['settings'] - # ["include_in_global_search"].get('default', False) - # - # found = False - # for el in channel_parameters['settings']: - # for key in el.items(): - # if 'include_in' not in key: - # channel_parameters["has_settings"] = True - # found = True - # break - # if found: - # break channel_parameters['settings'] = get_default_settings(channel_name) for s in channel_parameters['settings']: if 'id' in s: if s['id'] == "include_in_global_search": channel_parameters["include_in_global_search"] = True elif s['id'] == "filter_languages": - channel_parameters["filter_languages"] = s.get('lvalues',[]) + channel_parameters["filter_languages"] = s.get('lvalues', []) elif s['id'].startswith("include_in_"): channel_parameters["has_settings"] = True del channel_parameters['settings'] - # Compatibilidad - if 'compatible' in channel_parameters: - # compatible python - python_compatible = True - if 'python' in channel_parameters["compatible"]: - import sys - python_condition = channel_parameters["compatible"]['python'] - if sys.version_info < tuple(map(int, (python_condition.split(".")))): - python_compatible = False - - channel_parameters["compatible"] = python_compatible - else: - channel_parameters["compatible"] = True - dict_channels_parameters[channel_name] = channel_parameters else: @@ -115,13 +88,12 @@ def get_channel_parameters(channel_name): # lanzamos la excepcion y asi tenemos los valores básicos raise Exception - except Exception, ex: + except Exception as ex: logger.error(channel_name + ".json error \n%s" % ex) channel_parameters = dict() channel_parameters["channel"] = "" channel_parameters["adult"] = False channel_parameters['active'] = False - channel_parameters["compatible"] = True channel_parameters["language"] = "" channel_parameters["update_url"] = DEFAULT_UPDATE_URL return channel_parameters @@ -131,25 +103,26 @@ def get_channel_parameters(channel_name): def get_channel_json(channel_name): # logger.info("channel_name=" + channel_name) - import filetools + from core import filetools channel_json = None try: channel_path = filetools.join(config.get_runtime_path(), "channels", channel_name + ".json") - if not 
os.path.isfile(channel_path): + if not filetools.isfile(channel_path): channel_path = filetools.join(config.get_runtime_path(), 'channels', "porn", channel_name + ".json") - if not os.path.isfile(channel_path): + if not filetools.isfile(channel_path): channel_path = filetools.join(config.get_runtime_path(), "specials", channel_name + ".json") - if not os.path.isfile(channel_path): + if not filetools.isfile(channel_path): channel_path = filetools.join(config.get_runtime_path(), "servers", channel_name + ".json") - if not os.path.isfile(channel_path): - channel_path = filetools.join(config.get_runtime_path(), "servers", "debriders", channel_name + ".json") + if not filetools.isfile(channel_path): + channel_path = filetools.join(config.get_runtime_path(), "servers", "debriders", + channel_name + ".json") if filetools.isfile(channel_path): # logger.info("channel_data=" + channel_path) channel_json = jsontools.load(filetools.read(channel_path)) # logger.info("channel_json= %s" % channel_json) - except Exception, ex: + except Exception as ex: template = "An exception of type %s occured. Arguments:\n%r" message = template % (type(ex).__name__, ex.args) logger.error(" %s" % message) @@ -174,6 +147,7 @@ def get_channel_controls_settings(channel_name): return list_controls, dict_settings + def get_lang(channel_name): channel = __import__('channels.%s' % channel_name, fromlist=["channels.%s" % channel_name]) list_language = [config.get_localized_string(70522)] @@ -199,16 +173,17 @@ def get_lang(channel_name): list_language.append(lang) return list_language + def get_default_settings(channel_name): - import filetools + from core import filetools default_path = filetools.join(config.get_runtime_path(), 'default_channel_settings' + '.json') default_file = jsontools.load(filetools.read(default_path)) - channel_path = filetools.join(config.get_runtime_path(),'channels',channel_name + '.json') - adult_path = filetools.join(config.get_runtime_path(),'channels', 'porn', channel_name + '.json') + channel_path = filetools.join(config.get_runtime_path(), 'channels', channel_name + '.json') + adult_path = filetools.join(config.get_runtime_path(), 'channels', 'porn', channel_name + '.json') # from core.support import dbg; dbg() - if os.path.exists(channel_path) or os.path.exists(adult_path): + if filetools.exists(channel_path) or filetools.exists(adult_path): default_controls = default_file['settings'] default_controls_renumber = default_file['renumber'] channel_json = get_channel_json(channel_name) @@ -217,33 +192,43 @@ def get_default_settings(channel_name): channel_language = channel_json['language'] channel_controls = channel_json['settings'] categories = channel_json['categories'] - not_active = channel_json['not_active'] if channel_json.has_key('not_active') else [] - default_off = channel_json['default_off'] if channel_json.has_key('default_off') else [] + not_active = channel_json['not_active'] if 'not_active' in channel_json else [] + default_off = channel_json['default_off'] if 'default_off' in channel_json else [] # Apply default configurations if they do not exist for control in default_controls: if control['id'] not in str(channel_controls): - if 'include_in_newest' in control['id'] and 'include_in_newest' not in not_active and control['id'] not in not_active: + if 'include_in_newest' in control['id'] and 'include_in_newest' not in not_active and control[ + 'id'] not in not_active: label = control['id'].split('_') label = label[-1] if label == 'peliculas': if 'movie' in categories: - control['label'] 
= config.get_localized_string(70727) + ' - ' + config.get_localized_string(30122) - control['default'] = False if ('include_in_newest' in default_off) or ('include_in_newest_peliculas' in default_off) else True + control['label'] = config.get_localized_string(70727) + ' - ' + config.get_localized_string( + 30122) + control['default'] = False if ('include_in_newest' in default_off) or ( + 'include_in_newest_peliculas' in default_off) else True channel_controls.append(control) - else: pass + else: + pass elif label == 'series': if 'tvshow' in categories: - control['label'] = config.get_localized_string(70727) + ' - ' + config.get_localized_string(30123) - control['default'] = False if ('include_in_newest' in default_off) or ('include_in_newest_series' in default_off) else True + control['label'] = config.get_localized_string(70727) + ' - ' + config.get_localized_string( + 30123) + control['default'] = False if ('include_in_newest' in default_off) or ( + 'include_in_newest_series' in default_off) else True channel_controls.append(control) - else: pass + else: + pass elif label == 'anime': if 'anime' in categories: - control['label'] = config.get_localized_string(70727) + ' - ' + config.get_localized_string(30124) - control['default'] = False if ('include_in_newest' in default_off) or ('include_in_newest_anime' in default_off) else True + control['label'] = config.get_localized_string(70727) + ' - ' + config.get_localized_string( + 30124) + control['default'] = False if ('include_in_newest' in default_off) or ( + 'include_in_newest_anime' in default_off) else True channel_controls.append(control) - else: pass + else: + pass else: control['label'] = config.get_localized_string(70727) + ' - ' + label.capitalize() @@ -259,13 +244,15 @@ def get_default_settings(channel_name): for control in default_controls_renumber: if control['id'] not in str(channel_controls): channel_controls.append(control) - else: pass + else: + pass else: return get_channel_json(channel_name).get('settings', list()) return channel_controls def get_channel_setting(name, channel, default=None): + from core import filetools """ Retorna el valor de configuracion del parametro solicitado. 
@@ -288,13 +275,15 @@ def get_channel_setting(name, channel, default=None): @rtype: any """ - file_settings = os.path.join(config.get_data_path(), "settings_channels", channel + "_data.json") + file_settings = filetools.join(config.get_data_path(), "settings_channels", channel + "_data.json") dict_settings = {} dict_file = {} - if os.path.exists(file_settings): + if channel not in ['trakt']: def_settings = get_default_settings(channel) + + if filetools.exists(file_settings): # Obtenemos configuracion guardada de ../settings/channel_data.json try: - dict_file = jsontools.load(open(file_settings, "rb").read()) + dict_file = jsontools.load(filetools.read(file_settings)) if isinstance(dict_file, dict) and 'settings' in dict_file: dict_settings = dict_file['settings'] except EnvironmentError: @@ -313,9 +302,7 @@ def get_channel_setting(name, channel, default=None): dict_file['settings'] = dict_settings # Creamos el archivo ../settings/channel_data.json json_data = jsontools.dump(dict_file) - try: - open(file_settings, "wb").write(json_data) - except EnvironmentError: + if not filetools.write(file_settings, json_data, silent=True): logger.error("ERROR al salvar el archivo: %s" % file_settings) # Devolvemos el valor del parametro local 'name' si existe, si no se devuelve default @@ -323,7 +310,7 @@ def get_channel_setting(name, channel, default=None): def set_channel_setting(name, value, channel): - import filetools + from core import filetools """ Fija el valor de configuracion del parametro indicado. @@ -346,36 +333,22 @@ def set_channel_setting(name, value, channel): """ # Creamos la carpeta si no existe - if not os.path.exists(os.path.join(config.get_data_path(), "settings_channels")): - os.mkdir(os.path.join(config.get_data_path(), "settings_channels")) + if not filetools.exists(filetools.join(config.get_data_path(), "settings_channels")): + filetools.mkdir(filetools.join(config.get_data_path(), "settings_channels")) - file_settings = os.path.join(config.get_data_path(), "settings_channels", channel + "_data.json") + file_settings = filetools.join(config.get_data_path(), "settings_channels", channel + "_data.json") dict_settings = {} - if channel not in ['trakt']: def_settings = get_default_settings(channel) dict_file = None - if os.path.exists(file_settings): + if filetools.exists(file_settings): # Obtenemos configuracion guardada de ../settings/channel_data.json try: - dict_file = jsontools.load(open(file_settings, "r").read()) + dict_file = jsontools.load(filetools.read(file_settings)) dict_settings = dict_file.get('settings', {}) except EnvironmentError: logger.error("ERROR al leer el archivo: %s" % file_settings) - if os.path.isfile(filetools.join(config.get_runtime_path(), "channels", channel + ".json")): - - # delete unused Settings - def_keys = [] - del_keys = [] - for key in def_settings: - def_keys.append(key['id']) - for key in dict_settings: - if key not in def_keys: - del_keys.append(key) - for key in del_keys: - del dict_settings[key] - dict_settings[name] = value # comprobamos si existe dict_file y es un diccionario, sino lo creamos @@ -385,10 +358,8 @@ def set_channel_setting(name, value, channel): dict_file['settings'] = dict_settings # Creamos el archivo ../settings/channel_data.json - try: - json_data = jsontools.dump(dict_file) - open(file_settings, "w").write(json_data) - except EnvironmentError: + json_data = jsontools.dump(dict_file) + if not filetools.write(file_settings, json_data, silent=True): logger.error("ERROR al salvar el archivo: %s" % file_settings) return 
None

diff --git a/core/downloader.py b/core/downloader.py
index 564716e2..a0bb4b51 100644
--- a/core/downloader.py
+++ b/core/downloader.py
@@ -20,8 +20,8 @@ metodos:
 from __future__ import division
 from future import standard_library
 standard_library.install_aliases()
-from future.builtins import range
-from future.builtins import object
+from builtins import range
+from builtins import object
 from past.utils import old_div
 #from builtins import str
 import sys
@@ -243,7 +243,7 @@ class Downloader(object):
         # Abrimos en modo "a+" para que cree el archivo si no existe, luego en modo "r+b" para poder hacer seek()
         self.file = filetools.file_open(filetools.join(self._path, self._filename), "a+", vfs=VFS)
-        if self.file: self.file.close()
+        if self.file: self.file.close()
         self.file = filetools.file_open(filetools.join(self._path, self._filename), "r+b", vfs=VFS)
         if not self.file: return
@@ -258,7 +258,7 @@
         self.__get_download_info__()

         try:
-            logger.info("Initialized Download: Parts: %s | Path: %s | Archive: %s | Size: %s" % \
+            logger.info("Download initialized: Parts: %s | Path: %s | File: %s | Size: %s" % \
                         (str(len(self._download_info["parts"])), self._path.encode('utf-8'), \
                         self._filename.encode('utf-8'), str(self._download_info["size"])))
         except:
diff --git a/core/downloadtools.py b/core/downloadtools.py
index 8d1556a2..628b9bcb 100644
--- a/core/downloadtools.py
+++ b/core/downloadtools.py
@@ -110,6 +110,8 @@ def limpia_nombre_excepto_1(s):
     stripped = ''.join(c for c in s if c in validchars)
     # Convierte a iso
     s = stripped.encode("iso-8859-1")
+    if PY3:
+        s = s.decode('utf-8')
     return s
@@ -129,7 +131,7 @@ def getfilefromtitle(url, title):
     logger.info("platform=" + plataforma)

     # nombrefichero = xbmc.makeLegalFilename(title + url[-4:])
-    from .
import scrapertools
+    from core import scrapertools
     nombrefichero = title + scrapertools.get_filename_from_url(url)[-4:]
     logger.info("filename=%s" % nombrefichero)
@@ -169,7 +171,10 @@ def downloadbest(video_urls, title, continuar=False):
     for elemento in invertida:
         # videotitle = elemento[0]
         url = elemento[1]
-        logger.info("Downloading option " + title + " " + url.encode('ascii', 'ignore'))
+        if not PY3:
+            logger.info("Downloading option " + title + " " + url.encode('ascii', 'ignore'))
+        else:
+            logger.info("Downloading option " + title + " " + url.encode('ascii', 'ignore').decode('utf-8'))

         # Calcula el fichero donde debe grabar
         try:
@@ -621,7 +626,7 @@ def downloadfileGzipped(url, pathfichero):
             break
         except:
             reintentos += 1
-            logger.info("ERROR in block download, retry %dd" % reintentos)
+            logger.info("ERROR in block download, retry %d" % reintentos)
             for line in sys.exc_info():
                 logger.error("%s" % line)
@@ -660,7 +665,7 @@ def GetTitleFromFile(title):
     # Imprime en el log lo que va a descartar
     logger.info("title=" + title)
     plataforma = config.get_system_platform()
-    logger.info("platform=" + plataforma)
+    logger.info("platform=" + plataforma)
     # nombrefichero = xbmc.makeLegalFilename(title + url[-4:])
     nombrefichero = title
@@ -678,7 +683,7 @@ def downloadIfNotModifiedSince(url, timestamp):
     # Convierte la fecha a GMT
     fecha_formateada = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime(timestamp))
-    logger.info("DateFormat=%s" % fecha_formateada)
+    logger.info("DateFormat=%s" % fecha_formateada)
     # Comprueba si ha cambiado
     inicio = time.clock()
@@ -700,11 +705,11 @@
     except urllib.error.URLError as e:
         # Si devuelve 304 es que no ha cambiado
         if hasattr(e, 'code'):
-            logger.info("HTTP response code: %d" % e.code)
+            logger.info("HTTP response code: %d" % e.code)
             if e.code == 304:
                 logger.info("It has not changed")
                 updated = False
-        # Agarra los errores con codigo de respuesta del servidor externo solicitado
+        # Agarra los errores con codigo de respuesta del servidor externo solicitado
         else:
             for line in sys.exc_info():
                 logger.error("%s" % line)
@@ -814,6 +819,7 @@ def download_all_episodes(item, channel, first_episode="", preferred_server="vid
     for mirror_item in mirrors_itemlist:
         logger.info("mirror=" + mirror_item.title)
+
         if "(Italiano)" in mirror_item.title:
             idioma = "(Italiano)"
             codigo_idioma = "it"
@@ -885,8 +891,8 @@

 def episodio_ya_descargado(show_title, episode_title):
-    from .
import scrapertools
-    ficheros = os.listdir(".")
+    from core import scrapertools
+    ficheros = filetools.listdir(".")

     for fichero in ficheros:
         # logger.info("fichero="+fichero)
diff --git a/core/filetools.py b/core/filetools.py
index 6a1d5500..3b97063c 100644
--- a/core/filetools.py
+++ b/core/filetools.py
@@ -87,7 +87,7 @@ def encode(path, _samba=False):
         if scrapertools.find_single_match(path, '(^\w+:\/\/)') or _samba:
             path = path.encode("utf-8", "ignore")
         else:
-            if fs_encoding:
+            if fs_encoding and not PY3:
                 path = path.encode(fs_encoding, "ignore")

     return path
@@ -133,13 +133,13 @@ def read(path, linea_inicio=0, total_lineas=None, whence=0, silent=False, vfs=Tr
         try:
             linea_inicio = int(linea_inicio)
         except:
-            logger.error('Read: ERROR de linea_inicio: %s' % str(linea_inicio))
+            logger.error('Read: invalid linea_inicio: %s' % str(linea_inicio))
             linea_inicio = 0
     if total_lineas != None and not isinstance(total_lineas, int):
         try:
             total_lineas = int(total_lineas)
         except:
-            logger.error('Read: ERROR de total_lineas: %s' % str(total_lineas))
+            logger.error('Read: invalid total_lineas: %s' % str(total_lineas))
             total_lineas = None
     if xbmc_vfs and vfs:
         if not exists(path): return False
@@ -151,7 +151,7 @@
             except:
                 return False
             f.seek(linea_inicio, whence)
-            logger.debug('POSICIÓN de comienzo de lectura, tell(): %s' % f.seek(0, 1))
+            logger.debug('Read start POSITION, tell(): %s' % f.seek(0, 1))
             if total_lineas == None: total_lineas = 0
             data = f.read(total_lineas)
@@ -169,15 +169,15 @@
             f.close()
     except:
         if not silent:
-            logger.error("ERROR al leer el archivo: %s" % path)
+            logger.error("ERROR reading file: %s" % path)
             logger.error(traceback.format_exc())
         return False
     else:
         if not PY3:
-            return "".join(data)
+            return unicode("".join(data))
         else:
-            return b"".join(data)
+            return unicode(b"".join(data))


 def write(path, data, mode="wb", silent=False, vfs=True):
@@ -226,20 +226,20 @@ def file_open(path, mode="r", silent=False, vfs=True):
         if xbmc_vfs and vfs:
             if 'r' in mode and '+' in mode:
                 mode = mode.replace('r', 'w').replace('+', '')
-                logger.debug('Open MODE cambiado a: %s' % mode)
+                logger.debug('Open MODE changed to: %s' % mode)
             if 'a' in mode:
                 mode = mode.replace('a', 'w').replace('+', '')
-                logger.debug('Open MODE cambiado a: %s' % mode)
+                logger.debug('Open MODE changed to: %s' % mode)
             return xbmcvfs.File(path, mode)
         elif path.lower().startswith("smb://"):
             return samba.smb_open(path, mode)
         else:
             return open(path, mode)
     except:
-        logger.error("ERROR al abrir el archivo: %s, %s" % (path, mode))
+        logger.error("ERROR when opening file: %s, %s" % (path, mode))
         if not silent:
             logger.error(traceback.format_exc())
-            platformtools.dialog_notification("Error al abrir", path)
+            platformtools.dialog_notification("Error opening", path)
         return False
@@ -258,7 +258,7 @@ def file_stat(path, silent=False, vfs=True):
             return xbmcvfs.Stat(path)
         raise
     except:
-        logger.error("File_Stat no soportado: %s" % path)
+        logger.error("File_Stat not supported: %s" % path)
         if not silent:
             logger.error(traceback.format_exc())
         return False
@@ -283,9 +283,9 @@ def rename(path, new_name, silent=False, strict=False, vfs=True):
             dest = encode(join(dirname(path_end), new_name))
             result = xbmcvfs.rename(path, dest)
             if not result and not strict:
-                logger.error("ERROR al RENOMBRAR el archivo: %s. Copiando y borrando" % path)
+                logger.error("ERROR renaming file: %s.
Copying and deleting" % path) if not silent: - dialogo = platformtools.dialog_progress("Copiando archivo", "") + dialogo = platformtools.dialog_progress("Copying file", "") result = xbmcvfs.copy(path, dest) if not result: return False @@ -298,10 +298,10 @@ def rename(path, new_name, silent=False, strict=False, vfs=True): new_name = encode(new_name, False) os.rename(path, os.path.join(os.path.dirname(path), new_name)) except: - logger.error("ERROR al renombrar el archivo: %s" % path) + logger.error("ERROR when renaming the file: %s" % path) if not silent: logger.error(traceback.format_exc()) - platformtools.dialog_notification("Error al renombrar", path) + platformtools.dialog_notification("Error renaming", path) return False else: return True @@ -324,9 +324,9 @@ def move(path, dest, silent=False, strict=False, vfs=True): dest = encode(dest) result = xbmcvfs.rename(path, dest) if not result and not strict: - logger.error("ERROR al MOVER el archivo: %s. Copiando y borrando" % path) + logger.error("ERROR when MOVING the file: %s. Copying and deleting" % path) if not silent: - dialogo = platformtools.dialog_progress("Copiando archivo", "") + dialogo = platformtools.dialog_progress("Copying file", "") result = xbmcvfs.copy(path, dest) if not result: return False @@ -349,7 +349,7 @@ def move(path, dest, silent=False, strict=False, vfs=True): dialogo = platformtools.dialog_progress("Copiando archivo", "") return copy(path, dest) == True and remove(path) == True except: - logger.error("ERROR al mover el archivo: %s a %s" % (path, dest)) + logger.error("ERROR when moving file: %s to %s" % (path, dest)) if not silent: logger.error(traceback.format_exc()) return False @@ -376,7 +376,7 @@ def copy(path, dest, silent=False, vfs=True): if not silent: dialogo = platformtools.dialog_progress("Copiando archivo", "") return bool(xbmcvfs.copy(path, dest)) - + fo = file_open(path, "rb") fd = file_open(dest, "wb") if fo and fd: @@ -398,7 +398,7 @@ def copy(path, dest, silent=False, vfs=True): if not silent: dialogo.close() except: - logger.error("ERROR al copiar el archivo: %s" % path) + logger.error("ERROR when copying the file: %s" % path) if not silent: logger.error(traceback.format_exc()) return False @@ -420,13 +420,13 @@ def exists(path, silent=False, vfs=True): result = bool(xbmcvfs.exists(path)) if not result and not path.endswith('/') and not path.endswith('\\'): result = bool(xbmcvfs.exists(join(path, ' ').rstrip())) - return result + return result elif path.lower().startswith("smb://"): return samba.exists(path) else: return os.path.exists(path) except: - logger.error("ERROR al comprobar la ruta: %s" % path) + logger.error("ERROR when checking the path: %s" % path) if not silent: logger.error(traceback.format_exc()) return False @@ -458,7 +458,7 @@ def isfile(path, silent=False, vfs=True): else: return os.path.isfile(path) except: - logger.error("ERROR al comprobar el archivo: %s" % path) + logger.error("ERROR when checking file: %s" % path) if not silent: logger.error(traceback.format_exc()) return False @@ -490,7 +490,7 @@ def isdir(path, silent=False, vfs=True): else: return os.path.isdir(path) except: - logger.error("ERROR al comprobar el directorio: %s" % path) + logger.error("ERROR when checking the directory: %s" % path) if not silent: logger.error(traceback.format_exc()) return False @@ -517,7 +517,7 @@ def getsize(path, silent=False, vfs=True): else: return os.path.getsize(path) except: - logger.error("ERROR al obtener el tamaño: %s" % path) + logger.error("ERROR when getting the size: %s" 
% path) if not silent: logger.error(traceback.format_exc()) return long(0) @@ -540,10 +540,10 @@ def remove(path, silent=False, vfs=True): else: os.remove(path) except: - logger.error("ERROR al eliminar el archivo: %s" % path) + logger.error("ERROR deleting file: %s" % path) if not silent: logger.error(traceback.format_exc()) - platformtools.dialog_notification("Error al eliminar el archivo", path) + platformtools.dialog_notification("ERROR deleting file", path) return False else: return True @@ -580,10 +580,10 @@ def rmdirtree(path, silent=False, vfs=True): import shutil shutil.rmtree(path, ignore_errors=True) except: - logger.error("ERROR al eliminar el directorio: %s" % path) + logger.error("ERROR deleting directory: %s" % path) if not silent: logger.error(traceback.format_exc()) - platformtools.dialog_notification("Error al eliminar el directorio", path) + platformtools.dialog_notification("ERROR deleting directory", path) return False else: return not exists(path) @@ -608,10 +608,10 @@ def rmdir(path, silent=False, vfs=True): else: os.rmdir(path) except: - logger.error("ERROR al eliminar el directorio: %s" % path) + logger.error("ERROR deleting directory: %s" % path) if not silent: logger.error(traceback.format_exc()) - platformtools.dialog_notification("Error al eliminar el directorio", path) + platformtools.dialog_notification("ERROR deleting directory", path) return False else: return True @@ -641,10 +641,10 @@ def mkdir(path, silent=False, vfs=True): else: os.mkdir(path) except: - logger.error("ERROR al crear el directorio: %s" % path) + logger.error("ERROR when creating directory: %s" % path) if not silent: logger.error(traceback.format_exc()) - platformtools.dialog_notification("Error al crear el directorio", path) + platformtools.dialog_notification("ERROR when creating directory", path) return False else: return True @@ -724,7 +724,7 @@ def listdir(path, silent=False, vfs=True): else: return decode(os.listdir(path)) except: - logger.error("ERROR al leer el directorio: %s" % path) + logger.error("ERROR when reading the directory: %s" % path) if not silent: logger.error(traceback.format_exc()) return False @@ -740,14 +740,13 @@ def join(*paths): list_path = [] if paths[0].startswith("/"): list_path.append("") - for path in paths: if path: - if xbmc_vfs: + if xbmc_vfs and type(path) != str: path = encode(path) list_path += path.replace("\\", "/").strip("/").split("/") - if scrapertools.find_single_match(paths[0], '(^\w+:\/\/)'): + if scrapertools.find_single_match(paths[0], r'(^\w+:\/\/)'): return str("/".join(list_path)) else: return str(os.sep.join(list_path)) @@ -812,8 +811,8 @@ def remove_tags(title): return title_without_tags else: return title - - + + def remove_smb_credential(path): """ devuelve el path sin contraseña/usuario para paths de SMB @@ -823,10 +822,10 @@ def remove_smb_credential(path): @rtype: str """ logger.info() - + if not scrapertools.find_single_match(path, '(^\w+:\/\/)'): return path - + protocol = scrapertools.find_single_match(path, '(^\w+:\/\/)') path_without_credentials = scrapertools.find_single_match(path, '^\w+:\/\/(?:[^;\n]+;)?(?:[^:@\n]+[:|@])?(?:[^@\n]+@)?(.*?$)') diff --git a/core/httptools.py b/core/httptools.py index 8384bb23..163bc8d8 100755 --- a/core/httptools.py +++ b/core/httptools.py @@ -31,7 +31,7 @@ cookies_file = os.path.join(config.get_data_path(), "cookies.dat") default_headers = dict() default_headers["User-Agent"] = "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36" default_headers["Accept"] = 
"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8" -default_headers["Accept-Language"] = "es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3" +default_headers["Accept-Language"] = "it-IT,it;q=0.8,en-US;q=0.5,en;q=0.3" default_headers["Accept-Charset"] = "UTF-8" default_headers["Accept-Encoding"] = "gzip" @@ -255,13 +255,11 @@ def downloadpage(url, **opt): domain = urlparse.urlparse(url).netloc CF = False if domain in ['www.guardaserie.media', 'casacinema.space', 'wstream.video', 'akvideo.stream', 'backin.net', - 'dreamsub.stream', 'altadefinizione-nuovo.link', 'ilgeniodellostreaming.si', 'www.piratestreaming.gratis']: + 'dreamsub.stream', 'altadefinizione-nuovo.link', 'ilgeniodellostreaming.si', 'www.piratestreaming.gratis', + 'altadefinizione.style']: from lib import cloudscraper session = cloudscraper.create_scraper() CF = True - elif opt.get('session', False): - session = opt['session'] # same session to speed up search - logger.info('same session') else: from lib import requests session = requests.session() @@ -360,6 +358,7 @@ def downloadpage(url, **opt): timeout=opt['timeout']) except Exception as e: from lib import requests + req = requests.Response() if not opt.get('ignore_response_code', False) and not proxy_data.get('stat', ''): response['data'] = '' response['sucess'] = False @@ -371,7 +370,6 @@ def downloadpage(url, **opt): show_infobox(info_dict) return type('HTTPResponse', (), response) else: - req = requests.Response() req.status_code = str(e) else: @@ -384,6 +382,10 @@ def downloadpage(url, **opt): response['data'] = req.content response['url'] = req.url + + if type(response['data']) != str: + response['data'] = response['data'].decode('UTF-8') + if not response['data']: response['data'] = '' try: diff --git a/core/item.py b/core/item.py index 10d1a83c..0c93a18e 100644 --- a/core/item.py +++ b/core/item.py @@ -3,12 +3,23 @@ # Item is the object we use for representing data # -------------------------------------------------------------------------------- +#from builtins import str +from future.builtins import object +import sys +PY3 = False +if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int + +if PY3: + #from future import standard_library + #standard_library.install_aliases() + import urllib.parse as urllib # Es muy lento en PY2. 
En PY3 es nativo + from html.parser import HTMLParser +else: + import urllib # Usamos el nativo de PY2 que es más rápido + from HTMLParser import HTMLParser + import base64 import copy -import os -import urllib - -from HTMLParser import HTMLParser from core import jsontools as json @@ -58,12 +69,12 @@ class InfoLabels(dict): elif key == 'code': code = [] # Añadir imdb_id al listado de codigos - if 'imdb_id' in super(InfoLabels, self).keys() and super(InfoLabels, self).__getitem__('imdb_id'): + if 'imdb_id' in list(super(InfoLabels, self).keys()) and super(InfoLabels, self).__getitem__('imdb_id'): code.append(super(InfoLabels, self).__getitem__('imdb_id')) # Completar con el resto de codigos for scr in ['tmdb_id', 'tvdb_id', 'noscrap_id']: - if scr in super(InfoLabels, self).keys() and super(InfoLabels, self).__getitem__(scr): + if scr in list(super(InfoLabels, self).keys()) and super(InfoLabels, self).__getitem__(scr): value = "%s%s" % (scr[:-2], super(InfoLabels, self).__getitem__(scr)) code.append(value) @@ -78,21 +89,21 @@ class InfoLabels(dict): elif key == 'mediatype': # "list", "movie", "tvshow", "season", "episode" - if 'tvshowtitle' in super(InfoLabels, self).keys() \ + if 'tvshowtitle' in list(super(InfoLabels, self).keys()) \ and super(InfoLabels, self).__getitem__('tvshowtitle') != "": - if 'episode' in super(InfoLabels, self).keys() and super(InfoLabels, self).__getitem__('episode') != "": + if 'episode' in list(super(InfoLabels, self).keys()) and super(InfoLabels, self).__getitem__('episode') != "": return 'episode' - if 'episodeName' in super(InfoLabels, self).keys() \ + if 'episodeName' in list(super(InfoLabels, self).keys()) \ and super(InfoLabels, self).__getitem__('episodeName') != "": return 'episode' - if 'season' in super(InfoLabels, self).keys() and super(InfoLabels, self).__getitem__('season') != "": + if 'season' in list(super(InfoLabels, self).keys()) and super(InfoLabels, self).__getitem__('season') != "": return 'season' else: return 'tvshow' - elif 'title' in super(InfoLabels, self).keys() and super(InfoLabels, self).__getitem__('title') != "": + elif 'title' in list(super(InfoLabels, self).keys()) and super(InfoLabels, self).__getitem__('title') != "": return 'movie' else: @@ -104,7 +115,7 @@ class InfoLabels(dict): def tostring(self, separador=', '): ls = [] - dic = dict(super(InfoLabels, self).items()) + dic = dict(list(super(InfoLabels, self).items())) for i in sorted(dic.items()): i_str = str(i)[1:-1] @@ -158,6 +169,7 @@ class Item(object): Función llamada al modificar cualquier atributo del item, modifica algunos atributos en función de los datos modificados. 
""" + if PY3: name = self.toutf8(name) value = self.toutf8(value) if name == "__dict__": for key in value: @@ -313,9 +325,13 @@ class Item(object): valor = dic[var].tostring(',\r\t\t') else: valor = dic[var].tostring() + elif PY3 and isinstance(dic[var], bytes): + valor = "'%s'" % dic[var].decode('utf-8') else: valor = str(dic[var]) + if PY3 and isinstance(var, bytes): + var = var.decode('utf-8') ls.append(var + "= " + valor) return separator.join(ls) @@ -327,12 +343,12 @@ class Item(object): Uso: url = item.tourl() """ - dump = json.dump(self.__dict__) + dump = json.dump(self.__dict__).encode("utf8") # if empty dict if not dump: # set a str to avoid b64encode fails - dump = "" - return urllib.quote(base64.b64encode(dump)) + dump = "".encode("utf8") + return str(urllib.quote(base64.b64encode(dump))) def fromurl(self, url): """ @@ -367,6 +383,7 @@ class Item(object): return self def tojson(self, path=""): + from core import filetools """ Crea un JSON a partir del item, para guardar archivos de favoritos, lista de descargas, etc... Si se especifica un path, te lo guarda en la ruta especificada, si no, devuelve la cadena json @@ -377,11 +394,13 @@ class Item(object): @type path: str """ if path: - open(path, "wb").write(json.dump(self.__dict__)) + #open(path, "wb").write(json.dump(self.__dict__)) + res = filetools.write(path, json.dump(self.__dict__)) else: return json.dump(self.__dict__) def fromjson(self, json_item=None, path=""): + from core import filetools """ Genera un item a partir de un archivo JSON Si se especifica un path, lee directamente el archivo, si no, lee la cadena de texto pasada. @@ -394,8 +413,9 @@ class Item(object): @type path: str """ if path: - if os.path.exists(path): - json_item = open(path, "rb").read() + if filetools.exists(path): + #json_item = open(path, "rb").read() + json_item = filetools.read(path) else: json_item = {} @@ -436,6 +456,8 @@ class Item(object): unicode_title = unicode(value, "utf8", "ignore") return HTMLParser().unescape(unicode_title).encode("utf8") except: + if PY3 and isinstance(value, bytes): + value = value.decode("utf8") return value def toutf8(self, *args): @@ -447,13 +469,18 @@ class Item(object): else: value = self.__dict__ - if type(value) == unicode: - return value.encode("utf8") + if isinstance(value, unicode): + value = value.encode("utf8") + if PY3: value = value.decode("utf8") + return value - elif type(value) == str: + elif not PY3 and isinstance(value, str): return unicode(value, "utf8", "ignore").encode("utf8") - elif type(value) == list: + elif PY3 and isinstance(value, bytes): + return value.decode("utf8") + + elif isinstance(value, list): for x, key in enumerate(value): value[x] = self.toutf8(value[x]) return value @@ -461,11 +488,12 @@ class Item(object): elif isinstance(value, dict): newdct = {} for key in value: - v = self.toutf8(value[key]) - if type(key) == unicode: - key = key.encode("utf8") + value_unc = self.toutf8(value[key]) + key_unc = self.toutf8(key) + #if isinstance(key, unicode): + # key = key.encode("utf8") - newdct[key] = v + newdct[key_unc] = value_unc if len(args) > 0: if isinstance(value, InfoLabels): diff --git a/core/jsontools.py b/core/jsontools.py index c7e48cdc..07733fd8 100644 --- a/core/jsontools.py +++ b/core/jsontools.py @@ -10,24 +10,28 @@ from platformcode import logger try: import json except: - logger.info("json incluido en el interprete **NO** disponible") + logger.info("json included in the interpreter **NOT** available") try: import simplejson as json except: - logger.info("simplejson 
incluido en el interprete **NO** disponible") + logger.info("simplejson included in the interpreter **NOT** available") try: from lib import simplejson as json except: - logger.info("simplejson en el directorio lib **NO** disponible") - logger.error("No se ha encontrado un parser de JSON valido") + logger.info("simplejson in lib directory **NOT** available") + logger.error("A valid JSON parser was not found") json = None else: - logger.info("Usando simplejson en el directorio lib") + logger.info("Using simplejson in the lib directory") else: - logger.info("Usando simplejson incluido en el interprete") -else: - logger.info("Usando json incluido en el interprete") + logger.info("Using simplejson included in the interpreter") +# ~ else: + # ~ logger.info("Usando json incluido en el interprete") + +import sys +PY3 = False +if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int def load(*args, **kwargs): @@ -37,7 +41,7 @@ def load(*args, **kwargs): try: value = json.loads(*args, **kwargs) except: - logger.error("**NO** se ha podido cargar el JSON") + logger.error("**NOT** able to load the JSON") logger.error(traceback.format_exc()) value = {} @@ -46,12 +50,12 @@ def load(*args, **kwargs): def dump(*args, **kwargs): if not kwargs: - kwargs = {"indent": 4, "skipkeys": True, "sort_keys": True, "ensure_ascii": False} + kwargs = {"indent": 4, "skipkeys": True, "sort_keys": True, "ensure_ascii": True} try: value = json.dumps(*args, **kwargs) except: - logger.error("**NO** se ha podido cargar el JSON") + logger.error("JSON could **NOT** be saved") logger.error(traceback.format_exc()) value = "" return value @@ -59,11 +63,15 @@ def dump(*args, **kwargs): def to_utf8(dct): if isinstance(dct, dict): - return dict((to_utf8(key), to_utf8(value)) for key, value in dct.iteritems()) + return dict((to_utf8(key), to_utf8(value)) for key, value in dct.items()) elif isinstance(dct, list): return [to_utf8(element) for element in dct] elif isinstance(dct, unicode): - return dct.encode('utf-8') + dct = dct.encode("utf8") + if PY3: dct = dct.decode("utf8") + return dct + elif PY3 and isinstance(dct, bytes): + return dct.decode('utf-8') else: return dct @@ -124,18 +132,18 @@ def check_to_backup(data, fname, dict_data): logger.info() if not dict_data: - logger.error("Error al cargar el json del fichero %s" % fname) + logger.error("Error loading json from file %s" % fname) if data != "": # se crea un nuevo fichero from core import filetools title = filetools.write("%s.bk" % fname, data) if title != "": - logger.error("Ha habido un error al guardar el fichero: %s.bk" % fname) + logger.error("There was an error saving the file: %s.bk" % fname) else: - logger.debug("Se ha guardado una copia con el nombre: %s.bk" % fname) + logger.debug("A copy with the name has been saved: %s.bk" % fname) else: - logger.debug("Está vacío el fichero: %s" % fname) + logger.debug("The file is empty: %s" % fname) def update_node(dict_node, name_file, node, path=None): @@ -175,18 +183,18 @@ def update_node(dict_node, name_file, node, path=None): # es un dict if dict_data: if node in dict_data: - logger.debug(" existe el key %s" % node) + logger.debug(" the key exists %s" % node) dict_data[node] = dict_node else: - logger.debug(" NO existe el key %s" % node) + logger.debug(" The key does NOT exist %s" % node) new_dict = {node: dict_node} dict_data.update(new_dict) else: - logger.debug(" NO es un dict") + logger.debug(" It is NOT a dict") dict_data = {node: dict_node} json_data = dump(dict_data) result = 
filetools.write(fname, json_data) except: - logger.error("No se ha podido actualizar %s" % fname) + logger.error("Could not update %s" % fname) return result, json_data diff --git a/core/scraper.py b/core/scraper.py index 5d3c9f95..44b945fb 100644 --- a/core/scraper.py +++ b/core/scraper.py @@ -1,5 +1,10 @@ # -*- coding: utf-8 -*- +import sys +PY3 = False +if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int + +#from builtins import str from core.item import InfoLabels from platformcode import config, logger from platformcode import platformtools @@ -46,7 +51,7 @@ def find_and_set_infoLabels(item): try: scraper = __import__('core.%s' % scraper_actual, fromlist=["core.%s" % scraper_actual]) except ImportError: - exec "import core." + scraper_actual + " as scraper" + exec("import core." + scraper_actual + " as scraper") except: import traceback logger.error(traceback.format_exc()) @@ -99,9 +104,9 @@ def find_and_set_infoLabels(item): return True # raise - elif list_opciones_cuadro[index] in scrapers_disponibles.values(): + elif list_opciones_cuadro[index] in list(scrapers_disponibles.values()): # Obtener el nombre del modulo del scraper - for k, v in scrapers_disponibles.items(): + for k, v in list(scrapers_disponibles.items()): if list_opciones_cuadro[index] == v: if scrapers_disponibles[scraper_actual] not in list_opciones_cuadro: list_opciones_cuadro.append(scrapers_disponibles[scraper_actual]) @@ -111,7 +116,7 @@ def find_and_set_infoLabels(item): scraper = None scraper = __import__('core.%s' % scraper_actual, fromlist=["core.%s" % scraper_actual]) except ImportError: - exec "import core." + scraper_actual + " as scraper_module" + exec("import core." + scraper_actual + " as scraper_module") break logger.error("Error al importar el modulo scraper %s" % scraper_actual) @@ -175,7 +180,7 @@ def cuadro_completar(item): if not dict_default[c[0]] or dict_default[c[0]] == 'None' or dict_default[c[0]] == 0: dict_default[c[0]] = '' - elif isinstance(dict_default[c[0]], (int, float, long)): + elif isinstance(dict_default[c[0]], (int, float)) or (not PY3 and isinstance(dict_default[c[0]], (int, float, long))): # Si es numerico lo convertimos en str dict_default[c[0]] = str(dict_default[c[0]]) @@ -204,7 +209,7 @@ def callback_cuadro_completar(item, dict_values): if dict_values.get("title", None): # Adaptar dict_values a infoLabels validos dict_values['mediatype'] = ['movie', 'tvshow'][dict_values['mediatype']] - for k, v in dict_values.items(): + for k, v in list(dict_values.items()): if k in dict_default and dict_default[k] == dict_values[k]: del dict_values[k] diff --git a/core/scrapertools.py b/core/scrapertools.py index d303593d..c01fecac 100644 --- a/core/scrapertools.py +++ b/core/scrapertools.py @@ -1,17 +1,36 @@ # -*- coding: utf-8 -*- # -------------------------------------------------------------------------------- -# Scraper tools v2 for reading and processing web elements +# Scraper tools for reading and processing web elements # -------------------------------------------------------------------------------- +#from future import standard_library +#standard_library.install_aliases() +#from builtins import str +#from builtins import chr +import sys +PY3 = False +if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int + import re import time -import urlparse - +# from core import httptools from core.entities import html5 from platformcode import logger +# def get_header_from_response(url, header_to_get="", post=None, headers=None): +# 
header_to_get = header_to_get.lower()
+#     response = httptools.downloadpage(url, post=post, headers=headers, only_headers=True)
+#     return response.headers.get(header_to_get)
+
+
+# def read_body_and_headers(url, post=None, headers=None, follow_redirects=False, timeout=None):
+#     response = httptools.downloadpage(url, post=post, headers=headers, follow_redirects=follow_redirects,
+#                                       timeout=timeout)
+#     return response.data, response.headers
+
+
 def printMatches(matches):
     i = 0
     for match in matches:
@@ -89,7 +108,10 @@ def unescape(text):
         else:
             # named entity
             try:
-                import htmlentitydefs
+                if PY3:
+                    import html.entities as htmlentitydefs
+                else:
+                    import htmlentitydefs
                 text = unichr(htmlentitydefs.name2codepoint[text[1:-1]]).encode("utf-8")
             except KeyError:
                 logger.error("keyerror")
@@ -103,6 +125,50 @@
 # Convierte los codigos html "&ntilde;" y lo reemplaza por "ñ" caracter unicode utf-8
+# def decodeHtmlentities(string):
+#     string = entitiesfix(string)
+#     entity_re = re.compile("&(#?)(\d{1,5}|\w{1,8});")
+
+#     def substitute_entity(match):
+#         if PY3:
+#             from html.entities import name2codepoint as n2cp
+#         else:
+#             from htmlentitydefs import name2codepoint as n2cp
+#         ent = match.group(2)
+#         if match.group(1) == "#":
+#             return unichr(int(ent)).encode('utf-8')
+#         else:
+#             cp = n2cp.get(ent)
+
+#             if cp:
+#                 return unichr(cp).encode('utf-8')
+#             else:
+#                 return match.group()
+
+#     return entity_re.subn(substitute_entity, string)[0]
+
+
+# def entitiesfix(string):
+#     # Las entidades comienzan siempre con el símbolo & , y terminan con un punto y coma ( ; ).
+#     string = string.replace("&aacute;", "á")
+#     string = string.replace("&eacute;", "é")
+#     string = string.replace("&iacute;", "í")
+#     string = string.replace("&oacute;", "ó")
+#     string = string.replace("&uacute;", "ú")
+#     string = string.replace("&Aacute;", "Á")
+#     string = string.replace("&Eacute;", "É")
+#     string = string.replace("&Iacute;", "Í")
+#     string = string.replace("&Oacute;", "Ó")
+#     string = string.replace("&Uacute;", "Ú")
+#     string = string.replace("&uuml;", "ü")
+#     string = string.replace("&Uuml;", "Ü")
+#     string = string.replace("&ntilde;", "ñ")
+#     string = string.replace("&iquest;", "¿")
+#     string = string.replace("&iexcl;", "¡")
+#     string = string.replace(";;", ";")
+#     return string
+
+
 def htmlclean(cadena):
     cadena = re.compile("<!--.*?-->", re.DOTALL).sub("", cadena)
@@ -292,8 +358,12 @@ def remove_show_from_title(title, show):
     return title


-# scrapertools.get_filename_from_url(media_url)[-4:]
 def get_filename_from_url(url):
+    if PY3:
+        import urllib.parse as urlparse  # Es muy lento en PY2.
En PY3 es nativo + else: + import urlparse # Usamos el nativo de PY2 que es más rápido + parsed_url = urlparse.urlparse(url) try: filename = parsed_url.netloc diff --git a/core/servertools.py b/core/servertools.py index c5aa9c66..c343a286 100644 --- a/core/servertools.py +++ b/core/servertools.py @@ -3,18 +3,32 @@ # Server management # -------------------------------------------------------------------------------- -import os +from __future__ import division +from __future__ import absolute_import +import sys +PY3 = False +if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int + +if PY3: + #from future import standard_library + #standard_library.install_aliases() + import urllib.parse as urlparse # Es muy lento en PY2. En PY3 es nativo +else: + import urlparse # Usamos el nativo de PY2 que es más rápido + +from future.builtins import range +from past.utils import old_div + +import datetime import re +import time -import filetools -import urlparse - +from core import filetools from core import httptools from core import jsontools from core.item import Item from platformcode import config, logger from platformcode import platformtools -# from servers.decrypters import zcrypt from lib import unshortenit dict_servers_parameters = {} @@ -80,7 +94,7 @@ def get_servers_itemlist(itemlist, fnc=None, sort=False): @type sort: bool """ # Recorre los servidores - for serverid in get_servers_list().keys(): + for serverid in list(get_servers_list().keys()): server_parameters = get_server_parameters(serverid) # Recorre los patrones @@ -105,18 +119,18 @@ def get_servers_itemlist(itemlist, fnc=None, sort=False): item.url = url # Eliminamos los servidores desactivados - itemlist = filter(lambda i: not i.server or is_server_enabled(i.server), itemlist) + #itemlist = filter(lambda i: not i.server or is_server_enabled(i.server), itemlist) + # Filtrar si es necesario + itemlist = filter_servers(itemlist) for item in itemlist: - # Asignamos "directo" en caso de que el server no se encuentre en pelisalcarta + # Asignamos "directo" en caso de que el server no se encuentre en Alfa if not item.server and item.url: - item.server = 'directo' + item.server = "directo" if fnc: item.title = fnc(item) - # Filtrar si es necesario - itemlist = filter_servers(itemlist) # Ordenar segun favoriteslist si es necesario if sort: @@ -137,7 +151,8 @@ def findvideos(data, skip=False): logger.info() devuelve = [] skip = int(skip) - servers_list = get_servers_list().keys() + servers_list = list(get_servers_list().keys()) + # Ordenar segun favoriteslist si es necesario servers_list = sort_servers(servers_list) @@ -145,8 +160,8 @@ def findvideos(data, skip=False): # Ejecuta el findvideos en cada servidor activo for serverid in servers_list: - if not is_server_enabled(serverid): - continue + '''if not is_server_enabled(serverid): + continue''' if config.get_setting("filter_servers") == True and config.get_setting("black_list", server=serverid): is_filter_servers = True continue @@ -167,6 +182,8 @@ def findvideosbyserver(data, serverid): return [] server_parameters = get_server_parameters(serverid) + if not server_parameters["active"]: + return [] devuelve = [] if "find_videos" in server_parameters: # Recorre los patrones @@ -229,6 +246,8 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo # Si el vídeo es "directo" o "local", no hay que buscar más if server == "directo" or server == "local": + if isinstance(video_password, list): + return video_password, len(video_password) > 0, 
"<br/>".join(error_messages) logger.info("Server: %s, la url es la buena" % server) video_urls.append(["%s [%s]" % (urlparse.urlparse(url)[2][-4:], server), url]) @@ -309,7 +328,7 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo # Muestra el progreso if muestra_dialogo: - progreso.update((100 / len(opciones)) * opciones.index(opcion), config.get_localized_string(70180) % server_name) + progreso.update((old_div(100, len(opciones))) * opciones.index(opcion), config.get_localized_string(70180) % server_name) # Modo free if opcion == "free": @@ -377,7 +396,7 @@ def get_server_name(serverid): serverid = serverid.lower().split(".")[0] # Obtenemos el listado de servers - server_list = get_servers_list().keys() + server_list = list(get_servers_list().keys()) # Si el nombre está en la lista if serverid in server_list: @@ -445,25 +464,25 @@ def get_server_parameters(server): if server not in dict_servers_parameters: try: # Servers - if os.path.isfile(os.path.join(config.get_runtime_path(), "servers", server + ".json")): - path = os.path.join(config.get_runtime_path(), "servers", server + ".json") + if filetools.isfile(filetools.join(config.get_runtime_path(), "servers", server + ".json")): + path = filetools.join(config.get_runtime_path(), "servers", server + ".json") # Debriders - elif os.path.isfile(os.path.join(config.get_runtime_path(), "servers", "debriders", server + ".json")): - path = os.path.join(config.get_runtime_path(), "servers", "debriders", server + ".json") + elif filetools.isfile(filetools.join(config.get_runtime_path(), "servers", "debriders", server + ".json")): + path = filetools.join(config.get_runtime_path(), "servers", "debriders", server + ".json") # #Cuando no está bien definido el server en el canal (no existe conector), muestra error por no haber "path" y se tiene que revisar el canal # - data = filetools.read(path) - dict_server = jsontools.load(data) + dict_server = jsontools.load(filetools.read(path)) # Imagenes: se admiten url y archivos locales dentro de "resources/images" if dict_server.get("thumbnail") and "://" not in dict_server["thumbnail"]: - dict_server["thumbnail"] = os.path.join("https://raw.githubusercontent.com/kodiondemand/media/master/resources/servers", dict_server["thumbnail"]) + dict_server["thumbnail"] = filetools.join(config.get_runtime_path(), "resources", "media", + "servers", dict_server["thumbnail"]) for k in ['premium', 'id']: dict_server[k] = dict_server.get(k, list()) - if type(dict_server[k]) == str: + if isinstance(dict_server[k], str): dict_server[k] = [dict_server[k]] if "find_videos" in dict_server: @@ -497,7 +516,7 @@ def get_server_json(server_name): server_json = jsontools.load(filetools.read(server_path)) # logger.info("server_json= %s" % server_json) - except Exception, ex: + except Exception as ex: template = "An exception of type %s occured. 
Arguments:\n%r" message = template % (type(ex).__name__, ex.args) logger.error(" %s" % message) @@ -554,16 +573,16 @@ def get_server_setting(name, server, default=None): """ # Creamos la carpeta si no existe - if not os.path.exists(os.path.join(config.get_data_path(), "settings_servers")): - os.mkdir(os.path.join(config.get_data_path(), "settings_servers")) + if not filetools.exists(filetools.join(config.get_data_path(), "settings_servers")): + filetools.mkdir(filetools.join(config.get_data_path(), "settings_servers")) - file_settings = os.path.join(config.get_data_path(), "settings_servers", server + "_data.json") + file_settings = filetools.join(config.get_data_path(), "settings_servers", server + "_data.json") dict_settings = {} dict_file = {} - if os.path.exists(file_settings): + if filetools.exists(file_settings): # Obtenemos configuracion guardada de ../settings/channel_data.json try: - dict_file = jsontools.load(open(file_settings, "rb").read()) + dict_file = jsontools.load(filetools.read(file_settings)) if isinstance(dict_file, dict) and 'settings' in dict_file: dict_settings = dict_file['settings'] except EnvironmentError: @@ -580,10 +599,7 @@ def get_server_setting(name, server, default=None): dict_settings = default_settings dict_file['settings'] = dict_settings # Creamos el archivo ../settings/channel_data.json - json_data = jsontools.dump(dict_file) - try: - open(file_settings, "wb").write(json_data) - except EnvironmentError: + if not filetools.write(file_settings, jsontools.dump(dict_file)): logger.info("ERROR al salvar el archivo: %s" % file_settings) # Devolvemos el valor del parametro local 'name' si existe, si no se devuelve default @@ -592,18 +608,18 @@ def get_server_setting(name, server, default=None): def set_server_setting(name, value, server): # Creamos la carpeta si no existe - if not os.path.exists(os.path.join(config.get_data_path(), "settings_servers")): - os.mkdir(os.path.join(config.get_data_path(), "settings_servers")) + if not filetools.exists(filetools.join(config.get_data_path(), "settings_servers")): + filetools.mkdir(filetools.join(config.get_data_path(), "settings_servers")) - file_settings = os.path.join(config.get_data_path(), "settings_servers", server + "_data.json") + file_settings = filetools.join(config.get_data_path(), "settings_servers", server + "_data.json") dict_settings = {} dict_file = None - if os.path.exists(file_settings): + if filetools.exists(file_settings): # Obtenemos configuracion guardada de ../settings/channel_data.json try: - dict_file = jsontools.load(open(file_settings, "r").read()) + dict_file = jsontools.load(filetools.read(file_settings)) dict_settings = dict_file.get('settings', {}) except EnvironmentError: logger.info("ERROR al leer el archivo: %s" % file_settings) @@ -617,10 +633,7 @@ def set_server_setting(name, value, server): dict_file['settings'] = dict_settings # Creamos el archivo ../settings/channel_data.json - try: - json_data = jsontools.dump(dict_file) - open(file_settings, "w").write(json_data) - except EnvironmentError: + if not filetools.write(file_settings, jsontools.dump(dict_file)): logger.info("ERROR al salvar el archivo: %s" % file_settings) return None @@ -636,11 +649,10 @@ def get_servers_list(): @rtype: dict """ server_list = {} - for server in os.listdir(os.path.join(config.get_runtime_path(), "servers")): + for server in filetools.listdir(filetools.join(config.get_runtime_path(), "servers")): if server.endswith(".json") and not server == "version.json": server_parameters = 
get_server_parameters(server) - if server_parameters["active"] == True: - server_list[server.split(".")[0]] = server_parameters + server_list[server.split(".")[0]] = server_parameters return server_list @@ -654,7 +666,7 @@ def get_debriders_list(): @rtype: dict """ server_list = {} - for server in os.listdir(os.path.join(config.get_runtime_path(), "servers", "debriders")): + for server in filetools.listdir(filetools.join(config.get_runtime_path(), "servers", "debriders")): if server.endswith(".json"): server_parameters = get_server_parameters(server) if server_parameters["active"] == True: @@ -678,6 +690,7 @@ def sort_servers(servers_list): else: servers_list = sorted(servers_list, key=lambda x: config.get_setting("favorites_servers_list", server=x) or 100) + return servers_list @@ -689,18 +702,26 @@ def filter_servers(servers_list): u objetos Item. En cuyo caso es necesario q tengan un atributo item.server del tipo str. :return: Lista del mismo tipo de objetos que servers_list filtrada en funcion de la Lista Negra. """ + #Eliminamos los inactivos + if servers_list: + servers_list = [i for i in servers_list if not i.server or is_server_enabled(i.server)] + + if servers_list and config.get_setting('filter_servers'): if isinstance(servers_list[0], Item): - servers_list_filter = filter(lambda x: not config.get_setting("black_list", server=x.server), servers_list) + servers_list_filter = [x for x in servers_list if not config.get_setting("black_list", server=x.server)] else: - servers_list_filter = filter(lambda x: not config.get_setting("black_list", server=x), servers_list) + servers_list_filter = [x for x in servers_list if not config.get_setting("black_list", server=x)] # Si no hay enlaces despues de filtrarlos if servers_list_filter or not platformtools.dialog_yesno(config.get_localized_string(60000), config.get_localized_string(60010), config.get_localized_string(70281)): servers_list = servers_list_filter - + + if config.get_setting("favorites_servers") == True: + servers_list = sort_servers(servers_list) + return servers_list # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -715,21 +736,31 @@ def check_list_links(itemlist, numero='', timeout=3): El parámetro timeout indica un tope de espera para descargar la página """ numero = ((int(numero) + 1) * 5) if numero != '' else 10 - for it in itemlist: - if numero > 0 and it.server != '' and it.url != '': - verificacion = check_video_link(it.url, it.server, timeout) - it.title = verificacion + ' ' + it.title.strip() - logger.info('VERIFICATION= '+ verificacion) - it.alive = verificacion - numero -= 1 + from lib.concurrent import futures + with futures.ThreadPoolExecutor() as executor: + checked = [] + for it in itemlist: + if numero > 0 and it.server != '' and it.url != '': + checked.append(executor.submit(check_video_link, it, timeout)) + numero -= 1 + for link in futures.as_completed(checked): + res = link.result() + if res: + it = res[0] + verificacion = res[1] + it.title = verificacion + ' ' + it.title.strip() + logger.info('VERIFICATION= ' + verificacion) + it.alive = verificacion return itemlist -def check_video_link(url, server, timeout=3): - """ - Comprueba si el enlace a un video es valido y devuelve un string de 2 posiciones con la verificacion. - :param url, server: Link y servidor - :return: str(2) '??':No se ha podido comprobar. 'Ok':Parece que el link funciona. 'NO':Parece que no funciona. 
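check_list_links above was rewritten to probe the links concurrently: each eligible item is submitted to a thread pool and results are collected with futures.as_completed, so one slow server no longer blocks the whole verification pass. A rough standalone sketch of the same fan-out/collect pattern, under stated assumptions: it uses the stdlib concurrent.futures (Python 3) instead of the bundled lib.concurrent, plain dicts instead of Item objects, and a dummy check_one in place of check_video_link:

# Sketch of the threaded link-check fan-out; check_one stands in for check_video_link.
from concurrent import futures

def check_one(item, timeout=3):
    # Placeholder probe: return the item plus a verification mark.
    alive = item.get('url', '').startswith('http')
    return item, 'Ok' if alive else 'NO'

def check_list_links(itemlist, limit=10, timeout=3):
    with futures.ThreadPoolExecutor() as executor:
        pending = [executor.submit(check_one, it, timeout)
                   for it in itemlist[:limit] if it.get('server') and it.get('url')]
        for done in futures.as_completed(pending):
            item, mark = done.result()
            item['title'] = mark + ' ' + item['title'].strip()
            item['alive'] = mark
    return itemlist

links = [{'title': 'Episode 1', 'server': 'wstream', 'url': 'http://example.com/v1'},
         {'title': 'Episode 2', 'server': 'akvideo', 'url': 'ftp://bad'}]
for it in check_list_links(links):
    print(it['title'], it['alive'])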
+def check_video_link(item, timeout=3):
+    """
+    Checks whether a video link is valid and returns a 2-character verification string.
+    :param item: item carrying the link (item.url) and the server (item.server)
+    :return: str(2) '??': could not be checked. 'Ok': the link seems to work. 'NO': it seems broken.
+    """
+    url = item.url
+    server = item.server

     NK = "[COLOR 0xFFF9B613][B]" + u"\u2022".encode('utf-8') + "[/B][/COLOR]"
     OK = "[COLOR 0xFF00C289][B]" + u"\u2022".encode('utf-8') + "[/B][/COLOR]"
@@ -744,7 +775,7 @@
     except:
         server_module = None
         logger.info("[check_video_link] No se puede importar el servidor! %s" % server)
-        return NK
+        return item, NK

     if hasattr(server_module, 'test_video_exists'):
         ant_timeout = httptools.HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT
@@ -764,7 +795,7 @@
         finally:
             httptools.HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT = ant_timeout  # Restaurar tiempo de descarga

-        return resultado
+        return item, resultado

     logger.info("[check_video_link] No hay test_video_exists para servidor: %s" % server)
-    return NK
+    return item, NK
diff --git a/core/support.py b/core/support.py
index d9f425f3..5331ee43 100755
--- a/core/support.py
+++ b/core/support.py
@@ -10,8 +10,10 @@ from concurrent import futures
 try:
     import urllib.request as urllib
     import urllib.parse as urlparse
+    from urllib.parse import urlencode
 except ImportError:
     import urllib, urlparse
+    from urllib import urlencode

 from channelselector import thumb
 from core import httptools, scrapertools, servertools, tmdb, channeltools
@@ -157,7 +159,8 @@ def scrapeLang(scraped, lang, longtitle):
     return language, longtitle

 def cleantitle(title):
-    cleantitle = scrapertools.htmlclean(scrapertools.decodeHtmlentities(title).replace('"', "'").replace('×', 'x').replace('–', '-')).strip()
+    if type(title) != str: title = title.decode('UTF-8')
+    cleantitle = title.replace('"', "'").replace('×', 'x').replace('–', '-').strip()
     return cleantitle

 def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, typeContentDict, typeActionDict, blacklist, search, pag, function, lang):
@@ -192,16 +195,17 @@
     for i, match in enumerate(matches):
         if pagination and (pag - 1) * pagination > i and not search: continue  # pagination
         if pagination and i >= pag * pagination and not search: break  # pagination
-        listGroups = match.keys()
-        match = match.values()
+        # listGroups = match.keys()
+        # match = match.values()

-        if len(listGroups) > len(match):  # to fix a bug
-            match = list(match)
-            match.extend([''] * (len(listGroups) - len(match)))

         scraped = {}
         for kk in known_keys:
-            val = match[listGroups.index(kk)] if kk in listGroups else ''
+            val = match[kk] if kk in match else ''
+            # val = match[listGroups.index(kk)] if kk in listGroups else ''
             if val and (kk == "url" or kk == 'thumb') and 'http' not in val:
                 val = scrapertools.find_single_match(item.url, 'https?://[a-z0-9.-]+') + (val if val.startswith('/') else '/' + val)
             scraped[kk] = val
@@ -294,8 +298,10 @@
                 other = scraped['other'] if scraped['other'] else ''
             )

-            for lg in list(set(listGroups).difference(known_keys)):
-                it.__setattr__(lg, match[listGroups.index(lg)])
+            # for lg in list(set(listGroups).difference(known_keys)):
+            #
it.__setattr__(lg, match[listGroups.index(lg)]) + for lg in list(set(match.keys()).difference(known_keys)): + it.__setattr__(lg, match[lg]) if 'itemHook' in args: it = args['itemHook'](it) @@ -367,7 +373,7 @@ def scrape(func): log('PATRON= ', patron) if not data: - page = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True, session=item.session) + page = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True) # if url may be changed and channel has findhost to update if (not page.data or scrapertools.get_domain_from_url(page.url) != scrapertools.get_domain_from_url(item.url)) and 'findhost' in func.__globals__: host = func.__globals__['findhost']() @@ -376,8 +382,7 @@ def scrape(func): jsontools.update_node(host, func.__module__.split('.')[-1], 'url') parse[1] = scrapertools.get_domain_from_url(host) item.url = urlparse.urlunparse(parse) - page = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True, - session=item.session) + page = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True) data = page.data.replace("'", '"') data = re.sub('\n|\t', ' ', data) data = re.sub(r'>\s+<', '> <', data) @@ -468,7 +473,7 @@ def dooplay_get_links(item, host): ret = [] for type, post, nume, title, server in matches: - postData = urllib.urlencode({ + postData = urlencode({ "action": "doo_player_ajax", "post": post, "nume": nume, @@ -582,7 +587,7 @@ def swzz_get_url(item): elif 'https://stayonline.pro' in item.url: id = item.url.split('/')[-2] reqUrl = 'https://stayonline.pro/ajax/linkView.php' - p = urllib.urlencode({"id": id}) + p = urlencode({"id": id}) data = httptools.downloadpage(reqUrl, post=p).data try: import json @@ -699,9 +704,9 @@ def menu(func): if global_search: menuItem(itemlist, filename, config.get_localized_string(70741) % '… bold', 'search', host + dictUrl['search']) - - autoplay.init(item.channel, list_servers, list_quality) - autoplay.show_option(item.channel, itemlist) + if 'get_channel_results' not in inspect.stack()[1][3]: + autoplay.init(item.channel, list_servers, list_quality) + autoplay.show_option(item.channel, itemlist) channel_config(item, itemlist) return itemlist @@ -744,7 +749,7 @@ def typo(string, typography=''): if '{}' in string: string = '{' + re.sub(r'\s\{\}','',string) + '}' if 'submenu' in string: - string = u"\u2022\u2022 ".encode('utf-8') + re.sub(r'\ssubmenu','',string) + string = "•• " + re.sub(r'\ssubmenu','',string) if 'color' in string: color = scrapertools.find_single_match(string, 'color ([a-z]+)') if color == 'kod' or '': color = kod_color @@ -758,7 +763,7 @@ def typo(string, typography=''): if '--' in string: string = ' - ' + re.sub(r'\s--','',string) if 'bullet' in string: - string = '[B]' + u"\u2022".encode('utf-8') + '[/B] ' + re.sub(r'\sbullet','',string) + string = '[B]' + "•" + '[/B] ' + re.sub(r'\sbullet','',string) return string @@ -766,10 +771,33 @@ def typo(string, typography=''): def match(item_url_string, **args): ''' match is a function that combines httptools and scraper tools: + + supports all httptools and the following arggs: + @param item_url_string: if it's a titem download the page item.url, if it's a URL download the page, if it's a string pass it to scrapertools + @type item_url_string: item or str + @param string: force item_url_string to be a string + @type string: bool + @param patronBlock: find first element in patron + @type patronBlock: str + @param patronBloks: find multiple matches + @type patronBloks: str or list + @param debugBlock: 
regex101.com for debug + @type debugBlock: bool + @param patron: find multiple matches on block, blocks or data + @type patron: str or list + @param debug: regex101.com for debug + @type debug: bool + + Return a item with the following key: + data: data of the webpage + block: first block + blocks: all the blocks + match: first match + matches: all the matches ''' log(item_url_string) - matches = [] + matches = blocks = [] url = None # arguments allowed for scrape patron = args.get('patron', None) @@ -778,12 +806,15 @@ def match(item_url_string, **args): debug = args.get('debug', False) debugBlock = args.get('debugBlock', False) string = args.get('string', False) + # remove scrape arguments args = dict([(key, val) for key, val in args.items() if key not in ['patron', 'patronBlock', 'patronBlocks', 'debug', 'debugBlock', 'string']]) - # dbg() + # check type of item_url_string - if type(item_url_string) == str: - if item_url_string.startswith('http') and not string: url = item_url_string + if string: + data = item_url_string + elif type(item_url_string) == str: + if item_url_string.startswith('http'): url = item_url_string else : data = item_url_string else: # if item_url_string is an item use item.url as url @@ -803,7 +834,9 @@ def match(item_url_string, **args): if patronBlock: blocks = [scrapertools.find_single_match(data, patronBlock)] elif patronBlocks: - blocks = scrapertools.find_multiple_matches(data, patronBlock) + if type(patronBlock) == str: patron = [patronBlock] + for p in patronBlock: + blocks += scrapertools.find_multiple_matches(data, p) else: blocks = [data] @@ -1010,7 +1043,7 @@ def server(item, data='', itemlist=[], headers='', AutoPlay=True, CheckLinks=Tru item.title = typo(item.contentTitle.strip(),'bold') if item.contentType == 'movie' or (config.get_localized_string(30161) in item.title) else item.title - videoitem.plot= typo(videoitem.title, 'bold') + videoitem.plot= typo(videoitem.title, 'bold') + typo(videoitem.quality, '_ [] bold') videoitem.title = item.title + (typo(videoitem.title, '_ color kod [] bold') if videoitem.title else "") + (typo(videoitem.quality, '_ color kod []') if videoitem.quality else "") videoitem.fulltitle = item.fulltitle videoitem.show = item.show @@ -1036,7 +1069,7 @@ def controls(itemlist, item, AutoPlay=True, CheckLinks=True, down_load=True): channel_node = autoplay_node.get(item.channel, {}) settings_node = channel_node.get('settings', {}) AP = get_setting('autoplay') or settings_node['active'] - HS = config.get_setting('hide_servers') or (settings_node['hide_servers'] if settings_node.has_key('hide_server') else False) + HS = config.get_setting('hide_servers') or (settings_node['hide_servers'] if 'hide_server' in settings_node else False) if CL and not AP: if get_setting('checklinks', item.channel): diff --git a/core/tmdb.py b/core/tmdb.py index 06c919ed..d306a392 100644 --- a/core/tmdb.py +++ b/core/tmdb.py @@ -1,10 +1,26 @@ # -*- coding: utf-8 -*- +#from future import standard_library +#standard_library.install_aliases() +#from builtins import str +import sys +PY3 = False +if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int + +if PY3: + import urllib.parse as urllib # Es muy lento en PY2. 
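For reference, a hypothetical call to the support.match() helper documented above. The URL and regular expressions here are invented; only the keyword names (patronBlock, patron) and the attributes of the returned item (data, block, blocks, match, matches) come from the docstring:

# Hypothetical usage of support.match() inside the addon; URL and patterns are invented.
from core import support

res = support.match('https://example-channel.tv/film',
                    patronBlock=r'<div id="movies">(.*?)</div>',
                    patron=r'<a href="([^"]+)">([^<]+)</a>')

print(res.block[:80])           # the block isolated by patronBlock
for url, title in res.matches:  # every (url, title) pair found by patron
    print(title, '->', url)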
En PY3 es nativo +else: + import urllib # Usamos el nativo de PY2 que es más rápido + +from future.builtins import range +from future.builtins import object + +import ast + import copy import re import sqlite3 import time -import urllib import xbmcaddon @@ -37,8 +53,8 @@ def_lang = addon.getSetting('language') # tmdb.set_infoLabels(item, seekTmdb = True) # # Obtener datos basicos de una pelicula: -# Antes de llamar al metodo set_infoLabels el titulo a buscar debe estar en item.fulltitle -# o en item.contentTitle y el año en item.infoLabels['year']. +# Antes de llamar al metodo set_infoLabels el titulo a buscar debe estar en item.contentTitle +# y el año en item.infoLabels['year']. # # Obtener datos basicos de una serie: # Antes de llamar al metodo set_infoLabels el titulo a buscar debe estar en item.show o en @@ -73,7 +89,6 @@ def_lang = addon.getSetting('language') otmdb_global = None fname = filetools.join(config.get_data_path(), "kod_db.sqlite") - def create_bd(): conn = sqlite3.connect(fname) c = conn.cursor() @@ -160,7 +175,7 @@ def cache_response(fn): conn = sqlite3.connect(fname, timeout=15) c = conn.cursor() url = re.sub('&year=-', '', args[0]) - # logger.error('la url %s' % url) + if PY3: url = str.encode(url) url_base64 = base64.b64encode(url) c.execute("SELECT response, added FROM tmdb_cache WHERE url=?", (url_base64,)) row = c.fetchone() @@ -171,7 +186,9 @@ def cache_response(fn): # si no se ha obtenido información, llamamos a la funcion if not result: result = fn(*args) - result_base64 = base64.b64encode(str(result)) + result = str(result) + if PY3: result = str.encode(result) + result_base64 = base64.b64encode(result) c.execute("INSERT OR REPLACE INTO tmdb_cache (url, response, added) VALUES (?, ?, ?)", (url_base64, result_base64, time.time())) @@ -375,17 +392,19 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda=def_lang, lock=None # ... buscar datos temporada item.infoLabels['mediatype'] = 'season' temporada = otmdb_global.get_temporada(numtemporada) + if not isinstance(temporada, dict): + temporada = ast.literal_eval(temporada.decode('utf-8')) if temporada: # Actualizar datos __leer_datos(otmdb_global) - item.infoLabels['title'] = temporada['name'] if temporada.has_key('name') else '' - if temporada.has_key('overview') and temporada['overview']: + item.infoLabels['title'] = temporada['name'] if 'name' in temporada else '' + if 'overview' in temporada and temporada['overview']: item.infoLabels['plot'] = temporada['overview'] - if temporada.has_key('air_date') and temporada['air_date']: + if 'air_date' in temporada and temporada['air_date']: date = temporada['air_date'].split('-') item.infoLabels['aired'] = date[2] + "/" + date[1] + "/" + date[0] - if temporada.has_key('poster_path') and temporada['poster_path']: + if 'poster_path' in temporada and temporada['poster_path']: item.infoLabels['poster_path'] = 'http://image.tmdb.org/t/p/original' + temporada['poster_path'] item.thumbnail = item.infoLabels['poster_path'] @@ -445,12 +464,8 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda=def_lang, lock=None # Busqueda de pelicula por titulo... 
if item.infoLabels['year'] or item.infoLabels['filtro']: # ...y año o filtro - if item.contentTitle: - titulo_buscado = item.contentTitle - else: - titulo_buscado = item.fulltitle - - otmdb = Tmdb(texto_buscado=titulo_buscado, tipo=tipo_busqueda, idioma_busqueda=idioma_busqueda, + searched_title = item.contentTitle if item.contentTitle else item.fulltitle + otmdb = Tmdb(texto_buscado=searched_title, tipo=tipo_busqueda, idioma_busqueda=idioma_busqueda, filtro=item.infoLabels.get('filtro', {}), year=item.infoLabels['year']) if otmdb is not None: if otmdb.get_id() and config.get_setting("tmdb_plus_info", default=False): @@ -492,7 +507,7 @@ def find_and_set_infoLabels(item): title = title.replace(year, "").strip() item.infoLabels['year'] = year[1:-1] - if not item.infoLabels.get("tmdb_id"): + if not item.infoLabels.get("tmdb_id") or not item.infoLabels.get("tmdb_id")[0].isdigit(): if not item.infoLabels.get("imdb_id"): otmdb_global = Tmdb(texto_buscado=title, tipo=tipo_busqueda, year=item.infoLabels['year']) else: @@ -588,7 +603,6 @@ def get_genres(type): return genres.dic_generos[lang] - # Clase auxiliar class ResultDictDefault(dict): # Python 2.4 @@ -606,7 +620,7 @@ class ResultDictDefault(dict): return list() elif key == 'images_posters': posters = dict() - if 'images' in super(ResultDictDefault, self).keys() and \ + if 'images' in list(super(ResultDictDefault, self).keys()) and \ 'posters' in super(ResultDictDefault, self).__getitem__('images'): posters = super(ResultDictDefault, self).__getitem__('images')['posters'] super(ResultDictDefault, self).__setattr__("images_posters", posters) @@ -615,7 +629,7 @@ class ResultDictDefault(dict): elif key == "images_backdrops": backdrops = dict() - if 'images' in super(ResultDictDefault, self).keys() and \ + if 'images' in list(super(ResultDictDefault, self).keys()) and \ 'backdrops' in super(ResultDictDefault, self).__getitem__('images'): backdrops = super(ResultDictDefault, self).__getitem__('images')['backdrops'] super(ResultDictDefault, self).__setattr__("images_backdrops", backdrops) @@ -624,7 +638,7 @@ class ResultDictDefault(dict): elif key == "images_profiles": profiles = dict() - if 'images' in super(ResultDictDefault, self).keys() and \ + if 'images' in list(super(ResultDictDefault, self).keys()) and \ 'profiles' in super(ResultDictDefault, self).__getitem__('images'): profiles = super(ResultDictDefault, self).__getitem__('images')['profiles'] super(ResultDictDefault, self).__setattr__("images_profiles", profiles) @@ -640,7 +654,7 @@ class ResultDictDefault(dict): def tostring(self, separador=',\n'): ls = [] - for i in super(ResultDictDefault, self).items(): + for i in list(super(ResultDictDefault, self).items()): i_str = str(i)[1:-1] if isinstance(i[0], str): old = i[0] + "'," @@ -899,12 +913,16 @@ class Tmdb(object): logger.info("[Tmdb.py] Filling in dictionary of genres") resultado = cls.get_json(url) + if not isinstance(resultado, dict): + resultado = ast.literal_eval(resultado.decode('utf-8')) lista_generos = resultado["genres"] for i in lista_generos: cls.dic_generos[idioma][tipo][str(i["id"])] = i["name"] except: logger.error("Error generating dictionaries") + import traceback + logger.error(traceback.format_exc()) def __by_id(self, source='tmdb'): @@ -926,6 +944,8 @@ class Tmdb(object): logger.info("[Tmdb.py] Searching %s:\n%s" % (buscando, url)) resultado = self.get_json(url) + if not isinstance(resultado, dict): + resultado = ast.literal_eval(resultado.decode('utf-8')) if resultado: if source != "tmdb": @@ -942,14 +962,14 @@ 
class Tmdb(object): else: # No hay resultados de la busqueda msg = "The search of %s gave no results" % buscando - # logger.debug(msg) + logger.debug(msg) def __search(self, index_results=0, page=1): self.result = ResultDictDefault() results = [] - total_results = 0 text_simple = self.busqueda_texto.lower() text_quote = urllib.quote(text_simple) + total_results = 0 total_pages = 0 buscando = "" @@ -957,15 +977,17 @@ class Tmdb(object): # http://api.themoviedb.org/3/search/movie?api_key=a1ab8b8669da03637a4b98fa39c39228&query=superman&language=es # &include_adult=false&page=1 url = ('http://api.themoviedb.org/3/search/%s?api_key=a1ab8b8669da03637a4b98fa39c39228&query=%s&language=%s' - '&include_adult=%s&page=%s' % (self.busqueda_tipo, text_quote.replace(' ', '%20'), + '&include_adult=%s&page=%s' % (self.busqueda_tipo, text_quote, self.busqueda_idioma, self.busqueda_include_adult, page)) if self.busqueda_year: url += '&year=%s' % self.busqueda_year buscando = self.busqueda_texto.capitalize() - logger.info("[Tmdb.py] Searching %s on page %s:\n%s" % (buscando, page, url)) + logger.info("[Tmdb.py] Buscando %s en pagina %s:\n%s" % (buscando, page, url)) resultado = self.get_json(url) + if not isinstance(resultado, dict): + resultado = ast.literal_eval(resultado.decode('utf-8')) total_results = resultado.get("total_results", 0) total_pages = resultado.get("total_pages", 0) @@ -973,11 +995,13 @@ class Tmdb(object): if total_results > 0: results = resultado["results"] - if self.busqueda_filtro and results: + if self.busqueda_filtro and total_results > 1: # TODO documentar esta parte - for key, value in dict(self.busqueda_filtro).items(): + for key, value in list(dict(self.busqueda_filtro).items()): for r in results[:]: - if key not in r or r[key] != value: + if not r[key]: + r[key] = str(r[key]) + if key not in r or value not in r[key]: results.remove(r) total_results -= 1 @@ -1015,7 +1039,7 @@ class Tmdb(object): type_search = self.discover.get('url', '') if type_search: params = [] - for key, value in self.discover.items(): + for key, value in list(self.discover.items()): if key != "url": params.append(key + "=" + str(value)) # http://api.themoviedb.org/3/discover/movie?api_key=a1ab8b8669da03637a4b98fa39c39228&query=superman&language=es @@ -1024,6 +1048,8 @@ class Tmdb(object): logger.info("[Tmdb.py] Searcing %s:\n%s" % (type_search, url)) resultado = self.get_json(url) + if not isinstance(resultado, dict): + resultado = ast.literal_eval(resultado.decode('utf-8')) total_results = resultado.get("total_results", -1) total_pages = resultado.get("total_pages", 1) @@ -1036,7 +1062,7 @@ class Tmdb(object): results = resultado["results"] if self.busqueda_filtro and results: # TODO documentar esta parte - for key, value in dict(self.busqueda_filtro).items(): + for key, value in list(dict(self.busqueda_filtro).items()): for r in results[:]: if key not in r or r[key] != value: results.remove(r) @@ -1184,6 +1210,8 @@ class Tmdb(object): (self.busqueda_tipo, self.busqueda_id, self.busqueda_idioma)) resultado = self.get_json(url) + if not isinstance(resultado, dict): + resultado = ast.literal_eval(resultado.decode('utf-8')) if 'overview' in resultado: self.result['overview'] = resultado['overview'] @@ -1316,6 +1344,8 @@ class Tmdb(object): logger.info("[Tmdb.py] Searcing " + buscando) try: self.temporada[numtemporada] = self.get_json(url) + if not isinstance(self.temporada[numtemporada], dict): + self.temporada[numtemporada] = ast.literal_eval(self.temporada[numtemporada].decode('utf-8')) except: 
logger.error("Unable to get the season") @@ -1356,6 +1386,8 @@ class Tmdb(object): return {} temporada = self.get_temporada(numtemporada) + if not isinstance(temporada, dict): + temporada = ast.literal_eval(temporada.decode('utf-8')) if not temporada: # Se ha producido un error return {} @@ -1388,9 +1420,9 @@ class Tmdb(object): dic_aux = dict((i['id'], i) for i in ret_dic["temporada_crew"]) for e in temporada["episodes"]: for crew in e['crew']: - if crew['id'] not in dic_aux.keys(): + if crew['id'] not in list(dic_aux.keys()): dic_aux[crew['id']] = crew - ret_dic["temporada_crew"] = dic_aux.values() + ret_dic["temporada_crew"] = list(dic_aux.values()) # Obtener datos del capitulo si procede if capitulo != -1: @@ -1429,6 +1461,8 @@ class Tmdb(object): % (self.busqueda_tipo, self.result['id'], self.busqueda_idioma) dict_videos = self.get_json(url) + if not isinstance(dict_videos, dict): + dict_videos = ast.literal_eval(dict_videos.decode('utf-8')) if dict_videos['results']: dict_videos['results'] = sorted(dict_videos['results'], key=lambda x: (x['type'], x['size'])) @@ -1440,6 +1474,8 @@ class Tmdb(object): % (self.busqueda_tipo, self.result['id']) dict_videos = self.get_json(url) + if not isinstance(dict_videos, dict): + dict_videos = ast.literal_eval(dict_videos.decode('utf-8')) if dict_videos['results']: dict_videos['results'] = sorted(dict_videos['results'], key=lambda x: (x['type'], x['size'])) @@ -1481,13 +1517,13 @@ class Tmdb(object): if not origen: origen = self.result - if 'credits' in origen.keys(): + if 'credits' in list(origen.keys()): dic_origen_credits = origen['credits'] origen['credits_cast'] = dic_origen_credits.get('cast', []) origen['credits_crew'] = dic_origen_credits.get('crew', []) del origen['credits'] - items = origen.items() + items = list(origen.items()) # Informacion Temporada/episodio if ret_infoLabels['season'] and self.temporada.get(ret_infoLabels['season']): @@ -1496,14 +1532,14 @@ class Tmdb(object): if ret_infoLabels['episode']: episodio = ret_infoLabels['episode'] - items.extend(self.get_episodio(ret_infoLabels['season'], episodio).items()) + items.extend(list(self.get_episodio(ret_infoLabels['season'], episodio).items())) # logger.info("ret_infoLabels" % ret_infoLabels) for k, v in items: if not v: continue - elif type(v) == str: + elif isinstance(v, str): v = re.sub(r"\n|\r|\t", "", v) # fix if v == "None": @@ -1517,7 +1553,7 @@ class Tmdb(object): elif k == 'runtime': #Duration for movies ret_infoLabels['duration'] = int(v) * 60 - + elif k == 'episode_run_time': #Duration for episodes try: for v_alt in v: #It comes as a list (?!) @@ -1572,7 +1608,7 @@ class Tmdb(object): elif k == 'credits_cast' or k == 'temporada_cast' or k == 'episodio_guest_stars': dic_aux = dict((name, character) for (name, character) in l_castandrole) - l_castandrole.extend([(p['name'], p['character']) for p in v if p['name'] not in dic_aux.keys()]) + l_castandrole.extend([(p['name'], p['character']) for p in v if p['name'] not in list(dic_aux.keys())]) elif k == 'videos': if not isinstance(v, list): diff --git a/core/tvdb.py b/core/tvdb.py index 328fdce5..92b0a4c8 100644 --- a/core/tvdb.py +++ b/core/tvdb.py @@ -7,9 +7,14 @@ # del addon y también Kodi. 
# ------------------------------------------------------------ -import re +from future import standard_library +standard_library.install_aliases() +#from builtins import str +from future.builtins import object -import urllib2 +import urllib.request, urllib.error, urllib.parse + +import re from core import jsontools from core import scrapertools @@ -218,7 +223,7 @@ def set_infoLabels_item(item): break _next = list_episodes['links']['next'] - if type(_next) == int: + if isinstance(_next, int): page = _next else: break @@ -330,7 +335,7 @@ def completar_codigos(item): break -class Tvdb: +class Tvdb(object): def __init__(self, **kwargs): self.__check_token() @@ -398,12 +403,12 @@ class Tvdb: params = {"apikey": apikey} try: - req = urllib2.Request(url, data=jsontools.dump(params), headers=DEFAULT_HEADERS) - response = urllib2.urlopen(req) + req = urllib.request.Request(url, data=jsontools.dump(params), headers=DEFAULT_HEADERS) + response = urllib.request.urlopen(req) html = response.read() response.close() - except Exception, ex: + except Exception as ex: message = "An exception of type %s occured. Arguments:\n%s" % (type(ex).__name__, repr(ex.args)) logger.error("error en: %s" % message) @@ -426,12 +431,12 @@ class Tvdb: url = HOST + "/refresh_token" try: - req = urllib2.Request(url, headers=DEFAULT_HEADERS) - response = urllib2.urlopen(req) + req = urllib.request.Request(url, headers=DEFAULT_HEADERS) + response = urllib.request.urlopen(req) html = response.read() response.close() - except urllib2.HTTPError, err: + except urllib.error.HTTPError as err: logger.error("err.code es %s" % err.code) # si hay error 401 es que el token se ha pasado de tiempo y tenemos que volver a llamar a login if err.code == 401: @@ -439,7 +444,7 @@ class Tvdb: else: raise - except Exception, ex: + except Exception as ex: message = "An exception of type %s occured. Arguments:\n%s" % (type(ex).__name__, repr(ex.args)) logger.error("error en: %s" % message) @@ -525,19 +530,18 @@ class Tvdb: params = {"airedSeason": "%s" % season, "airedEpisode": "%s" % episode} try: - import urllib - params = urllib.urlencode(params) + params = urllib.parse.urlencode(params) url = HOST + "/series/%s/episodes/query?%s" % (_id, params) DEFAULT_HEADERS["Accept-Language"] = lang logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS)) - req = urllib2.Request(url, headers=DEFAULT_HEADERS) - response = urllib2.urlopen(req) + req = urllib.request.Request(url, headers=DEFAULT_HEADERS) + response = urllib.request.urlopen(req) html = response.read() response.close() - except Exception, ex: + except Exception as ex: message = "An exception of type %s occured. Arguments:\n%s" % (type(ex).__name__, repr(ex.args)) logger.error("error en: %s" % message) @@ -595,12 +599,12 @@ class Tvdb: url = HOST + "/series/%s/episodes?page=%s" % (_id, page) logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS)) - req = urllib2.Request(url, headers=DEFAULT_HEADERS) - response = urllib2.urlopen(req) + req = urllib.request.Request(url, headers=DEFAULT_HEADERS) + response = urllib.request.urlopen(req) html = response.read() response.close() - except Exception, ex: + except Exception as ex: message = "An exception of type %s occured. 
Arguments:\n%s" % (type(ex).__name__, repr(ex.args)) logger.error("error en: %s" % message) @@ -682,13 +686,13 @@ class Tvdb: try: DEFAULT_HEADERS["Accept-Language"] = lang logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS)) - req = urllib2.Request(url, headers=DEFAULT_HEADERS) - response = urllib2.urlopen(req) + req = urllib.request.Request(url, headers=DEFAULT_HEADERS) + response = urllib.request.urlopen(req) html = response.read() response.close() - except Exception, ex: - if type(ex) == urllib2.HTTPError: + except Exception as ex: + if isinstance(ex, urllib).HTTPError: logger.debug("code es %s " % ex.code) message = "An exception of type %s occured. Arguments:\n%s" % (type(ex).__name__, repr(ex.args)) @@ -741,20 +745,19 @@ class Tvdb: elif zap2it_id: params["zap2itId"] = zap2it_id - import urllib - params = urllib.urlencode(params) + params = urllib.parse.urlencode(params) DEFAULT_HEADERS["Accept-Language"] = lang url = HOST + "/search/series?%s" % params logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS)) - req = urllib2.Request(url, headers=DEFAULT_HEADERS) - response = urllib2.urlopen(req) + req = urllib.request.Request(url, headers=DEFAULT_HEADERS) + response = urllib.request.urlopen(req) html = response.read() response.close() - except Exception, ex: - if type(ex) == urllib2.HTTPError: + except Exception as ex: + if isinstance(ex, urllib).HTTPError: logger.debug("code es %s " % ex.code) message = "An exception of type %s occured. Arguments:\n%s" % (type(ex).__name__, repr(ex.args)) @@ -835,15 +838,15 @@ class Tvdb: try: DEFAULT_HEADERS["Accept-Language"] = lang - req = urllib2.Request(url, headers=DEFAULT_HEADERS) + req = urllib.request.Request(url, headers=DEFAULT_HEADERS) logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS)) - response = urllib2.urlopen(req) + response = urllib.request.urlopen(req) html = response.read() response.close() - except Exception, ex: - if type(ex) == urllib2.HTTPError: + except Exception as ex: + if isinstance(ex, urllib).HTTPError: logger.debug("code es %s " % ex.code) message = "An exception of type %s occured. Arguments:\n%s" % (type(ex).__name__, repr(ex.args)) @@ -905,18 +908,17 @@ class Tvdb: try: - import urllib - params = urllib.urlencode(params) + params = urllib.parse.urlencode(params) DEFAULT_HEADERS["Accept-Language"] = lang url = HOST + "/series/%s/images/query?%s" % (_id, params) logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS)) - req = urllib2.Request(url, headers=DEFAULT_HEADERS) - response = urllib2.urlopen(req) + req = urllib.request.Request(url, headers=DEFAULT_HEADERS) + response = urllib.request.urlopen(req) html = response.read() response.close() - except Exception, ex: + except Exception as ex: message = "An exception of type %s occured. 
Arguments:\n%s" % (type(ex).__name__, repr(ex.args)) logger.error("error en: %s" % message) @@ -946,8 +948,8 @@ class Tvdb: DEFAULT_HEADERS["Accept-Language"] = lang logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS)) - req = urllib2.Request(url, headers=DEFAULT_HEADERS) - response = urllib2.urlopen(req) + req = urllib.request.Request(url, headers=DEFAULT_HEADERS) + response = urllib.request.urlopen(req) html = response.read() response.close() @@ -1039,7 +1041,7 @@ class Tvdb: # origen['credits_crew'] = dic_origen_credits.get('crew', []) # del origen['credits'] - items = origen.items() + items = list(origen.items()) for k, v in items: if not v: @@ -1118,7 +1120,7 @@ class Tvdb: elif k == 'cast': dic_aux = dict((name, character) for (name, character) in l_castandrole) - l_castandrole.extend([(p['name'], p['role']) for p in v if p['name'] not in dic_aux.keys()]) + l_castandrole.extend([(p['name'], p['role']) for p in v if p['name'] not in list(dic_aux.keys())]) else: logger.debug("Atributos no añadidos: %s=%s" % (k, v)) diff --git a/core/videolibrarytools.py b/core/videolibrarytools.py index 5357bd28..099a0bf1 100644 --- a/core/videolibrarytools.py +++ b/core/videolibrarytools.py @@ -3,6 +3,11 @@ # Common Library Tools # ------------------------------------------------------------ +#from builtins import str +import sys +PY3 = False +if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int + import errno import math import traceback @@ -130,7 +135,10 @@ def save_movie(item): else: base_name = item.contentTitle - base_name = unicode(filetools.validate_path(base_name.replace('/', '-')), "utf8").encode("utf8") + if not PY3: + base_name = unicode(filetools.validate_path(base_name.replace('/', '-')), "utf8").encode("utf8") + else: + base_name = filetools.validate_path(base_name.replace('/', '-')) if config.get_setting("lowerize_title", "videolibrary") == 0: base_name = base_name.lower() @@ -191,9 +199,12 @@ def save_movie(item): # Si se ha marcado la opción de url de emergencia, se añade ésta a la película después de haber ejecutado Findvideos del canal try: + headers = {} + if item.headers: + headers = item.headers channel = generictools.verify_channel(item.channel) if config.get_setting("emergency_urls", channel) in [1, 3]: - item = emergency_urls(item, None, json_path) + item = emergency_urls(item, None, json_path, headers=headers) if item_nfo.emergency_urls and not isinstance(item_nfo.emergency_urls, dict): del item_nfo.emergency_urls if not item_nfo.emergency_urls: @@ -224,7 +235,7 @@ def save_movie(item): return 0, 0, -1 def filter_list(episodelist, action=None, path=None): - if path: path = path.decode('utf8') + # if path: path = path.decode('utf8') # import xbmc # if xbmc.getCondVisibility('system.platform.windows') > 0: path = path.replace('smb:','').replace('/','\\') channel_prefs = {} @@ -397,17 +408,29 @@ def save_tvshow(item, episodelist): return 0, 0, -1, path _id = item.infoLabels['code'][0] + if not item.infoLabels['code'][0] or item.infoLabels['code'][0] == 'None': + if item.infoLabels['code'][1] and item.infoLabels['code'][1] != 'None': + _id = item.infoLabels['code'][1] + elif item.infoLabels['code'][2] and item.infoLabels['code'][2] != 'None': + _id = item.infoLabels['code'][2] + else: + logger.error("NO ENCONTRADO EN SCRAPER O NO TIENE code: " + item.url + + ' / ' + item.infoLabels['code']) + return 0, 0, -1, path if config.get_setting("original_title_folder", "videolibrary") == 1 and item.infoLabels['originaltitle']: - base_name = 
item.infoLabels[u'originaltitle'] + base_name = item.infoLabels['originaltitle'] elif item.infoLabels['tvshowtitle']: - base_name = item.infoLabels[u'tvshowtitle'] + base_name = item.infoLabels['tvshowtitle'] elif item.infoLabels['title']: - base_name = item.infoLabels[u'title'] + base_name = item.infoLabels['title'] else: - base_name = u'%s' % item.contentSerieName + base_name = item.contentSerieName - base_name = unicode(filetools.validate_path(base_name.replace('/', '-')), "utf8").encode("utf8") + if not PY3: + base_name = unicode(filetools.validate_path(base_name.replace('/', '-')), "utf8").encode("utf8") + else: + base_name = filetools.validate_path(base_name.replace('/', '-')) if config.get_setting("lowerize_title", "videolibrary") == 0: base_name = base_name.lower() @@ -415,7 +438,7 @@ def save_tvshow(item, episodelist): for raiz, subcarpetas, ficheros in filetools.walk(TVSHOWS_PATH): for c in subcarpetas: code = scrapertools.find_single_match(c, '\[(.*?)\]') - if code and code in item.infoLabels['code']: + if code and code != 'None' and code in item.infoLabels['code']: path = filetools.join(raiz, c) _id = code break @@ -425,7 +448,7 @@ def save_tvshow(item, episodelist): logger.info("Creating series directory: " + path) try: filetools.mkdir(path) - except OSError, exception: + except OSError as exception: if exception.errno != errno.EEXIST: raise @@ -518,7 +541,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True): news_in_playcounts = {} # Listamos todos los ficheros de la serie, asi evitamos tener que comprobar si existe uno por uno - raiz, carpetas_series, ficheros = filetools.walk(path).next() + raiz, carpetas_series, ficheros = next(filetools.walk(path)) ficheros = [filetools.join(path, f) for f in ficheros] nostrm_episodelist = [] @@ -550,7 +573,11 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True): tags = [] if config.get_setting("enable_filter", "videolibrary"): tags = [x.strip() for x in config.get_setting("filters", "videolibrary").lower().split(",")] + for e in episodelist: + headers = {} + if e.headers: + headers = e.headers if tags != [] and tags != None and any(tag in e.title.lower() for tag in tags): continue @@ -567,31 +594,34 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True): if overwrite: #pero solo si se sobrescriben los .json json_epi = Item().fromjson(filetools.read(json_path)) #Leemos el .json if json_epi.emergency_urls: #si existen las urls de emergencia... - e.emergency_urls = json_epi.emergency_urls #... las copiamos - else: #y si no... - e = emergency_urls(e, channel, json_path) #... las generamos + e.emergency_urls = json_epi.emergency_urls #... las copiamos + else: #y si no... + e = emergency_urls(e, channel, json_path, headers=headers) #... las generamos else: - e = emergency_urls(e, channel, json_path) #Si el episodio no existe, generamos las urls - if e.emergency_urls: #Si ya tenemos urls... + e = emergency_urls(e, channel, json_path, headers=headers) #Si el episodio no existe, generamos las urls + if e.emergency_urls: #Si ya tenemos urls... emergency_urls_succ = True #... es un éxito y vamos a marcar el .nfo - elif emergency_urls_stat == 2 and e.contentType == 'episode': #Borramos urls de emergencia? + elif emergency_urls_stat == 2 and e.contentType == 'episode': #Borramos urls de emergencia? if e.emergency_urls: del e.emergency_urls emergency_urls_succ = True #... es un éxito y vamos a marcar el .nfo
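For orientation, the emergency-link branch above reduces to: reuse links already cached in the episode's .json when overwriting, regenerate them otherwise, and drop them when the user chose deletion. A simplified sketch that folds the save and update cases together (Item and filetools are the addon's own helpers; stat mirrors emergency_urls_stat, and emergency_urls is the function defined later in this file):

from core import filetools
from core.item import Item

def cached_or_fresh(e, channel, json_path, headers, overwrite, stat):
    # stat: 1 = save links, 3 = refresh links, 2 = delete cached links
    if stat in (1, 3) and e.contentType == 'episode':
        if stat == 1 and overwrite and filetools.exists(json_path):
            json_epi = Item().fromjson(filetools.read(json_path))
            if json_epi.emergency_urls:  # reuse what is already on disk
                e.emergency_urls = json_epi.emergency_urls
                return e
        return emergency_urls(e, channel, json_path, headers=headers)
    if stat == 2 and e.contentType == 'episode' and e.emergency_urls:
        del e.emergency_urls  # user asked to clear the cached links
    return e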
elif emergency_urls_stat == 3 and e.contentType == 'episode': #Actualizamos urls de emergencia? if not silent: p_dialog.update(0, 'Cacheando enlaces y archivos .torrent...', e.title) #progress dialog - e = emergency_urls(e, channel, json_path) #generamos las urls - if e.emergency_urls: #Si ya tenemos urls... + e = emergency_urls(e, channel, json_path, headers=headers) #generamos las urls + if e.emergency_urls: #Si ya tenemos urls... emergency_urls_succ = True #... es un éxito y vamos a marcar el .nfo - - if not e.infoLabels["tmdb_id"] or (serie.infoLabels["tmdb_id"] and e.infoLabels["tmdb_id"] != serie.infoLabels["tmdb_id"]): #en series multicanal, prevalece el infolabels... + + if not e.infoLabels["tmdb_id"] or (serie.infoLabels["tmdb_id"] and e.infoLabels["tmdb_id"] != serie.infoLabels["tmdb_id"]): #en series multicanal, prevalece el infolabels... e.infoLabels = serie.infoLabels #... del canal actual y no el del original e.contentSeason, e.contentEpisodeNumber = season_episode.split("x") + if e.videolibray_emergency_urls: + del e.videolibray_emergency_urls + if e.channel_redir: + del e.channel_redir #... y se borran las marcas de redirecciones new_episodelist.append(e) except: if e.contentType == 'episode': logger.error("Unable to save %s emergency urls in the video library" % e.contentTitle) - logger.error(traceback.format_exc()) continue # No hay lista de episodios, no hay nada que guardar @@ -600,18 +630,35 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True): return 0, 0, 0 # fix float porque la division se hace mal en python 2.x - t = float(100) / len(new_episodelist) + try: + t = float(100) / len(new_episodelist) + except: + t = 0 + last_season_episode = '' for i, e in enumerate(scraper.sort_episode_list(new_episodelist)): if not silent: p_dialog.update(int(math.ceil((i + 1) * t)), config.get_localized_string(60064), e.title) + high_sea = e.contentSeason + high_epi = e.contentEpisodeNumber + if scrapertools.find_single_match(e.title, '[aA][lL]\s*(\d+)'): + high_epi = int(scrapertools.find_single_match(e.title, '[aA][lL]\s*(\d+)')) + max_sea = e.infoLabels["number_of_seasons"] + max_epi = 0 + if e.infoLabels["number_of_seasons"] and (e.infoLabels["temporada_num_episodios"] or e.infoLabels["number_of_seasons"] == 1): + if e.infoLabels["number_of_seasons"] == 1 and e.infoLabels["number_of_episodes"]: + max_epi = e.infoLabels["number_of_episodes"] + else: + max_epi = e.infoLabels["temporada_num_episodios"] + season_episode = "%sx%s" % (e.contentSeason, str(e.contentEpisodeNumber).zfill(2)) strm_path = filetools.join(path, "%s.strm" % season_episode) nfo_path = filetools.join(path, "%s.nfo" % season_episode) json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower()) if season_episode in nostrm_episodelist: + logger.error('Error in the structure of the Video Library: Series ' + serie.contentSerieName + ' ' + season_episode) continue strm_exists = strm_path in ficheros nfo_exists = nfo_path in ficheros @@ -659,8 +706,10 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True): if not item_nfo: head_nfo, item_nfo = read_nfo(nfo_path) - if not e.infoLabels["tmdb_id"] or (item_nfo.infoLabels["tmdb_id"] and e.infoLabels["tmdb_id"] != item_nfo.infoLabels["tmdb_id"]): #en series multicanal, prevalece el infolabels... - e.infoLabels = item_nfo.infoLabels #...
del canal actual y no el del original + # En series multicanal, prevalece el infolabels del canal actual y no el del original + if not e.infoLabels["tmdb_id"] or (item_nfo.infoLabels["tmdb_id"] \ + and e.infoLabels["tmdb_id"] != item_nfo.infoLabels["tmdb_id"]): + e.infoLabels = item_nfo.infoLabels if filetools.write(json_path, e.tojson()): if not json_exists: @@ -688,10 +737,12 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True): if not silent and p_dialog.iscanceled(): break + #logger.debug('high_sea x high_epi: %sx%s' % (str(high_sea), str(high_epi))) + #logger.debug('max_sea x max_epi: %sx%s' % (str(max_sea), str(max_epi))) if not silent: p_dialog.close() - if news_in_playcounts: + if news_in_playcounts or emergency_urls_succ or serie.infoLabels["status"] == "Ended" or serie.infoLabels["status"] == "Canceled": # Si hay nuevos episodios los marcamos como no vistos en tvshow.nfo ... tvshow_path = filetools.join(path, "tvshow.nfo") try: @@ -703,16 +754,27 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True): if emergency_urls_succ: if tvshow_item.emergency_urls and not isinstance(tvshow_item.emergency_urls, dict): del tvshow_item.emergency_urls - if emergency_urls_stat in [1, 3]: #Operación de guardar/actualizar enlaces + if emergency_urls_stat in [1, 3]: #Operación de guardar/actualizar enlaces if not tvshow_item.emergency_urls: tvshow_item.emergency_urls = dict() - tvshow_item.emergency_urls.update({serie.channel: True}) - elif emergency_urls_stat == 2: #Operación de Borrar enlaces + if tvshow_item.library_urls.get(serie.channel, False): + tvshow_item.emergency_urls.update({serie.channel: True}) + elif emergency_urls_stat == 2: #Operación de Borrar enlaces if tvshow_item.emergency_urls and tvshow_item.emergency_urls.get(serie.channel, False): - tvshow_item.emergency_urls.pop(serie.channel, None) #borramos la entrada del .nfo + tvshow_item.emergency_urls.pop(serie.channel, None) #borramos la entrada del .nfo if tvshow_item.active == 30: tvshow_item.active = 1 + if tvshow_item.infoLabels["tmdb_id"] == serie.infoLabels["tmdb_id"]: + tvshow_item.infoLabels = serie.infoLabels + tvshow_item.infoLabels["title"] = tvshow_item.infoLabels["tvshowtitle"] + + if max_sea == high_sea and max_epi == high_epi and (tvshow_item.infoLabels["status"] == "Ended" + or tvshow_item.infoLabels["status"] == "Canceled") and insertados == 0 and fallidos == 0: + tvshow_item.active = 0 # ... no la actualizaremos más + logger.debug("%s [%s]: serie 'Terminada' o 'Cancelada'. Se desactiva la actualización periódica" % \ + (serie.contentSerieName, serie.channel)) + update_last = datetime.date.today() tvshow_item.update_last = update_last.strftime('%Y-%m-%d') update_next = datetime.date.today() + datetime.timedelta(days=int(tvshow_item.active)) @@ -819,10 +881,10 @@ def add_tvshow(item, channel=None): if not channel: try: - #channel = __import__('channels.%s' % item.channel, fromlist=["channels.%s" % item.channel]) + # channel = __import__('channels.%s' % item.channel, fromlist=["channels.%s" % item.channel]) channel = __import__('specials.%s' % channel_alt, fromlist=["specials.%s" % channel_alt]) except ImportError: - exec "import channels." + item.channel + " as channel" + exec("import channels." 
+ item.channel + " as channel") #Para desambiguar títulos, se provoca que TMDB pregunte por el título realmente deseado #El usuario puede seleccionar el título entre los ofrecidos en la primera pantalla @@ -836,15 +898,15 @@ def add_tvshow(item, channel=None): # del item.tmdb_stat #Limpiamos el status para que no se grabe en la Videoteca # Obtiene el listado de episodios - #if item.channel == 'community': itemlist = getattr(channel, item.action)(item) + global magnet_caching + magnet_caching = False insertados, sobreescritos, fallidos, path = save_tvshow(item, itemlist) if not insertados and not sobreescritos and not fallidos: platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(60067)) - logger.error("The %s series could not be added to the video library. Could not get any episode" - % item.show) + logger.error("The %s series could not be added to the video library. Could not get any episode" % item.show) elif fallidos == -1: platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(60068)) @@ -856,8 +918,7 @@ def add_tvshow(item, channel=None): else: platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(60070)) - logger.info("%s episodes of the %s series have been added to the video library" % - (insertados, item.show)) + logger.info("%s episodes of the %s series have been added to the video library" % (insertados, item.show)) if config.is_xbmc(): if config.get_setting("sync_trakt_new_tvshow", "videolibrary"): import xbmc @@ -872,10 +933,16 @@ def add_tvshow(item, channel=None): xbmc_videolibrary.sync_trakt_addon(path) -def emergency_urls(item, channel=None, path=None): +def emergency_urls(item, channel=None, path=None, headers={}): logger.info() import re - """ + from servers import torrent + try: + magnet_caching_e = magnet_caching + except: + magnet_caching_e = True + + """ Llamamos a Findvideos del canal con la variable "item.videolibray_emergency_urls = True" para obtener la variable "item.emergency_urls" con la lista de listas de tuplas de los enlaces torrent y de servidores directos para ese episodio o película En la lista [0] siempre deben ir los enlaces torrents, si los hay. Si se desea cachear los .torrents, la búsqueda va contra esa lista. @@ -890,17 +957,28 @@ def emergency_urls(item, channel=None, path=None): if hasattr(channel, 'findvideos'): #Si el canal tiene "findvideos"... item.videolibray_emergency_urls = True #... se marca como "lookup" channel_save = item.channel #... guarda el canal original por si hay fail-over en Newpct1 + category_save = item.category #... guarda la categoría original por si hay fail-over o redirección en Newpct1 + if item.channel_redir: #... si hay un redir, se restaura temporamente el canal alternativo + item.channel = scrapertools.find_single_match(item.url, 'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').lower() + item.category = scrapertools.find_single_match(item.url, 'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').capitalize() item_res = getattr(channel, 'findvideos')(item) #... se procesa Findvideos item_res.channel = channel_save #... restaura el canal original por si hay fail-over en Newpct1 - item_res.category = channel_save.capitalize() #... y la categoría + item_res.category = category_save #... restaura la categoría original por si hay fail-over o redirección en Newpct1 + item.category = category_save #... restaura la categoría original por si hay fail-over o redirección en Newpct1 del item_res.videolibray_emergency_urls #... 
y se borra la marca de lookup + if item.videolibray_emergency_urls: + del item.videolibray_emergency_urls #... y se borra la marca de lookup original except: logger.error('ERROR when processing the title in Findvideos del Canal: ' + item.channel + ' / ' + item.title) logger.error(traceback.format_exc()) + item.channel = channel_save #... restaura el canal original por si hay fail-over o redirección en Newpct1 + item.category = category_save #... restaura la categoría original por si hay fail-over o redirección en Newpct1 item_res = item.clone() #Si ha habido un error, se devuelve el Item original if item_res.videolibray_emergency_urls: del item_res.videolibray_emergency_urls #... y se borra la marca de lookup - + if item.videolibray_emergency_urls: + del item.videolibray_emergency_urls #... y se borra la marca de lookup original + #Si el usuario ha activado la opción "emergency_urls_torrents", se descargarán los archivos .torrent de cada título else: #Si se han cacheado con éxito los enlaces... try: @@ -921,7 +999,9 @@ def emergency_urls(item, channel=None, path=None): if item_res.post: post = item_res.post for url in item_res.emergency_urls[0]: #Recorremos las urls de emergencia... torrents_path = re.sub(r'(?:\.\w+$)', '_%s.torrent' % str(i).zfill(2), path) - path_real = caching_torrents(url, referer, post, torrents_path=torrents_path) #... para descargar los .torrents + path_real = '' + if magnet_caching_e or not url.startswith('magnet'): + path_real = torrent.caching_torrents(url, referer, post, torrents_path=torrents_path, headers=headers) #... para descargar los .torrents if path_real: #Si ha tenido éxito... item_res.emergency_urls[0][i-1] = path_real.replace(videolibrary_path, '') #se guarda el "path" relativo i += 1 @@ -944,140 +1024,3 @@ def emergency_urls(item, channel=None, path=None): #logger.debug(item_res.emergency_urls) return item_res #Devolvemos el Item actualizado con los enlaces de emergencia - - -def caching_torrents(url, referer=None, post=None, torrents_path=None, timeout=10, lookup=False, data_torrent=False): - if torrents_path != None: - logger.info("path = " + torrents_path) - else: - logger.info() - if referer and post: - logger.info('REFERER: ' + referer) - from core import httptools - torrent_file = '' - headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Referer': referer} #Necesario para el Post del .Torrent - - """ - Descarga en el path recibido el .torrent de la url recibida, y pasa el decode - Devuelve el path real del .torrent, o el path vacío si la operación no ha tenido éxito - """ - - videolibrary_path = config.get_videolibrary_path() #Calculamos el path absoluto a partir de la Videoteca - if torrents_path == None: - if not videolibrary_path: - torrents_path = '' - if data_torrent: - return (torrents_path, torrent_file) - return torrents_path #Si hay un error, devolvemos el "path" vacío - torrents_path = filetools.join(videolibrary_path, 'temp_torrents_Alfa', 'cliente_torrent_Alfa.torrent') #path de descarga temporal - if '.torrent' not in torrents_path: - torrents_path += '.torrent' #path para dejar el .torrent - torrents_path_encode = filetools.encode(torrents_path) #encode utf-8 del path - - if url.endswith(".rar") or url.startswith("magnet:"): #No es un archivo .torrent - logger.error('It is not a Torrent file: ' + url) - torrents_path = '' - if data_torrent: - return (torrents_path, torrent_file) - return torrents_path #Si hay un error, devolvemos el "path" vacío - - try: - #Descargamos el .torrent - if referer and post: #Descarga con 
POST - response = httptools.downloadpage(url, headers=headers, post=post, follow_redirects=False, timeout=timeout) - else: #Descarga sin post - response = httptools.downloadpage(url, timeout=timeout) - if not response.sucess: - logger.error('.Torrent file not found: ' + url) - torrents_path = '' - if data_torrent: - return (torrents_path, torrent_file) - return torrents_path #Si hay un error, devolvemos el "path" vacío - torrent_file = response.data - - if "used CloudFlare" in torrent_file: #Si tiene CloudFlare, usamos este proceso - response = httptools.downloadpage("http://anonymouse.org/cgi-bin/anon-www.cgi/" + url.strip(), timeout=timeout) - if not response.sucess: - logger.error('Archivo .torrent no encontrado: ' + url) - torrents_path = '' - if data_torrent: - return (torrents_path, torrent_file) - return torrents_path #Si hay un error, devolvemos el "path" vacío - torrent_file = response.data - - #Si es un archivo .ZIP tratamos de extraer el contenido - if torrent_file.startswith("PK"): - logger.info('It is a .ZIP file: ' + url) - - torrents_path_zip = filetools.join(videolibrary_path, 'temp_torrents_zip') #Carpeta de trabajo - torrents_path_zip = filetools.encode(torrents_path_zip) - torrents_path_zip_file = filetools.join(torrents_path_zip, 'temp_torrents_zip.zip') #Nombre del .zip - - import time - filetools.rmdirtree(torrents_path_zip) #Borramos la carpeta temporal - time.sleep(1) #Hay que esperar, porque si no da error - filetools.mkdir(torrents_path_zip) #La creamos de nuevo - - if filetools.write(torrents_path_zip_file, torrent_file): #Salvamos el .zip - torrent_file = '' #Borramos el contenido en memoria - try: #Extraemos el .zip - from core import ziptools - unzipper = ziptools.ziptools() - unzipper.extract(torrents_path_zip_file, torrents_path_zip) - except: - import xbmc - xbmc.executebuiltin('XBMC.Extract("%s", "%s")' % (torrents_path_zip_file, torrents_path_zip)) - time.sleep(1) - - import os - for root, folders, files in os.walk(torrents_path_zip): #Recorremos la carpeta para leer el .torrent - for file in files: - if file.endswith(".torrent"): - input_file = filetools.join(root, file) #nombre del .torrent - torrent_file = filetools.read(input_file) #leemos el .torrent - - filetools.rmdirtree(torrents_path_zip) #Borramos la carpeta temporal - - #Si no es un archivo .torrent (RAR, HTML,..., vacío) damos error - if not scrapertools.find_single_match(torrent_file, '^d\d+:.*?\d+:'): - logger.error('It is not a Torrent file: ' + url) - torrents_path = '' - if data_torrent: - return (torrents_path, torrent_file) - return torrents_path #Si hay un error, devolvemos el "path" vacío - - #Salvamos el .torrent - if not lookup: - if not filetools.write(torrents_path_encode, torrent_file): - logger.error('ERROR: Unwritten .torrent file: ' + torrents_path_encode) - torrents_path = '' #Si hay un error, devolvemos el "path" vacío - torrent_file = '' #... y el buffer del .torrent - if data_torrent: - return (torrents_path, torrent_file) - return torrents_path - except: - torrents_path = '' #Si hay un error, devolvemos el "path" vacío - torrent_file = '' #... 
y el buffer del .torrent - logger.error('ERROR: .Torrent download process failed: ' + url + ' / ' + torrents_path_encode) - logger.error(traceback.format_exc()) - - #logger.debug(torrents_path) - if data_torrent: - return (torrents_path, torrent_file) - return torrents_path - - -def verify_url_torrent(url, timeout=5): - """ - Verifica si el archivo .torrent al que apunta la url está disponible, descargándolo en un area temporal - Entrada: url - Salida: True o False dependiendo del resultado de la operación - """ - - if not url or url == 'javascript:;': #Si la url viene vacía... - return False #... volvemos con error - torrents_path = caching_torrents(url, timeout=timeout, lookup=True) #Descargamos el .torrent - if torrents_path: #Si ha tenido éxito... - return True - else: - return False diff --git a/core/ziptools.py b/core/ziptools.py index 3f5ed09d..ad3a88e7 100644 --- a/core/ziptools.py +++ b/core/ziptools.py @@ -3,22 +3,26 @@ # Zip Tools # -------------------------------------------------------------------------------- -import io -import os +from builtins import object +import sys +PY3 = False +VFS = True +if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int; VFS = False + import zipfile from platformcode import config, logger +from core import filetools -class ziptools: +class ziptools(object): def extract(self, file, dir, folder_to_extract="", overwrite_question=False, backup=False): logger.info("file=%s" % file) logger.info("dir=%s" % dir) - if not dir.endswith(':') and not os.path.exists(dir): - os.mkdir(dir) + if not dir.endswith(':') and not filetools.exists(dir): + filetools.mkdir(dir) - file = io.FileIO(file) zf = zipfile.ZipFile(file) if not folder_to_extract: self._createstructure(file, dir) @@ -30,60 +34,66 @@ class ziptools: if not name.endswith('/'): logger.info("no es un directorio") try: - (path, filename) = os.path.split(os.path.join(dir, name)) + (path, filename) = filetools.split(filetools.join(dir, name)) logger.info("path=%s" % path) logger.info("name=%s" % name) if folder_to_extract: - if path != os.path.join(dir, folder_to_extract): + if path != filetools.join(dir, folder_to_extract): break else: - os.makedirs(path) + filetools.mkdir(path) except: pass if folder_to_extract: - outfilename = os.path.join(dir, filename) + outfilename = filetools.join(dir, filename) else: - outfilename = os.path.join(dir, name) + outfilename = filetools.join(dir, name) logger.info("outfilename=%s" % outfilename) try: - if os.path.exists(outfilename) and overwrite_question: + if filetools.exists(outfilename) and overwrite_question: from platformcode import platformtools dyesno = platformtools.dialog_yesno("El archivo ya existe", "El archivo %s a descomprimir ya existe" \ ", ¿desea sobrescribirlo?" 
\ - % os.path.basename(outfilename)) + % filetools.basename(outfilename)) if not dyesno: break if backup: import time - import shutil hora_folder = "Copia seguridad [%s]" % time.strftime("%d-%m_%H-%M", time.localtime()) - backup = os.path.join(config.get_data_path(), 'backups', hora_folder, folder_to_extract) - if not os.path.exists(backup): - os.makedirs(backup) - shutil.copy2(outfilename, os.path.join(backup, os.path.basename(outfilename))) + backup = filetools.join(config.get_data_path(), 'backups', hora_folder, folder_to_extract) + if not filetools.exists(backup): + filetools.mkdir(backup) + filetools.copy(outfilename, filetools.join(backup, filetools.basename(outfilename))) - outfile = open(outfilename, 'wb') - outfile.write(zf.read(nameo)) + if not filetools.write(outfilename, zf.read(nameo), silent=True, vfs=VFS): #TRUNCA en FINAL en Kodi 19 con VFS + logger.error("Error en fichero " + nameo) except: + import traceback + logger.error(traceback.format_exc()) logger.error("Error en fichero " + nameo) + try: + zf.close() + except: + logger.info("Error cerrando .zip " + file) + def _createstructure(self, file, dir): self._makedirs(self._listdirs(file), dir) def create_necessary_paths(filename): try: - (path, name) = os.path.split(filename) - os.makedirs(path) + (path, name) = filetools.split(filename) + filetools.mkdir(path) except: pass def _makedirs(self, directories, basedir): for dir in directories: - curdir = os.path.join(basedir, dir) - if not os.path.exists(curdir): - os.mkdir(curdir) + curdir = filetools.join(basedir, dir) + if not filetools.exists(curdir): + filetools.mkdir(curdir) def _listdirs(self, file): zf = zipfile.ZipFile(file) diff --git a/default.py b/default.py index b0285886..34c7681a 100644 --- a/default.py +++ b/default.py @@ -12,7 +12,7 @@ from platformcode import config, logger logger.info("init...") librerias = xbmc.translatePath(os.path.join(config.get_runtime_path(), 'lib')) -sys.path.insert(0, librerias) +sys.path.append(librerias) if not config.dev_mode(): from platformcode import updater diff --git a/lib/builtins/__init__.py b/lib/builtins/__init__.py new file mode 100644 index 00000000..4f936f28 --- /dev/null +++ b/lib/builtins/__init__.py @@ -0,0 +1,12 @@ +from __future__ import absolute_import +import sys +__future_module__ = True + +if sys.version_info[0] < 3: + from __builtin__ import * + # Overwrite any old definitions with the equivalent future.builtins ones: + from future.builtins import * +else: + raise ImportError('This package should not be accessible on Python 3. 
' + 'Either you are trying to run from the python-future src folder ' + 'or your installation of python-future is corrupted.') diff --git a/lib/cloudscraper/__init__.py b/lib/cloudscraper/__init__.py index f4e584e5..1b57f325 100644 --- a/lib/cloudscraper/__init__.py +++ b/lib/cloudscraper/__init__.py @@ -1,4 +1,6 @@ +# https://github.com/VeNoMouS/cloudscraper/tree/master import logging +import os import re import sys import ssl @@ -9,6 +11,14 @@ try: except ImportError: import copy_reg as copyreg +try: + from HTMLParser import HTMLParser +except ImportError: + if sys.version_info >= (3, 4): + import html + else: + from html.parser import HTMLParser + from copy import deepcopy from time import sleep from collections import OrderedDict @@ -31,13 +41,17 @@ except ImportError: pass try: - from urlparse import urlparse + from urlparse import urlparse, urljoin except ImportError: - from urllib.parse import urlparse + from urllib.parse import urlparse, urljoin + +# Add exceptions path +sys.path.append(os.path.join(os.path.dirname(__file__), 'exceptions')) +import cloudflare_exceptions # noqa: E402 # ------------------------------------------------------------------------------- # -__version__ = '1.2.19' +__version__ = '1.2.24' # ------------------------------------------------------------------------------- # @@ -91,6 +105,7 @@ class CloudScraper(Session): 'allow_brotli', True if 'brotli' in sys.modules.keys() else False ) + self.user_agent = User_Agent( allow_brotli=self.allow_brotli, browser=kwargs.pop('browser', None) @@ -107,13 +122,16 @@ class CloudScraper(Session): # Set a random User-Agent if no custom User-Agent has been set # ------------------------------------------------------------------------------- # self.headers = self.user_agent.headers + if not self.cipherSuite: + self.cipherSuite = self.user_agent.cipherSuite + + if isinstance(self.cipherSuite, list): + self.cipherSuite = ':'.join(self.cipherSuite) self.mount( 'https://', CipherSuiteAdapter( - cipherSuite=':'.join(self.user_agent.cipherSuite) - if not self.cipherSuite else ':'.join(self.cipherSuite) - if isinstance(self.cipherSuite, list) else self.cipherSuite + cipherSuite=self.cipherSuite ) ) @@ -138,6 +156,20 @@ class CloudScraper(Session): except ValueError as e: print("Debug Error: {}".format(getattr(e, 'message', e))) + # ------------------------------------------------------------------------------- # + # Unescape / decode html entities + # ------------------------------------------------------------------------------- # + + @staticmethod + def unescape(html_text): + if sys.version_info >= (3, 0): + if sys.version_info >= (3, 4): + return html.unescape(html_text) + + return HTMLParser().unescape(html_text) + + return HTMLParser().unescape(html_text) + # ------------------------------------------------------------------------------- # # Decode Brotli on older versions of urllib3 manually # ------------------------------------------------------------------------------- # @@ -186,7 +218,7 @@ class CloudScraper(Session): sys.tracebacklimit = 0 _ = self._solveDepthCnt self._solveDepthCnt = 0 - raise RuntimeError( + raise cloudflare_exceptions.Cloudflare_Loop_Protection( "!!Loop Protection!! 
We have tried to solve {} time(s) in a row.".format(_) ) @@ -269,7 +301,7 @@ class CloudScraper(Session): def is_Challenge_Request(self, resp): if self.is_Firewall_Blocked(resp): sys.tracebacklimit = 0 - raise RuntimeError('Cloudflare has blocked this request (Code 1020 Detected).') + raise cloudflare_exceptions.Cloudflare_Block('Cloudflare has blocked this request (Code 1020 Detected).') if self.is_reCaptcha_Challenge(resp) or self.is_IUAM_Challenge(resp): return True @@ -280,17 +312,18 @@ class CloudScraper(Session): # Try to solve cloudflare javascript challenge. # ------------------------------------------------------------------------------- # - @staticmethod - def IUAM_Challenge_Response(body, url, interpreter): + def IUAM_Challenge_Response(self, body, url, interpreter): try: challengeUUID = re.search( r'id="challenge-form" action="(?P<challengeUUID>\S+)"', body, re.M | re.DOTALL ).groupdict().get('challengeUUID', '') + payload = OrderedDict(re.findall(r'name="(r|jschl_vc|pass)"\svalue="(.*?)"', body)) + except AttributeError: sys.tracebacklimit = 0 - raise RuntimeError( + raise cloudflare_exceptions.Cloudflare_Error_IUAM( "Cloudflare IUAM detected, unfortunately we can't extract the parameters correctly." ) @@ -301,7 +334,7 @@ class CloudScraper(Session): interpreter ).solveChallenge(body, hostParsed.netloc) except Exception as e: - raise RuntimeError( + raise cloudflare_exceptions.Cloudflare_Error_IUAM( 'Unable to parse Cloudflare anti-bots page: {}'.format( getattr(e, 'message', e) ) @@ -311,7 +344,7 @@ class CloudScraper(Session): 'url': '{}://{}{}'.format( hostParsed.scheme, hostParsed.netloc, - challengeUUID + self.unescape(challengeUUID) ), 'data': payload } @@ -320,8 +353,7 @@ class CloudScraper(Session): # Try to solve the reCaptcha challenge via 3rd party. # ------------------------------------------------------------------------------- # - @staticmethod - def reCaptcha_Challenge_Response(provider, provider_params, body, url): + def reCaptcha_Challenge_Response(self, provider, provider_params, body, url): try: payload = re.search( r'(name="r"\svalue="(?P<r>\S+)"|).*?challenge-form" action="(?P<challengeUUID>\S+)".*?' @@ -330,7 +362,7 @@ class CloudScraper(Session): ).groupdict() except (AttributeError): sys.tracebacklimit = 0 - raise RuntimeError( + raise cloudflare_exceptions.Cloudflare_Error_reCaptcha( "Cloudflare reCaptcha detected, unfortunately we can't extract the parameters correctly." ) @@ -339,7 +371,7 @@ class CloudScraper(Session): 'url': '{}://{}{}'.format( hostParsed.scheme, hostParsed.netloc, - payload.get('challengeUUID', '') + self.unescape(payload.get('challengeUUID', '')) ), 'data': OrderedDict([ ('r', payload.get('r', '')), @@ -377,7 +409,7 @@ class CloudScraper(Session): if not self.recaptcha or not isinstance(self.recaptcha, dict) or not self.recaptcha.get('provider'): sys.tracebacklimit = 0 - raise RuntimeError( + raise cloudflare_exceptions.Cloudflare_reCaptcha_Provider( "Cloudflare reCaptcha detected, unfortunately you haven't loaded an anti reCaptcha provider " "correctly via the 'recaptcha' parameter." 
) @@ -413,7 +445,7 @@ class CloudScraper(Session): self.delay = delay except (AttributeError, ValueError): sys.tracebacklimit = 0 - raise RuntimeError("Cloudflare IUAM possibility malformed, issue extracing delay value.") + raise cloudflare_exceptions.Cloudflare_Error_IUAM("Cloudflare IUAM possibly malformed, issue extracting delay value.") sleep(self.delay) @@ -473,34 +505,25 @@ class CloudScraper(Session): return challengeSubmitResponse else: cloudflare_kwargs = deepcopy(kwargs) + cloudflare_kwargs['headers'] = updateAttr( + cloudflare_kwargs, + 'headers', + {'Referer': challengeSubmitResponse.url} + ) if not urlparse(challengeSubmitResponse.headers['Location']).netloc: - cloudflare_kwargs['headers'] = updateAttr( - cloudflare_kwargs, - 'headers', - {'Referer': '{}://{}'.format(urlParsed.scheme, urlParsed.netloc)} - ) - return self.request( - resp.request.method, - '{}://{}{}'.format( - urlParsed.scheme, - urlParsed.netloc, - challengeSubmitResponse.headers['Location'] - ), - **cloudflare_kwargs + redirect_location = urljoin( + challengeSubmitResponse.url, + challengeSubmitResponse.headers['Location'] ) else: - redirectParsed = urlparse(challengeSubmitResponse.headers['Location']) - cloudflare_kwargs['headers'] = updateAttr( - cloudflare_kwargs, - 'headers', - {'Referer': '{}://{}'.format(redirectParsed.scheme, redirectParsed.netloc)} - ) - return self.request( - resp.request.method, - challengeSubmitResponse.headers['Location'], - **cloudflare_kwargs - ) + redirect_location = challengeSubmitResponse.headers['Location'] + + return self.request( + resp.request.method, + redirect_location, + **cloudflare_kwargs + ) # ------------------------------------------------------------------------------- # # We shouldn't be here... @@ -561,7 +584,7 @@ class CloudScraper(Session): break else: sys.tracebacklimit = 0 - raise RuntimeError( + raise cloudflare_exceptions.Cloudflare_Error_IUAM( "Unable to find Cloudflare cookies. Does the site actually " "have Cloudflare IUAM (I'm Under Attack Mode) enabled?" ) diff --git a/lib/cloudscraper/exceptions/__init__.py b/lib/cloudscraper/exceptions/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/lib/cloudscraper/exceptions/cloudflare_exceptions.py b/lib/cloudscraper/exceptions/cloudflare_exceptions.py new file mode 100644 index 00000000..e7a25e16 --- /dev/null +++ b/lib/cloudscraper/exceptions/cloudflare_exceptions.py @@ -0,0 +1,31 @@ +# ------------------------------------------------------------------------------- # + + +class Cloudflare_Loop_Protection(Exception): + """ + Raise error for recursive depth protection + """ + + +class Cloudflare_Block(Exception): + """ + Raise error for Cloudflare 1020 block + """ + + +class Cloudflare_Error_IUAM(Exception): + """ + Raise error for problem extracting IUAM parameters from Cloudflare payload + """ + + +class Cloudflare_Error_reCaptcha(Exception): + """ + Raise error for problem extracting reCaptcha parameters from Cloudflare payload + """ + + +class Cloudflare_reCaptcha_Provider(Exception): + """ + Raise error for reCaptcha from Cloudflare, no provider loaded.
+ """ diff --git a/lib/cloudscraper/exceptions/reCaptcha_exceptions.py b/lib/cloudscraper/exceptions/reCaptcha_exceptions.py new file mode 100644 index 00000000..4a263d1d --- /dev/null +++ b/lib/cloudscraper/exceptions/reCaptcha_exceptions.py @@ -0,0 +1,49 @@ +# ------------------------------------------------------------------------------- # + + +class reCaptcha_Service_Unavailable(Exception): + """ + Raise error for external services that cannot be reached + """ + + +class reCaptcha_Error_From_API(Exception): + """ + Raise error for error from API response. + """ + + +class reCaptcha_Account_Error(Exception): + """ + Raise error for reCaptcha provider account problem. + """ + + +class reCaptcha_Timeout(Exception): + """ + Raise error for reCaptcha provider taking too long. + """ + + +class reCaptcha_Bad_Parameter(NotImplementedError): + """ + Raise error for bad or missing Parameter. + """ + + +class reCaptcha_Bad_Job_ID(Exception): + """ + Raise error for invalid job id. + """ + + +class reCaptcha_Report_Error(Exception): + """ + Raise error for reCaptcha provider unable to report bad solve. + """ + + +class reCaptcha_Import_Error(Exception): + """ + Raise error for reCaptcha, cannot import a module. + """ diff --git a/lib/cloudscraper/reCaptcha/2captcha.py b/lib/cloudscraper/reCaptcha/2captcha.py index 0a486d3b..8a787d0e 100644 --- a/lib/cloudscraper/reCaptcha/2captcha.py +++ b/lib/cloudscraper/reCaptcha/2captcha.py @@ -1,13 +1,17 @@ from __future__ import absolute_import import requests +import reCaptcha_exceptions try: import polling except ImportError: import sys sys.tracebacklimit = 0 - raise RuntimeError("Please install the python module 'polling' via pip or download it from https://github.com/justiniso/polling/") + raise reCaptcha_exceptions.reCaptcha_Import_Error( + "Please install the python module 'polling' via pip or download it from " + "https://github.com/justiniso/polling/" + ) from . import reCaptcha @@ -24,7 +28,7 @@ class captchaSolver(reCaptcha): @staticmethod def checkErrorStatus(response, request_type): if response.status_code in [500, 502]: - raise RuntimeError('2Captcha: Server Side Error {}'.format(response.status_code)) + raise reCaptcha_exceptions.reCaptcha_Service_Unavailable('2Captcha: Server Side Error {}'.format(response.status_code)) errors = { 'in.php': { @@ -71,16 +75,23 @@ class captchaSolver(reCaptcha): } if response.json().get('status') is False and response.json().get('request') in errors.get(request_type): - raise RuntimeError('{} {}'.format(response.json().get('request'), errors.get(request_type).get(response.json().get('request')))) + raise reCaptcha_exceptions.reCaptcha_Error_From_API( + '{} {}'.format( + response.json().get('request'), + errors.get(request_type).get(response.json().get('request')) + ) + ) # ------------------------------------------------------------------------------- # def reportJob(self, jobID): if not jobID: - raise RuntimeError("2Captcha: Error bad job id to request reCaptcha.") + raise reCaptcha_exceptions.reCaptcha_Bad_Job_ID( + "2Captcha: Error bad job id to request reCaptcha." 
+ ) def _checkRequest(response): - if response.status_code in [200, 303] and response.json().get('status') == 1: + if response.ok and response.json().get('status') == 1: return response self.checkErrorStatus(response, 'res.php') @@ -105,7 +116,9 @@ class captchaSolver(reCaptcha): if response: return True else: - raise RuntimeError("2Captcha: Error - Failed to report bad reCaptcha solve.") + raise reCaptcha_exceptions.reCaptcha_Report_Error( + "2Captcha: Error - Failed to report bad reCaptcha solve." + ) # ------------------------------------------------------------------------------- # @@ -114,7 +127,7 @@ class captchaSolver(reCaptcha): raise RuntimeError("2Captcha: Error bad job id to request reCaptcha.") def _checkRequest(response): - if response.status_code in [200, 303] and response.json().get('status') == 1: + if response.ok and response.json().get('status') == 1: return response self.checkErrorStatus(response, 'res.php') @@ -139,13 +152,15 @@ class captchaSolver(reCaptcha): if response: return response.json().get('request') else: - raise RuntimeError("2Captcha: Error failed to solve reCaptcha.") + raise reCaptcha_exceptions.reCaptcha_Timeout( + "2Captcha: Error failed to solve reCaptcha." + ) # ------------------------------------------------------------------------------- # def requestSolve(self, site_url, site_key): def _checkRequest(response): - if response.status_code in [200, 303] and response.json().get("status") == 1 and response.json().get('request'): + if response.ok and response.json().get("status") == 1 and response.json().get('request'): return response self.checkErrorStatus(response, 'in.php') @@ -173,7 +188,9 @@ class captchaSolver(reCaptcha): if response: return response.json().get('request') else: - raise RuntimeError('2Captcha: Error no job id was returned.') + raise reCaptcha_exceptions.reCaptcha_Bad_Job_ID( + '2Captcha: Error no job id was returned.' + ) # ------------------------------------------------------------------------------- # @@ -181,7 +198,9 @@ class captchaSolver(reCaptcha): jobID = None if not reCaptchaParams.get('api_key'): - raise ValueError("2Captcha: Missing api_key parameter.") + raise reCaptcha_exceptions.reCaptcha_Bad_Parameter( + "2Captcha: Missing api_key parameter." 
+ ) self.api_key = reCaptchaParams.get('api_key') @@ -196,9 +215,13 @@ class captchaSolver(reCaptcha): if jobID: self.reportJob(jobID) except polling.TimeoutException: - raise RuntimeError("2Captcha: reCaptcha solve took to long and also failed reporting the job.") + raise reCaptcha_exceptions.reCaptcha_Timeout( + "2Captcha: reCaptcha solve took too long and also failed reporting job id {}.".format(jobID) + ) - raise RuntimeError("2Captcha: reCaptcha solve took to long to execute, aborting.") + raise reCaptcha_exceptions.reCaptcha_Timeout( + "2Captcha: reCaptcha solve took too long to execute job id {}, aborting.".format(jobID) + ) # ------------------------------------------------------------------------------- # diff --git a/lib/cloudscraper/reCaptcha/9kw.py b/lib/cloudscraper/reCaptcha/9kw.py new file mode 100644 index 00000000..d887cf80 --- /dev/null +++ b/lib/cloudscraper/reCaptcha/9kw.py @@ -0,0 +1,202 @@ +from __future__ import absolute_import + +import re +import requests +import reCaptcha_exceptions + +try: + import polling +except ImportError: + import sys + sys.tracebacklimit = 0 + raise reCaptcha_exceptions.reCaptcha_Import_Error( + "Please install the python module 'polling' via pip or download it from " + "https://github.com/justiniso/polling/" + ) + +from . import reCaptcha + + +class captchaSolver(reCaptcha): + + def __init__(self): + super(captchaSolver, self).__init__('9kw') + self.host = 'https://www.9kw.eu/index.cgi' + self.maxtimeout = 180 + self.session = requests.Session() + + # ------------------------------------------------------------------------------- # + + @staticmethod + def checkErrorStatus(response): + if response.status_code in [500, 502]: + raise reCaptcha_exceptions.reCaptcha_Service_Unavailable( + '9kw: Server Side Error {}'.format(response.status_code) + ) + + error_codes = { + 1: 'No API Key available.', + 2: 'No API key found.', + 3: 'No active API key found.', + 4: 'API Key has been disabled by the operator.
', + 5: 'No user found.', + 6: 'No data found.', + 7: 'Found No ID.', + 8: 'found No captcha.', + 9: 'No image found.', + 10: 'Image size not allowed.', + 11: 'credit is not sufficient.', + 12: 'what was done.', + 13: 'No answer contain.', + 14: 'Captcha already been answered.', + 15: 'Captcha to quickly filed.', + 16: 'JD check active.', + 17: 'Unknown problem.', + 18: 'Found No ID.', + 19: 'Incorrect answer.', + 20: 'Do not timely filed (Incorrect UserID).', + 21: 'Link not allowed.', + 22: 'Prohibited submit.', + 23: 'Entering prohibited.', + 24: 'Too little credit.', + 25: 'No entry found.', + 26: 'No Conditions accepted.', + 27: 'No coupon code found in the database.', + 28: 'Already unused voucher code.', + 29: 'maxTimeout under 60 seconds.', + 30: 'User not found.', + 31: 'An account is not yet 24 hours in system.', + 32: 'An account does not have the full rights.', + 33: 'Plugin needed a update.', + 34: 'No HTTPS allowed.', + 35: 'No HTTP allowed.', + 36: 'Source not allowed.', + 37: 'Transfer denied.', + 38: 'Incorrect answer without space', + 39: 'Incorrect answer with space', + 40: 'Incorrect answer with not only numbers', + 41: 'Incorrect answer with not only A-Z, a-z', + 42: 'Incorrect answer with not only 0-9, A-Z, a-z', + 43: 'Incorrect answer with not only [0-9,- ]', + 44: 'Incorrect answer with not only [0-9A-Za-z,- ]', + 45: 'Incorrect answer with not only coordinates', + 46: 'Incorrect answer with not only multiple coordinates', + 47: 'Incorrect answer with not only data', + 48: 'Incorrect answer with not only rotate number', + 49: 'Incorrect answer with not only text', + 50: 'Incorrect answer with not only text and too short', + 51: 'Incorrect answer with not enough chars', + 52: 'Incorrect answer with too many chars', + 53: 'Incorrect answer without no or yes', + 54: 'Assignment was not found.' + } + + if response.text.startswith('{'): + if response.json().get('error'): + raise reCaptcha_exceptions.reCaptcha_Error_From_API(error_codes.get(int(response.json().get('error')))) + else: + error_code = int(re.search(r'^00(?P<error_code>\d+)', response.text).groupdict().get('error_code', 0)) + if error_code: + raise reCaptcha_exceptions.reCaptcha_Error_From_API(error_codes.get(error_code)) + + # ------------------------------------------------------------------------------- # + + def requestJob(self, jobID): + if not jobID: + raise reCaptcha_exceptions.reCaptcha_Bad_Job_ID( + "9kw: Error bad job id to request reCaptcha against." 
+ ) + + def _checkRequest(response): + if response.ok and response.json().get('answer') != 'NO DATA': + return response + + self.checkErrorStatus(response) + + return None + + response = polling.poll( + lambda: self.session.get( + self.host, + params={ + 'apikey': self.api_key, + 'action': 'usercaptchacorrectdata', + 'id': jobID, + 'info': 1, + 'json': 1 + } + ), + check_success=_checkRequest, + step=10, + timeout=(self.maxtimeout + 10) + ) + + if response: + return response.json().get('answer') + else: + raise reCaptcha_exceptions.reCaptcha_Timeout("9kw: Error failed to solve reCaptcha.") + + # ------------------------------------------------------------------------------- # + + def requestSolve(self, site_url, site_key): + def _checkRequest(response): + if response.ok and response.text.startswith('{') and response.json().get('captchaid'): + return response + + self.checkErrorStatus(response) + + return None + + response = polling.poll( + lambda: self.session.post( + self.host, + data={ + 'apikey': self.api_key, + 'action': 'usercaptchaupload', + 'interactive': 1, + 'file-upload-01': site_key, + 'oldsource': 'recaptchav2', + 'pageurl': site_url, + 'maxtimeout': self.maxtimeout, + 'json': 1 + }, + allow_redirects=False + ), + check_success=_checkRequest, + step=5, + timeout=(self.maxtimeout + 10) + ) + + if response: + return response.json().get('captchaid') + else: + raise reCaptcha_exceptions.reCaptcha_Bad_Job_ID('9kw: Error no valid job id was returned.') + + # ------------------------------------------------------------------------------- # + + def getCaptchaAnswer(self, site_url, site_key, reCaptchaParams): + jobID = None + + if not reCaptchaParams.get('api_key'): + raise reCaptcha_exceptions.reCaptcha_Bad_Parameter("9kw: Missing api_key parameter.") + + self.api_key = reCaptchaParams.get('api_key') + + if reCaptchaParams.get('maxtimeout'): + self.maxtimeout = reCaptchaParams.get('maxtimeout') + + if reCaptchaParams.get('proxy'): + self.session.proxies = reCaptchaParams.get('proxies') + + try: + jobID = self.requestSolve(site_url, site_key) + return self.requestJob(jobID) + except polling.TimeoutException: + raise reCaptcha_exceptions.reCaptcha_Timeout( + "9kw: reCaptcha solve took too long to execute 'captchaid' {}, aborting.".format(jobID) + ) + +# ------------------------------------------------------------------------------- # + + +captchaSolver() diff --git a/lib/cloudscraper/reCaptcha/anticaptcha.py b/lib/cloudscraper/reCaptcha/anticaptcha.py index d69a9172..15993f86 100644 --- a/lib/cloudscraper/reCaptcha/anticaptcha.py +++ b/lib/cloudscraper/reCaptcha/anticaptcha.py @@ -1,12 +1,16 @@ from __future__ import absolute_import import sys +import reCaptcha_exceptions try: from python_anticaptcha import AnticaptchaClient, NoCaptchaTaskProxylessTask except ImportError: sys.tracebacklimit = 0 - raise RuntimeError("Please install the python module 'python_anticaptcha' via pip or download it from https://github.com/ad-m/python-anticaptcha") + raise reCaptcha_exceptions.reCaptcha_Import_Error( + "Please install the python module 'python_anticaptcha' via pip or download it from " + "https://github.com/ad-m/python-anticaptcha" + ) from . 
import reCaptcha @@ -16,9 +20,11 @@ class captchaSolver(reCaptcha): def __init__(self): super(captchaSolver, self).__init__('anticaptcha') + # ------------------------------------------------------------------------------- # + def getCaptchaAnswer(self, site_url, site_key, reCaptchaParams): if not reCaptchaParams.get('api_key'): - raise ValueError("reCaptcha provider 'anticaptcha' was not provided an 'api_key' parameter.") + raise reCaptcha_exceptions.reCaptcha_Bad_Parameter("anticaptcha: Missing api_key parameter.") client = AnticaptchaClient(reCaptchaParams.get('api_key')) @@ -29,10 +35,14 @@ class captchaSolver(reCaptcha): if not hasattr(client, 'createTaskSmee'): sys.tracebacklimit = 0 - raise RuntimeError("Please upgrade 'python_anticaptcha' via pip or download it from https://github.com/ad-m/python-anticaptcha") + raise reCaptcha_exceptions.reCaptcha_Import_Error( + "Please upgrade 'python_anticaptcha' via pip or download it from https://github.com/ad-m/python-anticaptcha" + ) job = client.createTaskSmee(task) return job.get_solution_response() +# ------------------------------------------------------------------------------- # + captchaSolver() diff --git a/lib/cloudscraper/reCaptcha/deathbycaptcha.py b/lib/cloudscraper/reCaptcha/deathbycaptcha.py index 530c8739..dc0e4997 100644 --- a/lib/cloudscraper/reCaptcha/deathbycaptcha.py +++ b/lib/cloudscraper/reCaptcha/deathbycaptcha.py @@ -2,13 +2,17 @@ from __future__ import absolute_import import json import requests +import reCaptcha_exceptions try: import polling except ImportError: import sys sys.tracebacklimit = 0 - raise RuntimeError("Please install the python module 'polling' via pip or download it from https://github.com/justiniso/polling/") + raise reCaptcha_exceptions.reCaptcha_Import_Error( + "Please install the python module 'polling' via pip or download it from " + "https://github.com/justiniso/polling/" + ) from . 
import reCaptcha @@ -20,7 +24,7 @@ class captchaSolver(reCaptcha): self.host = 'http://api.dbcapi.me/api' self.session = requests.Session() - # ------------------------------------------------------------------------------- # + # ------------------------------------------------------------------------------- # @staticmethod def checkErrorStatus(response): @@ -34,21 +38,21 @@ class captchaSolver(reCaptcha): ) if response.status_code in errors: - raise RuntimeError(errors.get(response.status_code)) + raise reCaptcha_exceptions.reCaptcha_Service_Unavailable(errors.get(response.status_code)) - # ------------------------------------------------------------------------------- # + # ------------------------------------------------------------------------------- # def login(self, username, password): self.username = username self.password = password def _checkRequest(response): - if response.status_code == 200: + if response.ok: if response.json().get('is_banned'): - raise RuntimeError('DeathByCaptcha: Your account is banned.') + raise reCaptcha_exceptions.reCaptcha_Account_Error('DeathByCaptcha: Your account is banned.') if response.json().get('balanace') == 0: - raise RuntimeError('DeathByCaptcha: insufficient credits.') + raise reCaptcha_exceptions.reCaptcha_Account_Error('DeathByCaptcha: insufficient credits.') return response @@ -72,11 +76,13 @@ class captchaSolver(reCaptcha): self.debugRequest(response) - # ------------------------------------------------------------------------------- # + # ------------------------------------------------------------------------------- # def reportJob(self, jobID): if not jobID: - raise RuntimeError("DeathByCaptcha: Error bad job id to report failed reCaptcha.") + raise reCaptcha_exceptions.reCaptcha_Bad_Job_ID( + "DeathByCaptcha: Error bad job id to report failed reCaptcha." + ) def _checkRequest(response): if response.status_code == 200: @@ -103,16 +109,20 @@ class captchaSolver(reCaptcha): if response: return True else: - raise RuntimeError("DeathByCaptcha: Error report failed reCaptcha.") + raise reCaptcha_exceptions.reCaptcha_Report_Error( + "DeathByCaptcha: Error report failed reCaptcha." + ) - # ------------------------------------------------------------------------------- # + # ------------------------------------------------------------------------------- # def requestJob(self, jobID): if not jobID: - raise RuntimeError("DeathByCaptcha: Error bad job id to request reCaptcha.") + raise reCaptcha_exceptions.reCaptcha_Bad_Job_ID( + "DeathByCaptcha: Error bad job id to request reCaptcha." + ) def _checkRequest(response): - if response.status_code in [200, 303] and response.json().get('text'): + if response.ok and response.json().get('text'): return response self.checkErrorStatus(response) @@ -132,13 +142,15 @@ class captchaSolver(reCaptcha): if response: return response.json().get('text') else: - raise RuntimeError("DeathByCaptcha: Error failed to solve reCaptcha.") + raise reCaptcha_exceptions.reCaptcha_Timeout( + "DeathByCaptcha: Error failed to solve reCaptcha." 
+ ) - # ------------------------------------------------------------------------------- # + # ------------------------------------------------------------------------------- # def requestSolve(self, site_url, site_key): def _checkRequest(response): - if response.status_code in [200, 303] and response.json().get("is_correct") and response.json().get('captcha'): + if response.ok and response.json().get("is_correct") and response.json().get('captcha'): return response self.checkErrorStatus(response) @@ -168,16 +180,20 @@ class captchaSolver(reCaptcha): if response: return response.json().get('captcha') else: - raise RuntimeError('DeathByCaptcha: Error no job id was returned.') + raise reCaptcha_exceptions.reCaptcha_Bad_Job_ID( + 'DeathByCaptcha: Error no job id was returned.' + ) - # ------------------------------------------------------------------------------- # + # ------------------------------------------------------------------------------- # def getCaptchaAnswer(self, site_url, site_key, reCaptchaParams): jobID = None for param in ['username', 'password']: if not reCaptchaParams.get(param): - raise ValueError("DeathByCaptcha: Missing '{}' parameter.".format(param)) + raise reCaptcha_exceptions.reCaptcha_Bad_Parameter( + "DeathByCaptcha: Missing '{}' parameter.".format(param) + ) setattr(self, param, reCaptchaParams.get(param)) if reCaptchaParams.get('proxy'): @@ -191,9 +207,13 @@ class captchaSolver(reCaptcha): if jobID: self.reportJob(jobID) except polling.TimeoutException: - raise RuntimeError("DeathByCaptcha: reCaptcha solve took to long and also failed reporting the job.") + raise reCaptcha_exceptions.reCaptcha_Timeout( + "DeathByCaptcha: reCaptcha solve took too long and also failed reporting the job id {}.".format(jobID) + ) - raise RuntimeError("DeathByCaptcha: reCaptcha solve took to long to execute, aborting.") + raise reCaptcha_exceptions.reCaptcha_Timeout( + "DeathByCaptcha: reCaptcha solve took too long to execute job id {}, aborting.".format(jobID) + ) # ------------------------------------------------------------------------------- # diff --git a/lib/cloudscraper/user_agent/__init__.py b/lib/cloudscraper/user_agent/__init__.py index ccd3cb4c..df47ca26 100644 --- a/lib/cloudscraper/user_agent/__init__.py +++ b/lib/cloudscraper/user_agent/__init__.py @@ -47,7 +47,7 @@ class User_Agent(): for browser in user_agents: for release in user_agents[browser]['releases']: for platform in ['mobile', 'desktop']: - if re.search(self.custom, ' '.join(user_agents[browser]['releases'][release]['User-Agent'][platform])): + if re.search(re.escape(self.custom), ' '.join(user_agents[browser]['releases'][release]['User-Agent'][platform])): self.browser = browser self.loadHeaders(user_agents, release) self.headers['User-Agent'] = self.custom @@ -74,10 +74,11 @@ class User_Agent(): sys.tracebacklimit = 0 raise RuntimeError("Sorry you can't have mobile and desktop disabled at the same time.") - user_agents = json.load( - open(os.path.join(os.path.dirname(__file__), 'browsers.json'), 'r'), - object_pairs_hook=OrderedDict - ) + with open(os.path.join(os.path.dirname(__file__), 'browsers.json'), 'r') as fp: + user_agents = json.load( + fp, + object_pairs_hook=OrderedDict + ) if self.custom: if not self.tryMatchCustom(user_agents): diff --git a/lib/cloudscraper/user_agent/browsers.json b/lib/cloudscraper/user_agent/browsers.json index d3c610bc..ef6d8591 100644 --- a/lib/cloudscraper/user_agent/browsers.json +++ b/lib/cloudscraper/user_agent/browsers.json @@ -13,9 +13,14 @@ 
"TLS_CHACHA20_POLY1305_SHA256", "ECDHE-ECDSA-AES128-GCM-SHA256", "ECDHE-RSA-AES128-GCM-SHA256", + "ECDHE-ECDSA-AES256-GCM-SHA384", + "ECDHE-RSA-AES256-GCM-SHA384", "ECDHE-ECDSA-CHACHA20-POLY1305", + "ECDHE-RSA-CHACHA20-POLY1305", + "ECDHE-RSA-AES256-SHA", "AES128-GCM-SHA256", "AES256-GCM-SHA384", + "AES128-SHA", "AES256-SHA" ], "releases": { @@ -12814,10 +12819,15 @@ "ECDHE-ECDSA-AES128-GCM-SHA256", "ECDHE-RSA-AES128-GCM-SHA256", "ECDHE-ECDSA-CHACHA20-POLY1305", + "ECDHE-RSA-CHACHA20-POLY1305", + "ECDHE-ECDSA-AES256-GCM-SHA384", + "ECDHE-RSA-AES256-GCM-SHA384", "ECDHE-ECDSA-AES256-SHA", "ECDHE-ECDSA-AES128-SHA", + "ECDHE-RSA-AES256-SHA", "DHE-RSA-AES128-SHA", "DHE-RSA-AES256-SHA", + "AES128-SHA", "AES256-SHA" ], "releases": { diff --git a/lib/future/backports/email/base64mime.py b/lib/future/backports/email/base64mime.py index 416d612e..296392a6 100644 --- a/lib/future/backports/email/base64mime.py +++ b/lib/future/backports/email/base64mime.py @@ -28,6 +28,7 @@ from __future__ import division from __future__ import absolute_import from future.builtins import range from future.builtins import bytes +from future.builtins import str __all__ = [ 'body_decode', diff --git a/lib/future/backports/test/pystone.py b/lib/future/backports/test/pystone.py old mode 100644 new mode 100755 diff --git a/lib/future/backports/urllib/error.py b/lib/future/backports/urllib/error.py index 6bcbeafb..a473e445 100644 --- a/lib/future/backports/urllib/error.py +++ b/lib/future/backports/urllib/error.py @@ -11,9 +11,9 @@ an application may want to handle an exception like a regular response. """ from __future__ import absolute_import, division, unicode_literals -from ... import standard_library +from future import standard_library -from . import response as urllib_response +from future.backports.urllib import response as urllib_response __all__ = ['URLError', 'HTTPError', 'ContentTooShortError'] diff --git a/lib/future/backports/urllib/parse.py b/lib/future/backports/urllib/parse.py index 2def4db8..04e52d49 100644 --- a/lib/future/backports/urllib/parse.py +++ b/lib/future/backports/urllib/parse.py @@ -87,7 +87,7 @@ def clear_cache(): # decoding and encoding. 
diff --git a/lib/future/backports/urllib/parse.py b/lib/future/backports/urllib/parse.py
index 2def4db8..04e52d49 100644
--- a/lib/future/backports/urllib/parse.py
+++ b/lib/future/backports/urllib/parse.py
@@ -87,7 +87,7 @@ def clear_cache():
 # decoding and encoding. If valid use cases are
 # presented, we may relax this by using latin-1
 # decoding internally for 3.3
-_implicit_encoding = 'utf8'
+_implicit_encoding = 'ascii'
 _implicit_errors = 'strict'

 def _noop(obj):
@@ -122,7 +122,7 @@ class _ResultMixinStr(object):
     """Standard approach to encoding parsed results from str to bytes"""
     __slots__ = ()

-    def encode(self, encoding='utf8', errors='strict'):
+    def encode(self, encoding='ascii', errors='strict'):
         return self._encoded_counterpart(*(x.encode(encoding, errors) for x in self))


@@ -130,7 +130,7 @@ class _ResultMixinBytes(object):
     """Standard approach to decoding parsed results from bytes to str"""
     __slots__ = ()

-    def decode(self, encoding='utf8', errors='strict'):
+    def decode(self, encoding='ascii', errors='strict'):
         return self._decoded_counterpart(*(x.decode(encoding, errors) for x in self))


@@ -730,7 +730,7 @@ def quote_from_bytes(bs, safe='/'):
     ###
     if isinstance(safe, str):
         # Normalize 'safe' by converting to bytes and removing non-ASCII chars
-        safe = str(safe).encode('utf8', 'ignore')
+        safe = str(safe).encode('ascii', 'ignore')
     else:
         ### For Python-Future:
         safe = bytes(safe)
diff --git a/lib/future/backports/urllib/request.py b/lib/future/backports/urllib/request.py
index aa28d1e7..baee5401 100644
--- a/lib/future/backports/urllib/request.py
+++ b/lib/future/backports/urllib/request.py
@@ -827,7 +827,7 @@ class ProxyHandler(BaseHandler):

         if user and password:
             user_pass = '%s:%s' % (unquote(user), unquote(password))
-            creds = base64.b64encode(user_pass.encode()).decode("utf8")
+            creds = base64.b64encode(user_pass.encode()).decode("ascii")
             req.add_header('Proxy-authorization', 'Basic ' + creds)
         hostport = unquote(hostport)
         req.set_proxy(hostport, proxy_type)
@@ -977,7 +977,7 @@ class AbstractBasicAuthHandler(object):
         user, pw = self.passwd.find_user_password(realm, host)
         if pw is not None:
             raw = "%s:%s" % (user, pw)
-            auth = "Basic " + base64.b64encode(raw.encode()).decode("utf8")
+            auth = "Basic " + base64.b64encode(raw.encode()).decode("ascii")
             if req.headers.get(self.auth_header, None) == auth:
                 return None
             req.add_unredirected_header(self.auth_header, auth)
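The utf8 → ascii swaps in these parse.py and request.py hunks are safe because Base64 output only ever contains ASCII characters, so 'ascii' states the intent precisely and fails loudly if something that is not Base64 slips in. A small self-contained illustration (the credentials are made up):

    import base64

    user, password = "aladdin", "open sesame"
    raw = "%s:%s" % (user, password)
    # Base64's alphabet is a strict subset of ASCII, so decode("ascii")
    # can never fail here and documents the expected content:
    auth = "Basic " + base64.b64encode(raw.encode()).decode("ascii")
    # -> 'Basic YWxhZGRpbjpvcGVuIHNlc2FtZQ=='
    headers = {"Authorization": auth}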
s = "%s:%s:%s:" % (self.nonce_count, nonce, time.ctime()) - b = s.encode("utf8") + _randombytes(8) + b = s.encode("ascii") + _randombytes(8) dig = hashlib.sha1(b).hexdigest() return dig[:16] @@ -1147,9 +1147,9 @@ class AbstractDigestAuthHandler(object): def get_algorithm_impls(self, algorithm): # lambdas assume digest modules are imported at the top level if algorithm == 'MD5': - H = lambda x: hashlib.md5(x.encode("utf8")).hexdigest() + H = lambda x: hashlib.md5(x.encode("ascii")).hexdigest() elif algorithm == 'SHA': - H = lambda x: hashlib.sha1(x.encode("utf8")).hexdigest() + H = lambda x: hashlib.sha1(x.encode("ascii")).hexdigest() # XXX MD5-sess KD = lambda s, d: H("%s:%s" % (s, d)) return H, KD @@ -1829,13 +1829,13 @@ class URLopener(object): if proxy_passwd: proxy_passwd = unquote(proxy_passwd) - proxy_auth = base64.b64encode(proxy_passwd.encode()).decode('utf8') + proxy_auth = base64.b64encode(proxy_passwd.encode()).decode('ascii') else: proxy_auth = None if user_passwd: user_passwd = unquote(user_passwd) - auth = base64.b64encode(user_passwd.encode()).decode('utf8') + auth = base64.b64encode(user_passwd.encode()).decode('ascii') else: auth = None http_conn = connection_factory(host) @@ -2040,7 +2040,7 @@ class URLopener(object): msg.append('Content-type: %s' % type) if encoding == 'base64': # XXX is this encoding/decoding ok? - data = base64.decodebytes(data.encode('utf8')).decode('latin-1') + data = base64.decodebytes(data.encode('ascii')).decode('latin-1') else: data = unquote(data) msg.append('Content-Length: %d' % len(data)) @@ -2498,17 +2498,7 @@ def _proxy_bypass_macosx_sysconf(host, proxy_settings): if sys.platform == 'darwin': - try: - from _scproxy import _get_proxy_settings, _get_proxies - except: - try: - # By default use environment variables - _get_proxy_settings = getproxies_environment - _get_proxies = proxy_bypass_environment - getproxies = getproxies_environment - proxy_bypass = proxy_bypass_environment - except: - pass + from _scproxy import _get_proxy_settings, _get_proxies def proxy_bypass_macosx_sysconf(host): proxy_settings = _get_proxy_settings() diff --git a/lib/future/backports/urllib/robotparser.py b/lib/future/backports/urllib/robotparser.py index 6f7abaf6..a0f36511 100644 --- a/lib/future/backports/urllib/robotparser.py +++ b/lib/future/backports/urllib/robotparser.py @@ -1,5 +1,5 @@ from __future__ import absolute_import, division, unicode_literals -from ...builtins import str +from future.builtins import str """ robotparser.py Copyright (C) 2000 Bastian Kleineidam @@ -13,8 +13,8 @@ from ...builtins import str """ # Was: import urllib.parse, urllib.request -from .. import urllib -from . import parse as _parse, request as _request +from future.backports import urllib +from future.backports.urllib import parse as _parse, request as _request urllib.parse = _parse urllib.request = _request diff --git a/lib/future/builtins/newround.py b/lib/future/builtins/newround.py index 394a2c63..e2976a76 100644 --- a/lib/future/builtins/newround.py +++ b/lib/future/builtins/newround.py @@ -2,6 +2,7 @@ ``python-future``: pure Python implementation of Python 3 round(). 
""" +from __future__ import division from future.utils import PYPY, PY26, bind_method # Use the decimal module for simplicity of implementation (and @@ -29,8 +30,6 @@ def newround(number, ndigits=None): if hasattr(number, '__round__'): return number.__round__(ndigits) - if ndigits < 0: - raise NotImplementedError('negative ndigits not supported yet') exponent = Decimal('10') ** (-ndigits) if PYPY: @@ -42,15 +41,19 @@ def newround(number, ndigits=None): d = number else: if not PY26: - d = Decimal.from_float(number).quantize(exponent, - rounding=ROUND_HALF_EVEN) + d = Decimal.from_float(number) else: - d = from_float_26(number).quantize(exponent, rounding=ROUND_HALF_EVEN) + d = from_float_26(number) + + if ndigits < 0: + result = newround(d / exponent) * exponent + else: + result = d.quantize(exponent, rounding=ROUND_HALF_EVEN) if return_int: - return int(d) + return int(result) else: - return float(d) + return float(result) ### From Python 2.7's decimal.py. Only needed to support Py2.6: diff --git a/lib/future/moves/tkinter/filedialog.py b/lib/future/moves/tkinter/filedialog.py index 973923e2..6a6f03ca 100644 --- a/lib/future/moves/tkinter/filedialog.py +++ b/lib/future/moves/tkinter/filedialog.py @@ -10,3 +10,9 @@ else: except ImportError: raise ImportError('The FileDialog module is missing. Does your Py2 ' 'installation include tkinter?') + + try: + from tkFileDialog import * + except ImportError: + raise ImportError('The tkFileDialog module is missing. Does your Py2 ' + 'installation include tkinter?') diff --git a/lib/future/standard_library/__init__.py b/lib/future/standard_library/__init__.py index dcfc58fe..cff02f95 100644 --- a/lib/future/standard_library/__init__.py +++ b/lib/future/standard_library/__init__.py @@ -450,63 +450,35 @@ def install_aliases(): # if hasattr(install_aliases, 'run_already'): # return for (newmodname, newobjname, oldmodname, oldobjname) in MOVES: - try: - __import__(newmodname) - # We look up the module in sys.modules because __import__ just returns the - # top-level package: - newmod = sys.modules[newmodname] - # newmod.__future_module__ = True + __import__(newmodname) + # We look up the module in sys.modules because __import__ just returns the + # top-level package: + newmod = sys.modules[newmodname] + # newmod.__future_module__ = True - __import__(oldmodname) - oldmod = sys.modules[oldmodname] + __import__(oldmodname) + oldmod = sys.modules[oldmodname] - obj = getattr(oldmod, oldobjname) - setattr(newmod, newobjname, obj) - except: - try: - flog.warning('*** FUTURE ERROR in module %s %s ' % (str(oldmod), str(oldobjname))) - except: - pass + obj = getattr(oldmod, oldobjname) + setattr(newmod, newobjname, obj) # Hack for urllib so it appears to have the same structure on Py2 as on Py3 - try: - import urllib - from future.backports.urllib import response - urllib.response = response - sys.modules['urllib.response'] = response - from future.backports.urllib import parse - urllib.parse = parse - sys.modules['urllib.parse'] = parse - from future.backports.urllib import error - urllib.error = error - sys.modules['urllib.error'] = error - except ImportError: - try: - flog.warning('*** FUTURE ERROR importing URLLIB.response, parse, error') - urllib.response = urllib - sys.modules['urllib.response'] = urllib - urllib.parse = urllib - sys.modules['urllib.parse'] = urllib - urllib.error = urllib - sys.modules['urllib.error'] = urllib - except: - pass - try: - from future.backports.urllib import request - urllib.request = request - sys.modules['urllib.request'] = 
diff --git a/lib/future/standard_library/__init__.py b/lib/future/standard_library/__init__.py
index dcfc58fe..cff02f95 100644
--- a/lib/future/standard_library/__init__.py
+++ b/lib/future/standard_library/__init__.py
@@ -450,63 +450,35 @@ def install_aliases():
     # if hasattr(install_aliases, 'run_already'):
     #     return
     for (newmodname, newobjname, oldmodname, oldobjname) in MOVES:
-        try:
-            __import__(newmodname)
-            # We look up the module in sys.modules because __import__ just returns the
-            # top-level package:
-            newmod = sys.modules[newmodname]
-            # newmod.__future_module__ = True
+        __import__(newmodname)
+        # We look up the module in sys.modules because __import__ just returns the
+        # top-level package:
+        newmod = sys.modules[newmodname]
+        # newmod.__future_module__ = True

-            __import__(oldmodname)
-            oldmod = sys.modules[oldmodname]
+        __import__(oldmodname)
+        oldmod = sys.modules[oldmodname]

-            obj = getattr(oldmod, oldobjname)
-            setattr(newmod, newobjname, obj)
-        except:
-            try:
-                flog.warning('*** FUTURE ERROR in module %s %s ' % (str(oldmod), str(oldobjname)))
-            except:
-                pass
+        obj = getattr(oldmod, oldobjname)
+        setattr(newmod, newobjname, obj)

     # Hack for urllib so it appears to have the same structure on Py2 as on Py3
-    try:
-        import urllib
-        from future.backports.urllib import response
-        urllib.response = response
-        sys.modules['urllib.response'] = response
-        from future.backports.urllib import parse
-        urllib.parse = parse
-        sys.modules['urllib.parse'] = parse
-        from future.backports.urllib import error
-        urllib.error = error
-        sys.modules['urllib.error'] = error
-    except ImportError:
-        try:
-            flog.warning('*** FUTURE ERROR importing URLLIB.response, parse, error')
-            urllib.response = urllib
-            sys.modules['urllib.response'] = urllib
-            urllib.parse = urllib
-            sys.modules['urllib.parse'] = urllib
-            urllib.error = urllib
-            sys.modules['urllib.error'] = urllib
-        except:
-            pass
-    try:
-        from future.backports.urllib import request
-        urllib.request = request
-        sys.modules['urllib.request'] = request
-        from future.backports.urllib import robotparser
-        urllib.robotparser = robotparser
-        sys.modules['urllib.robotparser'] = robotparser
-    except ImportError:
-        try:
-            flog.warning('*** FUTURE ERROR importing URLLIB.Request')
-            urllib.request = urllib
-            sys.modules['urllib.request'] = urllib
-            urllib.robotparser = urllib
-            sys.modules['urllib.robotparser'] = urllib
-        except:
-            pass
+    import urllib
+    from future.backports.urllib import request
+    from future.backports.urllib import response
+    from future.backports.urllib import parse
+    from future.backports.urllib import error
+    from future.backports.urllib import robotparser
+    urllib.request = request
+    urllib.response = response
+    urllib.parse = parse
+    urllib.error = error
+    urllib.robotparser = robotparser
+    sys.modules['urllib.request'] = request
+    sys.modules['urllib.response'] = response
+    sys.modules['urllib.parse'] = parse
+    sys.modules['urllib.error'] = error
+    sys.modules['urllib.robotparser'] = robotparser

     # Patch the test module so it appears to have the same structure on Py2 as on Py3
     try:
@@ -518,11 +490,8 @@ def install_aliases():
     except ImportError:
         pass
     else:
-        try:
-            test.support = support
-            sys.modules['test.support'] = support
-        except:
-            pass
+        test.support = support
+        sys.modules['test.support'] = support

     # Patch the dbm module so it appears to have the same structure on Py2 as on Py3
     try:
@@ -530,26 +499,23 @@ def install_aliases():
     except ImportError:
         pass
     else:
+        from future.moves.dbm import dumb
+        dbm.dumb = dumb
+        sys.modules['dbm.dumb'] = dumb
         try:
-            from future.moves.dbm import dumb
-            dbm.dumb = dumb
-            sys.modules['dbm.dumb'] = dumb
-            try:
-                from future.moves.dbm import gnu
-            except ImportError:
-                pass
-            else:
-                dbm.gnu = gnu
-                sys.modules['dbm.gnu'] = gnu
-            try:
-                from future.moves.dbm import ndbm
-            except ImportError:
-                pass
-            else:
-                dbm.ndbm = ndbm
-                sys.modules['dbm.ndbm'] = ndbm
-        except:
-            flog.warning('*** FUTURE ERROR importing MOVES.dbm')
+            from future.moves.dbm import gnu
+        except ImportError:
+            pass
+        else:
+            dbm.gnu = gnu
+            sys.modules['dbm.gnu'] = gnu
+        try:
+            from future.moves.dbm import ndbm
+        except ImportError:
+            pass
+        else:
+            dbm.ndbm = ndbm
+            sys.modules['dbm.ndbm'] = ndbm

     # install_aliases.run_already = True
diff --git a/lib/future/utils/__init__.py b/lib/future/utils/__init__.py
index 46bd96de..846d5da6 100644
--- a/lib/future/utils/__init__.py
+++ b/lib/future/utils/__init__.py
@@ -527,9 +527,9 @@ def implements_iterator(cls):
     return cls

 if PY3:
-    get_next = lambda x: x.next
-else:
     get_next = lambda x: x.__next__
+else:
+    get_next = lambda x: x.next


 def encode_filename(filename):
diff --git a/lib/generictools.py b/lib/generictools.py
index 1d755fc4..8c61e20d 100644
--- a/lib/generictools.py
+++ b/lib/generictools.py
@@ -11,7 +11,10 @@ import re
 import os
 import sys
 import urllib
-import urlparse
+try:
+    import urlparse
+except:
+    import urllib.parse as urlparse
 import datetime
 import time
 import traceback
@@ -161,7 +164,8 @@ def update_title(item):
         item.channel = new_item.channel    #Restuaramos el nombre del canal, por si lo habíamos cambiado
     if item.tmdb_stat == True:
         if new_item.contentSerieName:    #Si es serie...
- if config.get_setting("filter_languages", item.channel) >= 0: + filter_languages = config.get_setting("filter_languages", item.channel) + if filter_languages and filter_languages >= 0: item.title_from_channel = new_item.contentSerieName #Guardo el título incial para Filtertools item.contentSerieName = new_item.contentSerieName #Guardo el título incial para Filtertools else: diff --git a/lib/githash.py b/lib/githash.py index a60a3924..37b301d0 100644 --- a/lib/githash.py +++ b/lib/githash.py @@ -9,7 +9,6 @@ has been tested with Python2.7 and Python 3.4. from __future__ import print_function -import argparse import os import stat import sys diff --git a/lib/httplib2/__init__.py b/lib/httplib2/__init__.py index 10b5ed7f..e69de29b 100644 --- a/lib/httplib2/__init__.py +++ b/lib/httplib2/__init__.py @@ -1,2231 +0,0 @@ -"""Small, fast HTTP client library for Python. - -Features persistent connections, cache, and Google App Engine Standard -Environment support. -""" - -from __future__ import print_function - -__author__ = "Joe Gregorio (joe@bitworking.org)" -__copyright__ = "Copyright 2006, Joe Gregorio" -__contributors__ = [ - "Thomas Broyer (t.broyer@ltgt.net)", - "James Antill", - "Xavier Verges Farrero", - "Jonathan Feinberg", - "Blair Zajac", - "Sam Ruby", - "Louis Nyffenegger", - "Alex Yu", -] -__license__ = "MIT" -__version__ = '0.13.1' - -import base64 -import calendar -import copy -import email -import email.FeedParser -import email.Message -import email.Utils -import errno -import gzip -import httplib -import os -import random -import re -import StringIO -import sys -import time -import urllib -import urlparse -import zlib - -try: - from hashlib import sha1 as _sha, md5 as _md5 -except ImportError: - # prior to Python 2.5, these were separate modules - import sha - import md5 - - _sha = sha.new - _md5 = md5.new -import hmac -from gettext import gettext as _ -import socket - -try: - from httplib2 import socks -except ImportError: - try: - import socks - except (ImportError, AttributeError): - socks = None - -# Build the appropriate socket wrapper for ssl -ssl = None -ssl_SSLError = None -ssl_CertificateError = None -try: - import ssl # python 2.6 -except ImportError: - pass -if ssl is not None: - ssl_SSLError = getattr(ssl, "SSLError", None) - ssl_CertificateError = getattr(ssl, "CertificateError", None) - - -def _ssl_wrap_socket( - sock, key_file, cert_file, disable_validation, ca_certs, ssl_version, hostname -): - if disable_validation: - cert_reqs = ssl.CERT_NONE - else: - cert_reqs = ssl.CERT_REQUIRED - if ssl_version is None: - ssl_version = ssl.PROTOCOL_SSLv23 - - if hasattr(ssl, "SSLContext"): # Python 2.7.9 - context = ssl.SSLContext(ssl_version) - context.verify_mode = cert_reqs - context.check_hostname = cert_reqs != ssl.CERT_NONE - if cert_file: - context.load_cert_chain(cert_file, key_file) - if ca_certs: - context.load_verify_locations(ca_certs) - return context.wrap_socket(sock, server_hostname=hostname) - else: - return ssl.wrap_socket( - sock, - keyfile=key_file, - certfile=cert_file, - cert_reqs=cert_reqs, - ca_certs=ca_certs, - ssl_version=ssl_version, - ) - - -def _ssl_wrap_socket_unsupported( - sock, key_file, cert_file, disable_validation, ca_certs, ssl_version, hostname -): - if not disable_validation: - raise CertificateValidationUnsupported( - "SSL certificate validation is not supported without " - "the ssl module installed. To avoid this error, install " - "the ssl module, or explicity disable validation." 
- ) - ssl_sock = socket.ssl(sock, key_file, cert_file) - return httplib.FakeSocket(sock, ssl_sock) - - -if ssl is None: - _ssl_wrap_socket = _ssl_wrap_socket_unsupported - -if sys.version_info >= (2, 3): - from iri2uri import iri2uri -else: - - def iri2uri(uri): - return uri - - -def has_timeout(timeout): # python 2.6 - if hasattr(socket, "_GLOBAL_DEFAULT_TIMEOUT"): - return timeout is not None and timeout is not socket._GLOBAL_DEFAULT_TIMEOUT - return timeout is not None - - -__all__ = [ - "Http", - "Response", - "ProxyInfo", - "HttpLib2Error", - "RedirectMissingLocation", - "RedirectLimit", - "FailedToDecompressContent", - "UnimplementedDigestAuthOptionError", - "UnimplementedHmacDigestAuthOptionError", - "debuglevel", - "ProxiesUnavailableError", -] - -# The httplib debug level, set to a non-zero value to get debug output -debuglevel = 0 - -# A request will be tried 'RETRIES' times if it fails at the socket/connection level. -RETRIES = 2 - -# Python 2.3 support -if sys.version_info < (2, 4): - - def sorted(seq): - seq.sort() - return seq - - -# Python 2.3 support -def HTTPResponse__getheaders(self): - """Return list of (header, value) tuples.""" - if self.msg is None: - raise httplib.ResponseNotReady() - return self.msg.items() - - -if not hasattr(httplib.HTTPResponse, "getheaders"): - httplib.HTTPResponse.getheaders = HTTPResponse__getheaders - - -# All exceptions raised here derive from HttpLib2Error -class HttpLib2Error(Exception): - pass - - -# Some exceptions can be caught and optionally -# be turned back into responses. -class HttpLib2ErrorWithResponse(HttpLib2Error): - def __init__(self, desc, response, content): - self.response = response - self.content = content - HttpLib2Error.__init__(self, desc) - - -class RedirectMissingLocation(HttpLib2ErrorWithResponse): - pass - - -class RedirectLimit(HttpLib2ErrorWithResponse): - pass - - -class FailedToDecompressContent(HttpLib2ErrorWithResponse): - pass - - -class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): - pass - - -class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): - pass - - -class MalformedHeader(HttpLib2Error): - pass - - -class RelativeURIError(HttpLib2Error): - pass - - -class ServerNotFoundError(HttpLib2Error): - pass - - -class ProxiesUnavailableError(HttpLib2Error): - pass - - -class CertificateValidationUnsupported(HttpLib2Error): - pass - - -class SSLHandshakeError(HttpLib2Error): - pass - - -class NotSupportedOnThisPlatform(HttpLib2Error): - pass - - -class CertificateHostnameMismatch(SSLHandshakeError): - def __init__(self, desc, host, cert): - HttpLib2Error.__init__(self, desc) - self.host = host - self.cert = cert - - -class NotRunningAppEngineEnvironment(HttpLib2Error): - pass - - -# Open Items: -# ----------- -# Proxy support - -# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?) - -# Pluggable cache storage (supports storing the cache in -# flat files by default. We need a plug-in architecture -# that can support Berkeley DB and Squid) - -# == Known Issues == -# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator. -# Does not handle Cache-Control: max-stale -# Does not use Age: headers when calculating cache freshness. - -# The number of redirections to follow before giving up. -# Note that only GET redirects are automatically followed. -# Will also honor 301 requests by saving that info and never -# requesting that URI again. 
-DEFAULT_MAX_REDIRECTS = 5 - -from lib.httplib2 import certs -CA_CERTS = certs.where() - -# Which headers are hop-by-hop headers by default -HOP_BY_HOP = [ - "connection", - "keep-alive", - "proxy-authenticate", - "proxy-authorization", - "te", - "trailers", - "transfer-encoding", - "upgrade", -] - - -def _get_end2end_headers(response): - hopbyhop = list(HOP_BY_HOP) - hopbyhop.extend([x.strip() for x in response.get("connection", "").split(",")]) - return [header for header in response.keys() if header not in hopbyhop] - - -URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?") - - -def parse_uri(uri): - """Parses a URI using the regex given in Appendix B of RFC 3986. - - (scheme, authority, path, query, fragment) = parse_uri(uri) - """ - groups = URI.match(uri).groups() - return (groups[1], groups[3], groups[4], groups[6], groups[8]) - - -def urlnorm(uri): - (scheme, authority, path, query, fragment) = parse_uri(uri) - if not scheme or not authority: - raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri) - authority = authority.lower() - scheme = scheme.lower() - if not path: - path = "/" - # Could do syntax based normalization of the URI before - # computing the digest. See Section 6.2.2 of Std 66. - request_uri = query and "?".join([path, query]) or path - scheme = scheme.lower() - defrag_uri = scheme + "://" + authority + request_uri - return scheme, authority, request_uri, defrag_uri - - -# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/) -re_url_scheme = re.compile(r"^\w+://") -re_unsafe = re.compile(r"[^\w\-_.()=!]+") - - -def safename(filename): - """Return a filename suitable for the cache. - Strips dangerous and common characters to create a filename we - can use to store the cache in. - """ - if isinstance(filename, str): - filename_bytes = filename - filename = filename.decode("utf-8") - else: - filename_bytes = filename.encode("utf-8") - filemd5 = _md5(filename_bytes).hexdigest() - filename = re_url_scheme.sub("", filename) - filename = re_unsafe.sub("", filename) - - # limit length of filename (vital for Windows) - # https://github.com/httplib2/httplib2/pull/74 - # C:\Users\ <username> \AppData\Local\Temp\ <safe_filename> , <md5> - # 9 chars + max 104 chars + 20 chars + x + 1 + 32 = max 259 chars - # Thus max safe filename x = 93 chars. Let it be 90 to make a round sum: - filename = filename[:90] - - return ",".join((filename, filemd5)) - - -NORMALIZE_SPACE = re.compile(r"(?:\r\n)?[ \t]+") - - -def _normalize_headers(headers): - return dict( - [ - (key.lower(), NORMALIZE_SPACE.sub(value, " ").strip()) - for (key, value) in headers.iteritems() - ] - ) - - -def _parse_cache_control(headers): - retval = {} - if "cache-control" in headers: - parts = headers["cache-control"].split(",") - parts_with_args = [ - tuple([x.strip().lower() for x in part.split("=", 1)]) - for part in parts - if -1 != part.find("=") - ] - parts_wo_args = [ - (name.strip().lower(), 1) for name in parts if -1 == name.find("=") - ] - retval = dict(parts_with_args + parts_wo_args) - return retval - - -# Whether to use a strict mode to parse WWW-Authenticate headers -# Might lead to bad results in case of ill-formed header value, -# so disabled by default, falling back to relaxed parsing. -# Set to true to turn on, usefull for testing servers. 
-USE_WWW_AUTH_STRICT_PARSING = 0 - -# In regex below: -# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP -# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space -# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both: -# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"? -WWW_AUTH_STRICT = re.compile( - r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$" -) -WWW_AUTH_RELAXED = re.compile( - r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$" -) -UNQUOTE_PAIRS = re.compile(r"\\(.)") - - -def _parse_www_authenticate(headers, headername="www-authenticate"): - """Returns a dictionary of dictionaries, one dict - per auth_scheme.""" - retval = {} - if headername in headers: - try: - - authenticate = headers[headername].strip() - www_auth = ( - USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED - ) - while authenticate: - # Break off the scheme at the beginning of the line - if headername == "authentication-info": - (auth_scheme, the_rest) = ("digest", authenticate) - else: - (auth_scheme, the_rest) = authenticate.split(" ", 1) - # Now loop over all the key value pairs that come after the scheme, - # being careful not to roll into the next scheme - match = www_auth.search(the_rest) - auth_params = {} - while match: - if match and len(match.groups()) == 3: - (key, value, the_rest) = match.groups() - auth_params[key.lower()] = UNQUOTE_PAIRS.sub( - r"\1", value - ) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')]) - match = www_auth.search(the_rest) - retval[auth_scheme.lower()] = auth_params - authenticate = the_rest.strip() - - except ValueError: - raise MalformedHeader("WWW-Authenticate") - return retval - - -# TODO: add current time as _entry_disposition argument to avoid sleep in tests -def _entry_disposition(response_headers, request_headers): - """Determine freshness from the Date, Expires and Cache-Control headers. - - We don't handle the following: - - 1. Cache-Control: max-stale - 2. Age: headers are not used in the calculations. - - Not that this algorithm is simpler than you might think - because we are operating as a private (non-shared) cache. - This lets us ignore 's-maxage'. We can also ignore - 'proxy-invalidate' since we aren't a proxy. - We will never return a stale document as - fresh as a design decision, and thus the non-implementation - of 'max-stale'. This also lets us safely ignore 'must-revalidate' - since we operate as if every server has sent 'must-revalidate'. - Since we are private we get to ignore both 'public' and - 'private' parameters. We also ignore 'no-transform' since - we don't do any transformations. - The 'no-store' parameter is handled at a higher level. 
- So the only Cache-Control parameters we look at are: - - no-cache - only-if-cached - max-age - min-fresh - """ - - retval = "STALE" - cc = _parse_cache_control(request_headers) - cc_response = _parse_cache_control(response_headers) - - if ( - "pragma" in request_headers - and request_headers["pragma"].lower().find("no-cache") != -1 - ): - retval = "TRANSPARENT" - if "cache-control" not in request_headers: - request_headers["cache-control"] = "no-cache" - elif "no-cache" in cc: - retval = "TRANSPARENT" - elif "no-cache" in cc_response: - retval = "STALE" - elif "only-if-cached" in cc: - retval = "FRESH" - elif "date" in response_headers: - date = calendar.timegm(email.Utils.parsedate_tz(response_headers["date"])) - now = time.time() - current_age = max(0, now - date) - if "max-age" in cc_response: - try: - freshness_lifetime = int(cc_response["max-age"]) - except ValueError: - freshness_lifetime = 0 - elif "expires" in response_headers: - expires = email.Utils.parsedate_tz(response_headers["expires"]) - if None == expires: - freshness_lifetime = 0 - else: - freshness_lifetime = max(0, calendar.timegm(expires) - date) - else: - freshness_lifetime = 0 - if "max-age" in cc: - try: - freshness_lifetime = int(cc["max-age"]) - except ValueError: - freshness_lifetime = 0 - if "min-fresh" in cc: - try: - min_fresh = int(cc["min-fresh"]) - except ValueError: - min_fresh = 0 - current_age += min_fresh - if freshness_lifetime > current_age: - retval = "FRESH" - return retval - - -def _decompressContent(response, new_content): - content = new_content - try: - encoding = response.get("content-encoding", None) - if encoding in ["gzip", "deflate"]: - if encoding == "gzip": - content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read() - if encoding == "deflate": - content = zlib.decompress(content, -zlib.MAX_WBITS) - response["content-length"] = str(len(content)) - # Record the historical presence of the encoding in a way the won't interfere. - response["-content-encoding"] = response["content-encoding"] - del response["content-encoding"] - except (IOError, zlib.error): - content = "" - raise FailedToDecompressContent( - _("Content purported to be compressed with %s but failed to decompress.") - % response.get("content-encoding"), - response, - content, - ) - return content - - -def _updateCache(request_headers, response_headers, content, cache, cachekey): - if cachekey: - cc = _parse_cache_control(request_headers) - cc_response = _parse_cache_control(response_headers) - if "no-store" in cc or "no-store" in cc_response: - cache.delete(cachekey) - else: - info = email.Message.Message() - for key, value in response_headers.iteritems(): - if key not in ["status", "content-encoding", "transfer-encoding"]: - info[key] = value - - # Add annotations to the cache to indicate what headers - # are variant for this request. 
- vary = response_headers.get("vary", None) - if vary: - vary_headers = vary.lower().replace(" ", "").split(",") - for header in vary_headers: - key = "-varied-%s" % header - try: - info[key] = request_headers[header] - except KeyError: - pass - - status = response_headers.status - if status == 304: - status = 200 - - status_header = "status: %d\r\n" % status - - header_str = info.as_string() - - header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str) - text = "".join([status_header, header_str, content]) - - cache.set(cachekey, text) - - -def _cnonce(): - dig = _md5( - "%s:%s" - % (time.ctime(), ["0123456789"[random.randrange(0, 9)] for i in range(20)]) - ).hexdigest() - return dig[:16] - - -def _wsse_username_token(cnonce, iso_now, password): - return base64.b64encode( - _sha("%s%s%s" % (cnonce, iso_now, password)).digest() - ).strip() - - -# For credentials we need two things, first -# a pool of credential to try (not necesarily tied to BAsic, Digest, etc.) -# Then we also need a list of URIs that have already demanded authentication -# That list is tricky since sub-URIs can take the same auth, or the -# auth scheme may change as you descend the tree. -# So we also need each Auth instance to be able to tell us -# how close to the 'top' it is. - - -class Authentication(object): - def __init__( - self, credentials, host, request_uri, headers, response, content, http - ): - (scheme, authority, path, query, fragment) = parse_uri(request_uri) - self.path = path - self.host = host - self.credentials = credentials - self.http = http - - def depth(self, request_uri): - (scheme, authority, path, query, fragment) = parse_uri(request_uri) - return request_uri[len(self.path) :].count("/") - - def inscope(self, host, request_uri): - # XXX Should we normalize the request_uri? - (scheme, authority, path, query, fragment) = parse_uri(request_uri) - return (host == self.host) and path.startswith(self.path) - - def request(self, method, request_uri, headers, content): - """Modify the request headers to add the appropriate - Authorization header. Over-ride this in sub-classes.""" - pass - - def response(self, response, content): - """Gives us a chance to update with new nonces - or such returned from the last authorized response. - Over-rise this in sub-classes if necessary. - - Return TRUE is the request is to be retried, for - example Digest may return stale=true. 
- """ - return False - - -class BasicAuthentication(Authentication): - def __init__( - self, credentials, host, request_uri, headers, response, content, http - ): - Authentication.__init__( - self, credentials, host, request_uri, headers, response, content, http - ) - - def request(self, method, request_uri, headers, content): - """Modify the request headers to add the appropriate - Authorization header.""" - headers["authorization"] = ( - "Basic " + base64.b64encode("%s:%s" % self.credentials).strip() - ) - - -class DigestAuthentication(Authentication): - """Only do qop='auth' and MD5, since that - is all Apache currently implements""" - - def __init__( - self, credentials, host, request_uri, headers, response, content, http - ): - Authentication.__init__( - self, credentials, host, request_uri, headers, response, content, http - ) - challenge = _parse_www_authenticate(response, "www-authenticate") - self.challenge = challenge["digest"] - qop = self.challenge.get("qop", "auth") - self.challenge["qop"] = ( - ("auth" in [x.strip() for x in qop.split()]) and "auth" or None - ) - if self.challenge["qop"] is None: - raise UnimplementedDigestAuthOptionError( - _("Unsupported value for qop: %s." % qop) - ) - self.challenge["algorithm"] = self.challenge.get("algorithm", "MD5").upper() - if self.challenge["algorithm"] != "MD5": - raise UnimplementedDigestAuthOptionError( - _("Unsupported value for algorithm: %s." % self.challenge["algorithm"]) - ) - self.A1 = "".join( - [ - self.credentials[0], - ":", - self.challenge["realm"], - ":", - self.credentials[1], - ] - ) - self.challenge["nc"] = 1 - - def request(self, method, request_uri, headers, content, cnonce=None): - """Modify the request headers""" - H = lambda x: _md5(x).hexdigest() - KD = lambda s, d: H("%s:%s" % (s, d)) - A2 = "".join([method, ":", request_uri]) - self.challenge["cnonce"] = cnonce or _cnonce() - request_digest = '"%s"' % KD( - H(self.A1), - "%s:%s:%s:%s:%s" - % ( - self.challenge["nonce"], - "%08x" % self.challenge["nc"], - self.challenge["cnonce"], - self.challenge["qop"], - H(A2), - ), - ) - headers["authorization"] = ( - 'Digest username="%s", realm="%s", nonce="%s", ' - 'uri="%s", algorithm=%s, response=%s, qop=%s, ' - 'nc=%08x, cnonce="%s"' - ) % ( - self.credentials[0], - self.challenge["realm"], - self.challenge["nonce"], - request_uri, - self.challenge["algorithm"], - request_digest, - self.challenge["qop"], - self.challenge["nc"], - self.challenge["cnonce"], - ) - if self.challenge.get("opaque"): - headers["authorization"] += ', opaque="%s"' % self.challenge["opaque"] - self.challenge["nc"] += 1 - - def response(self, response, content): - if "authentication-info" not in response: - challenge = _parse_www_authenticate(response, "www-authenticate").get( - "digest", {} - ) - if "true" == challenge.get("stale"): - self.challenge["nonce"] = challenge["nonce"] - self.challenge["nc"] = 1 - return True - else: - updated_challenge = _parse_www_authenticate( - response, "authentication-info" - ).get("digest", {}) - - if "nextnonce" in updated_challenge: - self.challenge["nonce"] = updated_challenge["nextnonce"] - self.challenge["nc"] = 1 - return False - - -class HmacDigestAuthentication(Authentication): - """Adapted from Robert Sayre's code and DigestAuthentication above.""" - - __author__ = "Thomas Broyer (t.broyer@ltgt.net)" - - def __init__( - self, credentials, host, request_uri, headers, response, content, http - ): - Authentication.__init__( - self, credentials, host, request_uri, headers, response, content, http - ) - 
challenge = _parse_www_authenticate(response, "www-authenticate") - self.challenge = challenge["hmacdigest"] - # TODO: self.challenge['domain'] - self.challenge["reason"] = self.challenge.get("reason", "unauthorized") - if self.challenge["reason"] not in ["unauthorized", "integrity"]: - self.challenge["reason"] = "unauthorized" - self.challenge["salt"] = self.challenge.get("salt", "") - if not self.challenge.get("snonce"): - raise UnimplementedHmacDigestAuthOptionError( - _("The challenge doesn't contain a server nonce, or this one is empty.") - ) - self.challenge["algorithm"] = self.challenge.get("algorithm", "HMAC-SHA-1") - if self.challenge["algorithm"] not in ["HMAC-SHA-1", "HMAC-MD5"]: - raise UnimplementedHmacDigestAuthOptionError( - _("Unsupported value for algorithm: %s." % self.challenge["algorithm"]) - ) - self.challenge["pw-algorithm"] = self.challenge.get("pw-algorithm", "SHA-1") - if self.challenge["pw-algorithm"] not in ["SHA-1", "MD5"]: - raise UnimplementedHmacDigestAuthOptionError( - _( - "Unsupported value for pw-algorithm: %s." - % self.challenge["pw-algorithm"] - ) - ) - if self.challenge["algorithm"] == "HMAC-MD5": - self.hashmod = _md5 - else: - self.hashmod = _sha - if self.challenge["pw-algorithm"] == "MD5": - self.pwhashmod = _md5 - else: - self.pwhashmod = _sha - self.key = "".join( - [ - self.credentials[0], - ":", - self.pwhashmod.new( - "".join([self.credentials[1], self.challenge["salt"]]) - ) - .hexdigest() - .lower(), - ":", - self.challenge["realm"], - ] - ) - self.key = self.pwhashmod.new(self.key).hexdigest().lower() - - def request(self, method, request_uri, headers, content): - """Modify the request headers""" - keys = _get_end2end_headers(headers) - keylist = "".join(["%s " % k for k in keys]) - headers_val = "".join([headers[k] for k in keys]) - created = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) - cnonce = _cnonce() - request_digest = "%s:%s:%s:%s:%s" % ( - method, - request_uri, - cnonce, - self.challenge["snonce"], - headers_val, - ) - request_digest = ( - hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower() - ) - headers["authorization"] = ( - 'HMACDigest username="%s", realm="%s", snonce="%s",' - ' cnonce="%s", uri="%s", created="%s", ' - 'response="%s", headers="%s"' - ) % ( - self.credentials[0], - self.challenge["realm"], - self.challenge["snonce"], - cnonce, - request_uri, - created, - request_digest, - keylist, - ) - - def response(self, response, content): - challenge = _parse_www_authenticate(response, "www-authenticate").get( - "hmacdigest", {} - ) - if challenge.get("reason") in ["integrity", "stale"]: - return True - return False - - -class WsseAuthentication(Authentication): - """This is thinly tested and should not be relied upon. - At this time there isn't any third party server to test against. 
- Blogger and TypePad implemented this algorithm at one point - but Blogger has since switched to Basic over HTTPS and - TypePad has implemented it wrong, by never issuing a 401 - challenge but instead requiring your client to telepathically know that - their endpoint is expecting WSSE profile="UsernameToken".""" - - def __init__( - self, credentials, host, request_uri, headers, response, content, http - ): - Authentication.__init__( - self, credentials, host, request_uri, headers, response, content, http - ) - - def request(self, method, request_uri, headers, content): - """Modify the request headers to add the appropriate - Authorization header.""" - headers["authorization"] = 'WSSE profile="UsernameToken"' - iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) - cnonce = _cnonce() - password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1]) - headers["X-WSSE"] = ( - 'UsernameToken Username="%s", PasswordDigest="%s", ' - 'Nonce="%s", Created="%s"' - ) % (self.credentials[0], password_digest, cnonce, iso_now) - - -class GoogleLoginAuthentication(Authentication): - def __init__( - self, credentials, host, request_uri, headers, response, content, http - ): - from urllib import urlencode - - Authentication.__init__( - self, credentials, host, request_uri, headers, response, content, http - ) - challenge = _parse_www_authenticate(response, "www-authenticate") - service = challenge["googlelogin"].get("service", "xapi") - # Bloggger actually returns the service in the challenge - # For the rest we guess based on the URI - if service == "xapi" and request_uri.find("calendar") > 0: - service = "cl" - # No point in guessing Base or Spreadsheet - # elif request_uri.find("spreadsheets") > 0: - # service = "wise" - - auth = dict( - Email=credentials[0], - Passwd=credentials[1], - service=service, - source=headers["user-agent"], - ) - resp, content = self.http.request( - "https://www.google.com/accounts/ClientLogin", - method="POST", - body=urlencode(auth), - headers={"Content-Type": "application/x-www-form-urlencoded"}, - ) - lines = content.split("\n") - d = dict([tuple(line.split("=", 1)) for line in lines if line]) - if resp.status == 403: - self.Auth = "" - else: - self.Auth = d["Auth"] - - def request(self, method, request_uri, headers, content): - """Modify the request headers to add the appropriate - Authorization header.""" - headers["authorization"] = "GoogleLogin Auth=" + self.Auth - - -AUTH_SCHEME_CLASSES = { - "basic": BasicAuthentication, - "wsse": WsseAuthentication, - "digest": DigestAuthentication, - "hmacdigest": HmacDigestAuthentication, - "googlelogin": GoogleLoginAuthentication, -} - -AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"] - - -class FileCache(object): - """Uses a local directory as a store for cached files. - Not really safe to use if multiple threads or processes are going to - be running on the same cache. 
- """ - - def __init__( - self, cache, safe=safename - ): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior - self.cache = cache - self.safe = safe - if not os.path.exists(cache): - os.makedirs(self.cache) - - def get(self, key): - retval = None - cacheFullPath = os.path.join(self.cache, self.safe(key)) - try: - f = file(cacheFullPath, "rb") - retval = f.read() - f.close() - except IOError: - pass - return retval - - def set(self, key, value): - cacheFullPath = os.path.join(self.cache, self.safe(key)) - f = file(cacheFullPath, "wb") - f.write(value) - f.close() - - def delete(self, key): - cacheFullPath = os.path.join(self.cache, self.safe(key)) - if os.path.exists(cacheFullPath): - os.remove(cacheFullPath) - - -class Credentials(object): - def __init__(self): - self.credentials = [] - - def add(self, name, password, domain=""): - self.credentials.append((domain.lower(), name, password)) - - def clear(self): - self.credentials = [] - - def iter(self, domain): - for (cdomain, name, password) in self.credentials: - if cdomain == "" or domain == cdomain: - yield (name, password) - - -class KeyCerts(Credentials): - """Identical to Credentials except that - name/password are mapped to key/cert.""" - - pass - - -class AllHosts(object): - pass - - -class ProxyInfo(object): - """Collect information required to use a proxy.""" - - bypass_hosts = () - - def __init__( - self, - proxy_type, - proxy_host, - proxy_port, - proxy_rdns=True, - proxy_user=None, - proxy_pass=None, - proxy_headers=None, - ): - """Args: - - proxy_type: The type of proxy server. This must be set to one of - socks.PROXY_TYPE_XXX constants. For example: p = - ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, proxy_host='localhost', - proxy_port=8000) - proxy_host: The hostname or IP address of the proxy server. - proxy_port: The port that the proxy server is running on. - proxy_rdns: If True (default), DNS queries will not be performed - locally, and instead, handed to the proxy to resolve. This is useful - if the network does not allow resolution of non-local names. In - httplib2 0.9 and earlier, this defaulted to False. - proxy_user: The username used to authenticate with the proxy server. - proxy_pass: The password used to authenticate with the proxy server. - proxy_headers: Additional or modified headers for the proxy connect - request. - """ - self.proxy_type = proxy_type - self.proxy_host = proxy_host - self.proxy_port = proxy_port - self.proxy_rdns = proxy_rdns - self.proxy_user = proxy_user - self.proxy_pass = proxy_pass - self.proxy_headers = proxy_headers - - def astuple(self): - return ( - self.proxy_type, - self.proxy_host, - self.proxy_port, - self.proxy_rdns, - self.proxy_user, - self.proxy_pass, - self.proxy_headers, - ) - - def isgood(self): - return (self.proxy_host != None) and (self.proxy_port != None) - - def applies_to(self, hostname): - return not self.bypass_host(hostname) - - def bypass_host(self, hostname): - """Has this host been excluded from the proxy config""" - if self.bypass_hosts is AllHosts: - return True - - hostname = "." + hostname.lstrip(".") - for skip_name in self.bypass_hosts: - # *.suffix - if skip_name.startswith(".") and hostname.endswith(skip_name): - return True - # exact match - if hostname == "." 
+ skip_name: - return True - return False - - def __repr__(self): - return ( - "<ProxyInfo type={p.proxy_type} " - "host:port={p.proxy_host}:{p.proxy_port} rdns={p.proxy_rdns}" - + " user={p.proxy_user} headers={p.proxy_headers}>" - ).format(p=self) - - -def proxy_info_from_environment(method="http"): - """Read proxy info from the environment variables. - """ - if method not in ["http", "https"]: - return - - env_var = method + "_proxy" - url = os.environ.get(env_var, os.environ.get(env_var.upper())) - if not url: - return - return proxy_info_from_url(url, method, None) - - -def proxy_info_from_url(url, method="http", noproxy=None): - """Construct a ProxyInfo from a URL (such as http_proxy env var) - """ - url = urlparse.urlparse(url) - username = None - password = None - port = None - if "@" in url[1]: - ident, host_port = url[1].split("@", 1) - if ":" in ident: - username, password = ident.split(":", 1) - else: - password = ident - else: - host_port = url[1] - if ":" in host_port: - host, port = host_port.split(":", 1) - else: - host = host_port - - if port: - port = int(port) - else: - port = dict(https=443, http=80)[method] - - proxy_type = 3 # socks.PROXY_TYPE_HTTP - pi = ProxyInfo( - proxy_type=proxy_type, - proxy_host=host, - proxy_port=port, - proxy_user=username or None, - proxy_pass=password or None, - proxy_headers=None, - ) - - bypass_hosts = [] - # If not given an explicit noproxy value, respect values in env vars. - if noproxy is None: - noproxy = os.environ.get("no_proxy", os.environ.get("NO_PROXY", "")) - # Special case: A single '*' character means all hosts should be bypassed. - if noproxy == "*": - bypass_hosts = AllHosts - elif noproxy.strip(): - bypass_hosts = noproxy.split(",") - bypass_hosts = filter(bool, bypass_hosts) # To exclude empty string. - - pi.bypass_hosts = bypass_hosts - return pi - - -class HTTPConnectionWithTimeout(httplib.HTTPConnection): - """HTTPConnection subclass that supports timeouts - - All timeouts are in seconds. If None is passed for timeout then - Python's default timeout for sockets will be used. See for example - the docs of socket.setdefaulttimeout(): - http://docs.python.org/library/socket.html#socket.setdefaulttimeout - """ - - def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None): - httplib.HTTPConnection.__init__(self, host, port, strict) - self.timeout = timeout - self.proxy_info = proxy_info - - def connect(self): - """Connect to the host and port specified in __init__.""" - # Mostly verbatim from httplib.py. - if self.proxy_info and socks is None: - raise ProxiesUnavailableError( - "Proxy support missing but proxy use was requested!" - ) - msg = "getaddrinfo returns an empty list" - if self.proxy_info and self.proxy_info.isgood(): - use_proxy = True - proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers = ( - self.proxy_info.astuple() - ) - - host = proxy_host - port = proxy_port - else: - use_proxy = False - - host = self.host - port = self.port - - for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM): - af, socktype, proto, canonname, sa = res - try: - if use_proxy: - self.sock = socks.socksocket(af, socktype, proto) - self.sock.setproxy( - proxy_type, - proxy_host, - proxy_port, - proxy_rdns, - proxy_user, - proxy_pass, - proxy_headers, - ) - else: - self.sock = socket.socket(af, socktype, proto) - self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) - # Different from httplib: support timeouts. 
- if has_timeout(self.timeout): - self.sock.settimeout(self.timeout) - # End of difference from httplib. - if self.debuglevel > 0: - print("connect: (%s, %s) ************" % (self.host, self.port)) - if use_proxy: - print( - "proxy: %s ************" - % str( - ( - proxy_host, - proxy_port, - proxy_rdns, - proxy_user, - proxy_pass, - proxy_headers, - ) - ) - ) - if use_proxy: - self.sock.connect((self.host, self.port) + sa[2:]) - else: - self.sock.connect(sa) - except socket.error as msg: - if self.debuglevel > 0: - print("connect fail: (%s, %s)" % (self.host, self.port)) - if use_proxy: - print( - "proxy: %s" - % str( - ( - proxy_host, - proxy_port, - proxy_rdns, - proxy_user, - proxy_pass, - proxy_headers, - ) - ) - ) - if self.sock: - self.sock.close() - self.sock = None - continue - break - if not self.sock: - raise socket.error(msg) - - -class HTTPSConnectionWithTimeout(httplib.HTTPSConnection): - """This class allows communication via SSL. - - All timeouts are in seconds. If None is passed for timeout then - Python's default timeout for sockets will be used. See for example - the docs of socket.setdefaulttimeout(): - http://docs.python.org/library/socket.html#socket.setdefaulttimeout - """ - - def __init__( - self, - host, - port=None, - key_file=None, - cert_file=None, - strict=None, - timeout=None, - proxy_info=None, - ca_certs=None, - disable_ssl_certificate_validation=False, - ssl_version=None, - ): - httplib.HTTPSConnection.__init__( - self, host, port=port, key_file=key_file, cert_file=cert_file, strict=strict - ) - self.timeout = timeout - self.proxy_info = proxy_info - if ca_certs is None: - ca_certs = CA_CERTS - self.ca_certs = ca_certs - self.disable_ssl_certificate_validation = disable_ssl_certificate_validation - self.ssl_version = ssl_version - - # The following two methods were adapted from https_wrapper.py, released - # with the Google Appengine SDK at - # http://googleappengine.googlecode.com/svn-history/r136/trunk/python/google/appengine/tools/https_wrapper.py - # under the following license: - # - # Copyright 2007 Google Inc. - # - # Licensed under the Apache License, Version 2.0 (the "License"); - # you may not use this file except in compliance with the License. - # You may obtain a copy of the License at - # - # http://www.apache.org/licenses/LICENSE-2.0 - # - # Unless required by applicable law or agreed to in writing, software - # distributed under the License is distributed on an "AS IS" BASIS, - # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - # See the License for the specific language governing permissions and - # limitations under the License. - # - - def _GetValidHostsForCert(self, cert): - """Returns a list of valid host globs for an SSL certificate. - - Args: - cert: A dictionary representing an SSL certificate. - Returns: - list: A list of valid host globs. - """ - if "subjectAltName" in cert: - return [x[1] for x in cert["subjectAltName"] if x[0].lower() == "dns"] - else: - return [x[0][1] for x in cert["subject"] if x[0][0].lower() == "commonname"] - - def _ValidateCertificateHostname(self, cert, hostname): - """Validates that a given hostname is valid for an SSL certificate. - - Args: - cert: A dictionary representing an SSL certificate. - hostname: The hostname to test. - Returns: - bool: Whether or not the hostname is valid for this certificate. 
- """ - hosts = self._GetValidHostsForCert(cert) - for host in hosts: - host_re = host.replace(".", "\.").replace("*", "[^.]*") - if re.search("^%s$" % (host_re,), hostname, re.I): - return True - return False - - def connect(self): - "Connect to a host on a given (SSL) port." - - msg = "getaddrinfo returns an empty list" - if self.proxy_info and self.proxy_info.isgood(): - use_proxy = True - proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers = ( - self.proxy_info.astuple() - ) - - host = proxy_host - port = proxy_port - else: - use_proxy = False - - host = self.host - port = self.port - - address_info = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM) - for family, socktype, proto, canonname, sockaddr in address_info: - try: - if use_proxy: - sock = socks.socksocket(family, socktype, proto) - - sock.setproxy( - proxy_type, - proxy_host, - proxy_port, - proxy_rdns, - proxy_user, - proxy_pass, - proxy_headers, - ) - else: - sock = socket.socket(family, socktype, proto) - sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) - - if has_timeout(self.timeout): - sock.settimeout(self.timeout) - - if use_proxy: - sock.connect((self.host, self.port) + sockaddr[:2]) - else: - sock.connect(sockaddr) - self.sock = _ssl_wrap_socket( - sock, - self.key_file, - self.cert_file, - self.disable_ssl_certificate_validation, - self.ca_certs, - self.ssl_version, - self.host, - ) - if self.debuglevel > 0: - print("connect: (%s, %s)" % (self.host, self.port)) - if use_proxy: - print( - "proxy: %s" - % str( - ( - proxy_host, - proxy_port, - proxy_rdns, - proxy_user, - proxy_pass, - proxy_headers, - ) - ) - ) - if not self.disable_ssl_certificate_validation: - cert = self.sock.getpeercert() - hostname = self.host.split(":", 0)[0] - if not self._ValidateCertificateHostname(cert, hostname): - raise CertificateHostnameMismatch( - "Server presented certificate that does not match " - "host %s: %s" % (hostname, cert), - hostname, - cert, - ) - except ( - ssl_SSLError, - ssl_CertificateError, - CertificateHostnameMismatch, - ) as e: - if sock: - sock.close() - if self.sock: - self.sock.close() - self.sock = None - # Unfortunately the ssl module doesn't seem to provide any way - # to get at more detailed error information, in particular - # whether the error is due to certificate validation or - # something else (such as SSL protocol mismatch). 
- if getattr(e, "errno", None) == ssl.SSL_ERROR_SSL: - raise SSLHandshakeError(e) - else: - raise - except (socket.timeout, socket.gaierror): - raise - except socket.error as msg: - if self.debuglevel > 0: - print("connect fail: (%s, %s)" % (self.host, self.port)) - if use_proxy: - print( - "proxy: %s" - % str( - ( - proxy_host, - proxy_port, - proxy_rdns, - proxy_user, - proxy_pass, - proxy_headers, - ) - ) - ) - if self.sock: - self.sock.close() - self.sock = None - continue - break - if not self.sock: - raise socket.error(msg) - - -SCHEME_TO_CONNECTION = { - "http": HTTPConnectionWithTimeout, - "https": HTTPSConnectionWithTimeout, -} - - -def _new_fixed_fetch(validate_certificate): - - def fixed_fetch( - url, - payload=None, - method="GET", - headers={}, - allow_truncated=False, - follow_redirects=True, - deadline=None, - ): - return fetch( - url, - payload=payload, - method=method, - headers=headers, - allow_truncated=allow_truncated, - follow_redirects=follow_redirects, - deadline=deadline, - validate_certificate=validate_certificate, - ) - - return fixed_fetch - - -class AppEngineHttpConnection(httplib.HTTPConnection): - """Use httplib on App Engine, but compensate for its weirdness. - - The parameters key_file, cert_file, proxy_info, ca_certs, - disable_ssl_certificate_validation, and ssl_version are all dropped on - the ground. - """ - - def __init__( - self, - host, - port=None, - key_file=None, - cert_file=None, - strict=None, - timeout=None, - proxy_info=None, - ca_certs=None, - disable_ssl_certificate_validation=False, - ssl_version=None, - ): - httplib.HTTPConnection.__init__( - self, host, port=port, strict=strict, timeout=timeout - ) - - -class AppEngineHttpsConnection(httplib.HTTPSConnection): - """Same as AppEngineHttpConnection, but for HTTPS URIs. - - The parameters proxy_info, ca_certs, disable_ssl_certificate_validation, - and ssl_version are all dropped on the ground. - """ - - def __init__( - self, - host, - port=None, - key_file=None, - cert_file=None, - strict=None, - timeout=None, - proxy_info=None, - ca_certs=None, - disable_ssl_certificate_validation=False, - ssl_version=None, - ): - httplib.HTTPSConnection.__init__( - self, - host, - port=port, - key_file=key_file, - cert_file=cert_file, - strict=strict, - timeout=timeout, - ) - self._fetch = _new_fixed_fetch(not disable_ssl_certificate_validation) - - -# Use a different connection object for Google App Engine Standard Environment. -def is_gae_instance(): - server_software = os.environ.get('SERVER_SOFTWARE', '') - if (server_software.startswith('Google App Engine/') or - server_software.startswith('Development/') or - server_software.startswith('testutil/')): - return True - return False - - -try: - if not is_gae_instance(): - raise NotRunningAppEngineEnvironment() - - from google.appengine.api import apiproxy_stub_map - if apiproxy_stub_map.apiproxy.GetStub("urlfetch") is None: - raise ImportError - - from google.appengine.api.urlfetch import fetch - - # Update the connection classes to use the Googel App Engine specific ones. - SCHEME_TO_CONNECTION = { - "http": AppEngineHttpConnection, - "https": AppEngineHttpsConnection, - } -except (ImportError, NotRunningAppEngineEnvironment): - pass - - -class Http(object): - """An HTTP client that handles: - - - all methods - - caching - - ETags - - compression, - - HTTPS - - Basic - - Digest - - WSSE - - and more. 
- """ - - def __init__( - self, - cache=None, - timeout=None, - proxy_info=proxy_info_from_environment, - ca_certs=None, - disable_ssl_certificate_validation=False, - ssl_version=None, - ): - """If 'cache' is a string then it is used as a directory name for - a disk cache. Otherwise it must be an object that supports the - same interface as FileCache. - - All timeouts are in seconds. If None is passed for timeout - then Python's default timeout for sockets will be used. See - for example the docs of socket.setdefaulttimeout(): - http://docs.python.org/library/socket.html#socket.setdefaulttimeout - - `proxy_info` may be: - - a callable that takes the http scheme ('http' or 'https') and - returns a ProxyInfo instance per request. By default, uses - proxy_nfo_from_environment. - - a ProxyInfo instance (static proxy config). - - None (proxy disabled). - - ca_certs is the path of a file containing root CA certificates for SSL - server certificate validation. By default, a CA cert file bundled with - httplib2 is used. - - If disable_ssl_certificate_validation is true, SSL cert validation will - not be performed. - - By default, ssl.PROTOCOL_SSLv23 will be used for the ssl version. - """ - self.proxy_info = proxy_info - self.ca_certs = ca_certs - self.disable_ssl_certificate_validation = disable_ssl_certificate_validation - self.ssl_version = ssl_version - - # Map domain name to an httplib connection - self.connections = {} - # The location of the cache, for now a directory - # where cached responses are held. - if cache and isinstance(cache, basestring): - self.cache = FileCache(cache) - else: - self.cache = cache - - # Name/password - self.credentials = Credentials() - - # Key/cert - self.certificates = KeyCerts() - - # authorization objects - self.authorizations = [] - - # If set to False then no redirects are followed, even safe ones. - self.follow_redirects = True - - # Which HTTP methods do we apply optimistic concurrency to, i.e. - # which methods get an "if-match:" etag header added to them. - self.optimistic_concurrency_methods = ["PUT", "PATCH"] - - # If 'follow_redirects' is True, and this is set to True then - # all redirecs are followed, including unsafe ones. - self.follow_all_redirects = False - - self.ignore_etag = False - - self.force_exception_to_status_code = False - - self.timeout = timeout - - # Keep Authorization: headers on a redirect. - self.forward_authorization_headers = False - - def __getstate__(self): - state_dict = copy.copy(self.__dict__) - # In case request is augmented by some foreign object such as - # credentials which handle auth - if "request" in state_dict: - del state_dict["request"] - if "connections" in state_dict: - del state_dict["connections"] - return state_dict - - def __setstate__(self, state): - self.__dict__.update(state) - self.connections = {} - - def _auth_from_challenge(self, host, request_uri, headers, response, content): - """A generator that creates Authorization objects - that can be applied to requests. 
- """ - challenges = _parse_www_authenticate(response, "www-authenticate") - for cred in self.credentials.iter(host): - for scheme in AUTH_SCHEME_ORDER: - if scheme in challenges: - yield AUTH_SCHEME_CLASSES[scheme]( - cred, host, request_uri, headers, response, content, self - ) - - def add_credentials(self, name, password, domain=""): - """Add a name and password that will be used - any time a request requires authentication.""" - self.credentials.add(name, password, domain) - - def add_certificate(self, key, cert, domain): - """Add a key and cert that will be used - any time a request requires authentication.""" - self.certificates.add(key, cert, domain) - - def clear_credentials(self): - """Remove all the names and passwords - that are used for authentication""" - self.credentials.clear() - self.authorizations = [] - - def _conn_request(self, conn, request_uri, method, body, headers): - i = 0 - seen_bad_status_line = False - while i < RETRIES: - i += 1 - try: - if hasattr(conn, "sock") and conn.sock is None: - conn.connect() - conn.request(method, request_uri, body, headers) - except socket.timeout: - raise - except socket.gaierror: - conn.close() - raise ServerNotFoundError("Unable to find the server at %s" % conn.host) - except ssl_SSLError: - conn.close() - raise - except socket.error as e: - err = 0 - if hasattr(e, "args"): - err = getattr(e, "args")[0] - else: - err = e.errno - if err == errno.ECONNREFUSED: # Connection refused - raise - if err in (errno.ENETUNREACH, errno.EADDRNOTAVAIL) and i < RETRIES: - continue # retry on potentially transient socket errors - except httplib.HTTPException: - # Just because the server closed the connection doesn't apparently mean - # that the server didn't send a response. - if hasattr(conn, "sock") and conn.sock is None: - if i < RETRIES - 1: - conn.close() - conn.connect() - continue - else: - conn.close() - raise - if i < RETRIES - 1: - conn.close() - conn.connect() - continue - try: - response = conn.getresponse() - except httplib.BadStatusLine: - # If we get a BadStatusLine on the first try then that means - # the connection just went stale, so retry regardless of the - # number of RETRIES set. 
- if not seen_bad_status_line and i == 1: - i = 0 - seen_bad_status_line = True - conn.close() - conn.connect() - continue - else: - conn.close() - raise - except (socket.error, httplib.HTTPException): - if i < RETRIES - 1: - conn.close() - conn.connect() - continue - else: - conn.close() - raise - else: - content = "" - if method == "HEAD": - conn.close() - else: - content = response.read() - response = Response(response) - if method != "HEAD": - content = _decompressContent(response, content) - break - return (response, content) - - def _request( - self, - conn, - host, - absolute_uri, - request_uri, - method, - body, - headers, - redirections, - cachekey, - ): - """Do the actual request using the connection object - and also follow one level of redirects if necessary""" - - auths = [ - (auth.depth(request_uri), auth) - for auth in self.authorizations - if auth.inscope(host, request_uri) - ] - auth = auths and sorted(auths)[0][1] or None - if auth: - auth.request(method, request_uri, headers, body) - - (response, content) = self._conn_request( - conn, request_uri, method, body, headers - ) - - if auth: - if auth.response(response, body): - auth.request(method, request_uri, headers, body) - (response, content) = self._conn_request( - conn, request_uri, method, body, headers - ) - response._stale_digest = 1 - - if response.status == 401: - for authorization in self._auth_from_challenge( - host, request_uri, headers, response, content - ): - authorization.request(method, request_uri, headers, body) - (response, content) = self._conn_request( - conn, request_uri, method, body, headers - ) - if response.status != 401: - self.authorizations.append(authorization) - authorization.response(response, body) - break - - if ( - self.follow_all_redirects - or (method in ["GET", "HEAD"]) - or response.status == 303 - ): - if self.follow_redirects and response.status in [300, 301, 302, 303, 307]: - # Pick out the location header and basically start from the beginning - # remembering first to strip the ETag header and decrement our 'depth' - if redirections: - if "location" not in response and response.status != 300: - raise RedirectMissingLocation( - _( - "Redirected but the response is missing a Location: header." 
- ), - response, - content, - ) - # Fix-up relative redirects (which violate an RFC 2616 MUST) - if "location" in response: - location = response["location"] - (scheme, authority, path, query, fragment) = parse_uri(location) - if authority == None: - response["location"] = urlparse.urljoin( - absolute_uri, location - ) - if response.status == 301 and method in ["GET", "HEAD"]: - response["-x-permanent-redirect-url"] = response["location"] - if "content-location" not in response: - response["content-location"] = absolute_uri - _updateCache(headers, response, content, self.cache, cachekey) - if "if-none-match" in headers: - del headers["if-none-match"] - if "if-modified-since" in headers: - del headers["if-modified-since"] - if ( - "authorization" in headers - and not self.forward_authorization_headers - ): - del headers["authorization"] - if "location" in response: - location = response["location"] - old_response = copy.deepcopy(response) - if "content-location" not in old_response: - old_response["content-location"] = absolute_uri - redirect_method = method - if response.status in [302, 303]: - redirect_method = "GET" - body = None - (response, content) = self.request( - location, - method=redirect_method, - body=body, - headers=headers, - redirections=redirections - 1, - ) - response.previous = old_response - else: - raise RedirectLimit( - "Redirected more times than rediection_limit allows.", - response, - content, - ) - elif response.status in [200, 203] and method in ["GET", "HEAD"]: - # Don't cache 206's since we aren't going to handle byte range requests - if "content-location" not in response: - response["content-location"] = absolute_uri - _updateCache(headers, response, content, self.cache, cachekey) - - return (response, content) - - def _normalize_headers(self, headers): - return _normalize_headers(headers) - - # Need to catch and rebrand some exceptions - # Then need to optionally turn all exceptions into status codes - # including all socket.* and httplib.* exceptions. - - def request( - self, - uri, - method="GET", - body=None, - headers=None, - redirections=DEFAULT_MAX_REDIRECTS, - connection_type=None, - ): - """ Performs a single HTTP request. - - The 'uri' is the URI of the HTTP resource and can begin with either - 'http' or 'https'. The value of 'uri' must be an absolute URI. - - The 'method' is the HTTP method to perform, such as GET, POST, DELETE, - etc. There is no restriction on the methods allowed. - - The 'body' is the entity body to be sent with the request. It is a - string object. - - Any extra headers that are to be sent with the request should be - provided in the 'headers' dictionary. - - The maximum number of redirect to follow before raising an - exception is 'redirections. The default is 5. - - The return value is a tuple of (response, content), the first - being and instance of the 'Response' class, the second being - a string that contains the response entity body. 
- """ - conn_key = '' - - try: - if headers is None: - headers = {} - else: - headers = self._normalize_headers(headers) - - if "user-agent" not in headers: - headers["user-agent"] = "Python-httplib2/%s (gzip)" % __version__ - - uri = iri2uri(uri) - - (scheme, authority, request_uri, defrag_uri) = urlnorm(uri) - - proxy_info = self._get_proxy_info(scheme, authority) - - conn_key = scheme + ":" + authority - conn = self.connections.get(conn_key) - if conn is None: - if not connection_type: - connection_type = SCHEME_TO_CONNECTION[scheme] - certs = list(self.certificates.iter(authority)) - if scheme == "https": - if certs: - conn = self.connections[conn_key] = connection_type( - authority, - key_file=certs[0][0], - cert_file=certs[0][1], - timeout=self.timeout, - proxy_info=proxy_info, - ca_certs=self.ca_certs, - disable_ssl_certificate_validation=self.disable_ssl_certificate_validation, - ssl_version=self.ssl_version, - ) - else: - conn = self.connections[conn_key] = connection_type( - authority, - timeout=self.timeout, - proxy_info=proxy_info, - ca_certs=self.ca_certs, - disable_ssl_certificate_validation=self.disable_ssl_certificate_validation, - ssl_version=self.ssl_version, - ) - else: - conn = self.connections[conn_key] = connection_type( - authority, timeout=self.timeout, proxy_info=proxy_info - ) - conn.set_debuglevel(debuglevel) - - if "range" not in headers and "accept-encoding" not in headers: - headers["accept-encoding"] = "gzip, deflate" - - info = email.Message.Message() - cached_value = None - if self.cache: - cachekey = defrag_uri.encode("utf-8") - cached_value = self.cache.get(cachekey) - if cached_value: - # info = email.message_from_string(cached_value) - # - # Need to replace the line above with the kludge below - # to fix the non-existent bug not fixed in this - # bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html - try: - info, content = cached_value.split("\r\n\r\n", 1) - feedparser = email.FeedParser.FeedParser() - feedparser.feed(info) - info = feedparser.close() - feedparser._parse = None - except (IndexError, ValueError): - self.cache.delete(cachekey) - cachekey = None - cached_value = None - else: - cachekey = None - - if ( - method in self.optimistic_concurrency_methods - and self.cache - and "etag" in info - and not self.ignore_etag - and "if-match" not in headers - ): - # http://www.w3.org/1999/04/Editing/ - headers["if-match"] = info["etag"] - - if method not in ["GET", "HEAD"] and self.cache and cachekey: - # RFC 2616 Section 13.10 - self.cache.delete(cachekey) - - # Check the vary header in the cache to see if this request - # matches what varies in the cache. - if method in ["GET", "HEAD"] and "vary" in info: - vary = info["vary"] - vary_headers = vary.lower().replace(" ", "").split(",") - for header in vary_headers: - key = "-varied-%s" % header - value = info[key] - if headers.get(header, None) != value: - cached_value = None - break - - if ( - cached_value - and method in ["GET", "HEAD"] - and self.cache - and "range" not in headers - ): - if "-x-permanent-redirect-url" in info: - # Should cached permanent redirects be counted in our redirection count? For now, yes. 
- if redirections <= 0: - raise RedirectLimit( - "Redirected more times than rediection_limit allows.", - {}, - "", - ) - (response, new_content) = self.request( - info["-x-permanent-redirect-url"], - method="GET", - headers=headers, - redirections=redirections - 1, - ) - response.previous = Response(info) - response.previous.fromcache = True - else: - # Determine our course of action: - # Is the cached entry fresh or stale? - # Has the client requested a non-cached response? - # - # There seems to be three possible answers: - # 1. [FRESH] Return the cache entry w/o doing a GET - # 2. [STALE] Do the GET (but add in cache validators if available) - # 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request - entry_disposition = _entry_disposition(info, headers) - - if entry_disposition == "FRESH": - if not cached_value: - info["status"] = "504" - content = "" - response = Response(info) - if cached_value: - response.fromcache = True - return (response, content) - - if entry_disposition == "STALE": - if ( - "etag" in info - and not self.ignore_etag - and not "if-none-match" in headers - ): - headers["if-none-match"] = info["etag"] - if "last-modified" in info and not "last-modified" in headers: - headers["if-modified-since"] = info["last-modified"] - elif entry_disposition == "TRANSPARENT": - pass - - (response, new_content) = self._request( - conn, - authority, - uri, - request_uri, - method, - body, - headers, - redirections, - cachekey, - ) - - if response.status == 304 and method == "GET": - # Rewrite the cache entry with the new end-to-end headers - # Take all headers that are in response - # and overwrite their values in info. - # unless they are hop-by-hop, or are listed in the connection header. - - for key in _get_end2end_headers(response): - info[key] = response[key] - merged_response = Response(info) - if hasattr(response, "_stale_digest"): - merged_response._stale_digest = response._stale_digest - _updateCache( - headers, merged_response, content, self.cache, cachekey - ) - response = merged_response - response.status = 200 - response.fromcache = True - - elif response.status == 200: - content = new_content - else: - self.cache.delete(cachekey) - content = new_content - else: - cc = _parse_cache_control(headers) - if "only-if-cached" in cc: - info["status"] = "504" - response = Response(info) - content = "" - else: - (response, content) = self._request( - conn, - authority, - uri, - request_uri, - method, - body, - headers, - redirections, - cachekey, - ) - except Exception as e: - is_timeout = isinstance(e, socket.timeout) - if is_timeout: - conn = self.connections.pop(conn_key, None) - if conn: - conn.close() - - if self.force_exception_to_status_code: - if isinstance(e, HttpLib2ErrorWithResponse): - response = e.response - content = e.content - response.status = 500 - response.reason = str(e) - elif is_timeout: - content = "Request Timeout" - response = Response( - { - "content-type": "text/plain", - "status": "408", - "content-length": len(content), - } - ) - response.reason = "Request Timeout" - else: - content = str(e) - response = Response( - { - "content-type": "text/plain", - "status": "400", - "content-length": len(content), - } - ) - response.reason = "Bad Request" - else: - raise - - return (response, content) - - def _get_proxy_info(self, scheme, authority): - """Return a ProxyInfo instance (or None) based on the scheme - and authority. 
- """ - hostname, port = urllib.splitport(authority) - proxy_info = self.proxy_info - if callable(proxy_info): - proxy_info = proxy_info(scheme) - - if hasattr(proxy_info, "applies_to") and not proxy_info.applies_to(hostname): - proxy_info = None - return proxy_info - - -class Response(dict): - """An object more like email.Message than httplib.HTTPResponse.""" - - """Is this response from our local cache""" - fromcache = False - """HTTP protocol version used by server. - - 10 for HTTP/1.0, 11 for HTTP/1.1. - """ - version = 11 - - "Status code returned by server. " - status = 200 - """Reason phrase returned by server.""" - reason = "Ok" - - previous = None - - def __init__(self, info): - # info is either an email.Message or - # an httplib.HTTPResponse object. - if isinstance(info, httplib.HTTPResponse): - for key, value in info.getheaders(): - self[key.lower()] = value - self.status = info.status - self["status"] = str(self.status) - self.reason = info.reason - self.version = info.version - elif isinstance(info, email.Message.Message): - for key, value in info.items(): - self[key.lower()] = value - self.status = int(self["status"]) - else: - for key, value in info.iteritems(): - self[key.lower()] = value - self.status = int(self.get("status", self.status)) - self.reason = self.get("reason", self.reason) - - def __getattr__(self, name): - if name == "dict": - return self - else: - raise AttributeError(name) diff --git a/lib/httplib2/py2/__init__.py b/lib/httplib2/py2/__init__.py new file mode 100644 index 00000000..99cdd453 --- /dev/null +++ b/lib/httplib2/py2/__init__.py @@ -0,0 +1,2285 @@ +"""Small, fast HTTP client library for Python. + +Features persistent connections, cache, and Google App Engine Standard +Environment support. +""" + +from __future__ import print_function + +__author__ = "Joe Gregorio (joe@bitworking.org)" +__copyright__ = "Copyright 2006, Joe Gregorio" +__contributors__ = [ + "Thomas Broyer (t.broyer@ltgt.net)", + "James Antill", + "Xavier Verges Farrero", + "Jonathan Feinberg", + "Blair Zajac", + "Sam Ruby", + "Louis Nyffenegger", + "Alex Yu", +] +__license__ = "MIT" +__version__ = '0.17.0' + +import base64 +import calendar +import copy +import email +import email.FeedParser +import email.Message +import email.Utils +import errno +import gzip +import httplib +import os +import random +import re +import StringIO +import sys +import time +import urllib +import urlparse +import zlib + +try: + from hashlib import sha1 as _sha, md5 as _md5 +except ImportError: + # prior to Python 2.5, these were separate modules + import sha + import md5 + + _sha = sha.new + _md5 = md5.new +import hmac +from gettext import gettext as _ +import socket + +try: + import socks +except ImportError: + try: + import socks + except (ImportError, AttributeError): + socks = None + +# Build the appropriate socket wrapper for ssl +ssl = None +ssl_SSLError = None +ssl_CertificateError = None +try: + import ssl # python 2.6 +except ImportError: + pass +if ssl is not None: + ssl_SSLError = getattr(ssl, "SSLError", None) + ssl_CertificateError = getattr(ssl, "CertificateError", None) + + +def _ssl_wrap_socket( + sock, key_file, cert_file, disable_validation, ca_certs, ssl_version, hostname, key_password +): + if disable_validation: + cert_reqs = ssl.CERT_NONE + else: + cert_reqs = ssl.CERT_REQUIRED + if ssl_version is None: + ssl_version = ssl.PROTOCOL_SSLv23 + + if hasattr(ssl, "SSLContext"): # Python 2.7.9 + context = ssl.SSLContext(ssl_version) + context.verify_mode = cert_reqs + 
context.check_hostname = cert_reqs != ssl.CERT_NONE + if cert_file: + if key_password: + context.load_cert_chain(cert_file, key_file, key_password) + else: + context.load_cert_chain(cert_file, key_file) + if ca_certs: + context.load_verify_locations(ca_certs) + return context.wrap_socket(sock, server_hostname=hostname) + else: + if key_password: + raise NotSupportedOnThisPlatform("Certificate with password is not supported.") + return ssl.wrap_socket( + sock, + keyfile=key_file, + certfile=cert_file, + cert_reqs=cert_reqs, + ca_certs=ca_certs, + ssl_version=ssl_version, + ) + + +def _ssl_wrap_socket_unsupported( + sock, key_file, cert_file, disable_validation, ca_certs, ssl_version, hostname, key_password +): + if not disable_validation: + raise CertificateValidationUnsupported( + "SSL certificate validation is not supported without " + "the ssl module installed. To avoid this error, install " + "the ssl module, or explicity disable validation." + ) + if key_password: + raise NotSupportedOnThisPlatform("Certificate with password is not supported.") + ssl_sock = socket.ssl(sock, key_file, cert_file) + return httplib.FakeSocket(sock, ssl_sock) + + +if ssl is None: + _ssl_wrap_socket = _ssl_wrap_socket_unsupported + +if sys.version_info >= (2, 3): + from iri2uri import iri2uri +else: + + def iri2uri(uri): + return uri + + +def has_timeout(timeout): # python 2.6 + if hasattr(socket, "_GLOBAL_DEFAULT_TIMEOUT"): + return timeout is not None and timeout is not socket._GLOBAL_DEFAULT_TIMEOUT + return timeout is not None + + +__all__ = [ + "Http", + "Response", + "ProxyInfo", + "HttpLib2Error", + "RedirectMissingLocation", + "RedirectLimit", + "FailedToDecompressContent", + "UnimplementedDigestAuthOptionError", + "UnimplementedHmacDigestAuthOptionError", + "debuglevel", + "ProxiesUnavailableError", +] + +# The httplib debug level, set to a non-zero value to get debug output +debuglevel = 0 + +# A request will be tried 'RETRIES' times if it fails at the socket/connection level. +RETRIES = 2 + +# Python 2.3 support +if sys.version_info < (2, 4): + + def sorted(seq): + seq.sort() + return seq + + +# Python 2.3 support +def HTTPResponse__getheaders(self): + """Return list of (header, value) tuples.""" + if self.msg is None: + raise httplib.ResponseNotReady() + return self.msg.items() + + +if not hasattr(httplib.HTTPResponse, "getheaders"): + httplib.HTTPResponse.getheaders = HTTPResponse__getheaders + + +# All exceptions raised here derive from HttpLib2Error +class HttpLib2Error(Exception): + pass + + +# Some exceptions can be caught and optionally +# be turned back into responses. 
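+# For illustration (hypothetical host), with
+#     h = Http()
+#     h.force_exception_to_status_code = True
+# a failing request returns a synthetic Response instead of raising:
+#     resp, content = h.request("http://no-such-host.invalid/")
+# gives resp.status == 400 and resp.reason == "Bad Request", with content
+# carrying str(e); timeouts map to 408 "Request Timeout" instead.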
+class HttpLib2ErrorWithResponse(HttpLib2Error): + def __init__(self, desc, response, content): + self.response = response + self.content = content + HttpLib2Error.__init__(self, desc) + + +class RedirectMissingLocation(HttpLib2ErrorWithResponse): + pass + + +class RedirectLimit(HttpLib2ErrorWithResponse): + pass + + +class FailedToDecompressContent(HttpLib2ErrorWithResponse): + pass + + +class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): + pass + + +class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): + pass + + +class MalformedHeader(HttpLib2Error): + pass + + +class RelativeURIError(HttpLib2Error): + pass + + +class ServerNotFoundError(HttpLib2Error): + pass + + +class ProxiesUnavailableError(HttpLib2Error): + pass + + +class CertificateValidationUnsupported(HttpLib2Error): + pass + + +class SSLHandshakeError(HttpLib2Error): + pass + + +class NotSupportedOnThisPlatform(HttpLib2Error): + pass + + +class CertificateHostnameMismatch(SSLHandshakeError): + def __init__(self, desc, host, cert): + HttpLib2Error.__init__(self, desc) + self.host = host + self.cert = cert + + +class NotRunningAppEngineEnvironment(HttpLib2Error): + pass + + +# Open Items: +# ----------- +# Proxy support + +# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?) + +# Pluggable cache storage (supports storing the cache in +# flat files by default. We need a plug-in architecture +# that can support Berkeley DB and Squid) + +# == Known Issues == +# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator. +# Does not handle Cache-Control: max-stale +# Does not use Age: headers when calculating cache freshness. + +# The number of redirections to follow before giving up. +# Note that only GET redirects are automatically followed. +# Will also honor 301 requests by saving that info and never +# requesting that URI again. +DEFAULT_MAX_REDIRECTS = 5 + +import certs +CA_CERTS = certs.where() + +# Which headers are hop-by-hop headers by default +HOP_BY_HOP = [ + "connection", + "keep-alive", + "proxy-authenticate", + "proxy-authorization", + "te", + "trailers", + "transfer-encoding", + "upgrade", +] + +# https://tools.ietf.org/html/rfc7231#section-8.1.3 +SAFE_METHODS = ("GET", "HEAD") # TODO add "OPTIONS", "TRACE" + +# To change, assign to `Http().redirect_codes` +REDIRECT_CODES = frozenset((300, 301, 302, 303, 307, 308)) + + +def _get_end2end_headers(response): + hopbyhop = list(HOP_BY_HOP) + hopbyhop.extend([x.strip() for x in response.get("connection", "").split(",")]) + return [header for header in response.keys() if header not in hopbyhop] + + +URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?") + + +def parse_uri(uri): + """Parses a URI using the regex given in Appendix B of RFC 3986. + + (scheme, authority, path, query, fragment) = parse_uri(uri) + """ + groups = URI.match(uri).groups() + return (groups[1], groups[3], groups[4], groups[6], groups[8]) + + +def urlnorm(uri): + (scheme, authority, path, query, fragment) = parse_uri(uri) + if not scheme or not authority: + raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri) + authority = authority.lower() + scheme = scheme.lower() + if not path: + path = "/" + # Could do syntax based normalization of the URI before + # computing the digest. See Section 6.2.2 of Std 66. 
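+    # For illustration (not an upstream doctest):
+    #   urlnorm("HTTP://Example.COM/A?b=1")
+    #   -> ("http", "example.com", "/A?b=1", "http://example.com/A?b=1")
+    # Scheme and authority are lowercased; the path is left untouched.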
+    request_uri = query and "?".join([path, query]) or path
+    scheme = scheme.lower()
+    defrag_uri = scheme + "://" + authority + request_uri
+    return scheme, authority, request_uri, defrag_uri
+
+
+# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
+re_url_scheme = re.compile(r"^\w+://")
+re_unsafe = re.compile(r"[^\w\-_.()=!]+")
+
+
+def safename(filename):
+    """Return a filename suitable for the cache.
+    Strips dangerous and common characters to create a filename we
+    can use to store the cache in.
+    """
+    if isinstance(filename, str):
+        filename_bytes = filename
+        filename = filename.decode("utf-8")
+    else:
+        filename_bytes = filename.encode("utf-8")
+    filemd5 = _md5(filename_bytes).hexdigest()
+    filename = re_url_scheme.sub("", filename)
+    filename = re_unsafe.sub("", filename)
+
+    # limit length of filename (vital for Windows)
+    # https://github.com/httplib2/httplib2/pull/74
+    # C:\Users\ <username> \AppData\Local\Temp\ <safe_filename> , <md5>
+    # 9 chars + max 104 chars + 20 chars + x + 1 + 32 = max 259 chars
+    # Thus max safe filename x = 93 chars. Let it be 90 to make a round sum:
+    filename = filename[:90]
+
+    return ",".join((filename, filemd5))
+
+
+NORMALIZE_SPACE = re.compile(r"(?:\r\n)?[ \t]+")
+
+
+def _normalize_headers(headers):
+    return dict(
+        [
+            (key.lower(), NORMALIZE_SPACE.sub(" ", value).strip())
+            for (key, value) in headers.iteritems()
+        ]
+    )
+
+
+def _parse_cache_control(headers):
+    retval = {}
+    if "cache-control" in headers:
+        parts = headers["cache-control"].split(",")
+        parts_with_args = [
+            tuple([x.strip().lower() for x in part.split("=", 1)])
+            for part in parts
+            if -1 != part.find("=")
+        ]
+        parts_wo_args = [
+            (name.strip().lower(), 1) for name in parts if -1 == name.find("=")
+        ]
+        retval = dict(parts_with_args + parts_wo_args)
+    return retval
+
+
+# Whether to use a strict mode to parse WWW-Authenticate headers
+# Might lead to bad results in case of ill-formed header values,
+# so it is disabled by default, falling back to relaxed parsing.
+# Set to true to turn it on, useful for testing servers.
+USE_WWW_AUTH_STRICT_PARSING = 0
+
+# In regex below:
+# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP
+# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space
+# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both:
+# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?
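+# For illustration (assumed header value, not from upstream tests):
+#   _parse_www_authenticate({"www-authenticate": 'Basic realm="me"'})
+#   -> {"basic": {"realm": "me"}}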
+WWW_AUTH_STRICT = re.compile(
+    r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$"
+)
+WWW_AUTH_RELAXED = re.compile(
+    r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$"
+)
+UNQUOTE_PAIRS = re.compile(r"\\(.)")
+
+
+def _parse_www_authenticate(headers, headername="www-authenticate"):
+    """Returns a dictionary of dictionaries, one dict
+    per auth_scheme."""
+    retval = {}
+    if headername in headers:
+        try:
+            authenticate = headers[headername].strip()
+            www_auth = (
+                USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED
+            )
+            while authenticate:
+                # Break off the scheme at the beginning of the line
+                if headername == "authentication-info":
+                    (auth_scheme, the_rest) = ("digest", authenticate)
+                else:
+                    (auth_scheme, the_rest) = authenticate.split(" ", 1)
+                # Now loop over all the key value pairs that come after the scheme,
+                # being careful not to roll into the next scheme
+                match = www_auth.search(the_rest)
+                auth_params = {}
+                while match:
+                    if match and len(match.groups()) == 3:
+                        (key, value, the_rest) = match.groups()
+                        auth_params[key.lower()] = UNQUOTE_PAIRS.sub(
+                            r"\1", value
+                        )  # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')])
+                        match = www_auth.search(the_rest)
+                retval[auth_scheme.lower()] = auth_params
+                authenticate = the_rest.strip()
+
+        except ValueError:
+            raise MalformedHeader("WWW-Authenticate")
+    return retval
+
+
+# TODO: add current time as _entry_disposition argument to avoid sleep in tests
+def _entry_disposition(response_headers, request_headers):
+    """Determine freshness from the Date, Expires and Cache-Control headers.
+
+    We don't handle the following:
+
+    1. Cache-Control: max-stale
+    2. Age: headers are not used in the calculations.
+
+    Note that this algorithm is simpler than you might think
+    because we are operating as a private (non-shared) cache.
+    This lets us ignore 's-maxage'. We can also ignore
+    'proxy-invalidate' since we aren't a proxy.
+    As a design decision we will never return a stale document as
+    fresh, hence the non-implementation of 'max-stale'. This also
+    lets us safely ignore 'must-revalidate' since we operate as if
+    every server has sent 'must-revalidate'.
+    Since we are private we get to ignore both 'public' and
+    'private' parameters. We also ignore 'no-transform' since
+    we don't do any transformations.
+    The 'no-store' parameter is handled at a higher level.
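+
+    For illustration: a response carrying "Date: <now - 100s>" and
+    "Cache-Control: max-age=3600" gives current_age = 100 and
+    freshness_lifetime = 3600, so the entry is reported FRESH; with
+    max-age=60 the same entry would be STALE.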
+ So the only Cache-Control parameters we look at are: + + no-cache + only-if-cached + max-age + min-fresh + """ + + retval = "STALE" + cc = _parse_cache_control(request_headers) + cc_response = _parse_cache_control(response_headers) + + if ( + "pragma" in request_headers + and request_headers["pragma"].lower().find("no-cache") != -1 + ): + retval = "TRANSPARENT" + if "cache-control" not in request_headers: + request_headers["cache-control"] = "no-cache" + elif "no-cache" in cc: + retval = "TRANSPARENT" + elif "no-cache" in cc_response: + retval = "STALE" + elif "only-if-cached" in cc: + retval = "FRESH" + elif "date" in response_headers: + date = calendar.timegm(email.Utils.parsedate_tz(response_headers["date"])) + now = time.time() + current_age = max(0, now - date) + if "max-age" in cc_response: + try: + freshness_lifetime = int(cc_response["max-age"]) + except ValueError: + freshness_lifetime = 0 + elif "expires" in response_headers: + expires = email.Utils.parsedate_tz(response_headers["expires"]) + if None == expires: + freshness_lifetime = 0 + else: + freshness_lifetime = max(0, calendar.timegm(expires) - date) + else: + freshness_lifetime = 0 + if "max-age" in cc: + try: + freshness_lifetime = int(cc["max-age"]) + except ValueError: + freshness_lifetime = 0 + if "min-fresh" in cc: + try: + min_fresh = int(cc["min-fresh"]) + except ValueError: + min_fresh = 0 + current_age += min_fresh + if freshness_lifetime > current_age: + retval = "FRESH" + return retval + + +def _decompressContent(response, new_content): + content = new_content + try: + encoding = response.get("content-encoding", None) + if encoding in ["gzip", "deflate"]: + if encoding == "gzip": + content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read() + if encoding == "deflate": + content = zlib.decompress(content, -zlib.MAX_WBITS) + response["content-length"] = str(len(content)) + # Record the historical presence of the encoding in a way the won't interfere. + response["-content-encoding"] = response["content-encoding"] + del response["content-encoding"] + except (IOError, zlib.error): + content = "" + raise FailedToDecompressContent( + _("Content purported to be compressed with %s but failed to decompress.") + % response.get("content-encoding"), + response, + content, + ) + return content + + +def _updateCache(request_headers, response_headers, content, cache, cachekey): + if cachekey: + cc = _parse_cache_control(request_headers) + cc_response = _parse_cache_control(response_headers) + if "no-store" in cc or "no-store" in cc_response: + cache.delete(cachekey) + else: + info = email.Message.Message() + for key, value in response_headers.iteritems(): + if key not in ["status", "content-encoding", "transfer-encoding"]: + info[key] = value + + # Add annotations to the cache to indicate what headers + # are variant for this request. 
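+            # For illustration: a response with "Vary: Accept" on a request
+            # sent with "Accept: application/json" stores
+            #     info["-varied-accept"] = "application/json"
+            # so later lookups can check that the header still matches.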
+            vary = response_headers.get("vary", None)
+            if vary:
+                vary_headers = vary.lower().replace(" ", "").split(",")
+                for header in vary_headers:
+                    key = "-varied-%s" % header
+                    try:
+                        info[key] = request_headers[header]
+                    except KeyError:
+                        pass
+
+            status = response_headers.status
+            if status == 304:
+                status = 200
+
+            status_header = "status: %d\r\n" % status
+
+            header_str = info.as_string()
+
+            header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
+            text = "".join([status_header, header_str, content])
+
+            cache.set(cachekey, text)
+
+
+def _cnonce():
+    dig = _md5(
+        "%s:%s"
+        % (time.ctime(), ["0123456789"[random.randrange(0, 9)] for i in range(20)])
+    ).hexdigest()
+    return dig[:16]
+
+
+def _wsse_username_token(cnonce, iso_now, password):
+    return base64.b64encode(
+        _sha("%s%s%s" % (cnonce, iso_now, password)).digest()
+    ).strip()
+
+
+# For credentials we need two things, first
+# a pool of credentials to try (not necessarily tied to Basic, Digest, etc.)
+# Then we also need a list of URIs that have already demanded authentication
+# That list is tricky since sub-URIs can take the same auth, or the
+# auth scheme may change as you descend the tree.
+# So we also need each Auth instance to be able to tell us
+# how close to the 'top' it is.
+
+
+class Authentication(object):
+    def __init__(
+        self, credentials, host, request_uri, headers, response, content, http
+    ):
+        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
+        self.path = path
+        self.host = host
+        self.credentials = credentials
+        self.http = http
+
+    def depth(self, request_uri):
+        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
+        return request_uri[len(self.path) :].count("/")
+
+    def inscope(self, host, request_uri):
+        # XXX Should we normalize the request_uri?
+        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
+        return (host == self.host) and path.startswith(self.path)
+
+    def request(self, method, request_uri, headers, content):
+        """Modify the request headers to add the appropriate
+        Authorization header. Over-ride this in sub-classes."""
+        pass
+
+    def response(self, response, content):
+        """Gives us a chance to update with new nonces
+        or such returned from the last authorized response.
+        Over-ride this in sub-classes if necessary.
+
+        Return TRUE if the request is to be retried, for
+        example Digest may return stale=true.
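+
+        For illustration, DigestAuthentication.response returns True when
+        the server reports stale=true, which makes _request retry once with
+        a freshly signed Authorization header.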
+ """ + return False + + +class BasicAuthentication(Authentication): + def __init__( + self, credentials, host, request_uri, headers, response, content, http + ): + Authentication.__init__( + self, credentials, host, request_uri, headers, response, content, http + ) + + def request(self, method, request_uri, headers, content): + """Modify the request headers to add the appropriate + Authorization header.""" + headers["authorization"] = ( + "Basic " + base64.b64encode("%s:%s" % self.credentials).strip() + ) + + +class DigestAuthentication(Authentication): + """Only do qop='auth' and MD5, since that + is all Apache currently implements""" + + def __init__( + self, credentials, host, request_uri, headers, response, content, http + ): + Authentication.__init__( + self, credentials, host, request_uri, headers, response, content, http + ) + challenge = _parse_www_authenticate(response, "www-authenticate") + self.challenge = challenge["digest"] + qop = self.challenge.get("qop", "auth") + self.challenge["qop"] = ( + ("auth" in [x.strip() for x in qop.split()]) and "auth" or None + ) + if self.challenge["qop"] is None: + raise UnimplementedDigestAuthOptionError( + _("Unsupported value for qop: %s." % qop) + ) + self.challenge["algorithm"] = self.challenge.get("algorithm", "MD5").upper() + if self.challenge["algorithm"] != "MD5": + raise UnimplementedDigestAuthOptionError( + _("Unsupported value for algorithm: %s." % self.challenge["algorithm"]) + ) + self.A1 = "".join( + [ + self.credentials[0], + ":", + self.challenge["realm"], + ":", + self.credentials[1], + ] + ) + self.challenge["nc"] = 1 + + def request(self, method, request_uri, headers, content, cnonce=None): + """Modify the request headers""" + H = lambda x: _md5(x).hexdigest() + KD = lambda s, d: H("%s:%s" % (s, d)) + A2 = "".join([method, ":", request_uri]) + self.challenge["cnonce"] = cnonce or _cnonce() + request_digest = '"%s"' % KD( + H(self.A1), + "%s:%s:%s:%s:%s" + % ( + self.challenge["nonce"], + "%08x" % self.challenge["nc"], + self.challenge["cnonce"], + self.challenge["qop"], + H(A2), + ), + ) + headers["authorization"] = ( + 'Digest username="%s", realm="%s", nonce="%s", ' + 'uri="%s", algorithm=%s, response=%s, qop=%s, ' + 'nc=%08x, cnonce="%s"' + ) % ( + self.credentials[0], + self.challenge["realm"], + self.challenge["nonce"], + request_uri, + self.challenge["algorithm"], + request_digest, + self.challenge["qop"], + self.challenge["nc"], + self.challenge["cnonce"], + ) + if self.challenge.get("opaque"): + headers["authorization"] += ', opaque="%s"' % self.challenge["opaque"] + self.challenge["nc"] += 1 + + def response(self, response, content): + if "authentication-info" not in response: + challenge = _parse_www_authenticate(response, "www-authenticate").get( + "digest", {} + ) + if "true" == challenge.get("stale"): + self.challenge["nonce"] = challenge["nonce"] + self.challenge["nc"] = 1 + return True + else: + updated_challenge = _parse_www_authenticate( + response, "authentication-info" + ).get("digest", {}) + + if "nextnonce" in updated_challenge: + self.challenge["nonce"] = updated_challenge["nextnonce"] + self.challenge["nc"] = 1 + return False + + +class HmacDigestAuthentication(Authentication): + """Adapted from Robert Sayre's code and DigestAuthentication above.""" + + __author__ = "Thomas Broyer (t.broyer@ltgt.net)" + + def __init__( + self, credentials, host, request_uri, headers, response, content, http + ): + Authentication.__init__( + self, credentials, host, request_uri, headers, response, content, http + ) + 
challenge = _parse_www_authenticate(response, "www-authenticate") + self.challenge = challenge["hmacdigest"] + # TODO: self.challenge['domain'] + self.challenge["reason"] = self.challenge.get("reason", "unauthorized") + if self.challenge["reason"] not in ["unauthorized", "integrity"]: + self.challenge["reason"] = "unauthorized" + self.challenge["salt"] = self.challenge.get("salt", "") + if not self.challenge.get("snonce"): + raise UnimplementedHmacDigestAuthOptionError( + _("The challenge doesn't contain a server nonce, or this one is empty.") + ) + self.challenge["algorithm"] = self.challenge.get("algorithm", "HMAC-SHA-1") + if self.challenge["algorithm"] not in ["HMAC-SHA-1", "HMAC-MD5"]: + raise UnimplementedHmacDigestAuthOptionError( + _("Unsupported value for algorithm: %s." % self.challenge["algorithm"]) + ) + self.challenge["pw-algorithm"] = self.challenge.get("pw-algorithm", "SHA-1") + if self.challenge["pw-algorithm"] not in ["SHA-1", "MD5"]: + raise UnimplementedHmacDigestAuthOptionError( + _( + "Unsupported value for pw-algorithm: %s." + % self.challenge["pw-algorithm"] + ) + ) + if self.challenge["algorithm"] == "HMAC-MD5": + self.hashmod = _md5 + else: + self.hashmod = _sha + if self.challenge["pw-algorithm"] == "MD5": + self.pwhashmod = _md5 + else: + self.pwhashmod = _sha + self.key = "".join( + [ + self.credentials[0], + ":", + self.pwhashmod.new( + "".join([self.credentials[1], self.challenge["salt"]]) + ) + .hexdigest() + .lower(), + ":", + self.challenge["realm"], + ] + ) + self.key = self.pwhashmod.new(self.key).hexdigest().lower() + + def request(self, method, request_uri, headers, content): + """Modify the request headers""" + keys = _get_end2end_headers(headers) + keylist = "".join(["%s " % k for k in keys]) + headers_val = "".join([headers[k] for k in keys]) + created = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) + cnonce = _cnonce() + request_digest = "%s:%s:%s:%s:%s" % ( + method, + request_uri, + cnonce, + self.challenge["snonce"], + headers_val, + ) + request_digest = ( + hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower() + ) + headers["authorization"] = ( + 'HMACDigest username="%s", realm="%s", snonce="%s",' + ' cnonce="%s", uri="%s", created="%s", ' + 'response="%s", headers="%s"' + ) % ( + self.credentials[0], + self.challenge["realm"], + self.challenge["snonce"], + cnonce, + request_uri, + created, + request_digest, + keylist, + ) + + def response(self, response, content): + challenge = _parse_www_authenticate(response, "www-authenticate").get( + "hmacdigest", {} + ) + if challenge.get("reason") in ["integrity", "stale"]: + return True + return False + + +class WsseAuthentication(Authentication): + """This is thinly tested and should not be relied upon. + At this time there isn't any third party server to test against. 
+ Blogger and TypePad implemented this algorithm at one point + but Blogger has since switched to Basic over HTTPS and + TypePad has implemented it wrong, by never issuing a 401 + challenge but instead requiring your client to telepathically know that + their endpoint is expecting WSSE profile="UsernameToken".""" + + def __init__( + self, credentials, host, request_uri, headers, response, content, http + ): + Authentication.__init__( + self, credentials, host, request_uri, headers, response, content, http + ) + + def request(self, method, request_uri, headers, content): + """Modify the request headers to add the appropriate + Authorization header.""" + headers["authorization"] = 'WSSE profile="UsernameToken"' + iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) + cnonce = _cnonce() + password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1]) + headers["X-WSSE"] = ( + 'UsernameToken Username="%s", PasswordDigest="%s", ' + 'Nonce="%s", Created="%s"' + ) % (self.credentials[0], password_digest, cnonce, iso_now) + + +class GoogleLoginAuthentication(Authentication): + def __init__( + self, credentials, host, request_uri, headers, response, content, http + ): + from urllib import urlencode + + Authentication.__init__( + self, credentials, host, request_uri, headers, response, content, http + ) + challenge = _parse_www_authenticate(response, "www-authenticate") + service = challenge["googlelogin"].get("service", "xapi") + # Bloggger actually returns the service in the challenge + # For the rest we guess based on the URI + if service == "xapi" and request_uri.find("calendar") > 0: + service = "cl" + # No point in guessing Base or Spreadsheet + # elif request_uri.find("spreadsheets") > 0: + # service = "wise" + + auth = dict( + Email=credentials[0], + Passwd=credentials[1], + service=service, + source=headers["user-agent"], + ) + resp, content = self.http.request( + "https://www.google.com/accounts/ClientLogin", + method="POST", + body=urlencode(auth), + headers={"Content-Type": "application/x-www-form-urlencoded"}, + ) + lines = content.split("\n") + d = dict([tuple(line.split("=", 1)) for line in lines if line]) + if resp.status == 403: + self.Auth = "" + else: + self.Auth = d["Auth"] + + def request(self, method, request_uri, headers, content): + """Modify the request headers to add the appropriate + Authorization header.""" + headers["authorization"] = "GoogleLogin Auth=" + self.Auth + + +AUTH_SCHEME_CLASSES = { + "basic": BasicAuthentication, + "wsse": WsseAuthentication, + "digest": DigestAuthentication, + "hmacdigest": HmacDigestAuthentication, + "googlelogin": GoogleLoginAuthentication, +} + +AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"] + + +class FileCache(object): + """Uses a local directory as a store for cached files. + Not really safe to use if multiple threads or processes are going to + be running on the same cache. 
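+
+    For illustration (hypothetical layout): FileCache(".cache") saves the
+    entry for "http://example.org/" under ".cache/example.org,<md5>",
+    the name produced by safename() above.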
+ """ + + def __init__( + self, cache, safe=safename + ): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior + self.cache = cache + self.safe = safe + if not os.path.exists(cache): + os.makedirs(self.cache) + + def get(self, key): + retval = None + cacheFullPath = os.path.join(self.cache, self.safe(key)) + try: + f = file(cacheFullPath, "rb") + retval = f.read() + f.close() + except IOError: + pass + return retval + + def set(self, key, value): + cacheFullPath = os.path.join(self.cache, self.safe(key)) + f = file(cacheFullPath, "wb") + f.write(value) + f.close() + + def delete(self, key): + cacheFullPath = os.path.join(self.cache, self.safe(key)) + if os.path.exists(cacheFullPath): + os.remove(cacheFullPath) + + +class Credentials(object): + def __init__(self): + self.credentials = [] + + def add(self, name, password, domain=""): + self.credentials.append((domain.lower(), name, password)) + + def clear(self): + self.credentials = [] + + def iter(self, domain): + for (cdomain, name, password) in self.credentials: + if cdomain == "" or domain == cdomain: + yield (name, password) + + +class KeyCerts(Credentials): + """Identical to Credentials except that + name/password are mapped to key/cert.""" + def add(self, key, cert, domain, password): + self.credentials.append((domain.lower(), key, cert, password)) + + def iter(self, domain): + for (cdomain, key, cert, password) in self.credentials: + if cdomain == "" or domain == cdomain: + yield (key, cert, password) + + +class AllHosts(object): + pass + + +class ProxyInfo(object): + """Collect information required to use a proxy.""" + + bypass_hosts = () + + def __init__( + self, + proxy_type, + proxy_host, + proxy_port, + proxy_rdns=True, + proxy_user=None, + proxy_pass=None, + proxy_headers=None, + ): + """Args: + + proxy_type: The type of proxy server. This must be set to one of + socks.PROXY_TYPE_XXX constants. For example: p = + ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, proxy_host='localhost', + proxy_port=8000) + proxy_host: The hostname or IP address of the proxy server. + proxy_port: The port that the proxy server is running on. + proxy_rdns: If True (default), DNS queries will not be performed + locally, and instead, handed to the proxy to resolve. This is useful + if the network does not allow resolution of non-local names. In + httplib2 0.9 and earlier, this defaulted to False. + proxy_user: The username used to authenticate with the proxy server. + proxy_pass: The password used to authenticate with the proxy server. + proxy_headers: Additional or modified headers for the proxy connect + request. + """ + self.proxy_type = proxy_type + self.proxy_host = proxy_host + self.proxy_port = proxy_port + self.proxy_rdns = proxy_rdns + self.proxy_user = proxy_user + self.proxy_pass = proxy_pass + self.proxy_headers = proxy_headers + + def astuple(self): + return ( + self.proxy_type, + self.proxy_host, + self.proxy_port, + self.proxy_rdns, + self.proxy_user, + self.proxy_pass, + self.proxy_headers, + ) + + def isgood(self): + return (self.proxy_host != None) and (self.proxy_port != None) + + def applies_to(self, hostname): + return not self.bypass_host(hostname) + + def bypass_host(self, hostname): + """Has this host been excluded from the proxy config""" + if self.bypass_hosts is AllHosts: + return True + + hostname = "." + hostname.lstrip(".") + for skip_name in self.bypass_hosts: + # *.suffix + if skip_name.startswith(".") and hostname.endswith(skip_name): + return True + # exact match + if hostname == "." 
+ skip_name: + return True + return False + + def __repr__(self): + return ( + "<ProxyInfo type={p.proxy_type} " + "host:port={p.proxy_host}:{p.proxy_port} rdns={p.proxy_rdns}" + + " user={p.proxy_user} headers={p.proxy_headers}>" + ).format(p=self) + + +def proxy_info_from_environment(method="http"): + """Read proxy info from the environment variables. + """ + if method not in ["http", "https"]: + return + + env_var = method + "_proxy" + url = os.environ.get(env_var, os.environ.get(env_var.upper())) + if not url: + return + return proxy_info_from_url(url, method, None) + + +def proxy_info_from_url(url, method="http", noproxy=None): + """Construct a ProxyInfo from a URL (such as http_proxy env var) + """ + url = urlparse.urlparse(url) + username = None + password = None + port = None + if "@" in url[1]: + ident, host_port = url[1].split("@", 1) + if ":" in ident: + username, password = ident.split(":", 1) + else: + password = ident + else: + host_port = url[1] + if ":" in host_port: + host, port = host_port.split(":", 1) + else: + host = host_port + + if port: + port = int(port) + else: + port = dict(https=443, http=80)[method] + + proxy_type = 3 # socks.PROXY_TYPE_HTTP + pi = ProxyInfo( + proxy_type=proxy_type, + proxy_host=host, + proxy_port=port, + proxy_user=username or None, + proxy_pass=password or None, + proxy_headers=None, + ) + + bypass_hosts = [] + # If not given an explicit noproxy value, respect values in env vars. + if noproxy is None: + noproxy = os.environ.get("no_proxy", os.environ.get("NO_PROXY", "")) + # Special case: A single '*' character means all hosts should be bypassed. + if noproxy == "*": + bypass_hosts = AllHosts + elif noproxy.strip(): + bypass_hosts = noproxy.split(",") + bypass_hosts = filter(bool, bypass_hosts) # To exclude empty string. + + pi.bypass_hosts = bypass_hosts + return pi + + +class HTTPConnectionWithTimeout(httplib.HTTPConnection): + """HTTPConnection subclass that supports timeouts + + All timeouts are in seconds. If None is passed for timeout then + Python's default timeout for sockets will be used. See for example + the docs of socket.setdefaulttimeout(): + http://docs.python.org/library/socket.html#socket.setdefaulttimeout + """ + + def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None): + httplib.HTTPConnection.__init__(self, host, port, strict) + self.timeout = timeout + self.proxy_info = proxy_info + + def connect(self): + """Connect to the host and port specified in __init__.""" + # Mostly verbatim from httplib.py. + if self.proxy_info and socks is None: + raise ProxiesUnavailableError( + "Proxy support missing but proxy use was requested!" + ) + if self.proxy_info and self.proxy_info.isgood(): + use_proxy = True + proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers = ( + self.proxy_info.astuple() + ) + + host = proxy_host + port = proxy_port + else: + use_proxy = False + + host = self.host + port = self.port + + socket_err = None + + for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM): + af, socktype, proto, canonname, sa = res + try: + if use_proxy: + self.sock = socks.socksocket(af, socktype, proto) + self.sock.setproxy( + proxy_type, + proxy_host, + proxy_port, + proxy_rdns, + proxy_user, + proxy_pass, + proxy_headers, + ) + else: + self.sock = socket.socket(af, socktype, proto) + self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + # Different from httplib: support timeouts. 
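+                # For illustration: Http(timeout=10) propagates here, so each
+                # socket gets settimeout(10); with timeout=None the global
+                # socket default applies and settimeout() is skipped.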
+ if has_timeout(self.timeout): + self.sock.settimeout(self.timeout) + # End of difference from httplib. + if self.debuglevel > 0: + print("connect: (%s, %s) ************" % (self.host, self.port)) + if use_proxy: + print( + "proxy: %s ************" + % str( + ( + proxy_host, + proxy_port, + proxy_rdns, + proxy_user, + proxy_pass, + proxy_headers, + ) + ) + ) + if use_proxy: + self.sock.connect((self.host, self.port) + sa[2:]) + else: + self.sock.connect(sa) + except socket.error as e: + socket_err = e + if self.debuglevel > 0: + print("connect fail: (%s, %s)" % (self.host, self.port)) + if use_proxy: + print( + "proxy: %s" + % str( + ( + proxy_host, + proxy_port, + proxy_rdns, + proxy_user, + proxy_pass, + proxy_headers, + ) + ) + ) + if self.sock: + self.sock.close() + self.sock = None + continue + break + if not self.sock: + raise socket_err or socket.error("getaddrinfo returns an empty list") + + +class HTTPSConnectionWithTimeout(httplib.HTTPSConnection): + """This class allows communication via SSL. + + All timeouts are in seconds. If None is passed for timeout then + Python's default timeout for sockets will be used. See for example + the docs of socket.setdefaulttimeout(): + http://docs.python.org/library/socket.html#socket.setdefaulttimeout + """ + + def __init__( + self, + host, + port=None, + key_file=None, + cert_file=None, + strict=None, + timeout=None, + proxy_info=None, + ca_certs=None, + disable_ssl_certificate_validation=False, + ssl_version=None, + key_password=None, + ): + if key_password: + httplib.HTTPSConnection.__init__(self, host, port=port, strict=strict) + self._context.load_cert_chain(cert_file, key_file, key_password) + self.key_file = key_file + self.cert_file = cert_file + self.key_password = key_password + else: + httplib.HTTPSConnection.__init__( + self, host, port=port, key_file=key_file, cert_file=cert_file, strict=strict + ) + self.key_password = None + self.timeout = timeout + self.proxy_info = proxy_info + if ca_certs is None: + ca_certs = CA_CERTS + self.ca_certs = ca_certs + self.disable_ssl_certificate_validation = disable_ssl_certificate_validation + self.ssl_version = ssl_version + + # The following two methods were adapted from https_wrapper.py, released + # with the Google Appengine SDK at + # http://googleappengine.googlecode.com/svn-history/r136/trunk/python/google/appengine/tools/https_wrapper.py + # under the following license: + # + # Copyright 2007 Google Inc. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + # + + def _GetValidHostsForCert(self, cert): + """Returns a list of valid host globs for an SSL certificate. + + Args: + cert: A dictionary representing an SSL certificate. + Returns: + list: A list of valid host globs. 
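+
+        For illustration, a cert dict of the form
+          {"subjectAltName": (("DNS", "example.com"), ("DNS", "*.example.com"))}
+        yields ["example.com", "*.example.com"].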
+ """ + if "subjectAltName" in cert: + return [x[1] for x in cert["subjectAltName"] if x[0].lower() == "dns"] + else: + return [x[0][1] for x in cert["subject"] if x[0][0].lower() == "commonname"] + + def _ValidateCertificateHostname(self, cert, hostname): + """Validates that a given hostname is valid for an SSL certificate. + + Args: + cert: A dictionary representing an SSL certificate. + hostname: The hostname to test. + Returns: + bool: Whether or not the hostname is valid for this certificate. + """ + hosts = self._GetValidHostsForCert(cert) + for host in hosts: + host_re = host.replace(".", "\.").replace("*", "[^.]*") + if re.search("^%s$" % (host_re,), hostname, re.I): + return True + return False + + def connect(self): + "Connect to a host on a given (SSL) port." + + if self.proxy_info and self.proxy_info.isgood(): + use_proxy = True + proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers = ( + self.proxy_info.astuple() + ) + + host = proxy_host + port = proxy_port + else: + use_proxy = False + + host = self.host + port = self.port + + socket_err = None + + address_info = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM) + for family, socktype, proto, canonname, sockaddr in address_info: + try: + if use_proxy: + sock = socks.socksocket(family, socktype, proto) + + sock.setproxy( + proxy_type, + proxy_host, + proxy_port, + proxy_rdns, + proxy_user, + proxy_pass, + proxy_headers, + ) + else: + sock = socket.socket(family, socktype, proto) + sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + + if has_timeout(self.timeout): + sock.settimeout(self.timeout) + + if use_proxy: + sock.connect((self.host, self.port) + sockaddr[:2]) + else: + sock.connect(sockaddr) + self.sock = _ssl_wrap_socket( + sock, + self.key_file, + self.cert_file, + self.disable_ssl_certificate_validation, + self.ca_certs, + self.ssl_version, + self.host, + self.key_password, + ) + if self.debuglevel > 0: + print("connect: (%s, %s)" % (self.host, self.port)) + if use_proxy: + print( + "proxy: %s" + % str( + ( + proxy_host, + proxy_port, + proxy_rdns, + proxy_user, + proxy_pass, + proxy_headers, + ) + ) + ) + if not self.disable_ssl_certificate_validation: + cert = self.sock.getpeercert() + hostname = self.host.split(":", 0)[0] + if not self._ValidateCertificateHostname(cert, hostname): + raise CertificateHostnameMismatch( + "Server presented certificate that does not match " + "host %s: %s" % (hostname, cert), + hostname, + cert, + ) + except ( + ssl_SSLError, + ssl_CertificateError, + CertificateHostnameMismatch, + ) as e: + if sock: + sock.close() + if self.sock: + self.sock.close() + self.sock = None + # Unfortunately the ssl module doesn't seem to provide any way + # to get at more detailed error information, in particular + # whether the error is due to certificate validation or + # something else (such as SSL protocol mismatch). 
+ if getattr(e, "errno", None) == ssl.SSL_ERROR_SSL: + raise SSLHandshakeError(e) + else: + raise + except (socket.timeout, socket.gaierror): + raise + except socket.error as e: + socket_err = e + if self.debuglevel > 0: + print("connect fail: (%s, %s)" % (self.host, self.port)) + if use_proxy: + print( + "proxy: %s" + % str( + ( + proxy_host, + proxy_port, + proxy_rdns, + proxy_user, + proxy_pass, + proxy_headers, + ) + ) + ) + if self.sock: + self.sock.close() + self.sock = None + continue + break + if not self.sock: + raise socket_err or socket.error("getaddrinfo returns an empty list") + + +SCHEME_TO_CONNECTION = { + "http": HTTPConnectionWithTimeout, + "https": HTTPSConnectionWithTimeout, +} + + +def _new_fixed_fetch(validate_certificate): + + def fixed_fetch( + url, + payload=None, + method="GET", + headers={}, + allow_truncated=False, + follow_redirects=True, + deadline=None, + ): + return fetch( + url, + payload=payload, + method=method, + headers=headers, + allow_truncated=allow_truncated, + follow_redirects=follow_redirects, + deadline=deadline, + validate_certificate=validate_certificate, + ) + + return fixed_fetch + + +class AppEngineHttpConnection(httplib.HTTPConnection): + """Use httplib on App Engine, but compensate for its weirdness. + + The parameters key_file, cert_file, proxy_info, ca_certs, + disable_ssl_certificate_validation, and ssl_version are all dropped on + the ground. + """ + + def __init__( + self, + host, + port=None, + key_file=None, + cert_file=None, + strict=None, + timeout=None, + proxy_info=None, + ca_certs=None, + disable_ssl_certificate_validation=False, + ssl_version=None, + ): + httplib.HTTPConnection.__init__( + self, host, port=port, strict=strict, timeout=timeout + ) + + +class AppEngineHttpsConnection(httplib.HTTPSConnection): + """Same as AppEngineHttpConnection, but for HTTPS URIs. + + The parameters proxy_info, ca_certs, disable_ssl_certificate_validation, + and ssl_version are all dropped on the ground. + """ + + def __init__( + self, + host, + port=None, + key_file=None, + cert_file=None, + strict=None, + timeout=None, + proxy_info=None, + ca_certs=None, + disable_ssl_certificate_validation=False, + ssl_version=None, + key_password=None, + ): + if key_password: + raise NotSupportedOnThisPlatform("Certificate with password is not supported.") + httplib.HTTPSConnection.__init__( + self, + host, + port=port, + key_file=key_file, + cert_file=cert_file, + strict=strict, + timeout=timeout, + ) + self._fetch = _new_fixed_fetch(not disable_ssl_certificate_validation) + + +# Use a different connection object for Google App Engine Standard Environment. +def is_gae_instance(): + server_software = os.environ.get('SERVER_SOFTWARE', '') + if (server_software.startswith('Google App Engine/') or + server_software.startswith('Development/') or + server_software.startswith('testutil/')): + return True + return False + + +try: + if not is_gae_instance(): + raise NotRunningAppEngineEnvironment() + + from google.appengine.api import apiproxy_stub_map + if apiproxy_stub_map.apiproxy.GetStub("urlfetch") is None: + raise ImportError + + from google.appengine.api.urlfetch import fetch + + # Update the connection classes to use the Googel App Engine specific ones. 
+ SCHEME_TO_CONNECTION = {
+ "http": AppEngineHttpConnection,
+ "https": AppEngineHttpsConnection,
+ }
+except (ImportError, NotRunningAppEngineEnvironment):
+ pass
+
+
+class Http(object):
+ """An HTTP client that handles:
+
+ - all methods
+ - caching
+ - ETags
+ - compression
+ - HTTPS
+ - Basic
+ - Digest
+ - WSSE
+
+ and more.
+ """
+
+ def __init__(
+ self,
+ cache=None,
+ timeout=None,
+ proxy_info=proxy_info_from_environment,
+ ca_certs=None,
+ disable_ssl_certificate_validation=False,
+ ssl_version=None,
+ ):
+ """If 'cache' is a string then it is used as a directory name for
+ a disk cache. Otherwise it must be an object that supports the
+ same interface as FileCache.
+
+ All timeouts are in seconds. If None is passed for timeout
+ then Python's default timeout for sockets will be used. See
+ for example the docs of socket.setdefaulttimeout():
+ http://docs.python.org/library/socket.html#socket.setdefaulttimeout
+
+ `proxy_info` may be:
+ - a callable that takes the http scheme ('http' or 'https') and
+ returns a ProxyInfo instance per request. By default, uses
+ proxy_info_from_environment.
+ - a ProxyInfo instance (static proxy config).
+ - None (proxy disabled).
+
+ ca_certs is the path of a file containing root CA certificates for SSL
+ server certificate validation. By default, a CA cert file bundled with
+ httplib2 is used.
+
+ If disable_ssl_certificate_validation is true, SSL cert validation will
+ not be performed.
+
+ By default, ssl.PROTOCOL_SSLv23 will be used for the ssl version.
+ """
+ self.proxy_info = proxy_info
+ self.ca_certs = ca_certs
+ self.disable_ssl_certificate_validation = disable_ssl_certificate_validation
+ self.ssl_version = ssl_version
+
+ # Map domain name to an httplib connection
+ self.connections = {}
+ # The location of the cache, for now a directory
+ # where cached responses are held.
+ if cache and isinstance(cache, basestring):
+ self.cache = FileCache(cache)
+ else:
+ self.cache = cache
+
+ # Name/password
+ self.credentials = Credentials()
+
+ # Key/cert
+ self.certificates = KeyCerts()
+
+ # authorization objects
+ self.authorizations = []
+
+ # If set to False then no redirects are followed, even safe ones.
+ self.follow_redirects = True
+
+ self.redirect_codes = REDIRECT_CODES
+
+ # Which HTTP methods do we apply optimistic concurrency to, i.e.
+ # which methods get an "if-match:" etag header added to them.
+ self.optimistic_concurrency_methods = ["PUT", "PATCH"]
+
+ self.safe_methods = list(SAFE_METHODS)
+
+ # If 'follow_redirects' is True, and this is set to True then
+ # all redirects are followed, including unsafe ones.
+ self.follow_all_redirects = False
+
+ self.ignore_etag = False
+
+ self.force_exception_to_status_code = False
+
+ self.timeout = timeout
+
+ # Keep Authorization: headers on a redirect.
+ self.forward_authorization_headers = False
+
+ def close(self):
+ """Close persistent connections, clear sensitive data.
+ Not thread-safe, requires external synchronization against concurrent requests.
+ """ + existing, self.connections = self.connections, {} + for _, c in existing.iteritems(): + c.close() + self.certificates.clear() + self.clear_credentials() + + def __getstate__(self): + state_dict = copy.copy(self.__dict__) + # In case request is augmented by some foreign object such as + # credentials which handle auth + if "request" in state_dict: + del state_dict["request"] + if "connections" in state_dict: + del state_dict["connections"] + return state_dict + + def __setstate__(self, state): + self.__dict__.update(state) + self.connections = {} + + def _auth_from_challenge(self, host, request_uri, headers, response, content): + """A generator that creates Authorization objects + that can be applied to requests. + """ + challenges = _parse_www_authenticate(response, "www-authenticate") + for cred in self.credentials.iter(host): + for scheme in AUTH_SCHEME_ORDER: + if scheme in challenges: + yield AUTH_SCHEME_CLASSES[scheme]( + cred, host, request_uri, headers, response, content, self + ) + + def add_credentials(self, name, password, domain=""): + """Add a name and password that will be used + any time a request requires authentication.""" + self.credentials.add(name, password, domain) + + def add_certificate(self, key, cert, domain, password=None): + """Add a key and cert that will be used + any time a request requires authentication.""" + self.certificates.add(key, cert, domain, password) + + def clear_credentials(self): + """Remove all the names and passwords + that are used for authentication""" + self.credentials.clear() + self.authorizations = [] + + def _conn_request(self, conn, request_uri, method, body, headers): + i = 0 + seen_bad_status_line = False + while i < RETRIES: + i += 1 + try: + if hasattr(conn, "sock") and conn.sock is None: + conn.connect() + conn.request(method, request_uri, body, headers) + except socket.timeout: + raise + except socket.gaierror: + conn.close() + raise ServerNotFoundError("Unable to find the server at %s" % conn.host) + except ssl_SSLError: + conn.close() + raise + except socket.error as e: + err = 0 + if hasattr(e, "args"): + err = getattr(e, "args")[0] + else: + err = e.errno + if err == errno.ECONNREFUSED: # Connection refused + raise + if err in (errno.ENETUNREACH, errno.EADDRNOTAVAIL) and i < RETRIES: + continue # retry on potentially transient socket errors + except httplib.HTTPException: + # Just because the server closed the connection doesn't apparently mean + # that the server didn't send a response. + if hasattr(conn, "sock") and conn.sock is None: + if i < RETRIES - 1: + conn.close() + conn.connect() + continue + else: + conn.close() + raise + if i < RETRIES - 1: + conn.close() + conn.connect() + continue + try: + response = conn.getresponse() + except httplib.BadStatusLine: + # If we get a BadStatusLine on the first try then that means + # the connection just went stale, so retry regardless of the + # number of RETRIES set. 
+ if not seen_bad_status_line and i == 1: + i = 0 + seen_bad_status_line = True + conn.close() + conn.connect() + continue + else: + conn.close() + raise + except (socket.error, httplib.HTTPException): + if i < RETRIES - 1: + conn.close() + conn.connect() + continue + else: + conn.close() + raise + else: + content = "" + if method == "HEAD": + conn.close() + else: + content = response.read() + response = Response(response) + if method != "HEAD": + content = _decompressContent(response, content) + break + return (response, content) + + def _request( + self, + conn, + host, + absolute_uri, + request_uri, + method, + body, + headers, + redirections, + cachekey, + ): + """Do the actual request using the connection object + and also follow one level of redirects if necessary""" + + auths = [ + (auth.depth(request_uri), auth) + for auth in self.authorizations + if auth.inscope(host, request_uri) + ] + auth = auths and sorted(auths)[0][1] or None + if auth: + auth.request(method, request_uri, headers, body) + + (response, content) = self._conn_request( + conn, request_uri, method, body, headers + ) + + if auth: + if auth.response(response, body): + auth.request(method, request_uri, headers, body) + (response, content) = self._conn_request( + conn, request_uri, method, body, headers + ) + response._stale_digest = 1 + + if response.status == 401: + for authorization in self._auth_from_challenge( + host, request_uri, headers, response, content + ): + authorization.request(method, request_uri, headers, body) + (response, content) = self._conn_request( + conn, request_uri, method, body, headers + ) + if response.status != 401: + self.authorizations.append(authorization) + authorization.response(response, body) + break + + if ( + self.follow_all_redirects + or method in self.safe_methods + or response.status in (303, 308) + ): + if self.follow_redirects and response.status in self.redirect_codes: + # Pick out the location header and basically start from the beginning + # remembering first to strip the ETag header and decrement our 'depth' + if redirections: + if "location" not in response and response.status != 300: + raise RedirectMissingLocation( + _( + "Redirected but the response is missing a Location: header." 
+ ),
+ response,
+ content,
+ )
+ # Fix-up relative redirects (which violate an RFC 2616 MUST)
+ if "location" in response:
+ location = response["location"]
+ (scheme, authority, path, query, fragment) = parse_uri(location)
+ if authority is None:
+ response["location"] = urlparse.urljoin(
+ absolute_uri, location
+ )
+ if response.status == 308 or (response.status == 301 and method in self.safe_methods):
+ response["-x-permanent-redirect-url"] = response["location"]
+ if "content-location" not in response:
+ response["content-location"] = absolute_uri
+ _updateCache(headers, response, content, self.cache, cachekey)
+ if "if-none-match" in headers:
+ del headers["if-none-match"]
+ if "if-modified-since" in headers:
+ del headers["if-modified-since"]
+ if (
+ "authorization" in headers
+ and not self.forward_authorization_headers
+ ):
+ del headers["authorization"]
+ if "location" in response:
+ location = response["location"]
+ old_response = copy.deepcopy(response)
+ if "content-location" not in old_response:
+ old_response["content-location"] = absolute_uri
+ redirect_method = method
+ if response.status in [302, 303]:
+ redirect_method = "GET"
+ body = None
+ (response, content) = self.request(
+ location,
+ method=redirect_method,
+ body=body,
+ headers=headers,
+ redirections=redirections - 1,
+ )
+ response.previous = old_response
+ else:
+ raise RedirectLimit(
+ "Redirected more times than redirection_limit allows.",
+ response,
+ content,
+ )
+ elif response.status in [200, 203] and method in self.safe_methods:
+ # Don't cache 206's since we aren't going to handle byte range requests
+ if "content-location" not in response:
+ response["content-location"] = absolute_uri
+ _updateCache(headers, response, content, self.cache, cachekey)
+
+ return (response, content)
+
+ def _normalize_headers(self, headers):
+ return _normalize_headers(headers)
+
+ # Need to catch and rebrand some exceptions
+ # Then need to optionally turn all exceptions into status codes
+ # including all socket.* and httplib.* exceptions.
+
+ def request(
+ self,
+ uri,
+ method="GET",
+ body=None,
+ headers=None,
+ redirections=DEFAULT_MAX_REDIRECTS,
+ connection_type=None,
+ ):
+ """ Performs a single HTTP request.
+
+ The 'uri' is the URI of the HTTP resource and can begin with either
+ 'http' or 'https'. The value of 'uri' must be an absolute URI.
+
+ The 'method' is the HTTP method to perform, such as GET, POST, DELETE,
+ etc. There is no restriction on the methods allowed.
+
+ The 'body' is the entity body to be sent with the request. It is a
+ string object.
+
+ Any extra headers that are to be sent with the request should be
+ provided in the 'headers' dictionary.
+
+ The maximum number of redirects to follow before raising an
+ exception is 'redirections'. The default is 5.
+
+ The return value is a tuple of (response, content), the first
+ being an instance of the 'Response' class, the second being
+ a string that contains the response entity body.
+ """ + conn_key = '' + + try: + if headers is None: + headers = {} + else: + headers = self._normalize_headers(headers) + + if "user-agent" not in headers: + headers["user-agent"] = "Python-httplib2/%s (gzip)" % __version__ + + uri = iri2uri(uri) + + (scheme, authority, request_uri, defrag_uri) = urlnorm(uri) + + proxy_info = self._get_proxy_info(scheme, authority) + + conn_key = scheme + ":" + authority + conn = self.connections.get(conn_key) + if conn is None: + if not connection_type: + connection_type = SCHEME_TO_CONNECTION[scheme] + certs = list(self.certificates.iter(authority)) + if scheme == "https": + if certs: + conn = self.connections[conn_key] = connection_type( + authority, + key_file=certs[0][0], + cert_file=certs[0][1], + timeout=self.timeout, + proxy_info=proxy_info, + ca_certs=self.ca_certs, + disable_ssl_certificate_validation=self.disable_ssl_certificate_validation, + ssl_version=self.ssl_version, + key_password=certs[0][2], + ) + else: + conn = self.connections[conn_key] = connection_type( + authority, + timeout=self.timeout, + proxy_info=proxy_info, + ca_certs=self.ca_certs, + disable_ssl_certificate_validation=self.disable_ssl_certificate_validation, + ssl_version=self.ssl_version, + ) + else: + conn = self.connections[conn_key] = connection_type( + authority, timeout=self.timeout, proxy_info=proxy_info + ) + conn.set_debuglevel(debuglevel) + + if "range" not in headers and "accept-encoding" not in headers: + headers["accept-encoding"] = "gzip, deflate" + + info = email.Message.Message() + cachekey = None + cached_value = None + if self.cache: + cachekey = defrag_uri.encode("utf-8") + cached_value = self.cache.get(cachekey) + if cached_value: + # info = email.message_from_string(cached_value) + # + # Need to replace the line above with the kludge below + # to fix the non-existent bug not fixed in this + # bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html + try: + info, content = cached_value.split("\r\n\r\n", 1) + feedparser = email.FeedParser.FeedParser() + feedparser.feed(info) + info = feedparser.close() + feedparser._parse = None + except (IndexError, ValueError): + self.cache.delete(cachekey) + cachekey = None + cached_value = None + + if ( + method in self.optimistic_concurrency_methods + and self.cache + and "etag" in info + and not self.ignore_etag + and "if-match" not in headers + ): + # http://www.w3.org/1999/04/Editing/ + headers["if-match"] = info["etag"] + + # https://tools.ietf.org/html/rfc7234 + # A cache MUST invalidate the effective Request URI as well as [...] Location and Content-Location + # when a non-error status code is received in response to an unsafe request method. + if self.cache and cachekey and method not in self.safe_methods: + self.cache.delete(cachekey) + + # Check the vary header in the cache to see if this request + # matches what varies in the cache. + if method in self.safe_methods and "vary" in info: + vary = info["vary"] + vary_headers = vary.lower().replace(" ", "").split(",") + for header in vary_headers: + key = "-varied-%s" % header + value = info[key] + if headers.get(header, None) != value: + cached_value = None + break + + if ( + self.cache + and cached_value + and (method in self.safe_methods or info["status"] == "308") + and "range" not in headers + ): + redirect_method = method + if info["status"] not in ("307", "308"): + redirect_method = "GET" + if "-x-permanent-redirect-url" in info: + # Should cached permanent redirects be counted in our redirection count? For now, yes. 
+ if redirections <= 0:
+ raise RedirectLimit(
+ "Redirected more times than redirection_limit allows.",
+ {},
+ "",
+ )
+ (response, new_content) = self.request(
+ info["-x-permanent-redirect-url"],
+ method=redirect_method,
+ headers=headers,
+ redirections=redirections - 1,
+ )
+ response.previous = Response(info)
+ response.previous.fromcache = True
+ else:
+ # Determine our course of action:
+ # Is the cached entry fresh or stale?
+ # Has the client requested a non-cached response?
+ #
+ # There seem to be three possible answers:
+ # 1. [FRESH] Return the cache entry w/o doing a GET
+ # 2. [STALE] Do the GET (but add in cache validators if available)
+ # 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
+ entry_disposition = _entry_disposition(info, headers)
+
+ if entry_disposition == "FRESH":
+ if not cached_value:
+ info["status"] = "504"
+ content = ""
+ response = Response(info)
+ if cached_value:
+ response.fromcache = True
+ return (response, content)
+
+ if entry_disposition == "STALE":
+ if (
+ "etag" in info
+ and not self.ignore_etag
+ and not "if-none-match" in headers
+ ):
+ headers["if-none-match"] = info["etag"]
+ if "last-modified" in info and not "last-modified" in headers:
+ headers["if-modified-since"] = info["last-modified"]
+ elif entry_disposition == "TRANSPARENT":
+ pass
+
+ (response, new_content) = self._request(
+ conn,
+ authority,
+ uri,
+ request_uri,
+ method,
+ body,
+ headers,
+ redirections,
+ cachekey,
+ )
+
+ if response.status == 304 and method == "GET":
+ # Rewrite the cache entry with the new end-to-end headers
+ # Take all headers that are in response
+ # and overwrite their values in info.
+ # unless they are hop-by-hop, or are listed in the connection header.
+
+ for key in _get_end2end_headers(response):
+ info[key] = response[key]
+ merged_response = Response(info)
+ if hasattr(response, "_stale_digest"):
+ merged_response._stale_digest = response._stale_digest
+ _updateCache(
+ headers, merged_response, content, self.cache, cachekey
+ )
+ response = merged_response
+ response.status = 200
+ response.fromcache = True
+
+ elif response.status == 200:
+ content = new_content
+ else:
+ self.cache.delete(cachekey)
+ content = new_content
+ else:
+ cc = _parse_cache_control(headers)
+ if "only-if-cached" in cc:
+ info["status"] = "504"
+ response = Response(info)
+ content = ""
+ else:
+ (response, content) = self._request(
+ conn,
+ authority,
+ uri,
+ request_uri,
+ method,
+ body,
+ headers,
+ redirections,
+ cachekey,
+ )
+ except Exception as e:
+ is_timeout = isinstance(e, socket.timeout)
+ if is_timeout:
+ conn = self.connections.pop(conn_key, None)
+ if conn:
+ conn.close()
+
+ if self.force_exception_to_status_code:
+ if isinstance(e, HttpLib2ErrorWithResponse):
+ response = e.response
+ content = e.content
+ response.status = 500
+ response.reason = str(e)
+ elif is_timeout:
+ content = "Request Timeout"
+ response = Response(
+ {
+ "content-type": "text/plain",
+ "status": "408",
+ "content-length": len(content),
+ }
+ )
+ response.reason = "Request Timeout"
+ else:
+ content = str(e)
+ response = Response(
+ {
+ "content-type": "text/plain",
+ "status": "400",
+ "content-length": len(content),
+ }
+ )
+ response.reason = "Bad Request"
+ else:
+ raise
+
+ return (response, content)
+
+ def _get_proxy_info(self, scheme, authority):
+ """Return a ProxyInfo instance (or None) based on the scheme
+ and authority.
+ """ + hostname, port = urllib.splitport(authority) + proxy_info = self.proxy_info + if callable(proxy_info): + proxy_info = proxy_info(scheme) + + if hasattr(proxy_info, "applies_to") and not proxy_info.applies_to(hostname): + proxy_info = None + return proxy_info + + +class Response(dict): + """An object more like email.Message than httplib.HTTPResponse.""" + + """Is this response from our local cache""" + fromcache = False + """HTTP protocol version used by server. + + 10 for HTTP/1.0, 11 for HTTP/1.1. + """ + version = 11 + + "Status code returned by server. " + status = 200 + """Reason phrase returned by server.""" + reason = "Ok" + + previous = None + + def __init__(self, info): + # info is either an email.Message or + # an httplib.HTTPResponse object. + if isinstance(info, httplib.HTTPResponse): + for key, value in info.getheaders(): + self[key.lower()] = value + self.status = info.status + self["status"] = str(self.status) + self.reason = info.reason + self.version = info.version + elif isinstance(info, email.Message.Message): + for key, value in info.items(): + self[key.lower()] = value + self.status = int(self["status"]) + else: + for key, value in info.iteritems(): + self[key.lower()] = value + self.status = int(self.get("status", self.status)) + self.reason = self.get("reason", self.reason) + + def __getattr__(self, name): + if name == "dict": + return self + else: + raise AttributeError(name) diff --git a/lib/httplib2/cacerts.txt b/lib/httplib2/py2/cacerts.txt similarity index 100% rename from lib/httplib2/cacerts.txt rename to lib/httplib2/py2/cacerts.txt diff --git a/lib/httplib2/certs.py b/lib/httplib2/py2/certs.py similarity index 100% rename from lib/httplib2/certs.py rename to lib/httplib2/py2/certs.py diff --git a/lib/httplib2/iri2uri.py b/lib/httplib2/py2/iri2uri.py similarity index 100% rename from lib/httplib2/iri2uri.py rename to lib/httplib2/py2/iri2uri.py diff --git a/lib/httplib2/socks.py b/lib/httplib2/py2/socks.py similarity index 98% rename from lib/httplib2/socks.py rename to lib/httplib2/py2/socks.py index 5cef7760..71eb4ebf 100644 --- a/lib/httplib2/socks.py +++ b/lib/httplib2/py2/socks.py @@ -238,7 +238,15 @@ class socksocket(socket.socket): headers - Additional or modified headers for the proxy connect request. 
""" - self.__proxy = (proxytype, addr, port, rdns, username, password, headers) + self.__proxy = ( + proxytype, + addr, + port, + rdns, + username.encode() if username else None, + password.encode() if password else None, + headers, + ) def __negotiatesocks5(self, destaddr, destport): """__negotiatesocks5(self,destaddr,destport) diff --git a/lib/httplib2/py3/__init__.py b/lib/httplib2/py3/__init__.py new file mode 100644 index 00000000..c705f434 --- /dev/null +++ b/lib/httplib2/py3/__init__.py @@ -0,0 +1,2077 @@ +# -*- coding: utf-8 -*- +"""Small, fast HTTP client library for Python.""" + +__author__ = "Joe Gregorio (joe@bitworking.org)" +__copyright__ = "Copyright 2006, Joe Gregorio" +__contributors__ = [ + "Thomas Broyer (t.broyer@ltgt.net)", + "James Antill", + "Xavier Verges Farrero", + "Jonathan Feinberg", + "Blair Zajac", + "Sam Ruby", + "Louis Nyffenegger", + "Mark Pilgrim", + "Alex Yu", +] +__license__ = "MIT" +__version__ = '0.17.0' + +import base64 +import calendar +import copy +import email +import email.feedparser +from email import header +import email.message +import email.utils +import errno +from gettext import gettext as _ +import gzip +from hashlib import md5 as _md5 +from hashlib import sha1 as _sha +import hmac +import http.client +import io +import os +import random +import re +import socket +import ssl +import sys +import time +import urllib.parse +import zlib + +try: + import socks +except ImportError: + # TODO: remove this fallback and copypasted socksipy module upon py2/3 merge, + # idea is to have soft-dependency on any compatible module called socks + from . import socks +from .iri2uri import iri2uri + + +def has_timeout(timeout): + if hasattr(socket, "_GLOBAL_DEFAULT_TIMEOUT"): + return timeout is not None and timeout is not socket._GLOBAL_DEFAULT_TIMEOUT + return timeout is not None + + +__all__ = [ + "debuglevel", + "FailedToDecompressContent", + "Http", + "HttpLib2Error", + "ProxyInfo", + "RedirectLimit", + "RedirectMissingLocation", + "Response", + "RETRIES", + "UnimplementedDigestAuthOptionError", + "UnimplementedHmacDigestAuthOptionError", +] + +# The httplib debug level, set to a non-zero value to get debug output +debuglevel = 0 + +# A request will be tried 'RETRIES' times if it fails at the socket/connection level. +RETRIES = 2 + + +# All exceptions raised here derive from HttpLib2Error +class HttpLib2Error(Exception): + pass + + +# Some exceptions can be caught and optionally +# be turned back into responses. +class HttpLib2ErrorWithResponse(HttpLib2Error): + def __init__(self, desc, response, content): + self.response = response + self.content = content + HttpLib2Error.__init__(self, desc) + + +class RedirectMissingLocation(HttpLib2ErrorWithResponse): + pass + + +class RedirectLimit(HttpLib2ErrorWithResponse): + pass + + +class FailedToDecompressContent(HttpLib2ErrorWithResponse): + pass + + +class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): + pass + + +class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): + pass + + +class MalformedHeader(HttpLib2Error): + pass + + +class RelativeURIError(HttpLib2Error): + pass + + +class ServerNotFoundError(HttpLib2Error): + pass + + +class ProxiesUnavailableError(HttpLib2Error): + pass + + +# Open Items: +# ----------- + +# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?) + +# Pluggable cache storage (supports storing the cache in +# flat files by default. 
We need a plug-in architecture +# that can support Berkeley DB and Squid) + +# == Known Issues == +# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator. +# Does not handle Cache-Control: max-stale +# Does not use Age: headers when calculating cache freshness. + +# The number of redirections to follow before giving up. +# Note that only GET redirects are automatically followed. +# Will also honor 301 requests by saving that info and never +# requesting that URI again. +DEFAULT_MAX_REDIRECTS = 5 + +# Which headers are hop-by-hop headers by default +HOP_BY_HOP = [ + "connection", + "keep-alive", + "proxy-authenticate", + "proxy-authorization", + "te", + "trailers", + "transfer-encoding", + "upgrade", +] + +# https://tools.ietf.org/html/rfc7231#section-8.1.3 +SAFE_METHODS = ("GET", "HEAD", "OPTIONS", "TRACE") + +# To change, assign to `Http().redirect_codes` +REDIRECT_CODES = frozenset((300, 301, 302, 303, 307, 308)) + + +from . import certs +CA_CERTS = certs.where() + +# PROTOCOL_TLS is python 3.5.3+. PROTOCOL_SSLv23 is deprecated. +# Both PROTOCOL_TLS and PROTOCOL_SSLv23 are equivalent and means: +# > Selects the highest protocol version that both the client and server support. +# > Despite the name, this option can select “TLS” protocols as well as “SSL”. +# source: https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLS +DEFAULT_TLS_VERSION = getattr(ssl, "PROTOCOL_TLS", None) or getattr( + ssl, "PROTOCOL_SSLv23" +) + +def _build_ssl_context( + disable_ssl_certificate_validation, ca_certs, cert_file=None, key_file=None, + maximum_version=None, minimum_version=None, key_password=None, +): + if not hasattr(ssl, "SSLContext"): + raise RuntimeError("httplib2 requires Python 3.2+ for ssl.SSLContext") + + context = ssl.SSLContext(DEFAULT_TLS_VERSION) + context.verify_mode = ( + ssl.CERT_NONE if disable_ssl_certificate_validation else ssl.CERT_REQUIRED + ) + + # SSLContext.maximum_version and SSLContext.minimum_version are python 3.7+. + # source: https://docs.python.org/3/library/ssl.html#ssl.SSLContext.maximum_version + if maximum_version is not None: + if hasattr(context, "maximum_version"): + context.maximum_version = getattr(ssl.TLSVersion, maximum_version) + else: + raise RuntimeError("setting tls_maximum_version requires Python 3.7 and OpenSSL 1.1 or newer") + if minimum_version is not None: + if hasattr(context, "minimum_version"): + context.minimum_version = getattr(ssl.TLSVersion, minimum_version) + else: + raise RuntimeError("setting tls_minimum_version requires Python 3.7 and OpenSSL 1.1 or newer") + + # check_hostname requires python 3.4+ + # we will perform the equivalent in HTTPSConnectionWithTimeout.connect() by calling ssl.match_hostname + # if check_hostname is not supported. + if hasattr(context, "check_hostname"): + context.check_hostname = not disable_ssl_certificate_validation + + context.load_verify_locations(ca_certs) + + if cert_file: + context.load_cert_chain(cert_file, key_file, key_password) + + return context + + +def _get_end2end_headers(response): + hopbyhop = list(HOP_BY_HOP) + hopbyhop.extend([x.strip() for x in response.get("connection", "").split(",")]) + return [header for header in list(response.keys()) if header not in hopbyhop] + + +URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?") + + +def parse_uri(uri): + """Parses a URI using the regex given in Appendix B of RFC 3986. 
+ + (scheme, authority, path, query, fragment) = parse_uri(uri) + """ + groups = URI.match(uri).groups() + return (groups[1], groups[3], groups[4], groups[6], groups[8]) + + +def urlnorm(uri): + (scheme, authority, path, query, fragment) = parse_uri(uri) + if not scheme or not authority: + raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri) + authority = authority.lower() + scheme = scheme.lower() + if not path: + path = "/" + # Could do syntax based normalization of the URI before + # computing the digest. See Section 6.2.2 of Std 66. + request_uri = query and "?".join([path, query]) or path + scheme = scheme.lower() + defrag_uri = scheme + "://" + authority + request_uri + return scheme, authority, request_uri, defrag_uri + + +# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/) +re_url_scheme = re.compile(r"^\w+://") +re_unsafe = re.compile(r"[^\w\-_.()=!]+", re.ASCII) + + +def safename(filename): + """Return a filename suitable for the cache. + Strips dangerous and common characters to create a filename we + can use to store the cache in. + """ + if isinstance(filename, bytes): + filename_bytes = filename + filename = filename.decode("utf-8") + else: + filename_bytes = filename.encode("utf-8") + filemd5 = _md5(filename_bytes).hexdigest() + filename = re_url_scheme.sub("", filename) + filename = re_unsafe.sub("", filename) + + # limit length of filename (vital for Windows) + # https://github.com/httplib2/httplib2/pull/74 + # C:\Users\ <username> \AppData\Local\Temp\ <safe_filename> , <md5> + # 9 chars + max 104 chars + 20 chars + x + 1 + 32 = max 259 chars + # Thus max safe filename x = 93 chars. Let it be 90 to make a round sum: + filename = filename[:90] + + return ",".join((filename, filemd5)) + + +NORMALIZE_SPACE = re.compile(r"(?:\r\n)?[ \t]+") + + +def _normalize_headers(headers): + return dict( + [ + ( + _convert_byte_str(key).lower(), + NORMALIZE_SPACE.sub(_convert_byte_str(value), " ").strip(), + ) + for (key, value) in headers.items() + ] + ) + + +def _convert_byte_str(s): + if not isinstance(s, str): + return str(s, "utf-8") + return s + + +def _parse_cache_control(headers): + retval = {} + if "cache-control" in headers: + parts = headers["cache-control"].split(",") + parts_with_args = [ + tuple([x.strip().lower() for x in part.split("=", 1)]) + for part in parts + if -1 != part.find("=") + ] + parts_wo_args = [ + (name.strip().lower(), 1) for name in parts if -1 == name.find("=") + ] + retval = dict(parts_with_args + parts_wo_args) + return retval + + +# Whether to use a strict mode to parse WWW-Authenticate headers +# Might lead to bad results in case of ill-formed header value, +# so disabled by default, falling back to relaxed parsing. +# Set to true to turn on, useful for testing servers. +USE_WWW_AUTH_STRICT_PARSING = 0 + +# In regex below: +# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP +# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space +# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both: +# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"? 
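# Illustrative sketch (not part of the patch): what _parse_cache_control
# above yields. Directives without "=" map to 1, valued directives keep
# their string value, and keys are lower-cased:
def parse_cc(value):
    parts = value.split(",")
    with_args = [tuple(x.strip().lower() for x in p.split("=", 1))
                 for p in parts if "=" in p]
    without_args = [(p.strip().lower(), 1) for p in parts if "=" not in p]
    return dict(with_args + without_args)

assert parse_cc("No-Cache, max-age=3600") == {"no-cache": 1, "max-age": "3600"}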
+WWW_AUTH_STRICT = re.compile(
+ r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$"
+)
+WWW_AUTH_RELAXED = re.compile(
+ r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$"
+)
+UNQUOTE_PAIRS = re.compile(r"\\(.)")
+
+
+def _parse_www_authenticate(headers, headername="www-authenticate"):
+ """Returns a dictionary of dictionaries, one dict
+ per auth_scheme."""
+ retval = {}
+ if headername in headers:
+ try:
+ authenticate = headers[headername].strip()
+ www_auth = (
+ USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED
+ )
+ while authenticate:
+ # Break off the scheme at the beginning of the line
+ if headername == "authentication-info":
+ (auth_scheme, the_rest) = ("digest", authenticate)
+ else:
+ (auth_scheme, the_rest) = authenticate.split(" ", 1)
+ # Now loop over all the key value pairs that come after the scheme,
+ # being careful not to roll into the next scheme
+ match = www_auth.search(the_rest)
+ auth_params = {}
+ while match:
+ if match and len(match.groups()) == 3:
+ (key, value, the_rest) = match.groups()
+ auth_params[key.lower()] = UNQUOTE_PAIRS.sub(
+ r"\1", value
+ ) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')])
+ match = www_auth.search(the_rest)
+ retval[auth_scheme.lower()] = auth_params
+ authenticate = the_rest.strip()
+ except ValueError:
+ raise MalformedHeader("WWW-Authenticate")
+ return retval
+
+
+def _entry_disposition(response_headers, request_headers):
+ """Determine freshness from the Date, Expires and Cache-Control headers.
+
+ We don't handle the following:
+
+ 1. Cache-Control: max-stale
+ 2. Age: headers are not used in the calculations.
+
+ Note that this algorithm is simpler than you might think
+ because we are operating as a private (non-shared) cache.
+ This lets us ignore 's-maxage'. We can also ignore
+ 'proxy-invalidate' since we aren't a proxy.
+ As a design decision, we will never return a stale document as
+ fresh, and thus we do not implement 'max-stale'. This also lets
+ us safely ignore 'must-revalidate',
+ since we operate as if every server has sent 'must-revalidate'.
+ Since we are private we get to ignore both 'public' and
+ 'private' parameters. We also ignore 'no-transform' since
+ we don't do any transformations.
+ The 'no-store' parameter is handled at a higher level.
+ So the only Cache-Control parameters we look at are:
+
+ no-cache
+ only-if-cached
+ max-age
+ min-fresh
+ """
+
+ retval = "STALE"
+ cc = _parse_cache_control(request_headers)
+ cc_response = _parse_cache_control(response_headers)
+
+ if (
+ "pragma" in request_headers
+ and request_headers["pragma"].lower().find("no-cache") != -1
+ ):
+ retval = "TRANSPARENT"
+ if "cache-control" not in request_headers:
+ request_headers["cache-control"] = "no-cache"
+ elif "no-cache" in cc:
+ retval = "TRANSPARENT"
+ elif "no-cache" in cc_response:
+ retval = "STALE"
+ elif "only-if-cached" in cc:
+ retval = "FRESH"
+ elif "date" in response_headers:
+ date = calendar.timegm(email.utils.parsedate_tz(response_headers["date"]))
+ now = time.time()
+ current_age = max(0, now - date)
+ if "max-age" in cc_response:
+ try:
+ freshness_lifetime = int(cc_response["max-age"])
+ except ValueError:
+ freshness_lifetime = 0
+ elif "expires" in response_headers:
+ expires = email.utils.parsedate_tz(response_headers["expires"])
+ if expires is None:
+ freshness_lifetime = 0
+ else:
+ freshness_lifetime = max(0, calendar.timegm(expires) - date)
+ else:
+ freshness_lifetime = 0
+ if "max-age" in cc:
+ try:
+ freshness_lifetime = int(cc["max-age"])
+ except ValueError:
+ freshness_lifetime = 0
+ if "min-fresh" in cc:
+ try:
+ min_fresh = int(cc["min-fresh"])
+ except ValueError:
+ min_fresh = 0
+ current_age += min_fresh
+ if freshness_lifetime > current_age:
+ retval = "FRESH"
+ return retval
+
+
+def _decompressContent(response, new_content):
+ content = new_content
+ try:
+ encoding = response.get("content-encoding", None)
+ if encoding in ["gzip", "deflate"]:
+ if encoding == "gzip":
+ content = gzip.GzipFile(fileobj=io.BytesIO(new_content)).read()
+ if encoding == "deflate":
+ content = zlib.decompress(content, -zlib.MAX_WBITS)
+ response["content-length"] = str(len(content))
+ # Record the historical presence of the encoding in a way that won't interfere.
+ response["-content-encoding"] = response["content-encoding"]
+ del response["content-encoding"]
+ except (IOError, zlib.error):
+ content = ""
+ raise FailedToDecompressContent(
+ _("Content purported to be compressed with %s but failed to decompress.")
+ % response.get("content-encoding"),
+ response,
+ content,
+ )
+ return content
+
+
+def _bind_write_headers(msg):
+ def _write_headers(self):
+ # Self refers to the Generator object.
+ for h, v in msg.items():
+ print("%s:" % h, end=" ", file=self._fp)
+ if isinstance(v, header.Header):
+ print(v.encode(maxlinelen=self._maxheaderlen), file=self._fp)
+ else:
+ # email.Header got lots of smarts, so use it.
+ headers = header.Header(
+ v, maxlinelen=self._maxheaderlen, charset="utf-8", header_name=h
+ )
+ print(headers.encode(), file=self._fp)
+ # A blank line always separates headers from body.
+ print(file=self._fp)
+
+ return _write_headers
+
+
+def _updateCache(request_headers, response_headers, content, cache, cachekey):
+ if cachekey:
+ cc = _parse_cache_control(request_headers)
+ cc_response = _parse_cache_control(response_headers)
+ if "no-store" in cc or "no-store" in cc_response:
+ cache.delete(cachekey)
+ else:
+ info = email.message.Message()
+ for key, value in response_headers.items():
+ if key not in ["status", "content-encoding", "transfer-encoding"]:
+ info[key] = value
+
+ # Add annotations to the cache to indicate what headers
+ # are variant for this request.
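# Illustrative sketch (not part of the patch): the freshness arithmetic of
# _entry_disposition above, with made-up numbers. A response dated 100s ago
# carrying "Cache-Control: max-age=3600" is FRESH because the freshness
# lifetime exceeds the current age; a request "min-fresh=3600" inflates the
# age to 3700, so the same entry is treated as STALE:
freshness_lifetime = 3600   # from the response's max-age
current_age = 100           # time.time() minus the Date header
assert freshness_lifetime > current_age        # -> "FRESH"
current_age += 3600                            # request min-fresh
assert not freshness_lifetime > current_age    # -> stays "STALE"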
+ vary = response_headers.get("vary", None)
+ if vary:
+ vary_headers = vary.lower().replace(" ", "").split(",")
+ for header in vary_headers:
+ key = "-varied-%s" % header
+ try:
+ info[key] = request_headers[header]
+ except KeyError:
+ pass
+
+ status = response_headers.status
+ if status == 304:
+ status = 200
+
+ status_header = "status: %d\r\n" % status
+
+ try:
+ header_str = info.as_string()
+ except UnicodeEncodeError:
+ setattr(info, "_write_headers", _bind_write_headers(info))
+ header_str = info.as_string()
+
+ header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
+ text = b"".join(
+ [status_header.encode("utf-8"), header_str.encode("utf-8"), content]
+ )
+
+ cache.set(cachekey, text)
+
+
+def _cnonce():
+ dig = _md5(
+ (
+ "%s:%s"
+ % (time.ctime(), ["0123456789"[random.randrange(0, 9)] for i in range(20)])
+ ).encode("utf-8")
+ ).hexdigest()
+ return dig[:16]
+
+
+def _wsse_username_token(cnonce, iso_now, password):
+ return base64.b64encode(
+ _sha(("%s%s%s" % (cnonce, iso_now, password)).encode("utf-8")).digest()
+ ).strip()
+
+
+# For credentials we need two things, first
+# a pool of credentials to try (not necessarily tied to Basic, Digest, etc.)
+# Then we also need a list of URIs that have already demanded authentication
+# That list is tricky since sub-URIs can take the same auth, or the
+# auth scheme may change as you descend the tree.
+# So we also need each Auth instance to be able to tell us
+# how close to the 'top' it is.
+
+
+class Authentication(object):
+ def __init__(
+ self, credentials, host, request_uri, headers, response, content, http
+ ):
+ (scheme, authority, path, query, fragment) = parse_uri(request_uri)
+ self.path = path
+ self.host = host
+ self.credentials = credentials
+ self.http = http
+
+ def depth(self, request_uri):
+ (scheme, authority, path, query, fragment) = parse_uri(request_uri)
+ return request_uri[len(self.path) :].count("/")
+
+ def inscope(self, host, request_uri):
+ # XXX Should we normalize the request_uri?
+ (scheme, authority, path, query, fragment) = parse_uri(request_uri)
+ return (host == self.host) and path.startswith(self.path)
+
+ def request(self, method, request_uri, headers, content):
+ """Modify the request headers to add the appropriate
+ Authorization header. Override this in subclasses."""
+ pass
+
+ def response(self, response, content):
+ """Gives us a chance to update with new nonces
+ or such returned from the last authorized response.
+ Override this in subclasses if necessary.
+
+ Return True if the request is to be retried, for
+ example Digest may return stale=true.
+ """ + return False + + def __eq__(self, auth): + return False + + def __ne__(self, auth): + return True + + def __lt__(self, auth): + return True + + def __gt__(self, auth): + return False + + def __le__(self, auth): + return True + + def __ge__(self, auth): + return False + + def __bool__(self): + return True + + +class BasicAuthentication(Authentication): + def __init__( + self, credentials, host, request_uri, headers, response, content, http + ): + Authentication.__init__( + self, credentials, host, request_uri, headers, response, content, http + ) + + def request(self, method, request_uri, headers, content): + """Modify the request headers to add the appropriate + Authorization header.""" + headers["authorization"] = "Basic " + base64.b64encode( + ("%s:%s" % self.credentials).encode("utf-8") + ).strip().decode("utf-8") + + +class DigestAuthentication(Authentication): + """Only do qop='auth' and MD5, since that + is all Apache currently implements""" + + def __init__( + self, credentials, host, request_uri, headers, response, content, http + ): + Authentication.__init__( + self, credentials, host, request_uri, headers, response, content, http + ) + challenge = _parse_www_authenticate(response, "www-authenticate") + self.challenge = challenge["digest"] + qop = self.challenge.get("qop", "auth") + self.challenge["qop"] = ( + ("auth" in [x.strip() for x in qop.split()]) and "auth" or None + ) + if self.challenge["qop"] is None: + raise UnimplementedDigestAuthOptionError( + _("Unsupported value for qop: %s." % qop) + ) + self.challenge["algorithm"] = self.challenge.get("algorithm", "MD5").upper() + if self.challenge["algorithm"] != "MD5": + raise UnimplementedDigestAuthOptionError( + _("Unsupported value for algorithm: %s." % self.challenge["algorithm"]) + ) + self.A1 = "".join( + [ + self.credentials[0], + ":", + self.challenge["realm"], + ":", + self.credentials[1], + ] + ) + self.challenge["nc"] = 1 + + def request(self, method, request_uri, headers, content, cnonce=None): + """Modify the request headers""" + H = lambda x: _md5(x.encode("utf-8")).hexdigest() + KD = lambda s, d: H("%s:%s" % (s, d)) + A2 = "".join([method, ":", request_uri]) + self.challenge["cnonce"] = cnonce or _cnonce() + request_digest = '"%s"' % KD( + H(self.A1), + "%s:%s:%s:%s:%s" + % ( + self.challenge["nonce"], + "%08x" % self.challenge["nc"], + self.challenge["cnonce"], + self.challenge["qop"], + H(A2), + ), + ) + headers["authorization"] = ( + 'Digest username="%s", realm="%s", nonce="%s", ' + 'uri="%s", algorithm=%s, response=%s, qop=%s, ' + 'nc=%08x, cnonce="%s"' + ) % ( + self.credentials[0], + self.challenge["realm"], + self.challenge["nonce"], + request_uri, + self.challenge["algorithm"], + request_digest, + self.challenge["qop"], + self.challenge["nc"], + self.challenge["cnonce"], + ) + if self.challenge.get("opaque"): + headers["authorization"] += ', opaque="%s"' % self.challenge["opaque"] + self.challenge["nc"] += 1 + + def response(self, response, content): + if "authentication-info" not in response: + challenge = _parse_www_authenticate(response, "www-authenticate").get( + "digest", {} + ) + if "true" == challenge.get("stale"): + self.challenge["nonce"] = challenge["nonce"] + self.challenge["nc"] = 1 + return True + else: + updated_challenge = _parse_www_authenticate( + response, "authentication-info" + ).get("digest", {}) + + if "nextnonce" in updated_challenge: + self.challenge["nonce"] = updated_challenge["nextnonce"] + self.challenge["nc"] = 1 + return False + + +class 
HmacDigestAuthentication(Authentication): + """Adapted from Robert Sayre's code and DigestAuthentication above.""" + + __author__ = "Thomas Broyer (t.broyer@ltgt.net)" + + def __init__( + self, credentials, host, request_uri, headers, response, content, http + ): + Authentication.__init__( + self, credentials, host, request_uri, headers, response, content, http + ) + challenge = _parse_www_authenticate(response, "www-authenticate") + self.challenge = challenge["hmacdigest"] + # TODO: self.challenge['domain'] + self.challenge["reason"] = self.challenge.get("reason", "unauthorized") + if self.challenge["reason"] not in ["unauthorized", "integrity"]: + self.challenge["reason"] = "unauthorized" + self.challenge["salt"] = self.challenge.get("salt", "") + if not self.challenge.get("snonce"): + raise UnimplementedHmacDigestAuthOptionError( + _("The challenge doesn't contain a server nonce, or this one is empty.") + ) + self.challenge["algorithm"] = self.challenge.get("algorithm", "HMAC-SHA-1") + if self.challenge["algorithm"] not in ["HMAC-SHA-1", "HMAC-MD5"]: + raise UnimplementedHmacDigestAuthOptionError( + _("Unsupported value for algorithm: %s." % self.challenge["algorithm"]) + ) + self.challenge["pw-algorithm"] = self.challenge.get("pw-algorithm", "SHA-1") + if self.challenge["pw-algorithm"] not in ["SHA-1", "MD5"]: + raise UnimplementedHmacDigestAuthOptionError( + _( + "Unsupported value for pw-algorithm: %s." + % self.challenge["pw-algorithm"] + ) + ) + if self.challenge["algorithm"] == "HMAC-MD5": + self.hashmod = _md5 + else: + self.hashmod = _sha + if self.challenge["pw-algorithm"] == "MD5": + self.pwhashmod = _md5 + else: + self.pwhashmod = _sha + self.key = "".join( + [ + self.credentials[0], + ":", + self.pwhashmod.new( + "".join([self.credentials[1], self.challenge["salt"]]) + ) + .hexdigest() + .lower(), + ":", + self.challenge["realm"], + ] + ) + self.key = self.pwhashmod.new(self.key).hexdigest().lower() + + def request(self, method, request_uri, headers, content): + """Modify the request headers""" + keys = _get_end2end_headers(headers) + keylist = "".join(["%s " % k for k in keys]) + headers_val = "".join([headers[k] for k in keys]) + created = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) + cnonce = _cnonce() + request_digest = "%s:%s:%s:%s:%s" % ( + method, + request_uri, + cnonce, + self.challenge["snonce"], + headers_val, + ) + request_digest = ( + hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower() + ) + headers["authorization"] = ( + 'HMACDigest username="%s", realm="%s", snonce="%s",' + ' cnonce="%s", uri="%s", created="%s", ' + 'response="%s", headers="%s"' + ) % ( + self.credentials[0], + self.challenge["realm"], + self.challenge["snonce"], + cnonce, + request_uri, + created, + request_digest, + keylist, + ) + + def response(self, response, content): + challenge = _parse_www_authenticate(response, "www-authenticate").get( + "hmacdigest", {} + ) + if challenge.get("reason") in ["integrity", "stale"]: + return True + return False + + +class WsseAuthentication(Authentication): + """This is thinly tested and should not be relied upon. + At this time there isn't any third party server to test against. 
+ Blogger and TypePad implemented this algorithm at one point
+ but Blogger has since switched to Basic over HTTPS and
+ TypePad has implemented it wrong, by never issuing a 401
+ challenge but instead requiring your client to telepathically know that
+ their endpoint is expecting WSSE profile="UsernameToken"."""
+
+ def __init__(
+ self, credentials, host, request_uri, headers, response, content, http
+ ):
+ Authentication.__init__(
+ self, credentials, host, request_uri, headers, response, content, http
+ )
+
+ def request(self, method, request_uri, headers, content):
+ """Modify the request headers to add the appropriate
+ Authorization header."""
+ headers["authorization"] = 'WSSE profile="UsernameToken"'
+ iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
+ cnonce = _cnonce()
+ password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1])
+ headers["X-WSSE"] = (
+ 'UsernameToken Username="%s", PasswordDigest="%s", '
+ 'Nonce="%s", Created="%s"'
+ ) % (self.credentials[0], password_digest, cnonce, iso_now)
+
+
+class GoogleLoginAuthentication(Authentication):
+ def __init__(
+ self, credentials, host, request_uri, headers, response, content, http
+ ):
+ from urllib.parse import urlencode
+
+ Authentication.__init__(
+ self, credentials, host, request_uri, headers, response, content, http
+ )
+ challenge = _parse_www_authenticate(response, "www-authenticate")
+ service = challenge["googlelogin"].get("service", "xapi")
+ # Blogger actually returns the service in the challenge
+ # For the rest we guess based on the URI
+ if service == "xapi" and request_uri.find("calendar") > 0:
+ service = "cl"
+ # No point in guessing Base or Spreadsheet
+ # elif request_uri.find("spreadsheets") > 0:
+ # service = "wise"
+
+ auth = dict(
+ Email=credentials[0],
+ Passwd=credentials[1],
+ service=service,
+ source=headers["user-agent"],
+ )
+ resp, content = self.http.request(
+ "https://www.google.com/accounts/ClientLogin",
+ method="POST",
+ body=urlencode(auth),
+ headers={"Content-Type": "application/x-www-form-urlencoded"},
+ )
+ lines = content.split("\n")
+ d = dict([tuple(line.split("=", 1)) for line in lines if line])
+ if resp.status == 403:
+ self.Auth = ""
+ else:
+ self.Auth = d["Auth"]
+
+ def request(self, method, request_uri, headers, content):
+ """Modify the request headers to add the appropriate
+ Authorization header."""
+ headers["authorization"] = "GoogleLogin Auth=" + self.Auth
+
+
+AUTH_SCHEME_CLASSES = {
+ "basic": BasicAuthentication,
+ "wsse": WsseAuthentication,
+ "digest": DigestAuthentication,
+ "hmacdigest": HmacDigestAuthentication,
+ "googlelogin": GoogleLoginAuthentication,
+}
+
+AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"]
+
+
+class FileCache(object):
+ """Uses a local directory as a store for cached files.
+ Not really safe to use if multiple threads or processes are going to
+ be running on the same cache.
+ """ + + def __init__( + self, cache, safe=safename + ): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior + self.cache = cache + self.safe = safe + if not os.path.exists(cache): + os.makedirs(self.cache) + + def get(self, key): + retval = None + cacheFullPath = os.path.join(self.cache, self.safe(key)) + try: + f = open(cacheFullPath, "rb") + retval = f.read() + f.close() + except IOError: + pass + return retval + + def set(self, key, value): + cacheFullPath = os.path.join(self.cache, self.safe(key)) + f = open(cacheFullPath, "wb") + f.write(value) + f.close() + + def delete(self, key): + cacheFullPath = os.path.join(self.cache, self.safe(key)) + if os.path.exists(cacheFullPath): + os.remove(cacheFullPath) + + +class Credentials(object): + def __init__(self): + self.credentials = [] + + def add(self, name, password, domain=""): + self.credentials.append((domain.lower(), name, password)) + + def clear(self): + self.credentials = [] + + def iter(self, domain): + for (cdomain, name, password) in self.credentials: + if cdomain == "" or domain == cdomain: + yield (name, password) + + +class KeyCerts(Credentials): + """Identical to Credentials except that + name/password are mapped to key/cert.""" + def add(self, key, cert, domain, password): + self.credentials.append((domain.lower(), key, cert, password)) + + def iter(self, domain): + for (cdomain, key, cert, password) in self.credentials: + if cdomain == "" or domain == cdomain: + yield (key, cert, password) + + +class AllHosts(object): + pass + + +class ProxyInfo(object): + """Collect information required to use a proxy.""" + + bypass_hosts = () + + def __init__( + self, + proxy_type, + proxy_host, + proxy_port, + proxy_rdns=True, + proxy_user=None, + proxy_pass=None, + proxy_headers=None, + ): + """Args: + + proxy_type: The type of proxy server. This must be set to one of + socks.PROXY_TYPE_XXX constants. For example: p = + ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, proxy_host='localhost', + proxy_port=8000) + proxy_host: The hostname or IP address of the proxy server. + proxy_port: The port that the proxy server is running on. + proxy_rdns: If True (default), DNS queries will not be performed + locally, and instead, handed to the proxy to resolve. This is useful + if the network does not allow resolution of non-local names. In + httplib2 0.9 and earlier, this defaulted to False. + proxy_user: The username used to authenticate with the proxy server. + proxy_pass: The password used to authenticate with the proxy server. + proxy_headers: Additional or modified headers for the proxy connect + request. + """ + if isinstance(proxy_user, bytes): + proxy_user = proxy_user.decode() + if isinstance(proxy_pass, bytes): + proxy_pass = proxy_pass.decode() + self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns, self.proxy_user, self.proxy_pass, self.proxy_headers = ( + proxy_type, + proxy_host, + proxy_port, + proxy_rdns, + proxy_user, + proxy_pass, + proxy_headers, + ) + + def astuple(self): + return ( + self.proxy_type, + self.proxy_host, + self.proxy_port, + self.proxy_rdns, + self.proxy_user, + self.proxy_pass, + self.proxy_headers, + ) + + def isgood(self): + return socks and (self.proxy_host != None) and (self.proxy_port != None) + + def applies_to(self, hostname): + return not self.bypass_host(hostname) + + def bypass_host(self, hostname): + """Has this host been excluded from the proxy config""" + if self.bypass_hosts is AllHosts: + return True + + hostname = "." 
+ hostname.lstrip(".") + for skip_name in self.bypass_hosts: + # *.suffix + if skip_name.startswith(".") and hostname.endswith(skip_name): + return True + # exact match + if hostname == "." + skip_name: + return True + return False + + def __repr__(self): + return ( + "<ProxyInfo type={p.proxy_type} " + "host:port={p.proxy_host}:{p.proxy_port} rdns={p.proxy_rdns}" + + " user={p.proxy_user} headers={p.proxy_headers}>" + ).format(p=self) + + +def proxy_info_from_environment(method="http"): + """Read proxy info from the environment variables. + """ + if method not in ("http", "https"): + return + + env_var = method + "_proxy" + url = os.environ.get(env_var, os.environ.get(env_var.upper())) + if not url: + return + return proxy_info_from_url(url, method, noproxy=None) + + +def proxy_info_from_url(url, method="http", noproxy=None): + """Construct a ProxyInfo from a URL (such as http_proxy env var) + """ + url = urllib.parse.urlparse(url) + username = None + password = None + port = None + if "@" in url[1]: + ident, host_port = url[1].split("@", 1) + if ":" in ident: + username, password = ident.split(":", 1) + else: + password = ident + else: + host_port = url[1] + if ":" in host_port: + host, port = host_port.split(":", 1) + else: + host = host_port + + if port: + port = int(port) + else: + port = dict(https=443, http=80)[method] + + proxy_type = 3 # socks.PROXY_TYPE_HTTP + pi = ProxyInfo( + proxy_type=proxy_type, + proxy_host=host, + proxy_port=port, + proxy_user=username or None, + proxy_pass=password or None, + proxy_headers=None, + ) + + bypass_hosts = [] + # If not given an explicit noproxy value, respect values in env vars. + if noproxy is None: + noproxy = os.environ.get("no_proxy", os.environ.get("NO_PROXY", "")) + # Special case: A single '*' character means all hosts should be bypassed. + if noproxy == "*": + bypass_hosts = AllHosts + elif noproxy.strip(): + bypass_hosts = noproxy.split(",") + bypass_hosts = tuple(filter(bool, bypass_hosts)) # To exclude empty string. + + pi.bypass_hosts = bypass_hosts + return pi + + +class HTTPConnectionWithTimeout(http.client.HTTPConnection): + """HTTPConnection subclass that supports timeouts + + HTTPConnection subclass that supports timeouts + + All timeouts are in seconds. If None is passed for timeout then + Python's default timeout for sockets will be used. See for example + the docs of socket.setdefaulttimeout(): + http://docs.python.org/library/socket.html#socket.setdefaulttimeout + """ + + def __init__(self, host, port=None, timeout=None, proxy_info=None): + http.client.HTTPConnection.__init__(self, host, port=port, timeout=timeout) + + self.proxy_info = proxy_info + if proxy_info and not isinstance(proxy_info, ProxyInfo): + self.proxy_info = proxy_info("http") + + def connect(self): + """Connect to the host and port specified in __init__.""" + if self.proxy_info and socks is None: + raise ProxiesUnavailableError( + "Proxy support missing but proxy use was requested!" 
+ ) + if self.proxy_info and self.proxy_info.isgood() and self.proxy_info.applies_to(self.host): + use_proxy = True + proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers = ( + self.proxy_info.astuple() + ) + + host = proxy_host + port = proxy_port + else: + use_proxy = False + + host = self.host + port = self.port + proxy_type = None + + socket_err = None + + for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM): + af, socktype, proto, canonname, sa = res + try: + if use_proxy: + self.sock = socks.socksocket(af, socktype, proto) + self.sock.setproxy( + proxy_type, + proxy_host, + proxy_port, + proxy_rdns, + proxy_user, + proxy_pass, + ) + else: + self.sock = socket.socket(af, socktype, proto) + self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + if has_timeout(self.timeout): + self.sock.settimeout(self.timeout) + if self.debuglevel > 0: + print( + "connect: ({0}, {1}) ************".format(self.host, self.port) + ) + if use_proxy: + print( + "proxy: {0} ************".format( + str( + ( + proxy_host, + proxy_port, + proxy_rdns, + proxy_user, + proxy_pass, + proxy_headers, + ) + ) + ) + ) + + self.sock.connect((self.host, self.port) + sa[2:]) + except socket.error as e: + socket_err = e + if self.debuglevel > 0: + print("connect fail: ({0}, {1})".format(self.host, self.port)) + if use_proxy: + print( + "proxy: {0}".format( + str( + ( + proxy_host, + proxy_port, + proxy_rdns, + proxy_user, + proxy_pass, + proxy_headers, + ) + ) + ) + ) + if self.sock: + self.sock.close() + self.sock = None + continue + break + if not self.sock: + raise socket_err + + +class HTTPSConnectionWithTimeout(http.client.HTTPSConnection): + """This class allows communication via SSL. + + All timeouts are in seconds. If None is passed for timeout then + Python's default timeout for sockets will be used. 
See for example
+ the docs of socket.setdefaulttimeout():
+ http://docs.python.org/library/socket.html#socket.setdefaulttimeout
+ """
+
+ def __init__(
+ self,
+ host,
+ port=None,
+ key_file=None,
+ cert_file=None,
+ timeout=None,
+ proxy_info=None,
+ ca_certs=None,
+ disable_ssl_certificate_validation=False,
+ tls_maximum_version=None,
+ tls_minimum_version=None,
+ key_password=None,
+ ):
+
+ self.disable_ssl_certificate_validation = disable_ssl_certificate_validation
+ self.ca_certs = ca_certs if ca_certs else CA_CERTS
+
+ self.proxy_info = proxy_info
+ if proxy_info and not isinstance(proxy_info, ProxyInfo):
+ self.proxy_info = proxy_info("https")
+
+ context = _build_ssl_context(
+ self.disable_ssl_certificate_validation, self.ca_certs, cert_file, key_file,
+ maximum_version=tls_maximum_version, minimum_version=tls_minimum_version,
+ key_password=key_password,
+ )
+ super(HTTPSConnectionWithTimeout, self).__init__(
+ host,
+ port=port,
+ timeout=timeout,
+ context=context,
+ )
+ self.key_file = key_file
+ self.cert_file = cert_file
+ self.key_password = key_password
+
+ def connect(self):
+ """Connect to a host on a given (SSL) port."""
+ if self.proxy_info and self.proxy_info.isgood():
+ use_proxy = True
+ proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass, proxy_headers = (
+ self.proxy_info.astuple()
+ )
+
+ host = proxy_host
+ port = proxy_port
+ else:
+ use_proxy = False
+
+ host = self.host
+ port = self.port
+ proxy_type = None
+ proxy_headers = None
+
+ socket_err = None
+
+ address_info = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
+ for family, socktype, proto, canonname, sockaddr in address_info:
+ try:
+ if use_proxy:
+ sock = socks.socksocket(family, socktype, proto)
+
+ sock.setproxy(
+ proxy_type,
+ proxy_host,
+ proxy_port,
+ proxy_rdns,
+ proxy_user,
+ proxy_pass,
+ )
+ else:
+ sock = socket.socket(family, socktype, proto)
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ if has_timeout(self.timeout):
+ sock.settimeout(self.timeout)
+ sock.connect((self.host, self.port))
+
+ self.sock = self._context.wrap_socket(sock, server_hostname=self.host)
+
+ # Python 3.3 compatibility: emulate the check_hostname behavior
+ if (
+ not hasattr(self._context, "check_hostname")
+ and not self.disable_ssl_certificate_validation
+ ):
+ try:
+ ssl.match_hostname(self.sock.getpeercert(), self.host)
+ except Exception:
+ self.sock.shutdown(socket.SHUT_RDWR)
+ self.sock.close()
+ raise
+
+ if self.debuglevel > 0:
+ print("connect: ({0}, {1})".format(self.host, self.port))
+ if use_proxy:
+ print(
+ "proxy: {0}".format(
+ str(
+ (
+ proxy_host,
+ proxy_port,
+ proxy_rdns,
+ proxy_user,
+ proxy_pass,
+ proxy_headers,
+ )
+ )
+ )
+ )
+ except (ssl.SSLError, ssl.CertificateError) as e:
+ if sock:
+ sock.close()
+ if self.sock:
+ self.sock.close()
+ self.sock = None
+ raise
+ except (socket.timeout, socket.gaierror):
+ raise
+ except socket.error as e:
+ socket_err = e
+ if self.debuglevel > 0:
+ print("connect fail: ({0}, {1})".format(self.host, self.port))
+ if use_proxy:
+ print(
+ "proxy: {0}".format(
+ str(
+ (
+ proxy_host,
+ proxy_port,
+ proxy_rdns,
+ proxy_user,
+ proxy_pass,
+ proxy_headers,
+ )
+ )
+ )
+ )
+ if self.sock:
+ self.sock.close()
+ self.sock = None
+ continue
+ break
+ if not self.sock:
+ raise socket_err
+
+
+SCHEME_TO_CONNECTION = {
+ "http": HTTPConnectionWithTimeout,
+ "https": HTTPSConnectionWithTimeout,
+}
+
+
+class Http(object):
+ """An HTTP client that handles:
+
+ - all methods
+ - caching
+ - ETags
+ - compression,
+ - HTTPS
+ - Basic
+ - Digest
+ - WSSE
+
+ and more.
+ """
+
+ def __init__(
+ self,
+ cache=None,
+ timeout=None,
+ proxy_info=proxy_info_from_environment,
+ ca_certs=None,
+ disable_ssl_certificate_validation=False,
+ tls_maximum_version=None,
+ tls_minimum_version=None,
+ ):
+ """If 'cache' is a string then it is used as a directory name for
+ a disk cache. Otherwise it must be an object that supports the
+ same interface as FileCache.
+
+ All timeouts are in seconds. If None is passed for timeout
+ then Python's default timeout for sockets will be used. See
+ for example the docs of socket.setdefaulttimeout():
+ http://docs.python.org/library/socket.html#socket.setdefaulttimeout
+
+ `proxy_info` may be:
+ - a callable that takes the http scheme ('http' or 'https') and
+ returns a ProxyInfo instance per request. By default, uses
+ proxy_info_from_environment.
+ - a ProxyInfo instance (static proxy config).
+ - None (proxy disabled).
+
+ ca_certs is the path of a file containing root CA certificates for SSL
+ server certificate validation. By default, a CA cert file bundled with
+ httplib2 is used.
+
+ If disable_ssl_certificate_validation is true, SSL cert validation will
+ not be performed.
+
+ tls_maximum_version / tls_minimum_version require Python 3.7+ /
+ OpenSSL 1.1.0g+. A value of "TLSv1_3" requires OpenSSL 1.1.1+.
+"""
+ self.proxy_info = proxy_info
+ self.ca_certs = ca_certs
+ self.disable_ssl_certificate_validation = disable_ssl_certificate_validation
+ self.tls_maximum_version = tls_maximum_version
+ self.tls_minimum_version = tls_minimum_version
+ # Map domain name to an httplib connection
+ self.connections = {}
+ # The location of the cache, for now a directory
+ # where cached responses are held.
+ if cache and isinstance(cache, str):
+ self.cache = FileCache(cache)
+ else:
+ self.cache = cache
+
+ # Name/password
+ self.credentials = Credentials()
+
+ # Key/cert
+ self.certificates = KeyCerts()
+
+ # authorization objects
+ self.authorizations = []
+
+ # If set to False then no redirects are followed, even safe ones.
+ self.follow_redirects = True
+
+ self.redirect_codes = REDIRECT_CODES
+
+ # Which HTTP methods do we apply optimistic concurrency to, i.e.
+ # which methods get an "if-match:" etag header added to them.
+ self.optimistic_concurrency_methods = ["PUT", "PATCH"]
+
+ self.safe_methods = list(SAFE_METHODS)
+
+ # If 'follow_redirects' is True, and this is set to True then
+ # all redirects are followed, including unsafe ones.
+ self.follow_all_redirects = False
+
+ self.ignore_etag = False
+
+ self.force_exception_to_status_code = False
+
+ self.timeout = timeout
+
+ # Keep Authorization: headers on a redirect.
+ self.forward_authorization_headers = False
+
+ def close(self):
+ """Close persistent connections, clear sensitive data.
+ Not thread-safe, requires external synchronization against concurrent requests.
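+ (Illustrative only: a caller sharing one Http instance across threads
+ would serialize close() and request() with its own lock, e.g. a
+ hypothetical `with http_lock: h.close()`.)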
+ """ + existing, self.connections = self.connections, {} + for _, c in existing.items(): + c.close() + self.certificates.clear() + self.clear_credentials() + + def __getstate__(self): + state_dict = copy.copy(self.__dict__) + # In case request is augmented by some foreign object such as + # credentials which handle auth + if "request" in state_dict: + del state_dict["request"] + if "connections" in state_dict: + del state_dict["connections"] + return state_dict + + def __setstate__(self, state): + self.__dict__.update(state) + self.connections = {} + + def _auth_from_challenge(self, host, request_uri, headers, response, content): + """A generator that creates Authorization objects + that can be applied to requests. + """ + challenges = _parse_www_authenticate(response, "www-authenticate") + for cred in self.credentials.iter(host): + for scheme in AUTH_SCHEME_ORDER: + if scheme in challenges: + yield AUTH_SCHEME_CLASSES[scheme]( + cred, host, request_uri, headers, response, content, self + ) + + def add_credentials(self, name, password, domain=""): + """Add a name and password that will be used + any time a request requires authentication.""" + self.credentials.add(name, password, domain) + + def add_certificate(self, key, cert, domain, password=None): + """Add a key and cert that will be used + any time a request requires authentication.""" + self.certificates.add(key, cert, domain, password) + + def clear_credentials(self): + """Remove all the names and passwords + that are used for authentication""" + self.credentials.clear() + self.authorizations = [] + + def _conn_request(self, conn, request_uri, method, body, headers): + i = 0 + seen_bad_status_line = False + while i < RETRIES: + i += 1 + try: + if conn.sock is None: + conn.connect() + conn.request(method, request_uri, body, headers) + except socket.timeout: + conn.close() + raise + except socket.gaierror: + conn.close() + raise ServerNotFoundError("Unable to find the server at %s" % conn.host) + except socket.error as e: + errno_ = ( + e.args[0].errno if isinstance(e.args[0], socket.error) else e.errno + ) + if errno_ in (errno.ENETUNREACH, errno.EADDRNOTAVAIL) and i < RETRIES: + continue # retry on potentially transient errors + raise + except http.client.HTTPException: + if conn.sock is None: + if i < RETRIES - 1: + conn.close() + conn.connect() + continue + else: + conn.close() + raise + if i < RETRIES - 1: + conn.close() + conn.connect() + continue + # Just because the server closed the connection doesn't apparently mean + # that the server didn't send a response. + pass + try: + response = conn.getresponse() + except (http.client.BadStatusLine, http.client.ResponseNotReady): + # If we get a BadStatusLine on the first try then that means + # the connection just went stale, so retry regardless of the + # number of RETRIES set. 
+ if not seen_bad_status_line and i == 1: + i = 0 + seen_bad_status_line = True + conn.close() + conn.connect() + continue + else: + conn.close() + raise + except socket.timeout: + raise + except (socket.error, http.client.HTTPException): + conn.close() + if i == 0: + conn.close() + conn.connect() + continue + else: + raise + else: + content = b"" + if method == "HEAD": + conn.close() + else: + content = response.read() + response = Response(response) + if method != "HEAD": + content = _decompressContent(response, content) + + break + return (response, content) + + def _request( + self, + conn, + host, + absolute_uri, + request_uri, + method, + body, + headers, + redirections, + cachekey, + ): + """Do the actual request using the connection object + and also follow one level of redirects if necessary""" + + auths = [ + (auth.depth(request_uri), auth) + for auth in self.authorizations + if auth.inscope(host, request_uri) + ] + auth = auths and sorted(auths)[0][1] or None + if auth: + auth.request(method, request_uri, headers, body) + + (response, content) = self._conn_request( + conn, request_uri, method, body, headers + ) + + if auth: + if auth.response(response, body): + auth.request(method, request_uri, headers, body) + (response, content) = self._conn_request( + conn, request_uri, method, body, headers + ) + response._stale_digest = 1 + + if response.status == 401: + for authorization in self._auth_from_challenge( + host, request_uri, headers, response, content + ): + authorization.request(method, request_uri, headers, body) + (response, content) = self._conn_request( + conn, request_uri, method, body, headers + ) + if response.status != 401: + self.authorizations.append(authorization) + authorization.response(response, body) + break + + if ( + self.follow_all_redirects + or method in self.safe_methods + or response.status in (303, 308) + ): + if self.follow_redirects and response.status in self.redirect_codes: + # Pick out the location header and basically start from the beginning + # remembering first to strip the ETag header and decrement our 'depth' + if redirections: + if "location" not in response and response.status != 300: + raise RedirectMissingLocation( + _( + "Redirected but the response is missing a Location: header." 
+ ),
+ response,
+ content,
+ )
+ # Fix-up relative redirects (which violate an RFC 2616 MUST)
+ if "location" in response:
+ location = response["location"]
+ (scheme, authority, path, query, fragment) = parse_uri(location)
+ if authority is None:
+ response["location"] = urllib.parse.urljoin(
+ absolute_uri, location
+ )
+ if response.status == 308 or (response.status == 301 and (method in self.safe_methods)):
+ response["-x-permanent-redirect-url"] = response["location"]
+ if "content-location" not in response:
+ response["content-location"] = absolute_uri
+ _updateCache(headers, response, content, self.cache, cachekey)
+ if "if-none-match" in headers:
+ del headers["if-none-match"]
+ if "if-modified-since" in headers:
+ del headers["if-modified-since"]
+ if (
+ "authorization" in headers
+ and not self.forward_authorization_headers
+ ):
+ del headers["authorization"]
+ if "location" in response:
+ location = response["location"]
+ old_response = copy.deepcopy(response)
+ if "content-location" not in old_response:
+ old_response["content-location"] = absolute_uri
+ redirect_method = method
+ if response.status in [302, 303]:
+ redirect_method = "GET"
+ body = None
+ (response, content) = self.request(
+ location,
+ method=redirect_method,
+ body=body,
+ headers=headers,
+ redirections=redirections - 1,
+ )
+ response.previous = old_response
+ else:
+ raise RedirectLimit(
+ "Redirected more times than redirection_limit allows.",
+ response,
+ content,
+ )
+ elif response.status in [200, 203] and method in self.safe_methods:
+ # Don't cache 206's since we aren't going to handle byte range requests
+ if "content-location" not in response:
+ response["content-location"] = absolute_uri
+ _updateCache(headers, response, content, self.cache, cachekey)
+
+ return (response, content)
+
+ def _normalize_headers(self, headers):
+ return _normalize_headers(headers)
+
+ # Need to catch and rebrand some exceptions
+ # Then need to optionally turn all exceptions into status codes
+ # including all socket.* and httplib.* exceptions.
+
+ def request(
+ self,
+ uri,
+ method="GET",
+ body=None,
+ headers=None,
+ redirections=DEFAULT_MAX_REDIRECTS,
+ connection_type=None,
+ ):
+ """ Performs a single HTTP request.
+The 'uri' is the URI of the HTTP resource and can begin
+with either 'http' or 'https'. The value of 'uri' must be an absolute URI.
+
+The 'method' is the HTTP method to perform, such as GET, POST, DELETE, etc.
+There is no restriction on the methods allowed.
+
+The 'body' is the entity body to be sent with the request. It is a string
+object.
+
+Any extra headers that are to be sent with the request should be provided in the
+'headers' dictionary.
+
+The maximum number of redirects to follow before raising an
+exception is 'redirections'. The default is 5.
+
+The return value is a tuple of (response, content), the first
+being an instance of the 'Response' class, the second being
+a string that contains the response entity body.
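+
+A minimal usage sketch (the URL, header, and variable names below are
+illustrative only, not part of the API):
+
+    h = Http(timeout=10)
+    resp, content = h.request("http://example.com/", method="GET",
+                              headers={"cache-control": "no-cache"})
+    print(resp.status, len(content))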
+ """ + conn_key = '' + + try: + if headers is None: + headers = {} + else: + headers = self._normalize_headers(headers) + + if "user-agent" not in headers: + headers["user-agent"] = "Python-httplib2/%s (gzip)" % __version__ + + uri = iri2uri(uri) + + (scheme, authority, request_uri, defrag_uri) = urlnorm(uri) + + conn_key = scheme + ":" + authority + conn = self.connections.get(conn_key) + if conn is None: + if not connection_type: + connection_type = SCHEME_TO_CONNECTION[scheme] + certs = list(self.certificates.iter(authority)) + if issubclass(connection_type, HTTPSConnectionWithTimeout): + if certs: + conn = self.connections[conn_key] = connection_type( + authority, + key_file=certs[0][0], + cert_file=certs[0][1], + timeout=self.timeout, + proxy_info=self.proxy_info, + ca_certs=self.ca_certs, + disable_ssl_certificate_validation=self.disable_ssl_certificate_validation, + tls_maximum_version=self.tls_maximum_version, + tls_minimum_version=self.tls_minimum_version, + key_password=certs[0][2], + ) + else: + conn = self.connections[conn_key] = connection_type( + authority, + timeout=self.timeout, + proxy_info=self.proxy_info, + ca_certs=self.ca_certs, + disable_ssl_certificate_validation=self.disable_ssl_certificate_validation, + tls_maximum_version=self.tls_maximum_version, + tls_minimum_version=self.tls_minimum_version, + ) + else: + conn = self.connections[conn_key] = connection_type( + authority, timeout=self.timeout, proxy_info=self.proxy_info + ) + conn.set_debuglevel(debuglevel) + + if "range" not in headers and "accept-encoding" not in headers: + headers["accept-encoding"] = "gzip, deflate" + + info = email.message.Message() + cachekey = None + cached_value = None + if self.cache: + cachekey = defrag_uri + cached_value = self.cache.get(cachekey) + if cached_value: + try: + info, content = cached_value.split(b"\r\n\r\n", 1) + info = email.message_from_bytes(info) + for k, v in info.items(): + if v.startswith("=?") and v.endswith("?="): + info.replace_header( + k, str(*email.header.decode_header(v)[0]) + ) + except (IndexError, ValueError): + self.cache.delete(cachekey) + cachekey = None + cached_value = None + + if ( + method in self.optimistic_concurrency_methods + and self.cache + and "etag" in info + and not self.ignore_etag + and "if-match" not in headers + ): + # http://www.w3.org/1999/04/Editing/ + headers["if-match"] = info["etag"] + + # https://tools.ietf.org/html/rfc7234 + # A cache MUST invalidate the effective Request URI as well as [...] Location and Content-Location + # when a non-error status code is received in response to an unsafe request method. + if self.cache and cachekey and method not in self.safe_methods: + self.cache.delete(cachekey) + + # Check the vary header in the cache to see if this request + # matches what varies in the cache. + if method in self.safe_methods and "vary" in info: + vary = info["vary"] + vary_headers = vary.lower().replace(" ", "").split(",") + for header in vary_headers: + key = "-varied-%s" % header + value = info[key] + if headers.get(header, None) != value: + cached_value = None + break + + if ( + self.cache + and cached_value + and (method in self.safe_methods or info["status"] == "308") + and "range" not in headers + ): + redirect_method = method + if info["status"] not in ("307", "308"): + redirect_method = "GET" + if "-x-permanent-redirect-url" in info: + # Should cached permanent redirects be counted in our redirection count? For now, yes. 
+ if redirections <= 0: + raise RedirectLimit( + "Redirected more times than redirection_limit allows.", + {}, + "", + ) + (response, new_content) = self.request( + info["-x-permanent-redirect-url"], + method=redirect_method, + headers=headers, + redirections=redirections - 1, + ) + response.previous = Response(info) + response.previous.fromcache = True + else: + # Determine our course of action: + # Is the cached entry fresh or stale? + # Has the client requested a non-cached response? + # + # There seems to be three possible answers: + # 1. [FRESH] Return the cache entry w/o doing a GET + # 2. [STALE] Do the GET (but add in cache validators if available) + # 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request + entry_disposition = _entry_disposition(info, headers) + + if entry_disposition == "FRESH": + if not cached_value: + info["status"] = "504" + content = b"" + response = Response(info) + if cached_value: + response.fromcache = True + return (response, content) + + if entry_disposition == "STALE": + if ( + "etag" in info + and not self.ignore_etag + and not "if-none-match" in headers + ): + headers["if-none-match"] = info["etag"] + if "last-modified" in info and not "last-modified" in headers: + headers["if-modified-since"] = info["last-modified"] + elif entry_disposition == "TRANSPARENT": + pass + + (response, new_content) = self._request( + conn, + authority, + uri, + request_uri, + method, + body, + headers, + redirections, + cachekey, + ) + + if response.status == 304 and method == "GET": + # Rewrite the cache entry with the new end-to-end headers + # Take all headers that are in response + # and overwrite their values in info. + # unless they are hop-by-hop, or are listed in the connection header. + + for key in _get_end2end_headers(response): + info[key] = response[key] + merged_response = Response(info) + if hasattr(response, "_stale_digest"): + merged_response._stale_digest = response._stale_digest + _updateCache( + headers, merged_response, content, self.cache, cachekey + ) + response = merged_response + response.status = 200 + response.fromcache = True + + elif response.status == 200: + content = new_content + else: + self.cache.delete(cachekey) + content = new_content + else: + cc = _parse_cache_control(headers) + if "only-if-cached" in cc: + info["status"] = "504" + response = Response(info) + content = b"" + else: + (response, content) = self._request( + conn, + authority, + uri, + request_uri, + method, + body, + headers, + redirections, + cachekey, + ) + except Exception as e: + is_timeout = isinstance(e, socket.timeout) + if is_timeout: + conn = self.connections.pop(conn_key, None) + if conn: + conn.close() + + if self.force_exception_to_status_code: + if isinstance(e, HttpLib2ErrorWithResponse): + response = e.response + content = e.content + response.status = 500 + response.reason = str(e) + elif isinstance(e, socket.timeout): + content = b"Request Timeout" + response = Response( + { + "content-type": "text/plain", + "status": "408", + "content-length": len(content), + } + ) + response.reason = "Request Timeout" + else: + content = str(e).encode("utf-8") + response = Response( + { + "content-type": "text/plain", + "status": "400", + "content-length": len(content), + } + ) + response.reason = "Bad Request" + else: + raise + + return (response, content) + + +class Response(dict): + """An object more like email.message than httplib.HTTPResponse.""" + + """Is this response from our local cache""" + fromcache = False + """HTTP 
protocol version used by server. + + 10 for HTTP/1.0, 11 for HTTP/1.1. + """ + version = 11 + + "Status code returned by server. " + status = 200 + """Reason phrase returned by server.""" + reason = "Ok" + + previous = None + + def __init__(self, info): + # info is either an email.message or + # an httplib.HTTPResponse object. + if isinstance(info, http.client.HTTPResponse): + for key, value in info.getheaders(): + key = key.lower() + prev = self.get(key) + if prev is not None: + value = ", ".join((prev, value)) + self[key] = value + self.status = info.status + self["status"] = str(self.status) + self.reason = info.reason + self.version = info.version + elif isinstance(info, email.message.Message): + for key, value in list(info.items()): + self[key.lower()] = value + self.status = int(self["status"]) + else: + for key, value in info.items(): + self[key.lower()] = value + self.status = int(self.get("status", self.status)) + + def __getattr__(self, name): + if name == "dict": + return self + else: + raise AttributeError(name) diff --git a/lib/httplib2/py3/cacerts.txt b/lib/httplib2/py3/cacerts.txt new file mode 100644 index 00000000..8020c1b4 --- /dev/null +++ b/lib/httplib2/py3/cacerts.txt @@ -0,0 +1,2197 @@ +# Issuer: CN=GTE CyberTrust Global Root O=GTE Corporation OU=GTE CyberTrust Solutions, Inc. +# Subject: CN=GTE CyberTrust Global Root O=GTE Corporation OU=GTE CyberTrust Solutions, Inc. +# Label: "GTE CyberTrust Global Root" +# Serial: 421 +# MD5 Fingerprint: ca:3d:d3:68:f1:03:5c:d0:32:fa:b8:2b:59:e8:5a:db +# SHA1 Fingerprint: 97:81:79:50:d8:1c:96:70:cc:34:d8:09:cf:79:44:31:36:7e:f4:74 +# SHA256 Fingerprint: a5:31:25:18:8d:21:10:aa:96:4b:02:c7:b7:c6:da:32:03:17:08:94:e5:fb:71:ff:fb:66:67:d5:e6:81:0a:36 +-----BEGIN CERTIFICATE----- +MIICWjCCAcMCAgGlMA0GCSqGSIb3DQEBBAUAMHUxCzAJBgNVBAYTAlVTMRgwFgYD +VQQKEw9HVEUgQ29ycG9yYXRpb24xJzAlBgNVBAsTHkdURSBDeWJlclRydXN0IFNv +bHV0aW9ucywgSW5jLjEjMCEGA1UEAxMaR1RFIEN5YmVyVHJ1c3QgR2xvYmFsIFJv +b3QwHhcNOTgwODEzMDAyOTAwWhcNMTgwODEzMjM1OTAwWjB1MQswCQYDVQQGEwJV +UzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQYDVQQLEx5HVEUgQ3liZXJU +cnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN0IEds +b2JhbCBSb290MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCVD6C28FCc6HrH +iM3dFw4usJTQGz0O9pTAipTHBsiQl8i4ZBp6fmw8U+E3KHNgf7KXUwefU/ltWJTS +r41tiGeA5u2ylc9yMcqlHHK6XALnZELn+aks1joNrI1CqiQBOeacPwGFVw1Yh0X4 +04Wqk2kmhXBIgD8SFcd5tB8FLztimQIDAQABMA0GCSqGSIb3DQEBBAUAA4GBAG3r +GwnpXtlR22ciYaQqPEh346B8pt5zohQDhT37qw4wxYMWM4ETCJ57NE7fQMh017l9 +3PR2VX2bY1QY6fDq81yx2YtCHrnAlU66+tXifPVoYb+O7AWXX1uw16OFNMQkpw0P +lZPvy5TYnh+dXIVtx6quTx8itc2VrbqnzPmrC3p/ +-----END CERTIFICATE----- + +# Issuer: CN=Thawte Server CA O=Thawte Consulting cc OU=Certification Services Division +# Subject: CN=Thawte Server CA O=Thawte Consulting cc OU=Certification Services Division +# Label: "Thawte Server CA" +# Serial: 1 +# MD5 Fingerprint: c5:70:c4:a2:ed:53:78:0c:c8:10:53:81:64:cb:d0:1d +# SHA1 Fingerprint: 23:e5:94:94:51:95:f2:41:48:03:b4:d5:64:d2:a3:a3:f5:d8:8b:8c +# SHA256 Fingerprint: b4:41:0b:73:e2:e6:ea:ca:47:fb:c4:2f:8f:a4:01:8a:f4:38:1d:c5:4c:fa:a8:44:50:46:1e:ed:09:45:4d:e9 +-----BEGIN CERTIFICATE----- +MIIDEzCCAnygAwIBAgIBATANBgkqhkiG9w0BAQQFADCBxDELMAkGA1UEBhMCWkEx +FTATBgNVBAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYD +VQQKExRUaGF3dGUgQ29uc3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlv +biBTZXJ2aWNlcyBEaXZpc2lvbjEZMBcGA1UEAxMQVGhhd3RlIFNlcnZlciBDQTEm +MCQGCSqGSIb3DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0ZS5jb20wHhcNOTYwODAx +MDAwMDAwWhcNMjAxMjMxMjM1OTU5WjCBxDELMAkGA1UEBhMCWkExFTATBgNVBAgT 
+DFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3 +dGUgQ29uc3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNl +cyBEaXZpc2lvbjEZMBcGA1UEAxMQVGhhd3RlIFNlcnZlciBDQTEmMCQGCSqGSIb3 +DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0ZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQAD +gY0AMIGJAoGBANOkUG7I/1Zr5s9dtuoMaHVHoqrC2oQl/Kj0R1HahbUgdJSGHg91 +yekIYfUGbTBuFRkC6VLAYttNmZ7iagxEOM3+vuNkCXDF/rFrKbYvScg71CcEJRCX +L+eQbcAoQpnXTEPew/UhbVSfXcNY4cDk2VuwuNy0e982OsK1ZiIS1ocNAgMBAAGj +EzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEEBQADgYEAB/pMaVz7lcxG +7oWDTSEwjsrZqG9JGubaUeNgcGyEYRGhGshIPllDfU+VPaGLtwtimHp1it2ITk6e +QNuozDJ0uW8NxuOzRAvZim+aKZuZGCg70eNAKJpaPNW15yAbi8qkq43pUdniTCxZ +qdq5snUb9kLy78fyGPmJvKP/iiMucEc= +-----END CERTIFICATE----- + +# Issuer: CN=Thawte Premium Server CA O=Thawte Consulting cc OU=Certification Services Division +# Subject: CN=Thawte Premium Server CA O=Thawte Consulting cc OU=Certification Services Division +# Label: "Thawte Premium Server CA" +# Serial: 1 +# MD5 Fingerprint: 06:9f:69:79:16:66:90:02:1b:8c:8c:a2:c3:07:6f:3a +# SHA1 Fingerprint: 62:7f:8d:78:27:65:63:99:d2:7d:7f:90:44:c9:fe:b3:f3:3e:fa:9a +# SHA256 Fingerprint: ab:70:36:36:5c:71:54:aa:29:c2:c2:9f:5d:41:91:16:3b:16:2a:22:25:01:13:57:d5:6d:07:ff:a7:bc:1f:72 +-----BEGIN CERTIFICATE----- +MIIDJzCCApCgAwIBAgIBATANBgkqhkiG9w0BAQQFADCBzjELMAkGA1UEBhMCWkEx +FTATBgNVBAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYD +VQQKExRUaGF3dGUgQ29uc3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlv +biBTZXJ2aWNlcyBEaXZpc2lvbjEhMB8GA1UEAxMYVGhhd3RlIFByZW1pdW0gU2Vy +dmVyIENBMSgwJgYJKoZIhvcNAQkBFhlwcmVtaXVtLXNlcnZlckB0aGF3dGUuY29t +MB4XDTk2MDgwMTAwMDAwMFoXDTIwMTIzMTIzNTk1OVowgc4xCzAJBgNVBAYTAlpB +MRUwEwYDVQQIEwxXZXN0ZXJuIENhcGUxEjAQBgNVBAcTCUNhcGUgVG93bjEdMBsG +A1UEChMUVGhhd3RlIENvbnN1bHRpbmcgY2MxKDAmBgNVBAsTH0NlcnRpZmljYXRp +b24gU2VydmljZXMgRGl2aXNpb24xITAfBgNVBAMTGFRoYXd0ZSBQcmVtaXVtIFNl +cnZlciBDQTEoMCYGCSqGSIb3DQEJARYZcHJlbWl1bS1zZXJ2ZXJAdGhhd3RlLmNv +bTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA0jY2aovXwlue2oFBYo847kkE +VdbQ7xwblRZH7xhINTpS9CtqBo87L+pW46+GjZ4X9560ZXUCTe/LCaIhUdib0GfQ +ug2SBhRz1JPLlyoAnFxODLz6FVL88kRu2hFKbgifLy3j+ao6hnO2RlNYyIkFvYMR +uHM/qgeN9EJN50CdHDcCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG +9w0BAQQFAAOBgQAmSCwWwlj66BZ0DKqqX1Q/8tfJeGBeXm43YyJ3Nn6yF8Q0ufUI +hfzJATj/Tb7yFkJD57taRvvBxhEf8UqwKEbJw8RCfbz6q1lu1bdRiBHjpIUZa4JM +pAwSremkrj/xw0llmozFyD4lt5SZu5IycQfwhl7tUCemDaYj+bvLpgcUQg== +-----END CERTIFICATE----- + +# Issuer: O=Equifax OU=Equifax Secure Certificate Authority +# Subject: O=Equifax OU=Equifax Secure Certificate Authority +# Label: "Equifax Secure CA" +# Serial: 903804111 +# MD5 Fingerprint: 67:cb:9d:c0:13:24:8a:82:9b:b2:17:1e:d1:1b:ec:d4 +# SHA1 Fingerprint: d2:32:09:ad:23:d3:14:23:21:74:e4:0d:7f:9d:62:13:97:86:63:3a +# SHA256 Fingerprint: 08:29:7a:40:47:db:a2:36:80:c7:31:db:6e:31:76:53:ca:78:48:e1:be:bd:3a:0b:01:79:a7:07:f9:2c:f1:78 +-----BEGIN CERTIFICATE----- +MIIDIDCCAomgAwIBAgIENd70zzANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJV +UzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2Vy +dGlmaWNhdGUgQXV0aG9yaXR5MB4XDTk4MDgyMjE2NDE1MVoXDTE4MDgyMjE2NDE1 +MVowTjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VxdWlmYXgxLTArBgNVBAsTJEVx +dWlmYXggU2VjdXJlIENlcnRpZmljYXRlIEF1dGhvcml0eTCBnzANBgkqhkiG9w0B +AQEFAAOBjQAwgYkCgYEAwV2xWGcIYu6gmi0fCG2RFGiYCh7+2gRvE4RiIcPRfM6f +BeC4AfBONOziipUEZKzxa1NfBbPLZ4C/QgKO/t0BCezhABRP/PvwDN1Dulsr4R+A +cJkVV5MW8Q+XarfCaCMczE1ZMKxRHjuvK9buY0V7xdlfUNLjUA86iOe/FP3gx7kC +AwEAAaOCAQkwggEFMHAGA1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEQ +MA4GA1UEChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlm 
+aWNhdGUgQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMBoGA1UdEAQTMBGBDzIwMTgw +ODIyMTY0MTUxWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUSOZo+SvSspXXR9gj +IBBPM5iQn9QwHQYDVR0OBBYEFEjmaPkr0rKV10fYIyAQTzOYkJ/UMAwGA1UdEwQF +MAMBAf8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUA +A4GBAFjOKer89961zgK5F7WF0bnj4JXMJTENAKaSbn+2kmOeUJXRmm/kEd5jhW6Y +7qj/WsjTVbJmcVfewCHrPSqnI0kBBIZCe/zuf6IWUrVnZ9NA2zsmWLIodz2uFHdh +1voqZiegDfqnc1zqcPGUIWVEX/r87yloqaKHee9570+sB3c4 +-----END CERTIFICATE----- + +# Issuer: O=VeriSign, Inc. OU=Class 3 Public Primary Certification Authority - G2/(c) 1998 VeriSign, Inc. - For authorized use only/VeriSign Trust Network +# Subject: O=VeriSign, Inc. OU=Class 3 Public Primary Certification Authority - G2/(c) 1998 VeriSign, Inc. - For authorized use only/VeriSign Trust Network +# Label: "Verisign Class 3 Public Primary Certification Authority - G2" +# Serial: 167285380242319648451154478808036881606 +# MD5 Fingerprint: a2:33:9b:4c:74:78:73:d4:6c:e7:c1:f3:8d:cb:5c:e9 +# SHA1 Fingerprint: 85:37:1c:a6:e5:50:14:3d:ce:28:03:47:1b:de:3a:09:e8:f8:77:0f +# SHA256 Fingerprint: 83:ce:3c:12:29:68:8a:59:3d:48:5f:81:97:3c:0f:91:95:43:1e:da:37:cc:5e:36:43:0e:79:c7:a8:88:63:8b +-----BEGIN CERTIFICATE----- +MIIDAjCCAmsCEH3Z/gfPqB63EHln+6eJNMYwDQYJKoZIhvcNAQEFBQAwgcExCzAJ +BgNVBAYTAlVTMRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xh +c3MgMyBQdWJsaWMgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcy +MTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3Jp +emVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMB4X +DTk4MDUxODAwMDAwMFoXDTI4MDgwMTIzNTk1OVowgcExCzAJBgNVBAYTAlVTMRcw +FQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMyBQdWJsaWMg +UHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEo +YykgMTk5OCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5 +MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMIGfMA0GCSqGSIb3DQEB +AQUAA4GNADCBiQKBgQDMXtERXVxp0KvTuWpMmR9ZmDCOFoUgRm1HP9SFIIThbbP4 +pO0M8RcPO/mn+SXXwc+EY/J8Y8+iR/LGWzOOZEAEaMGAuWQcRXfH2G71lSk8UOg0 +13gfqLptQ5GVj0VXXn7F+8qkBOvqlzdUMG+7AUcyM83cV5tkaWH4mx0ciU9cZwID +AQABMA0GCSqGSIb3DQEBBQUAA4GBAFFNzb5cy5gZnBWyATl4Lk0PZ3BwmcYQWpSk +U01UbSuvDV1Ai2TT1+7eVmGSX6bEHRBhNtMsJzzoKQm5EWR0zLVznxxIqbxhAe7i +F6YM40AIOw7n60RzKprxaZLvcRTDOaxxp5EJb+RxBrO6WVcmeQD2+A2iMzAo1KpY +oJ2daZH9 +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA +# Subject: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA +# Label: "GlobalSign Root CA" +# Serial: 4835703278459707669005204 +# MD5 Fingerprint: 3e:45:52:15:09:51:92:e1:b7:5d:37:9f:b1:87:29:8a +# SHA1 Fingerprint: b1:bc:96:8b:d4:f4:9d:62:2a:a8:9a:81:f2:15:01:52:a4:1d:82:9c +# SHA256 Fingerprint: eb:d4:10:40:e4:bb:3e:c7:42:c9:e3:81:d3:1e:f2:a4:1a:48:b6:68:5c:96:e7:ce:f3:c1:df:6c:d4:33:1c:99 +-----BEGIN CERTIFICATE----- +MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG +A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv +b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw +MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i +YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT +aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ +jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp +xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp +1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG +snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ +U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8 
+9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E +BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B +AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz +yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE +38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP +AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad +DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME +HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2 +# Label: "GlobalSign Root CA - R2" +# Serial: 4835703278459682885658125 +# MD5 Fingerprint: 94:14:77:7e:3e:5e:fd:8f:30:bd:41:b0:cf:e7:d0:30 +# SHA1 Fingerprint: 75:e0:ab:b6:13:85:12:27:1c:04:f8:5f:dd:de:38:e4:b7:24:2e:fe +# SHA256 Fingerprint: ca:42:dd:41:74:5f:d0:b8:1e:b9:02:36:2c:f9:d8:bf:71:9d:a1:bd:1b:1e:fc:94:6f:5b:4c:99:f4:2c:1b:9e +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4G +A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNp +Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1 +MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEG +A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPL +v4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8 +eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklq +tTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzd +C9XZzPnqJworc5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pa +zq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCB +mTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUm+IH +V2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5n +bG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG +3lm0mi3f3BmGLjANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4Gs +J0/WwbgcQ3izDJr86iw8bmEbTUsp9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO +291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiPqFbQfXf5WRDLenVOavS +ot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMNYxd +AfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7 +TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg== +-----END CERTIFICATE----- + +# Issuer: CN=http://www.valicert.com/ O=ValiCert, Inc. OU=ValiCert Class 1 Policy Validation Authority +# Subject: CN=http://www.valicert.com/ O=ValiCert, Inc. 
OU=ValiCert Class 1 Policy Validation Authority +# Label: "ValiCert Class 1 VA" +# Serial: 1 +# MD5 Fingerprint: 65:58:ab:15:ad:57:6c:1e:a8:a7:b5:69:ac:bf:ff:eb +# SHA1 Fingerprint: e5:df:74:3c:b6:01:c4:9b:98:43:dc:ab:8c:e8:6a:81:10:9f:e4:8e +# SHA256 Fingerprint: f4:c1:49:55:1a:30:13:a3:5b:c7:bf:fe:17:a7:f3:44:9b:c1:ab:5b:5a:0a:e7:4b:06:c2:3b:90:00:4c:01:04 +-----BEGIN CERTIFICATE----- +MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0 +IFZhbGlkYXRpb24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAz +BgNVBAsTLFZhbGlDZXJ0IENsYXNzIDEgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9y +aXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG +9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNTIyMjM0OFoXDTE5MDYy +NTIyMjM0OFowgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0d29y +azEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs +YXNzIDEgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRw +Oi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNl +cnQuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDYWYJ6ibiWuqYvaG9Y +LqdUHAZu9OqNSLwxlBfw8068srg1knaw0KWlAdcAAxIiGQj4/xEjm84H9b9pGib+ +TunRf50sQB1ZaG6m+FiwnRqP0z/x3BkGgagO4DrdyFNFCQbmD3DD+kCmDuJWBQ8Y +TfwggtFzVXSNdnKgHZ0dwN0/cQIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFBoPUn0 +LBwGlN+VYH+Wexf+T3GtZMjdd9LvWVXoP+iOBSoh8gfStadS/pyxtuJbdxdA6nLW +I8sogTLDAHkY7FkXicnGah5xyf23dKUlRWnFSKsZ4UWKJWsZ7uW7EvV/96aNUcPw +nXS3qT6gpf+2SQMT2iLM7XGCK5nPOrf1LXLI +-----END CERTIFICATE----- + +# Issuer: CN=http://www.valicert.com/ O=ValiCert, Inc. OU=ValiCert Class 2 Policy Validation Authority +# Subject: CN=http://www.valicert.com/ O=ValiCert, Inc. OU=ValiCert Class 2 Policy Validation Authority +# Label: "ValiCert Class 2 VA" +# Serial: 1 +# MD5 Fingerprint: a9:23:75:9b:ba:49:36:6e:31:c2:db:f2:e7:66:ba:87 +# SHA1 Fingerprint: 31:7a:2a:d0:7f:2b:33:5e:f5:a1:c3:4e:4b:57:e8:b7:d8:f1:fc:a6 +# SHA256 Fingerprint: 58:d0:17:27:9c:d4:dc:63:ab:dd:b1:96:a6:c9:90:6c:30:c4:e0:87:83:ea:e8:c1:60:99:54:d6:93:55:59:6b +-----BEGIN CERTIFICATE----- +MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0 +IFZhbGlkYXRpb24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAz +BgNVBAsTLFZhbGlDZXJ0IENsYXNzIDIgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9y +aXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG +9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAwMTk1NFoXDTE5MDYy +NjAwMTk1NFowgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0d29y +azEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs +YXNzIDIgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRw +Oi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNl +cnQuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDOOnHK5avIWZJV16vY +dA757tn2VUdZZUcOBVXc65g2PFxTXdMwzzjsvUGJ7SVCCSRrCl6zfN1SLUzm1NZ9 +WlmpZdRJEy0kTRxQb7XBhVQ7/nHk01xC+YDgkRoKWzk2Z/M/VXwbP7RfZHM047QS +v4dk+NoS/zcnwbNDu+97bi5p9wIDAQABMA0GCSqGSIb3DQEBBQUAA4GBADt/UG9v +UJSZSWI4OB9L+KXIPqeCgfYrx+jFzug6EILLGACOTb2oWH+heQC1u+mNr0HZDzTu +IYEZoDJJKPTEjlbVUjP9UNV+mWwD5MlM/Mtsq2azSiGM5bUMMj4QssxsodyamEwC +W/POuZ6lcg5Ktz885hZo+L7tdEy8W9ViH0Pd +-----END CERTIFICATE----- + +# Issuer: CN=http://www.valicert.com/ O=ValiCert, Inc. OU=ValiCert Class 3 Policy Validation Authority +# Subject: CN=http://www.valicert.com/ O=ValiCert, Inc. 
OU=ValiCert Class 3 Policy Validation Authority +# Label: "RSA Root Certificate 1" +# Serial: 1 +# MD5 Fingerprint: a2:6f:53:b7:ee:40:db:4a:68:e7:fa:18:d9:10:4b:72 +# SHA1 Fingerprint: 69:bd:8c:f4:9c:d3:00:fb:59:2e:17:93:ca:55:6a:f3:ec:aa:35:fb +# SHA256 Fingerprint: bc:23:f9:8a:31:3c:b9:2d:e3:bb:fc:3a:5a:9f:44:61:ac:39:49:4c:4a:e1:5a:9e:9d:f1:31:e9:9b:73:01:9a +-----BEGIN CERTIFICATE----- +MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0 +IFZhbGlkYXRpb24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAz +BgNVBAsTLFZhbGlDZXJ0IENsYXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9y +aXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG +9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAwMjIzM1oXDTE5MDYy +NjAwMjIzM1owgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0d29y +azEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs +YXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRw +Oi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNl +cnQuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDjmFGWHOjVsQaBalfD +cnWTq8+epvzzFlLWLU2fNUSoLgRNB0mKOCn1dzfnt6td3zZxFJmP3MKS8edgkpfs +2Ejcv8ECIMYkpChMMFp2bbFc893enhBxoYjHW5tBbcqwuI4V7q0zK89HBFx1cQqY +JJgpp0lZpd34t0NiYfPT4tBVPwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFa7AliE +Zwgs3x/be0kz9dNnnfS0ChCzycUs4pJqcXgn8nCDQtM+z6lU9PHYkhaM0QTLS6vJ +n0WuPIqpsHEzXcjFV9+vqDWzf4mH6eglkrh/hXqu1rweN1gqZ8mRzyqBPu3GOd/A +PhmcGcwTTYJBtYze4D1gCCAPRX5ron+jjBXu +-----END CERTIFICATE----- + +# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only +# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only +# Label: "Verisign Class 3 Public Primary Certification Authority - G3" +# Serial: 206684696279472310254277870180966723415 +# MD5 Fingerprint: cd:68:b6:a7:c7:c4:ce:75:e0:1d:4f:57:44:61:92:09 +# SHA1 Fingerprint: 13:2d:0d:45:53:4b:69:97:cd:b2:d5:c3:39:e2:55:76:60:9b:5c:c6 +# SHA256 Fingerprint: eb:04:cf:5e:b1:f3:9a:fa:76:2f:2b:b1:20:f2:96:cb:a5:20:c1:b9:7d:b1:58:95:65:b8:1c:b9:a1:7b:72:44 +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl +cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu +LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT +aWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD +VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT +aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ +bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu +IENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMu6nFL8eB8aHm8b +N3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1EUGO+i2t +KmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGu +kxUccLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBm +CC+Vk7+qRy+oRpfwEuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJ +Xwzw3sJ2zq/3avL6QaaiMxTJ5Xpj055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWu +imi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAERSWwauSCPc/L8my/uRan2Te +2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5fj267Cz3qWhMe +DGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC +/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565p 
+F4ErWjfJXir0xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGt +TxzhT5yvDwyd93gN2PQ1VoDat20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ== +-----END CERTIFICATE----- + +# Issuer: CN=VeriSign Class 4 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only +# Subject: CN=VeriSign Class 4 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only +# Label: "Verisign Class 4 Public Primary Certification Authority - G3" +# Serial: 314531972711909413743075096039378935511 +# MD5 Fingerprint: db:c8:f2:27:2e:b1:ea:6a:29:23:5d:fe:56:3e:33:df +# SHA1 Fingerprint: c8:ec:8c:87:92:69:cb:4b:ab:39:e9:8d:7e:57:67:f3:14:95:73:9d +# SHA256 Fingerprint: e3:89:36:0d:0f:db:ae:b3:d2:50:58:4b:47:30:31:4e:22:2f:39:c1:56:a0:20:14:4e:8d:96:05:61:79:15:06 +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQDsoKeLbnVqAc/EfMwvlF7XMA0GCSqGSIb3DQEBBQUAMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl +cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu +LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT +aWduIENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD +VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT +aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ +bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu +IENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK3LpRFpxlmr8Y+1 +GQ9Wzsy1HyDkniYlS+BzZYlZ3tCD5PUPtbut8XzoIfzk6AzufEUiGXaStBO3IFsJ ++mGuqPKljYXCKtbeZjbSmwL0qJJgfJxptI8kHtCGUvYynEFYHiK9zUVilQhu0Gbd +U6LM8BDcVHOLBKFGMzNcF0C5nk3T875Vg+ixiY5afJqWIpA7iCXy0lOIAgwLePLm +NxdLMEYH5IBtptiWLugs+BGzOA1mppvqySNb247i8xOOGlktqgLw7KSHZtzBP/XY +ufTsgsbSPZUd5cBPhMnZo0QoBmrXRazwa2rvTl/4EYIeOGM0ZlDUPpNz+jDDZq3/ +ky2X7wMCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAj/ola09b5KROJ1WrIhVZPMq1 +CtRK26vdoV9TxaBXOcLORyu+OshWv8LZJxA6sQU8wHcxuzrTBXttmhwwjIDLk5Mq +g6sFUYICABFna/OIYUdfA5PVWw3g8dShMjWFsjrbsIKr0csKvE+MW8VLADsfKoKm +fjaF3H48ZwC15DtS4KjrXRX5xm3wrR0OhbepmnMUWluPQSjA1egtTaRezarZ7c7c +2NU8Qh0XwRJdRTjDOPP8hS6DRkiy1yBfkjaP53kPmF6Z6PDQpLv1U70qzlmwr25/ +bLvSHgCwIe34QWKCudiyxLtGUPMxxY8BqHTr9Xgn2uf3ZkPznoM+IKrDNWCRzg== +-----END CERTIFICATE----- + +# Issuer: CN=Entrust.net Secure Server Certification Authority O=Entrust.net OU=www.entrust.net/CPS incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited +# Subject: CN=Entrust.net Secure Server Certification Authority O=Entrust.net OU=www.entrust.net/CPS incorp. by ref. 
(limits liab.)/(c) 1999 Entrust.net Limited +# Label: "Entrust.net Secure Server CA" +# Serial: 927650371 +# MD5 Fingerprint: df:f2:80:73:cc:f1:e6:61:73:fc:f5:42:e9:c5:7c:ee +# SHA1 Fingerprint: 99:a6:9b:e6:1a:fe:88:6b:4d:2b:82:00:7c:b8:54:fc:31:7e:15:39 +# SHA256 Fingerprint: 62:f2:40:27:8c:56:4c:4d:d8:bf:7d:9d:4f:6f:36:6e:a8:94:d2:2f:5f:34:d9:89:a9:83:ac:ec:2f:ff:ed:50 +-----BEGIN CERTIFICATE----- +MIIE2DCCBEGgAwIBAgIEN0rSQzANBgkqhkiG9w0BAQUFADCBwzELMAkGA1UEBhMC +VVMxFDASBgNVBAoTC0VudHJ1c3QubmV0MTswOQYDVQQLEzJ3d3cuZW50cnVzdC5u +ZXQvQ1BTIGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxpYWIuKTElMCMGA1UECxMc +KGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDE6MDgGA1UEAxMxRW50cnVzdC5u +ZXQgU2VjdXJlIFNlcnZlciBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw05OTA1 +MjUxNjA5NDBaFw0xOTA1MjUxNjM5NDBaMIHDMQswCQYDVQQGEwJVUzEUMBIGA1UE +ChMLRW50cnVzdC5uZXQxOzA5BgNVBAsTMnd3dy5lbnRydXN0Lm5ldC9DUFMgaW5j +b3JwLiBieSByZWYuIChsaW1pdHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMTk5OSBF +bnRydXN0Lm5ldCBMaW1pdGVkMTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUg +U2VydmVyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGdMA0GCSqGSIb3DQEBAQUA +A4GLADCBhwKBgQDNKIM0VBuJ8w+vN5Ex/68xYMmo6LIQaO2f55M28Qpku0f1BBc/ +I0dNxScZgSYMVHINiC3ZH5oSn7yzcdOAGT9HZnuMNSjSuQrfJNqc1lB5gXpa0zf3 +wkrYKZImZNHkmGw6AIr1NJtl+O3jEP/9uElY3KDegjlrgbEWGWG5VLbmQwIBA6OC +AdcwggHTMBEGCWCGSAGG+EIBAQQEAwIABzCCARkGA1UdHwSCARAwggEMMIHeoIHb +oIHYpIHVMIHSMQswCQYDVQQGEwJVUzEUMBIGA1UEChMLRW50cnVzdC5uZXQxOzA5 +BgNVBAsTMnd3dy5lbnRydXN0Lm5ldC9DUFMgaW5jb3JwLiBieSByZWYuIChsaW1p +dHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMTk5OSBFbnRydXN0Lm5ldCBMaW1pdGVk +MTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUgU2VydmVyIENlcnRpZmljYXRp +b24gQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMCmgJ6AlhiNodHRwOi8vd3d3LmVu +dHJ1c3QubmV0L0NSTC9uZXQxLmNybDArBgNVHRAEJDAigA8xOTk5MDUyNTE2MDk0 +MFqBDzIwMTkwNTI1MTYwOTQwWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAU8Bdi +E1U9s/8KAGv7UISX8+1i0BowHQYDVR0OBBYEFPAXYhNVPbP/CgBr+1CEl/PtYtAa +MAwGA1UdEwQFMAMBAf8wGQYJKoZIhvZ9B0EABAwwChsEVjQuMAMCBJAwDQYJKoZI +hvcNAQEFBQADgYEAkNwwAvpkdMKnCqV8IY00F6j7Rw7/JXyNEwr75Ji174z4xRAN +95K+8cPV1ZVqBLssziY2ZcgxxufuP+NXdYR6Ee9GTxj005i7qIcyunL2POI9n9cd +2cNgQ4xYDiKWL2KjLB+6rQXvqzJ4h6BUcxm1XAX5Uj5tLUUL9wqT6u0G+bI= +-----END CERTIFICATE----- + +# Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited +# Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. 
(limits liab.)/(c) 1999 Entrust.net Limited +# Label: "Entrust.net Premium 2048 Secure Server CA" +# Serial: 946059622 +# MD5 Fingerprint: ba:21:ea:20:d6:dd:db:8f:c1:57:8b:40:ad:a1:fc:fc +# SHA1 Fingerprint: 80:1d:62:d0:7b:44:9d:5c:5c:03:5c:98:ea:61:fa:44:3c:2a:58:fe +# SHA256 Fingerprint: d1:c3:39:ea:27:84:eb:87:0f:93:4f:c5:63:4e:4a:a9:ad:55:05:01:64:01:f2:64:65:d3:7a:57:46:63:35:9f +-----BEGIN CERTIFICATE----- +MIIEXDCCA0SgAwIBAgIEOGO5ZjANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML +RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp +bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5 +IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0xOTEy +MjQxODIwNTFaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3 +LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp +YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG +A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq +K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe +sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX +MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT +XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/ +HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH +4QIDAQABo3QwcjARBglghkgBhvhCAQEEBAMCAAcwHwYDVR0jBBgwFoAUVeSB0RGA +vtiJuQijMfmhJAkWuXAwHQYDVR0OBBYEFFXkgdERgL7YibkIozH5oSQJFrlwMB0G +CSqGSIb2fQdBAAQQMA4bCFY1LjA6NC4wAwIEkDANBgkqhkiG9w0BAQUFAAOCAQEA +WUesIYSKF8mciVMeuoCFGsY8Tj6xnLZ8xpJdGGQC49MGCBFhfGPjK50xA3B20qMo +oPS7mmNz7W3lKtvtFKkrxjYR0CvrB4ul2p5cGZ1WEvVUKcgF7bISKo30Axv/55IQ +h7A6tcOdBTcSo8f0FbnVpDkWm1M6I5HxqIKiaohowXkCIryqptau37AUX7iH0N18 +f3v/rxzP5tsHrV7bhZ3QKw0z2wTR5klAEyt2+z7pnIkPFc4YsIV4IU9rTw76NmfN +B/L/CNDi3tm/Kq+4h4YhPATKt5Rof8886ZjXOP/swNlQ8C5LWK5Gb9Auw2DaclVy +vUxFnmG6v4SBkgPR0ml8xQ== +-----END CERTIFICATE----- + +# Issuer: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust +# Subject: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust +# Label: "Baltimore CyberTrust Root" +# Serial: 33554617 +# MD5 Fingerprint: ac:b6:94:a5:9c:17:e0:d7:91:52:9b:b1:97:06:a6:e4 +# SHA1 Fingerprint: d4:de:20:d0:5e:66:fc:53:fe:1a:50:88:2c:78:db:28:52:ca:e4:74 +# SHA256 Fingerprint: 16:af:57:a9:f6:76:b0:ab:12:60:95:aa:5e:ba:de:f2:2a:b3:11:19:d6:44:ac:95:cd:4b:93:db:f3:f2:6a:eb +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ +RTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD +VQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX +DTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y +ZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy +VHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr +mD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr +IZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK +mpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu +XmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy +dc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye +jl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1 +BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3 +DQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92 +9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx +jkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0 
+Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz +ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS +R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp +-----END CERTIFICATE----- + +# Issuer: CN=Equifax Secure Global eBusiness CA-1 O=Equifax Secure Inc. +# Subject: CN=Equifax Secure Global eBusiness CA-1 O=Equifax Secure Inc. +# Label: "Equifax Secure Global eBusiness CA" +# Serial: 1 +# MD5 Fingerprint: 8f:5d:77:06:27:c4:98:3c:5b:93:78:e7:d7:7d:9b:cc +# SHA1 Fingerprint: 7e:78:4a:10:1c:82:65:cc:2d:e1:f1:6d:47:b4:40:ca:d9:0a:19:45 +# SHA256 Fingerprint: 5f:0b:62:ea:b5:e3:53:ea:65:21:65:16:58:fb:b6:53:59:f4:43:28:0a:4a:fb:d1:04:d7:7d:10:f9:f0:4c:07 +-----BEGIN CERTIFICATE----- +MIICkDCCAfmgAwIBAgIBATANBgkqhkiG9w0BAQQFADBaMQswCQYDVQQGEwJVUzEc +MBoGA1UEChMTRXF1aWZheCBTZWN1cmUgSW5jLjEtMCsGA1UEAxMkRXF1aWZheCBT +ZWN1cmUgR2xvYmFsIGVCdXNpbmVzcyBDQS0xMB4XDTk5MDYyMTA0MDAwMFoXDTIw +MDYyMTA0MDAwMFowWjELMAkGA1UEBhMCVVMxHDAaBgNVBAoTE0VxdWlmYXggU2Vj +dXJlIEluYy4xLTArBgNVBAMTJEVxdWlmYXggU2VjdXJlIEdsb2JhbCBlQnVzaW5l +c3MgQ0EtMTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAuucXkAJlsTRVPEnC +UdXfp9E3j9HngXNBUmCbnaEXJnitx7HoJpQytd4zjTov2/KaelpzmKNc6fuKcxtc +58O/gGzNqfTWK8D3+ZmqY6KxRwIP1ORROhI8bIpaVIRw28HFkM9yRcuoWcDNM50/ +o5brhTMhHD4ePmBudpxnhcXIw2ECAwEAAaNmMGQwEQYJYIZIAYb4QgEBBAQDAgAH +MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUvqigdHJQa0S3ySPY+6j/s1dr +aGwwHQYDVR0OBBYEFL6ooHRyUGtEt8kj2Puo/7NXa2hsMA0GCSqGSIb3DQEBBAUA +A4GBADDiAVGqx+pf2rnQZQ8w1j7aDRRJbpGTJxQx78T3LUX47Me/okENI7SS+RkA +Z70Br83gcfxaz2TE4JaY0KNA4gGK7ycH8WUBikQtBmV1UsCGECAhX2xrD2yuCRyv +8qIYNMR1pHMc8Y3c7635s3a0kr/clRAevsvIO1qEYBlWlKlV +-----END CERTIFICATE----- + +# Issuer: CN=Equifax Secure eBusiness CA-1 O=Equifax Secure Inc. +# Subject: CN=Equifax Secure eBusiness CA-1 O=Equifax Secure Inc. +# Label: "Equifax Secure eBusiness CA 1" +# Serial: 4 +# MD5 Fingerprint: 64:9c:ef:2e:44:fc:c6:8f:52:07:d0:51:73:8f:cb:3d +# SHA1 Fingerprint: da:40:18:8b:91:89:a3:ed:ee:ae:da:97:fe:2f:9d:f5:b7:d1:8a:41 +# SHA256 Fingerprint: cf:56:ff:46:a4:a1:86:10:9d:d9:65:84:b5:ee:b5:8a:51:0c:42:75:b0:e5:f9:4f:40:bb:ae:86:5e:19:f6:73 +-----BEGIN CERTIFICATE----- +MIICgjCCAeugAwIBAgIBBDANBgkqhkiG9w0BAQQFADBTMQswCQYDVQQGEwJVUzEc +MBoGA1UEChMTRXF1aWZheCBTZWN1cmUgSW5jLjEmMCQGA1UEAxMdRXF1aWZheCBT +ZWN1cmUgZUJ1c2luZXNzIENBLTEwHhcNOTkwNjIxMDQwMDAwWhcNMjAwNjIxMDQw +MDAwWjBTMQswCQYDVQQGEwJVUzEcMBoGA1UEChMTRXF1aWZheCBTZWN1cmUgSW5j +LjEmMCQGA1UEAxMdRXF1aWZheCBTZWN1cmUgZUJ1c2luZXNzIENBLTEwgZ8wDQYJ +KoZIhvcNAQEBBQADgY0AMIGJAoGBAM4vGbwXt3fek6lfWg0XTzQaDJj0ItlZ1MRo +RvC0NcWFAyDGr0WlIVFFQesWWDYyb+JQYmT5/VGcqiTZ9J2DKocKIdMSODRsjQBu +WqDZQu4aIZX5UkxVWsUPOE9G+m34LjXWHXzr4vCwdYDIqROsvojvOm6rXyo4YgKw +Env+j6YDAgMBAAGjZjBkMBEGCWCGSAGG+EIBAQQEAwIABzAPBgNVHRMBAf8EBTAD +AQH/MB8GA1UdIwQYMBaAFEp4MlIR21kWNl7fwRQ2QGpHfEyhMB0GA1UdDgQWBBRK +eDJSEdtZFjZe38EUNkBqR3xMoTANBgkqhkiG9w0BAQQFAAOBgQB1W6ibAxHm6VZM +zfmpTMANmvPMZWnmJXbMWbfWVMMdzZmsGd20hdXgPfxiIKeES1hl8eL5lSE/9dR+ +WB5Hh1Q+WKG1tfgq73HnvMP2sUlG4tega+VWeponmHxGYhTnyfxuAxJ5gDgdSIKN +/Bf+KpYrtWKmpj29f5JZzVoqgrI3eQ== +-----END CERTIFICATE----- + +# Issuer: O=Equifax Secure OU=Equifax Secure eBusiness CA-2 +# Subject: O=Equifax Secure OU=Equifax Secure eBusiness CA-2 +# Label: "Equifax Secure eBusiness CA 2" +# Serial: 930140085 +# MD5 Fingerprint: aa:bf:bf:64:97:da:98:1d:6f:c6:08:3a:95:70:33:ca +# SHA1 Fingerprint: 39:4f:f6:85:0b:06:be:52:e5:18:56:cc:10:e1:80:e8:82:b3:85:cc +# SHA256 Fingerprint: 2f:27:4e:48:ab:a4:ac:7b:76:59:33:10:17:75:50:6d:c3:0e:e3:8e:f6:ac:d5:c0:49:32:cf:e0:41:23:42:20 +-----BEGIN CERTIFICATE----- 
+MIIDIDCCAomgAwIBAgIEN3DPtTANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMORXF1aWZheCBTZWN1cmUxJjAkBgNVBAsTHUVxdWlmYXggU2Vj +dXJlIGVCdXNpbmVzcyBDQS0yMB4XDTk5MDYyMzEyMTQ0NVoXDTE5MDYyMzEyMTQ0 +NVowTjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDkVxdWlmYXggU2VjdXJlMSYwJAYD +VQQLEx1FcXVpZmF4IFNlY3VyZSBlQnVzaW5lc3MgQ0EtMjCBnzANBgkqhkiG9w0B +AQEFAAOBjQAwgYkCgYEA5Dk5kx5SBhsoNviyoynF7Y6yEb3+6+e0dMKP/wXn2Z0G +vxLIPw7y1tEkshHe0XMJitSxLJgJDR5QRrKDpkWNYmi7hRsgcDKqQM2mll/EcTc/ +BPO3QSQ5BxoeLmFYoBIL5aXfxavqN3HMHMg3OrmXUqesxWoklE6ce8/AatbfIb0C +AwEAAaOCAQkwggEFMHAGA1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEX +MBUGA1UEChMORXF1aWZheCBTZWN1cmUxJjAkBgNVBAsTHUVxdWlmYXggU2VjdXJl +IGVCdXNpbmVzcyBDQS0yMQ0wCwYDVQQDEwRDUkwxMBoGA1UdEAQTMBGBDzIwMTkw +NjIzMTIxNDQ1WjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUUJ4L6q9euSBIplBq +y/3YIHqngnYwHQYDVR0OBBYEFFCeC+qvXrkgSKZQasv92CB6p4J2MAwGA1UdEwQF +MAMBAf8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUA +A4GBAAyGgq3oThr1jokn4jVYPSm0B482UJW/bsGe68SQsoWou7dC4A8HOd/7npCy +0cE+U58DRLB+S/Rv5Hwf5+Kx5Lia78O9zt4LMjTZ3ijtM2vE1Nc9ElirfQkty3D1 +E4qUoSek1nDFbZS1yX2doNLGCEnZZpum0/QL3MUmV+GRMOrN +-----END CERTIFICATE----- + +# Issuer: CN=AddTrust Class 1 CA Root O=AddTrust AB OU=AddTrust TTP Network +# Subject: CN=AddTrust Class 1 CA Root O=AddTrust AB OU=AddTrust TTP Network +# Label: "AddTrust Low-Value Services Root" +# Serial: 1 +# MD5 Fingerprint: 1e:42:95:02:33:92:6b:b9:5f:c0:7f:da:d6:b2:4b:fc +# SHA1 Fingerprint: cc:ab:0e:a0:4c:23:01:d6:69:7b:dd:37:9f:cd:12:eb:24:e3:94:9d +# SHA256 Fingerprint: 8c:72:09:27:9a:c0:4e:27:5e:16:d0:7f:d3:b7:75:e8:01:54:b5:96:80:46:e3:1f:52:dd:25:76:63:24:e9:a7 +-----BEGIN CERTIFICATE----- +MIIEGDCCAwCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQGEwJTRTEU +MBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3 +b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwHhcNMDAwNTMw +MTAzODMxWhcNMjAwNTMwMTAzODMxWjBlMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYD +VQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUA +A4IBDwAwggEKAoIBAQCWltQhSWDia+hBBwzexODcEyPNwTXH+9ZOEQpnXvUGW2ul +CDtbKRY654eyNAbFvAWlA3yCyykQruGIgb3WntP+LVbBFc7jJp0VLhD7Bo8wBN6n +tGO0/7Gcrjyvd7ZWxbWroulpOj0OM3kyP3CCkplhbY0wCI9xP6ZIVxn4JdxLZlyl +dI+Yrsj5wAYi56xz36Uu+1LcsRVlIPo1Zmne3yzxbrww2ywkEtvrNTVokMsAsJch +PXQhI2U0K7t4WaPW4XY5mqRJjox0r26kmqPZm9I4XJuiGMx1I4S+6+JNM3GOGvDC ++Mcdoq0Dlyz4zyXG9rgkMbFjXZJ/Y/AlyVMuH79NAgMBAAGjgdIwgc8wHQYDVR0O +BBYEFJWxtPCUtr3H2tERCSG+wa9J/RB7MAsGA1UdDwQEAwIBBjAPBgNVHRMBAf8E +BTADAQH/MIGPBgNVHSMEgYcwgYSAFJWxtPCUtr3H2tERCSG+wa9J/RB7oWmkZzBl +MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFk +ZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENB +IFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBACxtZBsfzQ3duQH6lmM0MkhHma6X +7f1yFqZzR1r0693p9db7RcwpiURdv0Y5PejuvE1Uhh4dbOMXJ0PhiVYrqW9yTkkz +43J8KiOavD7/KCrto/8cI7pDVwlnTUtiBi34/2ydYB7YHEt9tTEv2dB8Xfjea4MY +eDdXL+gzB2ffHsdrKpV2ro9Xo/D0UrSpUwjP4E/TelOL/bscVjby/rK25Xa71SJl +pz/+0WatC7xrmYbvP33zGDLKe8bjq2RGlfgmadlVg3sslgf/WSxEo8bl6ancoWOA +WiFeIc9TVPC6b4nbqKqVz4vjccweGyBECMB6tkD9xOQ14R0WHNC8K47Wcdk= +-----END CERTIFICATE----- + +# Issuer: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network +# Subject: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network +# Label: "AddTrust External Root" +# Serial: 1 +# MD5 Fingerprint: 1d:35:54:04:85:78:b0:3f:42:42:4d:bf:20:73:0a:3f +# SHA1 Fingerprint: 02:fa:f3:e2:91:43:54:68:60:78:57:69:4d:f5:e4:5b:68:85:18:68 +# SHA256 Fingerprint: 
68:7f:a4:51:38:22:78:ff:f0:c8:b1:1f:8d:43:d5:76:67:1c:6e:b2:bc:ea:b4:13:fb:83:d9:65:d0:6d:2f:f2 +-----BEGIN CERTIFICATE----- +MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEU +MBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFs +IFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290 +MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEwNDgzOFowbzELMAkGA1UEBhMCU0Ux +FDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRUcnVzdCBFeHRlcm5h +bCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0EgUm9v +dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvt +H7xsD821+iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9 +uMq/NzgtHj6RQa1wVsfwTz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzX +mk6vBbOmcZSccbNQYArHE504B4YCqOmoaSYYkKtMsE8jqzpPhNjfzp/haW+710LX +a0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy2xSoRcRdKn23tNbE7qzN +E0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv77+ldU9U0 +WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYD +VR0PBAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0 +Jvf6xCZU7wO94CTLVBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRU +cnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsx +IjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENBIFJvb3SCAQEwDQYJKoZIhvcN +AQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZlj7DYd7usQWxH +YINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5 +6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvC +Nr4TDea9Y355e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEX +c4g/VhsxOBi0cQ+azcgOno4uG+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5a +mnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ= +-----END CERTIFICATE----- + +# Issuer: CN=AddTrust Public CA Root O=AddTrust AB OU=AddTrust TTP Network +# Subject: CN=AddTrust Public CA Root O=AddTrust AB OU=AddTrust TTP Network +# Label: "AddTrust Public Services Root" +# Serial: 1 +# MD5 Fingerprint: c1:62:3e:23:c5:82:73:9c:03:59:4b:2b:e9:77:49:7f +# SHA1 Fingerprint: 2a:b6:28:48:5e:78:fb:f3:ad:9e:79:10:dd:6b:df:99:72:2c:96:e5 +# SHA256 Fingerprint: 07:91:ca:07:49:b2:07:82:aa:d3:c7:d7:bd:0c:df:c9:48:58:35:84:3e:b2:d7:99:60:09:ce:43:ab:6c:69:27 +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIBATANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQGEwJTRTEU +MBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3 +b3JrMSAwHgYDVQQDExdBZGRUcnVzdCBQdWJsaWMgQ0EgUm9vdDAeFw0wMDA1MzAx +MDQxNTBaFw0yMDA1MzAxMDQxNTBaMGQxCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtB +ZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQIE5ldHdvcmsxIDAeBgNV +BAMTF0FkZFRydXN0IFB1YmxpYyBDQSBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA6Rowj4OIFMEg2Dybjxt+A3S72mnTRqX4jsIMEZBRpS9mVEBV +6tsfSlbunyNu9DnLoblv8n75XYcmYZ4c+OLspoH4IcUkzBEMP9smcnrHAZcHF/nX +GCwwfQ56HmIexkvA/X1id9NEHif2P0tEs7c42TkfYNVRknMDtABp4/MUTu7R3AnP +dzRGULD4EfL+OHn3Bzn+UZKXC1sIXzSGAa2Il+tmzV7R/9x98oTaunet3IAIx6eH +1lWfl2royBFkuucZKT8Rs3iQhCBSWxHveNCD9tVIkNAwHM+A+WD+eeSI8t0A65RF +62WUaUC6wNW0uLp9BBGo6zEFlpROWCGOn9Bg/QIDAQABo4HRMIHOMB0GA1UdDgQW +BBSBPjfYkrAfd59ctKtzquf2NGAv+jALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/BAUw +AwEB/zCBjgYDVR0jBIGGMIGDgBSBPjfYkrAfd59ctKtzquf2NGAv+qFopGYwZDEL +MAkGA1UEBhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRU +cnVzdCBUVFAgTmV0d29yazEgMB4GA1UEAxMXQWRkVHJ1c3QgUHVibGljIENBIFJv +b3SCAQEwDQYJKoZIhvcNAQEFBQADggEBAAP3FUr4JNojVhaTdt02KLmuG7jD8WS6 +IBh4lSknVwW8fCr0uVFV2ocC3g8WFzH4qnkuCRO7r7IgGRLlk/lL+YPoRNWyQSW/ +iHVv/xD8SlTQX/D67zZzfRs2RcYhbbQVuE7PnFylPVoAjgbjPGsye/Kf8Lb93/Ao +GEjwxrzQvzSAlsJKsW2Ox5BF3i9nrEUEo3rcVZLJR2bYGozH7ZxOmuASu7VqTITh +4SINhwBk/ox9Yjllpu9CtoAlEmEBqCQTcAARJl/6NVDFSMwGR+gn2HCNX2TmoUQm 
+XiLsks3/QppEIW1cxeMiHV9HEufOX1362KqxMy3ZdvJOOjMMK7MtkAY= +-----END CERTIFICATE----- + +# Issuer: CN=AddTrust Qualified CA Root O=AddTrust AB OU=AddTrust TTP Network +# Subject: CN=AddTrust Qualified CA Root O=AddTrust AB OU=AddTrust TTP Network +# Label: "AddTrust Qualified Certificates Root" +# Serial: 1 +# MD5 Fingerprint: 27:ec:39:47:cd:da:5a:af:e2:9a:01:65:21:a9:4c:bb +# SHA1 Fingerprint: 4d:23:78:ec:91:95:39:b5:00:7f:75:8f:03:3b:21:1e:c5:4d:8b:cf +# SHA256 Fingerprint: 80:95:21:08:05:db:4b:bc:35:5e:44:28:d8:fd:6e:c2:cd:e3:ab:5f:b9:7a:99:42:98:8e:b8:f4:dc:d0:60:16 +-----BEGIN CERTIFICATE----- +MIIEHjCCAwagAwIBAgIBATANBgkqhkiG9w0BAQUFADBnMQswCQYDVQQGEwJTRTEU +MBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3 +b3JrMSMwIQYDVQQDExpBZGRUcnVzdCBRdWFsaWZpZWQgQ0EgUm9vdDAeFw0wMDA1 +MzAxMDQ0NTBaFw0yMDA1MzAxMDQ0NTBaMGcxCzAJBgNVBAYTAlNFMRQwEgYDVQQK +EwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQIE5ldHdvcmsxIzAh +BgNVBAMTGkFkZFRydXN0IFF1YWxpZmllZCBDQSBSb290MIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEA5B6a/twJWoekn0e+EV+vhDTbYjx5eLfpMLXsDBwq +xBb/4Oxx64r1EW7tTw2R0hIYLUkVAcKkIhPHEWT/IhKauY5cLwjPcWqzZwFZ8V1G +87B4pfYOQnrjfxvM0PC3KP0q6p6zsLkEqv32x7SxuCqg+1jxGaBvcCV+PmlKfw8i +2O+tCBGaKZnhqkRFmhJePp1tUvznoD1oL/BLcHwTOK28FSXx1s6rosAx1i+f4P8U +WfyEk9mHfExUE+uf0S0R+Bg6Ot4l2ffTQO2kBhLEO+GRwVY18BTcZTYJbqukB8c1 +0cIDMzZbdSZtQvESa0NvS3GU+jQd7RNuyoB/mC9suWXY6QIDAQABo4HUMIHRMB0G +A1UdDgQWBBQ5lYtii1zJ1IC6WA+XPxUIQ8yYpzALBgNVHQ8EBAMCAQYwDwYDVR0T +AQH/BAUwAwEB/zCBkQYDVR0jBIGJMIGGgBQ5lYtii1zJ1IC6WA+XPxUIQ8yYp6Fr +pGkwZzELMAkGA1UEBhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQL +ExRBZGRUcnVzdCBUVFAgTmV0d29yazEjMCEGA1UEAxMaQWRkVHJ1c3QgUXVhbGlm +aWVkIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBABmrder4i2VhlRO6aQTv +hsoToMeqT2QbPxj2qC0sVY8FtzDqQmodwCVRLae/DLPt7wh/bDxGGuoYQ992zPlm +hpwsaPXpF/gxsxjE1kh9I0xowX67ARRvxdlu3rsEQmr49lx95dr6h+sNNVJn0J6X +dgWTP5XHAeZpVTh/EGGZyeNfpso+gmNIquIISD6q8rKFYqa0p9m9N5xotS1WfbC3 +P6CxB9bpT9zeRXEwMn8bLgn5v1Kh7sKAPgZcLlVAwRv1cEWw3F369nJad9Jjzc9Y +iQBCYz95OdBEsIJuQRno3eDBiFrRHnGTHyQwdOUeqN48Jzd/g66ed8/wMLH/S5no +xqE= +-----END CERTIFICATE----- + +# Issuer: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc. +# Subject: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc. 
+# Label: "Entrust Root Certification Authority" +# Serial: 1164660820 +# MD5 Fingerprint: d6:a5:c3:ed:5d:dd:3e:00:c1:3d:87:92:1f:1d:3f:e4 +# SHA1 Fingerprint: b3:1e:b1:b7:40:e3:6c:84:02:da:dc:37:d4:4d:f5:d4:67:49:52:f9 +# SHA256 Fingerprint: 73:c1:76:43:4f:1b:c6:d5:ad:f4:5b:0e:76:e7:27:28:7c:8d:e5:76:16:c1:e6:e6:14:1a:2b:2c:bc:7d:8e:4c +-----BEGIN CERTIFICATE----- +MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0 +Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW +KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl +cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw +NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw +NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy +ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV +BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo +Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4 +4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9 +KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI +rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi +94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB +sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi +gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo +kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE +vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA +A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t +O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua +AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP +9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/ +eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m +0vdXcDazv/wor3ElhVsT/h5/WrQ8 +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Global CA O=GeoTrust Inc. +# Subject: CN=GeoTrust Global CA O=GeoTrust Inc. 
+# Label: "GeoTrust Global CA" +# Serial: 144470 +# MD5 Fingerprint: f7:75:ab:29:fb:51:4e:b7:77:5e:ff:05:3c:99:8e:f5 +# SHA1 Fingerprint: de:28:f4:a4:ff:e5:b9:2f:a3:c5:03:d1:a3:49:a7:f9:96:2a:82:12 +# SHA256 Fingerprint: ff:85:6a:2d:25:1d:cd:88:d3:66:56:f4:50:12:67:98:cf:ab:aa:de:40:79:9c:72:2d:e4:d2:b5:db:36:a7:3a +-----BEGIN CERTIFICATE----- +MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVT +MRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9i +YWwgQ0EwHhcNMDIwNTIxMDQwMDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQG +EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UEAxMSR2VvVHJ1c3Qg +R2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2swYYzD9 +9BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjoBbdq +fnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDv +iS2Aelet8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU +1XupGc1V3sjs0l44U+VcT4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+ +bw8HHa8sHo9gOeL6NlMTOdReJivbPagUvTLrGAMoUgRx5aszPeE4uwc2hGKceeoW +MPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTA +ephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVkDBF9qn1l +uMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKIn +Z57QzxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfS +tQWVYrmm3ok9Nns4d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcF +PseKUgzbFbS9bZvlxrFUaKnjaZC2mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Un +hw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6pXE0zX5IJL4hmXXeXxx12E6nV +5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvmMw== +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Global CA 2 O=GeoTrust Inc. +# Subject: CN=GeoTrust Global CA 2 O=GeoTrust Inc. +# Label: "GeoTrust Global CA 2" +# Serial: 1 +# MD5 Fingerprint: 0e:40:a7:6c:de:03:5d:8f:d1:0f:e4:d1:8d:f9:6c:a9 +# SHA1 Fingerprint: a9:e9:78:08:14:37:58:88:f2:05:19:b0:6d:2b:0d:2b:60:16:90:7d +# SHA256 Fingerprint: ca:2d:82:a0:86:77:07:2f:8a:b6:76:4f:f0:35:67:6c:fe:3e:5e:32:5e:01:21:72:df:3f:92:09:6d:b7:9b:85 +-----BEGIN CERTIFICATE----- +MIIDZjCCAk6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBEMQswCQYDVQQGEwJVUzEW +MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFs +IENBIDIwHhcNMDQwMzA0MDUwMDAwWhcNMTkwMzA0MDUwMDAwWjBEMQswCQYDVQQG +EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEdMBsGA1UEAxMUR2VvVHJ1c3Qg +R2xvYmFsIENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDvPE1A +PRDfO1MA4Wf+lGAVPoWI8YkNkMgoI5kF6CsgncbzYEbYwbLVjDHZ3CB5JIG/NTL8 +Y2nbsSpr7iFY8gjpeMtvy/wWUsiRxP89c96xPqfCfWbB9X5SJBri1WeR0IIQ13hL +TytCOb1kLUCgsBDTOEhGiKEMuzozKmKY+wCdE1l/bztyqu6mD4b5BWHqZ38MN5aL +5mkWRxHCJ1kDs6ZgwiFAVvqgx306E+PsV8ez1q6diYD3Aecs9pYrEw15LNnA5IZ7 +S4wMcoKK+xfNAGw6EzywhIdLFnopsk/bHdQL82Y3vdj2V7teJHq4PIu5+pIaGoSe +2HSPqht/XvT+RSIhAgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE +FHE4NvICMVNHK266ZUapEBVYIAUJMB8GA1UdIwQYMBaAFHE4NvICMVNHK266ZUap +EBVYIAUJMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQUFAAOCAQEAA/e1K6td +EPx7srJerJsOflN4WT5CBP51o62sgU7XAotexC3IUnbHLB/8gTKY0UvGkpMzNTEv +/NgdRN3ggX+d6YvhZJFiCzkIjKx0nVnZellSlxG5FntvRdOW2TF9AjYPnDtuzywN +A0ZF66D0f0hExghAzN4bcLUprbqLOzRldRtxIR0sFAqwlpW41uryZfspuk/qkZN0 +abby/+Ea0AzRdoXLiiW9l14sbxWZJue2Kf8i7MkCx1YAzUm5s2x7UwQa4qjJqhIF +I8LO57sEAszAR6LkxCkvW0VXiVHuPOtSCP8HNR6fNWpHSlaY0VqFH4z1Ir+rzoPz +4iIprn2DQKi6bA== +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Universal CA O=GeoTrust Inc. +# Subject: CN=GeoTrust Universal CA O=GeoTrust Inc. 
+# Label: "GeoTrust Universal CA" +# Serial: 1 +# MD5 Fingerprint: 92:65:58:8b:a2:1a:31:72:73:68:5c:b4:a5:7a:07:48 +# SHA1 Fingerprint: e6:21:f3:35:43:79:05:9a:4b:68:30:9d:8a:2f:74:22:15:87:ec:79 +# SHA256 Fingerprint: a0:45:9b:9f:63:b2:25:59:f5:fa:5d:4c:6d:b3:f9:f7:2f:f1:93:42:03:35:78:f0:73:bf:1d:1b:46:cb:b9:12 +-----BEGIN CERTIFICATE----- +MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEW +MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVy +c2FsIENBMB4XDTA0MDMwNDA1MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UE +BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xHjAcBgNVBAMTFUdlb1RydXN0 +IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKYV +VaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9tJPi8 +cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTT +QjOgNB0eRXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFh +F7em6fgemdtzbvQKoiFs7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2v +c7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d8Lsrlh/eezJS/R27tQahsiFepdaVaH/w +mZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7VqnJNk22CDtucvc+081xd +VHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3CgaRr0BHdCX +teGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZ +f9hBZ3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfRe +Bi9Fi1jUIxaS5BZuKGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+ +nhutxx9z3SxPGWX9f5NAEC7S8O08ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB +/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0XG0D08DYj3rWMB8GA1UdIwQY +MBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG +9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc +aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fX +IwjhmF7DWgh2qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzyn +ANXH/KttgCJwpQzgXQQpAvvLoJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0z +uzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsKxr2EoyNB3tZ3b4XUhRxQ4K5RirqN +Pnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxFKyDuSN/n3QmOGKja +QI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2DFKW +koRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9 +ER/frslKxfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQt +DF4JbAiXfKM9fJP/P6EUp8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/Sfuvm +bJxPgWp6ZKy7PtXny3YuxadIwVyQD8vIP/rmMuGNG2+k5o7Y+SlIis5z/iw= +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Universal CA 2 O=GeoTrust Inc. +# Subject: CN=GeoTrust Universal CA 2 O=GeoTrust Inc. 
+# Label: "GeoTrust Universal CA 2" +# Serial: 1 +# MD5 Fingerprint: 34:fc:b8:d0:36:db:9e:14:b3:c2:f2:db:8f:e4:94:c7 +# SHA1 Fingerprint: 37:9a:19:7b:41:85:45:35:0c:a6:03:69:f3:3c:2e:af:47:4f:20:79 +# SHA256 Fingerprint: a0:23:4f:3b:c8:52:7c:a5:62:8e:ec:81:ad:5d:69:89:5d:a5:68:0d:c9:1d:1c:b8:47:7f:33:f8:78:b9:5b:0b +-----BEGIN CERTIFICATE----- +MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEW +MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVy +c2FsIENBIDIwHhcNMDQwMzA0MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYD +VQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1 +c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0DE81 +WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUG +FF+3Qs17j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdq +XbboW0W63MOhBW9Wjo8QJqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxL +se4YuU6W3Nx2/zu+z18DwPw76L5GG//aQMJS9/7jOvdqdzXQ2o3rXhhqMcceujwb +KNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2WP0+GfPtDCapkzj4T8Fd +IgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP20gaXT73 +y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRt +hAAnZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgoc +QIgfksILAAX/8sgCSqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4 +Lt1ZrtmhN79UNdxzMk+MBB4zsslG8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAfBgNV +HSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8EBAMCAYYwDQYJ +KoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z +dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQ +L1EuxBRa3ugZ4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgr +Fg5fNuH8KrUwJM/gYwx7WBr+mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSo +ag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpqA1Ihn0CoZ1Dy81of398j9tx4TuaY +T1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpgY+RdM4kX2TGq2tbz +GDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiPpm8m +1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJV +OCiNUW7dFGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH +6aLcr34YEoP9VhdBLtUpgn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwX +QMAJKOSLakhT2+zNVVXxxvjpoixMptEmX36vWkzaH6byHCx+rgIW0lbQL1dTR+iS +-----END CERTIFICATE----- + +# Issuer: CN=America Online Root Certification Authority 1 O=America Online Inc. +# Subject: CN=America Online Root Certification Authority 1 O=America Online Inc. 
+# Label: "America Online Root Certification Authority 1" +# Serial: 1 +# MD5 Fingerprint: 14:f1:08:ad:9d:fa:64:e2:89:e7:1c:cf:a8:ad:7d:5e +# SHA1 Fingerprint: 39:21:c1:15:c1:5d:0e:ca:5c:cb:5b:c4:f0:7d:21:d8:05:0b:56:6a +# SHA256 Fingerprint: 77:40:73:12:c6:3a:15:3d:5b:c0:0b:4e:51:75:9c:df:da:c2:37:dc:2a:33:b6:79:46:e9:8e:9b:fa:68:0a:e3 +-----BEGIN CERTIFICATE----- +MIIDpDCCAoygAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEc +MBoGA1UEChMTQW1lcmljYSBPbmxpbmUgSW5jLjE2MDQGA1UEAxMtQW1lcmljYSBP +bmxpbmUgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAxMB4XDTAyMDUyODA2 +MDAwMFoXDTM3MTExOTIwNDMwMFowYzELMAkGA1UEBhMCVVMxHDAaBgNVBAoTE0Ft +ZXJpY2EgT25saW5lIEluYy4xNjA0BgNVBAMTLUFtZXJpY2EgT25saW5lIFJvb3Qg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgMTCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAKgv6KRpBgNHw+kqmP8ZonCaxlCyfqXfaE0bfA+2l2h9LaaLl+lk +hsmj76CGv2BlnEtUiMJIxUo5vxTjWVXlGbR0yLQFOVwWpeKVBeASrlmLojNoWBym +1BW32J/X3HGrfpq/m44zDyL9Hy7nBzbvYjnF3cu6JRQj3gzGPTzOggjmZj7aUTsW +OqMFf6Dch9Wc/HKpoH145LcxVR5lu9RhsCFg7RAycsWSJR74kEoYeEfffjA3PlAb +2xzTa5qGUwew76wGePiEmf4hjUyAtgyC9mZweRrTT6PP8c9GsEsPPt2IYriMqQko +O3rHl+Ee5fSfwMCuJKDIodkP1nsmgmkyPacCAwEAAaNjMGEwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUAK3Zo/Z59m50qX8zPYEX10zPM94wHwYDVR0jBBgwFoAU +AK3Zo/Z59m50qX8zPYEX10zPM94wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEB +BQUAA4IBAQB8itEfGDeC4Liwo+1WlchiYZwFos3CYiZhzRAW18y0ZTTQEYqtqKkF +Zu90821fnZmv9ov761KyBZiibyrFVL0lvV+uyIbqRizBs73B6UlwGBaXCBOMIOAb +LjpHyx7kADCVW/RFo8AasAFOq73AI25jP4BKxQft3OJvx8Fi8eNy1gTIdGcL+oir +oQHIb/AUr9KZzVGTfu0uOMe9zkZQPXLjeSWdm4grECDdpbgyn43gKd8hdIaC2y+C +MMbHNYaz+ZZfRtsMRf3zUMNvxsNIrUam4SdHCh0Om7bCd39j8uB9Gr784N/Xx6ds +sPmuujz9dLQR6FgNgLzTqIA6me11zEZ7 +-----END CERTIFICATE----- + +# Issuer: CN=America Online Root Certification Authority 2 O=America Online Inc. +# Subject: CN=America Online Root Certification Authority 2 O=America Online Inc. 
+# Label: "America Online Root Certification Authority 2" +# Serial: 1 +# MD5 Fingerprint: d6:ed:3c:ca:e2:66:0f:af:10:43:0d:77:9b:04:09:bf +# SHA1 Fingerprint: 85:b5:ff:67:9b:0c:79:96:1f:c8:6e:44:22:00:46:13:db:17:92:84 +# SHA256 Fingerprint: 7d:3b:46:5a:60:14:e5:26:c0:af:fc:ee:21:27:d2:31:17:27:ad:81:1c:26:84:2d:00:6a:f3:73:06:cc:80:bd +-----BEGIN CERTIFICATE----- +MIIFpDCCA4ygAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEc +MBoGA1UEChMTQW1lcmljYSBPbmxpbmUgSW5jLjE2MDQGA1UEAxMtQW1lcmljYSBP +bmxpbmUgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAyMB4XDTAyMDUyODA2 +MDAwMFoXDTM3MDkyOTE0MDgwMFowYzELMAkGA1UEBhMCVVMxHDAaBgNVBAoTE0Ft +ZXJpY2EgT25saW5lIEluYy4xNjA0BgNVBAMTLUFtZXJpY2EgT25saW5lIFJvb3Qg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBAMxBRR3pPU0Q9oyxQcngXssNt79Hc9PwVU3dxgz6sWYFas14tNwC +206B89enfHG8dWOgXeMHDEjsJcQDIPT/DjsS/5uN4cbVG7RtIuOx238hZK+GvFci +KtZHgVdEglZTvYYUAQv8f3SkWq7xuhG1m1hagLQ3eAkzfDJHA1zEpYNI9FdWboE2 +JxhP7JsowtS013wMPgwr38oE18aO6lhOqKSlGBxsRZijQdEt0sdtjRnxrXm3gT+9 +BoInLRBYBbV4Bbkv2wxrkJB+FFk4u5QkE+XRnRTf04JNRvCAOVIyD+OEsnpD8l7e +Xz8d3eOyG6ChKiMDbi4BFYdcpnV1x5dhvt6G3NRI270qv0pV2uh9UPu0gBe4lL8B +PeraunzgWGcXuVjgiIZGZ2ydEEdYMtA1fHkqkKJaEBEjNa0vzORKW6fIJ/KD3l67 +Xnfn6KVuY8INXWHQjNJsWiEOyiijzirplcdIz5ZvHZIlyMbGwcEMBawmxNJ10uEq +Z8A9W6Wa6897GqidFEXlD6CaZd4vKL3Ob5Rmg0gp2OpljK+T2WSfVVcmv2/LNzGZ +o2C7HK2JNDJiuEMhBnIMoVxtRsX6Kc8w3onccVvdtjc+31D1uAclJuW8tf48ArO3 ++L5DwYcRlJ4jbBeKuIonDFRH8KmzwICMoCfrHRnjB453cMor9H124HhnAgMBAAGj +YzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFE1FwWg4u3OpaaEg5+31IqEj +FNeeMB8GA1UdIwQYMBaAFE1FwWg4u3OpaaEg5+31IqEjFNeeMA4GA1UdDwEB/wQE +AwIBhjANBgkqhkiG9w0BAQUFAAOCAgEAZ2sGuV9FOypLM7PmG2tZTiLMubekJcmn +xPBUlgtk87FYT15R/LKXeydlwuXK5w0MJXti4/qftIe3RUavg6WXSIylvfEWK5t2 +LHo1YGwRgJfMqZJS5ivmae2p+DYtLHe/YUjRYwu5W1LtGLBDQiKmsXeu3mnFzccc +obGlHBD7GL4acN3Bkku+KVqdPzW+5X1R+FXgJXUjhx5c3LqdsKyzadsXg8n33gy8 +CNyRnqjQ1xU3c6U1uPx+xURABsPr+CKAXEfOAuMRn0T//ZoyzH1kUQ7rVyZ2OuMe +IjzCpjbdGe+n/BLzJsBZMYVMnNjP36TMzCmT/5RtdlwTCJfy7aULTd3oyWgOZtMA +DjMSW7yV5TKQqLPGbIOtd+6Lfn6xqavT4fG2wLHqiMDn05DpKJKUe2h7lyoKZy2F +AjgQ5ANh1NolNscIWC2hp1GvMApJ9aZphwctREZ2jirlmjvXGKL8nDgQzMY70rUX +Om/9riW99XJZZLF0KjhfGEzfz3EEWjbUvy+ZnOjZurGV5gJLIaFb1cFPj65pbVPb +AZO1XB4Y3WRayhgoPmMEEf0cjQAPuDffZ4qdZqkCapH/E8ovXYO8h5Ns3CRRFgQl +Zvqz2cK6Kb6aSDiCmfS/O0oxGfm/jiEzFMpPVF/7zvuPcX/9XhmgD0uRuMRUvAaw +RY8mkaKO/qk= +-----END CERTIFICATE----- + +# Issuer: CN=AAA Certificate Services O=Comodo CA Limited +# Subject: CN=AAA Certificate Services O=Comodo CA Limited +# Label: "Comodo AAA Services root" +# Serial: 1 +# MD5 Fingerprint: 49:79:04:b0:eb:87:19:ac:47:b0:bc:11:51:9b:74:d0 +# SHA1 Fingerprint: d1:eb:23:a4:6d:17:d6:8f:d9:25:64:c2:f1:f1:60:17:64:d8:e3:49 +# SHA256 Fingerprint: d7:a7:a0:fb:5d:7e:27:31:d7:71:e9:48:4e:bc:de:f7:1d:5f:0c:3e:0a:29:48:78:2b:c8:3e:e0:ea:69:9e:f4 +-----BEGIN CERTIFICATE----- +MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb +MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow +GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj +YXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL +MAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE +BwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM +GEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua +BtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe +3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4 
+YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR +rOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm +ez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU +oBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v +QUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t +b2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF +AAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q +GE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz +Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2 +G9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi +l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3 +smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== +-----END CERTIFICATE----- + +# Issuer: CN=Secure Certificate Services O=Comodo CA Limited +# Subject: CN=Secure Certificate Services O=Comodo CA Limited +# Label: "Comodo Secure Services root" +# Serial: 1 +# MD5 Fingerprint: d3:d9:bd:ae:9f:ac:67:24:b3:c8:1b:52:e1:b9:a9:bd +# SHA1 Fingerprint: 4a:65:d5:f4:1d:ef:39:b8:b8:90:4a:4a:d3:64:81:33:cf:c7:a1:d1 +# SHA256 Fingerprint: bd:81:ce:3b:4f:65:91:d1:1a:67:b5:fc:7a:47:fd:ef:25:52:1b:f9:aa:4e:18:b9:e3:df:2e:34:a7:80:3b:e8 +-----BEGIN CERTIFICATE----- +MIIEPzCCAyegAwIBAgIBATANBgkqhkiG9w0BAQUFADB+MQswCQYDVQQGEwJHQjEb +MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow +GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEkMCIGA1UEAwwbU2VjdXJlIENlcnRp +ZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVow +fjELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxJDAiBgNV +BAMMG1NlY3VyZSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAMBxM4KK0HDrc4eCQNUd5MvJDkKQ+d40uaG6EfQlhfPM +cm3ye5drswfxdySRXyWP9nQ95IDC+DwN879A6vfIUtFyb+/Iq0G4bi4XKpVpDM3S +HpR7LZQdqnXXs5jLrLxkU0C8j6ysNstcrbvd4JQX7NFc0L/vpZXJkMWwrPsbQ996 +CF23uPJAGysnnlDOXmWCiIxe004MeuoIkbY2qitC++rCoznl2yY4rYsK7hljxxwk +3wN42ubqwUcaCwtGCd0C/N7Lh1/XMGNooa7cMqG6vv5Eq2i2pRcV/b3Vp6ea5EQz +6YiO/O1R65NxTq0B50SOqy3LqP4BSUjwwN3HaNiS/j0CAwEAAaOBxzCBxDAdBgNV +HQ4EFgQUPNiTiMLAggnMAZkGkyDpnnAJY08wDgYDVR0PAQH/BAQDAgEGMA8GA1Ud +EwEB/wQFMAMBAf8wgYEGA1UdHwR6MHgwO6A5oDeGNWh0dHA6Ly9jcmwuY29tb2Rv +Y2EuY29tL1NlY3VyZUNlcnRpZmljYXRlU2VydmljZXMuY3JsMDmgN6A1hjNodHRw +Oi8vY3JsLmNvbW9kby5uZXQvU2VjdXJlQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmww +DQYJKoZIhvcNAQEFBQADggEBAIcBbSMdflsXfcFhMs+P5/OKlFlm4J4oqF7Tt/Q0 +5qo5spcWxYJvMqTpjOev/e/C6LlLqqP05tqNZSH7uoDrJiiFGv45jN5bBAS0VPmj +Z55B+glSzAVIqMk/IQQezkhr/IXownuvf7fM+F86/TXGDe+X3EyrEeFryzHRbPtI +gKvcnDe4IRRLDXE97IMzbtFuMhbsmMcWi1mmNKsFVy2T96oTy9IT4rcuO81rUBcJ +aD61JlfutuC23bkpgHl9j6PwpCikFcSF9CfUa7/lXORlAnZUtOM3ZiTTGWHIUhDl +izeauan5Hb/qmZJhlv8BzaFfDbxxvA6sCx1HRR3B7Hzs/Sk= +-----END CERTIFICATE----- + +# Issuer: CN=Trusted Certificate Services O=Comodo CA Limited +# Subject: CN=Trusted Certificate Services O=Comodo CA Limited +# Label: "Comodo Trusted Services root" +# Serial: 1 +# MD5 Fingerprint: 91:1b:3f:6e:cd:9e:ab:ee:07:fe:1f:71:d2:b3:61:27 +# SHA1 Fingerprint: e1:9f:e3:0e:8b:84:60:9e:80:9b:17:0d:72:a8:c5:ba:6e:14:09:bd +# SHA256 Fingerprint: 3f:06:e5:56:81:d4:96:f5:be:16:9e:b5:38:9f:9f:2b:8f:f6:1e:17:08:df:68:81:72:48:49:cd:5d:27:cb:69 +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIBATANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJHQjEb +MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow +GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDElMCMGA1UEAwwcVHJ1c3RlZCBDZXJ0 
+aWZpY2F0ZSBTZXJ2aWNlczAeFw0wNDAxMDEwMDAwMDBaFw0yODEyMzEyMzU5NTla +MH8xCzAJBgNVBAYTAkdCMRswGQYDVQQIDBJHcmVhdGVyIE1hbmNoZXN0ZXIxEDAO +BgNVBAcMB1NhbGZvcmQxGjAYBgNVBAoMEUNvbW9kbyBDQSBMaW1pdGVkMSUwIwYD +VQQDDBxUcnVzdGVkIENlcnRpZmljYXRlIFNlcnZpY2VzMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEA33FvNlhTWvI2VFeAxHQIIO0Yfyod5jWaHiWsnOWW +fnJSoBVC21ndZHoa0Lh73TkVvFVIxO06AOoxEbrycXQaZ7jPM8yoMa+j49d/vzMt +TGo87IvDktJTdyR0nAducPy9C1t2ul/y/9c3S0pgePfw+spwtOpZqqPOSC+pw7IL +fhdyFgymBwwbOM/JYrc/oJOlh0Hyt3BAd9i+FHzjqMB6juljatEPmsbS9Is6FARW +1O24zG71++IsWL1/T2sr92AkWCTOJu80kTrV44HQsvAEAtdbtz6SrGsSivnkBbA7 +kUlcsutT6vifR4buv5XAwAaf0lteERv0xwQ1KdJVXOTt6wIDAQABo4HJMIHGMB0G +A1UdDgQWBBTFe1i97doladL3WRaoszLAeydb9DAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zCBgwYDVR0fBHwwejA8oDqgOIY2aHR0cDovL2NybC5jb21v +ZG9jYS5jb20vVHJ1c3RlZENlcnRpZmljYXRlU2VydmljZXMuY3JsMDqgOKA2hjRo +dHRwOi8vY3JsLmNvbW9kby5uZXQvVHJ1c3RlZENlcnRpZmljYXRlU2VydmljZXMu +Y3JsMA0GCSqGSIb3DQEBBQUAA4IBAQDIk4E7ibSvuIQSTI3S8NtwuleGFTQQuS9/ +HrCoiWChisJ3DFBKmwCL2Iv0QeLQg4pKHBQGsKNoBXAxMKdTmw7pSqBYaWcOrp32 +pSxBvzwGa+RZzG0Q8ZZvH9/0BAKkn0U+yNj6NkZEUD+Cl5EfKNsYEYwq5GWDVxIS +jBc/lDb+XbDABHcTuPQV1T84zJQ6VdCsmPW6AF/ghhmBeC8owH7TzEIK9a5QoNE+ +xqFx7D+gIIxmOom0jtTYsU0lR+4viMi14QVFwL4Ucd56/Y57fU0IlqUSc/Atyjcn +dBInTMu2l+nZrghtWjlA3QVHdWpaIbOjGM9O9y5Xt5hwXsjEeLBi +-----END CERTIFICATE----- + +# Issuer: CN=UTN - DATACorp SGC O=The USERTRUST Network OU=http://www.usertrust.com +# Subject: CN=UTN - DATACorp SGC O=The USERTRUST Network OU=http://www.usertrust.com +# Label: "UTN DATACorp SGC Root CA" +# Serial: 91374294542884689855167577680241077609 +# MD5 Fingerprint: b3:a5:3e:77:21:6d:ac:4a:c0:c9:fb:d5:41:3d:ca:06 +# SHA1 Fingerprint: 58:11:9f:0e:12:82:87:ea:50:fd:d9:87:45:6f:4f:78:dc:fa:d6:d4 +# SHA256 Fingerprint: 85:fb:2f:91:dd:12:27:5a:01:45:b6:36:53:4f:84:02:4a:d6:8b:69:b8:ee:88:68:4f:f7:11:37:58:05:b3:48 +-----BEGIN CERTIFICATE----- +MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCB +kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw +IFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBaMIGTMQswCQYDVQQG +EwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYD +VQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cu +dXNlcnRydXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6 +E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ysraP6LnD43m77VkIVni5c7yPeIbkFdicZ +D0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlowHDyUwDAXlCCpVZvNvlK +4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA9P4yPykq +lXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulW +bfXv33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQAB +o4GrMIGoMAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRT +MtGzz3/64PGgXYVOktKeRR20TzA9BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3Js +LnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dDLmNybDAqBgNVHSUEIzAhBggr +BgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3DQEBBQUAA4IB +AQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft +Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyj +j98C5OBxOvG0I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVH +KWss5nbZqSl9Mt3JNjy9rjXxEZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv +2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwPDPafepE39peC4N1xaf92P2BNPM/3 +mfnGV/TJVTl4uix5yaaIK/QI +-----END CERTIFICATE----- + +# Issuer: CN=UTN-USERFirst-Hardware O=The USERTRUST Network OU=http://www.usertrust.com 
+# Subject: CN=UTN-USERFirst-Hardware O=The USERTRUST Network OU=http://www.usertrust.com +# Label: "UTN USERFirst Hardware Root CA" +# Serial: 91374294542884704022267039221184531197 +# MD5 Fingerprint: 4c:56:41:e5:0d:bb:2b:e8:ca:a3:ed:18:08:ad:43:39 +# SHA1 Fingerprint: 04:83:ed:33:99:ac:36:08:05:87:22:ed:bc:5e:46:00:e3:be:f9:d7 +# SHA256 Fingerprint: 6e:a5:47:41:d0:04:66:7e:ed:1b:48:16:63:4a:a3:a7:9e:6e:4b:96:95:0f:82:79:da:fc:8d:9b:d8:81:21:37 +-----BEGIN CERTIFICATE----- +MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCB +lzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3Qt +SGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgxOTIyWjCBlzELMAkG +A1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEe +MBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8v +d3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdh +cmUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn +0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlIwrthdBKWHTxqctU8EGc6Oe0rE81m65UJ +M6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFdtqdt++BxF2uiiPsA3/4a +MXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8i4fDidNd +oI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqI +DsjfPe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9Ksy +oUhbAgMBAAGjgbkwgbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFKFyXyYbKJhDlV0HN9WFlp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0 +dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNFUkZpcnN0LUhhcmR3YXJlLmNy +bDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUFBwMGBggrBgEF +BQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM +//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28Gpgoiskli +CE7/yMgUsogWXecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gE +CJChicsZUN/KHAG8HQQZexB2lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t +3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kniCrVWFCVH/A7HFe7fRQ5YiuayZSS +KqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67nfhmqA== +-----END CERTIFICATE----- + +# Issuer: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com +# Subject: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com +# Label: "XRamp Global CA Root" +# Serial: 107108908803651509692980124233745014957 +# MD5 Fingerprint: a1:0b:44:b3:ca:10:d8:00:6e:9d:0f:d8:0f:92:0a:d1 +# SHA1 Fingerprint: b8:01:86:d1:eb:9c:86:a5:41:04:cf:30:54:f3:4c:52:b7:e5:58:c6 +# SHA256 Fingerprint: ce:cd:dc:90:50:99:d8:da:df:c5:b1:d2:09:b7:37:cb:e2:c1:8c:fb:2c:10:c0:ff:0b:cf:0d:32:86:fc:1a:a2 +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCB +gjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEk +MCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRY +UmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQxMTAxMTcx +NDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3 +dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2Vy +dmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS6 +38eMpSe2OAtp87ZOqCwuIR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCP +KZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMxfoArtYzAQDsRhtDLooY2YKTVMIJt2W7Q +DxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FEzG+gSqmUsE3a56k0enI4 +qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqsAxcZZPRa +JSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNVi 
+PvryxS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0P +BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASs +jVy16bYbMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0 +eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQEwDQYJKoZIhvcNAQEFBQAD +ggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc/Kh4ZzXxHfAR +vbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt +qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLa +IR9NmXmd4c8nnxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSy +i6mx5O+aGtA9aZnuqCij4Tyz8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQ +O+7ETPTsJ3xCwnR8gooJybQDJbw= +-----END CERTIFICATE----- + +# Issuer: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority +# Subject: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority +# Label: "Go Daddy Class 2 CA" +# Serial: 0 +# MD5 Fingerprint: 91:de:06:25:ab:da:fd:32:17:0c:bb:25:17:2a:84:67 +# SHA1 Fingerprint: 27:96:ba:e6:3f:18:01:e2:77:26:1b:a0:d7:77:70:02:8f:20:ee:e4 +# SHA256 Fingerprint: c3:84:6b:f2:4b:9e:93:ca:64:27:4c:0e:c6:7c:1e:cc:5e:02:4f:fc:ac:d2:d7:40:19:35:0e:81:fe:54:6a:e4 +-----BEGIN CERTIFICATE----- +MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh +MB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE +YWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3 +MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo +ZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg +MiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN +ADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA +PVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w +wdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi +EqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY +avx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+ +YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE +sNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h +/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5 +IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD +ggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy +OO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P +TMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ +HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER +dEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf +ReYNnyicsbkqWletNw+vHX/bvZ8= +-----END CERTIFICATE----- + +# Issuer: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority +# Subject: O=Starfield Technologies, Inc. 
OU=Starfield Class 2 Certification Authority +# Label: "Starfield Class 2 CA" +# Serial: 0 +# MD5 Fingerprint: 32:4a:4b:bb:c8:63:69:9b:be:74:9a:c6:dd:1d:46:24 +# SHA1 Fingerprint: ad:7e:1c:28:b0:64:ef:8f:60:03:40:20:14:c3:d0:e3:37:0e:b5:8a +# SHA256 Fingerprint: 14:65:fa:20:53:97:b8:76:fa:a6:f0:a9:95:8e:55:90:e4:0f:cc:7f:aa:4f:b7:c2:c8:67:75:21:fb:5f:b6:58 +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl +MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp +U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw +NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE +ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp +ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3 +DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf +8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN ++lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0 +X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa +K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA +1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G +A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR +zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0 +YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD +bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w +DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3 +L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D +eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp +VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY +WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- + +# Issuer: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing +# Subject: CN=StartCom Certification Authority O=StartCom Ltd. 
OU=Secure Digital Certificate Signing +# Label: "StartCom Certification Authority" +# Serial: 1 +# MD5 Fingerprint: 22:4d:8f:8a:fc:f7:35:c2:bb:57:34:90:7b:8b:22:16 +# SHA1 Fingerprint: 3e:2b:f7:f2:03:1b:96:f3:8c:e6:c4:d8:a8:5d:3e:2d:58:47:6a:0f +# SHA256 Fingerprint: c7:66:a9:be:f2:d4:07:1c:86:3a:31:aa:49:20:e8:13:b2:d1:98:60:8c:b7:b7:cf:e2:11:43:b8:36:df:09:ea +-----BEGIN CERTIFICATE----- +MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEW +MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg +Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM2WhcNMzYwOTE3MTk0NjM2WjB9 +MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi +U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh +cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk +pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf +OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C +Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT +Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi +HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM +Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w ++2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+ +Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3 +Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B +26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID +AQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE +FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9j +ZXJ0LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3Js +LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFM +BgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUHAgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0 +Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRwOi8vY2VydC5zdGFy +dGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYgU3Rh +cnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlh +YmlsaXR5LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2Yg +dGhlIFN0YXJ0Q29tIENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFp +bGFibGUgYXQgaHR0cDovL2NlcnQuc3RhcnRjb20ub3JnL3BvbGljeS5wZGYwEQYJ +YIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNT +TCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOCAgEAFmyZ +9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8 +jhvh3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUW +FjgKXlf2Ysd6AgXmvB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJz +ewT4F+irsfMuXGRuczE6Eri8sxHkfY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1 +ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3fsNrarnDy0RLrHiQi+fHLB5L +EUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZEoalHmdkrQYu +L6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq +yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuC +O3NJo2pXh5Tl1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6V +um0ABj6y6koQOdjQK/W/7HW/lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkySh +NOsF/5oirpt9P/FlUQqmMGqz9IgcgA38corog14= +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root CA" +# Serial: 17154717934120587862167794914071425081 +# MD5 Fingerprint: 87:ce:0b:7b:2a:0e:49:00:e1:58:71:9b:37:a8:93:72 +# SHA1 Fingerprint: 
05:63:b8:63:0d:62:d7:5a:bb:c8:ab:1e:4b:df:b5:a8:99:b2:4d:43 +# SHA256 Fingerprint: 3e:90:99:b5:01:5e:8f:48:6c:00:bc:ea:9d:11:1e:e7:21:fa:ba:35:5a:89:bc:f1:df:69:56:1e:3d:c6:32:5c +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c +JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP +mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+ +wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4 +VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/ +AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB +AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun +pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC +dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf +fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm +NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx +H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root CA" +# Serial: 10944719598952040374951832963794454346 +# MD5 Fingerprint: 79:e4:a9:84:0d:7d:3a:96:d7:c0:4f:e2:43:4c:89:2e +# SHA1 Fingerprint: a8:98:5d:3a:65:e5:e5:c4:b2:d7:d6:6d:40:c6:dd:2f:b1:9c:54:36 +# SHA256 Fingerprint: 43:48:a0:e9:44:4c:78:cb:26:5e:05:8d:5e:89:44:b4:d8:4f:96:62:bd:26:db:25:7f:89:34:a4:43:c7:01:61 +-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD +QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j +b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB +CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97 +nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt +43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P +T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4 +gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO +BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR +TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw +DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr +hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg +06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF +PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls +YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert High Assurance EV Root CA 
O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert High Assurance EV Root CA" +# Serial: 3553400076410547919724730734378100087 +# MD5 Fingerprint: d4:74:de:57:5c:39:b2:d3:9c:85:83:c5:c0:65:49:8a +# SHA1 Fingerprint: 5f:b7:ee:06:33:e2:59:db:ad:0c:4c:9a:e6:d3:8f:1a:61:c7:dc:25 +# SHA256 Fingerprint: 74:31:e5:f4:c3:c1:ce:46:90:77:4f:0b:61:e0:54:40:88:3b:a9:a0:1e:d0:0b:a6:ab:d7:80:6e:d3:b1:18:cf +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j +ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL +MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3 +LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug +RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm ++9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW +PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM +xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB +Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3 +hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg +EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA +FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec +nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z +eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF +hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2 +Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep ++OkuE6N36B9K +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc. +# Subject: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc. 
+# Label: "GeoTrust Primary Certification Authority" +# Serial: 32798226551256963324313806436981982369 +# MD5 Fingerprint: 02:26:c3:01:5e:08:30:37:43:a9:d0:7d:cf:37:e6:bf +# SHA1 Fingerprint: 32:3c:11:8e:1b:f7:b8:b6:52:54:e2:e2:10:0d:d6:02:90:37:f0:96 +# SHA256 Fingerprint: 37:d5:10:06:c5:12:ea:ab:62:64:21:f1:ec:8c:92:01:3f:c5:f8:2a:e9:8e:e5:33:eb:46:19:b8:de:b4:d0:6c +-----BEGIN CERTIFICATE----- +MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBY +MQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMo +R2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEx +MjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgxCzAJBgNVBAYTAlVTMRYwFAYDVQQK +Ew1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQcmltYXJ5IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9 +AWbK7hWNb6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjA +ZIVcFU2Ix7e64HXprQU9nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE0 +7e9GceBrAqg1cmuXm2bgyxx5X9gaBGgeRwLmnWDiNpcB3841kt++Z8dtd1k7j53W +kBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGttm/81w7a4DSwDRp35+MI +mO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJ +KoZIhvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ1 +6CePbJC/kRYkRj5KTs4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl +4b7UVXGYNTq+k+qurUKykG/g/CFNNWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6K +oKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHaFloxt/m0cYASSJlyc1pZU8Fj +UjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG1riR/aYNKxoU +AT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk= +-----END CERTIFICATE----- + +# Issuer: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. - For authorized use only +# Subject: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. 
- For authorized use only +# Label: "thawte Primary Root CA" +# Serial: 69529181992039203566298953787712940909 +# MD5 Fingerprint: 8c:ca:dc:0b:22:ce:f5:be:72:ac:41:1a:11:a8:d8:12 +# SHA1 Fingerprint: 91:c6:d6:ee:3e:8a:c8:63:84:e5:48:c2:99:29:5c:75:6c:81:7b:81 +# SHA256 Fingerprint: 8d:72:2f:81:a9:c1:13:c0:79:1d:f1:36:a2:96:6d:b2:6c:95:0a:97:1d:b4:6b:41:99:f4:ea:54:b7:8b:fb:9f +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCB +qTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf +Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw +MDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNV +BAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3MDAwMDAwWhcNMzYw +NzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5j +LjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYG +A1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCsoPD7gFnUnMekz52hWXMJEEUMDSxuaPFs +W0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ1CRfBsDMRJSUjQJib+ta +3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGcq/gcfomk +6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6 +Sk/KaAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94J +NqR32HuHUETVPm4pafs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XP +r87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUFAAOCAQEAeRHAS7ORtvzw6WfU +DW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeEuzLlQRHAd9mz +YJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX +xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2 +/qxAeeWsEG89jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/ +LHbTY5xZ3Y+m4Q6gLkH3LpVHz7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7 +jVaMaA== +-----END CERTIFICATE----- + +# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. - For authorized use only +# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. 
- For authorized use only +# Label: "VeriSign Class 3 Public Primary Certification Authority - G5" +# Serial: 33037644167568058970164719475676101450 +# MD5 Fingerprint: cb:17:e4:31:67:3e:e2:09:fe:45:57:93:f3:0a:fa:1c +# SHA1 Fingerprint: 4e:b6:d5:78:49:9b:1c:cf:5f:58:1e:ad:56:be:3d:9b:67:44:a5:e5 +# SHA256 Fingerprint: 9a:cf:ab:7e:43:c8:d8:80:d0:6b:26:2a:94:de:ee:e4:b4:65:99:89:c3:d0:ca:f1:9b:af:64:05:e4:1a:b7:df +-----BEGIN CERTIFICATE----- +MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCB +yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL +ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJp +U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxW +ZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCByjEL +MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW +ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp +U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y +aXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvJAgIKXo1 +nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKzj/i5Vbex +t0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIz +SdhDY2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQG +BO+QueQA5N06tRn/Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+ +rCpSx4/VBEnkjWNHiDxpg8v+R70rfk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/ +NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8E +BAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEwHzAH +BgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy +aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKv +MzEzMA0GCSqGSIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzE +p6B4Eq1iDkVwZMXnl2YtmAl+X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y +5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKEKQsTb47bDN0lAtukixlE0kF6BWlK +WE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiCKm0oHw0LxOXnGiYZ +4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vEZV8N +hnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq +-----END CERTIFICATE----- + +# Issuer: CN=COMODO Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO Certification Authority O=COMODO CA Limited +# Label: "COMODO Certification Authority" +# Serial: 104350513648249232941998508985834464573 +# MD5 Fingerprint: 5c:48:dc:f7:42:72:ec:56:94:6d:1c:cc:71:35:80:75 +# SHA1 Fingerprint: 66:31:bf:9e:f7:4f:9e:b6:c9:d5:a6:0c:ba:6a:be:d1:f7:bd:ef:7b +# SHA256 Fingerprint: 0c:2c:d6:3d:f7:80:6f:a3:99:ed:e8:09:11:6b:57:5b:f8:79:89:f0:65:18:f9:80:8c:86:05:03:17:8b:af:66 +-----BEGIN CERTIFICATE----- +MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB +gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV +BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw +MDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl +YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P +RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3 +UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI +2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8 +Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp ++2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+ 
+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O +nKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW +/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g +PKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u +QXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY +SdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv +IC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ +RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4 +zJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd +BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB +ZQ== +-----END CERTIFICATE----- + +# Issuer: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C. +# Subject: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C. +# Label: "Network Solutions Certificate Authority" +# Serial: 116697915152937497490437556386812487904 +# MD5 Fingerprint: d3:f3:a6:16:c0:fa:6b:1d:59:b1:2d:96:4d:0e:11:2e +# SHA1 Fingerprint: 74:f8:a3:c3:ef:e7:b3:90:06:4b:83:90:3c:21:64:60:20:e5:df:ce +# SHA256 Fingerprint: 15:f0:ba:00:a3:ac:7a:f3:ac:88:4c:07:2b:10:11:a0:77:bd:77:c0:97:f4:01:64:b2:f8:59:8a:bd:83:86:0c +-----BEGIN CERTIFICATE----- +MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBi +MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu +MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3Jp +dHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMxMjM1OTU5WjBiMQswCQYDVQQGEwJV +UzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydO +ZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwz +c7MEL7xxjOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPP +OCwGJgl6cvf6UDL4wpPTaaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rl +mGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXTcrA/vGp97Eh/jcOrqnErU2lBUzS1sLnF +BgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc/Qzpf14Dl847ABSHJ3A4 +qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMBAAGjgZcw +gZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwu +bmV0c29sc3NsLmNvbS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3Jp +dHkuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc8 +6fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q4LqILPxFzBiwmZVRDuwduIj/ +h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/GGUsyfJj4akH +/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv +wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHN +pGxlaKFJdlxDydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey +-----END CERTIFICATE----- + +# Issuer: CN=COMODO ECC Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO ECC Certification Authority O=COMODO CA Limited +# Label: "COMODO ECC Certification Authority" +# Serial: 41578283867086692638256921589707938090 +# MD5 Fingerprint: 7c:62:ff:74:9d:31:53:5e:68:4a:d5:78:aa:1e:bf:23 +# SHA1 Fingerprint: 9f:74:4e:9f:2b:4d:ba:ec:0f:31:2c:50:b6:56:3b:8e:2d:93:c3:11 +# SHA256 Fingerprint: 17:93:92:7a:06:14:54:97:89:ad:ce:2f:8f:34:f7:f0:b6:6d:0f:3a:e3:a3:b8:4d:21:ec:15:db:ba:4f:ad:c7 +-----BEGIN CERTIFICATE----- +MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL +MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE +BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT +IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw +MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy 
+ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N +T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR +FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J +cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW +BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm +fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv +GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= +-----END CERTIFICATE----- + +# Issuer: CN=TC TrustCenter Class 2 CA II O=TC TrustCenter GmbH OU=TC TrustCenter Class 2 CA +# Subject: CN=TC TrustCenter Class 2 CA II O=TC TrustCenter GmbH OU=TC TrustCenter Class 2 CA +# Label: "TC TrustCenter Class 2 CA II" +# Serial: 941389028203453866782103406992443 +# MD5 Fingerprint: ce:78:33:5c:59:78:01:6e:18:ea:b9:36:a0:b9:2e:23 +# SHA1 Fingerprint: ae:50:83:ed:7c:f4:5c:bc:8f:61:c6:21:fe:68:5d:79:42:21:15:6e +# SHA256 Fingerprint: e6:b8:f8:76:64:85:f8:07:ae:7f:8d:ac:16:70:46:1f:07:c0:a1:3e:ef:3a:1f:f7:17:53:8d:7a:ba:d3:91:b4 +-----BEGIN CERTIFICATE----- +MIIEqjCCA5KgAwIBAgIOLmoAAQACH9dSISwRXDswDQYJKoZIhvcNAQEFBQAwdjEL +MAkGA1UEBhMCREUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNV +BAsTGVRDIFRydXN0Q2VudGVyIENsYXNzIDIgQ0ExJTAjBgNVBAMTHFRDIFRydXN0 +Q2VudGVyIENsYXNzIDIgQ0EgSUkwHhcNMDYwMTEyMTQzODQzWhcNMjUxMjMxMjI1 +OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1c3RDZW50ZXIgR21i +SDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQTElMCMGA1UEAxMc +VEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAKuAh5uO8MN8h9foJIIRszzdQ2Lu+MNF2ujhoF/RKrLqk2jf +tMjWQ+nEdVl//OEd+DFwIxuInie5e/060smp6RQvkL4DUsFJzfb95AhmC1eKokKg +uNV/aVyQMrKXDcpK3EY+AlWJU+MaWss2xgdW94zPEfRMuzBwBJWl9jmM/XOBCH2J +XjIeIqkiRUuwZi4wzJ9l/fzLganx4Duvo4bRierERXlQXa7pIXSSTYtZgo+U4+lK +8edJsBTj9WLL1XK9H7nSn6DNqPoByNkN39r8R52zyFTfSUrxIan+GE7uSNQZu+99 +5OKdy1u2bv/jzVrndIIFuoAlOMvkaZ6vQaoahPUCAwEAAaOCATQwggEwMA8GA1Ud +EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTjq1RMgKHbVkO3 +kUrL84J6E1wIqzCB7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRy +dXN0Y2VudGVyLmRlL2NybC92Mi90Y19jbGFzc18yX2NhX0lJLmNybIaBn2xkYXA6 +Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBUcnVzdENlbnRlciUyMENsYXNz +JTIwMiUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21iSCxPVT1yb290 +Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u +TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEAjNfffu4bgBCzg/XbEeprS6iS +GNn3Bzn1LL4GdXpoUxUc6krtXvwjshOg0wn/9vYua0Fxec3ibf2uWWuFHbhOIprt +ZjluS5TmVfwLG4t3wVMTZonZKNaL80VKY7f9ewthXbhtvsPcW3nS7Yblok2+XnR8 +au0WOB9/WIFaGusyiC2y8zl3gK9etmF1KdsjTYjKUCjLhdLTEKJZbtOTVAB6okaV +hgWcqRmY5TFyDADiZ9lA4CQze28suVyrZZ0srHbqNZn1l7kPJOzHdiEoZa5X6AeI +dUpWoNIFOqTmjZKILPPy4cHGYdtBxceb9w4aUUXCYWvcZCcXjFq32nQozZfkvQ== +-----END CERTIFICATE----- + +# Issuer: CN=TC TrustCenter Class 3 CA II O=TC TrustCenter GmbH OU=TC TrustCenter Class 3 CA +# Subject: CN=TC TrustCenter Class 3 CA II O=TC TrustCenter GmbH OU=TC TrustCenter Class 3 CA +# Label: "TC TrustCenter Class 3 CA II" +# Serial: 1506523511417715638772220530020799 +# MD5 Fingerprint: 56:5f:aa:80:61:12:17:f6:67:21:e6:2b:6d:61:56:8e +# SHA1 Fingerprint: 80:25:ef:f4:6e:70:c8:d4:72:24:65:84:fe:40:3b:8a:8d:6a:db:f5 +# SHA256 Fingerprint: 8d:a0:84:fc:f9:9c:e0:77:22:f8:9b:32:05:93:98:06:fa:5c:b8:11:e1:c8:13:f6:a1:08:c7:d3:36:b3:40:8e +-----BEGIN CERTIFICATE----- +MIIEqjCCA5KgAwIBAgIOSkcAAQAC5aBd1j8AUb8wDQYJKoZIhvcNAQEFBQAwdjEL +MAkGA1UEBhMCREUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNV 
+BAsTGVRDIFRydXN0Q2VudGVyIENsYXNzIDMgQ0ExJTAjBgNVBAMTHFRDIFRydXN0 +Q2VudGVyIENsYXNzIDMgQ0EgSUkwHhcNMDYwMTEyMTQ0MTU3WhcNMjUxMjMxMjI1 +OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1c3RDZW50ZXIgR21i +SDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQTElMCMGA1UEAxMc +VEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBALTgu1G7OVyLBMVMeRwjhjEQY0NVJz/GRcekPewJDRoeIMJW +Ht4bNwcwIi9v8Qbxq63WyKthoy9DxLCyLfzDlml7forkzMA5EpBCYMnMNWju2l+Q +Vl/NHE1bWEnrDgFPZPosPIlY2C8u4rBo6SI7dYnWRBpl8huXJh0obazovVkdKyT2 +1oQDZogkAHhg8fir/gKya/si+zXmFtGt9i4S5Po1auUZuV3bOx4a+9P/FRQI2Alq +ukWdFHlgfa9Aigdzs5OW03Q0jTo3Kd5c7PXuLjHCINy+8U9/I1LZW+Jk2ZyqBwi1 +Rb3R0DHBq1SfqdLDYmAD8bs5SpJKPQq5ncWg/jcCAwEAAaOCATQwggEwMA8GA1Ud +EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTUovyfs8PYA9NX +XAek0CSnwPIA1DCB7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRy +dXN0Y2VudGVyLmRlL2NybC92Mi90Y19jbGFzc18zX2NhX0lJLmNybIaBn2xkYXA6 +Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBUcnVzdENlbnRlciUyMENsYXNz +JTIwMyUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21iSCxPVT1yb290 +Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u +TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEANmDkcPcGIEPZIxpC8vijsrlN +irTzwppVMXzEO2eatN9NDoqTSheLG43KieHPOh6sHfGcMrSOWXaiQYUlN6AT0PV8 +TtXqluJucsG7Kv5sbviRmEb8yRtXW+rIGjs/sFGYPAfaLFkB2otE6OF0/ado3VS6 +g0bsyEa1+K+XwDsJHI/OcpY9M1ZwvJbL2NV9IJqDnxrcOfHFcqMRA/07QlIp2+gB +95tejNaNhk4Z+rwcvsUhpYeeeC422wlxo3I0+GzjBgnyXlal092Y+tTmBvTwtiBj +S+opvaqCZh77gaqnN60TGOaSw4HBM7uIHqHn4rS9MWwOUT1v+5ZWgOI2F9Hc5A== +-----END CERTIFICATE----- + +# Issuer: CN=TC TrustCenter Universal CA I O=TC TrustCenter GmbH OU=TC TrustCenter Universal CA +# Subject: CN=TC TrustCenter Universal CA I O=TC TrustCenter GmbH OU=TC TrustCenter Universal CA +# Label: "TC TrustCenter Universal CA I" +# Serial: 601024842042189035295619584734726 +# MD5 Fingerprint: 45:e1:a5:72:c5:a9:36:64:40:9e:f5:e4:58:84:67:8c +# SHA1 Fingerprint: 6b:2f:34:ad:89:58:be:62:fd:b0:6b:5c:ce:bb:9d:d9:4f:4e:39:f3 +# SHA256 Fingerprint: eb:f3:c0:2a:87:89:b1:fb:7d:51:19:95:d6:63:b7:29:06:d9:13:ce:0d:5e:10:56:8a:8a:77:e2:58:61:67:e7 +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIOHaIAAQAC7LdggHiNtgYwDQYJKoZIhvcNAQEFBQAweTEL +MAkGA1UEBhMCREUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxJDAiBgNV +BAsTG1RDIFRydXN0Q2VudGVyIFVuaXZlcnNhbCBDQTEmMCQGA1UEAxMdVEMgVHJ1 +c3RDZW50ZXIgVW5pdmVyc2FsIENBIEkwHhcNMDYwMzIyMTU1NDI4WhcNMjUxMjMx +MjI1OTU5WjB5MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1c3RDZW50ZXIg +R21iSDEkMCIGA1UECxMbVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBMSYwJAYD +VQQDEx1UQyBUcnVzdENlbnRlciBVbml2ZXJzYWwgQ0EgSTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAKR3I5ZEr5D0MacQ9CaHnPM42Q9e3s9B6DGtxnSR +JJZ4Hgmgm5qVSkr1YnwCqMqs+1oEdjneX/H5s7/zA1hV0qq34wQi0fiU2iIIAI3T +fCZdzHd55yx4Oagmcw6iXSVphU9VDprvxrlE4Vc93x9UIuVvZaozhDrzznq+VZeu +jRIPFDPiUHDDSYcTvFHe15gSWu86gzOSBnWLknwSaHtwag+1m7Z3W0hZneTvWq3z +wZ7U10VOylY0Ibw+F1tvdwxIAUMpsN0/lm7mlaoMwCC2/T42J5zjXM9OgdwZu5GQ +fezmlwQek8wiSdeXhrYTCjxDI3d+8NzmzSQfO4ObNDqDNOMCAwEAAaNjMGEwHwYD +VR0jBBgwFoAUkqR1LKSevoFE63n8isWVpesQdXMwDwYDVR0TAQH/BAUwAwEB/zAO +BgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFJKkdSyknr6BROt5/IrFlaXrEHVzMA0G +CSqGSIb3DQEBBQUAA4IBAQAo0uCG1eb4e/CX3CJrO5UUVg8RMKWaTzqwOuAGy2X1 +7caXJ/4l8lfmXpWMPmRgFVp/Lw0BxbFg/UU1z/CyvwbZ71q+s2IhtNerNXxTPqYn +8aEt2hojnczd7Dwtnic0XQ/CNnm8yUpiLe1r2X1BQ3y2qsrtYbE3ghUJGooWMNjs +ydZHcnhLEEYUjl8Or+zHL6sQ17bxbuyGssLoDZJz3KL0Dzq/YSMQiZxIQG5wALPT +ujdEWBF6AmqI8Dc08BnprNRlc/ZpjGSUOnmFKbAWKwyCPwacx/0QK54PLLae4xW/ +2TYcuiUaUj0a7CIMHOCkoj3w6DnPgcB77V0fb8XQC9eY +-----END CERTIFICATE----- + +# Issuer: CN=Cybertrust Global 
Root O=Cybertrust, Inc +# Subject: CN=Cybertrust Global Root O=Cybertrust, Inc +# Label: "Cybertrust Global Root" +# Serial: 4835703278459682877484360 +# MD5 Fingerprint: 72:e4:4a:87:e3:69:40:80:77:ea:bc:e3:f4:ff:f0:e1 +# SHA1 Fingerprint: 5f:43:e5:b1:bf:f8:78:8c:ac:1c:c7:ca:4a:9a:c6:22:2b:cc:34:c6 +# SHA256 Fingerprint: 96:0a:df:00:63:e9:63:56:75:0c:29:65:dd:0a:08:67:da:0b:9c:bd:6e:77:71:4a:ea:fb:23:49:ab:39:3d:a3 +-----BEGIN CERTIFICATE----- +MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYG +A1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2Jh +bCBSb290MB4XDTA2MTIxNTA4MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UE +ChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBS +b290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA+Mi8vRRQZhP/8NN5 +7CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW0ozS +J8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2y +HLtgwEZLAfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iP +t3sMpTjr3kfb1V05/Iin89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNz +FtApD0mpSPCzqrdsxacwOUBdrsTiXSZT8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAY +XSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/ +MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2MDSgMqAw +hi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3Js +MB8GA1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUA +A4IBAQBW7wojoFROlZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMj +Wqd8BfP9IjsO0QbE2zZMcwSO5bAi5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUx +XOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2hO0j9n0Hq0V+09+zv+mKts2o +omcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+TX3EJIrduPuoc +A06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW +WL1WMRJOEcgh4LMRkWXbtKaIOM5V +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. - For authorized use only +# Subject: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. 
- For authorized use only +# Label: "GeoTrust Primary Certification Authority - G3" +# Serial: 28809105769928564313984085209975885599 +# MD5 Fingerprint: b5:e8:34:36:c9:10:44:58:48:70:6d:2e:83:d4:b8:05 +# SHA1 Fingerprint: 03:9e:ed:b8:0b:e7:a0:3c:69:53:89:3b:20:d2:d9:32:3a:4c:2a:fd +# SHA256 Fingerprint: b4:78:b8:12:25:0d:f8:78:63:5c:2a:a7:ec:7d:15:5e:aa:62:5e:e8:29:16:e2:cd:29:43:61:88:6c:d1:fb:d4 +-----BEGIN CERTIFICATE----- +MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCB +mDELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsT +MChjKSAyMDA4IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25s +eTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhv +cml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIzNTk1OVowgZgxCzAJ +BgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg +MjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0 +BgNVBAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz ++uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5jK/BGvESyiaHAKAxJcCGVn2TAppMSAmUm +hsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdEc5IiaacDiGydY8hS2pgn +5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3CIShwiP/W +JmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exAL +DmKudlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZC +huOl1UcCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw +HQYDVR0OBBYEFMR5yo6hTgMdHNxr2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IB +AQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9cr5HqQ6XErhK8WTTOd8lNNTB +zU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbEAp7aDHdlDkQN +kv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD +AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUH +SJsMC8tJP33st/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2G +spki4cErx5z481+oghLrGREt +-----END CERTIFICATE----- + +# Issuer: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only +# Subject: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only +# Label: "thawte Primary Root CA - G2" +# Serial: 71758320672825410020661621085256472406 +# MD5 Fingerprint: 74:9d:ea:60:24:c4:fd:22:53:3e:cc:3a:72:d9:29:4f +# SHA1 Fingerprint: aa:db:bc:22:23:8f:c4:01:a1:27:bb:38:dd:f4:1d:db:08:9e:f0:12 +# SHA256 Fingerprint: a4:31:0d:50:af:18:a6:44:71:90:37:2a:86:af:af:8b:95:1f:fb:43:1d:83:7f:1e:56:88:b4:59:71:ed:15:57 +-----BEGIN CERTIFICATE----- +MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDEL +MAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMp +IDIwMDcgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAi +BgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMjAeFw0wNzExMDUwMDAw +MDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh +d3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBGb3Ig +YXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9v +dCBDQSAtIEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/ +BebfowJPDQfGAFG6DAJSLSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6 +papu+7qzcMBniKI11KOasf2twu8x+qi58/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUmtgAMADna3+FGO6Lts6K +DPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUNG4k8VIZ3 +KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41ox +XZ3Krr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg== +-----END CERTIFICATE----- + +# Issuer: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. 
- For authorized use only +# Subject: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. - For authorized use only +# Label: "thawte Primary Root CA - G3" +# Serial: 127614157056681299805556476275995414779 +# MD5 Fingerprint: fb:1b:5d:43:8a:94:cd:44:c6:76:f2:43:4b:47:e7:31 +# SHA1 Fingerprint: f1:8b:53:8d:1b:e9:03:b6:a6:f0:56:43:5b:17:15:89:ca:f3:6b:f2 +# SHA256 Fingerprint: 4b:03:f4:58:07:ad:70:f2:1b:fc:2c:ae:71:c9:fd:e4:60:4c:06:4c:f5:ff:b6:86:ba:e5:db:aa:d7:fd:d3:4c +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCB +rjELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf +Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw +MDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNV +BAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0wODA0MDIwMDAwMDBa +Fw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhhd3Rl +LCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9u +MTgwNgYDVQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXpl +ZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEcz +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsr8nLPvb2FvdeHsbnndm +gcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2AtP0LMqmsywCPLLEHd5N/8 +YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC+BsUa0Lf +b1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS9 +9irY7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2S +zhkGcuYMXDhpxwTWvGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUk +OQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNV +HQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJKoZIhvcNAQELBQADggEBABpA +2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweKA3rD6z8KLFIW +oCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu +t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7c +KUGRIjxpp7sC8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fM +m7v/OeZWYdMKp8RcTGB7BXcmer/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZu +MdRAGmI0Nj81Aa6sY6A= +-----END CERTIFICATE----- + +# Issuer: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only +# Subject: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. 
- For authorized use only +# Label: "GeoTrust Primary Certification Authority - G2" +# Serial: 80682863203381065782177908751794619243 +# MD5 Fingerprint: 01:5e:d8:6b:bd:6f:3d:8e:a1:31:f8:12:e0:98:73:6a +# SHA1 Fingerprint: 8d:17:84:d5:37:f3:03:7d:ec:70:fe:57:8b:51:9a:99:e6:10:d7:b0 +# SHA256 Fingerprint: 5e:db:7a:c4:3b:82:a0:6a:87:61:e8:d7:be:49:79:eb:f2:61:1f:7d:d7:9b:f9:1c:1c:6b:56:6a:21:9e:d7:66 +-----BEGIN CERTIFICATE----- +MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDEL +MAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChj +KSAyMDA3IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2 +MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0 +eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1OVowgZgxCzAJBgNV +BAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykgMjAw +NyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNV +BAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBH +MjB2MBAGByqGSM49AgEGBSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcL +So17VDs6bl8VAsBQps8lL33KSLjHUGMcKiEIfJo22Av+0SbFWDEwKCXzXV2juLal +tJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO +BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+EVXVMAoG +CCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGT +qQ7mndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBucz +rD6ogRLQy7rQkgu2npaqBA+K +-----END CERTIFICATE----- + +# Issuer: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only +# Subject: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only +# Label: "VeriSign Universal Root Certification Authority" +# Serial: 85209574734084581917763752644031726877 +# MD5 Fingerprint: 8e:ad:b5:01:aa:4d:81:e4:8c:1d:d1:e1:14:00:95:19 +# SHA1 Fingerprint: 36:79:ca:35:66:87:72:30:4d:30:a5:fb:87:3b:0f:a7:7b:b7:0d:54 +# SHA256 Fingerprint: 23:99:56:11:27:a5:71:25:de:8c:ef:ea:61:0d:df:2f:a0:78:b5:c8:06:7f:4e:82:82:90:bf:b8:60:e8:4b:3c +-----BEGIN CERTIFICATE----- +MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCB +vTELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL +ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJp +U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MTgwNgYDVQQDEy9W +ZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe +Fw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJVUzEX +MBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0 +IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9y +IGF1dGhvcml6ZWQgdXNlIG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNh +bCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj1mCOkdeQmIN65lgZOIzF +9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGPMiJhgsWH +H26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+H +LL729fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN +/BMReYTtXlT2NJ8IAfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPT +rJ9VAMf2CGqUuV/c4DPxhGD5WycRtPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1Ud +EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0GCCsGAQUFBwEMBGEwX6FdoFsw +WTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2Oa8PPgGrUSBgs +exkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud +DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4 +sAPmLGd75JR3Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+ 
+seQxIcaBlVZaDrHC1LGmWazxY8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz +4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTxP/jgdFcrGJ2BtMQo2pSXpXDrrB2+ +BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+PwGZsY6rp2aQW9IHR +lRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4mJO3 +7M2CYfE45k+XmCpajQ== +-----END CERTIFICATE----- + +# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only +# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only +# Label: "VeriSign Class 3 Public Primary Certification Authority - G4" +# Serial: 63143484348153506665311985501458640051 +# MD5 Fingerprint: 3a:52:e1:e7:fd:6f:3a:e3:6f:f3:6f:99:1b:f9:22:41 +# SHA1 Fingerprint: 22:d5:d8:df:8f:02:31:d1:8d:f7:9d:b7:cf:8a:2d:64:c9:3f:6c:3a +# SHA256 Fingerprint: 69:dd:d7:ea:90:bb:57:c9:3e:13:5d:c8:5e:a6:fc:d5:48:0b:60:32:39:bd:c4:54:fc:75:8b:2a:26:cf:7f:79 +-----BEGIN CERTIFICATE----- +MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjEL +MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW +ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp +U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y +aXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjELMAkG +A1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJp +U2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwg +SW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2ln +biBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8Utpkmw4tXNherJI9/gHm +GUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGzrl0Bp3ve +fLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJ +aW1hZ2UvZ2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYj +aHR0cDovL2xvZ28udmVyaXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMW +kf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMDA2gAMGUCMGYhDBgmYFo4e1ZC +4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIxAJw9SDkjOVga +FRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA== +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 +# Label: "GlobalSign Root CA - R3" +# Serial: 4835703278459759426209954 +# MD5 Fingerprint: c5:df:b8:49:ca:05:13:55:ee:2d:ba:1a:c3:3e:b0:28 +# SHA1 Fingerprint: d6:9b:56:11:48:f0:1c:77:c5:45:78:c1:09:26:df:5b:85:69:76:ad +# SHA256 Fingerprint: cb:b5:22:d7:b7:f1:27:ad:6a:01:13:86:5b:df:1c:d4:10:2e:7d:07:59:af:63:5a:7c:f4:72:0d:c9:63:c5:3b +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G +A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp +Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4 +MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG +A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8 +RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT +gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm +KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd +QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ 
+XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o +LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU +RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp +jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK +6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX +mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs +Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH +WD9f +-----END CERTIFICATE----- + +# Issuer: CN=TC TrustCenter Universal CA III O=TC TrustCenter GmbH OU=TC TrustCenter Universal CA +# Subject: CN=TC TrustCenter Universal CA III O=TC TrustCenter GmbH OU=TC TrustCenter Universal CA +# Label: "TC TrustCenter Universal CA III" +# Serial: 2010889993983507346460533407902964 +# MD5 Fingerprint: 9f:dd:db:ab:ff:8e:ff:45:21:5f:f0:6c:9d:8f:fe:2b +# SHA1 Fingerprint: 96:56:cd:7b:57:96:98:95:d0:e1:41:46:68:06:fb:b8:c6:11:06:87 +# SHA256 Fingerprint: 30:9b:4a:87:f6:ca:56:c9:31:69:aa:a9:9c:6d:98:88:54:d7:89:2b:d5:43:7e:2d:07:b2:9c:be:da:55:d3:5d +-----BEGIN CERTIFICATE----- +MIID4TCCAsmgAwIBAgIOYyUAAQACFI0zFQLkbPQwDQYJKoZIhvcNAQEFBQAwezEL +MAkGA1UEBhMCREUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxJDAiBgNV +BAsTG1RDIFRydXN0Q2VudGVyIFVuaXZlcnNhbCBDQTEoMCYGA1UEAxMfVEMgVHJ1 +c3RDZW50ZXIgVW5pdmVyc2FsIENBIElJSTAeFw0wOTA5MDkwODE1MjdaFw0yOTEy +MzEyMzU5NTlaMHsxCzAJBgNVBAYTAkRFMRwwGgYDVQQKExNUQyBUcnVzdENlbnRl +ciBHbWJIMSQwIgYDVQQLExtUQyBUcnVzdENlbnRlciBVbml2ZXJzYWwgQ0ExKDAm +BgNVBAMTH1RDIFRydXN0Q2VudGVyIFVuaXZlcnNhbCBDQSBJSUkwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDC2pxisLlxErALyBpXsq6DFJmzNEubkKLF +5+cvAqBNLaT6hdqbJYUtQCggbergvbFIgyIpRJ9Og+41URNzdNW88jBmlFPAQDYv +DIRlzg9uwliT6CwLOunBjvvya8o84pxOjuT5fdMnnxvVZ3iHLX8LR7PH6MlIfK8v +zArZQe+f/prhsq75U7Xl6UafYOPfjdN/+5Z+s7Vy+EutCHnNaYlAJ/Uqwa1D7KRT +yGG299J5KmcYdkhtWyUB0SbFt1dpIxVbYYqt8Bst2a9c8SaQaanVDED1M4BDj5yj +dipFtK+/fz6HP3bFzSreIMUWWMv5G/UPyw0RUmS40nZid4PxWJ//AgMBAAGjYzBh +MB8GA1UdIwQYMBaAFFbn4VslQ4Dg9ozhcbyO5YAvxEjiMA8GA1UdEwEB/wQFMAMB +Af8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRW5+FbJUOA4PaM4XG8juWAL8RI +4jANBgkqhkiG9w0BAQUFAAOCAQEAg8ev6n9NCjw5sWi+e22JLumzCecYV42Fmhfz +dkJQEw/HkG8zrcVJYCtsSVgZ1OK+t7+rSbyUyKu+KGwWaODIl0YgoGhnYIg5IFHY +aAERzqf2EQf27OysGh+yZm5WZ2B6dF7AbZc2rrUNXWZzwCUyRdhKBgePxLcHsU0G +DeGl6/R1yrqc0L2z0zIkTO5+4nYES0lT2PLpVDP85XEfPRRclkvxOvIAu2y0+pZV +CIgJwcyRGSmwIC3/yzikQOEXvnlhgP8HA4ZMTnsGnxGGjYnuJ8Tb4rwZjgvDwxPH +LQNjO9Po5KIqwoIIlBZU8O8fJ5AluA0OKBtHd0e9HKgl8ZS0Zg== +-----END CERTIFICATE----- + +# Issuer: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. +# Subject: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. 
+# Label: "Go Daddy Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: 80:3a:bc:22:c1:e6:fb:8d:9b:3b:27:4a:32:1b:9a:01 +# SHA1 Fingerprint: 47:be:ab:c9:22:ea:e8:0e:78:78:34:62:a7:9f:45:c2:54:fd:e6:8b +# SHA256 Fingerprint: 45:14:0b:32:47:eb:9c:c8:c5:b4:f0:d7:b5:30:91:f7:32:92:08:9e:6e:5a:63:e2:74:9d:d3:ac:a9:19:8e:da +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT +EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp +ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz +NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH +EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE +AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD +E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH +/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy +DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh +GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR +tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA +AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE +FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX +WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu +9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr +gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo +2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI +4uJEvlz36hz1 +-----END CERTIFICATE----- + +# Issuer: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. +# Subject: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. 
+# Label: "Starfield Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: d6:39:81:c6:52:7e:96:69:fc:fc:ca:66:ed:05:f2:96 +# SHA1 Fingerprint: b5:1c:06:7c:ee:2b:0c:3d:f8:55:ab:2d:92:f4:fe:39:d4:e7:0f:0e +# SHA256 Fingerprint: 2c:e1:cb:0b:f9:d2:f9:e1:02:99:3f:be:21:51:52:c3:b2:dd:0c:ab:de:1c:68:e5:31:9b:83:91:54:db:b7:f5 +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs +ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw +MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj +aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp +Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg +nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1 +HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N +Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN +dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0 +HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO +BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G +CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU +sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3 +4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg +8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1 +mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- + +# Issuer: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc. +# Subject: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc. 
+# Label: "Starfield Services Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: 17:35:74:af:7b:61:1c:eb:f4:f9:3c:e2:ee:40:f9:a2 +# SHA1 Fingerprint: 92:5a:8f:8d:2c:6d:04:e0:66:5f:59:6a:ff:22:d8:63:e8:25:6f:3f +# SHA256 Fingerprint: 56:8d:69:05:a2:c8:87:08:a4:b3:02:51:90:ed:cf:ed:b1:97:4a:60:6a:13:c6:e5:29:0f:cb:2a:e6:3e:da:b5 +-----BEGIN CERTIFICATE----- +MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs +ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 +MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD +VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy +ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy +dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p +OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2 +8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K +Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe +hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk +6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw +DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q +AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI +bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB +ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z +qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd +iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn +0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN +sSi6 +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Commercial O=AffirmTrust +# Subject: CN=AffirmTrust Commercial O=AffirmTrust +# Label: "AffirmTrust Commercial" +# Serial: 8608355977964138876 +# MD5 Fingerprint: 82:92:ba:5b:ef:cd:8a:6f:a6:3d:55:f9:84:f6:d6:b7 +# SHA1 Fingerprint: f9:b5:b6:32:45:5f:9c:be:ec:57:5f:80:dc:e9:6e:2c:c7:b2:78:b7 +# SHA256 Fingerprint: 03:76:ab:1d:54:c5:f9:80:3c:e4:b2:e2:01:a0:ee:7e:ef:7b:57:b6:36:e8:a9:3c:9b:8d:48:60:c9:6f:5f:a7 +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP +Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr +ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL +MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1 +yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr +VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/ +nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG +XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj +vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt +Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g +N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC +nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Networking O=AffirmTrust +# Subject: CN=AffirmTrust Networking 
O=AffirmTrust +# Label: "AffirmTrust Networking" +# Serial: 8957382827206547757 +# MD5 Fingerprint: 42:65:ca:be:01:9a:9a:4c:a9:8c:41:49:cd:c0:d5:7f +# SHA1 Fingerprint: 29:36:21:02:8b:20:ed:02:f5:66:c5:32:d1:d6:ed:90:9f:45:00:2f +# SHA256 Fingerprint: 0a:81:ec:5a:92:97:77:f1:45:90:4a:f3:8d:5d:50:9f:66:b5:e2:c5:8f:cd:b5:31:05:8b:0e:17:f3:f0:b4:1b +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y +YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua +kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL +QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp +6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG +yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i +QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO +tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu +QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ +Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u +olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48 +x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Premium O=AffirmTrust +# Subject: CN=AffirmTrust Premium O=AffirmTrust +# Label: "AffirmTrust Premium" +# Serial: 7893706540734352110 +# MD5 Fingerprint: c4:5d:0e:48:b6:ac:28:30:4e:0a:bc:f9:38:16:87:57 +# SHA1 Fingerprint: d8:a6:33:2c:e0:03:6f:b1:85:f6:63:4f:7d:6a:06:65:26:32:28:27 +# SHA256 Fingerprint: 70:a7:3f:7f:37:6b:60:07:42:48:90:45:34:b1:14:82:d5:bf:0e:69:8e:cc:49:8d:f5:25:77:eb:f2:e9:3b:9a +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz +dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG +A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U +cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf +qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ +JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ ++jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS +s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5 +HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7 +70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG +V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S +qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S +5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia +C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX +OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE +FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2 +KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg +Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B +8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ +MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc +0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ 
+u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF +u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH +YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8 +GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO +RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e +KeC2uAloGRwYQw== +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Premium ECC O=AffirmTrust +# Subject: CN=AffirmTrust Premium ECC O=AffirmTrust +# Label: "AffirmTrust Premium ECC" +# Serial: 8401224907861490260 +# MD5 Fingerprint: 64:b0:09:55:cf:b1:d5:99:e2:be:13:ab:a6:5d:ea:4d +# SHA1 Fingerprint: b8:23:6b:00:2f:1d:16:86:53:01:55:6c:11:a4:37:ca:eb:ff:c3:bb +# SHA256 Fingerprint: bd:71:fd:f6:da:97:e4:cf:62:d1:64:7a:dd:25:81:b0:7d:79:ad:f8:39:7e:b4:ec:ba:9c:5e:84:88:82:14:23 +-----BEGIN CERTIFICATE----- +MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC +VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ +cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ +BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt +VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D +0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9 +ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G +A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs +aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I +flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ== +-----END CERTIFICATE----- + +# Issuer: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing +# Subject: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing +# Label: "StartCom Certification Authority" +# Serial: 45 +# MD5 Fingerprint: c9:3b:0d:84:41:fc:a4:76:79:23:08:57:de:10:19:16 +# SHA1 Fingerprint: a3:f1:33:3f:e2:42:bf:cf:c5:d1:4e:8f:39:42:98:40:68:10:d1:a0 +# SHA256 Fingerprint: e1:78:90:ee:09:a3:fb:f4:f4:8b:9c:41:4a:17:d6:37:b7:a5:06:47:e9:bc:75:23:22:72:7f:cc:17:42:a9:11 +-----BEGIN CERTIFICATE----- +MIIHhzCCBW+gAwIBAgIBLTANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJJTDEW +MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg +Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM3WhcNMzYwOTE3MTk0NjM2WjB9 +MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi +U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh +cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk +pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf +OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C +Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT +Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi +HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM +Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w ++2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+ +Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3 +Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B +26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID +AQABo4ICEDCCAgwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD +VR0OBBYEFE4L7xqkQFulF2mHMMo0aEPQQa7yMB8GA1UdIwQYMBaAFE4L7xqkQFul +F2mHMMo0aEPQQa7yMIIBWgYDVR0gBIIBUTCCAU0wggFJBgsrBgEEAYG1NwEBATCC 
+ATgwLgYIKwYBBQUHAgEWImh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL3BvbGljeS5w +ZGYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL2ludGVybWVk +aWF0ZS5wZGYwgc8GCCsGAQUFBwICMIHCMCcWIFN0YXJ0IENvbW1lcmNpYWwgKFN0 +YXJ0Q29tKSBMdGQuMAMCAQEagZZMaW1pdGVkIExpYWJpbGl0eSwgcmVhZCB0aGUg +c2VjdGlvbiAqTGVnYWwgTGltaXRhdGlvbnMqIG9mIHRoZSBTdGFydENvbSBDZXJ0 +aWZpY2F0aW9uIEF1dGhvcml0eSBQb2xpY3kgYXZhaWxhYmxlIGF0IGh0dHA6Ly93 +d3cuc3RhcnRzc2wuY29tL3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgG +CWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1 +dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAgEAjo/n3JR5fPGFf59Jb2vKXfuM/gTF +wWLRfUKKvFO3lANmMD+x5wqnUCBVJX92ehQN6wQOQOY+2IirByeDqXWmN3PH/UvS +Ta0XQMhGvjt/UfzDtgUx3M2FIk5xt/JxXrAaxrqTi3iSSoX4eA+D/i+tLPfkpLst +0OcNOrg+zvZ49q5HJMqjNTbOx8aHmNrs++myziebiMMEofYLWWivydsQD032ZGNc +pRJvkrKTlMeIFw6Ttn5ii5B/q06f/ON1FE8qMt9bDeD1e5MNq6HPh+GlBEXoPBKl +CcWw0bdT82AUuoVpaiF8H3VhFyAXe2w7QSlc4axa0c2Mm+tgHRns9+Ww2vl5GKVF +P0lDV9LdJNUso/2RjSe15esUBppMeyG7Oq0wBhjA2MFrLH9ZXF2RsXAiV+uKa0hK +1Q8p7MZAwC+ITGgBF3f0JBlPvfrhsiAhS90a2Cl9qrjeVOwhVYBsHvUwyKMQ5bLm +KhQxw4UtjJixhlpPiVktucf3HMiKf8CdBUrmQk9io20ppB+Fq9vlgcitKj1MXVuE +JnHEhV5xJMqlG2zYYdMa4FTbzrqpMrUi9nNBCV24F10OD5mQ1kfabwo6YigUZ4LZ +8dCAWZvLMdibD4x3TrVoivJs9iQOLWxwxXPR3hTQcY+203sC9uO41Alua551hDnm +fyWl8kgAwKQB2j8= +-----END CERTIFICATE----- + +# Issuer: CN=StartCom Certification Authority G2 O=StartCom Ltd. +# Subject: CN=StartCom Certification Authority G2 O=StartCom Ltd. +# Label: "StartCom Certification Authority G2" +# Serial: 59 +# MD5 Fingerprint: 78:4b:fb:9e:64:82:0a:d3:b8:4c:62:f3:64:f2:90:64 +# SHA1 Fingerprint: 31:f1:fd:68:22:63:20:ee:c6:3b:3f:9d:ea:4a:3e:53:7c:7c:39:17 +# SHA256 Fingerprint: c7:ba:65:67:de:93:a7:98:ae:1f:aa:79:1e:71:2d:37:8f:ae:1f:93:c4:39:7f:ea:44:1b:b7:cb:e6:fd:59:95 +-----BEGIN CERTIFICATE----- +MIIFYzCCA0ugAwIBAgIBOzANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJJTDEW +MBQGA1UEChMNU3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkgRzIwHhcNMTAwMTAxMDEwMDAxWhcNMzkxMjMxMjM1 +OTAxWjBTMQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjEsMCoG +A1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgRzIwggIiMA0G +CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2iTZbB7cgNr2Cu+EWIAOVeq8Oo1XJ +JZlKxdBWQYeQTSFgpBSHO839sj60ZwNq7eEPS8CRhXBF4EKe3ikj1AENoBB5uNsD +vfOpL9HG4A/LnooUCri99lZi8cVytjIl2bLzvWXFDSxu1ZJvGIsAQRSCb0AgJnoo +D/Uefyf3lLE3PbfHkffiAez9lInhzG7TNtYKGXmu1zSCZf98Qru23QumNK9LYP5/ +Q0kGi4xDuFby2X8hQxfqp0iVAXV16iulQ5XqFYSdCI0mblWbq9zSOdIxHWDirMxW +RST1HFSr7obdljKF+ExP6JV2tgXdNiNnvP8V4so75qbsO+wmETRIjfaAKxojAuuK +HDp2KntWFhxyKrOq42ClAJ8Em+JvHhRYW6Vsi1g8w7pOOlz34ZYrPu8HvKTlXcxN +nw3h3Kq74W4a7I/htkxNeXJdFzULHdfBR9qWJODQcqhaX2YtENwvKhOuJv4KHBnM +0D4LnMgJLvlblnpHnOl68wVQdJVznjAJ85eCXuaPOQgeWeU1FEIT/wCc976qUM/i +UUjXuG+v+E5+M5iSFGI6dWPPe/regjupuznixL0sAA7IF6wT700ljtizkC+p2il9 +Ha90OrInwMEePnWjFqmveiJdnxMaz6eg6+OGCtP95paV1yPIN93EfKo2rJgaErHg +TuixO/XWb/Ew1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQE +AwIBBjAdBgNVHQ4EFgQUS8W0QGutHLOlHGVuRjaJhwUMDrYwDQYJKoZIhvcNAQEL +BQADggIBAHNXPyzVlTJ+N9uWkusZXn5T50HsEbZH77Xe7XRcxfGOSeD8bpkTzZ+K +2s06Ctg6Wgk/XzTQLwPSZh0avZyQN8gMjgdalEVGKua+etqhqaRpEpKwfTbURIfX +UfEpY9Z1zRbkJ4kd+MIySP3bmdCPX1R0zKxnNBFi2QwKN4fRoxdIjtIXHfbX/dtl +6/2o1PXWT6RbdejF0mCy2wl+JYt7ulKSnj7oxXehPOBKc2thz4bcQ///If4jXSRK +9dNtD2IEBVeC2m6kMyV5Sy5UGYvMLD0w6dEG/+gyRr61M3Z3qAFdlsHB1b6uJcDJ +HgoJIIihDsnzb02CVAAgp9KP5DlUFy6NHrgbuxu9mk47EDTcnIhT76IxW1hPkWLI +wpqazRVdOKnWvvgTtZ8SafJQYqz7Fzf07rh1Z2AQ+4NQ+US1dZxAF7L+/XldblhY +XzD8AK6vM8EOTmy6p6ahfzLbOOCxchcKK5HsamMm7YnUeMx0HgX4a/6ManY5Ka5l 
+IxKVCCIcl85bBu4M4ru8H0ST9tg4RQUh7eStqxK2A6RCLi3ECToDZ2mEmuFZkIoo +hdVddLHRDiBYmxOlsGOm7XtH/UVVMKTumtTm4ofvmMkyghEpIrwACjFeLQ/Ajulr +so8uBtjRkcfGEvRM/TAXw8HaOFvjqermobp573PYtlNXLfbQ4ddI +-----END CERTIFICATE----- + +# Issuer: O=Digital Signature Trust Co., CN=DST Root CA X3 +# Subject: O=Digital Signature Trust Co., CN=DST Root CA X3 +# Label: "IdenTrust DST Root CA X3" +# Serial: 44AFB080D6A327BA893039862EF8406B +# MD5 Fingerprint: 41:03:52:DC:0F:F7:50:1B:16:F0:02:8E:BA:6F:45:C5 +# SHA1 Fingerprint: DA:C9:02:4F:54:D8:F6:DF:94:93:5F:B1:73:26:38:CA:6A:D7:7C:13 +# SHA256 Fingerprint: 06:87:26:03:31:A7:24:03:D9:09:F1:05:E6:9B:CF:0D:32:E1:BD:24:93:FF:C6:D9:20:6D:11:BC:D6:77:07:39 +-----BEGIN CERTIFICATE----- +MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/ +MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT +DkRTVCBSb290IENBIFgzMB4XDTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVow +PzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMRcwFQYDVQQD +Ew5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmTrE4O +rz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEq +OLl5CjH9UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9b +xiqKqy69cK3FCxolkHRyxXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw +7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40dutolucbY38EVAjqr2m7xPi71XAicPNaD +aeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQMA0GCSqG +SIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69 +ikugdB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXr +AvHRAosZy5Q6XkjEGB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZz +R8srzJmwN0jP41ZL9c8PDHIyh8bwRLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5 +JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubSfZGL+T0yjWW06XyxV3bqxbYo +Ob8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root G2, OU=www.digicert.com, O=DigiCert Inc, C=US +# Subject: CN=DigiCert Global Root G2, OU=www.digicert.com, O=DigiCert Inc, C=US +# Serial: 33af1e6a711a9a0bb2864b11d09fae5 +# MD5 Fingerprint: E4:A6:8A:C8:54:AC:52:42:46:0A:FD:72:48:1B:2A:44 +# SHA1 Fingerprint: DF:3C:24:F9:BF:D6:66:76:1B:26:80:73:FE:06:D1:CC:8D:4F:82:A4 +# SHA256 Fingerprint: CB:3C:CB:B7:60:31:E5:E0:13:8F:8D:D3:9A:23:F9:DE:47:FF:C3:5E:43:C1:14:4C:EA:27:D4:6A:5A:B1:CB:5F +-----BEGIN CERTIFICATE----- +MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH +MjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j +b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI +2/Ou8jqJkTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx +1x7e/dfgy5SDN67sH0NO3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQ +q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz +tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ +vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV +5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY +1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4 +NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG +Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91 
+8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe +pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl +MrY= +-----END CERTIFICATE----- + diff --git a/lib/httplib2/py3/certs.py b/lib/httplib2/py3/certs.py new file mode 100644 index 00000000..59d1ffc7 --- /dev/null +++ b/lib/httplib2/py3/certs.py @@ -0,0 +1,42 @@ +"""Utilities for certificate management.""" + +import os + +certifi_available = False +certifi_where = None +try: + from certifi import where as certifi_where + certifi_available = True +except ImportError: + pass + +custom_ca_locater_available = False +custom_ca_locater_where = None +try: + from ca_certs_locater import get as custom_ca_locater_where + custom_ca_locater_available = True +except ImportError: + pass + + +BUILTIN_CA_CERTS = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "cacerts.txt" +) + + +def where(): + env = os.environ.get("HTTPLIB2_CA_CERTS") + if env is not None: + if os.path.isfile(env): + return env + else: + raise RuntimeError("Environment variable HTTPLIB2_CA_CERTS not a valid file") + if custom_ca_locater_available: + return custom_ca_locater_where() + if certifi_available: + return certifi_where() + return BUILTIN_CA_CERTS + + +if __name__ == "__main__": + print(where()) diff --git a/lib/httplib2/py3/iri2uri.py b/lib/httplib2/py3/iri2uri.py new file mode 100644 index 00000000..86e361e6 --- /dev/null +++ b/lib/httplib2/py3/iri2uri.py @@ -0,0 +1,124 @@ +# -*- coding: utf-8 -*- +"""Converts an IRI to a URI.""" + +__author__ = "Joe Gregorio (joe@bitworking.org)" +__copyright__ = "Copyright 2006, Joe Gregorio" +__contributors__ = [] +__version__ = "1.0.0" +__license__ = "MIT" + +import urllib.parse + +# Convert an IRI to a URI following the rules in RFC 3987 +# +# The characters we need to enocde and escape are defined in the spec: +# +# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD +# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF +# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD +# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD +# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD +# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD +# / %xD0000-DFFFD / %xE1000-EFFFD + +escape_range = [ + (0xA0, 0xD7FF), + (0xE000, 0xF8FF), + (0xF900, 0xFDCF), + (0xFDF0, 0xFFEF), + (0x10000, 0x1FFFD), + (0x20000, 0x2FFFD), + (0x30000, 0x3FFFD), + (0x40000, 0x4FFFD), + (0x50000, 0x5FFFD), + (0x60000, 0x6FFFD), + (0x70000, 0x7FFFD), + (0x80000, 0x8FFFD), + (0x90000, 0x9FFFD), + (0xA0000, 0xAFFFD), + (0xB0000, 0xBFFFD), + (0xC0000, 0xCFFFD), + (0xD0000, 0xDFFFD), + (0xE1000, 0xEFFFD), + (0xF0000, 0xFFFFD), + (0x100000, 0x10FFFD), +] + + +def encode(c): + retval = c + i = ord(c) + for low, high in escape_range: + if i < low: + break + if i >= low and i <= high: + retval = "".join(["%%%2X" % o for o in c.encode("utf-8")]) + break + return retval + + +def iri2uri(uri): + """Convert an IRI to a URI. Note that IRIs must be + passed in a unicode strings. That is, do not utf-8 encode + the IRI before passing it into the function.""" + if isinstance(uri, str): + (scheme, authority, path, query, fragment) = urllib.parse.urlsplit(uri) + authority = authority.encode("idna").decode("utf-8") + # For each character in 'ucschar' or 'iprivate' + # 1. encode as utf-8 + # 2. 
then %-encode each octet of that utf-8 + uri = urllib.parse.urlunsplit((scheme, authority, path, query, fragment)) + uri = "".join([encode(c) for c in uri]) + return uri + + +if __name__ == "__main__": + import unittest + + class Test(unittest.TestCase): + def test_uris(self): + """Test that URIs are invariant under the transformation.""" + invariant = [ + "ftp://ftp.is.co.za/rfc/rfc1808.txt", + "http://www.ietf.org/rfc/rfc2396.txt", + "ldap://[2001:db8::7]/c=GB?objectClass?one", + "mailto:John.Doe@example.com", + "news:comp.infosystems.www.servers.unix", + "tel:+1-816-555-1212", + "telnet://192.0.2.16:80/", + "urn:oasis:names:specification:docbook:dtd:xml:4.1.2", + ] + for uri in invariant: + self.assertEqual(uri, iri2uri(uri)) + + def test_iri(self): + """Test that the right type of escaping is done for each part of the URI.""" + self.assertEqual( + "http://xn--o3h.com/%E2%98%84", + iri2uri("http://\N{COMET}.com/\N{COMET}"), + ) + self.assertEqual( + "http://bitworking.org/?fred=%E2%98%84", + iri2uri("http://bitworking.org/?fred=\N{COMET}"), + ) + self.assertEqual( + "http://bitworking.org/#%E2%98%84", + iri2uri("http://bitworking.org/#\N{COMET}"), + ) + self.assertEqual("#%E2%98%84", iri2uri("#\N{COMET}")) + self.assertEqual( + "/fred?bar=%E2%98%9A#%E2%98%84", + iri2uri("/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"), + ) + self.assertEqual( + "/fred?bar=%E2%98%9A#%E2%98%84", + iri2uri(iri2uri("/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")), + ) + self.assertNotEqual( + "/fred?bar=%E2%98%9A#%E2%98%84", + iri2uri( + "/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode("utf-8") + ), + ) + + unittest.main() diff --git a/lib/httplib2/py3/socks.py b/lib/httplib2/py3/socks.py new file mode 100644 index 00000000..cc68e634 --- /dev/null +++ b/lib/httplib2/py3/socks.py @@ -0,0 +1,518 @@ +"""SocksiPy - Python SOCKS module. + +Version 1.00 + +Copyright 2006 Dan-Haim. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +3. Neither the name of Dan Haim nor the names of his contributors may be used + to endorse or promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA +OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT +OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMANGE. + +This module provides a standard socket-like interface for Python +for tunneling connections through SOCKS proxies. + +Minor modifications made by Christopher Gilbert (http://motomastyle.com/) for +use in PyLoris (http://pyloris.sourceforge.net/). 
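+
+A minimal usage sketch (assuming the module is importable as "socks"
+and a SOCKS5 proxy is listening on 127.0.0.1:1080; socksocket's
+connect() override lives further down in this module):
+
+    import socks
+    s = socks.socksocket()
+    s.setproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 1080)
+    s.connect(("www.example.com", 80))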
+ +Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/) +mainly to merge bug fixes found in Sourceforge. +""" + +import base64 +import socket +import struct +import sys + +if getattr(socket, "socket", None) is None: + raise ImportError("socket.socket missing, proxy support unusable") + +PROXY_TYPE_SOCKS4 = 1 +PROXY_TYPE_SOCKS5 = 2 +PROXY_TYPE_HTTP = 3 +PROXY_TYPE_HTTP_NO_TUNNEL = 4 + +_defaultproxy = None +_orgsocket = socket.socket + + +class ProxyError(Exception): + pass + + +class GeneralProxyError(ProxyError): + pass + + +class Socks5AuthError(ProxyError): + pass + + +class Socks5Error(ProxyError): + pass + + +class Socks4Error(ProxyError): + pass + + +class HTTPError(ProxyError): + pass + + +_generalerrors = ( + "success", + "invalid data", + "not connected", + "not available", + "bad proxy type", + "bad input", +) + +_socks5errors = ( + "succeeded", + "general SOCKS server failure", + "connection not allowed by ruleset", + "Network unreachable", + "Host unreachable", + "Connection refused", + "TTL expired", + "Command not supported", + "Address type not supported", + "Unknown error", +) + +_socks5autherrors = ( + "succeeded", + "authentication is required", + "all offered authentication methods were rejected", + "unknown username or invalid password", + "unknown error", +) + +_socks4errors = ( + "request granted", + "request rejected or failed", + "request rejected because SOCKS server cannot connect to identd on the client", + "request rejected because the client program and identd report different " + "user-ids", + "unknown error", +) + + +def setdefaultproxy( + proxytype=None, addr=None, port=None, rdns=True, username=None, password=None +): + """setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]]) + Sets a default proxy which all further socksocket objects will use, + unless explicitly changed. + """ + global _defaultproxy + _defaultproxy = (proxytype, addr, port, rdns, username, password) + + +def wrapmodule(module): + """wrapmodule(module) + + Attempts to replace a module's socket library with a SOCKS socket. Must set + a default proxy using setdefaultproxy(...) first. + This will only work on modules that import socket directly into the + namespace; + most of the Python Standard Library falls into this category. + """ + if _defaultproxy != None: + module.socket.socket = socksocket + else: + raise GeneralProxyError((4, "no proxy specified")) + + +class socksocket(socket.socket): + """socksocket([family[, type[, proto]]]) -> socket object + Open a SOCKS enabled socket. The parameters are the same as + those of the standard socket init. In order for SOCKS to work, + you must specify family=AF_INET, type=SOCK_STREAM and proto=0. + """ + + def __init__( + self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None + ): + _orgsocket.__init__(self, family, type, proto, _sock) + if _defaultproxy != None: + self.__proxy = _defaultproxy + else: + self.__proxy = (None, None, None, None, None, None) + self.__proxysockname = None + self.__proxypeername = None + self.__httptunnel = True + + def __recvall(self, count): + """__recvall(count) -> data + Receive EXACTLY the number of bytes requested from the socket. + Blocks until the required number of bytes have been received. 
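+        Raises GeneralProxyError if the connection is closed before
+        count bytes have been read.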
+        """
+        data = self.recv(count)
+        while len(data) < count:
+            d = self.recv(count - len(data))
+            if not d:
+                raise GeneralProxyError((0, "connection closed unexpectedly"))
+            data = data + d
+        return data
+
+    def sendall(self, content, *args):
+        """ override socket.socket.sendall method to rewrite the header
+        for non-tunneling proxies if needed
+        """
+        if not self.__httptunnel:
+            content = self.__rewriteproxy(content)
+        return super(socksocket, self).sendall(content, *args)
+
+    def __rewriteproxy(self, header):
+        """ rewrite HTTP request headers to support non-tunneling proxies
+        (i.e. those which do not support the CONNECT method).
+        This only works for HTTP (not HTTPS) since HTTPS requires tunneling.
+        """
+        host, endpt = None, None
+        hdrs = header.split("\r\n")
+        for hdr in hdrs:
+            if hdr.lower().startswith("host:"):
+                host = hdr
+            elif hdr.lower().startswith("get") or hdr.lower().startswith("post"):
+                endpt = hdr
+        if host and endpt:
+            hdrs.remove(host)
+            hdrs.remove(endpt)
+            host = host.split(" ")[1]
+            endpt = endpt.split(" ")
+            if self.__proxy[4] != None and self.__proxy[5] != None:
+                hdrs.insert(0, self.__getauthheader())
+            hdrs.insert(0, "Host: %s" % host)
+            hdrs.insert(0, "%s http://%s%s %s" % (endpt[0], host, endpt[1], endpt[2]))
+        return "\r\n".join(hdrs)
+
+    def __getauthheader(self):
+        auth = self.__proxy[4] + b":" + self.__proxy[5]
+        return "Proxy-Authorization: Basic " + base64.b64encode(auth).decode()
+
+    def setproxy(
+        self,
+        proxytype=None,
+        addr=None,
+        port=None,
+        rdns=True,
+        username=None,
+        password=None,
+        headers=None,
+    ):
+        """setproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
+
+        Sets the proxy to be used.
+        proxytype - The type of the proxy to be used. Three types
+        are supported: PROXY_TYPE_SOCKS4 (including socks4a),
+        PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
+        addr - The address of the server (IP or DNS).
+        port - The port of the server. Defaults to 1080 for SOCKS
+        servers and 8080 for HTTP proxy servers.
+        rdns - Should DNS queries be performed on the remote side
+        (rather than the local side). The default is True.
+        Note: This has no effect with SOCKS4 servers.
+        username - Username to authenticate with to the server.
+        The default is no authentication.
+        password - Password to authenticate with to the server.
+        Only relevant when username is also provided.
+        headers - Additional or modified headers for the proxy connect
+        request.
+        """
+        self.__proxy = (
+            proxytype,
+            addr,
+            port,
+            rdns,
+            username.encode() if username else None,
+            password.encode() if password else None,
+            headers,
+        )
+
+    def __negotiatesocks5(self, destaddr, destport):
+        """__negotiatesocks5(self,destaddr,destport)
+        Negotiates a connection through a SOCKS5 server.
+        """
+        # First we'll send the authentication packages we support.
+        if (self.__proxy[4] != None) and (self.__proxy[5] != None):
+            # The username/password details were supplied to the
+            # setproxy method so we support the USERNAME/PASSWORD
+            # authentication (in addition to the standard none).
+            self.sendall(struct.pack("BBBB", 0x05, 0x02, 0x00, 0x02))
+        else:
+            # No username/password were entered, therefore we
+            # only support connections with no authentication.
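+            # Greeting layout per RFC 1928: protocol version (0x05),
+            # number of auth methods offered, then one byte per method
+            # (0x00 = no authentication, 0x02 = username/password).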
+ self.sendall(struct.pack("BBB", 0x05, 0x01, 0x00)) + # We'll receive the server's response to determine which + # method was selected + chosenauth = self.__recvall(2) + if chosenauth[0:1] != chr(0x05).encode(): + self.close() + raise GeneralProxyError((1, _generalerrors[1])) + # Check the chosen authentication method + if chosenauth[1:2] == chr(0x00).encode(): + # No authentication is required + pass + elif chosenauth[1:2] == chr(0x02).encode(): + # Okay, we need to perform a basic username/password + # authentication. + packet = bytearray() + packet.append(0x01) + packet.append(len(self.__proxy[4])) + packet.extend(self.__proxy[4]) + packet.append(len(self.__proxy[5])) + packet.extend(self.__proxy[5]) + self.sendall(packet) + authstat = self.__recvall(2) + if authstat[0:1] != chr(0x01).encode(): + # Bad response + self.close() + raise GeneralProxyError((1, _generalerrors[1])) + if authstat[1:2] != chr(0x00).encode(): + # Authentication failed + self.close() + raise Socks5AuthError((3, _socks5autherrors[3])) + # Authentication succeeded + else: + # Reaching here is always bad + self.close() + if chosenauth[1] == chr(0xFF).encode(): + raise Socks5AuthError((2, _socks5autherrors[2])) + else: + raise GeneralProxyError((1, _generalerrors[1])) + # Now we can request the actual connection + req = struct.pack("BBB", 0x05, 0x01, 0x00) + # If the given destination address is an IP address, we'll + # use the IPv4 address request even if remote resolving was specified. + try: + ipaddr = socket.inet_aton(destaddr) + req = req + chr(0x01).encode() + ipaddr + except socket.error: + # Well it's not an IP number, so it's probably a DNS name. + if self.__proxy[3]: + # Resolve remotely + ipaddr = None + req = ( + req + + chr(0x03).encode() + + chr(len(destaddr)).encode() + + destaddr.encode() + ) + else: + # Resolve locally + ipaddr = socket.inet_aton(socket.gethostbyname(destaddr)) + req = req + chr(0x01).encode() + ipaddr + req = req + struct.pack(">H", destport) + self.sendall(req) + # Get the response + resp = self.__recvall(4) + if resp[0:1] != chr(0x05).encode(): + self.close() + raise GeneralProxyError((1, _generalerrors[1])) + elif resp[1:2] != chr(0x00).encode(): + # Connection failed + self.close() + if ord(resp[1:2]) <= 8: + raise Socks5Error((ord(resp[1:2]), _socks5errors[ord(resp[1:2])])) + else: + raise Socks5Error((9, _socks5errors[9])) + # Get the bound address/port + elif resp[3:4] == chr(0x01).encode(): + boundaddr = self.__recvall(4) + elif resp[3:4] == chr(0x03).encode(): + resp = resp + self.recv(1) + boundaddr = self.__recvall(ord(resp[4:5])) + else: + self.close() + raise GeneralProxyError((1, _generalerrors[1])) + boundport = struct.unpack(">H", self.__recvall(2))[0] + self.__proxysockname = (boundaddr, boundport) + if ipaddr != None: + self.__proxypeername = (socket.inet_ntoa(ipaddr), destport) + else: + self.__proxypeername = (destaddr, destport) + + def getproxysockname(self): + """getsockname() -> address info + Returns the bound IP address and port number at the proxy. + """ + return self.__proxysockname + + def getproxypeername(self): + """getproxypeername() -> address info + Returns the IP and port number of the proxy. 
+ """ + return _orgsocket.getpeername(self) + + def getpeername(self): + """getpeername() -> address info + Returns the IP address and port number of the destination + machine (note: getproxypeername returns the proxy) + """ + return self.__proxypeername + + def __negotiatesocks4(self, destaddr, destport): + """__negotiatesocks4(self,destaddr,destport) + Negotiates a connection through a SOCKS4 server. + """ + # Check if the destination address provided is an IP address + rmtrslv = False + try: + ipaddr = socket.inet_aton(destaddr) + except socket.error: + # It's a DNS name. Check where it should be resolved. + if self.__proxy[3]: + ipaddr = struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01) + rmtrslv = True + else: + ipaddr = socket.inet_aton(socket.gethostbyname(destaddr)) + # Construct the request packet + req = struct.pack(">BBH", 0x04, 0x01, destport) + ipaddr + # The username parameter is considered userid for SOCKS4 + if self.__proxy[4] != None: + req = req + self.__proxy[4] + req = req + chr(0x00).encode() + # DNS name if remote resolving is required + # NOTE: This is actually an extension to the SOCKS4 protocol + # called SOCKS4A and may not be supported in all cases. + if rmtrslv: + req = req + destaddr + chr(0x00).encode() + self.sendall(req) + # Get the response from the server + resp = self.__recvall(8) + if resp[0:1] != chr(0x00).encode(): + # Bad data + self.close() + raise GeneralProxyError((1, _generalerrors[1])) + if resp[1:2] != chr(0x5A).encode(): + # Server returned an error + self.close() + if ord(resp[1:2]) in (91, 92, 93): + self.close() + raise Socks4Error((ord(resp[1:2]), _socks4errors[ord(resp[1:2]) - 90])) + else: + raise Socks4Error((94, _socks4errors[4])) + # Get the bound address/port + self.__proxysockname = ( + socket.inet_ntoa(resp[4:]), + struct.unpack(">H", resp[2:4])[0], + ) + if rmtrslv != None: + self.__proxypeername = (socket.inet_ntoa(ipaddr), destport) + else: + self.__proxypeername = (destaddr, destport) + + def __negotiatehttp(self, destaddr, destport): + """__negotiatehttp(self,destaddr,destport) + Negotiates a connection through an HTTP server. 
+ """ + # If we need to resolve locally, we do this now + if not self.__proxy[3]: + addr = socket.gethostbyname(destaddr) + else: + addr = destaddr + headers = ["CONNECT ", addr, ":", str(destport), " HTTP/1.1\r\n"] + wrote_host_header = False + wrote_auth_header = False + if self.__proxy[6] != None: + for key, val in self.__proxy[6].iteritems(): + headers += [key, ": ", val, "\r\n"] + wrote_host_header = key.lower() == "host" + wrote_auth_header = key.lower() == "proxy-authorization" + if not wrote_host_header: + headers += ["Host: ", destaddr, "\r\n"] + if not wrote_auth_header: + if self.__proxy[4] != None and self.__proxy[5] != None: + headers += [self.__getauthheader(), "\r\n"] + headers.append("\r\n") + self.sendall("".join(headers).encode()) + # We read the response until we get the string "\r\n\r\n" + resp = self.recv(1) + while resp.find("\r\n\r\n".encode()) == -1: + resp = resp + self.recv(1) + # We just need the first line to check if the connection + # was successful + statusline = resp.splitlines()[0].split(" ".encode(), 2) + if statusline[0] not in ("HTTP/1.0".encode(), "HTTP/1.1".encode()): + self.close() + raise GeneralProxyError((1, _generalerrors[1])) + try: + statuscode = int(statusline[1]) + except ValueError: + self.close() + raise GeneralProxyError((1, _generalerrors[1])) + if statuscode != 200: + self.close() + raise HTTPError((statuscode, statusline[2])) + self.__proxysockname = ("0.0.0.0", 0) + self.__proxypeername = (addr, destport) + + def connect(self, destpair): + """connect(self, despair) + Connects to the specified destination through a proxy. + destpar - A tuple of the IP/DNS address and the port number. + (identical to socket's connect). + To select the proxy server use setproxy(). + """ + # Do a minimal input check first + if ( + (not type(destpair) in (list, tuple)) + or (len(destpair) < 2) + or (not isinstance(destpair[0], (str, bytes))) + or (type(destpair[1]) != int) + ): + raise GeneralProxyError((5, _generalerrors[5])) + if self.__proxy[0] == PROXY_TYPE_SOCKS5: + if self.__proxy[2] != None: + portnum = self.__proxy[2] + else: + portnum = 1080 + _orgsocket.connect(self, (self.__proxy[1], portnum)) + self.__negotiatesocks5(destpair[0], destpair[1]) + elif self.__proxy[0] == PROXY_TYPE_SOCKS4: + if self.__proxy[2] != None: + portnum = self.__proxy[2] + else: + portnum = 1080 + _orgsocket.connect(self, (self.__proxy[1], portnum)) + self.__negotiatesocks4(destpair[0], destpair[1]) + elif self.__proxy[0] == PROXY_TYPE_HTTP: + if self.__proxy[2] != None: + portnum = self.__proxy[2] + else: + portnum = 8080 + _orgsocket.connect(self, (self.__proxy[1], portnum)) + self.__negotiatehttp(destpair[0], destpair[1]) + elif self.__proxy[0] == PROXY_TYPE_HTTP_NO_TUNNEL: + if self.__proxy[2] != None: + portnum = self.__proxy[2] + else: + portnum = 8080 + _orgsocket.connect(self, (self.__proxy[1], portnum)) + if destpair[1] == 443: + self.__negotiatehttp(destpair[0], destpair[1]) + else: + self.__httptunnel = False + elif self.__proxy[0] == None: + _orgsocket.connect(self, (destpair[0], destpair[1])) + else: + raise GeneralProxyError((4, _generalerrors[4])) diff --git a/lib/reprlib/__init__.py b/lib/reprlib/__init__.py new file mode 100644 index 00000000..6ccf9c00 --- /dev/null +++ b/lib/reprlib/__init__.py @@ -0,0 +1,9 @@ +from __future__ import absolute_import +import sys + +if sys.version_info[0] < 3: + from repr import * +else: + raise ImportError('This package should not be accessible on Python 3. 
' + 'Either you are trying to run from the python-future src folder ' + 'or your installation of python-future is corrupted.') diff --git a/platformcode/config.py b/platformcode/config.py index 386e4b99..29e3d329 100644 --- a/platformcode/config.py +++ b/platformcode/config.py @@ -3,6 +3,11 @@ # Parámetros de configuración (kodi) # ------------------------------------------------------------ +#from builtins import str +import sys +PY3 = False +if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int + import os import re @@ -62,10 +67,12 @@ def get_platform(full_version=False): ret = {} codename = {"10": "dharma", "11": "eden", "12": "frodo", "13": "gotham", "14": "helix", "15": "isengard", - "16": "jarvis", "17": "krypton", "18": "leia"} + "16": "jarvis", "17": "krypton", "18": "leia", + "19": "matrix"} code_db = {'10': 'MyVideos37.db', '11': 'MyVideos60.db', '12': 'MyVideos75.db', '13': 'MyVideos78.db', '14': 'MyVideos90.db', '15': 'MyVideos93.db', - '16': 'MyVideos99.db', '17': 'MyVideos107.db', '18': 'MyVideos116.db'} + '16': 'MyVideos99.db', '17': 'MyVideos107.db', '18': 'MyVideos116.db', + '19': 'MyVideos116.db'} num_version = xbmc.getInfoLabel('System.BuildVersion') num_version = re.match("\d+\.\d+", num_version).group(0) @@ -334,7 +341,7 @@ def set_setting(name, value, channel="", server=""): __settings__.setSetting(name, value) - except Exception, ex: + except Exception as ex: from platformcode import logger logger.error("Error al convertir '%s' no se guarda el valor \n%s" % (name, ex)) return None @@ -346,7 +353,18 @@ def get_localized_string(code): dev = __language__(code) try: - dev = dev.encode("utf-8") + # Unicode to utf8 + if isinstance(dev, unicode): + dev = dev.encode("utf8") + if PY3: dev = dev.decode("utf8") + + # All encodings to utf8 + elif not PY3 and isinstance(dev, str): + dev = unicode(dev, "utf8", errors="replace").encode("utf8") + + # Bytes encodings to utf8 + elif PY3 and isinstance(dev, bytes): + dev = dev.decode("utf8") except: pass @@ -356,7 +374,7 @@ def get_localized_category(categ): categories = {'movie': get_localized_string(30122), 'tvshow': get_localized_string(30123), 'anime': get_localized_string(30124), 'documentary': get_localized_string(30125), 'vos': get_localized_string(30136), 'sub-ita': get_localized_string(70566), 'adult': get_localized_string(30126), - 'direct': get_localized_string(30137), 'torrent': get_localized_string(70015)} + 'direct': get_localized_string(30137), 'torrent': get_localized_string(70015), 'live': get_localized_string(30138)} return categories[categ] if categ in categories else categ @@ -391,6 +409,14 @@ def get_data_path(): return dev +def get_icon(): + return xbmc.translatePath(__settings__.getAddonInfo('icon')) + + +def get_fanart(): + return xbmc.translatePath(__settings__.getAddonInfo('fanart')) + + def get_cookie_data(): import os ficherocookies = os.path.join(get_data_path(), 'cookies.dat') diff --git a/platformcode/custom_code.py b/platformcode/custom_code.py index bcfa6319..17381cf6 100644 --- a/platformcode/custom_code.py +++ b/platformcode/custom_code.py @@ -3,17 +3,23 @@ # Updater (kodi) # -------------------------------------------------------------------------------- -import json -import os -import traceback +#from builtins import str +import sys +PY3 = False +if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int +import traceback import xbmc import xbmcaddon +import threading +import subprocess +import time -from core import filetools -from core import 
jsontools from platformcode import config, logger, platformtools +from core import jsontools +from core import filetools + json_data_file_name = 'custom_code.json' @@ -21,10 +27,9 @@ def init(): logger.info() """ - Todo el código añadido al add-on se borra con cada actualización. Esta función permite restaurarlo automáticamente con cada actualización. - Esto permite al usuario tener su propio código, bajo su responsabilidad, y restaurarlo al add-on cada vez que se actualiza. + Todo el código añadido al add-on se borra con cada actualización. Esta función permite restaurarlo automáticamente con cada actualización. Esto permite al usuario tener su propio código, bajo su responsabilidad, y restaurarlo al add-on cada vez que se actualiza. - El mecanismo funciona copiando el contenido de la carpeta-arbol ".\userdata\addon_data\plugin.video.alfa\custom_code\..." sobre + El mecanismo funciona copiando el contenido de la carpeta-arbol "./userdata/addon_data/plugin.video.alfa/custom_code/..." sobre las carpetas de código del add-on. No verifica el contenido, solo vuelca(reemplaza) el contenido de "custom_code". El usuario almacenará en las subcarpetas de "custom_code" su código actualizado y listo para ser copiado en cualquier momento. @@ -37,7 +42,7 @@ def init(): from platformcode import custom_code custom_code.init() - 2.- En el inicio de Kodi, comprueba si existe la carpeta "custom_code" en ".\userdata\addon_data\plugin.video.alfa\". + 2.- En el inicio de Kodi, comprueba si existe la carpeta "custom_code" en "./userdata/addon_data/plugin.video.alfa/". Si no existe, la crea y sale sin más, dando al ususario la posibilidad de copiar sobre esa estructura su código, y que la función la vuelque sobre el add-on en el próximo inicio de Kodi. @@ -55,31 +60,45 @@ def init(): Tiempos: Copiando 7 archivos de prueba, el proceso ha tardado una décima de segundo. """ - + try: + #Borra el .zip de instalación de Alfa de la carpeta Packages, por si está corrupto, y que así se pueda descargar de nuevo + version = 'plugin.video.alfa-%s.zip' % config.get_addon_version(with_fix=False) + filetools.remove(filetools.join(xbmc.translatePath('special://home'), 'addons', 'packages', version), True) + #Verifica si Kodi tiene algún achivo de Base de Datos de Vídeo de versiones anteriores, entonces los borra verify_Kodi_video_DB() + #LIBTORRENT: se descarga el binario de Libtorrent cada vez que se actualiza Alfa + try: + threading.Thread(target=update_libtorrent).start() # Creamos un Thread independiente, hasta el fin de Kodi + time.sleep(2) # Dejamos terminar la inicialización... 
+ except: # Si hay problemas de threading, nos vamos + logger.error(traceback.format_exc()) + #QUASAR: Preguntamos si se hacen modificaciones a Quasar - if not filetools.exists(os.path.join(config.get_data_path(), "quasar.json")) and not config.get_setting('addon_quasar_update', default=False): + if not filetools.exists(filetools.join(config.get_data_path(), "quasar.json")) \ + and not config.get_setting('addon_quasar_update', default=False): question_update_external_addon("quasar") #QUASAR: Hacemos las modificaciones a Quasar, si está permitido, y si está instalado - if config.get_setting('addon_quasar_update', default=False): + if config.get_setting('addon_quasar_update', default=False) or \ + (filetools.exists(filetools.join(config.get_data_path(), \ + "quasar.json")) and not xbmc.getCondVisibility('System.HasAddon("plugin.video.quasar")')): if not update_external_addon("quasar"): platformtools.dialog_notification("Actualización Quasar", "Ha fallado. Consulte el log") #Existe carpeta "custom_code" ? Si no existe se crea y se sale - custom_code_dir = os.path.join(config.get_data_path(), 'custom_code') - if os.path.exists(custom_code_dir) == False: + custom_code_dir = filetools.join(config.get_data_path(), 'custom_code') + if not filetools.exists(custom_code_dir): create_folder_structure(custom_code_dir) return else: #Existe "custom_code.json" ? Si no existe se crea custom_code_json_path = config.get_runtime_path() - custom_code_json = os.path.join(custom_code_json_path, 'custom_code.json') - if os.path.exists(custom_code_json) == False: + custom_code_json = filetools.join(custom_code_json_path, 'custom_code.json') + if not filetools.exists(custom_code_json): create_json(custom_code_json_path) #Se verifica si la versión del .json y del add-on son iguales. Si es así se sale. Si no se copia "custom_code" al add-on @@ -92,13 +111,13 @@ def create_folder_structure(custom_code_dir): logger.info() #Creamos todas las carpetas. La importante es "custom_code". Las otras sirven meramente de guía para evitar errores de nombres... 
- os.mkdir(custom_code_dir) - os.mkdir(filetools.join(custom_code_dir, 'channels')) - os.mkdir(filetools.join(custom_code_dir, 'core')) - os.mkdir(filetools.join(custom_code_dir, 'lib')) - os.mkdir(filetools.join(custom_code_dir, 'platformcode')) - os.mkdir(filetools.join(custom_code_dir, 'resources')) - os.mkdir(filetools.join(custom_code_dir, 'servers')) + filetools.mkdir(custom_code_dir) + filetools.mkdir(filetools.join(custom_code_dir, 'channels')) + filetools.mkdir(filetools.join(custom_code_dir, 'core')) + filetools.mkdir(filetools.join(custom_code_dir, 'lib')) + filetools.mkdir(filetools.join(custom_code_dir, 'platformcode')) + filetools.mkdir(filetools.join(custom_code_dir, 'resources')) + filetools.mkdir(filetools.join(custom_code_dir, 'servers')) return @@ -108,9 +127,9 @@ def create_json(custom_code_json_path, json_name=json_data_file_name): #Guardamaos el json con la versión de Alfa vacía, para permitir hacer la primera copia json_data_file = filetools.join(custom_code_json_path, json_name) - json_file = open(json_data_file, "a+") - json_file.write(json.dumps({"addon_version": ""})) - json_file.close() + if filetools.exists(json_data_file): + filetools.remove(json_data_file) + result = filetools.write(json_data_file, jsontools.dump({"addon_version": ""})) return @@ -122,15 +141,21 @@ def verify_copy_folders(custom_code_dir, custom_code_json_path): json_data_file = filetools.join(custom_code_json_path, json_data_file_name) json_data = jsontools.load(filetools.read(json_data_file)) current_version = config.get_addon_version(with_fix=False) - if current_version == json_data['addon_version']: - return + if not json_data or not 'addon_version' in json_data: + create_json(custom_code_json_path) + json_data = jsontools.load(filetools.read(json_data_file)) + try: + if current_version == json_data['addon_version']: + return + except: + logger.error(traceback.format_exc(1)) #Ahora copiamos los archivos desde el área de Userdata, Custom_code, sobre las carpetas del add-on - for root, folders, files in os.walk(custom_code_dir): + for root, folders, files in filetools.walk(custom_code_dir): for file in files: input_file = filetools.join(root, file) output_file = input_file.replace(custom_code_dir, custom_code_json_path) - if filetools.copy(input_file, output_file, silent=True) == False: + if not filetools.copy(input_file, output_file, silent=True): return #Guardamaos el json con la versión actual de Alfa, para no volver a hacer la copia hasta la nueva versión @@ -160,38 +185,163 @@ def question_update_external_addon(addon_name): create_json(config.get_data_path(), "%s.json" % addon_name) return stat - + + def update_external_addon(addon_name): logger.info(addon_name) - #Verificamos que el addon está instalado - if xbmc.getCondVisibility('System.HasAddon("plugin.video.%s")' % addon_name): - #Path de actuali<aciones de Alfa - alfa_addon_updates = filetools.join(config.get_runtime_path(), filetools.join("lib", addon_name)) - - #Path de destino en addon externo - __settings__ = xbmcaddon.Addon(id="plugin.video." 
+ addon_name) - if addon_name.lower() in ['quasar', 'elementum']: - addon_path = filetools.join(xbmc.translatePath(__settings__.getAddonInfo('Path')), filetools.join("resources", filetools.join("site-packages", addon_name))) + try: + #Verificamos que el addon está instalado + if xbmc.getCondVisibility('System.HasAddon("plugin.video.%s")' % addon_name): + #Path de actualizaciones de Alfa + alfa_addon_updates_mig = filetools.join(config.get_runtime_path(), "lib") + alfa_addon_updates = filetools.join(alfa_addon_updates_mig, addon_name) + + #Path de destino en addon externo + __settings__ = xbmcaddon.Addon(id="plugin.video." + addon_name) + if addon_name.lower() in ['quasar', 'elementum']: + addon_path_mig = filetools.join(xbmc.translatePath(__settings__.getAddonInfo('Path')), \ + filetools.join("resources", "site-packages")) + addon_path = filetools.join(addon_path_mig, addon_name) + else: + addon_path_mig = '' + addon_path = '' + + #Hay modificaciones en Alfa? Las copiamos al addon, incuidas las carpetas de migración a PY3 + if filetools.exists(alfa_addon_updates) and filetools.exists(addon_path): + for root, folders, files in filetools.walk(alfa_addon_updates_mig): + if ('future' in root or 'past' in root) and not 'concurrent' in root: + for file in files: + alfa_addon_updates_mig_folder = root.replace(alfa_addon_updates_mig, addon_path_mig) + if not filetools.exists(alfa_addon_updates_mig_folder): + filetools.mkdir(alfa_addon_updates_mig_folder) + if file.endswith('.pyo') or file.endswith('.pyd'): + continue + input_file = filetools.join(root, file) + output_file = input_file.replace(alfa_addon_updates_mig, addon_path_mig) + if not filetools.copy(input_file, output_file, silent=True): + logger.error('Error en la copia de MIGRACIÓN: Input: %s o Output: %s' % (input_file, output_file)) + return False + + for root, folders, files in filetools.walk(alfa_addon_updates): + for file in files: + input_file = filetools.join(root, file) + output_file = input_file.replace(alfa_addon_updates, addon_path) + if not filetools.copy(input_file, output_file, silent=True): + logger.error('Error en la copia: Input: %s o Output: %s' % (input_file, output_file)) + return False + return True + else: + logger.error('Alguna carpeta no existe: Alfa: %s o %s: %s' % (alfa_addon_updates, addon_name, addon_path)) + # Se ha desinstalado Quasar, reseteamos la opción else: - addon_path = '' - - #Hay modificaciones en Alfa? 
Las copiamos al addon - if filetools.exists(alfa_addon_updates) and filetools.exists(addon_path): - for root, folders, files in os.walk(alfa_addon_updates): - for file in files: - input_file = filetools.join(root, file) - output_file = input_file.replace(alfa_addon_updates, addon_path) - if filetools.copy(input_file, output_file, silent=True) == False: - logger.error('Error en la copia: Input: %s o Output: %s' % (input_file, output_file)) - return False + config.set_setting('addon_quasar_update', False) + if filetools.exists(filetools.join(config.get_data_path(), "%s.json" % addon_name)): + filetools.remove(filetools.join(config.get_data_path(), "%s.json" % addon_name)) return True - else: - logger.error('Alguna carpeta no existe: Alfa: %s o %s: %s' % (alfa_addon_updates, addon_name, addon_path)) + except: + logger.error(traceback.format_exc()) return False +def update_libtorrent(): + logger.info() + + if not config.get_setting("mct_buffer", server="torrent", default=""): + default = config.get_setting("torrent_client", server="torrent", default=0) + config.set_setting("torrent_client", default, server="torrent") + config.set_setting("mct_buffer", "50", server="torrent") + if config.get_setting("mct_download_path", server="torrent", default=config.get_setting("downloadpath")): + config.set_setting("mct_download_path", config.get_setting("downloadpath"), server="torrent") + config.set_setting("mct_background_download", True, server="torrent") + config.set_setting("mct_rar_unpack", True, server="torrent") + config.set_setting("bt_buffer", "50", server="torrent") + if config.get_setting("bt_download_path", server="torrent", default=config.get_setting("downloadpath")): + config.set_setting("bt_download_path", config.get_setting("downloadpath"), server="torrent") + config.set_setting("mct_download_limit", "", server="torrent") + config.set_setting("magnet2torrent", False, server="torrent") + + if not filetools.exists(filetools.join(config.get_runtime_path(), "custom_code.json")) or not \ + config.get_setting("unrar_path", server="torrent", default=""): + + path = filetools.join(config.get_runtime_path(), 'lib', 'rarfiles') + creationflags = '' + sufix = '' + unrar = '' + for device in filetools.listdir(path): + if xbmc.getCondVisibility("system.platform.android") and 'android' not in device: continue + if xbmc.getCondVisibility("system.platform.windows") and 'windows' not in device: continue + if not xbmc.getCondVisibility("system.platform.windows") and not xbmc.getCondVisibility("system.platform.android") \ + and ('android' in device or 'windows' in device): continue + if 'windows' in device: + creationflags = 0x08000000 + sufix = '.exe' + else: + creationflags = '' + sufix = '' + unrar = filetools.join(path, device, 'unrar%s') % sufix + if not filetools.exists(unrar): unrar = '' + if unrar: + if not xbmc.getCondVisibility("system.platform.windows"): + try: + if xbmc.getCondVisibility("system.platform.android"): + # Para Android copiamos el binario a la partición del sistema + unrar_org = unrar + unrar = filetools.join(xbmc.translatePath('special://xbmc/'), 'files').replace('/cache/apk/assets', '') + if not filetools.exists(unrar): + filetools.mkdir(unrar) + unrar = filetools.join(unrar, 'unrar') + filetools.copy(unrar_org, unrar, silent=True) + + command = ['chmod', '777', '%s' % unrar] + p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + output_cmd, error_cmd = p.communicate() + command = ['ls', '-l', unrar] + p = subprocess.Popen(command, stdout=subprocess.PIPE, 
stderr=subprocess.PIPE) + output_cmd, error_cmd = p.communicate() + xbmc.log('######## UnRAR file: %s' % str(output_cmd), xbmc.LOGNOTICE) + except: + xbmc.log('######## UnRAR ERROR in path: %s' % str(unrar), xbmc.LOGNOTICE) + logger.error(traceback.format_exc(1)) + + try: + if xbmc.getCondVisibility("system.platform.windows"): + p = subprocess.Popen(unrar, stdout=subprocess.PIPE, stderr=subprocess.PIPE, creationflags=creationflags) + else: + p = subprocess.Popen(unrar, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + output_cmd, error_cmd = p.communicate() + if p.returncode != 0 or error_cmd: + xbmc.log('######## UnRAR returncode in module %s: %s, %s in %s' % \ + (device, str(p.returncode), str(error_cmd), unrar), xbmc.LOGNOTICE) + unrar = '' + else: + xbmc.log('######## UnRAR OK in %s: %s' % (device, unrar), xbmc.LOGNOTICE) + break + except: + xbmc.log('######## UnRAR ERROR in module %s: %s' % (device, unrar), xbmc.LOGNOTICE) + logger.error(traceback.format_exc(1)) + unrar = '' + + if unrar: config.set_setting("unrar_path", unrar, server="torrent") + + if filetools.exists(filetools.join(config.get_runtime_path(), "custom_code.json")) and \ + config.get_setting("libtorrent_path", server="torrent", default="") : + return + + try: + from lib.python_libtorrent.python_libtorrent import get_libtorrent + except Exception as e: + logger.error(traceback.format_exc(1)) + if not PY3: + e = unicode(str(e), "utf8", errors="replace").encode("utf8") + config.set_setting("libtorrent_path", "", server="torrent") + if not config.get_setting("libtorrent_error", server="torrent", default=''): + config.set_setting("libtorrent_error", str(e), server="torrent") + + return + + def verify_Kodi_video_DB(): logger.info() import random @@ -204,12 +354,12 @@ def verify_Kodi_video_DB(): path = filetools.join(xbmc.translatePath("special://masterprofile/"), "Database") if filetools.exists(path): platform = config.get_platform(full_version=True) - if platform: + if platform and platform['num_version'] <= 19: db_files = filetools.walk(path) if filetools.exists(filetools.join(path, platform['video_db'])): for root, folders, files in db_files: for file in files: - if file != platform['video_db']: + if platform['video_db'] not in file: if file.startswith('MyVideos'): randnum = str(random.randrange(1, 999999)) filetools.rename(filetools.join(path, file), 'OLD_' + randnum +'_' + file) diff --git a/platformcode/download_and_play.py b/platformcode/download_and_play.py index 39e3babf..33de8da8 100644 --- a/platformcode/download_and_play.py +++ b/platformcode/download_and_play.py @@ -5,17 +5,25 @@ # Based on code from the Mega add-on (xbmchub.com) # --------------------------------------------------------------------------- +from __future__ import division +from future import standard_library +standard_library.install_aliases() +#from builtins import str +from past.utils import old_div +import sys +PY3 = False +if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int + +import urllib.request, urllib.parse, urllib.error + import os import re import socket import threading import time -import urllib -import urllib2 import xbmc import xbmcgui - from core import downloadtools from platformcode import config, logger @@ -43,7 +51,7 @@ def download_and_play(url, file_name, download_path): while not cancelled and download_thread.isAlive(): dialog.update(download_thread.get_progress(), config.get_localized_string(60313), - "Velocidad: " + str(int(download_thread.get_speed() / 1024)) + " KB/s " + str( + "Velocidad: 
" + str(int(old_div(download_thread.get_speed(), 1024))) + " KB/s " + str( download_thread.get_actual_size()) + "MB de " + str( download_thread.get_total_size()) + "MB", "Tiempo restante: " + str(downloadtools.sec_to_hms(download_thread.get_remaining_time()))) @@ -232,7 +240,7 @@ class DownloadThread(threading.Thread): for additional_header in additional_headers: logger.info("additional_header: " + additional_header) name = re.findall("(.*?)=.*?", additional_header)[0] - value = urllib.unquote_plus(re.findall(".*?=(.*?)$", additional_header)[0]) + value = urllib.parse.unquote_plus(re.findall(".*?=(.*?)$", additional_header)[0]) headers.append([name, value]) self.url = self.url.split("|")[0] @@ -242,18 +250,18 @@ class DownloadThread(threading.Thread): socket.setdefaulttimeout(60) # Crea la petición y añade las cabeceras - h = urllib2.HTTPHandler(debuglevel=0) - request = urllib2.Request(self.url) + h = urllib.request.HTTPHandler(debuglevel=0) + request = urllib.request.Request(self.url) for header in headers: logger.info("Header=" + header[0] + ": " + header[1]) request.add_header(header[0], header[1]) # Lanza la petición - opener = urllib2.build_opener(h) - urllib2.install_opener(opener) + opener = urllib.request.build_opener(h) + urllib.request.install_opener(opener) try: connexion = opener.open(request) - except urllib2.HTTPError, e: + except urllib.error.HTTPError as e: logger.error("error %d (%s) al abrir la url %s" % (e.code, e.msg, self.url)) # print e.code # print e.msg @@ -315,10 +323,10 @@ class DownloadThread(threading.Thread): bloqueleido = connexion.read(blocksize) after = time.time() if (after - before) > 0: - self.velocidad = len(bloqueleido) / ((after - before)) + self.velocidad = old_div(len(bloqueleido), ((after - before))) falta = totalfichero - grabado if self.velocidad > 0: - self.tiempofalta = falta / self.velocidad + self.tiempofalta = old_div(falta, self.velocidad) else: self.tiempofalta = 0 break diff --git a/platformcode/envtal.py b/platformcode/envtal.py new file mode 100644 index 00000000..f413d281 --- /dev/null +++ b/platformcode/envtal.py @@ -0,0 +1,613 @@ +# -*- coding: utf-8 -*- +# ------------------------------------------------------------ +# Localiza las variables de entorno más habituales (kodi) +# ------------------------------------------------------------ + +from __future__ import division +# from builtins import str +from past.utils import old_div +import sys + +PY3 = False +if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int + +import xbmc +import xbmcaddon + +import os +import subprocess +import re +import platform + +try: + import ctypes +except: + pass +import traceback + +from core import filetools, scrapertools +from platformcode import logger, config, platformtools + + +def get_environment(): + """ + Devuelve las variables de entorno del OS, de Kodi y de Alfa más habituales, + necesarias para el diagnóstico de fallos + """ + + try: + import base64 + import ast + + environment = config.get_platform(full_version=True) + environment['num_version'] = str(environment['num_version']) + environment['python_version'] = str(platform.python_version()) + + environment['os_release'] = str(platform.release()) + if xbmc.getCondVisibility("system.platform.Windows"): + try: + if platform._syscmd_ver()[2]: + environment['os_release'] = str(platform._syscmd_ver()[2]) + except: + pass + environment['prod_model'] = '' + if xbmc.getCondVisibility("system.platform.Android"): + environment['os_name'] = 'Android' + try: + for label_a in 
subprocess.check_output('getprop').split('\n'): + if 'build.version.release' in label_a: + environment['os_release'] = str(scrapertools.find_single_match(label_a, ':\s*\[(.*?)\]$')) + if 'product.model' in label_a: + environment['prod_model'] = str(scrapertools.find_single_match(label_a, ':\s*\[(.*?)\]$')) + except: + try: + for label_a in filetools.read(os.environ['ANDROID_ROOT'] + '/build.prop').split(): + if 'build.version.release' in label_a: + environment['os_release'] = str(scrapertools.find_single_match(label_a, '=(.*?)$')) + if 'product.model' in label_a: + environment['prod_model'] = str(scrapertools.find_single_match(label_a, '=(.*?)$')) + except: + pass + + elif xbmc.getCondVisibility("system.platform.Linux.RaspberryPi"): + environment['os_name'] = 'RaspberryPi' + else: + environment['os_name'] = str(platform.system()) + + environment['machine'] = str(platform.machine()) + environment['architecture'] = str(sys.maxsize > 2 ** 32 and "64-bit" or "32-bit") + environment['language'] = str(xbmc.getInfoLabel('System.Language')) + + environment['cpu_usage'] = str(xbmc.getInfoLabel('System.CpuUsage')) + + environment['mem_total'] = str(xbmc.getInfoLabel('System.Memory(total)')).replace('MB', '').replace('KB', '') + environment['mem_free'] = str(xbmc.getInfoLabel('System.Memory(free)')).replace('MB', '').replace('KB', '') + if not environment['mem_total'] or not environment['mem_free']: + try: + if environment['os_name'].lower() == 'windows': + kernel32 = ctypes.windll.kernel32 + c_ulong = ctypes.c_ulong + c_ulonglong = ctypes.c_ulonglong + + class MEMORYSTATUS(ctypes.Structure): + _fields_ = [ + ('dwLength', c_ulong), + ('dwMemoryLoad', c_ulong), + ('dwTotalPhys', c_ulonglong), + ('dwAvailPhys', c_ulonglong), + ('dwTotalPageFile', c_ulonglong), + ('dwAvailPageFile', c_ulonglong), + ('dwTotalVirtual', c_ulonglong), + ('dwAvailVirtual', c_ulonglong), + ('availExtendedVirtual', c_ulonglong) + ] + + memoryStatus = MEMORYSTATUS() + memoryStatus.dwLength = ctypes.sizeof(MEMORYSTATUS) + kernel32.GlobalMemoryStatus(ctypes.byref(memoryStatus)) + environment['mem_total'] = str(old_div(int(memoryStatus.dwTotalPhys), (1024 ** 2))) + environment['mem_free'] = str(old_div(int(memoryStatus.dwAvailPhys), (1024 ** 2))) + + else: + with open('/proc/meminfo') as f: + meminfo = f.read() + environment['mem_total'] = str( + old_div(int(re.search(r'MemTotal:\s+(\d+)', meminfo).groups()[0]), 1024)) + environment['mem_free'] = str( + old_div(int(re.search(r'MemAvailable:\s+(\d+)', meminfo).groups()[0]), 1024)) + except: + environment['mem_total'] = '' + environment['mem_free'] = '' + + try: + environment['kodi_buffer'] = '20' + environment['kodi_bmode'] = '0' + environment['kodi_rfactor'] = '4.0' + if filetools.exists(filetools.join(xbmc.translatePath("special://userdata"), "advancedsettings.xml")): + advancedsettings = filetools.read(filetools.join(xbmc.translatePath("special://userdata"), + "advancedsettings.xml")).split('\n') + for label_a in advancedsettings: + if 'memorysize' in label_a: + environment['kodi_buffer'] = str(old_div(int(scrapertools.find_single_match + (label_a, '>(\d+)<\/')), 1024 ** 2)) + if 'buffermode' in label_a: + environment['kodi_bmode'] = str(scrapertools.find_single_match + (label_a, '>(\d+)<\/')) + if 'readfactor' in label_a: + environment['kodi_rfactor'] = str(scrapertools.find_single_match + (label_a, '>(.*?)<\/')) + except: + pass + + environment['userdata_path'] = str(xbmc.translatePath(config.get_data_path())) + try: + if environment['os_name'].lower() == 'windows': + 
free_bytes = ctypes.c_ulonglong(0) + ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(environment['userdata_path']), + None, None, ctypes.pointer(free_bytes)) + environment['userdata_free'] = str(round(float(free_bytes.value) / (1024 ** 3), 3)) + else: + disk_space = os.statvfs(environment['userdata_path']) + if not disk_space.f_frsize: disk_space.f_frsize = disk_space.f_frsize.f_bsize + environment['userdata_free'] = str(round((float(disk_space.f_bavail) / \ + (1024 ** 3)) * float(disk_space.f_frsize), 3)) + except: + environment['userdata_free'] = '?' + + try: + environment['videolab_series'] = '?' + environment['videolab_episodios'] = '?' + environment['videolab_pelis'] = '?' + environment['videolab_path'] = str(xbmc.translatePath(config.get_videolibrary_path())) + if filetools.exists(filetools.join(environment['videolab_path'], \ + config.get_setting("folder_tvshows"))): + environment['videolab_series'] = str(len(filetools.listdir(filetools.join(environment['videolab_path'], \ + config.get_setting( + "folder_tvshows"))))) + counter = 0 + for root, folders, files in filetools.walk(filetools.join(environment['videolab_path'], \ + config.get_setting("folder_tvshows"))): + for file in files: + if file.endswith('.strm'): counter += 1 + environment['videolab_episodios'] = str(counter) + if filetools.exists(filetools.join(environment['videolab_path'], \ + config.get_setting("folder_movies"))): + environment['videolab_pelis'] = str(len(filetools.listdir(filetools.join(environment['videolab_path'], \ + config.get_setting( + "folder_movies"))))) + except: + pass + try: + video_updates = ['No', 'Inicio', 'Una vez', 'Inicio+Una vez'] + environment['videolab_update'] = str(video_updates[config.get_setting("update", "videolibrary")]) + except: + environment['videolab_update'] = '?' + try: + if environment['os_name'].lower() == 'windows': + free_bytes = ctypes.c_ulonglong(0) + ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(environment['videolab_path']), + None, None, ctypes.pointer(free_bytes)) + environment['videolab_free'] = str(round(float(free_bytes.value) / (1024 ** 3), 3)) + else: + disk_space = os.statvfs(environment['videolab_path']) + if not disk_space.f_frsize: disk_space.f_frsize = disk_space.f_frsize.f_bsize + environment['videolab_free'] = str(round((float(disk_space.f_bavail) / \ + (1024 ** 3)) * float(disk_space.f_frsize), 3)) + except: + environment['videolab_free'] = '?' 
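(Both disk probes above reduce to one small helper; a condensed sketch, with the function name chosen here for illustration:)

    import os
    import sys

    def free_gb(path):
        # Windows: ask kernel32 for the free bytes available on the volume.
        if sys.platform.startswith("win"):
            import ctypes
            free_bytes = ctypes.c_ulonglong(0)
            ctypes.windll.kernel32.GetDiskFreeSpaceExW(
                ctypes.c_wchar_p(path), None, None, ctypes.pointer(free_bytes))
            return round(float(free_bytes.value) / (1024 ** 3), 3)
        # POSIX: blocks available to unprivileged users times fragment size.
        st = os.statvfs(path)
        return round(float(st.f_bavail) * st.f_frsize / (1024 ** 3), 3)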
+ + environment['torrent_list'] = [] + environment['torrentcli_option'] = '' + environment['torrent_error'] = '' + environment['torrentcli_rar'] = config.get_setting("mct_rar_unpack", server="torrent", default=True) + environment['torrentcli_backgr'] = config.get_setting("mct_background_download", server="torrent", default=True) + environment['torrentcli_lib_path'] = config.get_setting("libtorrent_path", server="torrent", default="") + if environment['torrentcli_lib_path']: + lib_path = 'Activo' + else: + lib_path = 'Inactivo' + environment['torrentcli_unrar'] = config.get_setting("unrar_path", server="torrent", default="") + if environment['torrentcli_unrar']: + if xbmc.getCondVisibility("system.platform.Android"): + unrar = 'Android' + else: + unrar, bin = filetools.split(environment['torrentcli_unrar']) + unrar = unrar.replace('\\', '/') + if not unrar.endswith('/'): + unrar = unrar + '/' + unrar = scrapertools.find_single_match(unrar, '\/([^\/]+)\/$').capitalize() + else: + unrar = 'Inactivo' + torrent_id = config.get_setting("torrent_client", server="torrent", default=0) + environment['torrentcli_option'] = str(torrent_id) + torrent_options = platformtools.torrent_client_installed() + if lib_path == 'Activo': + torrent_options = ['MCT'] + torrent_options + torrent_options = ['BT'] + torrent_options + environment['torrent_list'].append({'Torrent_opt': str(torrent_id), 'Libtorrent': lib_path, \ + 'RAR_Auto': str(environment['torrentcli_rar']), \ + 'RAR_backgr': str(environment['torrentcli_backgr']), \ + 'UnRAR': unrar}) + environment['torrent_error'] = config.get_setting("libtorrent_error", server="torrent", default="") + if environment['torrent_error']: + environment['torrent_list'].append({'Libtorrent_error': environment['torrent_error']}) + + for torrent_option in torrent_options: + cliente = dict() + cliente['D_load_Path'] = '' + cliente['Libre'] = '?' 
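(For orientation, each pass of this loop fills one of these dicts; the finished environment['torrent_list'] holds the global state entry first, then one entry per client, shaped roughly as below. Values are illustrative only:)

    torrent_list = [
        {'Torrent_opt': '2', 'Libtorrent': 'Activo', 'RAR_Auto': 'True',
         'RAR_backgr': 'True', 'UnRAR': 'Linux'},
        {'Plug_in': 'MCT', 'D_load_Path': '/storage/downloads', 'Buffer': '50',
         'Libre': '12,3'},
    ]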
+ cliente['Plug_in'] = torrent_option.replace('Plugin externo: ', '') + if cliente['Plug_in'] == 'BT': + cliente['D_load_Path'] = str(config.get_setting("bt_download_path", server="torrent", default='')) + if not cliente['D_load_Path']: continue + cliente['Buffer'] = str(config.get_setting("bt_buffer", server="torrent", default=50)) + elif cliente['Plug_in'] == 'MCT': + cliente['D_load_Path'] = str(config.get_setting("mct_download_path", server="torrent", default='')) + if not cliente['D_load_Path']: continue + cliente['Buffer'] = str(config.get_setting("mct_buffer", server="torrent", default=50)) + elif xbmc.getCondVisibility('System.HasAddon("plugin.video.%s")' % cliente['Plug_in']): + __settings__ = xbmcaddon.Addon(id="plugin.video.%s" % cliente['Plug_in']) + cliente['Plug_in'] = cliente['Plug_in'].capitalize() + if cliente['Plug_in'] == 'Torrenter': + cliente['D_load_Path'] = str(xbmc.translatePath(__settings__.getSetting('storage'))) + if not cliente['D_load_Path']: + cliente['D_load_Path'] = str(filetools.join(xbmc.translatePath("special://home/"), \ + "cache", "xbmcup", "plugin.video.torrenter", + "Torrenter")) + cliente['Buffer'] = str(__settings__.getSetting('pre_buffer_bytes')) + else: + cliente['D_load_Path'] = str(xbmc.translatePath(__settings__.getSetting('download_path'))) + cliente['Buffer'] = str(__settings__.getSetting('buffer_size')) + if __settings__.getSetting('download_storage') == '1' and __settings__.getSetting('memory_size'): + cliente['Memoria'] = str(__settings__.getSetting('memory_size')) + + if cliente['D_load_Path']: + try: + if environment['os_name'].lower() == 'windows': + free_bytes = ctypes.c_ulonglong(0) + ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(cliente['D_load_Path']), + None, None, ctypes.pointer(free_bytes)) + cliente['Libre'] = str(round(float(free_bytes.value) / \ + (1024 ** 3), 3)).replace('.', ',') + else: + disk_space = os.statvfs(cliente['D_load_Path']) + if not disk_space.f_frsize: disk_space.f_frsize = disk_space.f_frsize.f_bsize + cliente['Libre'] = str(round((float(disk_space.f_bavail) / \ + (1024 ** 3)) * float(disk_space.f_frsize), 3)).replace('.', ',') + except: + pass + environment['torrent_list'].append(cliente) + + environment['proxy_active'] = '' + try: + proxy_channel_bloqued_str = base64.b64decode(config.get_setting('proxy_channel_bloqued')).decode('utf-8') + proxy_channel_bloqued = dict() + proxy_channel_bloqued = ast.literal_eval(proxy_channel_bloqued_str) + for channel_bloqued, proxy_active in list(proxy_channel_bloqued.items()): + if proxy_active != 'OFF': + environment['proxy_active'] += channel_bloqued + ', ' + except: + pass + if not environment['proxy_active']: environment['proxy_active'] = 'OFF' + environment['proxy_active'] = environment['proxy_active'].rstrip(', ') + + for root, folders, files in filetools.walk(xbmc.translatePath("special://logpath/")): + for file in files: + if file.lower() in ['kodi.log', 'jarvis.log', 'spmc.log', 'cemc.log', \ + 'mygica.log', 'wonderbox.log', 'leiapp,log', \ + 'leianmc.log', 'kodiapp.log', 'anmc.log', \ + 'latin-anmc.log']: + environment['log_path'] = str(filetools.join(root, file)) + break + else: + environment['log_path'] = '' + break + + if environment['log_path']: + environment['log_size_bytes'] = str(filetools.getsize(environment['log_path'])) + environment['log_size'] = str(round(float(environment['log_size_bytes']) / \ + (1024 * 1024), 3)) + else: + environment['log_size_bytes'] = '' + environment['log_size'] = '' + + environment['debug'] = 
str(config.get_setting('debug')) + environment['addon_version'] = str(config.get_addon_version()) + + except: + logger.error(traceback.format_exc()) + environment = {} + environment['log_size'] = '' + environment['cpu_usage'] = '' + environment['python_version'] = '' + environment['log_path'] = '' + environment['userdata_free'] = '' + environment['mem_total'] = '' + environment['machine'] = '' + environment['platform'] = '' + environment['videolab_path'] = '' + environment['num_version'] = '' + environment['os_name'] = '' + environment['video_db'] = '' + environment['userdata_path'] = '' + environment['log_size_bytes'] = '' + environment['name_version'] = '' + environment['language'] = '' + environment['mem_free'] = '' + environment['prod_model'] = '' + environment['proxy_active'] = '' + environment['architecture'] = '' + environment['os_release'] = '' + environment['videolab_free'] = '' + environment['kodi_buffer'] = '' + environment['kodi_bmode'] = '' + environment['kodi_rfactor'] = '' + environment['videolab_series'] = '' + environment['videolab_episodios'] = '' + environment['videolab_pelis'] = '' + environment['videolab_update'] = '' + environment['debug'] = '' + environment['addon_version'] = '' + environment['torrent_list'] = [] + environment['torrentcli_option'] = '' + environment['torrentcli_rar'] = '' + environment['torrentcli_lib_path'] = '' + environment['torrentcli_unrar'] = '' + environment['torrent_error'] = '' + + return environment + + +def list_env(environment={}): + if not environment: + environment = get_environment() + + if environment['debug'] == 'False': + logger.log_enable(True) + + logger.info('----------------------------------------------') + logger.info('Variables de entorno Alfa: ' + environment['addon_version'] + + ' Debug: ' + environment['debug']) + logger.info("----------------------------------------------") + + logger.info(environment['os_name'] + ' ' + environment['prod_model'] + ' ' + + environment['os_release'] + ' ' + environment['machine'] + ' ' + + environment['architecture'] + ' ' + environment['language']) + + logger.info('Kodi ' + environment['num_version'] + ', Vídeo: ' + + environment['video_db'] + ', Python ' + environment['python_version']) + + if environment['cpu_usage']: + logger.info('CPU: ' + environment['cpu_usage']) + + if environment['mem_total'] or environment['mem_free']: + logger.info('Memoria: Total: ' + environment['mem_total'] + ' MB / Disp.: ' + + environment['mem_free'] + ' MB / Buffers: ' + + str(int(environment['kodi_buffer']) * 3) + ' MB / Buffermode: ' + + environment['kodi_bmode'] + ' / Readfactor: ' + + environment['kodi_rfactor']) + + logger.info('Userdata: ' + environment['userdata_path'] + ' - Libre: ' + + environment['userdata_free'].replace('.', ',') + ' GB') + + logger.info('Videoteca: Series/Epis: ' + environment['videolab_series'] + '/' + + environment['videolab_episodios'] + ' - Pelis: ' + + environment['videolab_pelis'] + ' - Upd: ' + + environment['videolab_update'] + ' - Path: ' + + environment['videolab_path'] + ' - Libre: ' + + environment['videolab_free'].replace('.', ',') + ' GB') + + if environment['torrent_list']: + for x, cliente in enumerate(environment['torrent_list']): + if x == 0: + cliente_alt = cliente.copy() + del cliente_alt['Torrent_opt'] + logger.info('Torrent: Opt: %s, %s' % (str(cliente['Torrent_opt']), \ + str(cliente_alt).replace('{', '').replace('}', '') \ + .replace("'", '').replace('_', ' '))) + elif x == 1 and environment['torrent_error']: + logger.info('- ' + str(cliente).replace('{', 
'').replace('}', '') \
+                            .replace("'", '').replace('_', ' '))
+            else:
+                cliente_alt = cliente.copy()
+                del cliente_alt['Plug_in']
+                cliente_alt['Libre'] = cliente_alt['Libre'].replace('.', ',') + ' GB'
+                logger.info('- %s: %s' % (str(cliente['Plug_in']), str(cliente_alt) \
+                            .replace('{', '').replace('}', '').replace("'", '') \
+                            .replace('\\\\', '\\')))
+
+    logger.info('Proxy: ' + environment['proxy_active'])
+
+    logger.info('TAMAÑO del LOG: ' + environment['log_size'].replace('.', ',') + ' MB')
+    logger.info("----------------------------------------------")
+
+    if environment['debug'] == 'False':
+        logger.log_enable(False)
+
+    return environment
+
+
+def paint_env(item, environment={}):
+    from core.item import Item
+    from channelselector import get_thumb
+
+    if not environment:
+        environment = get_environment()
+    environment = list_env(environment)
+
+    itemlist = []
+
+    thumb = get_thumb("setting_0.png")
+
+    cabecera = """\
+    Muestra las [COLOR yellow]variables[/COLOR] del ecosistema de Kodi que pueden ser relevantes para el diagnóstico de problemas en Alfa:
+    - Versión de Alfa con Fix
+    - Debug Alfa: True/False
+    """
+    plataform = """\
+    Muestra los datos específicos de la [COLOR yellow]plataforma[/COLOR] en la que está alojado Kodi:
+    - Sistema Operativo
+    - Modelo (opt)
+    - Versión SO
+    - Procesador
+    - Arquitectura
+    - Idioma de Kodi
+    """
+    kodi = """\
+    Muestra los datos específicos de la instalación de [COLOR yellow]Kodi[/COLOR]:
+    - Versión de Kodi
+    - Base de Datos de Vídeo
+    - Versión de Python
+    """
+    cpu = """\
+    Muestra los datos de consumo actual de [COLOR yellow]CPU(s)[/COLOR]
+    """
+    memoria = """\
+    Muestra los datos del uso de [COLOR yellow]Memoria[/COLOR] del sistema:
+    - Memoria total
+    - Memoria disponible
+    - en [COLOR yellow]Advancedsettings.xml[/COLOR]
+      - Buffer de memoria
+        configurado:
+        para Kodi: 3 x valor de
+        <memorysize>
+      - Buffermode: cachea:
+        * Internet (0, 2)
+        * También local (1)
+        * No Buffer (3)
+      - Readfactor: readfactor *
+        avg bitrate vídeo
+    """
+    userdata = """\
+    Muestra los datos del "path" de [COLOR yellow]Userdata[/COLOR]:
+    - Path
+    - Espacio disponible
+    """
+    videoteca = """\
+    Muestra los datos de la [COLOR yellow]Videoteca[/COLOR]:
+    - Nº de Series y Episodios
+    - Nº de Películas
+    - Tipo de actualización
+    - Path
+    - Espacio disponible
+    """
+    torrent = """\
+    Muestra los datos generales del estado de [COLOR yellow]Torrent[/COLOR]:
+    - ID del cliente seleccionado
+    - Descompresión automática de archivos RAR?
+    - Está activo Libtorrent?
+    - Se descomprimen los RARs en background?
+    - Está operativo el módulo UnRAR? Qué plataforma? 
+ """ + torrent_error = """\ + Muestra los datos del error de importación de [COLOR yellow]Libtorrent[/COLOR] + """ + torrent_cliente = """\ + Muestra los datos de los [COLOR yellow]Clientes Torrent[/COLOR]: + - Nombre del Cliente + - Tamaño de buffer inicial + - Path de descargas + - Tamaño de buffer en Memoria + (opt, si no disco) + - Espacio disponible + """ + proxy = """\ + Muestra las direcciones de canales o servidores que necesitan [COLOR yellow]Proxy[/COLOR] + """ + log = """\ + Muestra el tamaño actual del [COLOR yellow]Log[/COLOR] + """ + reporte = """\ + Enlaza con la utilidad que permite el [COLOR yellow]envío del Log[/COLOR] de Kodi a través de un servicio Pastebin + """ + + itemlist.append(Item(channel=item.channel, title="[COLOR orange][B]Variables " + + "de entorno Alfa: %s Debug: %s[/B][/COLOR]" % + (environment['addon_version'], environment['debug']), + action="", plot=cabecera, thumbnail=thumb, folder=False)) + + itemlist.append(Item(channel=item.channel, title='[COLOR yellow]%s[/COLOR]' % + environment['os_name'] + ' ' + environment['prod_model'] + ' ' + + environment['os_release'] + ' ' + environment['machine'] + ' ' + + environment['architecture'] + ' ' + environment['language'], + action="", plot=plataform, thumbnail=thumb, folder=False)) + + itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Kodi [/COLOR]' + + environment['num_version'] + ', Vídeo: ' + environment[ + 'video_db'] + + ', Python ' + environment['python_version'], action="", + plot=kodi, thumbnail=thumb, folder=False)) + + if environment['cpu_usage']: + itemlist.append(Item(channel=item.channel, title='[COLOR yellow]CPU: [/COLOR]' + + environment['cpu_usage'], action="", plot=cpu, thumbnail=thumb, + folder=False)) + + if environment['mem_total'] or environment['mem_free']: + itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Memoria: [/COLOR]Total: ' + + environment['mem_total'] + ' MB / Disp.: ' + + environment['mem_free'] + ' MB / Buffers: ' + + str(int( + environment['kodi_buffer']) * 3) + ' MB / Buffermode: ' + + environment['kodi_bmode'] + ' / Readfactor: ' + + environment['kodi_rfactor'], + action="", plot=memoria, thumbnail=thumb, folder=False)) + + itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Userdata: [/COLOR]' + + environment['userdata_path'] + ' - Free: ' + environment[ + 'userdata_free'].replace('.', ',') + + ' GB', action="", plot=userdata, thumbnail=thumb, folder=False)) + + itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Videoteca: [/COLOR]Series/Epis: ' + + environment['videolab_series'] + '/' + environment[ + 'videolab_episodios'] + + ' - Pelis: ' + environment['videolab_pelis'] + ' - Upd: ' + + environment['videolab_update'] + ' - Path: ' + + environment['videolab_path'] + ' - Free: ' + environment[ + 'videolab_free'].replace('.', ',') + + ' GB', action="", plot=videoteca, thumbnail=thumb, folder=False)) + + if environment['torrent_list']: + for x, cliente in enumerate(environment['torrent_list']): + if x == 0: + cliente_alt = cliente.copy() + del cliente_alt['Torrent_opt'] + itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Torrent: [/COLOR]Opt: %s, %s' \ + % (str(cliente['Torrent_opt']), + str(cliente_alt).replace('{', '').replace('}', '') \ + .replace("'", '').replace('_', ' ')), action="", + plot=torrent, thumbnail=thumb, + folder=False)) + elif x == 1 and environment['torrent_error']: + itemlist.append(Item(channel=item.channel, + title='[COLOR magenta]- %s[/COLOR]' % str(cliente).replace('{', 
'').replace('}', + '') \ + .replace("'", '').replace('_', ' '), action="", plot=torrent_error, + thumbnail=thumb, + folder=False)) + else: + cliente_alt = cliente.copy() + del cliente_alt['Plug_in'] + cliente_alt['Libre'] = cliente_alt['Libre'].replace('.', ',') + ' GB' + itemlist.append(Item(channel=item.channel, title='[COLOR yellow]- %s: [/COLOR]: %s' % + (str(cliente['Plug_in']), + str(cliente_alt).replace('{', '').replace('}', '') \ + .replace("'", '').replace('\\\\', '\\')), action="", + plot=torrent_cliente, + thumbnail=thumb, folder=False)) + + itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Proxy: [/COLOR]' + + environment['proxy_active'], action="", plot=proxy, + thumbnail=thumb, + folder=False)) + + itemlist.append(Item(channel=item.channel, title='[COLOR yellow]TAMAÑO del LOG: [/COLOR]' + + environment['log_size'].replace('.', ',') + ' MB', action="", + plot=log, thumbnail=thumb, + folder=False)) + + itemlist.append(Item(title="[COLOR hotpink][B]==> Reportar un fallo[/B][/COLOR]", + channel="setting", action="report_menu", category='Configuración', + unify=False, plot=reporte, thumbnail=get_thumb("error.png"))) + + return (itemlist, environment) \ No newline at end of file diff --git a/platformcode/keymaptools.py b/platformcode/keymaptools.py index 5997427c..3f2ae5eb 100644 --- a/platformcode/keymaptools.py +++ b/platformcode/keymaptools.py @@ -1,5 +1,10 @@ # -*- coding: utf-8 -*- +from builtins import map +#from builtins import str +import sys +PY3 = False +if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int from threading import Timer import xbmc @@ -113,7 +118,7 @@ class Main(xbmcgui.WindowXMLDialog): if config.get_platform(True)['num_version'] < 18: self.setCoordinateResolution(2) - for menuentry in MAIN_MENU.keys(): + for menuentry in list(MAIN_MENU.keys()): item = xbmcgui.ListItem(MAIN_MENU[menuentry]["label"]) item.setProperty("thumb", str(MAIN_MENU[menuentry]["icon"])) item.setProperty("identifier", str(menuentry)) diff --git a/platformcode/launcher.py b/platformcode/launcher.py index c89d7a7d..71a3df92 100644 --- a/platformcode/launcher.py +++ b/platformcode/launcher.py @@ -3,24 +3,33 @@ # XBMC Launcher (xbmc / kodi) # ------------------------------------------------------------ +#from future import standard_library +#standard_library.install_aliases() +#from builtins import str +import sys +PY3 = False +if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int + +if PY3: + import urllib.error as urllib2 # Es muy lento en PY2. En PY3 es nativo +else: + import urllib2 # Usamos el nativo de PY2 que es más rápido + import os import sys - - -import urllib2 +import time from core import channeltools from core import scrapertools from core import servertools -from core import trakt_tools from core import videolibrarytools +from core import trakt_tools from core.item import Item from platformcode import config, logger from platformcode import platformtools from platformcode.logger import WebErrorException - def start(): """ Primera funcion que se ejecuta al entrar en el plugin. 
Dentro de esta funcion deberian ir todas las llamadas a las @@ -30,19 +39,15 @@ def start(): #config.set_setting('show_once', True) # Test if all the required directories are created config.verify_directories_created() - # controlla se l'utente ha qualche problema di connessione # se lo ha: non lo fa entrare nell'addon # se ha problemi di DNS avvia ma lascia entrare # se tutto ok: entra nell'addon - from specials import resolverdns + from specials.checkhost import test_conn import threading threading.Thread(target=test_conn, args=(True, not config.get_setting('resolver_dns'), True, [], [], True)).start() - # check_adsl = test_conn(is_exit = True, check_dns = True, view_msg = True, - # lst_urls = [], lst_site_check_dns = [], in_addon = True) - - + def run(item=None): logger.info() if not item: @@ -78,9 +83,6 @@ def run(item=None): else: item = Item(channel="channelselector", action="getmainlist", viewmode="movie") if not config.get_setting('show_once'): - if not config.dev_mode(): - from platformcode import updater - updater.calcCurrHash() from platformcode import xbmc_videolibrary xbmc_videolibrary.ask_set_content(1, config.get_setting('videolibrary_kodi_force')) config.set_setting('show_once', True) @@ -88,9 +90,12 @@ def run(item=None): logger.info(item.tostring()) try: + if not config.get_setting('tmdb_active'): + config.set_setting('tmdb_active', True) + # If item has no action, stops here if item.action == "": - logger.info("Item sin accion") + logger.info("Item without action") return # Action for main menu in channelselector @@ -145,8 +150,12 @@ def run(item=None): if xbmc.getCondVisibility('system.platform.linux') and xbmc.getCondVisibility('system.platform.android'): # android xbmc.executebuiltin('StartAndroidActivity("", "android.intent.action.VIEW", "", "%s")' % (item.url)) else: - short = urllib2.urlopen( - 'https://u.nu/api.php?action=shorturl&format=simple&url=' + item.url).read() + try: + import urllib.request as urllib + except ImportError: + import urllib + short = urllib.urlopen( + 'https://u.nu/api.php?action=shorturl&format=simple&url=' + item.url).read().decode('utf-8') platformtools.dialog_ok(config.get_localized_string(20000), config.get_localized_string(70740) % short) # Action in certain channel specified in "action" and "channel" parameters @@ -169,28 +178,28 @@ def run(item=None): # Checks if channel exists if os.path.isfile(os.path.join(config.get_runtime_path(), 'channels', item.channel + ".py")): - CHANNELS = 'channels' + CHANNELS = 'channels' elif os.path.isfile(os.path.join(config.get_runtime_path(), 'channels', 'porn', item.channel + ".py")): CHANNELS = 'channels.porn' else: - CHANNELS ='specials' + CHANNELS = 'specials' if CHANNELS != 'channels.porn': channel_file = os.path.join(config.get_runtime_path(), CHANNELS, item.channel + ".py") else: - channel_file = os.path.join(config.get_runtime_path(), 'channels', 'porn', item.channel + ".py") + channel_file = os.path.join(config.get_runtime_path(), 'channels', 'porn', + item.channel + ".py") - logger.info("channel_file= " + channel_file + ' - ' + CHANNELS +' - ' + item.channel) + logger.info("channel_file= " + channel_file + ' - ' + CHANNELS + ' - ' + item.channel) channel = None if os.path.exists(channel_file): try: - channel = __import__(CHANNELS + item.channel, None, None, [CHANNELS + item.channel]) + channel = __import__('%s.%s' % (CHANNELS, item.channel), None, + None, ['%s.%s' % (CHANNELS, item.channel)]) except ImportError: - importer = "import " + CHANNELS + "." 
+ item.channel + " as channel " - - exec(importer) + exec("import " + CHANNELS + "." + item.channel + " as channel") logger.info("Running channel %s | %s" % (channel.__name__, channel.__file__)) @@ -270,14 +279,22 @@ def run(item=None): # Special action for searching, first asks for the words then call the "search" function elif item.action == "search": logger.info("item.action=%s" % item.action.upper()) - if channeltools.get_channel_setting('last_search', 'search'): - last_search = channeltools.get_channel_setting('Last_searched', 'search', '') - else: - last_search = '' + + # last_search = "" + # last_search_active = config.get_setting("last_search", "search") + # if last_search_active: + # try: + # current_saved_searches_list = list(config.get_setting("saved_searches_list", "search")) + # last_search = current_saved_searches_list[0] + # except: + # pass + + last_search = channeltools.get_channel_setting('Last_searched', 'search', '') + tecleado = platformtools.dialog_input(last_search) + if tecleado is not None: channeltools.set_channel_setting('Last_searched', tecleado, 'search') - if 'search' in dir(channel): itemlist = channel.search(item, tecleado) else: @@ -308,26 +325,26 @@ def run(item=None): platformtools.render_items(itemlist, item) - except urllib2.URLError, e: + except urllib2.URLError as e: import traceback logger.error(traceback.format_exc()) # Grab inner and third party errors if hasattr(e, 'reason'): - logger.error("Razon del error, codigo: %s | Razon: %s" % (str(e.reason[0]), str(e.reason[1]))) + logger.error("Reason for the error, code: %s | Reason: %s" % (str(e.reason[0]), str(e.reason[1]))) texto = config.get_localized_string(30050) # "No se puede conectar con el sitio web" platformtools.dialog_ok(config.get_localized_string(20000), texto) # Grab server response errors elif hasattr(e, 'code'): - logger.error("Codigo de error HTTP : %d" % e.code) + logger.error("HTTP error code: %d" % e.code) # "El sitio web no funciona correctamente (error http %d)" platformtools.dialog_ok(config.get_localized_string(20000), config.get_localized_string(30051) % e.code) - except WebErrorException, e: + except WebErrorException as e: import traceback logger.error(traceback.format_exc()) - patron = 'File "' + os.path.join(config.get_runtime_path(), CHANNELS, "").replace("\\", "\\\\") + '([^.]+)\.py"' + patron = 'File "' + os.path.join(config.get_runtime_path(), "channels", "").replace("\\", "\\\\") + '([^.]+)\.py"' canal = scrapertools.find_single_match(traceback.format_exc(), patron) platformtools.dialog_ok( @@ -382,13 +399,19 @@ def reorder_itemlist(itemlist): [config.get_localized_string(60336), '[D]']] for item in itemlist: - old_title = unicode(item.title, "utf8").lower().encode("utf8") + if not PY3: + old_title = unicode(item.title, "utf8").lower().encode("utf8") + else: + old_title = item.title.lower() for before, after in to_change: if before in item.title: item.title = item.title.replace(before, after) break - new_title = unicode(item.title, "utf8").lower().encode("utf8") + if not PY3: + new_title = unicode(item.title, "utf8").lower().encode("utf8") + else: + new_title = item.title.lower() if old_title != new_title: mod_list.append(item) modified += 1 @@ -401,7 +424,7 @@ def reorder_itemlist(itemlist): new_list.extend(mod_list) new_list.extend(not_mod_list) - logger.info("Titulos modificados:%i | No modificados:%i" % (modified, not_modified)) + logger.info("Modified Titles:%i |Unmodified:%i" % (modified, not_modified)) if len(new_list) == 0: new_list = itemlist diff --git 
a/platformcode/logger.py b/platformcode/logger.py index 447fe8a1..f5308358 100644 --- a/platformcode/logger.py +++ b/platformcode/logger.py @@ -6,9 +6,12 @@ import inspect import xbmc - from platformcode import config +import sys +PY3 = False +if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int + loggeractive = (config.get_setting("debug") == True) @@ -18,13 +21,19 @@ def log_enable(active): def encode_log(message=""): + # Unicode to utf8 - if type(message) == unicode: + if isinstance(message, unicode): message = message.encode("utf8") + if PY3: message = message.decode("utf8") # All encodings to utf8 - elif type(message) == str: + elif not PY3 and isinstance(message, str): message = unicode(message, "utf8", errors="replace").encode("utf8") + + # Bytes encodings to utf8 + elif PY3 and isinstance(message, bytes): + message = message.decode("utf8") # Objects to string else: @@ -34,6 +43,17 @@ def encode_log(message=""): def get_caller(message=None): + + if message and isinstance(message, unicode): + message = message.encode("utf8") + if PY3: message = message.decode("utf8") + elif message and PY3 and isinstance(message, bytes): + message = message.decode("utf8") + elif message and not PY3: + message = unicode(message, "utf8", errors="replace").encode("utf8") + elif message: + message = str(message) + module = inspect.getmodule(inspect.currentframe().f_back.f_back) if module == None: diff --git a/platformcode/mct.py b/platformcode/mct.py index 2c2fe21d..b7d00b44 100644 --- a/platformcode/mct.py +++ b/platformcode/mct.py @@ -3,53 +3,88 @@ # MCT - Mini Cliente Torrent # ------------------------------------------------------------ -import os -import shutil -import tempfile -import urllib +from __future__ import division +from future import standard_library +standard_library.install_aliases() +from builtins import hex +#from builtins import str +import sys +PY3 = False +if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int +from builtins import range +from past.utils import old_div -import urllib2 +import os +import re +import tempfile +import urllib.request, urllib.parse, urllib.error +import platform +import traceback try: - from python_libtorrent import get_libtorrent, get_platform - - lt = get_libtorrent() -except Exception, e: - import libtorrent as lt - -import xbmc -import xbmcgui + import xbmc + import xbmcgui +except: + pass from platformcode import config -from core import httptools +LIBTORRENT_PATH = config.get_setting("libtorrent_path", server="torrent", default='') + +from servers import torrent as torr +lt, e, e1, e2 = torr.import_libtorrent(LIBTORRENT_PATH) + from core import scrapertools from core import filetools +from core import httptools + +try: + BUFFER = int(config.get_setting("mct_buffer", server="torrent", default="50")) +except: + BUFFER = 50 + config.set_setting("mct_buffer", "50", server="torrent") + +try: + DOWNLOAD_PATH = '' + DOWNLOAD_PATH = xbmc.translatePath(config.get_setting("mct_download_path", \ + server="torrent", default=config.get_setting("downloadpath"))) +except: + DOWNLOAD_PATH = config.get_setting("mct_download_path", server="torrent", default=config.get_setting("downloadpath")) +if not config.get_setting("mct_download_path", server="torrent") and DOWNLOAD_PATH: + config.set_setting("mct_download_path", DOWNLOAD_PATH, server="torrent") +if not DOWNLOAD_PATH: + try: + DOWNLOAD_PATH = str(xbmc.translatePath(os.path.join(config.get_data_path(), 'downloads'))) + config.set_setting("mct_download_path", 
os.path.join(config.get_data_path(), 'downloads'), server="torrent") + except: + DOWNLOAD_PATH = os.path.join(config.get_data_path(), 'downloads') + config.set_setting("mct_download_path", DOWNLOAD_PATH, server="torrent") + +BACKGROUND = config.get_setting("mct_background_download", server="torrent", default=True) +RAR = config.get_setting("mct_rar_unpack", server="torrent", default=True) +DOWNLOAD_LIMIT = config.get_setting("mct_download_limit", server="torrent", default="") +if DOWNLOAD_LIMIT: + try: + DOWNLOAD_LIMIT = int(DOWNLOAD_LIMIT) * 1024 + except: + DOWNLOAD_LIMIT = 0 +else: + DOWNLOAD_LIMIT = 0 +UPLOAD_LIMIT = 100 * 1024 +msg_header = 'Alfa MCT Cliente Torrent' -def play(url, xlistitem={}, is_view=None, subtitle="", item=None): +def play(url, xlistitem={}, is_view=None, subtitle="", password="", item=None): allocate = True try: - import platform - xbmc.log("XXX KODI XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") - xbmc.log("OS platform: %s %s" % (platform.system(), platform.release())) - xbmc.log("xbmc/kodi version: %s" % xbmc.getInfoLabel("System.BuildVersion")) - xbmc_version = int(xbmc.getInfoLabel("System.BuildVersion")[:2]) - xbmc.log("xbmc/kodi version number: %s" % xbmc_version) - xbmc.log("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX KODI XXXX") - - _platform = get_platform() - if str(_platform['system']) in ["android_armv7", "linux_armv6", "linux_armv7"]: - allocate = False - # -- log ------------------------------------------------ - xbmc.log("XXX platform XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") - xbmc.log("_platform['system']: %s" % _platform['system']) - xbmc.log("allocate: %s" % allocate) - xbmc.log("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX platform XXXX") - # -- ---------------------------------------------------- + log("XXX KODI XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") + log("OS platform: %s %s" % (platform.system(),platform.release())) + log("xbmc/kodi version: %s" % xbmc.getInfoLabel( "System.BuildVersion" )) + xbmc_version = int(xbmc.getInfoLabel( "System.BuildVersion" )[:2]) + log("Architecture: %s %s" % (str(platform.machine()), \ + str(sys.maxsize > 2 ** 32 and "64-bit" or "32-bit"))) + log("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX KODI & platform XXXX") except: - pass - - DOWNLOAD_PATH = config.get_setting("downloadpath") + log(traceback.format_exc()) # -- adfly: ------------------------------------ if url.startswith("http://adf.ly/"): @@ -58,34 +93,49 @@ def play(url, xlistitem={}, is_view=None, subtitle="", item=None): url = decode_adfly(data) except: ddd = xbmcgui.Dialog() - ddd.ok("alfa-MCT: Sin soporte adf.ly", - "El script no tiene soporte para el acortador de urls adf.ly.", "", "url: " + url) + ddd.ok( msg_header + ": Sin soporte adf.ly", "El script no tiene soporte para el acortador de urls adf.ly.", "", "url: " + url ) return + """ # -- Necesario para algunas webs ---------------------------- if not url.endswith(".torrent") and not url.startswith("magnet"): + #t_file = httptools.downloadpage(url, follow_redirects=False).headers["location"] t_file = scrapertools.get_header_from_response(url, header_to_get="location") - if len(t_file) > 0: - url = t_file - t_file = scrapertools.get_header_from_response(url, header_to_get="location") - if len(t_file) > 0: - url = t_file + if t_file: + if len(t_file) > 0: + url = t_file + t_file = httptools.downloadpage(url, follow_redirects=False).headers["location"] + if len(t_file) > 0: + url = t_file + """ # -- Crear dos 
carpetas en descargas para los archivos ------ - save_path_videos = os.path.join(DOWNLOAD_PATH, "torrent-videos") - save_path_torrents = os.path.join(DOWNLOAD_PATH, "torrent-torrents") - if not os.path.exists(save_path_torrents): os.mkdir(save_path_torrents) + save_path_videos = os.path.join( DOWNLOAD_PATH , "MCT-torrent-videos" ) + save_path_torrents = os.path.join( DOWNLOAD_PATH , "MCT-torrents" ) + if not os.path.exists( save_path_torrents ): os.mkdir(save_path_torrents) + video_path = '' + global bkg_user + bkg_user = False + ses_lt = False + if item: + if item.contentType == 'movie': + video_path = '%s-%s' % (item.contentTitle, item.infoLabels['tmdb_id']) + else: + video_path = '%s-%sx%s-%s' % (item.contentSerieName, item.contentSeason, \ + item.contentEpisodeNumber, item.infoLabels['tmdb_id']) + item.rar_path = video_path # -- Usar - archivo torrent desde web, magnet o HD --------- if not os.path.isfile(url) and not url.startswith("magnet"): # -- http - crear archivo torrent ----------------------- data = url_get(url) + # -- El nombre del torrent será el que contiene en los -- # -- datos. - - re_name = urllib.unquote(scrapertools.scrapertools.find_single_match(data, ':name\d+:(.*?)\d+:')) - torrent_file = filetools.join(save_path_torrents, filetools.encode(re_name + '.torrent')) + re_name = urllib.parse.unquote( scrapertools.find_single_match(data,':name\d+:(.*?)\d+:') ) + torrent_file = os.path.join(save_path_torrents, encode(re_name + '.torrent')) - f = open(torrent_file, 'wb') + f = open(torrent_file,'wb') f.write(data) f.close() elif os.path.isfile(url): @@ -97,17 +147,22 @@ def play(url, xlistitem={}, is_view=None, subtitle="", item=None): # ----------------------------------------------------------- # -- MCT - MiniClienteTorrent ------------------------------- - ses = lt.session() + try: + log("XXX libtorrent pathname: %s" % str(LIBTORRENT_PATH)) + ses = lt.session() + except Exception as e: + do = xbmcgui.Dialog() + e = e1 or e2 + do.ok('ERROR en el cliente MCT Libtorrent', 'Módulo no encontrado o incompatible con el dispositivo.', + 'Reporte el fallo adjuntando un "log".', str(e)) + return + + log("XXX libtorrent version: %s" % lt.version) + log("##### Torrent file: %s ##" % torrent_file) - # -- log ---------------------------------------------------- - xbmc.log("### Init session ########") - xbmc.log(lt.version) - xbmc.log("#########################") - # -- -------------------------------------------------------- - - ses.add_dht_router("router.bittorrent.com", 6881) - ses.add_dht_router("router.utorrent.com", 6881) - ses.add_dht_router("dht.transmissionbt.com", 6881) + ses.add_dht_router("router.bittorrent.com",6881) + ses.add_dht_router("router.utorrent.com",6881) + ses.add_dht_router("dht.transmissionbt.com",6881) trackers = [ "udp://tracker.openbittorrent.com:80/announce", @@ -136,8 +191,7 @@ def play(url, xlistitem={}, is_view=None, subtitle="", item=None): if torrent_file.startswith("magnet"): try: import zlib - btih = hex(zlib.crc32( - scrapertools.scrapertools.find_single_match(torrent_file, 'magnet:\?xt=urn:(?:[A-z0-9:]+|)([A-z0-9]{32})')) & 0xffffffff) + btih = hex(zlib.crc32(scrapertools.find_single_match(torrent_file, 'magnet:\?xt=urn:(?:[A-z0-9:]+|)([A-z0-9]{32})')) & 0xffffffff) files = [f for f in os.listdir(save_path_torrents) if os.path.isfile(os.path.join(save_path_torrents, f))] for file in files: if btih in os.path.basename(file): @@ -149,42 +203,45 @@ def play(url, xlistitem={}, is_view=None, subtitle="", item=None): try: tempdir = tempfile.mkdtemp()
except IOError: - tempdir = os.path.join(save_path_torrents, "temp") + tempdir = os.path.join(save_path_torrents , "temp") if not os.path.exists(tempdir): os.mkdir(tempdir) params = { 'save_path': tempdir, 'trackers': trackers, - 'storage_mode': lt.storage_mode_t.storage_mode_allocate, + 'storage_mode': lt.storage_mode_t.storage_mode_allocate + } + """ + , 'paused': False, 'auto_managed': True, 'duplicate_is_error': True - } + """ h = lt.add_magnet_uri(ses, torrent_file, params) dp = xbmcgui.DialogProgress() - dp.create('alfa-MCT') + dp.create(msg_header) while not h.has_metadata(): message, porcent, msg_file, s, download = getProgress(h, "Creando torrent desde magnet") dp.update(porcent, message, msg_file) if s.state == 1: download = 1 if dp.iscanceled(): dp.close() - remove_files(download, torrent_file, video_file, ses, h) + remove_files( download, torrent_file, video_file, ses, h ) return h.force_dht_announce() xbmc.sleep(1000) dp.close() info = h.get_torrent_info() - data = lt.bencode(lt.create_torrent(info).generate()) + data = lt.bencode( lt.create_torrent(info).generate() ) - torrent_file = os.path.join(save_path_torrents, - unicode(info.name() + "-" + btih, "'utf-8'", errors="replace") + ".torrent") - f = open(torrent_file, 'wb') + #torrent_file = os.path.join(save_path_torrents, unicode(info.name()+"-"+btih, "'utf-8'", errors="replace") + ".torrent") + torrent_file = os.path.join(save_path_torrents, info.name()+"-"+btih+ ".torrent") + f = open(torrent_file,'wb') f.write(data) f.close() ses.remove_torrent(h) - shutil.rmtree(tempdir) + filetools.rmdirtree(tempdir) # ----------------------------------------------------------- # -- Archivos torrent --------------------------------------- @@ -194,30 +251,64 @@ def play(url, xlistitem={}, is_view=None, subtitle="", item=None): # -- El más gordo o uno de los más gordo se entiende que es - # -- el vídeo o es el vídeo que se usará como referencia - # -- para el tipo de archivo - - xbmc.log("##### Archivos ## %s ##" % len(info.files())) + log("##### Archivos ## %s ##" % len(info.files())) _index_file, _video_file, _size_file = get_video_file(info) # -- Prioritarizar/Seleccionar archivo----------------------- - _index, video_file, video_size, len_files = get_video_files_sizes(info) + _index, video_file, video_size, len_files = get_video_files_sizes( info ) if len_files == 0: dp = xbmcgui.Dialog().ok("No se puede reproducir", "El torrent no contiene ningún archivo de vídeo") - if _index == -1: - _index = _index_file + if _index < 0: + log("##### parts = %s #########" % str(video_file)) + log("##### video_size = %s #########" % str(video_size)) + log("##### _index = %s #########" % str(_index)) + #if _index == -1: + # _index = _index_file + # video_size = _size_file video_file = _video_file - video_size = _size_file - - _video_file_ext = os.path.splitext(_video_file)[1] - xbmc.log("##### _video_file_ext ## %s ##" % _video_file_ext) - if (_video_file_ext == ".avi" or _video_file_ext == ".mp4") and allocate: - xbmc.log("##### storage_mode_t.storage_mode_allocate (" + _video_file_ext + ") #####") - h = ses.add_torrent({'ti': info, 'save_path': save_path_videos, 'trackers': trackers, - 'storage_mode': lt.storage_mode_t.storage_mode_allocate}) else: - xbmc.log("##### storage_mode_t.storage_mode_sparse (" + _video_file_ext + ") #####") - h = ses.add_torrent({'ti': info, 'save_path': save_path_videos, 'trackers': trackers, - 'storage_mode': lt.storage_mode_t.storage_mode_sparse}) + log("##### video_size = %s #########" % str(video_size)) + log("##### 
_index = %s #########" % str(_index)) + _video_file_ext = os.path.splitext( _video_file )[1] + log("##### _video_file ## %s ##" % str(_video_file)) + log("##### _video_file_ext ## %s ##" % _video_file_ext) + + dp_cerrado = True + rar = False + global extracted_rar + extracted_rar = False + global erase_file_path + erase_file_path = '' + + if _video_file_ext == ".rar": + rar = True + filename = video_file + if "/" in filename: + filename = filename.split("/")[1] + if RAR and BACKGROUND: + xbmcgui.Dialog().notification("Encontrado archivo .RAR de %.2f MB" % (video_size / 1048576.0), + "Puedes realizar otras tareas en Kodi mientrastanto. " + \ + "Te informaremos...", time=10000) + dialog = True + else: + dialog = xbmcgui.Dialog().yesno("Encontrado archivo .RAR...", "Nombre: %s" % filename, + "Tamaño: %.2f MB" % (video_size / 1048576.0), + "¿Descargar en segundo plano? Cancelar en menú Descargas") + if dialog: + dp_cerrado = False + dp = xbmcgui.DialogProgressBG() + dp.create(msg_header) + + if (_video_file_ext == ".avi" or _video_file_ext == ".mp4" or _video_file_ext == ".mkv") and allocate: + log("##### storage_mode_t.storage_mode_allocate ("+_video_file_ext+") #####") + h = ses.add_torrent( { 'ti':info, 'save_path': save_path_videos, 'trackers':trackers, 'storage_mode':lt.storage_mode_t.storage_mode_allocate } ) + else: + log("##### storage_mode_t.storage_mode_sparse ("+_video_file_ext+") #####") + h = ses.add_torrent( { 'ti':info, 'save_path': save_path_videos, 'trackers':trackers, 'storage_mode':lt.storage_mode_t.storage_mode_sparse } ) allocate = True + global ses_lt + ses_lt = True # ----------------------------------------------------------- # -- Descarga secuencial - trozo 1, trozo 2, ... ------------ @@ -225,6 +316,7 @@ def play(url, xlistitem={}, is_view=None, subtitle="", item=None): h.force_reannounce() h.force_dht_announce() + h.set_upload_limit(UPLOAD_LIMIT) # -- Inicio de variables para 'pause' automático cuando el - # -- el vídeo se acerca a una pieza sin completar - @@ -232,37 +324,48 @@ def play(url, xlistitem={}, is_view=None, subtitle="", item=None): is_greater_num_pieces_plus = False is_greater_num_pieces_pause = False - porcent4first_pieces = int(video_size * 0.000000005) - if porcent4first_pieces < 10: porcent4first_pieces = 10 + porcent4first_pieces = int( video_size * 0.000000005 ) + porcent4first_pieces = BUFFER + if porcent4first_pieces < BUFFER: porcent4first_pieces = BUFFER if porcent4first_pieces > 100: porcent4first_pieces = 100 - porcent4last_pieces = int(porcent4first_pieces / 2) + porcent4last_pieces = int(old_div(porcent4first_pieces,2)) - num_pieces_to_resume = int(video_size * 0.0000000025) - if num_pieces_to_resume < 5: num_pieces_to_resume = 5 + num_pieces_to_resume = int( video_size * 0.0000000025 ) + if num_pieces_to_resume < 10: num_pieces_to_resume = 10 if num_pieces_to_resume > 25: num_pieces_to_resume = 25 - xbmc.log("##### porcent4first_pieces ## %s ##" % porcent4first_pieces) - xbmc.log("##### porcent4last_pieces ## %s ##" % porcent4last_pieces) - xbmc.log("##### num_pieces_to_resume ## %s ##" % num_pieces_to_resume) + log("##### porcent4first_pieces ## %s ##" % porcent4first_pieces) + log("##### porcent4last_pieces ## %s ##" % porcent4last_pieces) + log("##### num_pieces_to_resume ## %s ##" % num_pieces_to_resume) # -- Prioritarizar o seleccionar las piezas del archivo que - # -- se desea reproducir con 'file_priorities' - piece_set = set_priority_pieces(h, _index, video_file, video_size, porcent4first_pieces, porcent4last_pieces, allocate) + 
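# ------------------------------------------------------------------------
# Illustrative sketch, not part of the patch: the buffering rule the loops
# below implement, reduced to pure Python (no libtorrent). Playback starts
# once roughly BUFFER pieces are in, and auto-pauses when the playhead gets
# within NUM_PIECES_TO_RESUME pieces of the last contiguous completed piece.
# The helper names are hypothetical; the values mirror the defaults above.
BUFFER = 50                  # mct_buffer setting, in pieces
NUM_PIECES_TO_RESUME = 10    # safety margin ahead of the playhead

def contiguous_pieces(have):
    # Number of leading pieces downloaded without gaps.
    count = 0
    for piece_done in have:
        if not piece_done:
            break
        count += 1
    return count

def can_start(have):
    return contiguous_pieces(have) >= BUFFER

def should_pause(current_piece, have):
    return current_piece > contiguous_pieces(have) - NUM_PIECES_TO_RESUME

# Example: 60 contiguous pieces out of 200, playhead on piece 40 -> keep playing.
have = [True] * 60 + [False] * 140
assert can_start(have) and not should_pause(40, have)
# ------------------------------------------------------------------------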
global tot_piece_set + tot_piece_set = len(piece_set) + log("##### total piece_set ## %s ##" % len(piece_set)) - # -- Crear diálogo de progreso para el primer bucle --------- - dp = xbmcgui.DialogProgress() - dp.create('alfa-MCT') + if dp_cerrado: + # -- Crear diálogo de progreso para el primer bucle --------- + dp = xbmcgui.DialogProgress() + dp.create(msg_header) _pieces_info = {} + ren_video_file = os.path.join( save_path_videos, video_file ) # -- Doble bucle anidado ------------------------------------ - # -- Descarga - Primer bucle - + # -- Descarga - Primer bucle while not h.is_seed(): s = h.status() - xbmc.sleep(100) - + xbmc.sleep(1000) + if not dp_cerrado and not BACKGROUND: + dp.close() + dp_cerrado = True + dp = xbmcgui.DialogProgress() + dp.create(msg_header) + # -- Recuperar los datos del progreso ------------------- message, porcent, msg_file, s, download = getProgress(h, video_file, _pf=_pieces_info) @@ -271,45 +374,73 @@ def play(url, xlistitem={}, is_view=None, subtitle="", item=None): # -- descargados para el diálogo de 'remove_files' - if s.state == 1: download = 1 + if (s.state == 5 or s.state == 4) and rar: + # -- Borrar sesión para que libere los archivos y se pueda renombrar la carpeta ------- + ses.pause() + #video_file, rar, play_file = extract_files(video_file, save_path_videos, password, dp, item=item) + video_file, rar, play_file, erase_path = torr.extract_files(video_file, \ + save_path_videos, password, dp, item=item, torr_client='MCT') # ... extraemos el vídeo del RAR + dp.close() + + erase_file_path = erase_path + ren_video_file = erase_file_path + extracted_rar = rar + if not play_file: + remove_files( download, torrent_file, erase_file_path, ses, h, ren_video_file ) + return + is_view = "Ok" + save_path_videos = play_file + xbmc.sleep(3000) + # -- Player - play -------------------------------------- # -- Comprobar si se han completado las piezas para el - # -- inicio del vídeo - first_pieces = True - + #if not extracted_rar: _c = 0 - for i in range(piece_set[0], piece_set[porcent4first_pieces]): + for i in range( piece_set[0], piece_set[porcent4first_pieces] ): first_pieces &= h.have_piece(i) - if h.have_piece(i): _c += 1 - _pieces_info = {'current': 0, 'continuous': "%s/%s" % (_c, porcent4first_pieces), 'continuous2': "", - 'have': h.status().num_pieces, 'len': len(piece_set)} + if h.have_piece(i): _c+= 1 + _pieces_info = {'current': 0, 'continuous': "%s/%s" % (_c, porcent4first_pieces), \ + 'continuous2': "", 'have': h.status().num_pieces, 'len': len(piece_set)} last_pieces = True if not allocate: - _c = len(piece_set) - 1; - _cc = 0 - for i in range(len(piece_set) - porcent4last_pieces, len(piece_set)): + _c = len(piece_set)-1; _cc = 0 + for i in range(len(piece_set)-porcent4last_pieces, len(piece_set)): last_pieces &= h.have_piece(i) - if h.have_piece(i): _c -= 1; _cc += 1 + if h.have_piece(i): _c-= 1; _cc+=1 _pieces_info['continuous2'] = "[%s/%s] " % (_cc, porcent4last_pieces) - if is_view != "Ok" and first_pieces and last_pieces: + if is_view != "Ok" and h.status().num_pieces >= BUFFER and not rar and not bkg_user \ + or ((s.state == 5 or s.state == 4) and bkg_user): _pieces_info['continuous2'] = "" - xbmc.log("##### porcent [%.2f%%]" % (s.progress * 100)) - is_view = "Ok" + log("##### porcent [%.2f%%]" % (s.progress * 100)) dp.close() + dp_cerrado = True + if not bkg_user: + is_view = "Ok" + else: + remove_files( download, torrent_file, video_file, ses, h, ren_video_file ) + return + if is_view == "Ok": + # -- Esperando a que termine otra 
reproducción -------------------------- + while xbmc.Player().isPlaying(): + xbmc.sleep(3000) + # -- Player - Ver el vídeo -------------------------- - playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO) + playlist = xbmc.PlayList( xbmc.PLAYLIST_VIDEO ) playlist.clear() - ren_video_file = os.path.join(save_path_videos, video_file) + ren_video_file = os.path.join( save_path_videos, video_file ) try: - playlist.add(ren_video_file, xlistitem) + playlist.add( ren_video_file, xlistitem ) except: - playlist.add(ren_video_file) + playlist.add( ren_video_file ) if xbmc_version < 17: - player = play_video(xbmc.PLAYER_CORE_AUTO) + player = play_video( xbmc.PLAYER_CORE_AUTO ) else: player = play_video() player.play(playlist) @@ -333,13 +464,11 @@ def play(url, xlistitem={}, is_view=None, subtitle="", item=None): _sub = False # -- Segundo bucle - Player - Control de eventos ---- + bkg_auto = True + log("##### PLAY %s" % (h.status().num_pieces)) + if item: torr.mark_auto_as_watched(item) + if ses_lt: h.set_download_limit(DOWNLOAD_LIMIT) while player.isPlaying(): - xbmc.sleep(100) - - # -- Añadir subTítulos - if subtitle != "" and not _sub: - _sub = True - player.setSubtitles(subtitle) # -- Impedir que kodi haga 'resume' al inicio --- # -- de la descarga de un archivo conocido - @@ -350,20 +479,30 @@ def play(url, xlistitem={}, is_view=None, subtitle="", item=None): # -- Control 'pause' automático - continuous_pieces = count_completed_continuous_pieces(h, piece_set) - if xbmc.Player().isPlaying(): + if xbmc.Player().isPlaying() and not rar: # -- Porcentage del progreso del vídeo ------ + # -- En kodi 18.x se debe controlar - + # -- ZeroDivisionError: float division by - + # -- zero - player_getTime = player.getTime() player_getTotalTime = player.getTotalTime() - porcent_time = player_getTime / player_getTotalTime * 100 + try: porcent_time = old_div(player_getTime, player_getTotalTime) * 100 + except: porcent_time = 0 # -- Pieza que se está reproduciendo -------- - current_piece = int(porcent_time / 100 * len(piece_set)) + # -- En kodi 18.x se debe controlar - + # -- ZeroDivisionError: float division by - + # -- zero - + try: current_piece = int( old_div(porcent_time, 100) * len(piece_set) ) + except: current_piece = 0 # -- Banderas de control -------------------- is_greater_num_pieces = (current_piece > continuous_pieces - num_pieces_to_resume) - is_greater_num_pieces_plus = (current_piece + porcent4first_pieces > continuous_pieces) - is_greater_num_pieces_finished = (current_piece + porcent4first_pieces >= len(piece_set)) + #is_greater_num_pieces_plus = (current_piece + porcent4first_pieces > continuous_pieces) + is_greater_num_pieces_plus = (current_piece + BUFFER > continuous_pieces) + #is_greater_num_pieces_finished = (current_piece + porcent4first_pieces >= len(piece_set)) + is_greater_num_pieces_finished = (current_piece + BUFFER >= len(piece_set)) # -- Activa 'pause' automático -------------- if is_greater_num_pieces and not player.paused and not is_greater_num_pieces_finished: @@ -372,163 +511,184 @@ def play(url, xlistitem={}, is_view=None, subtitle="", item=None): if continuous_pieces >= set_next_continuous_pieces: set_next_continuous_pieces = continuous_pieces + num_pieces_to_resume - next_continuous_pieces = str(continuous_pieces - current_piece) + "/" + str( - set_next_continuous_pieces - current_piece) - _pieces_info = {'current': current_piece, 'continuous': next_continuous_pieces, - 'continuous2': _pieces_info['continuous2'], 'have': h.status().num_pieces, - 'len': len(piece_set)} - - # si es 
un archivo de la videoteca enviar a marcar como visto - if item.strm_path: - from platformcode import xbmc_videolibrary - xbmc_videolibrary.mark_auto_as_watched(item) + next_continuous_pieces = str(continuous_pieces - current_piece) + "/" + str(set_next_continuous_pieces - current_piece) + _pieces_info = {'current': current_piece, 'continuous': next_continuous_pieces , 'continuous2': _pieces_info['continuous2'], 'have': h.status().num_pieces, 'len': len(piece_set)} # -- Cerrar el diálogo de progreso -------------- if player.resumed: dp.close() # -- Mostrar el diálogo de progreso ------------- - if player.paused: + if player.paused and dp_cerrado and not rar: # -- Crear diálogo si no existe ------------- + log("##### PAUSED %s" % (h.status().num_pieces)) if not player.statusDialogoProgress: - dp = xbmcgui.DialogProgress() - dp.create('alfa-MCT') + dp = xbmcgui.DialogProgressBG() + dp.create(msg_header) player.setDialogoProgress() # -- Diálogos de estado en el visionado ----- if not h.is_seed(): # -- Recuperar los datos del progreso --- message, porcent, msg_file, s, download = getProgress(h, video_file, _pf=_pieces_info) - dp.update(porcent, message, msg_file) + dp.update(porcent, message, '[CR]' + message + '[CR]' + msg_file) else: dp.update(100, "Descarga completa: " + video_file) # -- Se canceló el progreso en el visionado - # -- Continuar - - if dp.iscanceled(): + if not bkg_auto and dp.iscanceled(): dp.close() player.pause() # -- Se canceló el progreso en el visionado - # -- en la ventana de 'pause' automático. - # -- Parar si el contador llega a 3 - - if dp.iscanceled() and is_greater_num_pieces_pause: - is_greater_num_pieces_canceled += 1 + if not bkg_auto and dp.iscanceled() and is_greater_num_pieces_pause: + is_greater_num_pieces_canceled+= 1 if is_greater_num_pieces_canceled == 3: player.stop() # -- Desactiva 'pause' automático y --------- # -- reinicia el contador de cancelaciones - - if not dp.iscanceled() and not is_greater_num_pieces_plus and is_greater_num_pieces_pause: + if not is_greater_num_pieces_plus and is_greater_num_pieces_pause: dp.close() player.pause() is_greater_num_pieces_pause = False is_greater_num_pieces_canceled = 0 - + # -- El usuario canceló el visionado -------- # -- Terminar - if player.ended: # -- Diálogo eliminar archivos ---------- - remove_files(download, torrent_file, video_file, ses, h) + remove_files( download, torrent_file, video_file, ses, h, ren_video_file ) return - + + xbmc.sleep(1000) + # -- Kodi - Se cerró el visionado ----------------------- # -- Continuar | Terminar - if is_view == "Ok" and not xbmc.Player().isPlaying(): - - if info.num_files() == 1: + dp.close() + + if h.status().num_pieces < tot_piece_set: # -- Diálogo continuar o terminar --------------- - d = xbmcgui.Dialog() - ok = d.yesno('alfa-MCT', 'XBMC-Kodi Cerró el vídeo.', '¿Continuar con la sesión?') - else: - ok = False - # -- SI --------------------------------------------- - if ok: - # -- Continuar: --------------------------------- - is_view = None + # Preguntamos si el usuario quiere pasar a background + ok = xbmcgui.Dialog().yesno(msg_header, "¿Borramos los archivos descargados? (incompletos)", + "Selecciona NO para seguir descargando en segundo plano") + else: ok = True + # -- NO --------------------------------------------- + if not ok: + is_view=None + bkg_user = True + dp_cerrado = False + dp = xbmcgui.DialogProgressBG() + dp.create(msg_header) + else: # -- Terminar: ---------------------------------- # -- Comprobar si el vídeo pertenece a una ------ # -- lista de archivos - - _index, video_file, video_size, len_files = get_video_files_sizes(info) - if _index == -1 or len_files == 1: + remove_files( download, torrent_file, video_file, ses, h, ren_video_file ) + dp.close() + return + """ + #_index, video_file, video_size, len_files = get_video_files_sizes( info ) + if _index < 0 or len_files == 1: # -- Diálogo eliminar archivos -------------- - remove_files(download, torrent_file, video_file, ses, h) + #video_file = _video_file + remove_files( download, torrent_file, video_file, ses, h, ren_video_file ) + dp.close() return else: # -- Lista de archivos. Diálogo de opciones - piece_set = set_priority_pieces(h, _index, video_file, video_size, porcent4first_pieces, porcent4last_pieces, allocate) - is_view = None + is_view=None dp = xbmcgui.DialogProgress() - dp.create('alfa-MCT') + dp.create(msg_header) + """ # -- Mostrar progreso antes del visionado ----------------- - if is_view != "Ok": + if is_view != "Ok" : dp.update(porcent, message, msg_file) # -- Se canceló el progreso antes del visionado --------- - # -- Terminar - + # -- Dar otra oportunidad en background o Terminar - + if not bkg_user and dp_cerrado and dp.iscanceled(): dp.close() - # -- Comprobar si el vídeo pertenece a una lista de - - # -- archivos - - _index, video_file, video_size, len_files = get_video_files_sizes(info) - if _index == -1 or len_files == 1: - # -- Diálogo eliminar archivos ------------------ - remove_files(download, torrent_file, video_file, ses, h) - return + # Preguntamos si el usuario quiere pasar a background + dialog = xbmcgui.Dialog().yesno(msg_header, "¿Borramos los archivos descargados? (incompletos)", + "Selecciona NO para seguir descargando en segundo plano") + if not dialog: + bkg_user = True + dp_cerrado = False + dp = xbmcgui.DialogProgressBG() + dp.create(msg_header) + if ses_lt: h.set_download_limit(DOWNLOAD_LIMIT) + else: - # -- Lista de archivos. Diálogo de opciones ----- - piece_set = set_priority_pieces(h, _index, video_file, video_size, - porcent4first_pieces, porcent4last_pieces, allocate) - is_view = None - dp = xbmcgui.DialogProgress() - dp.create('alfa-MCT') + + remove_files( download, torrent_file, video_file, ses, h, ren_video_file ) + return + # -- Comprobar si el vídeo pertenece a una lista de - + # -- archivos - + #_index, video_file, video_size, len_files = get_video_files_sizes( info ) + if _index < 0 or len_files == 1: + # -- Diálogo eliminar archivos ------------------ + #video_file = _video_file + remove_files( download, torrent_file, video_file, ses, h, ren_video_file ) + return + else: + # -- Lista de archivos. Diálogo de opciones ----- + piece_set = set_priority_pieces(h, _index, video_file, video_size, + porcent4first_pieces, porcent4last_pieces, allocate) + is_view=None + dp = xbmcgui.DialogProgress() + dp.create(msg_header) # -- Kodi - Error? 
- No debería llegar aquí ----------------- if is_view == "Ok" and not xbmc.Player().isPlaying(): dp.close() # -- Diálogo eliminar archivos -------------------------- - remove_files(download, torrent_file, video_file, ses, h) + remove_files( download, torrent_file, video_file, ses, h, ren_video_file ) return # -- Progreso de la descarga ------------------------------------ def getProgress(h, video_file, _pf={}): + if len(_pf) > 0: - _pf_msg = "[%s] [%s] %s[%s] [%s]" % ( - _pf['current'], _pf['continuous'], _pf['continuous2'], _pf['have'], _pf['len']) - else: - _pf_msg = "" + _pf_msg = "[%s] [%s] %s[%s] [%s]" % (_pf['current'], _pf['continuous'], _pf['continuous2'], _pf['have'], _pf['len']) + else: _pf_msg = "" s = h.status() state_str = ['queued', 'checking', 'downloading metadata', \ - 'downloading', 'finished', 'seeding', 'allocating', 'checking fastresume'] + 'downloading', 'finished', 'seeding', 'allocating', 'checking fastresume'] message = '%.2f%% d:%.1f kb/s u:%.1f kb/s p:%d s:%d %s' % \ - (s.progress * 100, s.download_rate / 1000, s.upload_rate / 1000, \ - s.num_peers, s.num_seeds, state_str[s.state]) - porcent = int(s.progress * 100) + (s.progress * 100, old_div(s.download_rate, 1000), old_div(s.upload_rate, 1000), \ + s.num_peers, s.num_seeds, state_str[s.state]) + porcent = int( s.progress * 100 ) - download = (s.progress * 100) + download = ( s.progress * 100 ) if "/" in video_file: video_file = video_file.split("/")[1] msg_file = video_file if len(msg_file) > 50: - msg_file = msg_file.replace(video_file, - os.path.splitext(video_file)[0][:40] + "... " + os.path.splitext(video_file)[1]) - msg_file = msg_file + "[CR]" + "%.2f MB" % (s.total_wanted / 1048576.0) + " - " + _pf_msg + msg_file = msg_file.replace( video_file, os.path.splitext(video_file)[0][:40] + "... 
" + os.path.splitext(video_file)[1] ) + msg_file = msg_file + "[CR]" + "%.2f MB" % (s.total_wanted/1048576.0) + " - " + _pf_msg return (message, porcent, msg_file, s, download) # -- Clase play_video - Controlar eventos ----------------------- class play_video(xbmc.Player): - def __init__(self, *args, **kwargs): + + def __init__( self, *args, **kwargs ): self.paused = False self.resumed = True self.statusDialogoProgress = False @@ -560,24 +720,34 @@ class play_video(xbmc.Player): # -- El más gordo o uno de los más gordo se entiende que es el - # -- vídeo o es vídeo que se usará como referencia para el tipo - # -- de archivo - -def get_video_file(info): +def get_video_file( info ): + extensions_list = ['.aaf', '.3gp', '.asf', '.avi', '.flv', '.mpeg', + '.m1v', '.m2v', '.m4v', '.mkv', '.mov', '.mpg', + '.mpe', '.mp4', '.ogg', '.rar', '.wmv', '.zip'] size_file = 0 for i, f in enumerate(info.files()): if f.size > size_file: - video_file = f.path.replace("\\", "/") + video_file = f.path.replace("\\","/") size_file = f.size index_file = i + if os.path.splitext( video_file )[1] in extensions_list: + break return index_file, video_file, size_file # -- Listado de selección del vídeo a prioritarizar ------------- -def get_video_files_sizes(info): +def get_video_files_sizes( info ): + opciones = {} vfile_name = {} vfile_size = {} + rar_parts = 0 + rar_size = 0 + vid_parts = 0 + vid_size = 0 # -- Eliminar errores con tíldes ----------------------------- - for i, f in enumerate(info.files()): + for i, f in enumerate( info.files() ): _title = unicode(f.path, "iso-8859-1", errors="replace") _title = unicode(f.path, "'utf-8'", errors="replace") @@ -585,22 +755,29 @@ def get_video_files_sizes(info): '.m1v', '.m2v', '.m4v', '.mkv', '.mov', '.mpg', '.mpe', '.mp4', '.ogg', '.rar', '.wmv', '.zip'] - for i, f in enumerate(info.files()): + for i, f in enumerate( info.files() ): _index = int(i) - _title = f.path.replace("\\", "/") + _title = f.path.replace("\\","/") _size = f.size - _file_name = os.path.splitext(_title)[0] + _file_name = os.path.splitext( _title )[0] if "/" in _file_name: _file_name = _file_name.split('/')[1] - _file_ext = os.path.splitext(_title)[1] + _file_ext = os.path.splitext( _title )[1] + + if '.rar' in _file_ext or '.zip' in _file_ext: + rar_parts += 1 + rar_size += _size + else: + vid_parts += 1 + vid_size += _size if _file_ext in extensions_list: index = len(opciones) _caption = str(index) + \ - " - " + \ - _file_name + _file_ext + \ - " - %.2f MB" % (_size / 1048576.0) + " - " + \ + _file_name + _file_ext + \ + " - %.2f MB" % (_size / 1048576.0) vfile_name[index] = _title vfile_size[index] = _size @@ -608,56 +785,102 @@ def get_video_files_sizes(info): opciones[i] = _caption if len(opciones) > 1: - d = xbmcgui.Dialog() - seleccion = d.select("alfa-MCT: Lista de vídeos", opciones.values()) - else: - seleccion = 0 + if rar_parts > 1: + seleccion = -1 + index = -9 + return index, rar_parts, rar_size, len(opciones) + else: + d = xbmcgui.Dialog() + seleccion = d.select(msg_header + ": Selecciona el vídeo, o 'Cancelar' para todos", list(opciones.values())) + else: seleccion = 0 - index = opciones.keys()[seleccion] + index = list(opciones.keys())[seleccion] if seleccion == -1: - vfile_name[seleccion] = "" - vfile_size[seleccion] = 0 + vfile_name[seleccion] = vid_parts + vfile_size[seleccion] = vid_size index = seleccion return index, vfile_name[seleccion], vfile_size[seleccion], len(opciones) - # -- Preguntar si se desea borrar lo descargado ----------------- -def remove_files(download, 
torrent_file, video_file, ses, h): dialog_view = False torrent = False - if os.path.isfile(torrent_file): + if os.path.isfile( torrent_file ): dialog_view = True torrent = True if download > 0: dialog_view = True + if bkg_user and not extracted_rar: + dialog_view = False + + if erase_file_path and erase_file_path != \ + os.path.join( DOWNLOAD_PATH , "MCT-torrent-videos" ): + ren_video_file = erase_file_path + if filetools.isfile(ren_video_file) and filetools.split(ren_video_file)[0] != \ + os.path.join( DOWNLOAD_PATH , "MCT-torrent-videos" ): + ren_video_file = filetools.split(ren_video_file)[0] + elif filetools.isdir(ren_video_file) and ren_video_file == \ + os.path.join( DOWNLOAD_PATH , "MCT-torrent-videos" ): + ren_video_file = '' - if "/" in video_file: video_file = video_file.split("/")[0] - - if dialog_view: - d = xbmcgui.Dialog() - ok = d.yesno('alfa-MCT', 'Borrar las descargas del video', video_file) + if dialog_view and ren_video_file: + if h.status().num_pieces >= tot_piece_set: + d = xbmcgui.Dialog() + ok = d.yesno(msg_header, '¿Borramos los archivos descargados? (completos)', video_file) + else: + ok = True # -- SI ------------------------------------------------- if ok: # -- Borrar archivo - torrent ----------------------- if torrent: - os.remove(torrent_file) + try: + os.remove( torrent_file ) + except: + pass # -- Borrar carpeta/archivos y sesión - vídeo ------- - ses.remove_torrent(h, 1) - xbmc.log("### End session #########") + try: + ses.remove_torrent( h, 1 ) + ses_lt = False + except: + ses_lt = False + try: + if os.path.isdir(ren_video_file): + filetools.rmdirtree(ren_video_file, silent=True) + elif os.path.exists(ren_video_file) and os.path.isfile(ren_video_file): + os.remove(ren_video_file) + log("##### erase_file_path: %s" % ren_video_file) + except: + log("##### erase_file_path: %s" % ren_video_file) + + log("### End session #########") else: # -- Borrar sesión ---------------------------------- - ses.remove_torrent(h) - xbmc.log("### End session #########") + try: + ses.remove_torrent( h ) + ses_lt = False + except: + ses_lt = False + log("### End session #########") else: # -- Borrar sesión -------------------------------------- - ses.remove_torrent(h) - xbmc.log("### End session #########") + try: + ses.remove_torrent( h ) + ses_lt = False + except: + ses_lt = False + # -- Borrar archivo - torrent ----------------------- + if torrent: + try: + os.remove( torrent_file ) + except: + pass + log("### End session #########") - return + return # -- Descargar de la web los datos para crear el torrent -------- @@ -669,23 +892,22 @@ def url_get(url, params={}, headers={}): USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:20.0) Gecko/20100101 Firefox/20.0" if params: - import urllib - url = "%s?%s" % (url, urllib.urlencode(params)) + url = "%s?%s" % (url, urllib.parse.urlencode(params)) - req = urllib2.Request(url) + req = urllib.request.Request(url) req.add_header("User-Agent", USER_AGENT) - for k, v in headers.items(): + for k, v in list(headers.items()): req.add_header(k, v) try: - with closing(urllib2.urlopen(req)) as response: + with closing(urllib.request.urlopen(req)) as response: data = response.read() if response.headers.get("Content-Encoding", "") == "gzip": import zlib return zlib.decompressobj(16 + zlib.MAX_WBITS).decompress(data) return data - except urllib2.HTTPError: + except urllib.error.HTTPError: return None @@ -693,10 +915,8 @@ def url_get(url, params={}, 
headers={}): def count_completed_continuous_pieces(h, piece_set): not_zero = 0 for i, _set in enumerate(piece_set): - if not h.have_piece(_set): - break - else: - not_zero = 1 + if not h.have_piece(_set): break + else: not_zero = 1 return i + not_zero @@ -706,21 +926,31 @@ def count_completed_continuous_pieces(h, piece_set): # -- en una lista los índices de de las piezas del archivo - def set_priority_pieces(h, _index, video_file, video_size, porcent4first_pieces, porcent4last_pieces, allocate): + for i, _set in enumerate(h.file_priorities()): - if i != _index: - h.file_priority(i, 0) + if i != _index and _index >= 0: + #h.file_priority(i,0) + xbmc.sleep(1000) + h.file_priority(i,0) else: - h.file_priority(i, 1) + #h.file_priority(i,0) + xbmc.sleep(1000) + h.file_priority(i,1) piece_set = [] + x = 0 for i, _set in enumerate(h.piece_priorities()): - if _set == 1: piece_set.append(i) + #log("***** Nº Pieza: %s: %s" % (i, str(_set))) + if _set > 0: + piece_set.append(i) + x += 1 + log("***** Piezas %s : Activas: %s" % (str(i+1), str(x))) if not allocate: for i in range(0, porcent4first_pieces): h.set_piece_deadline(piece_set[i], 10000) - for i in range(len(piece_set) - porcent4last_pieces, len(piece_set)): + for i in range(len(piece_set)-porcent4last_pieces, len(piece_set)): h.set_piece_deadline(piece_set[i], 10000) return piece_set @@ -731,9 +961,23 @@ def decode_adfly(data): ysmm = scrapertools.find_single_match(data, "var ysmm = '([^']+)'") left = '' right = '' - for c in [ysmm[i:i + 2] for i in range(0, len(ysmm), 2)]: + for c in [ysmm[i:i+2] for i in range(0, len(ysmm), 2)]: left += c[0] right = c[1] + right decoded_url = base64.b64decode(left.encode() + right.encode())[2:].decode() return decoded_url + + +def encode(s): + import unicodedata + #log("### log ######") + #for c in s: + # log("%s : %s" % (c, str(unicodedata.category(c)))) + #log("##############") + #return s + return str(''.join((c for c in unicodedata.normalize('NFD', unicode(s, 'utf-8')) if unicodedata.category(c) != 'Mn'))) + + +def log(texto): + xbmc.log(texto, xbmc.LOGNOTICE) diff --git a/platformcode/platformtools.py b/platformcode/platformtools.py index 937573f0..5f2a0b72 100644 --- a/platformcode/platformtools.py +++ b/platformcode/platformtools.py @@ -8,26 +8,36 @@ # version 2.0 # ------------------------------------------------------------ -import os +from __future__ import division +from __future__ import absolute_import +from past.utils import old_div +#from builtins import str import sys -import urllib +PY3 = False +if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int + +if PY3: + #from future import standard_library + #standard_library.install_aliases() + import urllib.parse as urllib # Es muy lento en PY2. 
En PY3 es nativo +else: + import urllib # Usamos el nativo de PY2 que es más rápido + +import os -# import config import xbmc -import xbmcaddon import xbmcgui import xbmcplugin +import xbmcaddon from channelselector import get_thumb from core import channeltools from core import trakt_tools, scrapertools from core.item import Item -from platformcode import logger, keymaptools, config +from platformcode import logger +from platformcode import config from platformcode import unify -addon = xbmcaddon.Addon('plugin.video.kod') -downloadenabled = addon.getSetting('downloadenabled') - class XBMCPlayer(xbmc.Player): @@ -98,17 +108,16 @@ def dialog_numeric(_type, heading, default=""): return d -def dialog_textviewer(heading, text): # disponible a partir de kodi 16 +def dialog_textviewer(heading, text): # disponible a partir de kodi 16 return xbmcgui.Dialog().textviewer(heading, text) - def itemlist_refresh(): xbmc.executebuiltin("Container.Refresh") def itemlist_update(item, replace=False): - if replace: # reset the path history + if replace: # reset the path history xbmc.executebuiltin("Container.Update(" + sys.argv[0] + "?" + item.tourl() + ", replace)") else: xbmc.executebuiltin("Container.Update(" + sys.argv[0] + "?" + item.tourl() + ")") @@ -123,11 +132,11 @@ def render_items(itemlist, parent_item): @type parent_item: item @param parent_item: elemento padre """ - logger.info('START render_items') + logger.info('INICIO render_items') from core import httptools # Si el itemlist no es un list salimos - if not type(itemlist) == list: + if not isinstance(itemlist, list): return if parent_item.start: @@ -146,10 +155,18 @@ def render_items(itemlist, parent_item): anime = False if 'anime' in channeltools.get_channel_parameters(parent_item.channel)['categories']: anime = True - + try: + force_unify = channeltools.get_channel_parameters(parent_item.channel)['force_unify'] + except: + force_unify = False unify_enabled = config.get_setting('unify') - #logger.debug('unify_enabled: %s' % unify_enabled) + try: + if channeltools.get_channel_parameters(parent_item.channel)['adult']: + unify_enabled = False + except: + pass + # logger.debug('unify_enabled: %s' % unify_enabled) # Recorremos el itemlist for item in itemlist: @@ -157,10 +174,14 @@ def render_items(itemlist, parent_item): # Si el item no contiene categoria, le ponemos la del item padre if item.category == "": item.category = parent_item.category - + # Si title no existe, lo iniciamos como str, para evitar errones "NoType" if not item.title: item.title = '' + + # Si no hay action o es findvideos/play, folder=False porque no se va a devolver ningún listado + if item.action in ['play', '']: + item.folder = False # Si el item no contiene fanart, le ponemos el del item padre if item.fanart == "": @@ -180,14 +201,13 @@ def render_items(itemlist, parent_item): if 'pelicula' in item.action: item.thumbnail = get_thumb("videolibrary_movie.png") elif 'serie' in item.action: - item.thumbnail = get_thumb("videolibrary_tvshow.png") + item.thumbnail = get_thumb("videolibrary_tvshow.png") - - if unify_enabled and parent_item.channel != 'kodfavorites': + if (unify_enabled or force_unify) and parent_item.channel not in ['kodfavourites']: # Formatear titulo con unify item = unify.title_format(item) else: - #Formatear titulo metodo old school + # Formatear titulo metodo old school if item.text_color: item.title = '[COLOR %s]%s[/COLOR]' % (item.text_color, item.title) if item.text_bold: @@ -196,33 +216,26 @@ def render_items(itemlist, parent_item): item.title = 
'[I]%s[/I]' % item.title # Añade headers a las imagenes si estan en un servidor con cloudflare - if item.action == 'play': - #### Compatibilidad con Kodi 18: evita que se quede la ruedecedita dando vueltas en enlaces Directos - item.folder = False - item.thumbnail = unify.thumbnail_type(item) else: item.thumbnail = httptools.get_url_headers(item.thumbnail) item.fanart = httptools.get_url_headers(item.fanart) + # IconImage para folder y video if item.folder: icon_image = "DefaultFolder.png" else: icon_image = "DefaultVideo.png" - #if not genre or (genre and valid_genre): - # Creamos el listitem - #listitem = xbmcgui.ListItem(item.title, iconImage=icon_image, thumbnailImage=unify.thumbnail_type(item)) - listitem = xbmcgui.ListItem(item.title, iconImage=icon_image, thumbnailImage=item.thumbnail) # Ponemos el fanart if item.fanart: fanart = item.fanart else: - fanart = os.path.join(config.get_runtime_path(), "fanart1.jpg") + fanart = config.get_fanart() # Creamos el listitem - #listitem = xbmcgui.ListItem(item.title) + listitem = xbmcgui.ListItem(item.title) # values icon, thumb or poster are skin dependent.. so we set all to avoid problems # if not exists thumb it's used icon value @@ -240,19 +253,21 @@ def render_items(itemlist, parent_item): # Esta opcion es para poder utilizar el xbmcplugin.setResolvedUrl() # if item.isPlayable == True or (config.get_setting("player_mode") == 1 and item.action == "play"): if config.get_setting("player_mode") == 1 and item.action == "play": - if item.folder: - item.folder = False listitem.setProperty('IsPlayable', 'true') # Añadimos los infoLabels set_infolabels(listitem, item) + + # No arrastrar plot si no es una peli/serie/temporada/episodio + if item.plot and item.contentType not in ['movie', 'tvshow', 'season', 'episode']: + item.__dict__['infoLabels'].pop('plot') # Montamos el menu contextual if parent_item.channel != 'special': context_commands = set_context_commands(item, parent_item) else: context_commands = [] - # Añadimos el item + # Añadimos el menu contextual if config.get_platform(True)['num_version'] >= 17.0 and parent_item.list_type == '': listitem.addContextMenuItems(context_commands) elif parent_item.list_type == '': @@ -264,18 +279,15 @@ def render_items(itemlist, parent_item): listitem=listitem, isFolder=item.folder, totalItems=item.totalItems) - # Fijar los tipos de vistas... - if config.get_setting("forceview"): - # ...forzamos segun el viewcontent + if config.get_setting("forceview"): # ...forzamos segun el viewcontent xbmcplugin.setContent(int(sys.argv[1]), parent_item.viewcontent) - elif parent_item.channel not in ["channelselector", "", "kodfavorites"]: - # ... o segun el canal + elif parent_item.channel not in ["channelselector", "", "kodfavourites"]: # ... 
o segun el canal xbmcplugin.setContent(int(sys.argv[1]), "movies") - elif parent_item.channel == "kodfavorites" and parent_item.action == 'mostrar_perfil': - xbmcplugin.setContent(int(sys.argv[1]), "movies") + elif parent_item.channel == "kodfavourites" and parent_item.action == 'mostrar_perfil': + xbmcplugin.setContent(int(sys.argv[1]), "movies") # Fijamos el "breadcrumb" if parent_item.list_type == '': @@ -355,21 +367,58 @@ def set_infolabels(listitem, item, player=False): Metodo para pasar la informacion al listitem (ver tmdb.set_InfoLabels() ) item.infoLabels es un dicionario con los pares de clave/valor descritos en: http://mirrors.xbmc.org/docs/python-docs/14.x-helix/xbmcgui.html#ListItem-setInfo + https://kodi.wiki/view/InfoLabels @param listitem: objeto xbmcgui.ListItem @type listitem: xbmcgui.ListItem @param item: objeto Item que representa a una pelicula, serie o capitulo @type item: item """ + + infoLabels_dict = {'aired': 'aired', 'album': 'album', 'artist': 'artist', 'cast': 'cast', + 'castandrole': 'castandrole', 'tmdb_id': 'code', 'code': 'code', 'country': 'country', + 'credits': 'credits', 'release_date': 'dateadded', 'dateadded': 'dateadded', 'dbid': 'dbid', + 'director': 'director', 'duration': 'duration', 'episode': 'episode', + 'episodio_sinopsis': 'episodeguide', 'episodio_air_date': 'None', 'episodio_imagen': 'None', + 'episodio_titulo': 'title', 'episodio_vote_average': 'rating', 'episodio_vote_count': 'votes', + 'fanart': 'None', 'genre': 'genre', 'homepage': 'None', 'imdb_id': 'imdbnumber', + 'imdbnumber': 'imdbnumber', 'in_production': 'None', 'last_air_date': 'lastplayed', + 'mediatype': 'mediatype', 'mpaa': 'mpaa', 'number_of_episodes': 'None', + 'number_of_seasons': 'None', 'original_language': 'None', 'originaltitle': 'originaltitle', + 'overlay': 'overlay', 'poster_path': 'path', 'popularity': 'None', 'playcount': 'playcount', + 'plot': 'plot', 'plotoutline': 'plotoutline', 'premiered': 'premiered', 'quality': 'None', + 'rating': 'rating', 'season': 'season', 'set': 'set', 'setid': 'setid', + 'setoverview': 'setoverview', 'showlink': 'showlink', 'sortepisode': 'sortepisode', + 'sortseason': 'sortseason', 'sorttitle': 'sorttitle', 'status': 'status', 'studio': 'studio', + 'tag': 'tag', 'tagline': 'tagline', 'temporada_air_date': 'None', 'temporada_nombre': 'None', + 'temporada_num_episodios': 'None', 'temporada_poster': 'None', 'title': 'title', + 'top250': 'top250', 'tracknumber': 'tracknumber', 'trailer': 'trailer', 'thumbnail': 'None', + 'tvdb_id': 'None', 'tvshowtitle': 'tvshowtitle', 'type': 'None', 'userrating': 'userrating', + 'url_scraper': 'None', 'votes': 'votes', 'writer': 'writer', 'year': 'year'} + + infoLabels_kodi = {} + if item.infoLabels: if 'mediatype' not in item.infoLabels: item.infoLabels['mediatype'] = item.contentType - listitem.setInfo("video", item.infoLabels) + + try: + for label_tag, label_value in list(item.infoLabels.items()): + try: + # logger.debug(str(label_tag) + ': ' + str(infoLabels_dict[label_tag])) + if infoLabels_dict[label_tag] != 'None': + infoLabels_kodi.update({infoLabels_dict[label_tag]: item.infoLabels[label_tag]}) + except: + continue + + listitem.setInfo("video", infoLabels_kodi) + + except: + listitem.setInfo("video", item.infoLabels) + logger.error(item.infoLabels) + logger.error(infoLabels_kodi) if player and not item.contentTitle: - if item.fulltitle: - listitem.setInfo("video", {"Title": item.fulltitle}) - else: - listitem.setInfo("video", {"Title": item.title}) + listitem.setInfo("video", {"Title": 
item.title}) elif not player: listitem.setInfo("video", {"Title": item.title}) @@ -409,9 +458,9 @@ def set_context_commands(item, parent_item): num_version_xbmc = config.get_platform(True)['num_version'] # Creamos un list con las diferentes opciones incluidas en item.context - if type(item.context) == str: + if isinstance(item.context, str): context = item.context.split("|") - elif type(item.context) == list: + elif isinstance(item.context, list): context = item.context else: context = [] @@ -423,31 +472,30 @@ def set_context_commands(item, parent_item): item.action = itemBK.action item.channel = itemBK.channel infoLabels = {} - if itemBK.infoLabels["year"]: infoLabels["year"] = itemBK.infoLabels["year"] - if itemBK.infoLabels["imdb_id"]: infoLabels["imdb_id"] = itemBK.infoLabels["imdb_id"] - if itemBK.infoLabels["tmdb_id"]: infoLabels["tmdb_id"] = itemBK.infoLabels["tmdb_id"] - if itemBK.infoLabels["tvdb_id"]: infoLabels["tvdb_id"] = itemBK.infoLabels["tvdb_id"] - if itemBK.infoLabels["noscrap_id"]: infoLabels["noscrap_id"] = itemBK.infoLabels["noscrap_id"] - if len(infoLabels) > 0: item.infoLabels = infoLabels + if itemBK.infoLabels["year"]: infoLabels["year"] = itemBK.infoLabels["year"] + if itemBK.infoLabels["imdb_id"]: infoLabels["imdb_id"] = itemBK.infoLabels["imdb_id"] + if itemBK.infoLabels["tmdb_id"]: infoLabels["tmdb_id"] = itemBK.infoLabels["tmdb_id"] + if itemBK.infoLabels["tvdb_id"]: infoLabels["tvdb_id"] = itemBK.infoLabels["tvdb_id"] + if itemBK.infoLabels["noscrap_id"]: infoLabels["noscrap_id"] = itemBK.infoLabels["noscrap_id"] + if len(infoLabels) > 0: item.infoLabels = infoLabels - if itemBK.thumbnail: item.thumbnail = itemBK.thumbnail - if itemBK.extra: item.extra = itemBK.extra + if itemBK.thumbnail: item.thumbnail = itemBK.thumbnail + if itemBK.extra: item.extra = itemBK.extra if itemBK.contentEpisodeNumber: item.contentEpisodeNumber = itemBK.contentEpisodeNumber - if itemBK.contentEpisodeTitle: item.contentEpisodeTitle = itemBK.contentEpisodeTitle - if itemBK.contentPlot: item.contentPlot = itemBK.contentPlot - if itemBK.contentQuality: item.contentQuality = itemBK.contentQuality - if itemBK.contentSeason: item.contentSeason = itemBK.contentSeason - if itemBK.contentSerieName: item.contentSerieName = itemBK.contentSerieName - if itemBK.contentThumbnail: item.contentThumbnail = itemBK.contentThumbnail - if itemBK.contentTitle: item.contentTitle = itemBK.contentTitle - if itemBK.contentType: item.contentType = itemBK.contentType - if itemBK.duration: item.duration = itemBK.duration - if itemBK.fulltitle: item.fulltitle = itemBK.fulltitle - if itemBK.plot: item.plot = itemBK.plot - if itemBK.quality: item.quality = itemBK.quality - if itemBK.show: item.show = itemBK.show - if itemBK.title: item.title = itemBK.title - if itemBK.viewcontent: item.viewcontent = itemBK.viewcontent + if itemBK.contentEpisodeTitle: item.contentEpisodeTitle = itemBK.contentEpisodeTitle + if itemBK.contentPlot: item.contentPlot = itemBK.contentPlot + if itemBK.contentQuality: item.contentQuality = itemBK.contentQuality + if itemBK.contentSeason: item.contentSeason = itemBK.contentSeason + if itemBK.contentSerieName: item.contentSerieName = itemBK.contentSerieName + if itemBK.contentThumbnail: item.contentThumbnail = itemBK.contentThumbnail + if itemBK.contentTitle: item.contentTitle = itemBK.contentTitle + if itemBK.contentType: item.contentType = itemBK.contentType + if itemBK.duration: item.duration = itemBK.duration + if itemBK.plot: item.plot = itemBK.plot + if itemBK.quality: item.quality = 
itemBK.quality + if itemBK.show: item.show = itemBK.show + if itemBK.title: item.title = itemBK.title + if itemBK.viewcontent: item.viewcontent = itemBK.viewcontent # itemJson = item.tojson() # logger.info("Elemento: {0} bytes".format(len(itemJson))) @@ -457,12 +505,12 @@ def set_context_commands(item, parent_item): # Opciones segun item.context for command in context: # Predefinidos - if type(command) == str: + if isinstance(command, str): if command == "no_context": return [] # Formato dict - if type(command) == dict: + if isinstance(command, dict): # Los parametros del dict, se sobreescriben al nuevo context_item en caso de sobreescribir "action" y # "channel", los datos originales se guardan en "from_action" y "from_channel" if "action" in command: @@ -486,12 +534,12 @@ def set_context_commands(item, parent_item): return context_commands # Opciones segun criterios, solo si el item no es un tag (etiqueta), ni es "Añadir a la videoteca", etc... - if item.action and item.action not in ["add_pelicula_to_library", "add_serie_to_library", "buscartrailer"]: + if item.action and item.action not in ["add_pelicula_to_library", "add_serie_to_library", "buscartrailer", "actualizar_titulos"]: # Mostrar informacion: si el item tiene plot suponemos q es una serie, temporada, capitulo o pelicula if item.infoLabels['plot'] and (num_version_xbmc < 17.0 or item.contentType == 'season'): context_commands.append((config.get_localized_string(60348), "XBMC.Action(Info)")) - # ExtendedInfo: Si esta instalado el addon y se cumplen una serie de condiciones + # ExtendedInfo: Si está instalado el addon y se cumplen una serie de condiciones if xbmc.getCondVisibility('System.HasAddon(script.extendedinfo)') \ and config.get_setting("extended_info") == True: if item.contentType == "episode" and item.contentEpisodeNumber and item.contentSeason \ @@ -520,15 +568,16 @@ def set_context_commands(item, parent_item): elif item.contentType == "movie" and (item.infoLabels['tmdb_id'] or item.infoLabels['imdb_id'] or item.contentTitle): param = "id =%s,imdb_id=%s,name=%s" \ - % (item.infoLabels['tmdb_id'], item.infoLabels['imdb_id'], item.contentTitle) + % (item.infoLabels['tmdb_id'], item.infoLabels['imdb_id'], item.contentTitle) context_commands.append(("ExtendedInfo", "XBMC.RunScript(script.extendedinfo,info=extendedinfo,%s)" % param)) # InfoPlus if config.get_setting("infoplus"): - if item.infoLabels['tmdb_id'] or item.infoLabels['imdb_id'] or item.infoLabels['tvdb_id'] or \ - (item.contentTitle and item.infoLabels["year"]) or item.contentSerieName: + #if item.infoLabels['tmdb_id'] or item.infoLabels['imdb_id'] or item.infoLabels['tvdb_id'] or \ + # (item.contentTitle and item.infoLabels["year"]) or item.contentSerieName: + if item.infoLabels['tmdb_id'] or item.infoLabels['imdb_id'] or item.infoLabels['tvdb_id']: context_commands.append(("InfoPlus", "XBMC.RunPlugin(%s?%s)" % (sys.argv[0], item.clone( channel="infoplus", action="start", from_channel=item.channel).tourl()))) @@ -540,6 +589,7 @@ def set_context_commands(item, parent_item): context_commands.insert(2, (config.get_localized_string(70739), "XBMC.Container.Update (%s?%s)" % (sys.argv[0], Item(action="open_browser", url=item.url).tourl()))) + # Añadir a Favoritos if num_version_xbmc < 17.0 and \ ((item.channel not in ["favorites", "videolibrary", "help", ""] @@ -548,16 +598,17 @@ def set_context_commands(item, parent_item): (sys.argv[0], item.clone(channel="favorites", action="addFavourite", from_channel=item.channel, from_action=item.action).tourl()))) + # Añadir a 
Alfavoritos (Mis enlaces) if item.channel not in ["favorites", "videolibrary", "help", ""] and parent_item.channel != "favorites": - context_commands.append(("[B]%s[/B]" % config.get_localized_string(70557), "XBMC.RunPlugin(%s?%s)" % - (sys.argv[0], item.clone(channel="kodfavorites", action="addFavourite", - from_channel=item.channel, - from_action=item.action).tourl()))) + context_commands.append( + ('[COLOR blue]%s[/COLOR]' % config.get_localized_string(70557), "XBMC.RunPlugin(%s?%s)" % + (sys.argv[0], item.clone(channel="kodfavourites", action="addFavourite", + from_channel=item.channel, + from_action=item.action).tourl()))) # Buscar en otros canales - if item.contentType in ['movie', 'tvshow'] and item.channel != 'search': - + if item.contentType in ['movie', 'tvshow'] and item.channel != 'search' and item.action not in ['play']: # Buscar en otros canales if item.contentSerieName != '': @@ -569,6 +620,7 @@ def set_context_commands(item, parent_item): mediatype = 'tv' else: mediatype = item.contentType + context_commands.append((config.get_localized_string(60350), "XBMC.Container.Update (%s?%s)" % (sys.argv[0], item.clone(channel='search', @@ -576,13 +628,16 @@ def set_context_commands(item, parent_item): from_channel=item.channel, contextual=True, text=item.wanted).tourl()))) - context_commands.append(("[B]%s[/B]" % config.get_localized_string(70561), "XBMC.Container.Update (%s?%s)" % ( - sys.argv[0], item.clone(channel='search', action='from_context', search_type='list', page='1', - list_type='%s/%s/similar' % (mediatype,item.infoLabels['tmdb_id'])).tourl()))) + + context_commands.append( + ("[COLOR yellow]%s[/COLOR]" % config.get_localized_string(70561), "XBMC.Container.Update (%s?%s)" % ( + sys.argv[0], item.clone(channel='search', action='from_context', search_type='list', page='1', + list_type='%s/%s/similar' % ( + mediatype, item.infoLabels['tmdb_id'])).tourl()))) # Definir como Pagina de inicio if config.get_setting('start_page'): - if item.action not in ['findvideos', 'play']: + if item.action not in ['episodios', 'seasons', 'findvideos', 'play']: context_commands.insert(0, (config.get_localized_string(60351), "XBMC.RunPlugin(%s?%s)" % ( sys.argv[0], Item(channel='side_menu', @@ -602,55 +657,45 @@ def set_context_commands(item, parent_item): (sys.argv[0], item.clone(action="add_pelicula_to_library", from_action=item.action).tourl()))) - if (item.channel != "downloads" and item.channel != "videolibrary" and downloadenabled != "false" and not config.get_localized_string(70585) in str(item.context))\ - or (item.channel != "downloads" and item.channel != "videolibrary" and downloadenabled != "false" and config.get_localized_string(70585) in str(item.context) and config.get_localized_string(70714) in str(item.context)): + if item.channel != "downloads" and item.server != 'torrent': # Descargar pelicula - if item.contentType == "movie" and item.action in ['findvideos', "play"]: + if item.contentType == "movie" and item.contentTitle: context_commands.append((config.get_localized_string(60354), "XBMC.RunPlugin(%s?%s)" % (sys.argv[0], item.clone(channel="downloads", action="save_download", from_channel=item.channel, from_action=item.action) - .tourl()))) + .tourl()))) - # elif item.contentSerieName: - # Descargar serie - elif (item.contentType == "tvshow" and item.action in ["episodios"]) or \ - (item.contentType == "tvshow" and item.action in ['get_seasons'] and config.get_setting('show_seasons',item.channel) == False): - item.contentType == "tvshow" - 
context_commands.append((config.get_localized_string(60355), "XBMC.RunPlugin(%s?%s)" % - (sys.argv[0], item.clone(channel="downloads", action="save_download", - from_channel=item.channel, - from_action=item.action).tourl()))) - context_commands.append((config.get_localized_string(60357), "XBMC.RunPlugin(%s?%s)" % - (sys.argv[0], item.clone(channel="downloads", action="save_download", - from_channel=item.channel, - from_action=item.action, - download='season').tourl()))) - - # Descargar episodio - elif item.contentType == 'episode' and item.action in ["findvideos", "play"]: - item.contentType == "episode" - context_commands.append((config.get_localized_string(60356), "XBMC.RunPlugin(%s?%s)" % - (sys.argv[0], item.clone(channel="downloads", action="save_download", - from_channel=item.channel, - from_action=item.action).tourl()))) - - # Descargar temporada - elif item.contentType == "season": - context_commands.append((config.get_localized_string(60357), "XBMC.RunPlugin(%s?%s)" % - (sys.argv[0], item.clone(channel="downloads", action="save_download", - from_channel=item.channel, - from_action=item.action, - download='season').tourl()))) + elif item.contentSerieName: + # Descargar serie + if item.contentType == "tvshow": + context_commands.append((config.get_localized_string(60355), "XBMC.RunPlugin(%s?%s)" % + (sys.argv[0], item.clone(channel="downloads", action="save_download", + from_channel=item.channel, + from_action=item.action).tourl()))) + # Descargar episodio + elif item.contentType == "episode": + context_commands.append((config.get_localized_string(60356), "XBMC.RunPlugin(%s?%s)" % + (sys.argv[0], item.clone(channel="downloads", action="save_download", + from_channel=item.channel, + from_action=item.action).tourl()))) + # Descargar temporada + elif item.contentType == "season": + context_commands.append((config.get_localized_string(60357), "XBMC.RunPlugin(%s?%s)" % + (sys.argv[0], item.clone(channel="downloads", action="save_download", + from_channel=item.channel, + from_action=item.action).tourl()))) # Abrir configuración - if parent_item.channel not in ["setting", "news", "search"]: + if parent_item.channel not in ["setting", "news", "search"] and item.action == "play": context_commands.append((config.get_localized_string(60358), "XBMC.Container.Update(%s?%s)" % (sys.argv[0], Item(channel="setting", action="mainlist").tourl()))) # Buscar Trailer if item.action == "findvideos" or "buscar_trailer" in context: - context_commands.append((config.get_localized_string(60359), "XBMC.RunPlugin(%s?%s)" % (sys.argv[0], item.clone( - channel="trailertools", action="buscartrailer", contextual=True).tourl()))) + context_commands.append( + (config.get_localized_string(60359), "XBMC.RunPlugin(%s?%s)" % (sys.argv[0], item.clone( + channel="trailertools", action="buscartrailer", contextual=True).tourl()))) + # Añadir SuperFavourites al menu contextual (1.0.53 o superior necesario) sf_file_path = xbmc.translatePath("special://home/addons/plugin.program.super.favourites/LaunchSFMenu.py") check_sf = os.path.exists(sf_file_path) @@ -659,21 +704,16 @@ def set_context_commands(item, parent_item): "XBMC.RunScript(special://home/addons/plugin.program.super.favourites/LaunchSFMenu.py)")) context_commands = sorted(context_commands, key=lambda comand: comand[0]) + # Menu Rapido # context_commands.insert(0, (config.get_localized_string(60360), - # "XBMC.RunPlugin(%s?%s)" % (sys.argv[0], Item(channel='side_menu', - # action="open_shortcut_menu", - # parent=parent_item.tourl()).tourl( - # )))) - # 
context_commands.insert(1, (config.get_localized_string(70737), # "XBMC.Container.Update (%s?%s)" % (sys.argv[0], Item(channel='side_menu', # action="open_menu", # parent=parent_item.tourl()).tourl( # )))) if config.dev_mode(): context_commands.insert(2, ("item info", - "XBMC.Container.Update (%s?%s)" % (sys.argv[0], Item(action="itemInfo", - parent=item.tojson()).tourl()))) + "XBMC.Container.Update (%s?%s)" % (sys.argv[0], Item(action="itemInfo", parent=item.tojson()).tourl()))) return context_commands @@ -683,10 +723,8 @@ def is_playing(): def play_video(item, strm=False, force_direct=False, autoplay=False): logger.info() - # if item.play_from == 'window': - # force_direct=True # logger.debug(item.tostring('\n')) - logger.debug('item play: %s'%item) + logger.debug('item play: %s' % item) xbmc_player = XBMCPlayer() if item.channel == 'downloads': logger.info("Reproducir video local: %s [%s]" % (item.title, item.url)) @@ -767,6 +805,14 @@ def stop_video(): def get_seleccion(default_action, opciones, seleccion, video_urls): + fixpri = False + # para conocer en que prioridad se trabaja + priority = int(config.get_setting("resolve_priority")) + # se usara para comprobar si hay links premium o de debriders + check = [] + # Comprueba si resolve stop esta desactivado + if config.get_setting("resolve_stop") == False: + fixpri = True # preguntar if default_action == 0: # "Elige una opción" @@ -775,24 +821,42 @@ def get_seleccion(default_action, opciones, seleccion, video_urls): elif default_action == 1: resolutions = [] for url in video_urls: + if "debrid]" in url[0] or "Premium)" in url[0]: + check.append(True) res = calcResolution(url[0]) if res: resolutions.append(res) if resolutions: - seleccion = resolutions.index(min(resolutions)) + if (fixpri == True and + check and + priority == 2): + seleccion = 0 + else: + seleccion = resolutions.index(min(resolutions)) else: seleccion = 0 # Ver en alta calidad elif default_action == 2: resolutions = [] for url in video_urls: + if "debrid]" in url[0] or "Premium)" in url[0]: + check.append(True) res = calcResolution(url[0]) if res: resolutions.append(res) + if resolutions: - seleccion = resolutions.index(max(resolutions)) + if (fixpri == True and + check and + priority == 2): + seleccion = 0 + else: + seleccion = resolutions.index(max(resolutions)) else: - seleccion = len(video_urls) - 1 + if fixpri == True and check: + seleccion = 0 + else: + seleccion = len(video_urls) - 1 else: seleccion = 0 return seleccion @@ -802,7 +866,7 @@ def calcResolution(option): match = scrapertools.find_single_match(option, '([0-9]{2,4})x([0-9]{2,4})') resolution = False if match: - resolution = int(match[0])*int(match[1]) + resolution = int(match[0]) * int(match[1]) else: if '240p' in option: resolution = 320 * 240 @@ -829,7 +893,7 @@ def show_channel_settings(**kwargs): @return: devuelve la ventana con los elementos @rtype: SettingsWindow """ - from xbmc_config_menu import SettingsWindow + from platformcode.xbmc_config_menu import SettingsWindow return SettingsWindow("ChannelSettings.xml", config.get_runtime_path()).start(**kwargs) @@ -843,12 +907,12 @@ def show_video_info(*args, **kwargs): @rtype: InfoWindow """ - from xbmc_info_window import InfoWindow + from platformcode.xbmc_info_window import InfoWindow return InfoWindow("InfoWindow.xml", config.get_runtime_path()).start(*args, **kwargs) def show_recaptcha(key, referer): - from recaptcha import Recaptcha + from platformcode.recaptcha import Recaptcha return Recaptcha("Recaptcha.xml", 
config.get_runtime_path()).Start(key, referer) @@ -868,7 +932,7 @@ def handle_wait(time_to_wait, title, text): espera = dialog_progress(' ' + title, "") secs = 0 - increment = int(100 / time_to_wait) + increment = int(old_div(100, time_to_wait)) cancelled = False while secs < time_to_wait: @@ -930,10 +994,10 @@ def get_dialogo_opciones(item, default_action, strm, autoplay): # "Descargar" import xbmcaddon addon = xbmcaddon.Addon('plugin.video.kod') - # downloadenabled = addon.getSetting('downloadenabled') - # if downloadenabled != "false": - # opcion = config.get_localized_string(30153) - # opciones.append(opcion) + downloadenabled = addon.getSetting('downloadenabled') + if downloadenabled != "false": + opcion = config.get_localized_string(30153) + opciones.append(opcion) if item.isFavourite: # "Quitar de favoritos" @@ -1051,11 +1115,13 @@ def get_video_seleccionado(item, seleccion, video_urls): mediaurl = video_urls[seleccion][1] if len(video_urls[seleccion]) > 4: wait_time = video_urls[seleccion][2] - item.subtitle = video_urls[seleccion][3] + if not item.subtitle: + item.subtitle = video_urls[seleccion][3] mpd = True elif len(video_urls[seleccion]) > 3: wait_time = video_urls[seleccion][2] - item.subtitle = video_urls[seleccion][3] + if not item.subtitle: + item.subtitle = video_urls[seleccion][3] elif len(video_urls[seleccion]) > 2: wait_time = video_urls[seleccion][2] view = True @@ -1079,8 +1145,7 @@ def get_video_seleccionado(item, seleccion, video_urls): def set_player(item, xlistitem, mediaurl, view, strm): logger.info() - # logger.debug("item:\n" + item.tostring('\n')) - + logger.debug("item:\n" + item.tostring('\n')) # Movido del conector "torrent" aqui if item.server == "torrent": play_torrent(item, xlistitem, mediaurl) @@ -1097,11 +1162,11 @@ def set_player(item, xlistitem, mediaurl, view, strm): logger.info("player_mode=%s" % config.get_setting("player_mode")) logger.info("mediaurl=" + mediaurl) if config.get_setting("player_mode") == 3 or "megacrypter.com" in mediaurl: - import download_and_play - download_and_play.download_and_play(mediaurl, "download_and_play.mp4", config.get_setting("downloadpath")) + from . 
import download_and_play + download_and_play.download_and_play(mediaurl, "download_and_play.tmp", config.get_setting("downloadpath")) return - elif config.get_setting("player_mode") == 0 or item.play_from == 'window' or\ + elif config.get_setting("player_mode") == 0 or item.play_from == 'window' or \ (config.get_setting("player_mode") == 3 and mediaurl.startswith("rtmp")): # Añadimos el listitem a una lista de reproducción (playlist) playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO) @@ -1116,15 +1181,16 @@ def set_player(item, xlistitem, mediaurl, view, strm): # elif config.get_setting("player_mode") == 1 or item.isPlayable: elif config.get_setting("player_mode") == 1: - logger.info("mediaurl :" + mediaurl) logger.info("Tras setResolvedUrl") # si es un archivo de la videoteca enviar a marcar como visto + if strm or item.strm_path: from platformcode import xbmc_videolibrary xbmc_videolibrary.mark_auto_as_watched(item) logger.debug(item) xlistitem.setPath(mediaurl) xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, xlistitem) + xbmc.sleep(2500) elif config.get_setting("player_mode") == 2: xbmc.executebuiltin("PlayMedia(" + mediaurl + ")") @@ -1160,9 +1226,13 @@ def torrent_client_installed(show_tuple=False): def play_torrent(item, xlistitem, mediaurl): logger.info() import time + import traceback + from core import filetools - from core import videolibrarytools - + from core import httptools + from lib import generictools + from servers import torrent + # Opciones disponibles para Reproducir torrents torrent_options = list() torrent_options.append(["Cliente interno (necesario libtorrent)"]) @@ -1183,205 +1253,227 @@ def play_torrent(item, xlistitem, mediaurl): else: seleccion = 0 - # Plugins externos - if seleccion > 1: - - #### Compatibilidad con Kodi 18: evita cuelgues/cancelaciones cuando el .torrent se lanza desde pantalla convencional - #if xbmc.getCondVisibility('Window.IsMedia'): - xbmcplugin.setResolvedUrl(int(sys.argv[1]), False, xlistitem) #Preparamos el entorno para evitar error Kod1 18 - time.sleep(0.5) #Dejamos tiempo para que se ejecute + # Si Libtorrent ha dado error de inicialización, no se pueden usar los clientes internos + UNRAR = config.get_setting("unrar_path", server="torrent", default="") + LIBTORRENT = config.get_setting("libtorrent_path", server="torrent", default='') + size_rar = 2 + rar_files = [] + if item.password: + size_rar = 3 - #Nuevo método de descarga previa del .torrent. Si da error, miramos si hay alternatica local. Si ya es local, lo usamos + # Si es Libtorrent y no está soportado, se ofrecen alternativas, si las hay... 
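The alternative-client fallback announced in the comment above, and implemented in the lines that follow, boils down to a small selection pattern: the internal clients (indexes 0 and 1) require libtorrent, so when it is missing the user is re-prompted and only an external plugin is accepted. A minimal, Kodi-free sketch of that pattern; pick_torrent_client and ask are illustrative stand-ins for the torrent_options/dialog_select machinery, not addon API:

def pick_torrent_client(options, libtorrent_ok, ask):
    # ask() plays the role of dialog_select: returns an index, or -1 on cancel.
    choice = ask([opt[0] for opt in options])
    if 0 <= choice < 2 and not libtorrent_ok:
        if len(options) <= 2:
            return -1  # no external plugin available to fall back to
        # Re-offer only the external plugins and map back to the full list.
        choice = ask([opt[0] for opt in options[2:]])
        return -1 if choice < 0 else choice + 2
    return choice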
+ if seleccion < 2 and not LIBTORRENT: + dialog_ok('Cliente Interno (LibTorrent):', 'Este cliente no está soportado en su dispositivo.', \ + 'Error: [COLOR yellow]%s[/COLOR]' % config.get_setting("libtorrent_error", server="torrent", + default=''), \ + 'Use otro cliente Torrent soportado') + if len(torrent_options) > 2: + seleccion = dialog_select(config.get_localized_string(70193), [opcion[0] for opcion in torrent_options]) + if seleccion < 2: + return + else: + return + # Si es Torrenter o Elementum con opción de Memoria, se ofrece la posibilidad ee usar Libtorrent temporalemente + elif seleccion > 1 and LIBTORRENT and UNRAR and 'RAR-' in item.torrent_info and ( + "torrenter" in torrent_options[seleccion][0] \ + or ("elementum" in torrent_options[seleccion][0] and xbmcaddon.Addon(id="plugin.video.%s" \ + % torrent_options[seleccion][ + 0].replace('Plugin externo: ', + '')).getSetting( + 'download_storage') == '1')): + if dialog_yesno(torrent_options[seleccion][0], 'Este plugin externo no soporta extraer on-line archivos RAR', \ + '[COLOR yellow]¿Quiere que usemos esta vez el Cliente interno MCT?[/COLOR]', \ + 'Esta operación ocupará en disco [COLOR yellow][B]%s+[/B][/COLOR] veces el tamaño del vídeo' % size_rar): + seleccion = 1 + else: + return + # Si es Elementum pero con opción de Memoria, se muestras los Ajustes de Elementum y se pide al usuario que cambie a "Usar Archivos" + elif seleccion > 1 and not LIBTORRENT and UNRAR and 'RAR-' in item.torrent_info and "elementum" in \ + torrent_options[seleccion][0] \ + and xbmcaddon.Addon(id="plugin.video.%s" % torrent_options[seleccion][0].replace('Plugin externo: ', '')) \ + .getSetting('download_storage') == '1': + if dialog_yesno(torrent_options[seleccion][0], + 'Elementum con descarga en [COLOR yellow]Memoria[/COLOR] no soporta ' + \ + 'extraer on-line archivos RAR (ocupación en disco [COLOR yellow][B]%s+[/B][/COLOR] veces)' % size_rar, \ + '[COLOR yellow]¿Quiere llamar a los Ajustes de Elementum para cambiar [B]temporalmente[/B] ' + \ + 'a [COLOR hotpink]"Usar Archivos"[/COLOR] y [B]reintentarlo[/B]?[/COLOR]'): + __settings__ = xbmcaddon.Addon( + id="plugin.video.%s" % torrent_options[seleccion][0].replace('Plugin externo: ', '')) + __settings__.openSettings() # Se visulizan los Ajustes de Elementum + elementum_dl = xbmcaddon.Addon( + id="plugin.video.%s" % torrent_options[seleccion][0].replace('Plugin externo: ', '')) \ + .getSetting('download_storage') + if elementum_dl != '1': + config.set_setting("elementum_dl", "1", server="torrent") # Salvamos el cambio para restaurarlo luego + return # Se sale, porque habrá refresco y cancelaría Kodi si no + + # Descarga de torrents a local + if seleccion >= 0: + + #### Compatibilidad con Kodi 18: evita cuelgues/cancelaciones cuando el .torrent se lanza desde pantalla convencional + # if xbmc.getCondVisibility('Window.IsMedia'): + xbmcplugin.setResolvedUrl(int(sys.argv[1]), False, xlistitem) # Preparamos el entorno para evitar error Kod1 18 + time.sleep(0.5) # Dejamos tiempo para que se ejecute + + # Nuevo método de descarga previa del .torrent. Si da error, miramos si hay alternatica local. 
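The comment above and the one that follows describe a remote-first policy: try to download the .torrent, and only fall back to item.torrent_alt (a local path or an alternate URL) when that fails. Stated on its own, assuming a hypothetical cache_torrent() stand-in for the generictools.get_torrent_size() download step, the decision looks like this:

def resolve_torrent(url, torrent_alt, cache_torrent):
    # Shorter timeout when a fallback exists, as in the code below.
    cached = cache_torrent(url, timeout=5 if torrent_alt else 10)
    if cached:
        return cached        # the remote .torrent was fetched and cached
    if torrent_alt:
        return torrent_alt   # local alternative path, or alternate URL
    return url               # nothing better; keep the original reference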
+ # Si ya es local, lo usamos url = '' url_stat = False torrents_path = '' referer = None post = None - videolibrary_path = config.get_videolibrary_path() #Calculamos el path absoluto a partir de la Videoteca - if videolibrary_path.lower().startswith("smb://"): #Si es una conexión SMB, usamos userdata local - videolibrary_path = config.get_data_path() #Calculamos el path absoluto a partir de Userdata - if not filetools.exists(videolibrary_path): #Si no existe el path, pasamos al modo clásico + rar = False + size = '' + password = '' + if item.password: + password = item.password + + videolibrary_path = config.get_videolibrary_path() # Calculamos el path absoluto a partir de la Videoteca + if scrapertools.find_single_match(videolibrary_path, + '(^\w+:\/\/)'): # Si es una conexión REMOTA, usamos userdata local + videolibrary_path = config.get_data_path() # Calculamos el path absoluto a partir de Userdata + if not filetools.exists(videolibrary_path): # Si no existe el path, pasamos al modo clásico videolibrary_path = False else: - torrents_path = filetools.join(videolibrary_path, 'temp_torrents_Alfa', 'cliente_torrent_Alfa.torrent') #path descarga temporal - if videolibrary_path and not filetools.exists(filetools.join(videolibrary_path, 'temp_torrents_Alfa')): #Si no existe la carpeta temporal, la creamos + torrents_path = filetools.join(videolibrary_path, 'temp_torrents_Alfa', \ + 'cliente_torrent_Alfa.torrent') # path descarga temporal + if not videolibrary_path or not filetools.exists(filetools.join(videolibrary_path, \ + 'temp_torrents_Alfa')): # Si no existe la carpeta temporal, la creamos filetools.mkdir(filetools.join(videolibrary_path, 'temp_torrents_Alfa')) - #identificamos si es una url o un path de archivo. Los Magnets los tratamos de la forma clásica - if not item.url.startswith("\\") and not item.url.startswith("/") and not item.url.startswith("magnet:") and not url_stat: + # Si hay headers, se pasar a la petición de descarga del .torrent + headers = {} + if item.headers: + headers = item.headers + + # identificamos si es una url o un path de archivo + if not item.url.startswith("\\") and not item.url.startswith("/") and not url_stat: timeout = 10 if item.torrent_alt: timeout = 5 - #Si es una llamada con POST, lo preparamos + # Si es una llamada con POST, lo preparamos if item.referer: referer = item.referer if item.post: post = item.post - #Descargamos el .torrent - url = videolibrarytools.caching_torrents(item.url, referer, post, torrents_path=torrents_path, timeout=timeout) + # Descargamos el .torrent + size, url, torrent_f, rar_files = generictools.get_torrent_size(item.url, referer, post, \ + torrents_path=torrents_path, + timeout=timeout, lookup=False, + headers=headers, short_pad=True) if url: url_stat = True item.url = url - if "torrentin" in torrent_options[seleccion][1]: + if "torrentin" in torrent_options[seleccion][0]: item.url = 'file://' + item.url - if not url and item.torrent_alt: #Si hay error, se busca un .torrent alternativo + if not url and item.torrent_alt: # Si hay error, se busca un .torrent alternativo if (item.torrent_alt.startswith("\\") or item.torrent_alt.startswith("/")) and videolibrary_path: - item.url = item.torrent_alt #El .torrent alternativo puede estar en una url o en local - elif not item.url.startswith("\\") and not item.url.startswith("/") and not item.url.startswith("magnet:"): + item.url = item.torrent_alt # El .torrent alternativo puede estar en una url o en local + elif not item.url.startswith("\\") and not item.url.startswith("/"): 
item.url = item.torrent_alt - - #Si es un archivo .torrent local, actualizamos el path relativo a path absoluto - if (item.url.startswith("\\") or item.url.startswith("/")) and not url_stat and videolibrary_path: #.torrent alternativo local + + # Si es un archivo .torrent local, actualizamos el path relativo a path absoluto + if (item.url.startswith("\\") or item.url.startswith("/")) and not \ + url_stat and videolibrary_path: # .torrent alternativo local movies = config.get_setting("folder_movies") series = config.get_setting("folder_tvshows") - if item.contentType == 'movie': - folder = movies #películas + if item.contentType == 'movie': + folder = movies # películas else: - folder = series #o series - item.url = filetools.join(config.get_videolibrary_path(), folder, item.url) #dirección del .torrent local en la Videoteca - if filetools.copy(item.url, torrents_path, silent=True): #se copia a la carpeta generíca para evitar problemas de encode + folder = series # o series + item.url = filetools.join(config.get_videolibrary_path(), folder, + item.url) # dirección del .torrent local en la Videoteca + if filetools.copy(item.url, torrents_path, + silent=True): # se copia a la carpeta generíca para evitar problemas de encode item.url = torrents_path - if "torrentin" in torrent_options[seleccion][1]: #Si es Torrentin, hay que añadir un prefijo + if "torrentin" in torrent_options[seleccion][0]: # Si es Torrentin, hay que añadir un prefijo item.url = 'file://' + item.url + size, rar_files = generictools.get_torrent_size('', file_list=True, local_torr=torrents_path, + short_pad=True) - mediaurl = urllib.quote_plus(item.url) - #Llamada con más parámetros para completar el título - if ("quasar" in torrent_options[seleccion][1] or "elementum" in torrent_options[seleccion][1]) and item.infoLabels['tmdb_id']: - if item.contentType == 'episode' and "elementum" not in torrent_options[seleccion][1]: - mediaurl += "&episode=%s&library=&season=%s&show=%s&tmdb=%s&type=episode" % (item.infoLabels['episode'], item.infoLabels['season'], item.infoLabels['tmdb_id'], item.infoLabels['tmdb_id']) - elif item.contentType == 'movie': - mediaurl += "&library=&tmdb=%s&type=movie" % (item.infoLabels['tmdb_id']) + mediaurl = item.url - xbmc.executebuiltin("PlayMedia(" + torrent_options[seleccion][1] % mediaurl + ")") + if seleccion >= 0: + + # Reproductor propio BT (libtorrent) + if seleccion == 0: + torrent.bt_client(mediaurl, xlistitem, rar_files, subtitle=item.subtitle, password=password, item=item) - #Seleccionamos que clientes torrent soportamos para el marcado de vídeos vistos: asumimos que todos funcionan - #if "quasar" in torrent_options[seleccion][1] or "elementum" in torrent_options[seleccion][1]: + # Reproductor propio MCT (libtorrent) + elif seleccion == 1: + from platformcode import mct + mct.play(mediaurl, xlistitem, subtitle=item.subtitle, password=password, item=item) - time_limit = time.time() + 150 #Marcamos el timepo máx. 
de buffering - while not is_playing() and time.time() < time_limit: #Esperamos mientra buffera - time.sleep(5) #Repetimos cada intervalo - #logger.debug(str(time_limit)) - if item.subtitle != '': - time.sleep(5) - xbmc_player.setSubtitles(item.subtitle) - #subt = xbmcgui.ListItem(path=item.url, thumbnailImage=item.thumbnail) - #subt.setSubtitles([item.subtitle]) + # Plugins externos + else: + mediaurl = urllib.quote_plus(item.url) + # Llamada con más parámetros para completar el título + if ("quasar" in torrent_options[seleccion][1] or "elementum" in torrent_options[seleccion][1]) \ + and item.infoLabels['tmdb_id']: + if item.contentType == 'episode' and "elementum" not in torrent_options[seleccion][1]: + mediaurl += "&episode=%s&library=&season=%s&show=%s&tmdb=%s&type=episode" % ( + item.infoLabels['episode'], item.infoLabels['season'], item.infoLabels['tmdb_id'], + item.infoLabels['tmdb_id']) + elif item.contentType == 'movie': + mediaurl += "&library=&tmdb=%s&type=movie" % (item.infoLabels['tmdb_id']) - if item.strm_path and is_playing(): #Sólo si es de Videoteca - from platformcode import xbmc_videolibrary - xbmc_videolibrary.mark_auto_as_watched(item) #Marcamos como visto al terminar - #logger.debug("Llamado el marcado") + xbmc.executebuiltin("PlayMedia(" + torrent_options[seleccion][1] % mediaurl + ")") - if seleccion == 1: - from platformcode import mct - mct.play(mediaurl, xlistitem, subtitle=item.subtitle, item=item) + # Si es un archivo RAR, monitorizamos el cliente Torrent hasta que haya descargado el archivo, + # y después lo extraemos, incluso con RAR's anidados y con contraseña + torr_client = torrent_options[seleccion][0].replace('Plugin externo: ', '') + if 'RAR-' in size and torr_client in ['quasar', 'elementum'] and UNRAR: + rar_file, save_path_videos, folder_torr = torrent.wait_for_download(rar_files, + torr_client) # Esperamos mientras se descarga el RAR + if rar_file and save_path_videos: # Si se ha descargado el RAR... + dp = dialog_progress_bg('Alfa %s' % torr_client) + video_file, rar, video_path, erase_file_path = torrent.extract_files(rar_file, \ + save_path_videos, password, dp, + item, + torr_client) # ... 
extraemos el vídeo del RAR + dp.close() - # Reproductor propio (libtorrent) - if seleccion == 0: - import time - played = False - debug = (config.get_setting("debug") == True) - - # Importamos el cliente - from btserver import Client - - client_tmp_path = config.get_setting("downloadpath") - if not client_tmp_path: - client_tmp_path = config.get_data_path() - - # Iniciamos el cliente: - c = Client(url=mediaurl, is_playing_fnc=xbmc_player.isPlaying, wait_time=None, timeout=10, - temp_path=os.path.join(client_tmp_path, config.get_localized_string(70194)), print_status=debug) - - # Mostramos el progreso - progreso = dialog_progress(config.get_localized_string(70195), config.get_localized_string(70196)) - - # Mientras el progreso no sea cancelado ni el cliente cerrado - while not c.closed: - try: - # Obtenemos el estado del torrent - s = c.status - if debug: - # Montamos las tres lineas con la info del torrent - txt = '%.2f%% de %.1fMB %s | %.1f kB/s' % \ - (s.progress_file, s.file_size, s.str_state, s._download_rate) - txt2 = 'S: %d(%d) P: %d(%d) | DHT:%s (%d) | Trakers: %d' % \ - (s.num_seeds, s.num_complete, s.num_peers, s.num_incomplete, s.dht_state, s.dht_nodes, - s.trackers) - txt3 = 'Origen Peers TRK: %d DHT: %d PEX: %d LSD %d ' % \ - (s.trk_peers, s.dht_peers, s.pex_peers, s.lsd_peers) - else: - txt = '%.2f%% de %.1fMB %s | %.1f kB/s' % \ - (s.progress_file, s.file_size, s.str_state, s._download_rate) - txt2 = 'S: %d(%d) P: %d(%d)' % (s.num_seeds, s.num_complete, s.num_peers, s.num_incomplete) - try: - txt3 = config.get_localized_string(70197) % (int(s.timeout)) - except: - txt3 = '' - - progreso.update(s.buffer, txt, txt2, txt3) - time.sleep(0.5) - - if progreso.iscanceled(): - progreso.close() - if s.buffer == 100: - if dialog_yesno(config.get_localized_string(70195), config.get_localized_string(70198)): - played = False - progreso = dialog_progress(config.get_localized_string(70195), "") - progreso.update(s.buffer, txt, txt2, txt3) - else: - progreso = dialog_progress(config.get_localized_string(70195), "") - break - - else: - if dialog_yesno(config.get_localized_string(70195), config.get_localized_string(70199)): - progreso = dialog_progress(config.get_localized_string(70195), "") - break - - else: - progreso = dialog_progress(config.get_localized_string(70195), "") - progreso.update(s.buffer, txt, txt2, txt3) - - # Si el buffer se ha llenado y la reproduccion no ha sido iniciada, se inicia - if s.buffer == 100 and not played: - # Cerramos el progreso - progreso.close() - - # Obtenemos el playlist del torrent - videourl = c.get_play_list() - - # Iniciamos el reproductor - playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO) - playlist.clear() - playlist.add(videourl, xlistitem) - # xbmc_player = xbmc_player - xbmc_player.play(playlist) - - # Marcamos como reproducido para que no se vuelva a iniciar - played = True - - # si es un archivo de la videoteca enviar a marcar como visto - if item.strm_path: - from platformcode import xbmc_videolibrary - xbmc_videolibrary.mark_auto_as_watched(item) - - # Y esperamos a que el reproductor se cierre - while xbmc_player.isPlaying(): + # Reproducimos el vídeo extraido, si no hay nada en reproducción + while is_playing() and rar and not xbmc.abortRequested: + time.sleep(3) # Repetimos cada intervalo + if rar and not xbmc.abortRequested: time.sleep(1) + video_play = filetools.join(video_path, video_file) + log("##### video_play: %s" % video_play) + playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO) + playlist.clear() + playlist.add(video_play, xlistitem) + 
xbmc_player.play(playlist) - # Cuando este cerrado, Volvemos a mostrar el dialogo - progreso = dialog_progress(config.get_localized_string(70195), "") - progreso.update(s.buffer, txt, txt2, txt3) + if seleccion > 1: + # Seleccionamos que clientes torrent soportamos para el marcado de vídeos vistos: asumimos que todos funcionan + torrent.mark_auto_as_watched(item) - except: - import traceback - logger.error(traceback.format_exc()) - break + # Si se ha extraido un RAR, se pregunta para borrar los archivos después de reproducir el vídeo (plugins externos) + while is_playing() and rar and not xbmc.abortRequested: + time.sleep(3) # Repetimos cada intervalo + if rar and not xbmc.abortRequested: + if dialog_yesno('Alfa %s' % torr_client, '¿Borrar las descargas del RAR y Vídeo?'): + log("##### erase_file_path: %s" % erase_file_path) + try: + torr_data, deamon_url, index = torrent.get_tclient_data(folder_torr, torr_client) + if torr_data and deamon_url: + data = httptools.downloadpage('%sdelete/%s' % (deamon_url, index), timeout=5, + alfa_s=True).data + time.sleep(1) + if filetools.isdir(erase_file_path): + filetools.rmdirtree(erase_file_path) + elif filetools.exists(erase_file_path) and filetools.isfile(erase_file_path): + filetools.remove(erase_file_path) + except: + logger.error(traceback.format_exc(1)) + elementum_dl = config.get_setting("elementum_dl", server="torrent", + default='') # Si salvamos el cambio de Elementum + if elementum_dl: + config.set_setting("elementum_dl", "", server="torrent") # lo reseteamos en Alfa + xbmcaddon.Addon(id="plugin.video.%s" % torrent_options[seleccion][0].replace('Plugin externo: ', '')) \ + .setSetting('download_storage', elementum_dl) # y lo reseteamos en Elementum - progreso.update(100, config.get_localized_string(70200), " ", " ") - # Detenemos el cliente - if not c.closed: - c.stop() +def log(texto): + xbmc.log(texto, xbmc.LOGNOTICE) - # Y cerramos el progreso - progreso.close() diff --git a/platformcode/recaptcha.py b/platformcode/recaptcha.py index 57f4a68b..4748367c 100644 --- a/platformcode/recaptcha.py +++ b/platformcode/recaptcha.py @@ -1,12 +1,13 @@ # -*- coding: utf-8 -*- +from builtins import range import xbmcgui - from core import httptools from core import scrapertools from platformcode import config from platformcode import platformtools +lang = 'it' class Recaptcha(xbmcgui.WindowXMLDialog): def Start(self, key, referer): @@ -14,9 +15,9 @@ class Recaptcha(xbmcgui.WindowXMLDialog): self.key = key self.headers = {'Referer': self.referer} - api_js = httptools.downloadpage("http://www.google.com/recaptcha/api.js?hl=es").data - version = scrapertools.find_single_match(api_js, 'po.src = \'(.*?)\';').split("/")[5] - self.url = "http://www.google.com/recaptcha/api/fallback?k=%s&hl=es&v=%s&t=2&ff=true" % (self.key, version) + api_js = httptools.downloadpage("https://www.google.com/recaptcha/api.js?hl=" + lang).data + version = scrapertools.find_single_match(api_js, 'po.src\s*=\s*\'(.*?)\';').split("/")[5] + self.url = "https://www.google.com/recaptcha/api/fallback?k=" + self.key + "&hl=" + lang + "&v=" + version + "&t=2&ff=true" self.doModal() # Reload if self.result == {}: @@ -27,10 +28,10 @@ class Recaptcha(xbmcgui.WindowXMLDialog): def update_window(self): data = httptools.downloadpage(self.url, headers=self.headers).data self.message = scrapertools.find_single_match(data, - '<div class="rc-imageselect-desc-no-canonical">(.*?)(?:</label>|</div>)').replace( + '<div class="rc-imageselect-desc[a-z-]*">(.*?)(?:</label>|</div>)').replace( "<strong>", 
"[B]").replace("</strong>", "[/B]") self.token = scrapertools.find_single_match(data, 'name="c" value="([^"]+)"') - self.image = "http://www.google.com/recaptcha/api2/payload?k=%s&c=%s" % (self.key, self.token) + self.image = "https://www.google.com/recaptcha/api2/payload?k=%s&c=%s" % (self.key, self.token) self.result = {} self.getControl(10020).setImage(self.image) self.getControl(10000).setText(self.message) @@ -56,16 +57,18 @@ class Recaptcha(xbmcgui.WindowXMLDialog): self.close() elif control == 10002: - self.result = [int(k) for k in range(9) if self.result.get(k, False) == True] - post = "c=%s" % self.token + self.result = [int(k) for k in range(9) if self.result.get(k, False)] + post = { + "c": self.token, + "response": self.result + } - for r in self.result: - post += "&response=%s" % r - - data = httptools.downloadpage(self.url, post, headers=self.headers).data + data = httptools.downloadpage(self.url, post=post, headers=self.headers).data + from platformcode import logger + logger.info(data) self.result = scrapertools.find_single_match(data, '<div class="fbc-verification-token">.*?>([^<]+)<') if self.result: - platformtools.dialog_notification("Captcha Correcto", "La verificación ha concluido") + platformtools.dialog_notification("Captcha corretto", "Verifica conclusa") self.close() else: self.result = {} diff --git a/platformcode/subtitletools.py b/platformcode/subtitletools.py index bd30ff93..b4294efc 100644 --- a/platformcode/subtitletools.py +++ b/platformcode/subtitletools.py @@ -1,17 +1,34 @@ # -*- coding: utf-8 -*- +from __future__ import print_function +#from builtins import str +import sys +PY3 = False +if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int + +if PY3: + #from future import standard_library + #standard_library.install_aliases() + import urllib.parse as urllib # Es muy lento en PY2. En PY3 es nativo +else: + import urllib # Usamos el nativo de PY2 que es más rápido + import os import re import string -import urllib + from unicodedata import normalize +from core import filetools +from core import httptools +from core import jsontools +from core import scrapertools import xbmc import xbmcgui - from platformcode import config, logger -allchars = string.maketrans('', '') +if PY3: allchars = str.maketrans('', '') +if not PY3: allchars = string.maketrans('', '') deletechars = ',\\/:*"<>|?' 
@@ -38,14 +55,14 @@ def regex_tvshow(compare, file, sub=""):
     for regex in regex_expressions:
         response_file = re.findall(regex, file)
         if len(response_file) > 0:
-            print "Regex File Se: %s, Ep: %s," % (str(response_file[0][0]), str(response_file[0][1]),)
+            print("Regex File Se: %s, Ep: %s," % (str(response_file[0][0]), str(response_file[0][1]),))
             tvshow = 1
             if not compare:
                 title = re.split(regex, file)[0]
                 for char in ['[', ']', '_', '(', ')', '.', '-']:
                     title = title.replace(char, ' ')
                 if title.endswith(" "): title = title.strip()
-                print "title: %s" % title
+                print("title: %s" % title)
                 return title, response_file[0][0], response_file[0][1]
         else:
             break
@@ -74,7 +91,7 @@ def set_Subtitle():
     logger.info()
 
     exts = [".srt", ".sub", ".txt", ".smi", ".ssa", ".ass"]
-    subtitle_folder_path = os.path.join(config.get_data_path(), "subtitles")
+    subtitle_folder_path = filetools.join(config.get_data_path(), "subtitles")
 
     subtitle_type = config.get_setting("subtitle_type")
 
@@ -90,9 +107,9 @@
             config.set_setting("subtitlepath_folder", subtitle_path)
     else:
         subtitle_path = config.get_setting("subtitlepath_keyboard")
-        long = len(subtitle_path)
-        if long > 0:
-            if subtitle_path.startswith("http") or subtitle_path[long - 4, long] in exts:
+        long_v = len(subtitle_path)
+        if long_v > 0:
+            if subtitle_path.startswith("http") or subtitle_path[long_v - 4:long_v] in exts:
                 logger.info("Con subtitulo : " + subtitle_path)
                 xbmc.Player().setSubtitles(subtitle_path)
                 return
@@ -106,13 +123,13 @@
         tvshow_title, season, episode = regex_tvshow(False, subtitle_name)
         try:
             if episode != "":
-                Subnames = glob.glob(os.path.join(subtitle_path, "Tvshows", tvshow_title,
+                Subnames = glob.glob(filetools.join(subtitle_path, "Tvshows", tvshow_title,
                                                   "%s %sx%s" % (tvshow_title, season, episode) + "*.??.???"))
             else:
-                Subnames = glob.glob(os.path.join(subtitle_path, "Movies", subtitle_name + "*.??.???"))
+                Subnames = glob.glob(filetools.join(subtitle_path, "Movies", subtitle_name + "*.??.???"))
             for Subname in Subnames:
                 if os.path.splitext(Subname)[1] in exts:
-                    logger.info("Con subtitulo : " + os.path.split(Subname)[1])
+                    logger.info("Con subtitulo : " + filetools.split(Subname)[1])
                     xbmc.Player().setSubtitles((Subname))
         except:
             logger.error("error al cargar subtitulos")
@@ -147,13 +164,13 @@
     if config.get_setting("subtitle_type") == 0:
         subtitlepath = config.get_setting("subtitlepath_folder")
         if subtitlepath == "":
-            subtitlepath = os.path.join(config.get_data_path(), "subtitles")
+            subtitlepath = filetools.join(config.get_data_path(), "subtitles")
             config.set_setting("subtitlepath_folder", subtitlepath)
 
     elif config.get_setting("subtitle_type") == 1:
         subtitlepath = config.get_setting("subtitlepath_keyboard")
         if subtitlepath == "":
-            subtitlepath = os.path.join(config.get_data_path(), "subtitles")
+            subtitlepath = filetools.join(config.get_data_path(), "subtitles")
             config.set_setting("subtitlepathkeyboard", subtitlepath)
     elif subtitlepath.startswith("http"):
         subtitlepath = config.get_setting("subtitlepath_folder")
@@ -161,27 +178,27 @@
     else:
         subtitlepath = config.get_setting("subtitlepath_folder")
         if subtitlepath == "":
-            subtitlepath = os.path.join(config.get_data_path(), "subtitles")
+            subtitlepath = filetools.join(config.get_data_path(), "subtitles")
             config.set_setting("subtitlepath_folder", subtitlepath)
 
-    if not os.path.exists(subtitlepath):
+    if not filetools.exists(subtitlepath):
         try:
-            os.mkdir(subtitlepath)
+            filetools.mkdir(subtitlepath)
         except:
logger.error("error no se pudo crear path subtitulos") return - path_movie_subt = xbmc.translatePath(os.path.join(subtitlepath, "Movies")) - if not os.path.exists(path_movie_subt): + path_movie_subt = xbmc.translatePath(filetools.join(subtitlepath, "Movies")) + if not filetools.exists(path_movie_subt): try: - os.mkdir(path_movie_subt) + filetools.mkdir(path_movie_subt) except: logger.error("error no se pudo crear el path Movies") return full_path_tvshow = "" - path_tvshow_subt = xbmc.translatePath(os.path.join(subtitlepath, "Tvshows")) - if not os.path.exists(path_tvshow_subt): + path_tvshow_subt = xbmc.translatePath(filetools.join(subtitlepath, "Tvshows")) + if not filetools.exists(path_tvshow_subt): try: - os.mkdir(path_tvshow_subt) + filetools.mkdir(path_tvshow_subt) except: logger.error("error no pudo crear el path Tvshows") return @@ -189,20 +206,20 @@ def searchSubtitle(item): title_new = title = urllib.unquote_plus(item.title) else: title_new = title = urllib.unquote_plus(item.show + " - " + item.title) - path_video_temp = xbmc.translatePath(os.path.join(config.get_runtime_path(), "resources", "subtitle.mp4")) - if not os.path.exists(path_video_temp): + path_video_temp = xbmc.translatePath(filetools.join(config.get_runtime_path(), "resources", "subtitle.mp4")) + if not filetools.exists(path_video_temp): logger.error("error : no existe el video temporal de subtitulos") return - # path_video_temp = xbmc.translatePath(os.path.join( ,video_temp + ".mp4" )) + # path_video_temp = xbmc.translatePath(filetools.join( ,video_temp + ".mp4" )) title_new = _normalize(title_new) tvshow_title, season, episode = regex_tvshow(False, title_new) if episode != "": - full_path_tvshow = xbmc.translatePath(os.path.join(path_tvshow_subt, tvshow_title)) - if not os.path.exists(full_path_tvshow): - os.mkdir(full_path_tvshow) # title_new + ".mp4" + full_path_tvshow = xbmc.translatePath(filetools.join(path_tvshow_subt, tvshow_title)) + if not filetools.exists(full_path_tvshow): + filetools.mkdir(full_path_tvshow) # title_new + ".mp4" full_path_video_new = xbmc.translatePath( - os.path.join(full_path_tvshow, "%s %sx%s.mp4" % (tvshow_title, season, episode))) + filetools.join(full_path_tvshow, "%s %sx%s.mp4" % (tvshow_title, season, episode))) logger.info(full_path_video_new) listitem = xbmcgui.ListItem(title_new, iconImage="DefaultVideo.png", thumbnailImage="") listitem.setInfo("video", @@ -210,14 +227,14 @@ def searchSubtitle(item): "tvshowtitle": tvshow_title}) else: - full_path_video_new = xbmc.translatePath(os.path.join(path_movie_subt, title_new + ".mp4")) + full_path_video_new = xbmc.translatePath(filetools.join(path_movie_subt, title_new + ".mp4")) listitem = xbmcgui.ListItem(title, iconImage="DefaultVideo.png", thumbnailImage="") listitem.setInfo("video", {"Title": title_new, "Genre": "Movies"}) - import shutil, time + import time try: - shutil.copy(path_video_temp, full_path_video_new) + filetools.copy(path_video_temp, full_path_video_new) copy = True logger.info("nuevo path =" + full_path_video_new) time.sleep(2) @@ -242,10 +259,10 @@ def searchSubtitle(item): continue time.sleep(1) - os.remove(full_path_video_new) + filetools.remove(full_path_video_new) try: if full_path_tvshow != "": - os.rmdir(full_path_tvshow) + filetools.rmdir(full_path_tvshow) except OSError: pass @@ -267,3 +284,70 @@ def saveSubtitleName(item): else: config.set_setting("subtitle_name", title) return + + +def get_from_subdivx(sub_url): + + """ + :param sub_url: Url de descarga del subtitulo alojado en suvdivx.com + Por Ejemplo: 
http://www.subdivx.com/bajar.php?id=573942&u=8 + + :return: La ruta al subtitulo descomprimido + """ + + logger.info() + + sub = '' + sub_dir = os.path.join(config.get_data_path(), 'temp_subs') + + if os.path.exists(sub_dir): + for sub_file in os.listdir(sub_dir): + old_sub = os.path.join(sub_dir, sub_file) + os.remove(old_sub) + else: + os.mkdir(sub_dir) + + sub_url = sub_url.replace("&", "&") + sub_data = httptools.downloadpage(sub_url, follow_redirects=False) + if 'x-frame-options' not in sub_data.headers: + sub_url = '%s' % sub_data.headers['location'] + ext = sub_url[-4::] + file_id = "subtitle%s" % ext + filename = os.path.join(sub_dir, file_id) + try: + data_dl = httptools.downloadpage(sub_url).data + filetools.write(filename, data_dl) + sub = extract_file_online(sub_dir, filename) + except: + logger.info('sub no valido') + else: + logger.info('sub no valido') + return sub + + +def extract_file_online(path, filename): + + """ + :param path: Ruta donde se encuentra el archivo comprimido + + :param filename: Nombre del archivo comprimido + + :return: Devuelve la ruta al subtitulo descomprimido + """ + + logger.info() + + url = "http://online.b1.org/rest/online/upload" + + data = httptools.downloadpage(url, file=filename).data + + result = jsontools.load(scrapertools.find_single_match(data, "result.listing = ([^;]+);")) + compressed = result["name"] + extracted = result["children"][0]["name"] + + dl_url = "http://online.b1.org/rest/online/download/%s/%s" % (compressed, extracted) + extracted_path = os.path.join(path, extracted) + data_dl = httptools.downloadpage(dl_url).data + filetools.write(extracted_path, data_dl) + + return extracted_path diff --git a/platformcode/unify.py b/platformcode/unify.py index 0c8c41af..07302888 100644 --- a/platformcode/unify.py +++ b/platformcode/unify.py @@ -6,189 +6,167 @@ # datos obtenidos de las paginas # ---------------------------------------------------------- -import re +# from builtins import str +import sys + +PY3 = False +if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int + +import os import unicodedata +import re -import config - +from platformcode import config +from core.item import Item from core import scrapertools from platformcode import logger -thumb_dict = { - "numbers": "http://icons.iconarchive.com/icons/custom-icon-design/pretty-office-10/256/Numbers-icon.png", - "a": "http://icons.iconarchive.com/icons/hydrattz/multipurpose-alphabet/256/Letter-A-black-icon.png", - "accion": "https://s14.postimg.cc/sqy3q2aht/action.png", - "actors": "https://i.postimg.cc/tC2HMhVV/actors.png", - "adolescente" : "https://s10.postimg.cc/inq7u4p61/teens.png", - "adultos": "https://s10.postimg.cc/s8raxc51l/adultos.png", - "adults": "https://s10.postimg.cc/s8raxc51l/adultos.png", - "alcinema": "http://icons.iconarchive.com/icons/chromatix/aerial/256/movie-icon.png", #"http://icons.iconarchive.com/icons/itzikgur/my-seven/256/Movies-Films-icon.png", - "all": "https://s10.postimg.cc/h1igpgw0p/todas.png", - "alphabet": "https://s10.postimg.cc/4dy3ytmgp/a-z.png", - "animacion": "https://s14.postimg.cc/vl193mupd/animation.png", - "anime" : "https://s10.postimg.cc/n9mc2ikzt/anime.png", - "artes marciales" : "https://s10.postimg.cc/4u1v51tzt/martial_arts.png", - "asiaticas" : "https://i.postimg.cc/Xq0HXD5d/asiaticas.png", - "audio": "https://s10.postimg.cc/b34nern7d/audio.png", - "aventura": "http://icons.iconarchive.com/icons/sirubico/movie-genre/256/Adventure-2-icon.png",#"https://s14.postimg.cc/ky7fy5he9/adventure.png", - "b": 
"http://icons.iconarchive.com/icons/hydrattz/multipurpose-alphabet/256/Letter-B-black-icon.png", - "belico": "https://s14.postimg.cc/5e027lru9/war.png", - "biografia" : "https://s10.postimg.cc/jq0ecjxnt/biographic.png", - "c": "http://icons.iconarchive.com/icons/hydrattz/multipurpose-alphabet/256/Letter-C-black-icon.png", - "carreras": "https://s14.postimg.cc/yt5qgdr69/races.png", - "cast": "https://i.postimg.cc/qvfP5Xvt/cast.png", - "categories": "https://s10.postimg.cc/v0ako5lmh/categorias.png", - "ciencia ficcion": "https://s14.postimg.cc/8kulr2jy9/scifi.png", - "cine negro" : "https://s10.postimg.cc/6ym862qgp/noir.png", - "colections": "https://s10.postimg.cc/ywnwjvytl/colecciones.png", - "comedia": "https://s14.postimg.cc/9ym8moog1/comedy.png", - "cortometraje" : "https://s10.postimg.cc/qggvlxndl/shortfilm.png", - "country": "https://s10.postimg.cc/yz0h81j15/pais.png", - "crimen": "https://s14.postimg.cc/duzkipjq9/crime.png", - "d": "http://icons.iconarchive.com/icons/hydrattz/multipurpose-alphabet/256/Letter-D-black-icon.png", - "de la tv": "https://s10.postimg.cc/94gj0iwh5/image.png", - "deporte": "https://s14.postimg.cc/x1crlnnap/sports.png", - "destacadas": "https://s10.postimg.cc/yu40x8q2x/destacadas.png", - "documental": "https://s10.postimg.cc/68aygmmcp/documentales.png", - "documentaries": "https://s10.postimg.cc/68aygmmcp/documentales.png", - "doramas":"https://s10.postimg.cc/h4dyr4nfd/doramas.png", - "drama": "https://s14.postimg.cc/fzjxjtnxt/drama.png", - "e": "http://icons.iconarchive.com/icons/hydrattz/multipurpose-alphabet/256/Letter-E-black-icon.png", - "erotica" : "https://s10.postimg.cc/dcbb9bfx5/erotic.png", - "espanolas" : "https://s10.postimg.cc/x1y6zikx5/spanish.png", - "estrenos" : "https://s10.postimg.cc/sk8r9xdq1/estrenos.png", - "extranjera": "https://s10.postimg.cc/f44a4eerd/foreign.png", - "f": "http://icons.iconarchive.com/icons/hydrattz/multipurpose-alphabet/256/Letter-F-black-icon.png", - "familiar": "https://s14.postimg.cc/jj5v9ndsx/family.png", - "fantasia": "https://s14.postimg.cc/p7c60ksg1/fantasy.png", - "fantastico" : "https://s10.postimg.cc/tedufx5eh/fantastic.png", - "favorites": "https://s10.postimg.cc/rtg147gih/favoritas.png", - "g": "http://icons.iconarchive.com/icons/hydrattz/multipurpose-alphabet/256/Letter-G-black-icon.png", - "genres": "https://s10.postimg.cc/6c4rx3x1l/generos.png", - "h": "http://icons.iconarchive.com/icons/hydrattz/multipurpose-alphabet/256/Letter-H-black-icon.png", - "historica": "https://s10.postimg.cc/p1faxj6yh/historic.png", - "horror" : "https://s10.postimg.cc/8exqo6yih/horror2.png", - "hot": "https://s10.postimg.cc/yu40x8q2x/destacadas.png", - "i": "http://icons.iconarchive.com/icons/hydrattz/multipurpose-alphabet/256/Letter-I-black-icon.png", - "infantil": "https://s14.postimg.cc/4zyq842mp/childish.png", - "intriga": "https://s14.postimg.cc/5qrgdimw1/intrigue.png", - "j": "http://icons.iconarchive.com/icons/hydrattz/multipurpose-alphabet/256/Letter-J-black-icon.png", - "k": "http://icons.iconarchive.com/icons/hydrattz/multipurpose-alphabet/256/Letter-K-black-icon.png", - "l": "http://icons.iconarchive.com/icons/hydrattz/multipurpose-alphabet/256/Letter-L-black-icon.png", - "language": "https://s10.postimg.cc/6wci189ft/idioma.png", - "last": "https://s10.postimg.cc/i6ciuk0eh/ultimas.png", - "lat": "https://i.postimg.cc/Gt8fMH0J/lat.png", - "latino" : "https://s10.postimg.cc/swip0b86h/latin.png", - "m": "http://icons.iconarchive.com/icons/hydrattz/multipurpose-alphabet/256/Letter-M-black-icon.png", - "mexicanas" : 
"https://s10.postimg.cc/swip0b86h/latin.png", - "misterio": "https://s14.postimg.cc/3m73cg8ep/mistery.png", - "more voted": "https://s10.postimg.cc/lwns2d015/masvotadas.png", - "more watched": "https://s10.postimg.cc/c6orr5neh/masvistas.png", - "movies": "https://s10.postimg.cc/fxtqzdog9/peliculas.png", - "musical": "https://s10.postimg.cc/hy7fhtecp/musical.png", - "n": "http://icons.iconarchive.com/icons/hydrattz/multipurpose-alphabet/256/Letter-N-black-icon.png", - "new episodes": "https://s10.postimg.cc/fu4iwpnqh/nuevoscapitulos.png", - "newest": "http://icons.iconarchive.com/icons/laurent-baumann/creme/128/Location-News-icon.png", #"http://icons.iconarchive.com/icons/uiconstock/ios8-setting/128/news-icon.png", - "nextpage": "http://icons.iconarchive.com/icons/custom-icon-design/pretty-office-5/256/navigate-right-icon.png", #"http://icons.iconarchive.com/icons/custom-icon-design/office/256/forward-icon.png", #"http://icons.iconarchive.com/icons/ahmadhania/spherical/128/forward-icon.png", - "o": "http://icons.iconarchive.com/icons/hydrattz/multipurpose-alphabet/256/Letter-O-black-icon.png", - "others": "http://icons.iconarchive.com/icons/limav/movie-genres-folder/128/Others-icon.png", - "p": "http://icons.iconarchive.com/icons/hydrattz/multipurpose-alphabet/256/Letter-P-black-icon.png", - "peleas" : "https://s10.postimg.cc/7a3ojbjwp/Fight.png", - "policial" : "https://s10.postimg.cc/wsw0wbgbd/cops.png", - "premieres": "https://s10.postimg.cc/sk8r9xdq1/estrenos.png", - "q": "http://icons.iconarchive.com/icons/hydrattz/multipurpose-alphabet/256/Letter-Q-black-icon.png", - "quality": "https://s10.postimg.cc/9bbojsbjd/calidad.png", - "r": "http://icons.iconarchive.com/icons/hydrattz/multipurpose-alphabet/256/Letter-R-black-icon.png", - "recents": "https://s10.postimg.cc/649u24kp5/recents.png", - "recomendadas": "https://s10.postimg.cc/7xk1oqccp/recomendadas.png", - "recomended": "https://s10.postimg.cc/7xk1oqccp/recomendadas.png", - "religion" : "https://s10.postimg.cc/44j2skquh/religion.png", - "romance" : "https://s10.postimg.cc/yn8vdll6x/romance.png", - "romantica": "https://s14.postimg.cc/8xlzx7cht/romantic.png", - "s": "http://icons.iconarchive.com/icons/hydrattz/multipurpose-alphabet/256/Letter-S-black-icon.png", - "search": "http://icons.iconarchive.com/icons/jamespeng/movie/256/database-icon.png", - "suspenso": "https://s10.postimg.cc/7peybxdfd/suspense.png", - "t": "http://icons.iconarchive.com/icons/hydrattz/multipurpose-alphabet/256/Letter-T-black-icon.png", - "telenovelas": "https://i.postimg.cc/QCXZkyDM/telenovelas.png", - "terror": "https://s14.postimg.cc/thqtvl52p/horror.png", - "thriller": "https://s14.postimg.cc/uwsekl8td/thriller.png", - "tvshows": "https://s10.postimg.cc/kxvslawe1/series.png", - "u": "http://icons.iconarchive.com/icons/hydrattz/multipurpose-alphabet/256/Letter-U-black-icon.png", - "ultimiarrivi" : "http://icons.iconarchive.com/icons/saki/snowish/128/Extras-internet-download-icon.png", - "updated" : "https://s10.postimg.cc/46m3h6h9l/updated.png", - "v": "http://icons.iconarchive.com/icons/hydrattz/multipurpose-alphabet/256/Letter-V-black-icon.png", - "vose": "https://i.postimg.cc/kgmnbd8h/vose.png", - "w": "http://icons.iconarchive.com/icons/hydrattz/multipurpose-alphabet/256/Letter-W-black-icon.png", - "western": "https://s10.postimg.cc/5wc1nokjt/western.png", - "x": "http://icons.iconarchive.com/icons/hydrattz/multipurpose-alphabet/256/Letter-X-black-icon.png", - "y": 
"http://icons.iconarchive.com/icons/hydrattz/multipurpose-alphabet/256/Letter-Y-black-icon.png", - "year": "https://s10.postimg.cc/atzrqg921/a_o.png", - "z": "http://icons.iconarchive.com/icons/hydrattz/multipurpose-alphabet/256/Letter-Z-black-icon.png" - } +thumb_dict = {"movies": "https://s10.postimg.cc/fxtqzdog9/peliculas.png", + "tvshows": "https://s10.postimg.cc/kxvslawe1/series.png", + "on air": "https://i.postimg.cc/HLLJWMcr/en-emision.png", + "all": "https://s10.postimg.cc/h1igpgw0p/todas.png", + "genres": "https://s10.postimg.cc/6c4rx3x1l/generos.png", + "search": "https://s10.postimg.cc/v985e2izd/buscar.png", + "quality": "https://s10.postimg.cc/9bbojsbjd/calidad.png", + "audio": "https://s10.postimg.cc/b34nern7d/audio.png", + "newest": "https://s10.postimg.cc/g1s5tf1bt/novedades.png", + "last": "https://s10.postimg.cc/i6ciuk0eh/ultimas.png", + "hot": "https://s10.postimg.cc/yu40x8q2x/destacadas.png", + "year": "https://s10.postimg.cc/atzrqg921/a_o.png", + "alphabet": "https://s10.postimg.cc/4dy3ytmgp/a-z.png", + "recomended": "https://s10.postimg.cc/7xk1oqccp/recomendadas.png", + "more watched": "https://s10.postimg.cc/c6orr5neh/masvistas.png", + "more voted": "https://s10.postimg.cc/lwns2d015/masvotadas.png", + "favorites": "https://s10.postimg.cc/rtg147gih/favoritas.png", + "colections": "https://s10.postimg.cc/ywnwjvytl/colecciones.png", + "categories": "https://s10.postimg.cc/v0ako5lmh/categorias.png", + "premieres": "https://s10.postimg.cc/sk8r9xdq1/estrenos.png", + "documentaries": "https://s10.postimg.cc/68aygmmcp/documentales.png", + "language": "https://s10.postimg.cc/6wci189ft/idioma.png", + "new episodes": "https://s10.postimg.cc/fu4iwpnqh/nuevoscapitulos.png", + "country": "https://s10.postimg.cc/yz0h81j15/pais.png", + "adults": "https://s10.postimg.cc/s8raxc51l/adultos.png", + "recents": "https://s10.postimg.cc/649u24kp5/recents.png", + "updated": "https://s10.postimg.cc/46m3h6h9l/updated.png", + "actors": "https://i.postimg.cc/tC2HMhVV/actors.png", + "cast": "https://i.postimg.cc/qvfP5Xvt/cast.png", + "lat": "https://i.postimg.cc/Gt8fMH0J/lat.png", + "vose": "https://i.postimg.cc/kgmnbd8h/vose.png", + "accion": "https://s14.postimg.cc/sqy3q2aht/action.png", + "adolescente": "https://s10.postimg.cc/inq7u4p61/teens.png", + "adultos": "https://s10.postimg.cc/s8raxc51l/adultos.png", + "animacion": "https://s14.postimg.cc/vl193mupd/animation.png", + "anime": "https://s10.postimg.cc/n9mc2ikzt/anime.png", + "artes marciales": "https://s10.postimg.cc/4u1v51tzt/martial_arts.png", + "asiaticas": "https://i.postimg.cc/Xq0HXD5d/asiaticas.png", + "aventura": "https://s14.postimg.cc/ky7fy5he9/adventure.png", + "belico": "https://s14.postimg.cc/5e027lru9/war.png", + "biografia": "https://s10.postimg.cc/jq0ecjxnt/biographic.png", + "carreras": "https://s14.postimg.cc/yt5qgdr69/races.png", + "ciencia ficcion": "https://s14.postimg.cc/8kulr2jy9/scifi.png", + "cine negro": "https://s10.postimg.cc/6ym862qgp/noir.png", + "comedia": "https://s14.postimg.cc/9ym8moog1/comedy.png", + "cortometraje": "https://s10.postimg.cc/qggvlxndl/shortfilm.png", + "crimen": "https://s14.postimg.cc/duzkipjq9/crime.png", + "de la tv": "https://s10.postimg.cc/94gj0iwh5/image.png", + "deporte": "https://s14.postimg.cc/x1crlnnap/sports.png", + "destacadas": "https://s10.postimg.cc/yu40x8q2x/destacadas.png", + "documental": "https://s10.postimg.cc/68aygmmcp/documentales.png", + "doramas": "https://s10.postimg.cc/h4dyr4nfd/doramas.png", + "drama": "https://s14.postimg.cc/fzjxjtnxt/drama.png", + "erotica": 
"https://s10.postimg.cc/dcbb9bfx5/erotic.png", + "espanolas": "https://s10.postimg.cc/x1y6zikx5/spanish.png", + "estrenos": "https://s10.postimg.cc/sk8r9xdq1/estrenos.png", + "extranjera": "https://s10.postimg.cc/f44a4eerd/foreign.png", + "familiar": "https://s14.postimg.cc/jj5v9ndsx/family.png", + "fantasia": "https://s14.postimg.cc/p7c60ksg1/fantasy.png", + "fantastico": "https://s10.postimg.cc/tedufx5eh/fantastic.png", + "historica": "https://s10.postimg.cc/p1faxj6yh/historic.png", + "horror": "https://s10.postimg.cc/8exqo6yih/horror2.png", + "infantil": "https://s14.postimg.cc/4zyq842mp/childish.png", + "intriga": "https://s14.postimg.cc/5qrgdimw1/intrigue.png", + "latino": "https://s10.postimg.cc/swip0b86h/latin.png", + "mexicanas": "https://s10.postimg.cc/swip0b86h/latin.png", + "misterio": "https://s14.postimg.cc/3m73cg8ep/mistery.png", + "musical": "https://s10.postimg.cc/hy7fhtecp/musical.png", + "peleas": "https://s10.postimg.cc/7a3ojbjwp/Fight.png", + "policial": "https://s10.postimg.cc/wsw0wbgbd/cops.png", + "recomendadas": "https://s10.postimg.cc/7xk1oqccp/recomendadas.png", + "religion": "https://s10.postimg.cc/44j2skquh/religion.png", + "romance": "https://s10.postimg.cc/yn8vdll6x/romance.png", + "romantica": "https://s14.postimg.cc/8xlzx7cht/romantic.png", + "suspenso": "https://s10.postimg.cc/7peybxdfd/suspense.png", + "telenovelas": "https://i.postimg.cc/QCXZkyDM/telenovelas.png", + "terror": "https://s14.postimg.cc/thqtvl52p/horror.png", + "thriller": "https://s14.postimg.cc/uwsekl8td/thriller.png", + "western": "https://s10.postimg.cc/5wc1nokjt/western.png" + } + def set_genre(string): - #logger.info() + # logger.info() - genres_dict = {'accion':['azione'], - 'adultos':['adulto','adulti'], - 'animacion':['animazione'], - 'adolescente':['adolescente', 'adolescenti'], - 'aventura':['avventura'], - 'belico':['guerra','guerriglia'], - 'biografia':['biografia', 'biografie', 'biografico'], - 'ciencia ficcion':['ciencia ficcion', 'cienciaficcion', 'sci fi', 'c ficcion'], - 'cine negro':['film noir'], - 'comedia':['commedia', 'commedie'], - 'cortometraje':['cortometraggio', 'corto', 'corti'], - 'de la tv':['della tv', 'televisione', 'tv'], - 'deporte':['deporte', 'deportes'], - 'destacadas':['destacada', 'destacadas'], - 'documental':['documentario', 'documentari'], - 'erotica':['erotica', 'erotica +', 'eroticas', 'eroticas +', 'erotico', 'erotico +'], - 'estrenos':['estrenos', 'estrenos'], - 'extranjera':['extrajera', 'extrajeras', 'foreign'], - 'familiar':['familiare', 'famiglia'], - 'fantastico':['fantastico', 'fantastica', 'fantastici'], - 'historica':['storico', 'storia'], - 'infantil':['bambini', 'infanzia'], - 'musical':['musicale', 'musical', 'musica'], - 'numbers': ['0','1','2','3','4','5','6','7','8','9'], - 'policial':['politico', 'politici', 'politica'], - 'recomendadas':['raccomandato', 'raccomandati'], - 'religion':['religione', 'religioso', 'religiosa','religiosi'], - 'romantica':['romantica', 'romantico', 'romantici'], - 'suspenso':['suspenso', 'suspense'], - 'thriller':['thriller', 'thrillers'], - 'western':['western', 'westerns'] + genres_dict = {'accion': ['accion', 'action', 'accion y aventura', 'action & adventure'], + 'adultos': ['adultos', 'adultos +', 'adulto'], + 'animacion': ['animacion', 'animacion e infantil', 'dibujos animados'], + 'adolescente': ['adolescente', 'adolescentes', 'adolescencia', 'adolecentes'], + 'aventura': ['aventura', 'aventuras'], + 'belico': ['belico', 'belica', 'belicas', 'guerra', 'belico guerra'], + 'biografia': ['biografia', 
'biografias', 'biografica', 'biograficas', 'biografico'], + 'ciencia ficcion': ['ciencia ficcion', 'cienciaficcion', 'sci fi', 'c ficcion'], + 'cine negro': ['film noir', 'negro'], + 'comedia': ['comedia', 'comedias'], + 'cortometraje': ['cortometraje', 'corto', 'cortos'], + 'de la tv': ['de la tv', 'television', 'tv'], + 'deporte': ['deporte', 'deportes'], + 'destacadas': ['destacada', 'destacadas'], + 'documental': ['documental', 'documentales'], + 'erotica': ['erotica', 'erotica +', 'eroticas', 'eroticas +', 'erotico', 'erotico +'], + 'estrenos': ['estrenos', 'estrenos'], + 'extranjera': ['extrajera', 'extrajeras', 'foreign'], + 'familiar': ['familiar', 'familia'], + 'fantastico': ['fantastico', 'fantastica', 'fantasticas'], + 'historica': ['historica', 'historicas', 'historico', 'historia'], + 'infantil': ['infantil', 'kids'], + 'musical': ['musical', 'musicales', 'musica'], + 'policial': ['policial', 'policiaco', 'policiaca'], + 'recomendadas': ['recomedada', 'recomendadas'], + 'religion': ['religion', 'religiosa', 'religiosas'], + 'romantica': ['romantica', 'romanticas', 'romantico'], + 'suspenso': ['suspenso', 'suspense'], + 'thriller': ['thriller', 'thrillers'], + 'western': ['western', 'westerns', 'oeste western'] } - string = re.sub(r'peliculas de |pelicula de la |peli |cine ','', string) - for genre, variants in genres_dict.items(): + string = re.sub(r'peliculas de |pelicula de la |peli |cine ', '', string) + for genre, variants in list(genres_dict.items()): if string in variants: string = genre return string + def remove_format(string): - #logger.info() - #logger.debug('entra en remove: %s' % string) + # logger.info() + # logger.debug('entra en remove: %s' % string) string = string.rstrip() string = re.sub(r'(\[|\[\/)(?:color|COLOR|b|B|i|I).*?\]|\[|\]|\(|\)|\:|\.', '', string) - #logger.debug('sale de remove: %s' % string) + # logger.debug('sale de remove: %s' % string) return string + def normalize(string): - string = string.decode('utf-8') + if not PY3 and isinstance(string, str): + string = string.decode('utf-8') normal = ''.join((c for c in unicodedata.normalize('NFD', unicode(string)) if unicodedata.category(c) != 'Mn')) return normal def simplify(string): - - #logger.info() - #logger.debug('entra en simplify: %s'%string) + # logger.info() + # logger.debug('entra en simplify: %s'%string) string = remove_format(string) - string = string.replace('-',' ').replace('_',' ') - string = re.sub(r'\d+','', string) + string = string.replace('-', ' ').replace('_', ' ') + string = re.sub(r'\d+', '', string) string = string.strip() notilde = normalize(string) @@ -197,12 +175,13 @@ def simplify(string): except: pass string = string.lower() - #logger.debug('sale de simplify: %s' % string) + # logger.debug('sale de simplify: %s' % string) return string + def add_languages(title, languages): - #logger.info() + # logger.info() if isinstance(languages, list): for language in languages: @@ -211,14 +190,55 @@ def add_languages(title, languages): title = '%s %s' % (title, set_color(languages, languages)) return title + +def add_info_plot(plot, languages, quality): + # logger.info() + last = '[/I][/B]\n' + + if languages: + l_part = '[COLOR yellowgreen][B][I]Idiomas:[/COLOR] ' + mid = '' + + if isinstance(languages, list): + for language in languages: + mid += '%s ' % (set_color(language, language)) + else: + mid = '%s ' % (set_color(languages, languages)) + + p_lang = '%s%s%s' % (l_part, mid, last) + + if quality: + q_part = '[COLOR yellowgreen][B][I]Calidad:[/COLOR] ' + p_quality = '%s%s%s' % 
(q_part, quality, last) + + if languages and quality: + plot_ = '%s%s\n%s' % (p_lang, p_quality, plot) + + elif languages: + plot_ = '%s\n%s' % (p_lang, plot) + + elif quality: + plot_ = '%s\n%s' % (p_quality, plot) + + else: + plot_ = plot + + return plot_ + + def set_color(title, category): - #logger.info() + # logger.info() + from core import jsontools + + styles_path = os.path.join(config.get_runtime_path(), 'resources', 'color_styles.json') + preset = config.get_setting("preset_style", default="Estilo 1") + color_setting = jsontools.load((open(styles_path, "r").read()))[preset] color_scheme = {'otro': 'white', 'dual': 'white'} - #logger.debug('category antes de remove: %s' % category) + # logger.debug('category antes de remove: %s' % category) category = remove_format(category).lower() - #logger.debug('category despues de remove: %s' % category) + # logger.debug('category despues de remove: %s' % category) # Lista de elementos posibles en el titulo color_list = ['movie', 'tvshow', 'year', 'rating_1', 'rating_2', 'rating_3', 'quality', 'cast', 'lat', 'vose', 'vos', 'vo', 'server', 'library', 'update', 'no_update'] @@ -234,46 +254,45 @@ def set_color(title, category): if custom_colors: color_scheme[element] = remove_format(config.get_setting('%s_color' % element)) else: - color_scheme[element] = 'white' + color_scheme[element] = remove_format(color_setting.get(element, 'white')) + # color_scheme[element] = 'white' + if category in ['update', 'no_update']: - #logger.debug('title antes de updates: %s' % title) - title= re.sub(r'\[COLOR .*?\]','[COLOR %s]' % color_scheme[category],title) + # logger.debug('title antes de updates: %s' % title) + title = re.sub(r'\[COLOR .*?\]', '[COLOR %s]' % color_scheme[category], title) else: if category not in ['movie', 'tvshow', 'library', 'otro']: - title = "[COLOR %s][%s][/COLOR]"%(color_scheme[category], title) + title = "[COLOR %s][%s][/COLOR]" % (color_scheme[category], title) else: title = "[COLOR %s]%s[/COLOR]" % (color_scheme[category], title) return title -def set_lang(language): - #logger.info() - cast =['castellano','espanol','cast','esp','espaol', 'es','zc', 'spa', 'spanish', 'vc'] - ita =['italiano','italian','ita','it'] - lat=['latino','lat','la', 'espanol latino', 'espaol latino', 'zl', 'mx', 'co', 'vl'] - vose=['subtitulado','subtitulada','sub','sub espanol','vose','espsub','su','subs castellano', - 'sub: español', 'vs', 'zs', 'vs', 'english-spanish subs', 'ingles sub espanol'] - sub_ita=['sottotitolato','sottotitolata','sub','sub ita','subs italiano', - 'sub: italiano', 'inglese sottotitolato'] - vos=['vos', 'sub ingles', 'engsub','ingles subtitulado', 'sub: ingles'] - vo=['ingles', 'en','vo', 'ovos', 'eng','v.o', 'english'] - dual=['dual'] +def set_lang(language): + # logger.info() + + cast = ['castellano', 'español', 'espanol', 'cast', 'esp', 'espaol', 'es', 'zc', 'spa', 'spanish', 'vc'] + ita = ['italiano', 'italian', 'ita', 'it'] + lat = ['latino', 'lat', 'la', 'español latino', 'espanol latino', 'espaol latino', 'zl', 'mx', 'co', 'vl'] + vose = ['subtitulado', 'subtitulada', 'sub', 'sub espanol', 'vose', 'espsub', 'su', 'subs castellano', + 'sub: español', 'vs', 'zs', 'vs', 'english-spanish subs', 'ingles sub espanol', 'ingles sub español'] + vos = ['vos', 'sub ingles', 'engsub', 'vosi', 'ingles subtitulado', 'sub: ingles'] + vo = ['ingles', 'en', 'vo', 'ovos', 'eng', 'v.o', 'english'] + dual = ['dual'] language = scrapertools.decodeHtmlentities(language) old_lang = language language = simplify(language) - #logger.debug('language 
before simplify: %s' % language) - #logger.debug('old language: %s' % old_lang) + # logger.debug('language before simplify: %s' % language) + # logger.debug('old language: %s' % old_lang) if language in cast: language = 'cast' elif language in lat: language = 'lat' elif language in ita: language = 'ita' - elif language in sub_ita: - language = 'sub-ita' elif language in vose: language = 'vose' elif language in vos: @@ -285,67 +304,67 @@ def set_lang(language): else: language = 'otro' - #logger.debug('language after simplify: %s' % language) + # logger.debug('language after simplify: %s' % language) return language - - - def title_format(item): - #logger.info() + # logger.info() lang = False valid = True language_color = 'otro' + simple_language = '' - #logger.debug('item.title antes de formatear: %s' % item.title.lower()) + # logger.debug('item.title antes de formatear: %s' % item.title.lower()) # TODO se deberia quitar cualquier elemento que no sea un enlace de la lista de findvideos para quitar esto - #Palabras "prohibidas" en los titulos (cualquier titulo que contengas estas no se procesara en unify) + # Palabras "prohibidas" en los titulos (cualquier titulo que contengas estas no se procesara en unify) excluded_words = ['online', 'descarga', 'downloads', 'trailer', 'videoteca', 'gb', 'autoplay'] # Actions excluidos, (se define canal y action) los titulos que contengan ambos valores no se procesaran en unify - excluded_actions = [('videolibrary','get_episodes')] + excluded_actions = [('videolibrary', 'get_episodes')] - # Verifica si hay marca de visto de trakt - - visto = False - #logger.debug('titlo con visto? %s' % item.title) - - if '[[I]v[/I]]' in item.title or '[COLOR limegreen][v][/COLOR]' in item.title: - visto = True - - # Se elimina cualquier formato previo en el titulo - if item.action != '' and item.action !='mainlist': - item.title = remove_format(item.title) - - #logger.debug('visto? %s' % visto) - - # Evita que aparezcan los idiomas en los mainlist de cada canal - if item.action == 'mainlist': - item.language ='' - - info = item.infoLabels - #logger.debug('item antes de formatear: %s'%item) - - if hasattr(item,'text_color'): - item.text_color='' - - #Verifica el item sea valido para ser formateado por unify + # Verifica el item sea valido para ser formateado por unify if item.channel == 'trailertools' or (item.channel.lower(), item.action.lower()) in excluded_actions or \ - item.action=='': + item.action == '': valid = False else: for word in excluded_words: if word in item.title.lower(): valid = False break + if not valid: + return item - if valid and item.unify!=False: + # Verifica si hay marca de visto de trakt + + visto = False + # logger.debug('titlo con visto? %s' % item.title) + + if '[[I]v[/I]]' in item.title or '[COLOR limegreen][v][/COLOR]' in item.title: + visto = True + + # Se elimina cualquier formato previo en el titulo + if item.action != '' and item.action != 'mainlist' and item.unify: + item.title = remove_format(item.title) + + # logger.debug('visto? %s' % visto) + + # Evita que aparezcan los idiomas en los mainlist de cada canal + if item.action == 'mainlist': + item.language = '' + + info = item.infoLabels + # logger.debug('item antes de formatear: %s'%item) + + if hasattr(item, 'text_color'): + item.text_color = '' + + if valid and item.unify != False: # Formamos el titulo para serie, se debe definir contentSerieName # o show en el item para que esto funcione. 
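For context, the rewritten set_lang()/simplify() pipeline above reduces any raw language label a channel scrapes (e.g. "[COLOR red]Español[/COLOR]", "Sub Espanol") to one of the canonical tags (cast, lat, ita, vose, vos, vo, dual, otro) by stripping Kodi formatting tags, digits and accents, then doing a membership lookup. A minimal, self-contained sketch of that flow (Python 3; the CANONICAL table is an abbreviated, illustrative stand-in for the variant lists in set_lang, and the real module also decodes HTML entities via scrapertools):

import re
import unicodedata

# Abbreviated, illustrative stand-in for the variant lists in set_lang.
CANONICAL = {
    'cast': ['castellano', 'espanol', 'cast', 'esp', 'es'],
    'ita': ['italiano', 'italian', 'ita', 'it'],
    'vose': ['subtitulado', 'sub espanol', 'vose'],
    'vo': ['ingles', 'en', 'vo', 'english'],
}

def simplify(text):
    # Strip [COLOR]/[B]/[I] style tags and punctuation, as remove_format() does.
    text = re.sub(r'\[/?(?:color|b|i)[^\]]*\]|[\[\]():.]', '', text, flags=re.I)
    # Drop separators and digits, then strip accents: 'Español' -> 'espanol'.
    text = re.sub(r'\d+', '', text.replace('-', ' ').replace('_', ' ')).strip()
    text = ''.join(c for c in unicodedata.normalize('NFD', text)
                   if unicodedata.category(c) != 'Mn')
    return text.lower()

def set_lang(language):
    language = simplify(language)
    for tag, variants in CANONICAL.items():
        if language in variants:
            return tag
    return 'otro'  # fallback bucket, as in unify.py

print(set_lang('[COLOR red]Español[/COLOR]'))  # -> cast
print(set_lang('Sub Espanol'))                 # -> vose

Note that the hunks above also drop the old sub_ita list and its branch, so labels that only matched sub-ita now fall through to the remaining buckets ('sub' still lands in vose, the rest in otro).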
@@ -354,25 +373,26 @@ def title_format(item): # Si se tiene la informacion en infolabels se utiliza if item.contentType == 'episode' and info['episode'] != '': if info['title'] == '': - info['title'] = '%s - Episodio %s'% (info['tvshowtitle'], info['episode']) + info['title'] = '%s - Episodio %s' % (info['tvshowtitle'], info['episode']) elif 'Episode' in info['title']: episode = info['title'].lower().replace('episode', 'episodio') info['title'] = '%s - %s' % (info['tvshowtitle'], episode.capitalize()) - elif info['episodio_titulo']!='': - #logger.debug('info[episode_titulo]: %s' % info['episodio_titulo']) + elif info['episodio_titulo'] != '': + # logger.debug('info[episode_titulo]: %s' % info['episodio_titulo']) if 'episode' in info['episodio_titulo'].lower(): episode = info['episodio_titulo'].lower().replace('episode', 'episodio') - item.title = '%sx%s - %s' % (info['season'],info['episode'], episode.capitalize()) + item.title = '%sx%s - %s' % (info['season'], info['episode'], episode.capitalize()) else: - item.title = '%sx%s - %s' % (info['season'], info['episode'], info['episodio_titulo'].capitalize()) + item.title = '%sx%s - %s' % ( + info['season'], info['episode'], info['episodio_titulo'].capitalize()) else: - item.title = '%sx%s - %s' % (info['season'],info['episode'], info['title']) + item.title = '%sx%s - %s' % (info['season'], info['episode'], info['title']) item.title = set_color(item.title, 'tvshow') else: # En caso contrario se utiliza el titulo proporcionado por el canal - #logger.debug ('color_scheme[tvshow]: %s' % color_scheme['tvshow']) + # logger.debug ('color_scheme[tvshow]: %s' % color_scheme['tvshow']) item.title = '%s' % set_color(item.title, 'tvshow') elif item.contentTitle: @@ -386,27 +406,27 @@ def title_format(item): item.title = '%s [V.Extend.]' % set_color(item.contentTitle, 'movie') else: item.title = '%s' % set_color(item.contentTitle, 'movie') - if item.contentType=='movie': + if item.contentType == 'movie': if item.context: if isinstance(item.context, list): item.context.append('Buscar esta pelicula en otros canales') - if 'Novedades' in item.category and item.from_channel=='news': - #logger.debug('novedades') - item.title = '%s [%s]'%(item.title, item.channel) + if ('Novedades' in item.category and item.from_channel == 'news'): + # logger.debug('novedades') + item.title = '%s [%s]' % (item.title, item.channel) # Verificamos si item.language es una lista, si lo es se toma # cada valor y se normaliza formado una nueva lista - if hasattr(item,'language') and item.language !='': - #logger.debug('tiene language: %s'%item.language) + if hasattr(item, 'language') and item.language != '': + # logger.debug('tiene language: %s'%item.language) if isinstance(item.language, list): - language_list =[] + language_list = [] for language in item.language: if language != '': lang = True language_list.append(set_lang(remove_format(language)).upper()) - #logger.debug('language_list: %s' % language_list) + # logger.debug('language_list: %s' % language_list) simple_language = language_list else: # Si item.language es un string se normaliza @@ -416,19 +436,19 @@ def title_format(item): else: simple_language = '' - #item.language = simple_language + # item.language = simple_language # Damos formato al año si existiera y lo agregamos # al titulo excepto que sea un episodio - if info and info.get("year", "") not in [""," "] and item.contentType != 'episode' and not info['season']: + if info and info.get("year", "") not in ["", " "] and item.contentType != 'episode' and not 
info['season']: + try: + year = '%s' % set_color(info['year'], 'year') + item.title = item.title = '%s %s' % (item.title, year) + except: - logger.debug('infoLabels: %s'%info) + logger.debug('infoLabels: %s' % info) # Damos formato al puntaje si existiera y lo agregamos al titulo - if info and info['rating'] and info['rating']!='0.0' and not info['season']: + if info and info['rating'] and info['rating'] != '0.0' and not info['season']: # Se normaliza el puntaje del rating @@ -454,13 +474,29 @@ def title_format(item): # Damos formato a la calidad si existiera y lo agregamos al titulo if item.quality and isinstance(item.quality, str): quality = item.quality.strip() - item.title = '%s %s' % (item.title, set_color(quality, 'quality')) else: quality = '' - # Damos formato al idioma si existiera y lo agregamos al titulo - if lang: - item.title = add_languages(item.title, simple_language) + # Damos formato al idioma-calidad si existieran y los agregamos al plot + quality_ = set_color(quality, 'quality') + + if (lang or quality) and item.action == "play": + if hasattr(item, "clean_plot"): + item.contentPlot = item.clean_plot + + if lang: item.title = add_languages(item.title, simple_language) + if quality: item.title = '%s %s' % (item.title, quality_) + + elif (lang or quality) and item.action != "play": + + if item.contentPlot: + item.clean_plot = item.contentPlot + plot_ = add_info_plot(item.contentPlot, simple_language, quality_) + item.contentPlot = plot_ + else: + item.clean_plot = None + plot_ = add_info_plot('', simple_language, quality_) + item.contentPlot = plot_ # Para las busquedas por canal if item.from_channel != '': @@ -469,17 +505,16 @@ logger.debug(channel_parameters) item.title = '%s [%s]' % (item.title, channel_parameters['title']) - # Formato para actualizaciones de series en la videoteca sobreescribe los colores anteriores - if item.channel=='videolibrary' and item.context!='': - if item.action=='get_seasons': + if item.channel == 'videolibrary' and item.context != '': + if item.action == 'get_seasons': if 'Desactivar' in item.context[1]['title']: - item.title= '%s' % (set_color(item.title, 'update')) + item.title = '%s' % (set_color(item.title, 'update')) if 'Activar' in item.context[1]['title']: - item.title= '%s' % (set_color(item.title, 'no_update')) + item.title = '%s' % (set_color(item.title, 'no_update')) - #logger.debug('Despues del formato: %s' % item) + # logger.debug('Despues del formato: %s' % item) # Damos formato al servidor si existiera if item.server: server = '%s' % set_color(item.server.strip().capitalize(), 'server') @@ -487,18 +522,28 @@ # Compureba si estamos en findvideos, y si hay server, si es asi no se muestra el # titulo sino el server, en caso contrario se muestra el titulo normalmente. 
- #logger.debug('item.title antes de server: %s'%item.title) + # logger.debug('item.title antes de server: %s'%item.title) if item.action != 'play' and item.server: - item.title ='%s %s'%(item.title, server.strip()) + item.title = '%s %s' % (item.title, server.strip()) + elif item.action == 'play' and item.server: + if hasattr(item, "clean_plot"): + item.contentPlot = item.clean_plot if item.quality == 'default': quality = '' - #logger.debug('language_color: %s'%language_color) - item.title = '%s %s' % (server, set_color(quality,'quality')) + # logger.debug('language_color: %s'%language_color) + item.title = '%s %s' % (server, set_color(quality, 'quality')) if lang: item.title = add_languages(item.title, simple_language) - #logger.debug('item.title: %s' % item.title) + # logger.debug('item.title: %s' % item.title) + # Torrent_info + if item.server == 'torrent' and item.torrent_info != '': + item.title = '%s [%s]' % (item.title, item.torrent_info) + + if item.channel == 'videolibrary': + item.title += ' [%s]' % item.contentChannel + # si hay verificacion de enlaces if item.alive != '': if item.alive.lower() == 'no': @@ -507,29 +552,33 @@ def title_format(item): item.title = '[[COLOR yellow][B]?[/B][/COLOR]] %s' % item.title else: item.title = '%s' % item.title - #logger.debug('item.title despues de server: %s' % item.title) + + # logger.debug('item.title despues de server: %s' % item.title) elif 'library' in item.action: item.title = '%s' % set_color(item.title, 'library') - elif item.action == '' and item.title !='': - item.title='**- %s -**'%item.title - else: + elif item.action == '' and item.title != '': + item.title = '**- %s -**' % item.title + elif item.unify: item.title = '%s' % set_color(item.title, 'otro') - #logger.debug('antes de salir %s' % item.title) + # logger.debug('antes de salir %s' % item.title) if visto: try: check = u'\u221a' title = '[B][COLOR limegreen][%s][/COLOR][/B] %s' % (check, item.title.decode('utf-8')) item.title = title.encode('utf-8') + if PY3: item.title = item.title.decode('utf-8') except: check = 'v' title = '[B][COLOR limegreen][%s][/COLOR][/B] %s' % (check, item.title.decode('utf-8')) item.title = title.encode('utf-8') + if PY3: item.title = item.title.decode('utf-8') return item + def thumbnail_type(item): - #logger.info() + # logger.info() # Se comprueba que tipo de thumbnail se utilizara en findvideos, # Poster o Logo del servidor @@ -539,7 +588,7 @@ def thumbnail_type(item): item.contentThumbnail = item.thumbnail if info: - if info['thumbnail'] !='': + if info['thumbnail'] != '': item.contentThumbnail = info['thumbnail'] if item.action == 'play': @@ -548,7 +597,7 @@ def thumbnail_type(item): item.thumbnail = info['thumbnail'] elif thumb_type == 1: from core.servertools import get_server_parameters - #logger.debug('item.server: %s'%item.server) + # logger.debug('item.server: %s'%item.server) server_parameters = get_server_parameters(item.server.lower()) item.thumbnail = server_parameters.get("thumbnail", item.contentThumbnail) @@ -574,7 +623,7 @@ def check_rating(rating): try: # convertimos los deciamles p.e. 7.1 return "%.1f" % round(_rating, 1) - except Exception, ex_dl: + except Exception as ex_dl: template = "An exception of type %s occured. Arguments:\n%r" message = template % (type(ex_dl).__name__, ex_dl.args) logger.error(message) @@ -601,18 +650,18 @@ def check_rating(rating): def convert_float(_rating): try: return float(_rating) - except ValueError, ex_ve: + except ValueError as ex_ve: template = "An exception of type %s occured. 
Arguments:\n%r" message = template % (type(ex_ve).__name__, ex_ve.args) logger.error(message) return None - if type(rating) != float: + if not isinstance(rating, float): # logger.debug("no soy float") - if type(rating) == int: + if isinstance(rating, int): # logger.debug("soy int") rating = convert_float(rating) - elif type(rating) == str: + elif isinstance(rating, str): # logger.debug("soy str") rating = rating.replace("<", "") @@ -634,4 +683,4 @@ def check_rating(rating): rating = check_decimal_length(rating) rating = check_range(rating) - return rating + return rating \ No newline at end of file diff --git a/platformcode/updater.py b/platformcode/updater.py index b88840ee..ecac4c14 100644 --- a/platformcode/updater.py +++ b/platformcode/updater.py @@ -2,7 +2,7 @@ import io import os import shutil -from cStringIO import StringIO +from lib.six import BytesIO from core import filetools from platformcode import logger, platformtools @@ -15,7 +15,9 @@ try: import urllib.request as urllib except ImportError: import urllib - +import sys +PY3 = False +if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int addon = xbmcaddon.Addon('plugin.video.kod') _hdr_pat = re.compile("^@@ -(\d+),?(\d+)? \+(\d+),?(\d+)? @@.*") @@ -33,7 +35,7 @@ def loadCommits(page=1): apiLink = 'https://api.github.com/repos/' + user + '/' + repo + '/commits?sha=' + branch + "&page=" + str(page) logger.info(apiLink) # riprova ogni secondo finchè non riesce (ad esempio per mancanza di connessione) - for n in xrange(10): + for n in range(10): try: commitsLink = urllib.urlopen(apiLink).read() ret = json.loads(commitsLink) @@ -112,24 +114,26 @@ def check(background=False): if 'patch' in file: text = "" try: - localFile = open(addonDir + file["filename"], 'r+') + localFile = io.open(addonDir + file["filename"], 'r+', encoding="utf8") text = localFile.read() + if not PY3: + text = text.decode('utf-8') except IOError: # nuovo file # crea le cartelle se non esistono dirname = os.path.dirname(addonDir + file["filename"]) if not os.path.exists(dirname): os.makedirs(dirname) - localFile = open(addonDir + file["filename"], 'w') + localFile = io.open(addonDir + file["filename"], 'w', encoding="utf8") patched = apply_patch(text, (file['patch']+'\n').encode('utf-8')) if patched != text: # non eseguo se già applicata (es. 
scaricato zip da github) + alreadyApplied = False if getShaStr(patched) == file['sha']: localFile.seek(0) localFile.truncate() localFile.writelines(patched) localFile.close() - alreadyApplied = False else: # nel caso ci siano stati problemi logger.info('lo sha non corrisponde, scarico il file') localFile.close() @@ -250,7 +254,7 @@ def apply_patch(s,patch,revert=False): def getSha(path): try: - f = open(path, 'rb') + f = io.open(path, 'rb') except: return '' size = len(f.read()) @@ -259,7 +263,11 @@ def getShaStr(str): - return githash.blob_hash(StringIO(str), len(str)).hexdigest() + if PY3: + return githash.blob_hash(BytesIO(str.encode('utf-8')), len(str.encode('utf-8'))).hexdigest() + else: + return githash.blob_hash(BytesIO(str), len(str)).hexdigest() + def updateFromZip(message='Installazione in corso...'): dp.update(0) remotefilename = 'https://github.com/' + user + "/" + repo + "/archive/" + branch + ".zip" - localfilename = os.path.join(xbmc.translatePath("special://home/addons/"), "plugin.video.kod.update.zip").encode('utf-8') + localfilename = filetools.join(xbmc.translatePath("special://home/addons/"), "plugin.video.kod.update.zip") destpathname = xbmc.translatePath("special://home/addons/") logger.info("remotefilename=%s" % remotefilename) @@ -306,7 +314,7 @@ for member in zip.infolist(): zip.extract(member, destpathname) cur_size += member.file_size - dp.update(80 + cur_size * 19 / size) + dp.update(int(90 + cur_size * 9 / size)) except Exception as e: logger.info('Non sono riuscito ad estrarre il file zip') @@ -417,13 +425,14 @@ def fOpen(file, mode = 'r'): logger.info('android, uso FileIO per leggere') return io.FileIO(file, mode) else: - return open(file, mode) + return io.open(file, mode) def _pbhook(numblocks, blocksize, filesize, url, dp): try: percent = min((numblocks*blocksize*90)/filesize, 100) - dp.update(percent) - except: + dp.update(int(percent)) + except Exception as e: + logger.error(e) percent = 90 - dp.update(percent) \ No newline at end of file + dp.update(percent) diff --git a/platformcode/xbmc_config_menu.py b/platformcode/xbmc_config_menu.py index b3e87a25..290622c4 100644 --- a/platformcode/xbmc_config_menu.py +++ b/platformcode/xbmc_config_menu.py @@ -3,13 +3,21 @@ # XBMC Config Menu # ------------------------------------------------------------ +from __future__ import division +#from builtins import str +import sys +PY3 = False +if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int +from builtins import range +from past.utils import old_div + import inspect import os import xbmcgui from core import channeltools -from core import servertools +from core import servertools, scrapertools from platformcode import config, logger @@ -161,7 +169,7 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): self.callback = callback self.item = item - if type(custom_button) == dict: + if isinstance(custom_button, dict): self.custom_button = {} self.custom_button["label"] = custom_button.get("label", "") self.custom_button["function"] = custom_button.get("function", "") @@ -245,12 +253,16 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): def evaluate(self, index, cond): import re + ok = False + # Si la condicion es True o False, no hay mas que evaluar, ese es el valor - if type(cond) == bool: + if isinstance(cond, bool): return cond # Obtenemos las condiciones - conditions = 
re.compile("(!?eq|!?gt|!?lt)?\(([^,]+),[\"|']?([^)|'|\"]*)['|\"]?\)[ ]*([+||])?").findall(cond) + # conditions = re.compile("(!?eq|!?gt|!?lt)?\(([^,]+),[\"|']?([^)|'|\"]*)['|\"]?\)[ ]*([+||])?").findall(cond) + conditions = re.compile(r'''(!?eq|!?gt|!?lt)?\s*\(\s*([^, ]+)\s*,\s*["']?([^"'\)]+)["']?\)([+|])?''').findall(cond) + # conditions = scrapertools.find_multiple_matches(cond, r"(!?eq|!?gt|!?lt)?\(([^,]+),[\"|']?([^)|'|\"]*)['|\"]?\)[ ]*([+||])?") for operator, id, value, next in conditions: # El id tiene que ser un numero, sino, no es valido y devuelve False try: @@ -276,7 +288,7 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): if value.startswith('@') and unicode(value[1:]).isnumeric(): value = config.get_localized_string(int(value[1:])) - + # Operaciones lt "menor que" y gt "mayor que", requieren que las comparaciones sean numeros, sino devuelve # False if operator in ["lt", "!lt", "gt", "!gt"]: @@ -294,9 +306,9 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): pass # valor bool - if value.lower() == "true": + if not isinstance(value, int) and value.lower() == "true": value = True - elif value.lower() == "false": + elif not isinstance(value, int) and value.lower() == "false": value = False # operacion "eq" "igual a" @@ -515,7 +527,7 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): continue if c["type"] == "list" and "lvalues" not in c: continue - if c["type"] == "list" and not type(c["lvalues"]) == list: + if c["type"] == "list" and not isinstance(c["lvalues"], list): continue if c["type"] == "list" and not len(c["lvalues"]) > 0: continue @@ -590,7 +602,7 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): self.check_ok(self.values) def dispose_controls(self, index, focus=False, force=False): - show_controls = self.controls_height / self.height_control - 1 + show_controls = old_div(self.controls_height, self.height_control) - 1 visible_count = 0 @@ -609,7 +621,7 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): if index < 0: index = 0 new_index = index - if self.index <> new_index or force: + if self.index != new_index or force: for x, c in enumerate(self.visible_controls): if x < new_index or visible_count > show_controls or not c["show"]: self.set_visible(c, False) @@ -693,7 +705,7 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): else: self.return_value = getattr(cb_channel, self.custom_button['function'])(self.item, self.values) if not self.custom_button["close"]: - if isinstance(self.return_value, dict) and self.return_value.has_key("label"): + if isinstance(self.return_value, dict) and "label" in self.return_value: self.getControl(10006).setLabel(self.return_value['label']) for c in self.list_controls: @@ -757,23 +769,23 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): # Controles de ajustes, si se cambia el valor de un ajuste, cambiamos el valor guardado en el diccionario de # valores # Obtenemos el control sobre el que se ha echo click - control = self.getControl(id) + # control = self.getControl(id) # Lo buscamos en el listado de controles for cont in self.list_controls: # Si el control es un "downBtn" o "upBtn" son los botones del "list" # en este caso cambiamos el valor del list - if cont["type"] == "list" and (cont["downBtn"] == control or cont["upBtn"] == control): + if cont["type"] == "list" and (cont["downBtn"].getId() == id or cont["upBtn"].getId() == id): # Para bajar una posicion - if cont["downBtn"] == control: + if cont["downBtn"].getId() == id: index = cont["lvalues"].index(cont["label"].getLabel()) if index > 0: cont["label"].setLabel(cont["lvalues"][index - 
1]) # Para subir una posicion - elif cont["upBtn"] == control: + elif cont["upBtn"].getId() == id: index = cont["lvalues"].index(cont["label"].getLabel()) if index < len(cont["lvalues"]) - 1: cont["label"].setLabel(cont["lvalues"][index + 1]) @@ -782,11 +794,11 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): self.values[cont["id"]] = cont["lvalues"].index(cont["label"].getLabel()) # Si esl control es un "bool", guardamos el nuevo valor True/False - if cont["type"] == "bool" and cont["control"] == control: + if cont["type"] == "bool" and cont["control"].getId() == id: self.values[cont["id"]] = bool(cont["control"].isSelected()) # Si esl control es un "text", guardamos el nuevo valor - if cont["type"] == "text" and cont["control"] == control: + if cont["type"] == "text" and cont["control"].getId() == id: # Versiones antiguas requieren abrir el teclado manualmente if xbmcgui.ControlEdit == ControlEdit: import xbmc @@ -817,9 +829,9 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): if action == 1: # Si el foco no está en ninguno de los tres botones inferiores, y esta en un "list" cambiamos el valor if focus not in [10004, 10005, 10006]: - control = self.getFocus() + control = self.getFocus().getId() for cont in self.list_controls: - if cont["type"] == "list" and cont["control"] == control: + if cont["type"] == "list" and cont["control"].getId() == control: index = cont["lvalues"].index(cont["label"].getLabel()) if index > 0: cont["label"].setLabel(cont["lvalues"][index - 1]) @@ -843,9 +855,9 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): elif action == 2: # Si el foco no está en ninguno de los tres botones inferiores, y esta en un "list" cambiamos el valor if focus not in [10004, 10005, 10006]: - control = self.getFocus() + control = self.getFocus().getId() for cont in self.list_controls: - if cont["type"] == "list" and cont["control"] == control: + if cont["type"] == "list" and cont["control"].getId() == control: index = cont["lvalues"].index(cont["label"].getLabel()) if index < len(cont["lvalues"]) - 1: cont["label"].setLabel(cont["lvalues"][index + 1]) @@ -870,11 +882,9 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): # Si el foco no está en ninguno de los tres botones inferiores, bajamos el foco en los controles de ajustes if focus not in [10004, 10005, 10006]: try: - focus_control = \ - [self.visible_controls.index(c) for c in self.visible_controls if - c["control"] == self.getFocus()][ - 0] + focus_control = [self.visible_controls.index(c) for c in self.visible_controls if c["control"].getId() == self.getFocus().getId()][0] focus_control += 1 + except: focus_control = 0 @@ -895,9 +905,7 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): if focus not in [10003, 10004, 10005, 10006]: try: focus_control = \ - [self.visible_controls.index(c) for c in self.visible_controls if - c["control"] == self.getFocus()][ - 0] + [self.visible_controls.index(c) for c in self.visible_controls if c["control"].getId() == self.getFocus().getId()][0] focus_control -= 1 while not focus_control == -1 and (self.visible_controls[focus_control]["type"] == "label" or not @@ -936,11 +944,11 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): elif action == 504: if self.xx > raw_action.getAmount2(): - if (self.xx - int(raw_action.getAmount2())) / self.height_control: + if old_div((self.xx - int(raw_action.getAmount2())), self.height_control): self.xx -= self.height_control self.dispose_controls(self.index + 1) else: - if (int(raw_action.getAmount2()) - self.xx) / self.height_control: + if 
old_div((int(raw_action.getAmount2()) - self.xx), self.height_control): self.xx += self.height_control self.dispose_controls(self.index - 1) return @@ -981,7 +989,7 @@ class ControlEdit(xbmcgui.ControlButton): def setWidth(self, w): xbmcgui.ControlButton.setWidth(self, w) - self.textControl.setWidth(w / 2) + self.textControl.setWidth(old_div(w, 2)) def setHeight(self, w): xbmcgui.ControlButton.setHeight(self, w) @@ -992,7 +1000,7 @@ class ControlEdit(xbmcgui.ControlButton): if xbmcgui.__version__ == "1.2": self.textControl.setPosition(x + self.getWidth(), y) else: - self.textControl.setPosition(x + self.getWidth() / 2, y) + self.textControl.setPosition(x + old_div(self.getWidth(), 2), y) def setText(self, text): self.text = text diff --git a/platformcode/xbmc_info_window.py b/platformcode/xbmc_info_window.py index fea52fa2..07c689fc 100644 --- a/platformcode/xbmc_info_window.py +++ b/platformcode/xbmc_info_window.py @@ -91,8 +91,7 @@ class InfoWindow(xbmcgui.WindowXMLDialog): En caso de peliculas: Coge el titulo de los siguientes campos (en este orden) 1. contentTitle (este tiene prioridad 1) - 2. fulltitle (este tiene prioridad 2) - 3. title (este tiene prioridad 3) + 2. title (este tiene prioridad 2) El primero que contenga "algo" lo interpreta como el titulo (es importante asegurarse que el titulo este en su sitio) diff --git a/platformcode/xbmc_videolibrary.py b/platformcode/xbmc_videolibrary.py index 3f0c1972..de06903c 100644 --- a/platformcode/xbmc_videolibrary.py +++ b/platformcode/xbmc_videolibrary.py @@ -3,17 +3,24 @@ # XBMC Library Tools # ------------------------------------------------------------ +from future import standard_library +standard_library.install_aliases() +#from builtins import str +import sys +PY3 = False +if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int + import os import threading import time +import re -import urllib2 import xbmc - from core import filetools from core import jsontools from platformcode import config, logger from platformcode import platformtools +from core import scrapertools def mark_auto_as_watched(item): @@ -83,7 +90,6 @@ def sync_trakt_addon(path_folder): "special://home/addons/script.trakt/"] for path in paths: - import sys sys.path.append(xbmc.translatePath(path)) # se obtiene las series vistas @@ -94,10 +100,9 @@ def sync_trakt_addon(path_folder): return shows = traktapi.getShowsWatched({}) - shows = shows.items() + shows = list(shows.items()) # obtenemos el id de la serie para comparar - import re _id = re.findall("\[(.*?)\]", path_folder, flags=re.DOTALL)[0] logger.debug("el id es %s" % _id) @@ -329,9 +334,7 @@ def mark_season_as_watched_on_kodi(item, value=1): def mark_content_as_watched_on_alfa(path): from specials import videolibrary from core import videolibrarytools - from core import scrapertools - from core import filetools - import re + """ marca toda la serie o película como vista o no vista en la Videoteca de Alfa basado en su estado en la Videoteca de Kodi @type str: path @@ -361,6 +364,9 @@ def mark_content_as_watched_on_alfa(path): if "\\" in path: path = path.replace("/", "\\") head_nfo, item = videolibrarytools.read_nfo(path) #Leo el .nfo del contenido + if not item: + logger.error('.NFO no encontrado: ' + path) + return if FOLDER_TVSHOWS in path: #Compruebo si es CINE o SERIE contentType = "episode_view" #Marco la tabla de BBDD de Kodi Video @@ -379,7 +385,7 @@ def mark_content_as_watched_on_alfa(path): nfo_name = scrapertools.find_single_match(path2, '\]\/(.*?)$') #Construyo el nombre 
del .nfo path1 = path1.replace(nfo_name, '') #para la SQL solo necesito la carpeta path2 = path2.replace(nfo_name, '') #para la SQL solo necesito la carpeta - path2 = filetools.remove_smb_credential(path2) #Si el archivo está en un servidor SMB, quiamos las credenciales + path2 = filetools.remove_smb_credential(path2) #Si el archivo está en un servidor SMB, quitamos las credenciales #Ejecutmos la sentencia SQL sql = 'select strFileName, playCount from %s where (strPath like "%s" or strPath like "%s")' % (contentType, path1, path2) @@ -399,7 +405,11 @@ playCount_final = 0 elif playCount >= 1: playCount_final = 1 - title_plain = title_plain.decode("utf-8").encode("utf-8") #Hacemos esto porque si no genera esto: u'title_plain' + + elif not PY3 and isinstance(title_plain, (str, unicode)): + title_plain = title_plain.decode("utf-8").encode("utf-8") #Hacemos esto porque si no genera esto: u'title_plain' + elif PY3 and isinstance(title_plain, bytes): + title_plain = title_plain.decode('utf-8') item.library_playcounts.update({title_plain: playCount_final}) #actualizamos el playCount del .nfo if item.infoLabels['mediatype'] == "tvshow": #Actualizamos los playCounts de temporadas y Serie @@ -420,6 +430,7 @@ def get_data(payload): @param payload: data :return: """ + import urllib.request, urllib.error logger.info("payload: %s" % payload) # Required header for XBMC JSON-RPC calls, otherwise you'll get a 415 HTTP response code - Unsupported media type headers = {'content-type': 'application/json'} @@ -433,14 +444,14 @@ xbmc_json_rpc_url = "http://" + config.get_setting("xbmc_host", "videolibrary") + ":" + str( xbmc_port) + "/jsonrpc" - req = urllib2.Request(xbmc_json_rpc_url, data=jsontools.dump(payload), headers=headers) - f = urllib2.urlopen(req) + req = urllib.request.Request(xbmc_json_rpc_url, data=jsontools.dump(payload), headers=headers) + f = urllib.request.urlopen(req) response = f.read() f.close() logger.info("get_data: response %s" % response) data = jsontools.load(response) - except Exception, ex: + except Exception as ex: template = "An exception of type %s occured. Arguments:\n%r" message = template % (type(ex).__name__, ex.args) logger.error("error en xbmc_json_rpc_url: %s" % message) @@ -448,7 +459,7 @@ else: try: data = jsontools.load(xbmc.executeJSONRPC(jsontools.dump(payload))) - except Exception, ex: + except Exception as ex: template = "An exception of type %s occured. 
Arguments:\n%r" message = template % (type(ex).__name__, ex.args) logger.error("error en xbmc.executeJSONRPC: %s" % message) @@ -477,6 +488,7 @@ def update(folder_content=config.get_setting("folder_tvshows"), folder=""): } if folder: + folder = str(folder) videolibrarypath = config.get_videolibrary_config_path() if folder.endswith('/') or folder.endswith('\\'): @@ -489,9 +501,10 @@ def update(folder_content=config.get_setting("folder_tvshows"), folder=""): videolibrarypath = videolibrarypath[:-1] update_path = videolibrarypath + "/" + folder_content + "/" + folder + "/" else: - update_path = filetools.join(videolibrarypath, folder_content, folder) + "/" + #update_path = filetools.join(videolibrarypath, folder_content, folder) + "/" # Problemas de encode en "folder" + update_path = filetools.join(videolibrarypath, folder_content, ' ').rstrip() - if not update_path.startswith("smb://"): + if not scrapertools.find_single_match(update_path, '(^\w+:\/\/)'): payload["params"] = {"directory": update_path} while xbmc.getCondVisibility('Library.IsScanningVideo()'): @@ -663,7 +676,7 @@ def set_content(content_type, silent=False): if sql_videolibrarypath.startswith("special://"): sql_videolibrarypath = sql_videolibrarypath.replace('/profile/', '/%/').replace('/home/userdata/', '/%/') sep = '/' - elif sql_videolibrarypath.startswith("smb://"): + elif scrapertools.find_single_match(sql_videolibrarypath, '(^\w+:\/\/)'): sep = '/' else: sep = os.sep @@ -881,7 +894,7 @@ def add_sources(path): # Nodo <name> nodo_name = xmldoc.createElement("name") sep = os.sep - if path.startswith("special://") or path.startswith("smb://"): + if path.startswith("special://") or scrapertools.find_single_match(path, '(^\w+:\/\/)'): sep = "/" name = path if path.endswith(sep): @@ -904,8 +917,13 @@ def add_sources(path): nodo_video.appendChild(nodo_source) # Guardamos los cambios - filetools.write(SOURCES_PATH, - '\n'.join([x for x in xmldoc.toprettyxml().encode("utf-8").splitlines() if x.strip()])) + if not PY3: + filetools.write(SOURCES_PATH, + '\n'.join([x for x in xmldoc.toprettyxml().encode("utf-8").splitlines() if x.strip()])) + else: + filetools.write(SOURCES_PATH, + b'\n'.join([x for x in xmldoc.toprettyxml().encode("utf-8").splitlines() if x.strip()]), + vfs=False) def ask_set_content(flag, silent=False): diff --git a/resources/language/English/strings.po b/resources/language/English/strings.po index b126159c..f817700b 100644 --- a/resources/language/English/strings.po +++ b/resources/language/English/strings.po @@ -245,6 +245,10 @@ msgctxt "#30137" msgid "Direct" msgstr "" +msgctxt "#30138" +msgid "Live" +msgstr "" + msgctxt "#30151" msgid "Watch the video" msgstr "" @@ -5734,7 +5738,19 @@ msgid "Playback" msgstr "" msgctxt "#70754" -msgid "Compact mode" +msgid "Display mode" +msgstr "" + +msgctxt "#70755" +msgid "Default" +msgstr "" + +msgctxt "#70756" +msgid "Extended" +msgstr "" + +msgctxt "#70757" +msgid "Compact" msgstr "" # DNS start [ settings and declaration ] @@ -5805,3 +5821,71 @@ msgstr "" msgctxt "#707417" msgid "Favourite quality" msgstr "" + +msgctxt "#707418" +msgid "Follow the steps below:" +msgstr "" + +msgctxt "#707419" +msgid "%s) click here to enable debug logging" +msgstr "" + +msgctxt "#707420" +msgid "%s) repeat what you did to cause the error" +msgstr "" + +msgctxt "#707421" +msgid "%s) click here to create the report" +msgstr "" + +msgctxt "#707422" +msgid "%s) click here to disable debug logging" +msgstr "" + +msgctxt "#707423" +msgid "Explain the issue and share this link:" +msgstr "" + 
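The #707418-#707434 block added here (and mirrored in the Italian file below) backs the new "Report an issue" entry in the Help section. As a sketch of how such numbered entries are typically consumed from the addon code: config.get_localized_string() is the resolver used elsewhere in this patch, but this step loop is illustrative, not the actual help.py code:

from platformcode import config

# Each step msgid carries a "%s)" placeholder for the step number.
steps = [config.get_localized_string(sid) % n
         for n, sid in enumerate((707419, 707420, 707421, 707422), start=1)]
# steps[0] == "1) click here to enable debug logging"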
+msgctxt "#707424" +msgid "Service not available. Try again later" +msgstr "" + +msgctxt "#707425" +msgid "Debug should be active" +msgstr "" + +msgctxt "#707426" +msgid "before generating the report" +msgstr "" + +msgctxt "#707427" +msgid "Unable to read kodi log" +msgstr "" + +msgctxt "#707428" +msgid "Failed to upload report" +msgstr "" + +msgctxt "#707429" +msgid "Report an issue" +msgstr "" + +msgctxt "#707430" +msgid "Debug logging" +msgstr "" + +msgctxt "#707431" +msgid "Enabled" +msgstr "" + +msgctxt "#707432" +msgid "Disabled" +msgstr "" + +msgctxt "#707433" +msgid "Delete" +msgstr "" + +msgctxt "#707434" +msgid "reCaptcha verification failed" +msgstr "" \ No newline at end of file diff --git a/resources/language/Italian/strings.po b/resources/language/Italian/strings.po index fea225ce..3eed066d 100644 --- a/resources/language/Italian/strings.po +++ b/resources/language/Italian/strings.po @@ -245,6 +245,10 @@ msgctxt "#30137" msgid "Direct" msgstr "Diretto" +msgctxt "#30138" +msgid "Live" +msgstr "Live" + msgctxt "#30151" msgid "Watch the video" msgstr "Guarda il video" @@ -5738,8 +5742,20 @@ msgid "Playback" msgstr "Riproduzione" msgctxt "#70754" -msgid "Compact mode" -msgstr "Modalità compatta" +msgid "Display mode" +msgstr "Modalità visualizzazione" + +msgctxt "#70755" +msgid "Default" +msgstr "Predefinita" + +msgctxt "#70756" +msgid "Extended" +msgstr "Estesa" + +msgctxt "#70757" +msgid "Compact" +msgstr "Compatta" # DNS start [ settings and declaration ] msgctxt "#707401" @@ -5809,3 +5825,71 @@ msgstr "Ogni quanto vuoi che vengano controllati? (ore)" msgctxt "#707417" msgid "Favourite quality" msgstr "Qualità preferita" + +msgctxt "#707418" +msgid "Follow the steps below:" +msgstr "Segui i seguenti passi:" + +msgctxt "#707419" +msgid "%s) click here to enable debug logging" +msgstr "%s) clicca qui per attivare il logging di debug" + +msgctxt "#707420" +msgid "%s) repeat what you did to cause the error" +msgstr "%s) ripeti ciò che hai fatto per causare l'errore" + +msgctxt "#707421" +msgid "%s) click here to create the report" +msgstr "%s) clicca qui per creare il report" + +msgctxt "#707422" +msgid "%s) click here to disable debug logging" +msgstr "%s) clicca qui per disattivare il logging di debug" + +msgctxt "#707423" +msgid "Explain the issue and share this link:" +msgstr "Spiega il problema e condividi questo link:" + +msgctxt "#707424" +msgid "Service not available. 
Try again later" +msgstr "Servizio non disponibile Riprova più tardi" + +msgctxt "#707425" +msgid "Debug should be active" +msgstr "Il debug dovrebbe essere attivo" + +msgctxt "#707426" +msgid "before generating the report" +msgstr "prima di generare il report" + +msgctxt "#707427" +msgid "Unable to read kodi log" +msgstr "Impossibile leggere il log di kodi" + +msgctxt "#707428" +msgid "Failed to upload report" +msgstr "Impossibile caricare il report" + +msgctxt "#707429" +msgid "Report an issue" +msgstr "Segnala un problema" + +msgctxt "#707430" +msgid "Debug logging" +msgstr "Logging di debug" + +msgctxt "#707431" +msgid "Enabled" +msgstr "Attivato" + +msgctxt "#707432" +msgid "Disabed" +msgstr "Disattivato" + +msgctxt "#707433" +msgid "Delete" +msgstr "Cancella" + +msgctxt "#707434" +msgid "reCaptcha verification failed" +msgstr "Verifica reCaptcha fallita" \ No newline at end of file diff --git a/resources/settings.xml b/resources/settings.xml index 19711b58..62c9c475 100644 --- a/resources/settings.xml +++ b/resources/settings.xml @@ -59,7 +59,8 @@ <setting id="videolibrary_kodi" type="bool" label="70120" enable="lt(-1,2)+eq(0,false)" default="false"/> <setting id="videolibrary_max_quality" type="bool" label="70729" default="false" visible="true"/> <setting id="next_ep" type="enum" label="70746" lvalues="70752|70747|70748" default="0"/> - <setting id="next_ep_type" type="bool" label="70754" default="false" visible="eq(-1,2)"/> + <setting id="next_ep_type" type="select" label="70754" lvalues="70755|70756|70757" default="0" visible="eq(-1,2)"/> + <!-- <setting id="next_ep_type" type="bool" label="70754" default="false" visible="eq(-1,2)"/> --> <setting id="next_ep_seconds" type="enum" values="20|30|40|50|60|70|80|90|100|110|120" label="70749" default="2" visible="!eq(-2,0)"/> </category> diff --git a/resources/skins/Default/720p/ChannelSettings.xml b/resources/skins/Default/720p/ChannelSettings.xml index 06c64a8d..82c3fb67 100644 --- a/resources/skins/Default/720p/ChannelSettings.xml +++ b/resources/skins/Default/720p/ChannelSettings.xml @@ -26,7 +26,7 @@ <height>34</height> <width>725</width> <font>font12_title</font> - <textcolor>0xFFFFFFFF</textcolor> + <textcolor>FFFFFFFF</textcolor> <align>center</align> <aligny>center</aligny> <label>$ADDON[plugin.video.kod 70000]</label> @@ -45,6 +45,7 @@ <width>200</width> <height>50</height> <textwidth>110</textwidth> + <textcolor>FFFFFFFF</textcolor> <texturefocus>Controls/KeyboardKey.png</texturefocus> <texturenofocus>Controls/KeyboardKeyNF.png</texturenofocus> <align>center</align> @@ -57,6 +58,7 @@ <width>200</width> <height>50</height> <textwidth>110</textwidth> + <textcolor>FFFFFFFF</textcolor> <texturefocus>Controls/KeyboardKey.png</texturefocus> <texturenofocus>Controls/KeyboardKeyNF.png</texturenofocus> <align>center</align> @@ -69,6 +71,7 @@ <width>200</width> <height>50</height> <textwidth>110</textwidth> + <textcolor>FFFFFFFF</textcolor> <texturefocus>Controls/KeyboardKey.png</texturefocus> <texturenofocus>Controls/KeyboardKeyNF.png</texturenofocus> <align>center</align> @@ -86,7 +89,7 @@ <width>745</width> <height>300</height> <font>font16</font> - <textcolor>0xFFFFFFFF</textcolor> + <textcolor>FFFFFFFF</textcolor> <align>center</align> <aligny>center</aligny> <label>$ADDON[plugin.video.kod 70004]</label> @@ -97,7 +100,7 @@ <posx>780</posx> <width>10</width> <height>300</height> - <textcolor>0xFFFFFFFF</textcolor> + <textcolor>FFFFFFFF</textcolor> <texture>Controls/ScrollBack.png</texture> </control> <control type="image" 
id="10009"> diff --git a/resources/skins/Default/720p/NextDialog.xml b/resources/skins/Default/720p/NextDialog.xml index 5de2f868..4b5fbbb9 100644 --- a/resources/skins/Default/720p/NextDialog.xml +++ b/resources/skins/Default/720p/NextDialog.xml @@ -42,7 +42,7 @@ <itemgap>0</itemgap> <align>right</align> <control type="button" id="11"> - <label>$ADDON[plugin.video.kod 70750] $INFO[Player.TimeRemaining(mm:ss)]</label> + <label>$INFO[Window.Property(title)] | $INFO[Window.Property(ep_title)] | $INFO[Player.TimeRemaining(secs),,]</label> <onclick>SendClick(3012)</onclick> <height>40</height> <width min="50">auto</width> diff --git a/resources/skins/Default/720p/NextDialogCompact.xml b/resources/skins/Default/720p/NextDialogCompact.xml index bad59a9f..dd348317 100644 --- a/resources/skins/Default/720p/NextDialogCompact.xml +++ b/resources/skins/Default/720p/NextDialogCompact.xml @@ -42,7 +42,7 @@ <itemgap>0</itemgap> <align>right</align> <control type="button" id="11"> - <label>[B]$INFO[Player.TimeRemaining(mm:ss)][/B]</label> + <label>[B]$INFO[Player.TimeRemaining(secs),,][/B]</label> <onclick>SendClick(3012)</onclick> <!-- <visible>!Integer.IsGreater(Player.TimeRemaining,59)</visible> --> <height>40</height> diff --git a/resources/skins/Default/720p/NextDialogExtended.xml b/resources/skins/Default/720p/NextDialogExtended.xml new file mode 100644 index 00000000..22bf4c44 --- /dev/null +++ b/resources/skins/Default/720p/NextDialogExtended.xml @@ -0,0 +1,125 @@ +<?xml version="1.0" encoding="UTF-8"?> +<window> + <defaultcontrol always="true">20</defaultcontrol> + <onload>Dialog.Close(fullscreeninfo,true)</onload> + <onload>Dialog.Close(videoosd,true)</onload> + <controls> + <control type="group"> + <animation type="WindowOpen" reversible="false"> + <effect type="fade" start="0" end="100" time="600" /> + <effect type="slide" start="115,0" end="0,0" time="600" /> + </animation> + <animation type="WindowClose" reversible="false"> + <effect type="fade" start="100" end="0" time="400" /> + <effect type="slide" start="0,0" end="115,0" time="400" /> + </animation> + <control type="group"> + <right>30</right> + <bottom>30</bottom> + <height>220</height> + <width>326</width> + <!-- Background --> + <control type="image"> + <top>0</top> + <right>0</right> + <width>326</width> + <height>180</height> + <texture>$INFO[Window.Property(next_img)]</texture> + </control> + <control type="group"> + <top>0</top> + <right>0</right> + <width>100%</width> + <!-- buttons --> + <control type="button" id="3012"> + <left>-1000</left> + <top>-1000</top> + <height>1</height> + <width>1</width> + </control> + <control type="grouplist" id="20"> + <orientation>vertical</orientation> + <height>220</height> + <width>326</width> + <itemgap>0</itemgap> + <right>0</right> + <control type="button" id="11"> + <label></label> + <onclick>SendClick(3012)</onclick> + <height>180</height> + <width>326</width> + <right>0</right> + <font>font30_title</font> + <textoffsety>20</textoffsety> + <textcolor>FFFFFFFF</textcolor> + <focusedcolor>FFFFFFFF</focusedcolor> + <selectedcolor>FFFFFFFF</selectedcolor> + <shadowcolor>22000000</shadowcolor> + <aligny>top</aligny> + <align>center</align> + <texturefocus border="10">NextDialog/background-play.png</texturefocus> + <texturenofocus border="10" colordiffuse="00232323">NextDialog/background-diffuse.png</texturenofocus> + <pulseonselect>no</pulseonselect> + </control> + <control type="button" id="3013"> + <label>$ADDON[plugin.video.kod 60396]</label> + <height>40</height> + <width>326</width> + 
<font>font30_title</font> + <textoffsetx>20</textoffsetx> + <textcolor>80FFFFFF</textcolor> + <focusedcolor>FFFFFFFF</focusedcolor> + <selectedcolor>80FFFFFF</selectedcolor> + <shadowcolor>22000000</shadowcolor> + <aligny>center</aligny> + <align>center</align> + <texturefocus border="10" colordiffuse="88232323">NextDialog/background-diffuse.png</texturefocus> + <texturenofocus border="10" colordiffuse="88232323">NextDialog/background-diffuse.png</texturenofocus> + <pulseonselect>no</pulseonselect> + </control> + </control> + <control type="label"> + <bottom>60</bottom> + <height>40</height> + <aligny>center</aligny> + <visible>true</visible> + <align>center</align> + <scroll>true</scroll> + <scrollspeed>50</scrollspeed> + <textcolor>FFFFFFFF</textcolor> + <shadowcolor>ff000000</shadowcolor> + <info>Window.Property(title)</info> + <font>font30_title</font> + </control> + <control type="label"> + <bottom>40</bottom> + <height>40</height> + <aligny>center</aligny> + <visible>true</visible> + <align>center</align> + <scroll>true</scroll> + <scrollspeed>50</scrollspeed> + <textcolor>FFFFFFFF</textcolor> + <shadowcolor>ff000000</shadowcolor> + <info>Window.Property(ep_title)</info> + <font>font20_title</font> + </control> + <control type="label"> + <top>20</top> + <right>25</right> + <height>auto</height> + <aligny>top</aligny> + <visible>true</visible> + <align>right</align> + <scroll>true</scroll> + <scrollspeed>50</scrollspeed> + <textcolor>FFFFFFFF</textcolor> + <shadowcolor>ff000000</shadowcolor> + <info>Player.TimeRemaining(secs),,</info> + <font>font30_title</font> + </control> + </control> + </control> + </control> + </controls> +</window> \ No newline at end of file diff --git a/resources/skins/Default/720p/Recaptcha.xml b/resources/skins/Default/720p/Recaptcha.xml index 0d824514..cc552796 100644 --- a/resources/skins/Default/720p/Recaptcha.xml +++ b/resources/skins/Default/720p/Recaptcha.xml @@ -1,206 +1,221 @@ <?xml version="1.0" encoding="utf-8"?> <window> <allowoverlays>false</allowoverlays> + <animation type="WindowOpen" reversible="false"> + <effect type="zoom" start="80" end="100" center="640,225" delay="160" tween="back" time="240" /> + <effect type="fade" delay="160" end="100" time="240" /> + </animation> + <animation type="WindowClose" reversible="false"> + <effect type="zoom" start="100" end="80" center="640,225" easing="in" tween="back" time="240" /> + <effect type="fade" start="100" end="0" time="240" /> + </animation> <controls> <control type="group" id="10001"> - <posx>250</posx> - <posy>60</posy> - <width>700</width> - <height>600</height> + <top>40</top> + <left>390</left> + <width>600</width> + <height>640</height> <control type="image"> - <width>700</width> - <height>600</height> - <texture>Windows/DialogBack.png</texture> + <width>510</width> + <height>640</height> + <left>45</left> + <texture>Shortcut/dialog-bg-solid.png</texture> </control> <control type="textbox" id="10000"> - <posy>20</posy> - <posx>30</posx> + <top>30</top> + <left>20</left> <height>60</height> - <width>630</width> - <textcolor>0xFFFFA500</textcolor> + <width>560</width> + <textcolor>FFFFFFFF</textcolor> <wrapmultiline>true</wrapmultiline> <align>center</align> <label></label> </control> <control type="button" id="10002"> - <posy>540</posy> - <posx>110</posx> - <width>140</width> - <height>30</height> - <textwidth>100</textwidth> + <top>565</top> + <left>75</left> + <width>150</width> + <height>50</height> + <textwidth>110</textwidth> + <textcolor>FFFFFFFF</textcolor> + 
<focusedcolor>FFFFFFFF</focusedcolor> <texturefocus>Controls/KeyboardKey.png</texturefocus> <texturenofocus>Controls/KeyboardKeyNF.png</texturenofocus> <align>center</align> <aligny>center</aligny> - <label>Aceptar</label> - <onup>10013</onup> - <ondown>10003</ondown> - <onleft>10013</onleft> + <label>$ADDON[plugin.video.kod 70007]</label> + <onup>10011</onup> + <ondown>10005</ondown> + <onleft>10004</onleft> <onright>10003</onright> - </control> + </control> <control type="button" id="10003"> - <posy>540</posy> - <posx>280</posx> - <width>140</width> - <height>30</height> - <textwidth>100</textwidth> + <top>565</top> + <left>225</left> + <width>150</width> + <height>50</height> + <textwidth>110</textwidth> + <textcolor>FFFFFFFF</textcolor> + <focusedcolor>FFFFFFFF</focusedcolor> <texturefocus>Controls/KeyboardKey.png</texturefocus> <texturenofocus>Controls/KeyboardKeyNF.png</texturenofocus> <align>center</align> <aligny>center</aligny> - <label>Cancelar</label> - <onup>10002</onup> - <ondown>10004</ondown> + <label>$ADDON[plugin.video.kod 707433]</label> + <onup>10012</onup> + <ondown>10006</ondown> <onleft>10002</onleft> <onright>10004</onright> </control> <control type="button" id="10004"> - <posy>540</posy> - <posx>450</posx> - <width>140</width> - <height>30</height> - <textwidth>100</textwidth> + <top>565</top> + <left>375</left> + <width>150</width> + <height>50</height> + <textwidth>110</textwidth> + <textcolor>FFFFFFFF</textcolor> + <focusedcolor>FFFFFFFF</focusedcolor> <texturefocus>Controls/KeyboardKey.png</texturefocus> <texturenofocus>Controls/KeyboardKeyNF.png</texturenofocus> <align>center</align> <aligny>center</aligny> - <label>Recargar</label> - <onup>10003</onup> - <ondown>10005</ondown> + <label>$ADDON[plugin.video.kod 70008]</label> + <onup>10013</onup> + <ondown>10007</ondown> <onleft>10003</onleft> - <onright>10005</onright> - </control> + <onright>10002</onright> + </control> <control type="image" id="10020"> - <posy>80</posy> - <posx>120</posx> + <top>90</top> + <left>75</left> <width>450</width> <height>450</height> </control> <control type="togglebutton" id="10005"> - <posy>80</posy> - <posx>120</posx> + <top>90</top> + <left>75</left> <width>150</width> <height>150</height> - <texturefocus border="50" colordiffuse="50D8D8D8">Controls/KeyboardKeyWhite.png</texturefocus> - <texturenofocus border="15" colordiffuse="FFA2B2E7">Controls/KeyboardKeyNF.png</texturenofocus> - <alttexturefocus>Controls/check_mark.png</alttexturefocus> - <alttexturenofocus>Controls/check_mark.png</alttexturenofocus> - <onup>10004</onup> - <ondown>10006</ondown> - <onleft>10004</onleft> + <texturefocus colordiffuse="AA232323">Controls/background-diffuse.png</texturefocus> + <texturenofocus colordiffuse="00232323">Controls/background-diffuse.png</texturenofocus> + <alttexturefocus colordiffuse="FF232323">Controls/check_mark.png</alttexturefocus> + <alttexturenofocus colordiffuse="FFFFFFFF">Controls/check_mark.png</alttexturenofocus> + <onup>10002</onup> + <ondown>10008</ondown> + <onleft>10007</onleft> <onright>10006</onright> </control> <control type="togglebutton" id="10006"> - <posy>80</posy> - <posx>270</posx> + <top>90</top> + <left>225</left> <width>150</width> <height>150</height> - <texturefocus border="50" colordiffuse="50D8D8D8">Controls/KeyboardKeyWhite.png</texturefocus> - <texturenofocus border="15" colordiffuse="FFA2B2E7">Controls/KeyboardKeyNF.png</texturenofocus> - <alttexturefocus>Controls/check_mark.png</alttexturefocus> - 
<alttexturenofocus>Controls/check_mark.png</alttexturenofocus> - <onup>10005</onup> - <ondown>10007</ondown> + <texturefocus colordiffuse="AA232323">Controls/background-diffuse.png</texturefocus> + <texturenofocus colordiffuse="00232323">Controls/background-diffuse.png</texturenofocus> + <alttexturefocus colordiffuse="FF232323">Controls/check_mark.png</alttexturefocus> + <alttexturenofocus colordiffuse="FFFFFFFF">Controls/check_mark.png</alttexturenofocus> + <onup>10003</onup> + <ondown>10009</ondown> <onleft>10005</onleft> <onright>10007</onright> </control> <control type="togglebutton" id="10007"> - <posy>80</posy> - <posx>420</posx> + <top>90</top> + <left>375</left> <width>150</width> <height>150</height> - <texturefocus border="50" colordiffuse="50D8D8D8">Controls/KeyboardKeyWhite.png</texturefocus> - <texturenofocus border="15" colordiffuse="FFA2B2E7">Controls/KeyboardKeyNF.png</texturenofocus> - <alttexturefocus>Controls/check_mark.png</alttexturefocus> - <alttexturenofocus>Controls/check_mark.png</alttexturenofocus> - <onup>10006</onup> - <ondown>10008</ondown> + <texturefocus colordiffuse="AA232323">Controls/background-diffuse.png</texturefocus> + <texturenofocus colordiffuse="00232323">Controls/background-diffuse.png</texturenofocus> + <alttexturefocus colordiffuse="FF232323">Controls/check_mark.png</alttexturefocus> + <alttexturenofocus colordiffuse="FFFFFFFF">Controls/check_mark.png</alttexturenofocus> + <onup>10004</onup> + <ondown>10010</ondown> <onleft>10006</onleft> - <onright>10008</onright> + <onright>10005</onright> </control> <control type="togglebutton" id="10008"> - <posy>230</posy> - <posx>120</posx> + <top>240</top> + <left>75</left> <width>150</width> <height>150</height> - <texturefocus border="50" colordiffuse="50D8D8D8">Controls/KeyboardKeyWhite.png</texturefocus> - <texturenofocus border="15" colordiffuse="FFA2B2E7">Controls/KeyboardKeyNF.png</texturenofocus> - <alttexturefocus>Controls/check_mark.png</alttexturefocus> - <alttexturenofocus>Controls/check_mark.png</alttexturenofocus> - <onup>10007</onup> - <ondown>10009</ondown> - <onleft>10007</onleft> + <texturefocus colordiffuse="AA232323">Controls/background-diffuse.png</texturefocus> + <texturenofocus colordiffuse="00232323">Controls/background-diffuse.png</texturenofocus> + <alttexturefocus colordiffuse="FF232323">Controls/check_mark.png</alttexturefocus> + <alttexturenofocus colordiffuse="FFFFFFFF">Controls/check_mark.png</alttexturenofocus> + <onup>10005</onup> + <ondown>10011</ondown> + <onleft>10010</onleft> <onright>10009</onright> </control> <control type="togglebutton" id="10009"> - <posy>230</posy> - <posx>270</posx> + <top>240</top> + <left>225</left> <width>150</width> <height>150</height> - <texturefocus border="50" colordiffuse="50D8D8D8">Controls/KeyboardKeyWhite.png</texturefocus> - <texturenofocus border="15" colordiffuse="FFA2B2E7">Controls/KeyboardKeyNF.png</texturenofocus> - <alttexturefocus>Controls/check_mark.png</alttexturefocus> - <alttexturenofocus>Controls/check_mark.png</alttexturenofocus> - <onup>10008</onup> - <ondown>10010</ondown> + <texturefocus colordiffuse="AA232323">Controls/background-diffuse.png</texturefocus> + <texturenofocus colordiffuse="00232323">Controls/background-diffuse.png</texturenofocus> + <alttexturefocus colordiffuse="FF232323">Controls/check_mark.png</alttexturefocus> + <alttexturenofocus colordiffuse="FFFFFFFF">Controls/check_mark.png</alttexturenofocus> + <onup>10006</onup> + <ondown>10012</ondown> <onleft>10008</onleft> <onright>10010</onright> 
</control> <control type="togglebutton" id="10010"> - <posy>230</posy> - <posx>420</posx> + <top>240</top> + <left>375</left> <width>150</width> <height>150</height> - <texturefocus border="50" colordiffuse="50D8D8D8">Controls/KeyboardKeyWhite.png</texturefocus> - <texturenofocus border="15" colordiffuse="FFA2B2E7">Controls/KeyboardKeyNF.png</texturenofocus> - <alttexturefocus>Controls/check_mark.png</alttexturefocus> - <alttexturenofocus>Controls/check_mark.png</alttexturenofocus> - <onup>10009</onup> - <ondown>10011</ondown> + <texturefocus colordiffuse="AA232323">Controls/background-diffuse.png</texturefocus> + <texturenofocus colordiffuse="00232323">Controls/background-diffuse.png</texturenofocus> + <alttexturefocus colordiffuse="FF232323">Controls/check_mark.png</alttexturefocus> + <alttexturenofocus colordiffuse="FFFFFFFF">Controls/check_mark.png</alttexturenofocus> + <onup>10007</onup> + <ondown>10013</ondown> <onleft>10009</onleft> - <onright>10011</onright> + <onright>10008</onright> </control> <control type="togglebutton" id="10011"> - <posy>380</posy> - <posx>120</posx> + <top>390</top> + <left>75</left> <width>150</width> <height>150</height> - <texturefocus border="50" colordiffuse="50D8D8D8">Controls/KeyboardKeyWhite.png</texturefocus> - <texturenofocus border="15" colordiffuse="FFA2B2E7">Controls/KeyboardKeyNF.png</texturenofocus> - <alttexturefocus>Controls/check_mark.png</alttexturefocus> - <alttexturenofocus>Controls/check_mark.png</alttexturenofocus> - <onup>10010</onup> - <ondown>10012</ondown> - <onleft>10010</onleft> + <texturefocus colordiffuse="AA232323">Controls/background-diffuse.png</texturefocus> + <texturenofocus colordiffuse="00232323">Controls/background-diffuse.png</texturenofocus> + <alttexturefocus colordiffuse="FF232323">Controls/check_mark.png</alttexturefocus> + <alttexturenofocus colordiffuse="FFFFFFFF">Controls/check_mark.png</alttexturenofocus> + <onup>10008</onup> + <ondown>10002</ondown> + <onleft>10013</onleft> <onright>10012</onright> </control> <control type="togglebutton" id="10012"> - <posy>380</posy> - <posx>270</posx> + <top>390</top> + <left>225</left> <width>150</width> <height>150</height> - <texturefocus border="50" colordiffuse="50D8D8D8">Controls/KeyboardKeyWhite.png</texturefocus> - <texturenofocus border="15" colordiffuse="FFA2B2E7">Controls/KeyboardKeyNF.png</texturenofocus> - <alttexturefocus>Controls/check_mark.png</alttexturefocus> - <alttexturenofocus>Controls/check_mark.png</alttexturenofocus> - <onup>10011</onup> - <ondown>10013</ondown> + <texturefocus colordiffuse="AA232323">Controls/background-diffuse.png</texturefocus> + <texturenofocus colordiffuse="00232323">Controls/background-diffuse.png</texturenofocus> + <alttexturefocus colordiffuse="FF232323">Controls/check_mark.png</alttexturefocus> + <alttexturenofocus colordiffuse="FFFFFFFF">Controls/check_mark.png</alttexturenofocus> + <onup>10009</onup> + <ondown>10003</ondown> <onleft>10011</onleft> <onright>10013</onright> </control> <control type="togglebutton" id="10013"> - <posy>380</posy> - <posx>420</posx> + <top>390</top> + <left>375</left> <width>150</width> <height>150</height> - <texturefocus border="50" colordiffuse="50D8D8D8">Controls/KeyboardKeyWhite.png</texturefocus> - <texturenofocus border="15" colordiffuse="FFA2B2E7">Controls/KeyboardKeyNF.png</texturenofocus> - <alttexturefocus>Controls/check_mark.png</alttexturefocus> - <alttexturenofocus>Controls/check_mark.png</alttexturenofocus> - <onup>10012</onup> - <ondown>10002</ondown> + <texturefocus 
colordiffuse="AA232323">Controls/background-diffuse.png</texturefocus> + <texturenofocus colordiffuse="00232323">Controls/background-diffuse.png</texturenofocus> + <alttexturefocus colordiffuse="FF232323">Controls/check_mark.png</alttexturefocus> + <alttexturenofocus colordiffuse="FFFFFFFF">Controls/check_mark.png</alttexturenofocus> + <onup>10010</onup> + <ondown>10004</ondown> <onleft>10012</onleft> - <onright>10002</onright> + <onright>10011</onright> </control> </control> </controls> diff --git a/resources/skins/Default/720p/ShortCutMenu.xml b/resources/skins/Default/720p/ShortCutMenu.xml index 6f8b50d0..ced2cc5e 100644 --- a/resources/skins/Default/720p/ShortCutMenu.xml +++ b/resources/skins/Default/720p/ShortCutMenu.xml @@ -69,10 +69,8 @@ <texture colordiffuse="60FFFFFF">Shortcut/black.png</texture> </control> <control type="image"> - <left>35</left> - <top>35</top> - <width>150</width> - <height>150</height> + <width>220</width> + <height>220</height> <texture>$INFO[ListItem.Property(thumb)]</texture> <aspectratio>keep</aspectratio> <align>center</align> @@ -97,8 +95,9 @@ <animation effect="fade" start="100" end="0" time="0">Focus</animation> </control> <control type="image"> + <top>1</top> <width>220</width> - <height>220</height> + <height>218</height> <texture colordiffuse="FF0082C2">Shortcut/button-fo.png</texture> <animation effect="fade" start="100" end="0" time="0">Unfocus</animation> </control> @@ -118,20 +117,19 @@ <texture colordiffuse="60FFFFFF">Shortcut/black.png</texture> </control> <control type="image"> - <left>35</left> - <top>35</top> - <width>150</width> - <height>150</height> + <width>220</width> + <height>220</height> <texture>$INFO[ListItem.Property(thumb)]</texture> <aspectratio>keep</aspectratio> <align>center</align> </control> <control type="textbox"> <left>0</left> - <top>146</top> + <top>160</top> <width>220</width> <height>74</height> <font>font12</font> + <textcolor>FFFFFFFF</textcolor> <label>$INFO[ListItem.Label]</label> <align>center</align> <aligny>center</aligny> @@ -151,8 +149,8 @@ <textureslidernibfocus>-</textureslidernibfocus> <showonepage>false</showonepage> <orientation>horizontal</orientation> - <onleft>32500</onleft> - <onright>32500</onright> + <!-- <onleft>32500</onleft> + <onright>32500</onright> --> <ondown>32500</ondown> <onup>32500</onup> <animation effect="slide" end="120,0" time="0" condition="!Control.IsVisible(5)">Conditional</animation> diff --git a/resources/skins/Default/media/Controls/background-diffuse.png b/resources/skins/Default/media/Controls/background-diffuse.png new file mode 100644 index 0000000000000000000000000000000000000000..f636860396124eee9d8e0ad0a22357ac4938892c GIT binary patch literal 1627 zcmeAS@N?(olHy`uVBq!ia0vp^0U*r51SA=YQ-3iqF#pR8i71Ki^|4CM&(%vz$xlkv ztH><?$}=$7*jE%JCTFLXC?ut(XXe=|z2CiGNg*@ERw>-n*TA>HIW;5GqpB!1xXLdi zxhgx^GDXSWj?1RP3TQxXYDuC(MQ%=Bu~mhw64+cTAR8pCucQE0Qj%?}6yY17;GAES zs$i;Ts%M~N$E9FXl#*r@<l+X^4CL7=Wt5Z@Sn2DRmzV368|&p4rRy77T3YHG80i}s z=>k>g7FXt#Bv$C=6)S^`fSBQuTAW;zSx}OhpQivaGchT@w8U0P2`H}sHM}G<4Pkm- zG1U9OfY1lY=o{)8=p!o!dJFCr6x9`p1=tk(`r=ofn~P$6aY;}r4(GruC=RJCNYxKY zEzU13N=^kTS4MVUYDH$6YXscR<jjIZy{rO#kT8bx5=(*60u5Ibzra-kRY05v3JWXG z+{E-$pVYkck_=lV3j?sV2#N5_6eMwAI9WLtr6!i-7lq{K=h!Maf|8X&Kt_H^esM;A zfr4|enSzFIVsd64NI(;87*Lb1ua!%Fa%paAUWuoRtrAc~FC{a@%F)ur$jr>u($Lby z(9qD;%+kWi!o|ed+0xm~)WyWu6-m2iUU5lcUUDi-dnQ7=1zzotptAycu-G?0B^Bsq zu&KZ>0E+>+pme2R19fF;QD#|cid#{BE;u!TW6mlCx7*F}TCM=mpNZS?W;pfhgVVb{ zD1jp-cu+n9ivcqdFh}acvm}rM&(x`Tz?@tJ%(A=J$sPli2ThsI&H=?i?oJAxuFgOk 
z&CGO-^b8poG$xi#-0$raD019BD{w|)fOB(mhiT}P#w(W=OkuscfbDP8m#6&)<omx* znQ(1Es76QBFVidW0ofO1SX>2NyuFJ9y^p@$BQoVgvE}!gci-<7@Aw|G!Ohq!EMKZ6 zXWfEUXU6Y;4lUaKbY@!2jgJp{G<R7E&+NE#!S3CL86TJ3Uvu@xo&DFgO}ZeuVGCEk z=BhA(c_Q=Tv{$I*bC+wrywUYxWx;$kaglwdUn^SUef&ayoKDe|;%nOKd}Hpa#}%^L znY!9K`p0LV6`A^~_RMq(p`ANBT>5u#&oxkF+A%X}jzV3V#MGPq_I7M3uFZcA-v1N% zY0~NwYl57Tr;229ed&yv@`|(5^P;ek(Udd){=Z#4Pv%?bDr=?-e^Mm!Z7*zMy`Osc zhmA?kd*uzbhg{c9n{VGRvHap`)3dwz&+%I9UX%Gz*5ZE4J9g7$N9$B)?=YR&a$Pth zzTsT<2K~^gu)EKH$Sr#PDKvb;ZuY%J_jdkaj$lf^pYdkFSzzJIS>O>_%)r1c48n{I zv*t(u1xr0$978JRygkpz%b+N5V8ipj)pjusMp3M{XU%-C`u}aiWuEJj>x^U4cbqGj i`_SreY1?bc1*BiSW1f3Jq&N;#CwRL0xvX<aXaWFyCQNJq literal 0 HcmV?d00001 diff --git a/resources/skins/Default/media/NextDialog/background-play.png b/resources/skins/Default/media/NextDialog/background-play.png new file mode 100644 index 0000000000000000000000000000000000000000..f5a4b8c1541d80816e7f67767fe94822107e72d6 GIT binary patch literal 3409 zcmds4X;f3!77l`nxEugQ%48Z4Z8alP5(p?6AYu>#DyXP2xxghMH)eo<1(89EfEcLo zL~#TW5l|nB6%oa!E>xtnPRL-XD54Ca0y61438SyJ>;3MJWaaE{@9*sW?S1!MN%Zn` zo26%>heDxdvE3P56iSN)ewXM@1K+O(64s(nQ(wcresVv~Dk>id#qk8lRtTpIl>i)t za&l2hczhuwM{k9KU@;BTRaSvP!vY#+ogIh3k<g)F*gaAT`9ym9@*{<OiU8x{tmmYp z0)kLT&O<9hMPeCMNyBJ(so=NTjK`ogE^;9a<Hzwr(-A3zw!_)s2pDHQw3Ad2NaZqE zqru=r!vxFa5-J`a9v+Shx4|LOAUu&mq2LK5Jc)z_9#~m~Sk6;o#WG7Z#RvxjlJTXm zL=Gcjw3?H*6;a4(7!1JCqvSjV$ZeEdEE}yJ=zxcma3G$DBjAAtesY8wVET;*@nH!M z7b2PPpX@7#11EC-f?kbi=qI}a(Z+ed@K*99UvuVg{@y(_^oz`8a%LDP|7+EbT_E#~ zkU)4YBtsNZKEw=z#B$5Y8LEr_I>WeLliel3h#ZjxBY#oWI6P4tP@B3663a%rG$OXf z!a)R(Xw<A$kID|^1wp_!NFGeX5Xl71#S_RnSfB=MNcIzex-gMKJr8TdFpz>Y0r^c} zG>iiYOrm;9G<5!7g+Q~YbScD>BT`=k0hz5(KRIZxv69hrAA9r?4v!Cu)f#-K;Zp~J zLg2s%22T!w1PCMomO#Q1?R<%3s;v!`WV@6=q!LDM*LZ`+MZlBu{!f1nhsr?sicruv zHiL!%3^)NSpst`WNcQ$j3X#Gf5{XQE3fYy+u%*)}be0{%)`mHj90+HNWpbXF4~>Px z6XE2)4OjPy>IEwykskvF3CMuV6Z%h}{*%zLwrNm0B0{7-VBY{?h?<PX2ZXA@)FS{G z{|92z>0VMK5EcPU#&vT=vzc^zBE{YwOTrN~%K+^JbO5Ef!!ppV2=#s#-4H&|wvp4w z!Vtk42#wY}DpVd{Jt{PeM2ZL$d`O_V7=(_XG9*wQ&XYnbf`Hhd{egin7^;Yo2|yEZ z4!Ds_#{<HHAwc%mEys^0hF9;ZvCN$Cf6CoS<1lHtfw&PHSejt_;>Xu7IE*iFNDQ{N z6s+fU$WtHi8i<8~Zhz7Xg<7IgsA`rnyyx9^nU-Qa>)hFW*0W|-2PbDe@k=(=OCsLc zV)Opq96OgyUDY{NYwJ9{w?2OyTM<)ue(fIjsymfgPmZ=a+1RB~N@X>z(OtB>?KM+9 zmgMI2Q0L^>P_V{t9bev<Is1*FQG3a+%*~A2hi!G;bJuhXXPDS*Dyv@qux41uxl0V# znKi9hpZQQ)X;|q~KzZ1LZ4O)XjlYqzei$rW(QN9In(?u&g^}pOdE|QC`)rA>qd^6E zAZdS2hEYc5^Hq88R`xr;^bno9*Kp=P;%-lQtj^o#ThfTEZsN7G_naG)ZJG1eyra4F z+Xxn|Q~Bq{+|rwo)*?yF%6v^s+*Zpcap&Fp^tq$JV&R93)Ad7MpM#ByX<HiOi;weu zFf+dO_}d50*CI})9ckQs%%L`Ee)Z4OdllsJQ^B>N<ocMs+wG1sqjT18^KhDtzx|8Z zu8T{9H!kZrbET%%JpNHj0ge~6Ao9hPs5VpOfuK$uJ3<<G(aw;#`^!+MnTG0*7AotI zF?ccWU^7<uDi3uuRtO)2o89hiF<6~$V!KjQxF*iqGB%EU>BpqFxH$AmBPMx~?jDK9 zw9J?#v-PN_;`Yq7I@)5kQD?#QZTb(`I(8R1->tMX_yaLamxrhv!V3EOqq{6pD}J~8 zL0Q?@s{QO%QBl$0EYwiJzDU&aN4l2F(aETtgef|ex^w?Q2k!iY*b}U1a-r|dUvoW@ zZl^B%%tTjd=lyiLt*x!U8`zevU%&p4BdJ;|_q09I)DYeA^uog=BW#Fg`P5HI1g=8( zxM;X}py!S7a#>kxPSccUm}IMS%I-hv))jB+Xexn!Yk05$ZNN%{g+k#~Npt1vfU>X) zc}-I`8?w^AH%dxb?0Lw~8<)8wL?l{8n{Hxa60b@oE8qT}=9A}%6^g}c-O>zMB>rG; zhq<=4c7Uw)z5>U-qh;k|FK!?gbMxhR|MP;iudxsCJS*ASqK$Ry^L-zk*-%=vsK@Aa zIw6g1Zf<__`t|Fb*bNs;a+_R-^;x8X3l}c54fVe-=xwk2Hv#x15z=4|hm$fqJe=*e z*Q~!zW^tNnT$RAhFZl>uvTttnHt+9@S2aAjtawui*w%D(ba3)Zz3Sst8~rPyqc-Gn z+dA9Zmz`{K?J!_TgZkgSz|!e-p}pwIFIkZ@FR+cPa(jDw{Xq$h^%p}AW&)d=aaAEj z_Hv%*5$E#rrKP2oJw_oQ5n+Psv17%>NL}p-x2V;-KBuHJ3jf8X#XM~;Gi`bL;e-4} zRr=!3mw?0t4>p`KuXGnj4Sawv7Z-OuKInk#&1-TE)n`dzkx2CJ+D`2ob1a-b?i;x+ zTcuK^=5k@BKp?<?%hK;IkiKZXu^S0*f6~pE*MArkcuDc*(VNDya2L~s3y-WUPk3sb 
z+*0}CZsuk)s{B=7V&aS%>Br8Y+wLcUT-9cdVWO5h`<E8`AjhJ0Ef4g38XnA-{ya3)
z+fk1I+f`t*rBbQ2_4STJuWC=E0>yU3tL}YcVe!f9`~AB%Ur`bdM9%D~dNK2Ww5zGS
zJKHhbS)a)@V3D5RN--_&?Q9$z0?GUa;+_rtWNmmq+p(|pfKAY+tOMO9yFbLMa;&Yb
z0|y2NcbJ07@c<+FZL-z-m(>UHyXP#7j_&Jh49_pU_BdW;wdv{){SnWuM%5g4jvAo!
z82uQpI=y}9WB1_jVBeh7y?dkC<x{WTEVNytXuY3(?(pHmdB@!|?67`kk2>_|hCBnq
zaxk#waHAJ4)6IJE;s9DYoDvWa(C3qSw0FK<!mc*B#;|I-OK!fNx7F%+ee_&b%yMI`
hDLftBf6^hgLJPC(Q?_$^<|6e!AvV*KQRKQM_P_M*`g#BW

literal 0
HcmV?d00001

diff --git a/servers/akstream.json b/servers/akvideo.json
similarity index 81%
rename from servers/akstream.json
rename to servers/akvideo.json
index f1ea64c2..0f6b81f1 100644
--- a/servers/akstream.json
+++ b/servers/akvideo.json
@@ -8,17 +8,13 @@
         "url": "\\1"
       },
       {
-        "pattern": "akvideo.stream\/video\/(?:embed-)?([a-zA-Z0-9]+)",
-        "url": "http://akvideo.stream\/video\/\\1"
-      },
-      {
-        "pattern": "akvideo.stream/(?:embed-)?([a-zA-Z0-9]+)",
+        "pattern": "akvideo.stream/(?:video/|video\\.php\\?file_code=)?(?:embed-)?([a-zA-Z0-9]+)",
         "url": "http://akvideo.stream/video/\\1"
       }
     ]
   },
   "free": true,
-  "id": "akstream",
+  "id": "akvideo",
   "name": "Akvideo",
   "settings": [
     {
diff --git a/servers/akstream.py b/servers/akvideo.py
similarity index 63%
rename from servers/akstream.py
rename to servers/akvideo.py
index 29a4855e..998a6940 100644
--- a/servers/akstream.py
+++ b/servers/akvideo.py
@@ -1,24 +1,34 @@
 # -*- coding: utf-8 -*-
 # by DrZ3r0

-import urllib
+import urllib, re

 from core import httptools
 from core import scrapertools
 from platformcode import logger, config
+from core.support import dbg


 def test_video_exists(page_url):
     logger.info("(page_url='%s')" % page_url)
+    # page_url = re.sub('akvideo.stream/(?:video/|video\\.php\\?file_code=)?(?:embed-)?([a-zA-Z0-9]+)','akvideo.stream/video/\\1',page_url)
     global data
+    # dbg()
     page = httptools.downloadpage(page_url)
+    logger.info(page.data)
     if 'embed_ak.php' in page_url:
         code = scrapertools.find_single_match(page.url, '/embed-([0-9a-z]+)\.html')
-        if code:
+        if not code:
+            code = scrapertools.find_single_match(page.data, r"""input\D*id=(?:'|")[^'"]+(?:'|").*?value='([a-z0-9]+)""")
+        if code:
             page = httptools.downloadpage('http://akvideo.stream/video/' + code)
         else:
             return False, config.get_localized_string(70449) % "Akvideo"
     data = page.data
+
+    # ID, code = scrapertools.find_single_match(data, r"""input\D*id=(?:'|")([^'"]+)(?:'|").*?value='([a-z0-9]+)""")
+    # post = urllib.urlencode({ID: code})
+    # logger.info('PAGE DATA' + data)
     if "File Not Found" in data:
         return False, config.get_localized_string(70449) % "Akvideo"
     return True, ""
@@ -27,14 +37,20 @@ def test_video_exists(page_url):
 def get_video_url(page_url, premium=False, user="", password="", video_password=""):
     logger.info(" url=" + page_url)
     video_urls = []
+    # dbg()
     global data
+    logger.info('PAGE DATA' + data)
     vres = scrapertools.find_multiple_matches(data, 'nowrap[^>]+>([^,]+)')
+    if not vres: vres = scrapertools.find_multiple_matches(data, '<td>(\d+x\d+)')
+
     data_pack = scrapertools.find_single_match(data, "</div>\n\s*<script[^>]+>(eval.function.p,a,c,k,e,.*?)\s*</script>")
     if data_pack != "":
         from lib import jsunpack
         data = jsunpack.unpack(data_pack)
+
+    block = scrapertools.find_single_match(data, "sources:\s\[([^\]]+)\]")
+    data = block if block else data
     # URL
     # logger.info(data)
     matches = scrapertools.find_multiple_matches(data, '(http.*?\.mp4)')
@@ -47,6 +63,4 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
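+        # each entry pairs the "WxH" quality label scraped above with the stream URL;
+        # the "|" suffix is the Kodi convention for passing request headers along with the stream
         video_urls.append([vres[i] + " mp4 [Akvideo] ", 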
media_url.replace('https://', 'http://') + '|' + _headers]) i = i + 1 - for video_url in video_urls: - logger.info(" %s - %s" % (video_url[0], video_url[1])) - return sorted(video_urls, key=lambda x: x[0].split('x')[1]) + return sorted(video_urls, key=lambda x: int(x[0].split('x')[0])) diff --git a/servers/anonfile.py b/servers/anonfile.py index 385e3cb0..212c9ae1 100644 --- a/servers/anonfile.py +++ b/servers/anonfile.py @@ -5,14 +5,14 @@ from core import httptools from core import scrapertools -from platformcode import logger +from platformcode import logger, config def test_video_exists(page_url): logger.info("(page_url='%s')" % page_url) response = httptools.downloadpage(page_url) if not response.sucess or "Not Found" in response.data or "File was deleted" in response.data or "is no longer available" in response.data: - return False, "[anonfile] El fichero no existe o ha sido borrado" + return False, config.get_localized_string(70449) % "anonfile" return True, "" diff --git a/servers/filepup.py b/servers/filepup.py index e016e107..0636c120 100644 --- a/servers/filepup.py +++ b/servers/filepup.py @@ -5,14 +5,14 @@ from core import httptools from core import scrapertools -from platformcode import logger +from platformcode import logger, config def test_video_exists(page_url): logger.info("(page_url='%s')" % page_url) response = httptools.downloadpage(page_url) if "File was deleted" in response.data or "is no longer available" in response.data: - return False, "[filepup] El fichero no existe o ha sido borrado" + return False, config.get_localized_string(70449) % "filepup" return True, "" diff --git a/servers/filevideo.py b/servers/filevideo.py index 398e0630..2bd1ad53 100644 --- a/servers/filevideo.py +++ b/servers/filevideo.py @@ -3,7 +3,7 @@ from core import httptools from core import scrapertools from lib import jsunpack -from platformcode import logger +from platformcode import logger, config def test_video_exists(page_url): @@ -12,7 +12,7 @@ def test_video_exists(page_url): data = httptools.downloadpage(page_url).data if "Not Found" in data or "File was deleted" in data: - return False, "[Filevideo] El fichero no existe o ha sido borrado" + return False, config.get_localized_string(70449) % "Filevideo" return True, "" diff --git a/servers/nofile.py b/servers/nofile.py index af3cae83..977a10bc 100644 --- a/servers/nofile.py +++ b/servers/nofile.py @@ -5,14 +5,14 @@ from core import httptools from core import scrapertools -from platformcode import logger +from platformcode import logger, config def test_video_exists(page_url): logger.info("(page_url='%s')" % page_url) response = httptools.downloadpage(page_url) if not response.sucess or "Not Found" in response.data or "File was deleted" in response.data or "is no longer available" in response.data: - return False, "[nofile] El fichero no existe o ha sido borrado" + return False, config.get_localized_string(70449) % "nofile" return True, "" diff --git a/servers/userscloud.py b/servers/userscloud.py index 9d8c4885..38d6b189 100644 --- a/servers/userscloud.py +++ b/servers/userscloud.py @@ -3,7 +3,7 @@ from core import httptools from core import scrapertools from lib import jsunpack -from platformcode import logger +from platformcode import logger, config def test_video_exists(page_url): @@ -12,7 +12,7 @@ def test_video_exists(page_url): response = httptools.downloadpage(page_url) if not response.sucess or "Not Found" in response.data or "File was deleted" in response.data or "is no longer available" in response.data: - return False, 
"[Userscloud] El fichero no existe o ha sido borrado" + return False, config.get_localized_string(70449) % "Userscloud" return True, "" diff --git a/servers/vcstream.json b/servers/vidcloud.json similarity index 85% rename from servers/vcstream.json rename to servers/vidcloud.json index 29b32658..f8b992cf 100644 --- a/servers/vcstream.json +++ b/servers/vidcloud.json @@ -8,13 +8,13 @@ "url": "https://vcstream.to/embed/\\1/\\2" }, { - "pattern": "vidcloud.co/(?:embed|f|v)/([a-z0-9A-Z]+)", - "url": "https://vidcloud.co\/v\/\\1" + "pattern": "vidcloud.ru/(?:embed|f|v)/([a-z0-9A-Z]+)", + "url": "https://vidcloud.ru\/v\/\\1" } ] }, "free": true, - "id": "vcstream", + "id": "vidcloud", "name": "Vidcloud", "settings": [ { diff --git a/servers/vcstream.py b/servers/vidcloud.py similarity index 96% rename from servers/vcstream.py rename to servers/vidcloud.py index b21f2a1d..4a28eb22 100644 --- a/servers/vcstream.py +++ b/servers/vidcloud.py @@ -2,7 +2,10 @@ # Icarus pv7 # Fix dentaku65 -import urlparse +try: + import urlparse +except: + import urllib.parse as urlparse from core import httptools from core import scrapertools diff --git a/servers/vidup.py b/servers/vidup.py index 382595a1..9ee972de 100644 --- a/servers/vidup.py +++ b/servers/vidup.py @@ -4,14 +4,14 @@ import urllib from core import httptools from core import scrapertools -from platformcode import logger +from platformcode import logger, config def test_video_exists(page_url): logger.info("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url).data if "Not Found" in data: - return False, "[Vidup] El fichero no existe o ha sido borrado" + return False, config.get_localized_string(70449) % "Vidup" return True, "" diff --git a/servers/watchvideo.py b/servers/watchvideo.py index 3e3ca03f..5f2e0c75 100644 --- a/servers/watchvideo.py +++ b/servers/watchvideo.py @@ -3,14 +3,14 @@ from core import httptools from core import scrapertools from lib import jsunpack -from platformcode import logger +from platformcode import logger, config def test_video_exists(page_url): logger.info("(page_url='%s')" % page_url) data = httptools.downloadpage(page_url).data if "Not Found" in data or "File was deleted" in data: - return False, "[Watchvideo] El fichero no existe o ha sido borrado" + return False, config.get_localized_string(70449) % "Watchvideo" return True, "" diff --git a/servers/wstream.py b/servers/wstream.py index 02853a54..e4643de9 100644 --- a/servers/wstream.py +++ b/servers/wstream.py @@ -9,34 +9,49 @@ except ImportError: import urllib from core import httptools, scrapertools -from platformcode import logger, config +from platformcode import logger, config, platformtools headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0']] def test_video_exists(page_url): + def int_bckup_method(): + global data,headers + page_url = scrapertools.find_single_match(data, r"""<center><a href='(https?:\/\/wstream[^']+)'\s*title='bkg'""") + if page_url: + data = httptools.downloadpage(page_url, headers=headers, follow_redirects=True, post={'g-recaptcha-response': captcha}).data + logger.info("(page_url='%s')" % page_url) resp = httptools.downloadpage(page_url) global data data = resp.data + + sitekey = scrapertools.find_single_match(data, 'data-sitekey="([^"]+)') + captcha = platformtools.show_recaptcha(sitekey, page_url) if sitekey else '' + page_url = resp.url if '/streaming.php' in page_url in page_url: - code = httptools.downloadpage(page_url, headers=headers, 
follow_redirects=False).headers['location'].split('/')[-1].replace('.html','')
-        logger.info('WCODE='+code)
+        code = httptools.downloadpage(page_url, headers=headers, follow_redirects=False).headers['location'].split('/')[-1].replace('.html', '')
+        logger.info('WCODE=' + code)
         page_url = 'https://wstream.video/video.php?file_code=' + code
         data = httptools.downloadpage(page_url, headers=headers, follow_redirects=True).data

     possibleParam = scrapertools.find_multiple_matches(data, r"""<input.*?(?:name=["']([^'"]+).*?value=["']([^'"]*)['"]>|>)""")
     if possibleParam:
-        post = urllib.urlencode({param[0]: param[1] for param in possibleParam if param[0]})
-        data = httptools.downloadpage(page_url, headers=headers, post=post, follow_redirects=True).data
+        post = {param[0]: param[1] for param in possibleParam if param[0]}
+        if captcha: post['g-recaptcha-response'] = captcha
+        if post:
+            data = httptools.downloadpage(page_url, headers=headers, post=post, follow_redirects=True).data
+        elif captcha:
+            int_bckup_method()
+    elif captcha:
+        int_bckup_method()
     else:
-        page_url = scrapertools.find_single_match(data, r"""<center><a href='(https?:\/\/wstream[^']+)'\s*title='bkg'""")
-        if page_url:
-            data = httptools.downloadpage(page_url, headers=headers, follow_redirects=True).data
+        return False, config.get_localized_string(707434)

     if "Not Found" in data or "File was deleted" in data:
         return False, config.get_localized_string(70449) % 'Wstream'
-    return True, ""
+    else:
+        return True, ""


 # Returns an array of possible video url's from the page_url
diff --git a/servers/youtube.py b/servers/youtube.py
index b00e94ed..9e005a67 100644
--- a/servers/youtube.py
+++ b/servers/youtube.py
@@ -3,7 +3,10 @@

 import re
 import urllib
-import urlparse
+try:
+    import urlparse
+except:
+    import urllib.parse as urlparse

 from core import httptools
 from core import jsontools as json
diff --git a/specials/autoplay.py b/specials/autoplay.py
index d67975bd..9f46f4ac 100644
--- a/specials/autoplay.py
+++ b/specials/autoplay.py
@@ -1,13 +1,20 @@
 # -*- coding: utf-8 -*-

+#from builtins import str
+import sys
+PY3 = False
+if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
+from builtins import range
+
 import os
-from time import sleep

 from core import channeltools
 from core import jsontools
 from core.item import Item
 from platformcode import config, logger
 from platformcode import platformtools
+from platformcode import launcher
+from time import sleep
 from platformcode.config import get_setting

 __channel__ = "autoplay"
@@ -93,6 +100,7 @@ def start(itemlist, item):

     base_item = item

+
     if not config.is_xbmc():
         #platformtools.dialog_notification('AutoPlay ERROR', 'Sólo disponible para XBMC/Kodi')
         return itemlist
@@ -125,9 +133,14 @@ def start(itemlist, item):
         url_list_valid = []
         autoplay_list = []
         autoplay_b = []
+        favorite_langs = []
         favorite_servers = []
         favorite_quality = []

+        # 2nd lang: check whether the user wants language filtering
+        status_language = config.get_setting("filter_languages", channel_id)
+
+
         # save the current values of "Action and Player Mode" from the preferences
         user_config_setting_action = config.get_setting("default_action")
         user_config_setting_player = config.get_setting("player_mode")
@@ -172,18 +185,21 @@ def start(itemlist, item):
                 favorite_quality.append(channel_node['quality'][settings_node['quality_%s' % num]])

         # filter the itemlist links so they match the autoplay settings
-        for item in itemlist:
+        for n, item in enumerate(itemlist):
             autoplay_elem = dict()
             b_dict = dict()
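+            # each candidate link records the position (index) of its language, server
+            # and quality within the favourite lists; these indices drive the sort below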
             # make sure this is really a video item (it must reference a server)
             if 'server' not in item: continue
+            # 2nd lang: remember every language seen in the list
+            if item.language not in favorite_langs:
+                favorite_langs.append(item.language)

             # add the "configure AutoPlay" option to the context menu
             if 'context' not in item:
                 item.context = list()
-            if not filter(lambda x: x['action'] == 'autoplay_config', context):
+            if not [x for x in context if x['action'] == 'autoplay_config']:
                 item.context.append({"title": config.get_localized_string(60071),
                                      "action": "autoplay_config",
                                      "channel": "autoplay",
@@ -204,6 +220,7 @@ def start(itemlist, item):
                     b_dict['videoitem']= item
                     autoplay_b.append(b_dict)
                     continue
+                autoplay_elem["indice_lang"] = favorite_langs.index(item.language)
                 autoplay_elem["indice_server"] = favorite_servers.index(item.server.lower())
                 autoplay_elem["indice_quality"] = favorite_quality.index(item.quality)

@@ -216,6 +233,7 @@ def start(itemlist, item):
                     b_dict['videoitem'] = item
                     autoplay_b.append(b_dict)
                     continue
+                autoplay_elem["indice_lang"] = favorite_langs.index(item.language)
                 autoplay_elem["indice_server"] = favorite_servers.index(item.server.lower())

             elif priority == 3:  # only qualities
@@ -227,6 +245,7 @@ def start(itemlist, item):
                     b_dict['videoitem'] = item
                     autoplay_b.append(b_dict)
                     continue
+                autoplay_elem["indice_lang"] = favorite_langs.index(item.language)
                 autoplay_elem["indice_quality"] = favorite_quality.index(item.quality)

             else:  # do not sort
@@ -245,16 +264,16 @@ def start(itemlist, item):

         # sort according to the chosen priority (language always ranks first)
         if priority == 0:  # servers and qualities
-            autoplay_list.sort(key=lambda orden: (orden['indice_server'], orden['indice_quality']))
+            autoplay_list.sort(key=lambda orden: (orden['indice_lang'], orden['indice_server'], orden['indice_quality']))

         elif priority == 1:  # qualities and servers
-            autoplay_list.sort(key=lambda orden: (orden['indice_quality'], orden['indice_server']))
+            autoplay_list.sort(key=lambda orden: (orden['indice_lang'], orden['indice_quality'], orden['indice_server']))

         elif priority == 2:  # only servers
-            autoplay_list.sort(key=lambda orden: orden['indice_server'])
+            autoplay_list.sort(key=lambda orden: (orden['indice_lang'], orden['indice_server']))

         elif priority == 3:  # only qualities
-            autoplay_list.sort(key=lambda orden: orden['indice_quality'])
+            autoplay_list.sort(key=lambda orden: (orden['indice_lang'], orden['indice_quality']))

         # prepare plan B: when enabled, the non-favourite items are appended at the end
         try:
@@ -351,7 +370,7 @@ def start(itemlist, item):

                     # if there are no items left in the list, notify the user
                     if autoplay_elem == autoplay_list[-1]:
-                        platformtools.dialog_notification('AutoPlay', config.get_localized_string(60072))
+                        platformtools.dialog_notification('AutoPlay', config.get_localized_string(60072) % videoitem.server.upper())
             else:
                 platformtools.dialog_notification(config.get_localized_string(60074), config.get_localized_string(60075))

diff --git a/specials/checkhost.py b/specials/checkhost.py
index 6a4edd81..41f5efbe 100644
--- a/specials/checkhost.py
+++ b/specials/checkhost.py
@@ -5,8 +5,11 @@ import xbmcaddon
 import json
 from platformcode import config, logger
 import requests
-from requests.exceptions import HTTPError
-from lib import httplib2
+import sys
+if sys.version_info[0] >= 3:
+    from lib.httplib2 import py3 as httplib2
+else:
+    from lib.httplib2 import py2 as httplib2
 import socket

 addon = xbmcaddon.Addon()
diff --git a/specials/community.json b/specials/community.json
index ac2ad1f4..c72593c1 100644
--- a/specials/community.json
+++ 
b/specials/community.json @@ -27,4 +27,4 @@ "visible": true } ] -} +} \ No newline at end of file diff --git a/specials/community.py b/specials/community.py index 721b8fad..29f8db5b 100644 --- a/specials/community.py +++ b/specials/community.py @@ -1,14 +1,14 @@ # -*- coding: utf-8 -*- # -*- Channel Community -*- -import re, os, inspect, requests, xbmc, xbmcaddon, xbmcgui -from core import httptools, scrapertools, servertools, jsontools, tmdb, support +import re, os, inspect, xbmcaddon, xbmcgui + +from core import httptools, jsontools, tmdb, support, filetools from core.item import Item -from core.support import typo -from channelselector import get_thumb -from platformcode import config, platformtools +from platformcode import config, platformtools from specials import autoplay +from channelselector import get_thumb addon = xbmcaddon.Addon('metadata.themoviedb.org') lang = addon.getSetting('language') @@ -17,8 +17,6 @@ defpage = ["", "20", "40", "60", "80", "100"] defp = defpage[config.get_setting('pagination','community')] show_seasons = config.get_setting('show_seasons','community') -list_data = {} - list_servers = ['directo', 'akstream', 'wstream', 'backin', 'cloudvideo', 'clipwatching', 'fembed', 'gounlimited', 'mega', 'mixdrop'] list_quality = ['SD', '720', '1080', '4k'] @@ -28,8 +26,8 @@ tmdb_api = 'a1ab8b8669da03637a4b98fa39c39228' def mainlist(item): support.log() - path = os.path.join(config.get_data_path(), 'community_channels.json') - if not os.path.exists(path): + path = filetools.join(config.get_data_path(), 'community_channels.json') + if not filetools.exists(path): with open(path, "w") as file: file.write('{"channels":{}}') file.close() @@ -42,548 +40,238 @@ def show_channels(item): support.log() itemlist = [] - context = [{"title": config.get_localized_string(50005), - "action": "remove_channel", - "channel": "community"}] + # add context menu + context = [{"title": config.get_localized_string(50005), "action": "remove_channel", "channel": "community"}] - path = os.path.join(config.get_data_path(), 'community_channels.json') - file = open(path, "r") - json = jsontools.load(file.read()) + # read json + json = load_and_check(item) itemlist.append(Item(channel=item.channel, - title=typo(config.get_localized_string(70676),'bold color kod'), + title=support.typo(config.get_localized_string(70676),'bold color kod'), action='add_channel', thumbnail=get_thumb('add.png'))) for key, channel in json['channels'].items(): - # Find File Path - if 'http' in channel['path']: - try: - file_path = httptools.downloadpage(channel['path'], follow_redirects=True, timeout=5).url - except: - support.log('Offline') - file_path = None - else: file_path = channel['path'] + path = filetools.dirname(channel['path']) # relative path + channel_json = load_json(channel['path']) # read channel json - if file_path: - # make relative path - path = os.path.dirname(os.path.abspath(file_path)) - if 'http' in path: path = path[path.find('http'):].replace('\\','/').replace(':/','://') - if file_path.startswith('http'): file_url = httptools.downloadpage(file_path, follow_redirects=True).data - elif os.path.isfile(file_path): file_url = open(file_path, "r").read() - else: - item.channel_id = key - remove_channel(item) - file_url='' + # retrieve information from json + thumbnail = relative('thumbnail', channel_json, path) + if not thumbnail: thumbnail = item.thumbnail + fanart = relative('fanart', channel_json, path) + plot = channel_json['plot'] if 'plot' in channel_json else '' - # load json - if file_url: - 
json_url = jsontools.load(file_url) - - thumbnail = relative('thumbnail', json_url, path) - if not thumbnail: thumbnail = item.thumbnail - fanart = relative('fanart', json_url, path) - plot = json_url['plot'] if json_url.has_key('plot') else '' - - itemlist.append(Item(channel=item.channel, - title=typo(channel['channel_name'],'bold'), - url=file_path, - thumbnail=thumbnail, - fanart=fanart, - plot=plot, - action='show_menu', - channel_id = key, - context=context, - path=path)) + itemlist.append(Item(channel=item.channel, + title=support.typo(channel['channel_name'],'bold'), + url=channel['path'], + thumbnail=thumbnail, + fanart=fanart, + plot=plot, + action='show_menu', + channel_id = key, + context=context, + path=path)) autoplay.show_option(item.channel, itemlist) support.channel_config(item, itemlist) return itemlist + def show_menu(item): - global list_data - itemlist = [] - add_search = True support.log() - # If Second Level Menu - if item.menu: - menu = item.menu - item.menu = None - if item.url: itemlist.append(item) - for key in menu: - support.log("KEY= ",key) - if key != 'search': - if type(menu[key]) == dict: - title = menu[key]['title'] if menu[key].has_key('title') else item.title - thumbnail = relative('thumbnail', menu[key], item.path) - url = relative('url', menu[key], item.path) if menu[key].has_key('url') else '' - plot = menu[key]['plot'] if menu[key].has_key('plot') else '' - else: - title = menu[key] - thumbnail = item.thumbnail - plot = '' - url = '' - - itemlist.append(Item(channel=item.channel, - title=typo(title,'submenu' if not url else 'bold'), - url=url if url else item.url, - path=item.path, - thumbnail=thumbnail, - plot=plot, - action='submenu' if not url else 'show_menu', - filterkey=key if not url else '' )) - - if menu.has_key('search'): - if type(menu['search']) == dict: - url = relative('url', menu['search'], item.path) if menu['search'].has_key('url') else '' - search_menu = menu['search']['search_menu'] if menu['search'].has_key('search_menu') else '' - else: - url = '' - search_menu = False - - itemlist.append(Item(channel=item.channel, - title=typo('Cerca ' + item.fulltitle +'...','color kod bold'), - thumbnail=get_thumb('search.png'), - action='search', - url=item.url, - custom_url=url, - search_menu=search_menu, - path=item.path)) - return itemlist + itemlist = [] + if item.menu: # if second level menu + get_sub_menu(item, item.menu, 'level2', itemlist) else: - json_data = load_json(item) - for key in json_data: + if type(item.url) == dict: + json = item.url + else: + json = load_json(item) + for key in json: if key == 'menu': - for option in json_data['menu']: - thumbnail = relative('thumbnail', option, item.path) - fanart = relative('fanart', option, item.path) - plot = option['plot'] if option.has_key('plot') else item.plot - url = relative('link', option, item.path) - submenu = option['submenu'] if option.has_key('submenu') else [] - level2 = option['level2'] if option.has_key('level2') else [] - if option.has_key('title'): - itemlist.append(Item(channel=item.channel, - title=format_title(option['title']), - fulltitle=option['title'], - thumbnail=thumbnail, - fanart=fanart, - plot=plot, - action='show_menu', - url=url, - path=item.path, - menu=level2)) - if option.has_key('search'): - menu = json_data['menu'] - if type(option['search']) == dict: - url = relative('url', option['search'], item.path) if option['search'].has_key('url') else '' - search_menu = option['search']['search_menu'] if option['search'].has_key('search_menu') else '' - 
itemlist.append(Item(channel=item.channel, - title=typo('Cerca nel Canale...','color kod bold'), - thumbnail=get_thumb('search.png'), - action='search', - url=item.url, - custom_url=url, - search_menu=search_menu, - path=item.path)) - add_search = False - - if submenu: - for key in submenu: - if key != 'search': - if type(submenu[key]) == dict: - title = submenu[key]['title'] if submenu[key].has_key('title') else item.title - thumbnail = relative('thumbnail', submenu[key], item.path) - plot = submenu[key]['plot'] if submenu[key].has_key('plot') else '' - else: - title = submenu[key] - thumbnail = item.thumbnail - plot = '' - itemlist.append(Item(channel=item.channel, - title=typo(title,'submenu'), - url=url, - path=item.path, - thumbnail=thumbnail, - plot=plot, - action='submenu', - filterkey=key)) - if submenu.has_key('search'): - if type(submenu['search']) == dict: - url = relative('url', submenu['search'], item.path) if submenu['search'].has_key('url') else '' - search_menu = submenu['search']['search_menu'] if submenu['search'].has_key('search_menu') else '' - else: - url = '' - search_menu = False - itemlist.append(Item(channel=item.channel, - title=typo('Cerca ' + option['title'] +'...','color kod bold'), - thumbnail=get_thumb('search.png'), - action='search', - url=item.url, - custom_url=url, - search_menu=search_menu, - path=item.path)) - - elif 'list' in key: - # select type of list - item.url = { key: json_data[key]} - support.log(item.url) - if key == "movies_list": - item.media_type = 'movies_list' - item.contentType = 'movie' - item.action = 'findvideos' - elif key == "tvshows_list": - item.media_type = 'tvshows_list' - item.contentType = 'tvshow' - item.action = 'get_season' - elif key == "episodes_list": - item.media_type = 'episodes_list' - item.contentType = 'episode' - item.action = 'episodios' - elif key == "generic_list": - item.media_type= 'generic_list' - itemlist += list_all(item) - - # add Search - if 'channel_name' in json_data and add_search: - itemlist.append(Item(channel=item.channel, - title=typo('Cerca nel Canale...','color kod bold'), - thumbnail=get_thumb('search.png'), - action='search', - url=item.url, - path=item.path)) - - return itemlist - - -def filter_thread(filter,item): - thumbnail = '' - plot = '' - dict_ = {'url': 'search/person', 'language': lang, 'query': filter, 'page': 1} - tmdb_inf = tmdb.discovery(item, dict_=dict_) - results = tmdb_inf.results[0] - id = results['id'] - if id: - thumbnail = 'http://image.tmdb.org/t/p/original' + results['profile_path'] if results['profile_path'] else item.thumbnail - json_file = httptools.downloadpage('http://api.themoviedb.org/3/person/'+ str(id) + '?api_key=' + tmdb_api + '&language=en', use_requests=True).data - plot += jsontools.load(json_file)['biography'] - - item = Item(channel=item.channel, - title=typo(filter, 'bold'), - url=item.url, - media_type=item.media_type, - action='list_filtered', - thumbnail=thumbnail, - plot=plot, - path=item.path, - filterkey=item.filterkey, - filter=filter) - return item - - -def submenu(item): - support.log() - from lib.concurrent import futures - - itemlist = [] - filter_list = [] - - json_data = load_json(item) - if json_data.has_key("movies_list"): item.media_type= 'movies_list' - elif json_data.has_key("tvshows_list"): item.media_type = 'tvshows_list' - elif json_data.has_key("episodes_list"): item.media_type = 'episodes_list' - elif json_data.has_key("generic_list"): item.media_type= 'generic_list' - media_type = item.media_type - - for media in 
json_data[media_type]: - if media.has_key(item.filterkey) and media[item.filterkey]: - if type(media[item.filterkey]) == str and media[item.filterkey] not in filter_list: - filter_list.append(media[item.filterkey]) - elif type(media[item.filterkey]) == list: - for f in media[item.filterkey]: - if f not in filter_list: - filter_list.append(f) - - filter_list.sort() - - with futures.ThreadPoolExecutor() as executor: - List = [executor.submit(filter_thread, filter, item) for filter in filter_list] - for res in futures.as_completed(List): - if res.result(): - itemlist.append(res.result()) - itemlist = sorted(itemlist, key=lambda it: it.title) - return itemlist - - -def list_all(item): - support.log() - if inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes']: - pagination = int(defp) if defp.isdigit() else '' - else: pagination = '' - pag = item.page if item.page else 1 - - itemlist = [] - media_type = item.media_type - if type(item.url) == dict: - json_data = item.url - else: - json_data = load_json(item) - contentTitle = contentSerieName = '' - infoLabels = item.infoLabels if item.infoLabels else {} - - if json_data: - for i, media in enumerate(json_data[media_type]): - - if media.has_key('search'): continue - - if pagination and (pag - 1) * pagination > i: continue # pagination - if pagination and i >= pag * pagination: break # pagination - - quality, language, plot, poster = set_extra_values(media, item.path) - - fulltitle = media['title'] - title = set_title(fulltitle, language, quality) - - infoLabels['year'] = media['year'] if media.has_key('year')else '' - infoLabels['tmdb_id'] = media['tmdb_id'] if media.has_key('tmdb_id') else '' - - if 'movies_list' in json_data or 'generic_list' in json_data: - url= media - contentTitle = fulltitle - contentType = 'movie' - action='findvideos' + get_menu(item, json, key, itemlist) + if item.filterkey: + itemlist += submenu(item, json, key) + elif key in ['movies_list','tvshows_list', 'generic_list']: + itemlist += peliculas(item, json, key) + elif key in ['episodes_list']: + itemlist += episodios(item, json, key) + elif key in ['links']: + itemlist += findvideos(item) + if 'channel_name' in json: + if 'search' in json and 'url' in json['search']: + search_json = json['search'] + itemlist += get_search_menu(item, search_json, channel_name=json['channel_name']) else: - contentSerieName = fulltitle - contentType = 'tvshow' - if media.has_key('seasons_list'): - url = media['seasons_list'] - action = 'get_seasons' - else: - url = relative('link', media, item.path) - action = 'episodios' + itemlist += get_search_menu(item, json, channel_name=json['channel_name']) - itemlist.append(Item(channel=item.channel, - contentType=contentType, - title=format_title(title), - fulltitle=fulltitle, - show=fulltitle, - quality=quality, - language=language, - plot=plot, - personal_plot=plot, - thumbnail=poster, - path=item.path, - url=url, - contentTitle=contentTitle, - contentSerieName=contentSerieName, - infoLabels=infoLabels, - action=action)) - - if pagination and len(json_data[media_type]) >= pag * pagination: - if inspect.stack()[1][3] != 'get_newest': - itemlist.append( - Item(channel=item.channel, - action = 'list_all', - contentType=contentType, - title=typo(config.get_localized_string(30992), 'color kod bold'), - fulltitle= item.fulltitle, - show= item.show, - url=item.url, - args=item.args, - page=pag + 1, - path=item.path, - media_type=item.media_type, - thumbnail=support.thumb())) - - if not 'generic_list' in json_data: - 
tmdb.set_infoLabels(itemlist, seekTmdb=True)
-        for item in itemlist:
-            if item.personal_plot != item.plot and item.personal_plot:
-                item.plot = '\n\n' + typo('','submenu') + '\n' + item.personal_plot + '\n' + typo('','submenu') + '\n\n' + item.plot
     return itemlist
-
-
-def list_filtered(item):
-    support.log()
-
-    if inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes']:
-        pagination = int(defp) if defp.isdigit() else ''
-    else: pagination = ''
-    pag = item.page if item.page else 1
-
+def search(item, text):
+    support.log(text)
     itemlist = []
-    media_type = item.media_type
-    json_data = load_json(item)
-    contentTitle = contentSerieName = ''
+
+    if item.custom_search:
+        if '{}' in item.custom_search:
+            item.url = item.custom_search.format(text)
+        else:
+            item.url = item.custom_search + text
+    elif item.global_search:
+        itemlist = global_search(item, text)
+    else:
+        item.search = text
+
+    json = load_json(item)
+    if json:
+        for key in json:
+            peliculas(item, json, key, itemlist)
+
+    return itemlist
+
+def global_search(item, text):
+    itemlist = []
+    json = load_json(item)
+    item.global_search = None
+
+    if 'menu' in json:
+        for option in json['menu']:
+            if option in ['submenu', 'level2'] and 'search' in json['menu'][option] and 'url' in json['menu'][option]['search']:
+                item.custom_search = json['menu'][option]['search']['url']
+                itemlist += search(item, text)
+            else:
+                extra = set_extra_values(item, option, item.path)
+                item.url = extra.url
+                if item.url:
+                    itemlist += global_search(item, text)
+
+    if any(key in json for key in ['movies_list','tvshows_list', 'generic_list']):
+        itemlist += search(item, text)
+    return itemlist
+
+
+
+
+def peliculas(item, json='', key='', itemlist=None):
+    if itemlist is None: itemlist = []
+    item.plot = item.thumb = item.fanart = ''
     support.log()
+    if not json:
+        key = item.key
+        json = load_json(item)[key]
+    else:
+        json = json[key]
+    infoLabels = item.infoLabels if item.infoLabels else {}
+    contentType = 'tvshow' if 'tvshow' in key else 'movie'
+    itlist, filterkey = [], []
+    action = 'findvideos'

-    if json_data:
-        for i, media in enumerate(json_data[media_type]):
-            if pagination and (pag - 1) * pagination > i: continue  # pagination
-            if pagination and i >= pag * pagination: break  # pagination
-            if media.has_key(item.filterkey):
-                filter_keys = [it.lower() for it in media[item.filterkey]] if type(media[item.filterkey]) == list else media[item.filterkey].lower()
-                if item.filter.lower() in filter_keys:
+    for option in json:
+        if item.filterkey and item.filterkey in option:
+            filterkey = [it.lower() for it in option[item.filterkey]] if type(option[item.filterkey]) == list else [option[item.filterkey].lower()]
+        title = option['title'] if 'title' in option else ''

-                    quality, language, plot, poster = set_extra_values(media, item.path)
+        if 'tvshows_list' in key:
+            action = 'episodios'

-                    fulltitle = media['title']
-                    title = set_title(fulltitle, language, quality)
+        # filter elements
+        if (not item.filter or item.filter.lower() in filterkey) and item.search.lower() in title.lower() and title:
+            extra = set_extra_values(item, option, item.path)

-                    infoLabels['year'] = media['year'] if media.has_key('year')else ''
-                    infoLabels['tmdb_id'] = media['tmdb_id'] if media.has_key('tmdb_id') else ''
+            infoLabels['year'] = option['year'] if 'year' in option else ''
+            infoLabels['tmdb_id'] = option['tmdb_id'] if 'tmdb_id' in option else ''

-                    if 'movies_list' in json_data or 'generic_list' in json_data:
-                        url= media
-                        contentTitle = fulltitle
-                        contentType = 'movie'
-                        
action='findvideos' + it = Item(channel = item.channel, + title = set_title(title, extra.language, extra.quality), + fulltitle = title, + show = title, + contentTitle = title if contentType == 'movie' else '', + contentSerieName = title if contentType != 'movie' else '', + contentType = contentType, + infoLabels = infoLabels, + url = extra.url, + path = item.path, + thumbnail = extra.thumb, + fanart = extra.fanart, + plot = extra.plot, + personal_plot = extra.plot, + action = action) + itlist.append(it) - else: - contentSerieName = fulltitle - contentType = 'tvshow' - if media.has_key('seasons_list'): - url = media['seasons_list'] - action = 'get_seasons' - else: - url = relative('link', media, item.path) - action = 'episodios' - - itemlist.append(Item(channel=item.channel, - contentType=contentType, - title=format_title(title), - fulltitle=fulltitle, - show=fulltitle, - quality=quality, - language=language, - plot=plot, - personal_plot=plot, - thumbnail=poster, - path=item.path, - url=url, - contentTitle=contentTitle, - contentSerieName=contentSerieName, - infoLabels=infoLabels, - action=action)) - - if pagination and len(json_data[media_type]) >= pag * pagination and len(itemlist) >= pag * pagination: - if inspect.stack()[1][3] != 'get_newest': - itemlist.append( - Item(channel=item.channel, - action = 'list_filtered', - contentType=contentType, - title=typo(config.get_localized_string(30992), 'color kod bold'), - fulltitle= item.fulltitle, - show= item.show, - url=item.url, - args=item.args, - page=pag + 1, - path=item.path, - media_type=item.media_type, - thumbnail=support.thumb())) - - if not 'generic_list' in json_data: - tmdb.set_infoLabels(itemlist, seekTmdb=True) - for item in itemlist: - if item.personal_plot != item.plot and item.personal_plot: - item.plot = '\n\n' + typo('','submenu') + '\n' + item.personal_plot + '\n' + typo('','submenu') + '\n\n' + item.plot + if not 'generic_list' in key: + tmdb.set_infoLabels(itlist, seekTmdb=True) + itemlist += itlist return itemlist def get_seasons(item): - itm = item support.log() itemlist = [] - infoLabels = item.infoLabels if item.infolabels else {} - list_seasons = item.url + infoLabels = item.infoLabels + json = item.url['seasons_list'] if type(item.url) == dict else item.url - for season in list_seasons: - infoLabels['season'] = season['season'] - title = config.get_localized_string(60027) % season['season'] - url = relative('link', season, item.path) + for option in json: + infoLabels['season'] = option['season'] + title = config.get_localized_string(60027) % option['season'] + extra = set_extra_values(item, option, item.path) + # url = relative('link', option, item.path) itemlist.append(Item(channel=item.channel, - title=format_title(title), + title=set_title(title), fulltitle=item.fulltitle, show=item.show, - thumbnails=item.thumbnails, - filterseason=str(season['season']), - url=url, + thumbnail=extra.thumb, + filterseason=int(option['season']), + url=extra.url, action='episodios', - contentSeason=season['season'], + contentSeason=option['season'], infoLabels=infoLabels, - contentType='tvshow', - path=item.path)) + contentType='season', + path=extra.path)) - - if inspect.stack()[1][3] in ['add_tvshow', "get_seasons"] or show_seasons == False: - it = [] + if inspect.stack()[2][3] in ['add_tvshow', 'get_episodes', 'update', 'find_episodes', 'get_newest'] or show_seasons == False: + itlist = [] for item in itemlist: - if os.path.isfile(item.url) or requests.head(item.url): it += episodios(item) - itemlist = it - - if 
inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes', 'get_newest']: - pagination = int(defp) if defp.isdigit() else '' - pag = itm.page if itm.page else 1 - it = [] - for i, item in enumerate(itemlist): - if pagination and (pag - 1) * pagination > i: continue # pagination - if pagination and i >= pag * pagination: break # pagination - it.append(item) - - if pagination and len(itemlist) >= pag * pagination: - itm.page = pag + 1 - itm.title=typo(config.get_localized_string(30992), 'color kod bold') - itm.thumbnail=support.thumb() - it.append(itm) - itemlist = it - else: - tmdb.set_infoLabels(itemlist, seekTmdb=True) - itemlist = sorted(itemlist, key=lambda i: i.title) - support.videolibrary(itemlist,item) - + itlist += episodios(item) + itemlist = itlist + if inspect.stack()[2][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes', 'get_newest'] and defpage: + itemlist = pagination(item, itemlist) return itemlist -def episodios(item): - support.log(item) - itm = item - - - if inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes']: - pagination = int(defp) if defp.isdigit() else '' - else: pagination = '' - pag = item.page if item.page else 1 - - itemlist = [] - if type(item.url) == dict: - json_data = {} - json_data['episodes_list'] = [item.url] - else: - json_data = load_json(item) - support.log(json_data) +def episodios(item, json ='', key='', itemlist =[]): + support.log() infoLabels = item.infoLabels - ep = 1 - season = infoLabels['season'] if infoLabels.has_key('season') else item.contentSeason if item.contentSeason else 1 + itm=item - for i, episode in enumerate(json_data['episodes_list']): - if pagination and (pag - 1) * pagination > i: continue # pagination - if pagination and i >= pag * pagination: break # pagination - match = [] - if episode.has_key('number'): - match = support.match(episode['number'], patron=r'(?P<season>\d+)x(?P<episode>\d+)').match - if not match and episode.has_key('title'): - match = support.match(episode['title'], patron=r'(?P<season>\d+)x(?P<episode>\d+)').match - if match: match = match[0] + if type(item.url) == dict: + if 'seasons_list' in item.url: + return get_seasons(item) + else: + json = {} + json = item.url['episodes_list'] + else: + json = load_json(item)['episodes_list'] + + # set variable + ep = 1 + season = infoLabels['season'] if 'season' in infoLabels else item.contentSeason if item.contentSeason else 1 + # make items + for option in json: + # build numeration of episodes + numeration = option['number'] if 'number' in option else option['title'] + match = support.match(numeration , patron=r'(?P<season>\d+)x(?P<episode>\d+)').match if match: episode_number = match[1] ep = int(match[1]) + 1 - season_number = match[0] + season_number = int(match[0]) else: - season_number = episode['season'] if episode.has_key('season') else season if season else 1 - episode_number = episode['number'] if episode.has_key('number') else '' + season_number = option['season'] if 'season' in option else season if season else 1 + episode_number = option['number'] if 'number' in option else '' if not episode_number.isdigit(): - episode_number = support.match(episode['title'], patron=r'(?P<episode>\d+)').match + episode_number = support.match(option['title'], patron=r'(?P<episode>\d+)').match ep = int(episode_number) if episode_number else ep if not episode_number: episode_number = str(ep).zfill(2) @@ -591,92 +279,381 @@ def episodios(item): infoLabels['season'] = season_number 
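# [Editor's note] A minimal, self-contained sketch of the SxE parsing performed
# by episodios() in this hunk; it assumes support.match() wraps re.findall() as
# in core/support.py, and the helper name below is illustrative, not part of
# the patch:
import re

def parse_numeration(numeration, fallback_season=1, next_ep=1):
    # '3x07' -> (3, '07'); anything without an SxE marker falls back to the
    # running season/episode counters kept by episodios()
    found = re.findall(r'(?P<season>\d+)x(?P<episode>\d+)', str(numeration))
    if found:
        return int(found[0][0]), found[0][1]
    return fallback_season, str(next_ep).zfill(2)

# parse_numeration('3x07') -> (3, '07'); parse_numeration('Speciale') -> (1, '01')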
infoLabels['episode'] = episode_number - - plot = episode['plot'] if episode.has_key('plot') else item.plot - thumbnail = episode['poster'] if episode.has_key('poster') else episode['thumbnail'] if episode.has_key('thumbnail') else item.thumbnail - - title = ' - ' + episode['title'] if episode.has_key('title') else '' + title = ' - ' + option['title'] if 'title' in option else '' title = '%sx%s%s' % (season_number, episode_number, title) - + extra = set_extra_values(item, option, item.path) if season_number == item.filterseason or not item.filterseason: - itemlist.append(Item(channel= item.channel, - title= format_title(title), + itemlist.append(Item(channel = item.channel, + title = set_title(title, extra.language, extra.quality), fulltitle = item.fulltitle, show = item.show, - url= episode, - action= 'findvideos', - plot= plot, - thumbnail= thumbnail, - contentSeason= season_number, - contentEpisode= episode_number, - infoLabels= infoLabels, - contentType= 'episode', - path=item.path)) + url = option, + action = 'findvideos', + plot = extra.plot, + thumbnail= extra.thumb if extra.thumb else item.thumbnail, + fanart = extra.fanart, + contentSeason = season_number, + contentEpisode = episode_number, + infoLabels = infoLabels, + contentType = 'episode', + path = item.path)) - - if show_seasons == True and inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes'] and not item.filterseason: - itm.contentType='season' - season_list = [] - for item in itemlist: - if item.contentSeason not in season_list: - season_list.append(item.contentSeason) - itemlist = [] - for season in season_list: - itemlist.append(Item(channel=item.channel, - title=format_title(config.get_localized_string(60027) % season), - fulltitle=itm.fulltitle, - show=itm.show, - thumbnails=itm.thumbnails, - url=itm.url, - action='episodios', - contentSeason=season, - infoLabels=infoLabels, - filterseason=str(season), - path=item.path)) - - elif pagination and len(json_data['episodes_list']) >= pag * pagination: - if inspect.stack()[1][3] != 'get_newest': - itemlist.append( - Item(channel=item.channel, - action = 'episodios', - contentType='episode', - title=typo(config.get_localized_string(30992), 'color kod bold'), - fulltitle= item.fulltitle, - show= item.show, - url=item.url, - args=item.args, - page=pag + 1, - media_type=item.media_type, - thumbnail=support.thumb(), - path=item.path)) - - tmdb.set_infoLabels(itemlist, seekTmdb=True) + # if showseason + if inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes', 'get_newest']: + if show_seasons and not item.filterseason: + itm.contentType='season' + season_list = [] + for item in itemlist: + if item.contentSeason not in season_list: + season_list.append(item.contentSeason) + itemlist = [] + for season in season_list: + itemlist.append(Item(channel=item.channel, + title=set_title(config.get_localized_string(60027) % season), + fulltitle=itm.fulltitle, + show=itm.show, + thumbnails=itm.thumbnails, + url=itm.url, + action='episodios', + contentSeason=season, + infoLabels=infoLabels, + filterseason=str(season), + path=item.path)) + elif defpage and inspect.stack()[1][3] not in ['get_seasons']: + itemlist = pagination(item, itemlist) return itemlist +# Find Servers def findvideos(item): support.log() itemlist = [] if 'links' in item.url: - for url in item.url['links']: - quality, language, plot, poster = set_extra_values(url, item.path) - title = item.fulltitle + (' - '+url['title'] if url.has_key('title') else '') - title 
= set_title(title, language, quality)
+        json = item.url['links']
+    else:
+        json = item.url
+    for option in json:
+        extra = set_extra_values(item, option, item.path)
+        title = item.fulltitle + (' - ' + option['title'] if 'title' in option else '')
+        title = set_title(title, extra.language, extra.quality)
+        # keep a literal '%s' in the title so get_servers_itemlist() can fill in
+        # the server name through the lambda below
+        title = support.typo('%s', '_ [] color kod bold') + ' - ' + title
-            itemlist.append(Item(channel=item.channel, title=format_title(typo('%s','color kod') + ' - ' + title), url=url['url'], action='play', quality=quality,
-                                 language=language, infoLabels = item.infoLabels))
+        itemlist.append(Item(channel=item.channel, title=title, url=option['url'], action='play', quality=extra.quality,
+                             language=extra.language, infoLabels=item.infoLabels))
-    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
+    itemlist = support.servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
+    if inspect.stack()[2][3] != 'start_download':
+        autoplay.start(itemlist, item)
+    support.videolibrary(itemlist, item)
+    return itemlist
-    if inspect.stack()[2][3] != 'start_download':
-        autoplay.start(itemlist, item)
-    return itemlist
+################################ Menu ################################
+
+def get_menu(item, json, key, itemlist=None):
+    support.log()
+    if itemlist is None: itemlist = []  # avoid a shared mutable default
+    json = json[key]
+    for option in json:
+        title = option['title'] if 'title' in option else json[option] if 'search' not in option else ''
+        extra = set_extra_values(item, option, item.path)
+        level2 = option if 'level2' in option else []
+        it = Item(channel = item.channel,
+                  title = support.typo(title, 'bullet bold'),
+                  fulltitle = title,
+                  show = title,
+                  url = extra.url,
+                  path = item.path,
+                  thumbnail = extra.thumb,
+                  fanart = extra.fanart,
+                  plot = extra.plot,
+                  action = 'show_menu',
+                  menu = level2 if not item.menu else None)
+        if title:
+            itemlist.append(it)
+
+        if 'search' in option:
+            get_search_menu(it, json, itemlist)
+
+        elif 'submenu' in option:
+            get_sub_menu(it, option, 'submenu', itemlist)
+
+    return support.thumb(itemlist)
+
+
+def get_sub_menu(item, json, key, itemlist=None):
+    support.log('SUBMENU', item)
+    if itemlist is None: itemlist = []  # avoid a shared mutable default
+    json = json[key]
+    search = False
+    if item.menu:
+        item.menu = None
+        itemlist.append(item)
+    for option in json:
+        support.log(item)
+        title = json[option]['title'] if 'title' in json[option] else json[option] if option != 'search' else ''
+        if title:
+            extra = set_extra_values(item, json[option], item.path)
+            if not extra.url: extra.url = item.url
+            filterkey = option
+            level2 = option if 'level2' in option else []
+            it = Item(channel = item.channel,
+                      title = support.typo(title, 'submenu'),
+                      fulltitle = title,
+                      show = title,
+                      url = extra.url,
+                      path = item.path,
+                      thumbnail = extra.thumb,
+                      fanart = extra.fanart,
+                      plot = extra.plot,
+                      action = 'show_menu',
+                      menu = level2 if not item.menu else None,
+                      filterkey = filterkey)
+            itemlist.append(it)
+
+        if 'search' in option:
+            search = True
+            search_json = json[option]
+
+    if search:
+        get_search_menu(item, search_json, itemlist)
+
+    return itemlist
+
+
+def get_search_menu(item, json='', itemlist=None, channel_name=''):
+    support.log()
+    if itemlist is None: itemlist = []  # avoid a shared mutable default
+    if channel_name:
+        title = 'Cerca in ' + channel_name + '...'
+    else:
+        title = 'Cerca ' + item.fulltitle + '...'
+    extra = set_extra_values(item, json, item.path)
+
+    itemlist.append(Item(channel=item.channel,
+                         title=support.typo(title, 'submenu bold'),
+                         fulltitle=title,
+                         show=title,
+                         thumbnail=extra.thumb if extra.thumb else get_thumb('search.png'),
+                         fanart=extra.fanart if extra.fanart else item.fanart,
+                         plot=extra.plot,
+                         action='search',
+                         url=item.url,
+                         custom_search=extra.url if extra.url != item.url else '',
+                         path=item.path,
+                         global_search=True if channel_name else False))
+
+    return itemlist
+
+
+def submenu(item, json, key, itemlist=None):
+    from lib.concurrent import futures
+    if itemlist is None: itemlist = []  # avoid a shared mutable default
+
+    filter_list = []
+    for option in json[key]:
+        if item.filterkey in option and option[item.filterkey]:
+            if type(option[item.filterkey]) == str and option[item.filterkey] not in filter_list:
+                filter_list.append(option[item.filterkey])
+            elif type(option[item.filterkey]) == list:
+                for f in option[item.filterkey]:
+                    if f not in filter_list:
+                        filter_list.append(f)
+
+    filter_list.sort()
+
+    with futures.ThreadPoolExecutor() as executor:
+        List = [executor.submit(filter_thread, filter, key, item) for filter in filter_list]
+        for res in futures.as_completed(List):
+            if res.result():
+                itemlist.append(res.result())
+    itemlist = sorted(itemlist, key=lambda it: it.title)
+    return itemlist
+
+
+################################ Filter results ################################
+
+# filter results
+def filter_thread(filter, key, item):
+    thumbnail = ''
+    plot = ''
+    if item.filterkey in ['actors', 'director']:
+        dict_ = {'url': 'search/person', 'language': lang, 'query': filter, 'page': 1}
+        tmdb_inf = tmdb.discovery(item, dict_=dict_)
+        if tmdb_inf.results:
+            results = tmdb_inf.results[0]
+            id = results['id']
+            if id:
+                thumbnail = 'http://image.tmdb.org/t/p/original' + results['profile_path'] if results['profile_path'] else item.thumbnail
+                json_file = httptools.downloadpage('http://api.themoviedb.org/3/person/' + str(id) + '?api_key=' + tmdb_api + '&language=en', use_requests=True).data
+                plot += jsontools.load(json_file)['biography']
+
+    item = Item(channel=item.channel,
+                title=support.typo(filter, 'bold'),
+                url=item.url,
+                media_type=item.media_type,
+                action='peliculas',
+                thumbnail=thumbnail,
+                plot=plot,
+                path=item.path,
+                filterkey=item.filterkey,
+                filter=filter,
+                key=key)
+    return item
+
+
+################################ Utils ################################
+
+# for load json from item or url
+def load_json(item):
+    support.log()
+
+    url = item.url if type(item) == Item else item
+    try:
+        if url.startswith('http'):
+            json_file = httptools.downloadpage(url).data
+        else:
+            json_file = open(url, "r").read()
+
+        json = jsontools.load(json_file)
+
+    except:
+        json = {}
+
+    return json
+
+
+# Load Channels json and check that the paths and channel titles are correct
+def load_and_check(item):
+    support.log()
+    path = filetools.join(config.get_data_path(), 'community_channels.json')
+    file = open(path, "r")
+    json = jsontools.load(file.read())
+
+    for key, channel in json['channels'].items():
+        if not 'checked' in channel:
+            response = httptools.downloadpage(channel['path'], follow_redirects=True, timeout=5)
+            if response.sucess:
+                channel['path'] = response.url
+            channel['channel_name'] = re.sub(r'\[[^\]]+\]', '', channel['channel_name'])
+            # mark with the same key that is tested above ('checked', not 'check')
+            channel['checked'] = True
+
+    with open(path, "w") as file:
+        file.write(jsontools.dump(json))
+    return json
+
+
+# set extra values
+def set_extra_values(item, json, path):
+    support.log(json)
+    ret = Item()
+    for key in json:
+        if key == 'quality':
ret.quality = json[key].upper() + elif key == 'language': + ret.language = json[key].upper() + elif key == 'plot': + ret.plot = json[key] + elif key in ['poster', 'thumbnail']: + ret.thumb = json[key] if ':/' in json[key] else filetools.join(path,json[key]) if '/' in json[key] else get_thumb(json[key]) + elif key == 'fanart': + ret.fanart = json[key] if ':/' in json[key] else filetools.join(path,json[key]) + elif key in ['url', 'link']: + ret.url = json[key] if ':/' in json[key] or type(json[key]) == dict else filetools.join(path,json[key]) + elif key == 'seasons_list': + ret.url = {} + ret.url['seasons_list'] = json['seasons_list'] + elif key == 'episodes_list': + ret.url = {} + ret.url['episodes_list'] = json['episodes_list'] + elif key == 'links': + ret.url={} + ret.url['links'] = json[key] + + if not ret.thumb: + ret.thumb = item.thumbnail + if not ret.fanart: + ret.fanart = item.fanart + if not ret.plot: + ret.plot = item.plot + + return ret + + +# format titles +def set_title(title, language='', quality=''): + support.log() + + t = support.match(title, patron=r'\{([^\}]+)\}').match + if 'bold' not in t: t += ' bold' + title = re.sub(r'(\{[^\}]+\})','',title) + title = support.typo(title,t) + + if quality: + title += support.typo(quality, '_ [] color kod bold') + if language: + if not isinstance(language, list): + title += support.typo(language.upper(), '_ [] color kod bold') + else: + for lang in language: + title += support.typo(lang.upper(), '_ [] color kod bold') + + return title + + +# for relative path +def relative(key, json, path): + support.log() + ret = '' + if key in json: + if key in ['thumbnail', 'poster']: + ret = json[key] if ':/' in json[key] else filetools.join(path,json[key]) if '/' in json[key] else get_thumb(json[key]) if json[key] else '' + else: + ret = json[key] if ':/' in json[key] else filetools.join(path,json[key]) if '/' in json[key] else '' + + return ret + + +def pagination(item, itemlist = []): + support.log() + import json + itlist = [] + + if not itemlist: + itemlist = [] + for it in item.itemlist: + itemlist.append(Item().fromurl(it)) + encoded_itemlist = [] + for it in itemlist: + encoded_itemlist.append(it.tourl()) + if inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes']: + Pagination = int(defp) if defp.isdigit() else '' + else: Pagination = '' + pag = item.page if item.page else 1 + + for i, item in enumerate(itemlist): + if Pagination and (pag - 1) * Pagination > i: continue # pagination + if Pagination and i >= pag * Pagination: break # pagination + + itlist.append(item) + + if Pagination and len(itemlist) >= pag * Pagination: + if inspect.stack()[1][3] != 'get_newest': + itlist.append( + Item(channel=item.channel, + action = 'pagination', + contentType=item.contentType, + title=support.typo(config.get_localized_string(30992), 'color kod bold'), + fulltitle= item.fulltitle, + show= item.show, + url=item.url, + args=item.args, + page=pag + 1, + path=item.path, + media_type=item.media_type, + thumbnail=support.thumb(), + itemlist= encoded_itemlist)) + return itlist def add_channel(item): support.log() - channel_to_add = {} json_file = '' result = platformtools.dialog_select(config.get_localized_string(70676), [config.get_localized_string(70678), config.get_localized_string(70679)]) @@ -705,16 +682,16 @@ def add_channel(item): platformtools.dialog_ok(config.get_localized_string(20000), config.get_localized_string(70682)) return channel_to_add['channel_name'] = json_file['channel_name'] - if 
json_file.has_key('thumbnail'): channel_to_add['thumbnail'] = json_file['thumbnail'] - if json_file.has_key('fanart'): channel_to_add['fanart'] = json_file['fanart'] - path = os.path.join(config.get_data_path(), 'community_channels.json') + if 'thumbnail' in json_file: channel_to_add['thumbnail'] = json_file['thumbnail'] + if 'fanart' in json_file: channel_to_add['fanart'] = json_file['fanart'] + path = filetools.join(config.get_data_path(), 'community_channels.json') community_json = open(path, "r") community_json = jsontools.load(community_json.read()) id = 1 - while community_json['channels'].has_key(str(id)): + while str(id) in community_json['channels']: id +=1 - community_json['channels'][id]=(channel_to_add) + community_json['channels'][str(id)]=(channel_to_add) with open(path, "w") as file: file.write(jsontools.dump(community_json)) @@ -723,11 +700,10 @@ def add_channel(item): platformtools.dialog_notification(config.get_localized_string(20000), config.get_localized_string(70683) % json_file['channel_name']) return - def remove_channel(item): support.log() - path = os.path.join(config.get_data_path(), 'community_channels.json') + path = filetools.join(config.get_data_path(), 'community_channels.json') community_json = open(path, "r") community_json = jsontools.load(community_json.read()) @@ -742,144 +718,3 @@ def remove_channel(item): platformtools.dialog_notification(config.get_localized_string(20000), config.get_localized_string(70684) % to_delete) platformtools.itemlist_refresh() return - - -def set_extra_values(dict, path): - support.log() - quality = '' - language = '' - plot = '' - poster = '' - - if 'quality' in dict and dict['quality'] != '': - quality = dict['quality'].upper() - if 'language' in dict and dict['language'] != '': - language = dict['language'].upper() - if 'plot' in dict and dict['plot'] != '': - plot = dict['plot'] - if 'poster' in dict and dict['poster'] != '': - poster = dict['poster']if ':/' in dict['poster'] else path + dict['poster'] if '/' in dict['poster'] else '' - elif 'thumbnail' in dict and dict['thumbnail'] != '': - poster = dict['thumbnail']if ':/' in dict['thumbnail'] else path + dict['thumbnail'] if '/' in dict['thumbnail'] else '' - - return quality, language, plot, poster - -def set_title(title, language, quality): - support.log() - - if not config.get_setting('unify'): - if quality != '': - title += typo(quality, '_ [] color kod') - if language != '': - if not isinstance(language, list): - title += typo(language.upper(), '_ [] color kod') - else: - for lang in language: - title += typo(lang.upper(), '_ [] color kod') - - return title - - -def format_title(title): - t = scrapertools.find_single_match(title, r'\{([^\}]+)\}') - if 'bold' not in t: t += ' bold' - title = re.sub(r'(\{[^\}]+\})','',title) - return typo(title,t) - - -def search(item, text): - support.log('Search ', text) - if item.custom_url: - item.url=item.custom_url + text - if item.search_menu: - return show_menu(item) - itemlist = [] - json_data = load_json(item) - - return load_links(item, itemlist, json_data, text) - - -def load_links(item, itemlist, json_data, text): - support.log(json_data) - - def links(item, itemlist, json_data, text): - support.log() - - if json_data.has_key("movies_list"): media_type= 'movies_list' - elif json_data.has_key("tvshows_list"): media_type = 'tvshows_list' - elif json_data.has_key("episodes_list"): media_type = 'episodes_list' - elif json_data.has_key("generic_list"): media_type= 'generic_list' - - if json_data: - for media in 
json_data[media_type]: - if media.has_key('search'): continue - if text.lower() in media['title'].lower(): - quality, language, plot, poster = set_extra_values(media, item.path) - - title = media['title'] - title = set_title(title, language, quality) - - new_item = Item(channel=item.channel, title=format_title(title), quality=quality, - language=language, plot=plot, personal_plot=plot, thumbnail=poster, path=item.path) - - new_item.infoLabels['year'] = media['year'] if 'year' in media else '' - new_item.infoLabels['tmdb_id'] = media['tmdb_id'] if 'tmdb_id' in media else '' - - if 'movies_list' in json_data or 'generic_list' in json_data: - new_item.url = media - new_item.contentTitle = media['title'] - new_item.action = 'findvideos' - elif 'tvshows_list' in json_data: - new_item.url = media - new_item.contentTitle = media['title'] - new_item.action = 'episodios' - else: - new_item.url = media['seasons_list'] - new_item.contentSerieName = media['title'] - new_item.action = 'seasons' - - itemlist.append(new_item) - - if not 'generic_list' in json_data: - tmdb.set_infoLabels(itemlist, seekTmdb=True) - for item in itemlist: - if item.personal_plot != item.plot and item.personal_plot: - item.plot = '\n\n' + typo('','submenu') + '\n' + item.personal_plot + '\n' + typo('','submenu') + '\n\n' + item.plot - - if json_data.has_key('menu'): - for option in json_data['menu']: - if option.has_key('link'): - json_data = load_json(option['link'] if option['link'].startswith('http') else item.path+option['link']) - load_links(item, itemlist, json_data, text) - else: - links(item, itemlist, json_data, text) - - return itemlist - - -def relative(key, json, path): - if json.has_key(key): - if key == 'thumbnail': - ret = json[key] if ':/' in json[key] else path + json[key] if '/' in json[key] else get_thumb(json[key]) if json[key] else '' - else: - ret = json[key] if ':/' in json[key] else path + json[key] if '/' in json[key] else '' - else: - ret = '' - return ret - - -def load_json(item): - support.log() - url= item if type(item) == str else item.url - try: - if url.startswith('http'): - json_file = httptools.downloadpage(url).data - else: - json_file = open(url, "r").read() - - json_data = jsontools.load(json_file) - - except: - json_data = {} - - return json_data \ No newline at end of file diff --git a/specials/favorites.py b/specials/favorites.py index f12e7944..51089d8f 100644 --- a/specials/favorites.py +++ b/specials/favorites.py @@ -176,14 +176,14 @@ def readbookmark(filepath): except: plot = lines[4].strip() - # Campos fulltitle y canal añadidos + # Campos contentTitle y canal añadidos if len(lines) >= 6: try: - fulltitle = urllib.unquote_plus(lines[5].strip()) + contentTitle = urllib.unquote_plus(lines[5].strip()) except: - fulltitle = lines[5].strip() + contentTitle = lines[5].strip() else: - fulltitle = titulo + contentTitle = titulo if len(lines) >= 7: try: @@ -195,7 +195,7 @@ def readbookmark(filepath): bookmarkfile.close() - return canal, titulo, thumbnail, plot, server, url, fulltitle + return canal, titulo, thumbnail, plot, server, url, contentTitle def check_bookmark(readpath): @@ -213,11 +213,11 @@ def check_bookmark(readpath): time.sleep(0.1) # Obtenemos el item desde el .txt - canal, titulo, thumbnail, plot, server, url, fulltitle = readbookmark(filetools.join(readpath, fichero)) + canal, titulo, thumbnail, plot, server, url, contentTitle = readbookmark(filetools.join(readpath, fichero)) if canal == "": canal = "favorites" - item = Item(channel=canal, action="play", url=url, 
server=server, title=fulltitle, thumbnail=thumbnail, - plot=plot, fanart=thumbnail, fulltitle=fulltitle, folder=False) + item = Item(channel=canal, action="play", url=url, server=server, title=contentTitle, thumbnail=thumbnail, + plot=plot, fanart=thumbnail, contentTitle=contentTitle, folder=False) filetools.rename(filetools.join(readpath, fichero), fichero[:-4] + ".old") itemlist.append(item) diff --git a/specials/filtertools.py b/specials/filtertools.py index 0acdeacd..f8d05001 100644 --- a/specials/filtertools.py +++ b/specials/filtertools.py @@ -3,11 +3,13 @@ # filtertools - se encarga de filtrar resultados # ------------------------------------------------------------ -from core import channeltools +from builtins import object + from core import jsontools from core.item import Item from platformcode import config, logger from platformcode import platformtools +from core import channeltools TAG_TVSHOW_FILTER = "TVSHOW_FILTER" TAG_NAME = "name" @@ -28,7 +30,7 @@ __channel__ = "filtertools" # TODO echar un ojo a https://pyformat.info/, se puede formatear el estilo y hacer referencias directamente a elementos -class ResultFilter: +class ResultFilter(object): def __init__(self, dict_filter): self.active = dict_filter[TAG_ACTIVE] self.language = dict_filter[TAG_LANGUAGE] @@ -39,7 +41,7 @@ class ResultFilter: (self.active, self.language, self.quality_allowed) -class Filter: +class Filter(object): def __init__(self, item, global_filter_lang_id): self.result = None self.__get_data(item, global_filter_lang_id) @@ -51,7 +53,7 @@ class Filter: global_filter_language = config.get_setting(global_filter_lang_id, item.channel) - if tvshow in dict_filtered_shows.keys(): + if tvshow in list(dict_filtered_shows.keys()): self.result = ResultFilter({TAG_ACTIVE: dict_filtered_shows[tvshow][TAG_ACTIVE], TAG_LANGUAGE: dict_filtered_shows[tvshow][TAG_LANGUAGE], @@ -112,9 +114,9 @@ def context(item, list_language=None, list_quality=None, exist=False): """ # Dependiendo de como sea el contexto lo guardamos y añadimos las opciones de filtertools. 
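# [Editor's note] The hunk below swaps type() comparisons for isinstance(),
# which also accepts subclasses. A compact sketch of the normalisation being
# performed on item.context (helper name is illustrative, not from the patch):
def normalize_context(context):
    if isinstance(context, str):
        return context.split("|")   # "rename|download" -> ["rename", "download"]
    if isinstance(context, list):
        return context
    return []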
- if type(item.context) == str: + if isinstance(item.context, str): _context = item.context.split("|") - elif type(item.context) == list: + elif isinstance(item.context, list): _context = item.context else: _context = [] @@ -127,9 +129,9 @@ def context(item, list_language=None, list_quality=None, exist=False): dict_data["list_quality"] = list_quality added = False - if type(_context) == list: + if isinstance(_context, list): for x in _context: - if x and type(x) == dict: + if x and isinstance(x, dict): if x["channel"] == "filtertools": added = True break @@ -163,20 +165,30 @@ def load(item): def check_conditions(_filter, list_item, item, list_language, list_quality, quality_count=0, language_count=0): - if item.contentLanguage: item.language = item.contentLanguage - is_language_valid = True + if _filter.language: # logger.debug("title es %s" % item.title) + #2nd lang + + from platformcode import unify + _filter.language = unify.set_lang(_filter.language).upper() # viene de episodios if isinstance(item.language, list): + #2nd lang + for n, lang in enumerate(item.language): + item.language[n] = unify.set_lang(lang).upper() + if _filter.language in item.language: language_count += 1 else: is_language_valid = False # viene de findvideos else: + #2nd lang + item.language = unify.set_lang(item.language).upper() + if item.language.lower() == _filter.language.lower(): language_count += 1 else: @@ -194,6 +206,7 @@ def check_conditions(_filter, list_item, item, list_language, list_quality, qual is_quality_valid = False if is_language_valid and is_quality_valid: + #TODO 2nd lang: habría que ver si conviene unificar el idioma aqui o no item.list_language = list_language if list_quality: item.list_quality = list_quality @@ -208,7 +221,7 @@ def check_conditions(_filter, list_item, item, list_language, list_quality, qual logger.debug(" calidad valida?: %s, item.quality: %s, filter.quality_allowed: %s" % (is_quality_valid, quality, _filter.quality_allowed)) - return list_item, quality_count, language_count + return list_item, quality_count, language_count, _filter.language def get_link(list_item, item, list_language, list_quality=None, global_filter_lang_id="filter_languages"): @@ -244,7 +257,7 @@ def get_link(list_item, item, list_language, list_quality=None, global_filter_la if filter_global and filter_global.active: list_item, quality_count, language_count = \ - check_conditions(filter_global, list_item, item, list_language, list_quality) + check_conditions(filter_global, list_item, item, list_language, list_quality)[:3] else: item.context = context(item) list_item.append(item) @@ -271,6 +284,7 @@ def get_links(list_item, item, list_language, list_quality=None, global_filter_l """ logger.info() + # si los campos obligatorios son None salimos if list_item is None or item is None: return [] @@ -279,6 +293,13 @@ def get_links(list_item, item, list_language, list_quality=None, global_filter_l if len(list_item) == 0: return list_item + + second_lang = config.get_setting('second_language') + + #Ordena segun servidores favoritos, elima servers de blacklist y desactivados + from core import servertools + list_item= servertools.filter_servers(list_item) + logger.debug("total de items : %s" % len(list_item)) new_itemlist = [] @@ -288,13 +309,32 @@ def get_links(list_item, item, list_language, list_quality=None, global_filter_l _filter = Filter(item, global_filter_lang_id).result logger.debug("filter: '%s' datos: '%s'" % (item.show, _filter)) + if _filter and _filter.active: for item in list_item: - new_itemlist, 
quality_count, language_count = check_conditions(_filter, new_itemlist, item, list_language, + new_itemlist, quality_count, language_count, first_lang = check_conditions(_filter, new_itemlist, item, list_language, list_quality, quality_count, language_count) - logger.info("ITEMS FILTRADOS: %s/%s, idioma [%s]: %s, calidad_permitida %s: %s" + #2nd lang + if second_lang and second_lang != 'No' and first_lang.lower() != second_lang.lower() : + second_list= [] + _filter2 = _filter + _filter2.language = second_lang + for it in new_itemlist: + + if isinstance(it.language, list): + if not second_lang in it.language: + second_list.append(it) + else: + second_list = new_itemlist + break + for item in list_item: + new_itemlist, quality_count, language_count, second_lang = check_conditions(_filter2, second_list, item, list_language, + list_quality, quality_count, language_count) + + + logger.debug("ITEMS FILTRADOS: %s/%s, idioma [%s]: %s, calidad_permitida %s: %s" % (len(new_itemlist), len(list_item), _filter.language, language_count, _filter.quality_allowed, quality_count)) @@ -303,14 +343,19 @@ def get_links(list_item, item, list_language, list_quality=None, global_filter_l for i in list_item: list_item_all.append(i.tourl()) - _context = [{"title": config.get_localized_string(60430) % _filter.language, "action": "delete_from_context", - "channel": "filtertools", "to_channel": "seriesdanko"}] + _context = [ + {"title": config.get_localized_string(60430) % _filter.language, "action": "delete_from_context", + "channel": "filtertools", "to_channel": item.channel}] if _filter.quality_allowed: msg_quality_allowed = " y calidad %s" % _filter.quality_allowed else: msg_quality_allowed = "" - + + msg_lang = ' %s' % first_lang.upper() + if second_lang and second_lang != 'No': + msg_lang = 's %s ni %s' % (first_lang.upper(), second_lang.upper()) + new_itemlist.append(Item(channel=__channel__, action="no_filter", list_item_all=list_item_all, show=item.show, title=config.get_localized_string(60432) % (_filter.language, msg_quality_allowed), @@ -541,7 +586,7 @@ def save(item, dict_data_saved): logger.info("Se actualiza los datos") list_quality = [] - for _id, value in dict_data_saved.items(): + for _id, value in list(dict_data_saved.items()): if _id in item.list_quality and value: list_quality.append(_id.lower()) diff --git a/specials/help.py b/specials/help.py index 723a7fa9..182b22b0 100644 --- a/specials/help.py +++ b/specials/help.py @@ -40,14 +40,13 @@ def mainlist(item): logger.info() itemlist = [] + if config.is_xbmc(): + itemlist.append(Item(title=config.get_localized_string(707429), channel="setting", action="report_menu", + thumbnail=get_thumb("error.png"), viewmode="list")) + itemlist.append(Item(channel=item.channel, action="", title=config.get_localized_string(60447), thumbnail=get_thumb("help.png"), folder=False)) - if config.is_xbmc(): - itemlist.append(Item(channel=item.channel, action="faq", - title=config.get_localized_string(60448), - thumbnail=get_thumb("help.png"), - folder=False, extra="report_error")) itemlist.append(Item(channel=item.channel, action="faq", title=config.get_localized_string(60449), thumbnail=get_thumb("help.png"), diff --git a/specials/infoplus.py b/specials/infoplus.py index aeb4ef7f..cf873699 100644 --- a/specials/infoplus.py +++ b/specials/infoplus.py @@ -3,6 +3,14 @@ # infoplus ventana con información del Item # ------------------------------------------------------------ +from future import standard_library +standard_library.install_aliases() +#from builtins import str 
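# [Editor's note] The preamble added here (and to several other modules in this
# patch) aliases removed Python 2 builtins back in when running on Python 3, so
# legacy code paths keep working. A self-contained equivalent:
import sys
PY3 = sys.version_info[0] >= 3
if PY3:
    unicode, unichr, long = str, chr, int  # restore the Py2 names on Py3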
+import sys +PY3 = False +if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int +from builtins import range + import re from threading import Thread @@ -175,7 +183,7 @@ class main(xbmcgui.WindowDialog): if self.infoLabels["tmdb_id"]: otmdb = tmdb.Tmdb(id_Tmdb=self.infoLabels["tmdb_id"], tipo=tipo_busqueda) self.infoLabels["images"] = otmdb.result.get("images", {}) - for key, value in self.infoLabels["images"].items(): + for key, value in list(self.infoLabels["images"].items()): if not value: self.infoLabels["images"].pop(key) @@ -1216,22 +1224,31 @@ class related(xbmcgui.WindowDialog): def busqueda_global(item, infoLabels, org_title=False): logger.info() + + logger.debug(item) + if item.contentType != "movie": cat = ["serie"] else: cat = ["movie"] cat += ["infoPlus"] - new_item = Item() - new_item.extra = infoLabels.get("title", "") - new_item.extra = re.sub('\[.*?\]', '', new_item.extra) - - if org_title: - new_item.extra = infoLabels.get("originaltitle", "") - new_item.category = item.contentType + new_item = Item(title=item.contentTitle, text=item.contentTitle.replace("+", " "), mode=item.contentType, + infoLabels=item.infoLabels) from specials import search - return search.do_search(new_item, cat) + return search.channel_search(new_item) + + # new_item = Item() + # new_item.extra = infoLabels.get("title", "") + # new_item.extra = re.sub('\[.*?\]', '', new_item.extra) + # + # if org_title: + # new_item.extra = infoLabels.get("originaltitle", "") + # new_item.category = item.contentType + # + # from channels import search + # return search.do_search(new_item, cat) class Busqueda(xbmcgui.WindowXMLDialog): @@ -1273,7 +1290,8 @@ class Busqueda(xbmcgui.WindowXMLDialog): dialog = platformtools.dialog_progress_bg(config.get_localized_string(60496), config.get_localized_string(60497)) selectitem = self.getControl(6).getSelectedItem() item = Item().fromurl(selectitem.getProperty("item_copy")) - exec "import channels." + item.channel + " as channel" + #exec("import channels." + item.channel + " as channel") + channel = __import__('channels.%s' % item.channel, fromlist=["channels.%s" % item.channel]) itemlist = getattr(channel, item.action)(item) global SearchWindows window = GlobalSearch('DialogSelect.xml', config.get_runtime_path(), itemlist=itemlist, dialog=dialog) @@ -1332,7 +1350,8 @@ class GlobalSearch(xbmcgui.WindowXMLDialog): if (action == ACTION_SELECT_ITEM or action == 100) and self.getFocusId() == 6: selectitem = self.getControl(6).getSelectedItem() item = Item().fromurl(selectitem.getProperty("item_copy")) - exec "import channels." + item.channel + " as channel" + #exec("import channels." 
+ item.channel + " as channel") + channel = __import__('channels.%s' % item.channel, fromlist=["channels.%s" % item.channel]) ventana_error = None if item.action == "play": if hasattr(channel, 'play'): @@ -1539,7 +1558,7 @@ class ActorInfo(xbmcgui.WindowDialog): threads[i] = t while threads: - for key, t in threads.items(): + for key, t in list(threads.items()): if not t.isAlive(): threads.pop(key) xbmc.sleep(100) @@ -1956,10 +1975,10 @@ class images(xbmcgui.WindowDialog): self.mal = kwargs.get("mal", []) self.imagenes = [] - for key, value in self.tmdb.iteritems(): + for key, value in self.tmdb.items(): for detail in value: self.imagenes.append('http://image.tmdb.org/t/p/w342' + detail["file_path"]) - for tipo, child in self.fanartv.iteritems(): + for tipo, child in self.fanartv.items(): for imagen in child: self.imagenes.append(imagen["url"].replace("/fanart/", "/preview/")) for imagen, title in self.fa: @@ -2326,7 +2345,7 @@ def fanartv(item, infoLabels, images={}): url = "http://webservice.fanart.tv/v3/tv/%s?api_key=cab16e262d72fea6a6843d679aa10300" % id_search data = jsontools.load(httptools.downloadpage(url, headers=headers).data) if data and not "error message" in data: - for key, value in data.items(): + for key, value in list(data.items()): if key not in ["name", "tmdb_id", "imdb_id", "thetvdb_id"]: images[key] = value return images @@ -2398,15 +2417,14 @@ def translate(to_translate, to_language="auto", language="auto", i=0, bio=[]): Example: print(translate("salut tu vas bien?", "en")) hello you alright?''' - import urllib2 - import urllib + import urllib.request, urllib.error, urllib.parse agents = { 'User-Agent': "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)"} before_trans = 'class="t0">' - to_translate = urllib.quote(to_translate.replace(" ", "+")).replace("%2B", "+") + to_translate = urllib.parse.quote(to_translate.replace(" ", "+")).replace("%2B", "+") link = "http://translate.google.com/m?hl=%s&sl=%s&q=%s" % (to_language, language, to_translate) - request = urllib2.Request(link, headers=agents) - page = urllib2.urlopen(request).read() + request = urllib.request.Request(link, headers=agents) + page = urllib.request.urlopen(request).read() result = page[page.find(before_trans) + len(before_trans):] result = result.split("<")[0] result = re.sub(r"d>|nn", "", result) diff --git a/specials/kodfavorites.py b/specials/kodfavorites.py index 4b263b38..6fc0feac 100644 --- a/specials/kodfavorites.py +++ b/specials/kodfavorites.py @@ -4,24 +4,29 @@ # ============== # - Lista de enlaces guardados como favoritos, solamente en Alfa, no Kodi. # - Los enlaces se organizan en carpetas (virtuales) que puede definir el usuario. -# - Se utiliza un sólo fichero para guardar todas las carpetas y enlaces: alfavorites-default.json -# - Se puede copiar alfavorites-default.json a otros dispositivos ya que la única dependencia local es el thumbnail asociado a los enlaces, +# - Se utiliza un sólo fichero para guardar todas las carpetas y enlaces: kodfavourites-default.json +# - Se puede copiar kodfavourites-default.json a otros dispositivos ya que la única dependencia local es el thumbnail asociado a los enlaces, # pero se detecta por código y se ajusta al dispositivo actual. # - Se pueden tener distintos ficheros de alfavoritos y alternar entre ellos, pero solamente uno de ellos es la "lista activa". 
-# - Los ficheros deben estar en config.get_data_path() y empezar por alfavorites- y terminar en .json +# - Los ficheros deben estar en config.get_data_path() y empezar por kodfavourites- y terminar en .json # Requerimientos en otros módulos para ejecutar este canal: # - Añadir un enlace a este canal en channelselector.py # - Modificar platformtools.py para controlar el menú contextual y añadir "Guardar enlace" en set_context_commands # ------------------------------------------------------------ -import os -import re +#from builtins import str +import sys +PY3 = False +if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int +from builtins import object + +import os, re from datetime import datetime -from core import filetools, jsontools from core.item import Item from platformcode import config, logger, platformtools +from core import filetools, jsontools def fechahora_actual(): @@ -32,15 +37,15 @@ def fechahora_actual(): PREFIJO_LISTA = 'kodfavorites-' -# Devuelve el nombre de la lista activa (Ej: alfavorites-default.json) +# Devuelve el nombre de la lista activa (Ej: kodfavourites-default.json) def get_lista_activa(): return config.get_setting('lista_activa', default = PREFIJO_LISTA + 'default.json') -# Extrae nombre de la lista del fichero, quitando prefijo y sufijo (Ej: alfavorites-Prueba.json => Prueba) +# Extrae nombre de la lista del fichero, quitando prefijo y sufijo (Ej: kodfavourites-Prueba.json => Prueba) def get_name_from_filename(filename): return filename.replace(PREFIJO_LISTA, '').replace('.json', '') -# Componer el fichero de lista a partir de un nombre, añadiendo prefijo y sufijo (Ej: Prueba => alfavorites-Prueba.json) +# Componer el fichero de lista a partir de un nombre, añadiendo prefijo y sufijo (Ej: Prueba => kodfavourites-Prueba.json) def get_filename_from_name(name): return PREFIJO_LISTA + name + '.json' @@ -67,7 +72,7 @@ def text_clean(txt, disallowed_chars = '[^a-zA-Z0-9\-_()\[\]. 
]+', blank_char = # Clase para cargar y guardar en el fichero de Alfavoritos # -------------------------------------------------------- -class kodfavoritesData: +class KodfavouritesData(object): def __init__(self, filename = None): @@ -760,9 +765,11 @@ def compartir_lista(item): progreso.update(10, config.get_localized_string(70645), config.get_localized_string(70646)) # Envío del fichero a tinyupload mediante multipart/form-data + from future import standard_library + standard_library.install_aliases() from lib import MultipartPostHandler - import urllib2 - opener = urllib2.build_opener(MultipartPostHandler.MultipartPostHandler) + import urllib.request, urllib.error + opener = urllib.request.build_opener(MultipartPostHandler.MultipartPostHandler) params = { 'MAX_FILE_SIZE' : '52428800', 'file_description' : '', 'sessionid' : sessionid, 'uploaded_file' : open(fullfilename, 'rb') } handle = opener.open(upload_url, params) data = handle.read() @@ -856,7 +863,7 @@ def descargar_lista(item, url): if 'tinyupload.com/' in url: try: - from urlparse import urlparse + from urllib.parse import urlparse data = httptools.downloadpage(url).data logger.debug(data) down_url, url_name = scrapertools.find_single_match(data, ' href="(download\.php[^"]*)"><b>([^<]*)') @@ -872,7 +879,7 @@ def descargar_lista(item, url): if not puedes: platformtools.dialog_ok('Alfa', config.get_localized_string(70655), motivo) return False - url_json = video_urls[0][1] # https://www58.zippyshare.com/d/qPzzQ0UM/25460/alfavorites-testeanding.json + url_json = video_urls[0][1] # https://www58.zippyshare.com/d/qPzzQ0UM/25460/kodfavourites-testeanding.json url_name = url_json[url_json.rfind('/')+1:] elif 'friendpaste.com/' in url: @@ -887,7 +894,7 @@ def descargar_lista(item, url): # Download json data = httptools.downloadpage(url_json).data - # Verificar formato json de alfavorites y añadir info de la descarga + # Verificar formato json de kodfavourites y añadir info de la descarga jsondata = jsontools.load(data) if 'user_favorites' not in jsondata or 'info_lista' not in jsondata: logger.debug(data) diff --git a/specials/news.py b/specials/news.py index f1274853..dd9f6eef 100644 --- a/specials/news.py +++ b/specials/news.py @@ -3,6 +3,11 @@ # Channel for recent videos on several channels # ------------------------------------------------------------ +#from builtins import str +import sys +PY3 = False +if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int + import glob import os import re @@ -358,7 +363,7 @@ def get_newest(channel_id, categoria): modulo = __import__('channels.%s' % channel_id, fromlist=["channels.%s" % channel_id]) except: try: - exec "import channels." + channel_id + " as modulo" + exec("import channels." 
+ channel_id + " as modulo") except: puede = False @@ -409,8 +414,8 @@ def get_title(item): elif item.contentTitle: # Si es una pelicula con el canal adaptado title = item.contentTitle - elif item.fulltitle: # Si el canal no esta adaptado - title = item.fulltitle + elif item.contentTitle: # Si el canal no esta adaptado + title = item.contentTitle else: # Como ultimo recurso title = item.title @@ -510,7 +515,7 @@ def group_by_content(list_result_canal): dict_contenidos[new_key] = [i] # Añadimos el contenido encontrado en la lista list_result - for v in dict_contenidos.values(): + for v in list(dict_contenidos.values()): title = v[0].title if len(v) > 1: # Eliminar de la lista de nombres de canales los q esten duplicados @@ -667,7 +672,7 @@ def cb_custom_button(item, dict_values): if value == "": value = False - for v in dict_values.keys(): + for v in list(dict_values.keys()): dict_values[v] = not value if config.set_setting("custom_button_value_news", not value, item.channel) == True: diff --git a/specials/nextep.py b/specials/nextep.py index 8f6b1d78..61089e19 100644 --- a/specials/nextep.py +++ b/specials/nextep.py @@ -6,8 +6,14 @@ from core import scrapertools from core import jsontools, filetools from lib.concurrent import futures -PLAYER_STOP = 13 -ND = 'NextDialogCompact.xml' if config.get_setting('next_ep_type') else 'NextDialog.xml' +next_dialogs = ['NextDialog.xml', 'NextDialogExtended.xml', 'NextDialogCompact.xml'] +next_ep_type = config.get_setting('next_ep_type') + +# compatibility with previous version +if type(next_ep_type) == bool: + ND = 'NextDialogCompact.xml' if config.get_setting('next_ep_type') else 'NextDialog.xml' +else: + ND = next_dialogs[next_ep_type] def check(item): return True if config.get_setting('next_ep') > 0 and item.contentType != 'movie' else False @@ -73,7 +79,7 @@ def next_ep(item): base_path = os.path.basename(os.path.normpath(os.path.dirname(item.strm_path))) path = filetools.join(config.get_videolibrary_path(), config.get_setting("folder_tvshows"),base_path) fileList = [] - for file in os.listdir(path): + for file in filetools.listdir(path): if file.endswith('.strm'): fileList.append(file) @@ -84,6 +90,7 @@ def next_ep(item): next_file = None else: next_file = fileList[nextIndex] + logger.info('NEXTFILE' + next_file) # start next episode window afther x time if next_file: @@ -102,8 +109,9 @@ def next_ep(item): infoLabels= {'episode': episode, 'mediatype': 'tvshow', 'season': season, 'title': next_ep}, strm_path= filetools.join(base_path, next_file)) - global ITEM - ITEM = item + global INFO + INFO = filetools.join(path, next_file.replace("strm", "nfo")) + logger.info('NEXTINFO' + INFO) nextDialog = NextDialog(ND, config.get_runtime_path()) nextDialog.show() @@ -142,9 +150,23 @@ class NextDialog(xbmcgui.WindowXMLDialog): def __init__(self, *args, **kwargs): logger.info() - self.action_exitkeys_id = [10, 13] + self.action_exitkeys_id = [xbmcgui.ACTION_STOP, xbmcgui.ACTION_BACKSPACE, xbmcgui.ACTION_PREVIOUS_MENU, xbmcgui.ACTION_NAV_BACK] self.progress_control = None - self.item = ITEM + + # set info + with open(INFO, 'r') as f: + full_info = f.readlines() + full_info = full_info[1:] + full_info = "".join(full_info) + info = jsontools.load(full_info) + info = info["infoLabels"] + self.setProperty("title", info["tvshowtitle"]) + self.setProperty("ep_title", "%dx%02d - %s" % (info["season"], info["episode"], info["title"])) + if "episodio_imagen" in info: + img = info["episodio_imagen"] + else: + img = filetools.join(config.get_runtime_path(), "resources", 
"noimage.png") + self.setProperty("next_img", img) def set_still_watching(self, stillwatching): self.stillwatching = stillwatching @@ -175,6 +197,6 @@ class NextDialog(xbmcgui.WindowXMLDialog): def onAction(self, action): logger.info() - if action == PLAYER_STOP: + if action in self.action_exitkeys_id: self.set_continue_watching(False) self.close() diff --git a/specials/renumbertools.py b/specials/renumbertools.py index 89a391ac..ddb22080 100644 --- a/specials/renumbertools.py +++ b/specials/renumbertools.py @@ -3,6 +3,13 @@ # renumeratetools - se encarga de renumerar episodios # -------------------------------------------------------------------------------- +#from builtins import str +import sys +PY3 = False +if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int +from builtins import range +from builtins import object + import os try: @@ -44,9 +51,9 @@ def context(item): """ # Dependiendo de como sea el contexto lo guardamos y añadimos las opciones de filtertools. - if type(item.context) == str: + if isinstance(item.context, str): _context = item.context.split("|") - elif type(item.context) == list: + elif isinstance(item.context, list): _context = item.context else: _context = [] @@ -155,7 +162,7 @@ def numbered_for_tratk(channel, show, season, episode): dict_series = jsontools.get_node_from_file(channel, TAG_TVSHOW_RENUMERATE) # ponemos en minusculas el key, ya que previamente hemos hecho lo mismo con show. - for key in dict_series.keys(): + for key in list(dict_series.keys()): new_key = key.lower() if new_key != key: dict_series[new_key] = dict_series[key] @@ -548,7 +555,7 @@ if xbmcgui: if len(self.data) > 5: self.move_scroll() - except Exception, Ex: + except Exception as Ex: logger.error("HA HABIDO UNA HOSTIA %s" % Ex) # def onClick(self, control_id): @@ -850,7 +857,7 @@ if xbmcgui: self.move_scroll() - except Exception, Ex: + except Exception as Ex: logger.error("HA HABIDO UNA HOSTIA %s" % Ex) def move_scroll(self): @@ -887,7 +894,7 @@ if xbmcgui: return TextBox("DialogTextViewer.xml", os.getcwd(), "Default", title=title, text=text) - class ControlGroup: + class ControlGroup(object): """ conjunto de controles, son los elementos que se muestra por línea de una lista. 
""" diff --git a/specials/resolverdns.py b/specials/resolverdns.py index 73f043bb..56e5b126 100644 --- a/specials/resolverdns.py +++ b/specials/resolverdns.py @@ -1,7 +1,10 @@ # -*- coding: utf-8 -*- import os import ssl -import urlparse +try: + import urlparse +except: + import urllib.parse as urlparse from lib.requests_toolbelt.adapters import host_header_ssl from lib import doh @@ -74,7 +77,6 @@ class CipherSuiteAdapter(host_header_ssl.HostHeaderSSLAdapter): logger.error('Failed to resolve hostname, fallback to normal dns') import traceback logger.error(traceback.print_exc()) - ip = domain return ip def writeToCache(self, domain, ip): @@ -105,39 +107,41 @@ class CipherSuiteAdapter(host_header_ssl.HostHeaderSSLAdapter): domain = parse.netloc else: raise requests.exceptions.URLRequired - self.ssl_context = CustomContext(protocol, domain) - if self.CF: - self.ssl_context.options |= (ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1) - self.ssl_context.set_ciphers(self.cipherSuite) - self.init_poolmanager(self._pool_connections, self._pool_maxsize, block=self._pool_block) ip = self.getIp(domain) + if ip: + self.ssl_context = CustomContext(protocol, domain) + if self.CF: + self.ssl_context.options |= (ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1) + self.ssl_context.set_ciphers(self.cipherSuite) + self.init_poolmanager(self._pool_connections, self._pool_maxsize, block=self._pool_block) + realUrl = request.url - realUrl = request.url - - if request.headers: - request.headers["Host"] = domain - else: - request.headers = {"Host": domain} - ret = None - tryFlush = False - - parse = list(parse) - parse[1] = ip - request.url = urlparse.urlunparse(parse) - try: - ret = super(CipherSuiteAdapter, self).send(request, **kwargs) - except Exception as e: - logger.info('Request for ' + domain + ' with ip ' + ip + ' failed') - logger.info(e) - if 'SSLError' in str(e): - # disabilito - config.set_setting("resolver_dns", False) - request.url = realUrl - ret = super(CipherSuiteAdapter, self).send(request, **kwargs) + if request.headers: + request.headers["Host"] = domain else: + request.headers = {"Host": domain} + ret = None + tryFlush = False + + parse = list(parse) + parse[1] = ip + request.url = urlparse.urlunparse(parse) + try: + ret = super(CipherSuiteAdapter, self).send(request, **kwargs) + except Exception as e: + logger.info('Request for ' + domain + ' with ip ' + ip + ' failed') + logger.info(e) + # if 'SSLError' in str(e): + # # disabilito + # config.set_setting("resolver_dns", False) + # request.url = realUrl + # ret = super(CipherSuiteAdapter, self).send(request, **kwargs) + # else: tryFlush = True - if tryFlush and not flushedDns: # re-request ips and update cache - logger.info('Flushing dns cache for ' + domain) - return self.flushDns(request, domain, **kwargs) - ret.url = realUrl + if tryFlush and not flushedDns: # re-request ips and update cache + logger.info('Flushing dns cache for ' + domain) + return self.flushDns(request, domain, **kwargs) + ret.url = realUrl + else: + ret = super(host_header_ssl.HostHeaderSSLAdapter, self).send(request, **kwargs) return ret diff --git a/specials/search.py b/specials/search.py old mode 100755 new mode 100644 index 6b324898..6705216c --- a/specials/search.py +++ b/specials/search.py @@ -3,8 +3,16 @@ # -*- Created for Alfa-addon -*- # -*- By the Alfa Develop Group -*- +from __future__ import division +from builtins import range +from past.utils import old_div +#from builtins import str +import sys +PY3 = False 
+if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int + import os, json, time, inspect, channelselector -from lib.concurrent import futures +from concurrent import futures from core.item import Item from core import tmdb, scrapertools, channeltools, filetools, jsontools from channelselector import get_thumb @@ -181,9 +189,6 @@ def channel_search(item): channel_list, channel_titles = get_channels(item) - import requests - session = requests.Session() - searching += channel_list searching_titles += channel_titles cnt = 0 @@ -193,7 +198,7 @@ def channel_search(item): config.set_setting('tmdb_active', False) with futures.ThreadPoolExecutor(max_workers=set_workers()) as executor: - c_results = [executor.submit(get_channel_results, ch, item, session) for ch in channel_list] + c_results = [executor.submit(get_channel_results, ch, item) for ch in channel_list] for res in futures.as_completed(c_results): cnt += 1 @@ -206,7 +211,7 @@ def channel_search(item): if finished in searching: searching_titles.remove(searching_titles[searching.index(finished)]) searching.remove(finished) - progress.update((cnt * 100) / len(channel_list), config.get_localized_string(70744) % str(len(channel_list) - cnt), + progress.update(old_div((cnt * 100), len(channel_list)), config.get_localized_string(70744) % str(len(channel_list) - cnt), str(searching_titles)) progress.close() @@ -221,7 +226,7 @@ def channel_search(item): ch_name = channel_titles[channel_list.index(key)] grouped = list() cnt += 1 - progress.update((cnt * 100) / len(ch_list), config.get_localized_string(60295), config.get_localized_string(60293)) + progress.update(old_div((cnt * 100), len(ch_list)), config.get_localized_string(60295), config.get_localized_string(60293)) if len(value) <= max_results and item.mode != 'all': if len(value) == 1: if not value[0].action or config.get_localized_string(70006).lower() in value[0].title.lower(): @@ -286,20 +291,18 @@ def channel_search(item): return valid + results -def get_channel_results(ch, item, session): +def get_channel_results(ch, item): max_results = 10 results = list() ch_params = channeltools.get_channel_parameters(ch) - exec("from channels import " + ch_params["channel"] + " as module") - - mainlist = module.mainlist(Item(channel=ch_params["channel"])) + module = __import__('channels.%s' % ch_params["channel"], fromlist=["channels.%s" % ch_params["channel"]]) + mainlist = getattr(module, 'mainlist')(Item(channel=ch_params["channel"])) search_action = [elem for elem in mainlist if elem.action == "search" and (item.mode == 'all' or elem.contentType == item.mode)] if search_action: for search_ in search_action: - search_.session = session try: results.extend(module.search(search_, item.text)) except: @@ -431,7 +434,7 @@ def setting_channel_new(item): elif presel_values[ret] == 'none': preselect = [] elif presel_values[ret] == 'all': - preselect = range(len(ids)) + preselect = list(range(len(ids))) elif presel_values[ret] in ['cast', 'lat']: preselect = [] for i, lg in enumerate(lista_lang): @@ -488,7 +491,7 @@ def genres_menu(item): mode = item.mode.replace('show', '') genres = tmdb.get_genres(mode) - for key, value in genres[mode].items(): + for key, value in list(genres[mode].items()): discovery = {'url': 'discover/%s' % mode, 'with_genres': key, 'language': def_lang, 'page': '1'} diff --git a/specials/setting.py b/specials/setting.py index 6d97fba3..d747dcbb 100644 --- a/specials/setting.py +++ b/specials/setting.py @@ -3,6 +3,14 @@ # Configuracion # 
------------------------------------------------------------ +from __future__ import division +#from builtins import str +import sys +PY3 = False +if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int +from builtins import range +from past.utils import old_div + from channelselector import get_thumb from core import filetools from core import servertools @@ -70,6 +78,8 @@ def menu_channels(item): from core import channeltools channel_list = channelselector.filterchannels("all") for channel in channel_list: + if not channel.channel: + continue channel_parameters = channeltools.get_channel_parameters(channel.channel) if channel_parameters["has_settings"]: itemlist.append(Item(channel=CHANNELNAME, title=". " + config.get_localized_string(60547) % channel.title, @@ -98,12 +108,41 @@ def autostart(item): # item necessario launcher.py linea 265 def setting_torrent(item): logger.info() + LIBTORRENT_PATH = config.get_setting("libtorrent_path", server="torrent", default="") + LIBTORRENT_ERROR = config.get_setting("libtorrent_error", server="torrent", default="") default = config.get_setting("torrent_client", server="torrent", default=0) + BUFFER = config.get_setting("mct_buffer", server="torrent", default="50") + DOWNLOAD_PATH = config.get_setting("mct_download_path", server="torrent", default=config.get_setting("downloadpath")) + if not DOWNLOAD_PATH: DOWNLOAD_PATH = filetools.join(config.get_data_path(), 'downloads') + BACKGROUND = config.get_setting("mct_background_download", server="torrent", default=True) + RAR = config.get_setting("mct_rar_unpack", server="torrent", default=True) + DOWNLOAD_LIMIT = config.get_setting("mct_download_limit", server="torrent", default="") + BUFFER_BT = config.get_setting("bt_buffer", server="torrent", default="50") + DOWNLOAD_PATH_BT = config.get_setting("bt_download_path", server="torrent", default=config.get_setting("downloadpath")) + if not DOWNLOAD_PATH_BT: DOWNLOAD_PATH_BT = filetools.join(config.get_data_path(), 'downloads') + MAGNET2TORRENT = config.get_setting("magnet2torrent", server="torrent", default=False) torrent_options = [config.get_localized_string(30006), config.get_localized_string(70254), config.get_localized_string(70255)] torrent_options.extend(platformtools.torrent_client_installed()) + list_controls = [ + { + "id": "libtorrent_path", + "type": "text", + "label": "Libtorrent path", + "default": LIBTORRENT_PATH, + "enabled": True, + "visible": False + }, + { + "id": "libtorrent_error", + "type": "text", + "label": "libtorrent error", + "default": LIBTORRENT_ERROR, + "enabled": True, + "visible": False + }, { "id": "list_torrent", "type": "list", @@ -112,6 +151,70 @@ def setting_torrent(item): "enabled": True, "visible": True, "lvalues": torrent_options + }, + { + "id": "mct_buffer", + "type": "text", + "label": "MCT - Tamaño del Buffer a descargar antes de la reproducción", + "default": BUFFER, + "enabled": True, + "visible": "eq(-1,%s)" % torrent_options[2] + }, + { + "id": "mct_download_path", + "type": "text", + "label": "MCT - Ruta de la carpeta de descarga", + "default": DOWNLOAD_PATH, + "enabled": True, + "visible": "eq(-2,%s)" % torrent_options[2] + }, + { + "id": "bt_buffer", + "type": "text", + "label": "BT - Tamaño del Buffer a descargar antes de la reproducción", + "default": BUFFER_BT, + "enabled": True, + "visible": "eq(-3,%s)" % torrent_options[1] + }, + { + "id": "bt_download_path", + "type": "text", + "label": "BT - Ruta de la carpeta de descarga", + "default": DOWNLOAD_PATH_BT, + "enabled": True, + 
"visible": "eq(-4,%s)" % torrent_options[1] + }, + { + "id": "mct_download_limit", + "type": "text", + "label": "Límite (en Kb's) de la velocidad de descarga en segundo plano (NO afecta a RAR)", + "default": DOWNLOAD_LIMIT, + "enabled": True, + "visible": "eq(-5,%s) | eq(-5,%s)" % (torrent_options[1], torrent_options[2]) + }, + { + "id": "mct_rar_unpack", + "type": "bool", + "label": "¿Quiere que se descompriman los archivos RAR y ZIP para su reproducción?", + "default": RAR, + "enabled": True, + "visible": True + }, + { + "id": "mct_background_download", + "type": "bool", + "label": "¿Se procesa la descompresión de RARs en segundo plano?", + "default": BACKGROUND, + "enabled": True, + "visible": True + }, + { + "id": "magnet2torrent", + "type": "bool", + "label": "¿Quiere convertir los Magnets a Torrents para ver tamaños y almacenarlos?", + "default": MAGNET2TORRENT, + "enabled": True, + "visible": True } ] @@ -122,6 +225,22 @@ def setting_torrent(item): def save_setting_torrent(item, dict_data_saved): if dict_data_saved and "list_torrent" in dict_data_saved: config.set_setting("torrent_client", dict_data_saved["list_torrent"], server="torrent") + if dict_data_saved and "mct_buffer" in dict_data_saved: + config.set_setting("mct_buffer", dict_data_saved["mct_buffer"], server="torrent") + if dict_data_saved and "mct_download_path" in dict_data_saved: + config.set_setting("mct_download_path", dict_data_saved["mct_download_path"], server="torrent") + if dict_data_saved and "mct_background_download" in dict_data_saved: + config.set_setting("mct_background_download", dict_data_saved["mct_background_download"], server="torrent") + if dict_data_saved and "mct_rar_unpack" in dict_data_saved: + config.set_setting("mct_rar_unpack", dict_data_saved["mct_rar_unpack"], server="torrent") + if dict_data_saved and "mct_download_limit" in dict_data_saved: + config.set_setting("mct_download_limit", dict_data_saved["mct_download_limit"], server="torrent") + if dict_data_saved and "bt_buffer" in dict_data_saved: + config.set_setting("bt_buffer", dict_data_saved["bt_buffer"], server="torrent") + if dict_data_saved and "bt_download_path" in dict_data_saved: + config.set_setting("bt_download_path", dict_data_saved["bt_download_path"], server="torrent") + if dict_data_saved and "magnet2torrent" in dict_data_saved: + config.set_setting("magnet2torrent", dict_data_saved["magnet2torrent"], server="torrent") def menu_servers(item): logger.info() @@ -138,7 +257,7 @@ def menu_servers(item): # Inicio - Servidores configurables - server_list = servertools.get_debriders_list().keys() + server_list = list(servertools.get_debriders_list().keys()) for server in server_list: server_parameters = servertools.get_server_parameters(server) if server_parameters["has_settings"]: @@ -149,13 +268,12 @@ def menu_servers(item): itemlist.append(Item(channel=CHANNELNAME, title=config.get_localized_string(60554), action="", folder=False, text_bold = True, thumbnail=get_thumb("setting_0.png"))) - server_list = servertools.get_servers_list().keys() + server_list = list(servertools.get_servers_list().keys()) for server in sorted(server_list): server_parameters = servertools.get_server_parameters(server) logger.info(server_parameters) - if server_parameters["has_settings"] and filter(lambda x: x["id"] not in ["black_list", "white_list"], - server_parameters["settings"]): + if server_parameters["has_settings"] and [x for x in server_parameters["settings"] if x["id"] not in ["black_list", "white_list"]]: itemlist.append( 
Item(channel=CHANNELNAME, title=". " + config.get_localized_string(60553) % server_parameters["name"], action="server_config", config=server, folder=False, thumbnail="")) @@ -207,7 +325,7 @@ def cb_servers_blacklist(item, dict_values): progreso = platformtools.dialog_progress(config.get_localized_string(60557), config.get_localized_string(60558)) n = len(dict_values) i = 1 - for k, v in dict_values.items(): + for k, v in list(dict_values.items()): if k == 'filter_servers': config.set_setting('filter_servers', v) else: @@ -215,7 +333,7 @@ def cb_servers_blacklist(item, dict_values): if v: # Si el servidor esta en la lista negra no puede estar en la de favoritos config.set_setting("favorites_servers_list", 100, server=k) f = True - progreso.update((i * 100) / n, config.get_localized_string(60559) % k) + progreso.update(old_div((i * 100), n), config.get_localized_string(60559) % k) i += 1 if not f: # Si no hay ningun servidor en la lista, desactivarla @@ -269,21 +387,21 @@ def cb_servers_favorites(server_names, dict_values): dict_name = {} progreso = platformtools.dialog_progress(config.get_localized_string(60557), config.get_localized_string(60558)) - for i, v in dict_values.items(): + for i, v in list(dict_values.items()): if i == "favorites_servers": config.set_setting("favorites_servers", v) elif int(v) > 0: dict_name[server_names[v]] = int(i) - servers_list = servertools.get_servers_list().items() + servers_list = list(servertools.get_servers_list().items()) n = len(servers_list) i = 1 for server, server_parameters in servers_list: - if server_parameters['name'] in dict_name.keys(): + if server_parameters['name'] in list(dict_name.keys()): config.set_setting("favorites_servers_list", dict_name[server_parameters['name']], server=server) else: config.set_setting("favorites_servers_list", 0, server=server) - progreso.update((i * 100) / n, config.get_localized_string(60559) % server_parameters['name']) + progreso.update(old_div((i * 100), n), config.get_localized_string(60559) % server_parameters['name']) i += 1 if not dict_name: # Si no hay ningun servidor en lalista desactivarla @@ -310,11 +428,10 @@ def submenu_tools(item): if filetools.exists(channel_custom): itemlist.append(Item(channel='custom', action='mainlist', title='Custom Channel')) - - itemlist.append(Item(channel=CHANNELNAME, action="check_quickfixes", folder=False, - title=config.get_localized_string(30001), plot=config.get_addon_version(with_fix=True) )) # itemlist.append(Item(channel=CHANNELNAME, action="update_quasar", folder=False, - # title=config.get_localized_string(70569))) + # title=config.get_localized_string(70569))) + itemlist.append(Item(channel=CHANNELNAME, action="update_quasar", folder=False, + title="Actualizar addon externo Quasar")) itemlist.append(Item(channel=CHANNELNAME, action="", title="", folder=False, thumbnail=get_thumb("setting_0.png"))) @@ -477,7 +594,7 @@ def conf_tools(item): channeljson_exists = True # Obtenemos configuracion guardada de ../settings/channel_data.json try: - dict_file = jsontools.load(open(file_settings, "rb").read()) + dict_file = jsontools.load(filetools.read(file_settings)) if isinstance(dict_file, dict) and 'settings' in dict_file: dict_settings = dict_file['settings'] except EnvironmentError: @@ -517,14 +634,9 @@ def conf_tools(item): dict_settings = default_settings dict_file['settings'] = dict_settings # Creamos el archivo ../settings/channel_data.json - json_data = jsontools.dump(dict_file) - try: - open(file_settings, "wb").write(json_data) - # 
logger.info(channel.channel + " - Archivo _data.json GUARDADO!")
-                # El channel_data.json se ha creado/modificado
-                list_status = config.get_localized_string(60560)
-            except EnvironmentError:
+            if not filetools.write(file_settings, jsontools.dump(dict_file), silent=True):
                 logger.error("ERROR al salvar el archivo: %s" % file_settings)
+            list_status = config.get_localized_string(60560)
         else:
             if default_settings is None:
                 list_status = config.get_localized_string(60571)
@@ -596,7 +708,7 @@ def channels_onoff(item):
     ret = platformtools.dialog_select(config.get_localized_string(60545), preselecciones)
     if ret == -1: return False  # pedido cancel
     if ret == 2: preselect = []
-    elif ret == 1: preselect = range(len(ids))
+    elif ret == 1: preselect = list(range(len(ids)))
     else: preselect = []
 
     for i, canal in enumerate(ids):
@@ -751,10 +863,403 @@ def overwrite_tools(item):
                                              movie.channel.capitalize()))
                 # ... y la volvemos a añadir
                 videolibrarytools.save_movie(movie)
-            except Exception, ex:
+            except Exception as ex:
                 logger.error("Error al crear de nuevo la película")
                 template = "An exception of type %s occured. Arguments:\n%r"
                 message = template % (type(ex).__name__, ex.args)
                 logger.error(message)
 
     p_dialog2.close()
+
+
+def report_menu(item):
+    logger.info('URL: ' + item.url)
+
+    from channelselector import get_thumb
+
+    thumb_debug = get_thumb("update.png")
+    thumb_error = get_thumb("error.png")
+    thumb_next = get_thumb("next.png")
+    itemlist = []
+    paso = 1
+
+    # Build an options menu that lets the user report an Alfa failure through a "pastebin" server
+    # For the report to be complete, the user must have the DEBUG option set to ON
+    # Free "pastebin" servers have capacity limits, so the size of the log matters
+    # At the end of the upload, the user is given the address of the log on the server so they can report it
+
+    itemlist.append(Item(channel=item.channel, action="", title=config.get_localized_string(707418),
+                         thumbnail=thumb_next, folder=False))
+    #if not config.get_setting('debug'):
+    itemlist.append(Item(channel=item.channel, action="activate_debug", extra=True,
+                         title=config.get_localized_string(707419) %
+                         str(paso), thumbnail=thumb_debug, folder=False))
+    paso += 1
+    itemlist.append(Item(channel="channelselector", action="getmainlist",
+                         title=config.get_localized_string(707420) %
+                         str(paso), thumbnail=thumb_debug))
+    paso += 1
+    itemlist.append(Item(channel=item.channel, action="report_send",
+                         title=config.get_localized_string(707421) %
+                         str(paso), thumbnail=thumb_error, folder=False))
+    paso += 1
+    #if config.get_setting('debug'):
+    itemlist.append(Item(channel=item.channel, action="activate_debug", extra=False,
+                         title=config.get_localized_string(707422) % str(paso),
+                         thumbnail=thumb_debug, folder=False))
+    paso += 1
+
+    if item.url:
+        itemlist.append(Item(channel=item.channel, action="", title="", folder=False))
+
+        itemlist.append(Item(channel=item.channel, action="",
+                             title=config.get_localized_string(707423),
+                             thumbnail=thumb_next, folder=False))
+
+        if item.one_use:
+            action = ''
+            url = ''
+        else:
+            action = 'call_browser'
+            url = item.url
+        itemlist.append(Item(channel=item.channel, action=action,
+                             title="**- LOG: [COLOR gold]%s[/COLOR] -**" % item.url, url=url,
+                             thumbnail=thumb_next, unify=False, folder=False))
+
+    itemlist.append(Item(channel=item.channel, action="call_browser",
+                         title="su Github (raccomandato)", url='https://github.com/kodiondemand/addon/issues',
+                         thumbnail=thumb_next,
+                         folder=False))
+    itemlist.append(Item(channel=item.channel, action="call_browser",
+                         url='https://t.me/kodiondemand', title="Su telegram",
+                         thumbnail=thumb_next, unify=False, folder=False))
+
+    if item.one_use:
+        itemlist.append(Item(channel=item.channel, action="",
+                             title="[COLOR orange]NO ACCEDA al INFORME: se BORRARÁ[/COLOR]",
+                             thumbnail=thumb_next, folder=False))
+        itemlist.append(Item(channel=item.channel, action="",
+                             title="[COLOR orange]ya que es de un solo uso[/COLOR]",
+                             thumbnail=thumb_next, folder=False))
+
+    return itemlist
+
+
+def activate_debug(item):
+    logger.info(item.extra)
+    from platformcode import platformtools
+
+    # Toggle the DEBUG option in settings.xml
+
+    if isinstance(item.extra, str):
+        return report_menu(item)
+    if item.extra:
+        config.set_setting('debug', True)
+        platformtools.dialog_notification(config.get_localized_string(707430), config.get_localized_string(707431))
+    else:
+        config.set_setting('debug', False)
+        platformtools.dialog_notification(config.get_localized_string(707430), config.get_localized_string(707432))
+
+
+def report_send(item, description='', fatal=False):
+    import xbmc
+    import random
+    import traceback
+
+    if PY3:
+        #from future import standard_library
+        #standard_library.install_aliases()
+        import urllib.parse as urlparse  # Very slow on PY2; native on PY3
+        import urllib.parse as urllib
+    else:
+        import urllib  # Use the PY2 native module, which is faster
+        import urlparse
+
+    try:
+        requests_status = True
+        import requests
+    except:
+        requests_status = False
+        logger.error(traceback.format_exc())
+
+    from core import jsontools, httptools, scrapertools
+    from platformcode import envtal
+
+    # This function uploads the LOG. The file size matters a great deal, because free
+    # "pastebin" services have limits, sometimes very low ones.
+    # One service, File.io, allows direct upload of binary files through the "requests" function.
+    # That raises the upload capacity dramatically, well beyond what is needed.
+    # For that reason we keep a list of "pastebin" services able to perform the upload,
+    # chosen by available capacity or by availability.
+    # To drive the "pastebin" servers from common code, a dictionary of servers and their
+    # characteristics is defined. Each entry records the peculiarities of one server, both for
+    # building its POST request and for how the upload key comes back in the response
+    # (json, header, regex over the body, ...).
+    # On entry, the list of "pastebin" servers is shuffled, so that not every user uploads to the
+    # same server and overloads it.
+    # The log file is read and its size compared against each server's capacity (field 10 of each
+    # entry, counting from 0, expressed in MB) until a capable one is found. If the upload fails,
+    # the remaining servers with enough capacity are tried in turn.
+    # If no server is available, the user is asked to try again later, or to post the log directly
+    # on the forum. If the problem is size, the user is asked to restart Kodi and reproduce the
+    # failure, so the LOG comes out smaller.
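As a rough sketch only (none of this code is in the patch; upload_to is a hypothetical stand-in for the POST-and-parse logic implemented further down), the selection loop described above reduces to:

    import random

    def upload_log(log_data, log_size_mb, servers, upload_to):
        # servers: name -> descriptor tuple shaped like pastebin_list below
        # upload_to(name, descriptor, data): hypothetical callable that performs the
        # POST and returns the report URL, or None on failure
        names = list(servers)
        random.shuffle(names)              # spread uploads across services
        for name in names:
            desc = servers[name]
            if desc[0] != '1':             # field 0: service enabled flag
                continue
            capacity_mb = float(desc[10])  # field 10: max log size in MB, 0 = unlimited
            if capacity_mb > 0 and log_size_mb > capacity_mb:
                continue                   # log too large for this service, try the next one
            url = upload_to(name, desc, log_data)
            if url:
                return url                 # first successful upload wins
        return None                        # caller asks the user to retry later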
+
+    pastebin_list = {
+        'hastebin': ('1', 'https://hastebin.com/', 'documents', 'random', '', '',
+                     'data', 'json', 'key', '', '0.29', '10', True, 'raw/', '', ''),
+        'dpaste': ('1', 'http://dpaste.com/', 'api/v2/', 'random', 'content=',
+                   '&syntax=text&title=%s&poster=alfa&expiry_days=7',
+                   'headers', '', '', 'location', '0.23', '15', True, '', '.txt', ''),
+        'ghostbin': ('1', 'https://ghostbin.com/', 'paste/new', 'random', 'lang=text&text=',
+                     '&expire=2d&password=&title=%s',
+                     'data', 'regex', '<title>(.*?)\s*-\s*Ghostbin<\/title>', '',
+                     '0.49', '15', False, 'paste/', '', ''),
+        'write.as': ('1', 'https://write.as/', 'api/posts', 'random', 'body=', '&title=%s',
+                     'data', 'json', 'data', 'id', '0.018', '15', True, '', '', ''),
+        'oneclickpaste': ('1', 'http://oneclickpaste.com/', 'index.php', 'random', 'paste_data=',
+                          '&title=%s&format=text&paste_expire_date=1W&visibility=0&pass=&submit=Submit',
+                          'data', 'regex', '<a class="btn btn-primary" href="[^"]+\/(\d+\/)">\s*View\s*Paste\s*<\/a>',
+                          '', '0.060', '5', True, '', '', ''),
+        'bpaste': ('1', 'https://bpaste.net/', '', 'random', 'code=', '&lexer=text&expiry=1week',
+                   'data', 'regex', 'View\s*<a\s*href="[^*]+/(.*?)">raw<\/a>', '',
+                   '0.79', '15', True, 'raw/', '', ''),
+        'dumpz': ('0', 'http://dumpz.org/', 'api/dump', 'random', 'code=', '&lexer=text&comment=%s&password=',
+                  'headers', '', '', 'location', '0.99', '15', False, '', '', ''),
+        'file.io': ('1', 'https://file.io/', '', 'random', '', 'expires=1w',
+                    'requests', 'json', 'key', '', '99.0', '30', False, '', '.log', ''),
+        'uploadfiles': ('1', 'https://up.uploadfiles.io/upload', '', 'random', '', '',
+                        'requests', 'json', 'url', '', '99.0', '30', False, None, '', '')
+    }
+    pastebin_list_last = ['hastebin', 'ghostbin', 'file.io']  # Leave these services for last
+    pastebin_one_use = ['file.io']  # Single-use servers; the report is deleted once read
+    pastebin_dir = []
+    paste_file = {}
+    paste_params = ()
+    paste_post = ''
+    status = False
+    msg = config.get_localized_string(707424)
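The positional tuples above are hard to read back. Purely as an illustration (these names are assumptions inferred from the indexed accesses further down; they exist nowhere in the addon), the field layout could be documented with a namedtuple:

    from collections import namedtuple

    PasteService = namedtuple('PasteService', [
        'active',          # [0]  '1' enables the service
        'host',            # [1]  base URL
        'api_suffix',      # [2]  path appended to host for the POST
        'title_mode',      # [3]  'random' generates a random LOG title
        'post_prefix',     # [4]  leading part of the POST body
        'post_suffix',     # [5]  trailing part of the POST body
        'req_type',        # [6]  'data', 'headers' or 'requests'
        'resp_type',       # [7]  'json', 'regex' or '' (key arrives in a header)
        'resp_key',        # [8]  json key or regex that yields the upload key
        'resp_key2',       # [9]  nested json key, or header name such as 'location'
        'capacity_mb',     # [10] max log size in MB, '0' = unlimited
        'timeout',         # [11] request timeout in seconds
        'random_headers',  # [12] send randomized headers
        'url_part',        # [13] path fragment of the user-facing URL (None = full URL returned)
        'url_tail',        # [14] suffix of the user-facing URL
        'extra_headers',   # [15] extra headers as a json string
    ])

    hastebin = PasteService('1', 'https://hastebin.com/', 'documents', 'random', '', '',
                            'data', 'json', 'key', '', '0.29', '10', True, 'raw/', '', '')
    # hastebin.capacity_mb -> '0.29', i.e. pastes up to roughly 0.29 MB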
+
+    # Verify that DEBUG=ON; otherwise refuse, and ask the user to enable it and reproduce the failure
+    if not config.get_setting('debug'):
+        platformtools.dialog_notification(config.get_localized_string(707425), config.get_localized_string(707426))
+        return report_menu(item)
+
+    # Going forward, the user will be allowed to enter a brief description of the failure, appended to the LOG
+    if description == 'OK':
+        description = platformtools.dialog_input('', 'Introduzca una breve descripción del fallo')
+
+    # Write into the log some Kodi and Alfa variables that will help diagnose the failure
+    environment = envtal.list_env()
+    if not environment['log_path']:
+        environment['log_path'] = str(filetools.join(xbmc.translatePath("special://logpath/"), 'kodi.log'))
+        environment['log_size_bytes'] = str(filetools.getsize(environment['log_path']))
+        environment['log_size'] = str(round(float(environment['log_size_bytes']) / (1024*1024), 3))
+
+    # Read the LOG file
+    log_path = environment['log_path']
+    if filetools.exists(log_path):
+        log_size_bytes = int(environment['log_size_bytes'])  # File size in Bytes
+        log_size = float(environment['log_size'])  # File size in MB
+        log_data = filetools.read(log_path)  # File contents
+        if not log_data:  # Some error?
+            platformtools.dialog_notification(config.get_localized_string(707427), '', 2)
+            return report_menu(item)
+    else:  # Log missing, or wrong path?
+        platformtools.dialog_notification(config.get_localized_string(707427), '', 2)
+        return report_menu(item)
+
+    # If a description of the failure was entered, insert it at the start of the LOG data
+    # log_title = '***** DESCRIPCIÓN DEL FALLO *****'
+    # if description:
+    #     log_data = '%s\n%s\n\n%s' %(log_title, description, log_data)
+
+    # Shuffle the names of the "pastebin" servers
+    for label_a, value_a in list(pastebin_list.items()):
+        if label_a not in pastebin_list_last:
+            pastebin_dir.append(label_a)
+    random.shuffle(pastebin_dir)
+    pastebin_dir.extend(pastebin_list_last)  # Leave these services for last
+
+    #pastebin_dir = ['uploadfiles']  # For testing a single service
+    #log_data = 'TEST PARA PRUEBAS DEL SERVICIO'
+
+    # Walk the list of "pastebin" servers until an active one with enough capacity and availability is found
+    for paste_name in pastebin_dir:
+        if pastebin_list[paste_name][0] != '1':  # Server not active? Skip it
+            continue
+        if pastebin_list[paste_name][6] == 'requests' and not requests_status:  # "requests" unavailable? Skip it
+            continue
+
+        paste_host = pastebin_list[paste_name][1]  # URL of the "pastebin" server
+        paste_sufix = pastebin_list[paste_name][2]  # API suffix for the POST
+        paste_title = ''
+        if pastebin_list[paste_name][3] == 'random':
+            paste_title = "LOG" + str(random.randrange(1, 999999999))  # Title of the LOG
+        paste_post1 = pastebin_list[paste_name][4]  # Leading part of the POST
+        paste_post2 = pastebin_list[paste_name][5]  # Trailing part of the POST
+        paste_type = pastebin_list[paste_name][6]  # downloadpage type: DATA or HEADERS
+        paste_resp = pastebin_list[paste_name][7]  # Response type: JSON, or data scanned with a REGEX
+        paste_resp_key = pastebin_list[paste_name][8]  # For JSON, primary label holding the KEY
+        paste_url = pastebin_list[paste_name][9]  # Primary label for HEADER, secondary for JSON
+        paste_file_size = float(pastebin_list[paste_name][10])  # Server capacity in MB
+        if paste_file_size > 0:  # 0 means unlimited capacity
+            if log_size > paste_file_size:  # Capacity vs size check
+                msg = 'Archivo de log demasiado grande. Reinicie Kodi y reinténtelo'
+                continue
+        paste_timeout = int(pastebin_list[paste_name][11])  # Timeout for the server
+        paste_random_headers = pastebin_list[paste_name][12]  # Use random headers to throw the server off?
+        paste_host_return = pastebin_list[paste_name][13]  # URL part used to build the key shown to the user
+        paste_host_return_tail = pastebin_list[paste_name][14]  # URL suffix used to build the key shown to the user
+        paste_headers = {}
+        if pastebin_list[paste_name][15]:  # Extra headers required by the server
+            paste_headers.update(jsontools.load((pastebin_list[paste_name][15])))
+
+        if paste_name in pastebin_one_use:
+            pastebin_one_use_msg = '[COLOR red]NO ACCEDA al INFORME: se BORRARÁ[/COLOR]'
+            item.one_use = True
+        else:
+            pastebin_one_use_msg = ''
+
+        try:
+            # Build the POST from the options of the "pastebin" server
+            # Handle the "requests" format
+            if paste_type == 'requests':
+                paste_file = {'file': (paste_title+'.log', log_data)}
+                if paste_post1:
+                    paste_file.update(paste_post1)
+                if paste_post2:
+                    if '%s' in paste_post2:
+                        paste_params = paste_post2 % (paste_title+'.log', log_size_bytes)
+                    else:
+                        paste_params = paste_post2
+
+            # Handle the downloads format
+            else:
+                #log_data = 'Test de Servidor para ver su viabilidad (áéíóúñ¿?)'
+                if paste_name in ['hastebin']:  # Some services do not need a "quote"
+                    paste_post = log_data
+                else:
+                    paste_post = urllib.quote_plus(log_data)  # "quote" the LOG data
+                if paste_post1:
+                    paste_post = '%s%s' % (paste_post1, paste_post)
+                if paste_post2:
+                    if '%s' in paste_post2:
+                        paste_post += paste_post2 % paste_title
+                    else:
+                        paste_post += paste_post2
+
+            # Make the downloadpage request with HEADERS or DATA, using the server's parameters
+            if paste_type == 'headers':
+                data = httptools.downloadpage(paste_host+paste_sufix, post=paste_post,
+                                              timeout=paste_timeout, random_headers=paste_random_headers,
+                                              headers=paste_headers).headers
+            elif paste_type == 'data':
+                data = httptools.downloadpage(paste_host+paste_sufix, post=paste_post,
+                                              timeout=paste_timeout, random_headers=paste_random_headers,
+                                              headers=paste_headers).data
+
+            # Requests in REQUESTS format are made here
+            elif paste_type == 'requests':
+                #data = requests.post(paste_host, params=paste_params, files=paste_file,
+                #                     timeout=paste_timeout)
+                data = httptools.downloadpage(paste_host, params=paste_params, file=log_data,
+                                              file_name=paste_title+'.log', timeout=paste_timeout,
+                                              random_headers=paste_random_headers, headers=paste_headers)
+        except:
+            msg = 'Inténtelo más tarde'
+            logger.error('Fallo al guardar el informe. ' + msg)
+            logger.error(traceback.format_exc())
+            continue
+
+        # Parse the server's response and locate the upload key, to build the url passed to the user
+        if data:
+            paste_host_resp = paste_host
+            if paste_host_return == None:  # If the full url is returned, nothing needs composing
+                paste_host_resp = ''
+                paste_host_return = ''
+
+            # Responses to REQUESTS petitions
+            if paste_type == 'requests':  # Response of a "requests"-type petition?
+                if paste_resp == 'json':  # Response in JSON format?
+                    if paste_resp_key in data.data:
+                        if not paste_url:
+                            key = jsontools.load(data.data)[paste_resp_key]  # with one label
+                        else:
+                            key = jsontools.load(data.data)[paste_resp_key][paste_url]  # with two nested labels
+                        item.url = "%s%s%s" % (paste_host_resp+paste_host_return, key,
+                                               paste_host_return_tail)
+                    else:
+                        logger.error('ERROR en formato de retorno de datos. data.data=' +
+                                     str(data.data))
+                        continue
+
+            # Responses to DOWNLOADPAGE petitions
+            elif paste_resp == 'json':  # Response in JSON format?
+                if paste_resp_key in data:
+                    if not paste_url:
+                        key = jsontools.load(data)[paste_resp_key]  # with one label
+                    else:
+                        key = jsontools.load(data)[paste_resp_key][paste_url]  # with two nested labels
+                    item.url = "%s%s%s" % (paste_host_resp+paste_host_return, key,
+                                           paste_host_return_tail)
+                else:
+                    logger.error('ERROR en formato de retorno de datos. data=' + str(data))
+                    continue
+            elif paste_resp == 'regex':  # Response in DATA, to be scanned with a REGEX?
+                key = scrapertools.find_single_match(data, paste_resp_key)
+                if key:
+                    item.url = "%s%s%s" % (paste_host_resp+paste_host_return, key,
+                                           paste_host_return_tail)
+                else:
+                    logger.error('ERROR en formato de retorno de datos. data=' + str(data))
+                    continue
+            elif paste_type == 'headers':  # Response in HEADERS, to be looked up in "location"?
+                if paste_url in data:
+                    item.url = data[paste_url]  # Label returning the key
+                    item.url = urlparse.urljoin(paste_host_resp + paste_host_return,
+                                                item.url + paste_host_return_tail)
+                else:
+                    logger.error('ERROR en formato de retorno de datos. response.headers=' +
+                                 str(data))
+                    continue
+            else:
+                logger.error('ERROR en formato de retorno de datos. paste_type=' +
+                             str(paste_type) + ' / DATA: ' + data)
+                continue
+
+            status = True  # Upload completed successfully
+            logger.info('Report created: ' + str(item.url))  # Save the URL of the report for the user
+            # if fatal:  # For future use, for logger.crash
+            #     platformtools.dialog_ok('Informe de ERROR en Alfa CREADO', 'Repórtelo en el foro agregando ERROR FATAL y esta URL: ', '[COLOR gold]%s[/COLOR]' % item.url, pastebin_one_use_msg)
+            # else:  # Pass the URL of the report to the user
+            #     platformtools.dialog_ok('Informe de Fallo en Alfa CREADO', 'Repórtelo en el foro agregando una descripcion del fallo y esta URL: ', '[COLOR gold]%s[/COLOR]' % item.url, pastebin_one_use_msg)
+
+            break  # Operation finished, stop trying servers
+
+    if not status and not fatal:  # The operation failed...
+        platformtools.dialog_notification(config.get_localized_string(707428), msg)  # ... notify the cause
+        logger.error(config.get_localized_string(707428) + msg)
+
+    # Return control with item.url updated, so the URL of the report shows up in the menu
+    item.action = 'report_menu'
+    platformtools.itemlist_update(item, True)
+    # return report_menu(item)
+
+
+def call_browser(item):
+    import webbrowser
+    if not webbrowser.open(item.url):
+        import xbmc
+        if xbmc.getCondVisibility('system.platform.linux') and xbmc.getCondVisibility(
+                'system.platform.android'):  # android
+            xbmc.executebuiltin('StartAndroidActivity("", "android.intent.action.VIEW", "", "%s")' % (item.url))
+        else:
+            try:
+                import urllib.request as urllib
+            except ImportError:
+                import urllib
+            short = urllib.urlopen(
+                'https://u.nu/api.php?action=shorturl&format=simple&url=' + item.url).read()
+            platformtools.dialog_ok(config.get_localized_string(20000),
+                                    config.get_localized_string(70740) % short)
\ No newline at end of file
diff --git a/specials/side_menu.py b/specials/side_menu.py
index ef2818e7..6fc5c735 100644
--- a/specials/side_menu.py
+++ b/specials/side_menu.py
@@ -1,19 +1,17 @@
 # -*- coding: utf-8 -*-
 # ------------------------------------------------------------
+#from builtins import str
+import sys
+PY3 = False
+if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
+
 import os
-
-import xbmc
-import xbmcaddon
-import xbmcgui
-
-from core import jsontools
 from core.item import Item
+from core import jsontools
 from platformcode import config, logger
 from platformcode import launcher
-
-addon = xbmcaddon.Addon('metadata.themoviedb.org')
-def_lang = addon.getSetting('language')
+import xbmc, xbmcgui, xbmcplugin, xbmcaddon
 
 media_path = os.path.join(config.get_runtime_path(), "resources/skins/Default/media/side_menu/")
 menu_settings_path = os.path.join(config.get_data_path(), "settings_channels", 'menu_settings_data.json')
@@ -24,7 +22,8 @@ else:
     menu_node = {'categoria actual':config.get_setting('category')}
     jsontools.update_node(menu_node, 'menu_settings_data.json', "menu")
-
+addon = xbmcaddon.Addon('metadata.themoviedb.org')
+def_lang = addon.getSetting('language')
 
 ACTION_SHOW_FULLSCREEN = 36
 ACTION_GESTURE_SWIPE_LEFT = 511
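The PY2/PY3 urllib import shim used in report_send above reappears in trailertools.py below, and the PY3 flag shim recurs in search.py, side_menu.py and videolibrary.py in this same patch. A shared helper module (hypothetical; the patch does not add one) could express it once:

    # lib/py2py3.py - hypothetical module; callers would then do
    #   from lib.py2py3 import PY3, urllib, urlparse
    import sys

    PY3 = sys.version_info[0] >= 3

    if PY3:
        import urllib.parse as urllib    # quote, quote_plus, urlencode live here on PY3
        import urllib.parse as urlparse  # urljoin, urlunparse, urlparse
    else:
        import urllib                    # native on PY2, faster than the backports
        import urlparse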
diff --git a/specials/trailertools.py b/specials/trailertools.py
index 3f674f8b..126936ae 100644
--- a/specials/trailertools.py
+++ b/specials/trailertools.py
@@ -3,13 +3,29 @@
 # Search trailers from youtube, filmaffinity, abandomoviez, vimeo, etc...
 # --------------------------------------------------------------------------------
-import re
-import urllib
+from __future__ import division
+#from builtins import str
+import sys
-import urlparse
 import xbmcaddon
+PY3 = False
+if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
+from past.utils import old_div
+
+if PY3:
+    #from future import standard_library
+    #standard_library.install_aliases()
+    import urllib.parse as urllib  # Very slow on PY2; native on PY3
+    import urllib.parse as urlparse
+else:
+    import urllib  # Use the PY2 native module, which is faster
+    import urlparse
+
+import re
+
 from core import httptools
+from core import jsontools
 from core import scrapertools
 from core import servertools
 from core.item import Item
@@ -39,9 +55,9 @@ def buscartrailer(item, trailers=[]):
         itemlist = globals()[item.action](item)
     else:
         # Se elimina la opción de Buscar Trailer del menú contextual para evitar redundancias
-        if type(item.context) is str and "buscar_trailer" in item.context:
+        if isinstance(item.context, str) and "buscar_trailer" in item.context:
             item.context = item.context.replace("buscar_trailer", "")
-        elif type(item.context) is list and "buscar_trailer" in item.context:
+        elif isinstance(item.context, list) and "buscar_trailer" in item.context:
             item.context.remove("buscar_trailer")
         item.text_color = ""
@@ -50,15 +66,15 @@
         if item.contentTitle != "":
             item.contentTitle = item.contentTitle.strip()
         elif keyboard:
-            fulltitle = re.sub('\[\/*(B|I|COLOR)\s*[^\]]*\]', '', item.fulltitle.strip())
-            item.contentTitle = platformtools.dialog_input(default=fulltitle, heading=config.get_localized_string(70505))
+            contentTitle = re.sub('\[\/*(B|I|COLOR)\s*[^\]]*\]', '', item.contentTitle.strip())
+            item.contentTitle = platformtools.dialog_input(default=contentTitle, heading=config.get_localized_string(70505))
             if item.contentTitle is None:
-                item.contentTitle = fulltitle
+                item.contentTitle = contentTitle
             else:
                 item.contentTitle = item.contentTitle.strip()
         else:
-            fulltitle = re.sub('\[\/*(B|I|COLOR)\s*[^\]]*\]', '', item.fulltitle.strip())
-            item.contentTitle = fulltitle
+            contentTitle = re.sub('\[\/*(B|I|COLOR)\s*[^\]]*\]', '', item.contentTitle.strip())
+            item.contentTitle = contentTitle
 
     item.year = item.infoLabels['year']
@@ -148,7 +164,7 @@ def tmdb_trailers(item, tipo="movie"):
 
 def youtube_search(item):
     logger.info()
     itemlist = []
     titulo = item.contentTitle
     if item.extra != "youtube":
         titulo += " trailer"
@@ -159,11 +174,10 @@
     titulo = urllib.quote(titulo)
     titulo = titulo.replace("%20", "+")
     data = httptools.downloadpage("https://www.youtube.com/results?sp=EgIQAQ%253D%253D&q=" + titulo).data
-    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
-    patron = """"thumbnails":\[\{"url":"(https://i.ytimg.com/vi[^"]+).*?"""
-    patron += """simpleText":"([^"]+).*?"""
-    patron += """simpleText":"[^"]+.*?simpleText":"([^"]+).*?"""
-    patron += """url":"([^"]+)"""
+    patron = 'thumbnails":\[\{"url":"(https://i.ytimg.com/vi[^"]+).*?'
+    patron += 'text":"([^"]+).*?'
+    patron += 'simpleText":"[^"]+.*?simpleText":"([^"]+).*?'
+ patron += 'url":"([^"]+)' matches = scrapertools.find_multiple_matches(data, patron) for scrapedthumbnail, scrapedtitle, scrapedduration, scrapedurl in matches: scrapedtitle = scrapedtitle.decode('utf8').encode('utf8') @@ -173,18 +187,15 @@ def youtube_search(item): url = urlparse.urljoin('https://www.youtube.com/', scrapedurl) itemlist.append(item.clone(title=scrapedtitle, action="play", server="youtube", url=url, thumbnail=scrapedthumbnail, text_color="white")) - next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"[^>]+><span class="yt-uix-button-content">' 'Siguiente') if next_page != "": next_page = urlparse.urljoin("https://www.youtube.com", next_page) itemlist.append(item.clone(title=config.get_localized_string(70502), action="youtube_search", extra="youtube", page=next_page, thumbnail="", text_color="")) - if not itemlist: itemlist.append(item.clone(title=config.get_localized_string(70501) % titulo, action="", thumbnail="", text_color="")) - if keyboard: if item.contextual: title = "[COLOR green]%s[/COLOR]" @@ -192,7 +203,6 @@ def youtube_search(item): title = "%s" itemlist.append(item.clone(title=title % config.get_localized_string(70510), action="manual_search", text_color="green", thumbnail="", extra="youtube")) - return itemlist @@ -206,11 +216,11 @@ def abandomoviez_search(item): titulo = item.contentTitle.decode('utf-8').encode('iso-8859-1') post = urllib.urlencode({'query': titulo, 'searchby': '1', 'posicion': '1', 'orden': '1', 'anioin': item.year, 'anioout': item.year, 'orderby': '1'}) - url = "http://www.abandomoviez.net/db/busca_titulo_advance.php" + url = "http://www.abandomoviez.net/db/busca_titulo.php?busco2=%s" %item.contentTitle item.prefix = "db/" data = httptools.downloadpage(url, post=post).data if "No hemos encontrado ninguna" in data: - url = "http://www.abandomoviez.net/indie/busca_titulo_advance.php" + url = "http://www.abandomoviez.net/indie/busca_titulo.php?busco2=%s" %item.contentTitle item.prefix = "indie/" data = httptools.downloadpage(url, post=post).data.decode("iso-8859-1").encode('utf-8') @@ -253,7 +263,6 @@ def abandomoviez_search(item): def search_links_abando(item): logger.info() - data = httptools.downloadpage(item.url).data itemlist = [] if "Lo sentimos, no tenemos trailer" in data: @@ -286,9 +295,8 @@ def search_links_abando(item): if item.contextual: i += 1 message += ".." 
- progreso.update(10 + (90 * i / len(matches)), message) + progreso.update(10 + (old_div(90 * i, len(matches))), message) scrapedtitle = "[COLOR white]%s[/COLOR]" % scrapedtitle - data_trailer = httptools.downloadpage(scrapedurl).data trailer_url = scrapertools.find_single_match(data_trailer, 'iframe.*?src="([^"]+)"') trailer_url = trailer_url.replace("embed/", "watch?v=") @@ -296,10 +304,8 @@ def search_links_abando(item): thumbnail = "https://img.youtube.com/vi/%s/0.jpg" % code itemlist.append(item.clone(title=scrapedtitle, url=trailer_url, server="youtube", action="play", thumbnail=thumbnail, text_color="white")) - if item.contextual: progreso.close() - if keyboard: if item.contextual: title = "[COLOR green]%s[/COLOR]" @@ -342,7 +348,8 @@ def filmaffinity_search(item): if not scrapedthumbnail.startswith("http"): scrapedthumbnail = "http://www.filmaffinity.com" + scrapedthumbnail scrapedurl = "http://www.filmaffinity.com/es/evideos.php?movie_id=%s" % id - scrapedtitle = unicode(scrapedtitle, encoding="utf-8", errors="ignore") + if PY3: + scrapedtitle = unicode(scrapedtitle, encoding="utf-8", errors="ignore") scrapedtitle = scrapertools.htmlclean(scrapedtitle) itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, text_color="white", action="search_links_filmaff", thumbnail=scrapedthumbnail)) @@ -389,7 +396,8 @@ def search_links_filmaff(item): else: server = "" thumbnail = item.thumbnail - scrapedtitle = unicode(scrapedtitle, encoding="utf-8", errors="ignore") + if PY3: + scrapedtitle = unicode(scrapedtitle, encoding="utf-8", errors="ignore") scrapedtitle = scrapertools.htmlclean(scrapedtitle) scrapedtitle += " [" + server + "]" if item.contextual: @@ -413,15 +421,12 @@ def search_links_filmaff(item): try: import xbmcgui import xbmc - - class Select(xbmcgui.WindowXMLDialog): def __init__(self, *args, **kwargs): self.item = kwargs.get('item') self.itemlist = kwargs.get('itemlist') self.caption = kwargs.get('caption') self.result = None - def onInit(self): try: self.control_list = self.getControl(6) @@ -447,7 +452,6 @@ try: self.control_list.reset() self.control_list.addItems(self.items) self.setFocus(self.control_list) - def onClick(self, id): # Boton Cancelar y [X] if id == 5: @@ -461,7 +465,6 @@ try: del window_select else: window_select[-1].doModal() - def onAction(self, action): global window_select, result if action == 92 or action == 110: @@ -474,7 +477,6 @@ try: del window_select else: window_select[-1].doModal() - try: if (action == 7 or action == 100) and self.getFocusId() == 6: selectitem = self.control_list.getSelectedItem() @@ -489,7 +491,6 @@ try: else: result = None self.result = None - elif item.action == "play" and not self.item.windowed: for window in window_select: window.close() diff --git a/specials/tvmoviedb.py b/specials/tvmoviedb.py index 9ee10150..d2941e5b 100644 --- a/specials/tvmoviedb.py +++ b/specials/tvmoviedb.py @@ -11,10 +11,10 @@ from core import filetools from core import httptools from core import jsontools from core import scrapertools -from core import trakt_tools from core.item import Item -from core.support import typo, thumb +from core.support import typo from core.tmdb import Tmdb +from core import trakt_tools from platformcode import config, logger from platformcode import platformtools @@ -124,13 +124,12 @@ def search_(item): def busqueda(item): logger.info() - cat = [item.extra.replace("tv", "serie")] - new_item = Item() - new_item.extra = item.contentTitle.replace("+", " ") - new_item.category = item.extra + + new_item = 
Item(title=item.contentTitle, text=item.contentTitle.replace("+", " "), mode=item.contentType, + infoLabels=item.infoLabels) from specials import search - return search.do_search(new_item, cat) + return search.channel_search(new_item) def tmdb(item): @@ -479,8 +478,10 @@ def detalles(item): # Si viene de seccion imdb if not item.infoLabels["tmdb_id"]: headers = [['Accept-Language', langi]] - data = httptools.downloadpage("http://www.imdb.com/title/" + item.infoLabels['imdb_id'], headers=headers, - replace_headers=True).data + #data = httptools.downloadpage("http://www.imdb.com/title/" + item.infoLabels['imdb_id'], headers=headers, + # replace_headers=True).data + data = httptools.downloadpage("http://www.imdb.com/title/" + item.infoLabels['imdb_id'], headers=headers).data + pics = scrapertools.find_single_match(data, 'showAllVidsAndPics.*?href=".*?(tt\d+)') # Imágenes imdb if pics: @@ -568,7 +569,7 @@ def detalles(item): post = "u=%s&proxy_formdata_server=nl&allowCookies=1&encodeURL=1&encodePage=0&stripObjects=0&stripJS=0&go=" % urllib.quote( post_url) while True: - response = httptools.downloadpage(url, post, follow_redirects=False) + response = httptools.downloadpage(url, post=post, follow_redirects=False) if response.headers.get("location"): url = response.headers["location"] post = "" @@ -888,10 +889,12 @@ def listado_imdb(item): headers = [['Accept-Language', langi]] if "www.imdb.com" in item.url: - data = httptools.downloadpage(item.url, headers=headers, replace_headers=True).data + #data = httptools.downloadpage(item.url, headers=headers, replace_headers=True).data + data = httptools.downloadpage(item.url, headers=headers).data else: url = 'http://www.imdb.com/search/title?' + item.url - data = httptools.downloadpage(url, headers=headers, replace_headers=True).data + #data = httptools.downloadpage(url, headers=headers, replace_headers=True).data + data = httptools.downloadpage(url, headers=headers).data data = re.sub(r"\n|\r|\t| ", "", data) data = re.sub(r"\s{2}", " ", data) @@ -1155,7 +1158,7 @@ def listado_fa(item): if item.extra == "top": if item.page_fa: post = "from=%s" % item.page_fa - data = httptools.downloadpage(item.url, post).data + data = httptools.downloadpage(item.url, post=post).data if item.total > item.page_fa: item.page_fa += 30 else: @@ -1521,7 +1524,7 @@ def detalles_fa(item): post = "u=%s&proxy_formdata_server=nl&allowCookies=1&encodeURL=1&encodePage=0&stripObjects=0&stripJS=0&go=" % urllib.quote( post_url) while True: - response = httptools.downloadpage(url, post, follow_redirects=False) + response = httptools.downloadpage(url, post=post, follow_redirects=False) if response.headers.get("location"): url = response.headers["location"] post = "" @@ -1708,7 +1711,7 @@ def login_fa(): return True, "" post = "postback=1&rp=&username=%s&password=%s&rememberme=on" % (user, password) - data = httptools.downloadpage("https://m.filmaffinity.com/%s/account.ajax.php?action=login" % langf, post).data + data = httptools.downloadpage("https://m.filmaffinity.com/%s/account.ajax.php?action=login" % langf, post=post).data if "Invalid username" in data: logger.error("Error en el login") @@ -1716,7 +1719,7 @@ def login_fa(): else: post = "name=user-menu&url=http://m.filmaffinity.com/%s/main.php" % langf data = httptools.downloadpage("http://m.filmaffinity.com/%s/tpl.ajax.php?action=getTemplate" % langf, - post).data + post=post).data userid = scrapertools.find_single_match(data, 'id-user=(\d+)') if userid: config.set_setting("userid", userid, "tvmoviedb") @@ -1829,7 +1832,7 @@ def 
acciones_fa(item): url = "http://filmaffinity.com/%s/movieslist.ajax.php" % langf movieid = item.url.rsplit("=", 1)[1] post = "action=%s&listId=%s&movieId=%s&itk=%s" % (item.accion, item.listid, movieid, item.itk) - data = jsontools.load(httptools.downloadpage(url, post).data) + data = jsontools.load(httptools.downloadpage(url, post=post).data) if not item.folder: import xbmc return xbmc.executebuiltin("Container.Refresh") @@ -1871,7 +1874,7 @@ def callback_voto(item, values): item.action = "acciones_fa" movieid = item.url.rsplit("=", 1)[1] post = "id=%s&rating=%s&itk=%s&action=rate" % (movieid, item.voto, item.itk) - data = jsontools.load(httptools.downloadpage("http://filmaffinity.com/%s/ratingajax.php" % langf, post).data) + data = jsontools.load(httptools.downloadpage("http://filmaffinity.com/%s/ratingajax.php" % langf, post=post).data) if not item.folder: import xbmc @@ -2040,7 +2043,8 @@ def fanartv(item): % item.infoLabels['tmdb_id'] else: url = "http://webservice.fanart.tv/v3/tv/%s?api_key=cab16e262d72fea6a6843d679aa10300" % id_search - data = jsontools.load(httptools.downloadpage(url, headers=headers, replace_headers=True).data) + #data = jsontools.load(httptools.downloadpage(url, headers=headers, replace_headers=True).data) + data = jsontools.load(httptools.downloadpage(url, headers=headers).data) if data and not "error message" in data: item.images['fanart.tv'] = {} for key, value in data.items(): @@ -2117,12 +2121,14 @@ def acciones_trakt(item): post = jsontools.dump(item.post) url = "http://api-v2launch.trakt.tv/%s" % item.url - data = httptools.downloadpage(url, post, headers=headers, replace_headers=True) + #data = httptools.downloadpage(url, post, headers=headers, replace_headers=True) + data = httptools.downloadpage(url, post=post, headers=headers) if data.code == "401": trakt_tools.token_trakt(item.clone(extra="renew")) token_auth = config.get_setting("token_trakt", "trakt") headers[3][1] = "Bearer %s" % token_auth - data = httptools.downloadpage(url, post, headers=headers, replace_headers=True) + #data = httptools.downloadpage(url, post, headers=headers, replace_headers=True) + data = httptools.downloadpage(url, post=post, headers=headers) data = data.data if data and "sync" in item.url: @@ -2458,7 +2464,7 @@ def detalles_mal(item): try: title_search = re.sub(r'[^0-9A-z]+', ' ', title_mal) post = "busqueda=%s&button=Search" % urllib.quote(title_search) - data_music = httptools.downloadpage("http://www.freeanimemusic.org/song_search.php", post).data + data_music = httptools.downloadpage("http://www.freeanimemusic.org/song_search.php", post=post).data if not "NO MATCHES IN YOUR SEARCH" in data_music: itemlist.append( item.clone(action="musica_anime", title=config.get_localized_string(70317), @@ -3265,7 +3271,8 @@ def addlist_mal(item): url = "https://myanimelist.net/ownlist/anime/add.json" if item.lista: url = "https://myanimelist.net/ownlist/anime/edit.json" - data = httptools.downloadpage(url, post=jsontools.dump(post), headers=headers_mal, replace_headers=True).data + #data = httptools.downloadpage(url, post=jsontools.dump(post), headers=headers_mal, replace_headers=True).data + data = httptools.downloadpage(url, post=jsontools.dump(post), headers=headers_mal).data item.title = "En tu lista" if config.is_xbmc(): import xbmc diff --git a/specials/videolibrary.py b/specials/videolibrary.py index ec50021f..0422899a 100644 --- a/specials/videolibrary.py +++ b/specials/videolibrary.py @@ -1,16 +1,20 @@ # -*- coding: utf-8 -*- -import os -import traceback +#from builtins 
import str
+import sys
+PY3 = False
+if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
+
+import os, traceback
 
 from channelselector import get_thumb
 from core import filetools
 from core import scrapertools
 from core import videolibrarytools
 from core.item import Item
-from lib import generictools
 from platformcode import config, logger
 from platformcode import platformtools
+from lib import generictools
 
 
 def mainlist(item):
@@ -50,12 +54,13 @@ def list_movies(item, silent=False):
                 xbmc_videolibrary.mark_content_as_watched_on_alfa(nfo_path)
         except:
             logger.error(traceback.format_exc())
-
+
         head_nfo, new_item = videolibrarytools.read_nfo(nfo_path)
         if not new_item:  #Si no ha leído bien el .nfo, pasamos a la siguiente
+            logger.error('.nfo erroneo en ' + str(nfo_path))
            continue
-
+
         if len(new_item.library_urls) > 1:
             multicanal = True
         else:
@@ -176,8 +181,12 @@ def list_tvshows(item):
                 xbmc_videolibrary.mark_content_as_watched_on_alfa(tvshow_path)
         except:
             logger.error(traceback.format_exc())
-
+
         head_nfo, item_tvshow = videolibrarytools.read_nfo(tvshow_path)
+
+        if not item_tvshow:  # If the .nfo could not be read, move on to the next one
+            logger.error('.nfo erroneo en ' + str(tvshow_path))
+            continue
 
         if len(item_tvshow.library_urls) > 1:
             multicanal = True
@@ -309,7 +318,7 @@ def get_seasons(item):
     itemlist = []
     dict_temp = {}
 
-    raiz, carpetas_series, ficheros = filetools.walk(item.path).next()
+    raiz, carpetas_series, ficheros = next(filetools.walk(item.path))
 
     # Menu contextual: Releer tvshow.nfo
     head_nfo, item_nfo = videolibrarytools.read_nfo(item.nfo)
@@ -335,7 +344,7 @@
     # if config.get_setting("no_pile_on_seasons", "videolibrary") == 1 and len(dict_temp_Visible) == 1:  # Sólo si hay una temporada
     # Creamos un item por cada temporada
-    for season, title in dict_temp.items():
+    for season, title in list(dict_temp.items()):
         new_item = item.clone(action="get_episodes", title=title, contentSeason=season,
                               filtrar_season=True)
@@ -373,7 +382,7 @@ def get_episodes(item):
     itemlist = []
 
     # Obtenemos los archivos de los episodios
-    raiz, carpetas_series, ficheros = filetools.walk(item.path).next()
+    raiz, carpetas_series, ficheros = next(filetools.walk(item.path))
 
     # Menu contextual: Releer tvshow.nfo
     head_nfo, item_nfo = videolibrarytools.read_nfo(item.nfo)
@@ -444,22 +453,23 @@ def findvideos(item):
         logger.debug("Unable to search for videos due to lack of parameters")
         return []
 
-    content_title = filter(lambda c: c not in ":*?<>|\/", item.contentTitle.strip().lower())
+    #content_title = [c for c in item.contentTitle.strip().lower() if c not in ":*?<>|\/"]
+    content_title = "".join(c for c in item.contentTitle.strip().lower() if c not in ":*?<>|\/")
 
     if item.contentType == 'movie':
         item.strm_path = filetools.join(videolibrarytools.MOVIES_PATH, item.strm_path)
-        path_dir = os.path.dirname(item.strm_path)
-        item.nfo = filetools.join(path_dir, os.path.basename(path_dir) + ".nfo")
+        path_dir = filetools.dirname(item.strm_path)
+        item.nfo = filetools.join(path_dir, filetools.basename(path_dir) + ".nfo")
     else:
         item.strm_path = filetools.join(videolibrarytools.TVSHOWS_PATH, item.strm_path)
-        path_dir = os.path.dirname(item.strm_path)
+        path_dir = filetools.dirname(item.strm_path)
         item.nfo = filetools.join(path_dir, 'tvshow.nfo')
     for fd in filetools.listdir(path_dir):
         if fd.endswith('.json'):
             contenido, nom_canal = fd[:-6].split('[')
             if (contenido.startswith(content_title) or item.contentType == 'movie') and nom_canal not in \
-                    list_canales.keys():
+                    list(list_canales.keys()):
                 list_canales[nom_canal] = filetools.join(path_dir, fd)
 
     num_canales = len(list_canales)
@@ -467,7 +477,12 @@
     if 'downloads' in list_canales:
         json_path = list_canales['downloads']
         item_json = Item().fromjson(filetools.read(json_path))
-
+        ###### Redirect to the NewPct1.py channel if this is a clone, or to another channel and url after court intervention
+        try:
+            if item_json:
+                item_json, it, overwrite = generictools.redirect_clone_newpct1(item_json)
+        except:
+            logger.error(traceback.format_exc())
         item_json.contentChannel = "local"
         # Soporte para rutas relativas en descargas
         if filetools.is_relative(item_json.url):
@@ -484,7 +499,7 @@
     filtro_canal = ''
     if num_canales > 1 and config.get_setting("ask_channel", "videolibrary"):
-        opciones = [config.get_localized_string(70089) % k.capitalize() for k in list_canales.keys()]
+        opciones = [config.get_localized_string(70089) % k.capitalize() for k in list(list_canales.keys())]
         opciones.insert(0, config.get_localized_string(70083))
         if item_local:
             opciones.append(item_local.title)
@@ -502,26 +517,35 @@
         filtro_canal = opciones[index].replace(config.get_localized_string(70078), "").strip()
 
     itemlist = []
-    for nom_canal, json_path in list_canales.items():
+    for nom_canal, json_path in list(list_canales.items()):
         if filtro_canal and filtro_canal != nom_canal.capitalize():
             continue
-
+
         item_canal = Item()
         item_canal.channel = nom_canal
-
+        ###### Redirect to the NewPct1.py channel if this is a clone, or to another channel and url after court intervention
+        try:
+            item_canal, it, overwrite = generictools.redirect_clone_newpct1(item_canal)
+        except:
+            logger.error(traceback.format_exc())
         nom_canal = item_canal.channel
-
+
         # Importamos el canal de la parte seleccionada
         try:
             if nom_canal == 'community':
                 channel = __import__('specials.%s' % nom_canal, fromlist=["channels.%s" % nom_canal])
             else:
                 channel = __import__('channels.%s' % nom_canal, fromlist=["channels.%s" % nom_canal])
         except ImportError:
-            exec "import channels." + nom_canal + " as channel"
+            exec("import channels." + nom_canal + " as channel")
 
         item_json = Item().fromjson(filetools.read(json_path))
-
+        ###### Redirect to the NewPct1.py channel if this is a clone, or to another channel and url after court intervention
+        try:
+            if item_json:
+                item_json, it, overwrite = generictools.redirect_clone_newpct1(item_json)
+        except:
+            logger.error(traceback.format_exc())
         list_servers = []
 
         try:
@@ -536,12 +561,21 @@
             item_json.contentChannel = 'videolibrary'
             if hasattr(channel, 'findvideos'):
                 from core import servertools
+                if item_json.videolibray_emergency_urls:
+                    del item_json.videolibray_emergency_urls
                 list_servers = getattr(channel, 'findvideos')(item_json)
                 list_servers = servertools.filter_servers(list_servers)
+            elif item_json.action == 'play':
+                from platformcode import platformtools
+                autoplay.set_status(True)
+                item_json.contentChannel = item_json.channel
+                item_json.channel = "videolibrary"
+                platformtools.play_video(item_json)
+                return ''
             else:
                 from core import servertools
                 list_servers = servertools.find_video_items(item_json)
-        except Exception, ex:
+        except Exception as ex:
             logger.error("The findvideos function for the channel %s failed" % nom_canal)
            template = "An exception of type %s occured. 
@@ -553,7 +587,6 @@
     for server in list_servers:
         #if not server.action:  # Ignorar/PERMITIR las etiquetas
         #    continue
-
         server.contentChannel = server.channel
         server.channel = "videolibrary"
         server.nfo = item.nfo
@@ -619,6 +652,7 @@ def play(item):
             v.title = config.get_localized_string(60036) % item.contentEpisodeNumber
             v.thumbnail = item.thumbnail
             v.contentThumbnail = item.thumbnail
+            v.contentChannel = item.contentChannel
 
     return itemlist
 
@@ -662,22 +696,22 @@ def update_tvshow(item):
 
 def verify_playcount_series(item, path):
     logger.info()
-
+
     """
     Este método revisa y repara el PlayCount de una serie que se haya desincronizado de la lista real de episodios
    en su carpeta.  Las entradas de episodios, temporadas o serie que falten, son creado con la marca de "no visto".
    Posteriormente se envia a verificar los contadores de Temporadas y Serie
-
+
     En el retorno envía de estado de True si se actualizado o False si no, normalmente por error.  Con este estado,
    el caller puede actualizar el estado de la opción "verify_playcount" en "videolibrary.py".  La intención de este
    método es la de dar una pasada que repare todos los errores y luego desactivarse.  Se puede volver a activar en
    el menú de Videoteca de Alfa.
-
+
     """
     #logger.debug("item:\n" + item.tostring('\n'))
-
+
     #Si no ha hecho nunca la verificación, lo forzamos
     estado = config.get_setting("verify_playcount", "videolibrary")
     if not estado or estado == False: estado = True    #Si no ha hecho nunca la verificación, lo forzamos
     else: estado = False
-
+
     if item.contentType == 'movie':    #Esto es solo para Series
         return (item, False)
 
     if filetools.exists(path):
@@ -686,9 +720,9 @@
         if not hasattr(it, 'library_playcounts') or not it.library_playcounts: #Si el .nfo no tiene library_playcounts se lo creamos
             logger.error('** It does not have PlayCount')
             it.library_playcounts = {}
-
+
         # Obtenemos los archivos de los episodios
-        raiz, carpetas_series, ficheros = filetools.walk(path).next()
+        raiz, carpetas_series, ficheros = next(filetools.walk(path))
 
         # Crear un item en la lista para cada strm encontrado
         estado_update = False
         for i in ficheros:
@@ -714,7 +748,7 @@
             logger.error('** Estado de actualización: ' + str(estado) + ' / PlayCount: ' + str(it.library_playcounts))
             estado = estado_update
 
         # se comprueba que si todos los episodios de una temporada están marcados, se marque tb la temporada
-        for key, value in it.library_playcounts.iteritems():
+        for key, value in it.library_playcounts.items():
            if key.startswith("season"):
                season = scrapertools.find_single_match(key, 'season (\d+)') #Obtenemos en núm.
de Temporada it = check_season_playcount(it, season) @@ -726,38 +760,38 @@ def verify_playcount_series(item, path): def mark_content_as_watched2(item): logger.info() - # logger.debug("item:\n" + item.tostring('\n')) + # logger.debug("item:\n" + item.tostring('\n')) if filetools.exists(item.nfo): - head_nfo, it = videolibrarytools.read_nfo(item.nfo) - #logger.debug(it) - - if item.contentType == 'movie': - name_file = os.path.splitext(os.path.basename(item.nfo))[0] - + head_nfo, it = videolibrarytools.read_nfo(item.nfo) + #logger.debug(it) + name_file = "" + if item.contentType == 'movie' or item.contentType == 'tvshow': + name_file = os.path.splitext(filetools.basename(item.nfo))[0] + if name_file != 'tvshow' : - it.library_playcounts.update({name_file: item.playcount}) + it.library_playcounts.update({name_file: item.playcount}) if item.contentType == 'episode' or item.contentType == 'tvshow' or item.contentType == 'list' or name_file == 'tvshow': # elif item.contentType == 'episode': - name_file = os.path.splitext(os.path.basename(item.strm_path))[0] + name_file = os.path.splitext(filetools.basename(item.strm_path))[0] num_season = name_file [0] - item.__setattr__('contentType', 'episode') - item.__setattr__('contentSeason', num_season) - #logger.debug(name_file) - + item.__setattr__('contentType', 'episode') + item.__setattr__('contentSeason', num_season) + #logger.debug(name_file) + else: name_file = item.contentTitle - # logger.debug(name_file) + # logger.debug(name_file) if not hasattr(it, 'library_playcounts'): it.library_playcounts = {} - it.library_playcounts.update({name_file: item.playcount}) + it.library_playcounts.update({name_file: item.playcount}) # se comprueba que si todos los episodios de una temporada están marcados, se marque tb la temporada if item.contentType != 'movie': it = check_season_playcount(it, item.contentSeason) - #logger.debug(it) + #logger.debug(it) # Guardamos los cambios en item.nfo if filetools.write(item.nfo, head_nfo + it.tojson()): @@ -785,7 +819,7 @@ def mark_content_as_watched(item): head_nfo, it = videolibrarytools.read_nfo(item.nfo) if item.contentType == 'movie': - name_file = os.path.splitext(os.path.basename(item.nfo))[0] + name_file = os.path.splitext(filetools.basename(item.nfo))[0] elif item.contentType == 'episode': name_file = "%sx%s" % (item.contentSeason, str(item.contentEpisodeNumber).zfill(2)) else: @@ -826,7 +860,7 @@ def mark_season_as_watched(item): it.library_playcounts = {} # Obtenemos los archivos de los episodios - raiz, carpetas_series, ficheros = filetools.walk(item.path).next() + raiz, carpetas_series, ficheros = next(filetools.walk(item.path)) # Marcamos cada uno de los episodios encontrados de esta temporada episodios_marcados = 0 @@ -839,14 +873,14 @@ def mark_season_as_watched(item): season, episode = season_episode.split("x") if int(item.contentSeason) == -1 or int(season) == int(item.contentSeason): - name_file = os.path.splitext(os.path.basename(i))[0] + name_file = os.path.splitext(filetools.basename(i))[0] it.library_playcounts[name_file] = item.playcount episodios_marcados += 1 if episodios_marcados: if int(item.contentSeason) == -1: # Añadimos todas las temporadas al diccionario item.library_playcounts - for k in it.library_playcounts.keys(): + for k in list(it.library_playcounts.keys()): if k.startswith("season"): it.library_playcounts[k] = item.playcount else: @@ -882,7 +916,7 @@ def delete(item): for file in filetools.listdir(_item.path): if file.endswith(".strm") or file.endswith(".nfo") or 
file.endswith(".json")or file.endswith(".torrent"): filetools.remove(filetools.join(_item.path, file)) - raiz, carpeta_serie, ficheros = filetools.walk(_item.path).next() + raiz, carpeta_serie, ficheros = next(filetools.walk(_item.path)) if ficheros == []: filetools.rmdir(_item.path) @@ -908,7 +942,7 @@ def delete(item): if item.multicanal: # Obtener listado de canales if item.dead == '': - opciones = [config.get_localized_string(70086) % k.capitalize() for k in item.library_urls.keys() if + opciones = [config.get_localized_string(70086) % k.capitalize() for k in list(item.library_urls.keys()) if k != "downloads"] opciones.insert(0, heading) @@ -957,7 +991,7 @@ def check_season_playcount(item, season): if season: episodios_temporada = 0 episodios_vistos_temporada = 0 - for key, value in item.library_playcounts.iteritems(): + for key, value in item.library_playcounts.items(): if key.startswith("%sx" % season): episodios_temporada += 1 if value > 0: @@ -978,7 +1012,7 @@ def check_tvshow_playcount(item, season): if season: temporadas_serie = 0 temporadas_vistas_serie = 0 - for key, value in item.library_playcounts.iteritems(): + for key, value in item.library_playcounts.items(): #if key.startswith("season %s" % season): if key.startswith("season" ): temporadas_serie += 1 diff --git a/videolibrary_service.py b/videolibrary_service.py index 0bdbb1fe..46eef1c7 100644 --- a/videolibrary_service.py +++ b/videolibrary_service.py @@ -80,13 +80,13 @@ def update(path, p_dialog, i, t, serie, overwrite): # serie.infoLabels['playcount'] = serie.playcount insertados_total += insertados - except Exception, ex: + except Exception as ex: logger.error("Error al guardar los capitulos de la serie") template = "An exception of type %s occured. Arguments:\n%r" message = template % (type(ex).__name__, ex.args) logger.error(message) - except Exception, ex: + except Exception as ex: logger.error("Error al obtener los episodios de: %s" % serie.show) template = "An exception of type %s occured. Arguments:\n%r" message = template % (type(ex).__name__, ex.args) @@ -247,7 +247,7 @@ def check_for_update(overwrite=True): else: logger.info("No actualiza la videoteca, está desactivado en la configuración de alfa") - except Exception, ex: + except Exception as ex: logger.error("Se ha producido un error al actualizar las series") template = "An exception of type %s occured. Arguments:\n%r" message = template % (type(ex).__name__, ex.args)
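
Both files converge on the same exception-handling change: except Exception, ex is a syntax error on Python 3, while except Exception as ex parses on Python 2.6+ and Python 3 alike, so the "as" form is the only spelling a file meant to run on both interpreters can use. A minimal illustration of the surviving pattern (update_stub is a hypothetical stand-in for the update() body):

    def update_stub():
        try:
            raise ValueError("no episodes found")
        except Exception as ex:
            # Report the exception type and arguments, as the service's logger does.
            template = "An exception of type %s occurred. Arguments:\n%r"
            print(template % (type(ex).__name__, ex.args))

    update_stub()
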