diff --git a/addon.xml b/addon.xml
index 707feb35..b64a18a7 100644
--- a/addon.xml
+++ b/addon.xml
@@ -1,4 +1,4 @@
-
+
@@ -19,9 +19,9 @@
     resources/media/themes/ss/2.png
     resources/media/themes/ss/3.png
-    - Fixato cloudflare (parzialmente)
-    - Ricerca e navigazione più veloce
-    - N.B. Su Android alcunu siti come SerieHD e GuardaSerie.click non funzionano
+    -Nuova ricerca globale
+    -migliorie prestazionali in generale
+    -fix vari ai server
     Naviga velocemente sul web e guarda i contenuti presenti
     [COLOR red]The owners and submitters to this addon do not host or distribute any of the content displayed by these addons nor do they have any affiliation with the content providers.[/COLOR]
     [COLOR yellow]Kodi © is a registered trademark of the XBMC Foundation. We are not connected to or in any other way affiliated with Kodi, Team Kodi, or the XBMC Foundation. Furthermore, any software, addons, or products offered by us will receive no support in official Kodi channels, including the Kodi forums and various social networks.[/COLOR]
diff --git a/channels.json b/channels.json
index a2faac40..1ef4373b 100644
--- a/channels.json
+++ b/channels.json
@@ -1,48 +1,46 @@
 {
     "altadefinizione01": "https://www.altadefinizione01.cc",
     "altadefinizione01_club": "https://www.altadefinizione01.cc",
-    "altadefinizione01_link": "http://altadefinizione01.gift",
+    "altadefinizione01_link": "http://altadefinizione01.cx",
     "altadefinizioneclick": "https://altadefinizione.cloud",
-    "animeforce": "https://ww1.animeforce.org",
+    "animeforce": "https://ww1.animeforce.org",
     "animeleggendari": "https://animepertutti.com",
     "animespace": "http://www.animespace.tv",
     "animestream": "https://www.animeworld.it",
     "animesubita": "http://www.animesubita.org",
     "animetubeita": "http://www.animetubeita.com",
-    "animeworld": "https://www.animeworld.tv",
+    "animeworld": "https://www1.animeworld.tv",
     "casacinema": "https://www.casacinema.uno",
     "casacinemainfo": "https://www.casacinema.info",
     "cb01anime": "https://www.cineblog01.ink",
-    "cinemalibero": "https://www.cinemalibero.best",
+    "cinemalibero": "https://www.cinemalibero.live",
     "documentaristreamingda": "https://documentari-streaming-da.com",
     "dreamsub": "https://www.dreamsub.stream",
    "eurostreaming": "https://eurostreaming.pink",
-    "fastsubita": "http://fastsubita.com",
+    "fastsubita": "https://fastsubita.com",
     "filmgratis": "https://www.filmaltadefinizione.net",
     "filmigratis": "https://filmigratis.org",
-    "filmpertutti": "https://www.filmpertutti.link",
-    "filmsenzalimiticc": "https://www.filmsenzalimiti.press",
+    "filmpertutti": "https://www.filmpertutti.gratis",
+    "filmsenzalimiticc": "https://www.filmsenzalimiti.monster",
     "filmstreaming01": "https://filmstreaming01.com",
-    "guardarefilm": "https://www.guardarefilm.red",
+    "guardarefilm": "https://www.guardarefilm.red",
     "guardaserie_stream": "https://guardaserie.co",
-    "guardaserieclick": "https://www.guardaserie.media",
-    "ilgeniodellostreaming": "https://igds.red",
-    "italiafilm": "https://www.italia-film.pw",
-    "italiaserie": "https://italiaserie.org",
-    "mondolunatico": "http://mondolunatico.org",
-    "mondolunatico2": "https://mondolunatico.org:443/stream",
+    "guardaserieclick": "https://www.guardaserie.media",
+    "ilgeniodellostreaming": "https://igds.red",
+    "italiafilm": "https://www.italia-film.video",
+    "italiaserie": "https://italiaserie.org",
     "mondoserietv": "https://mondoserietv.com",
-    "piratestreaming": "https://www.piratestreaming.top",
+    "netfreex": "https://www.netfreex.online",
+    "piratestreaming": "https://www.piratestreaming.gratis",
     "polpotv": "https://polpo.tv",
-    "seriehd": "https://www.seriehd.moda",
-    "serietvonline": "https://serietvonline.best",
+    "seriehd": "https://www.seriehd.watch",
+    "serietvonline": "https://serietvonline.monster",
     "serietvsubita": "http://serietvsubita.xyz",
-    "serietvu": "https://www.serietvu.club",
+    "serietvu": "https://www.serietvu.link",
     "streamingaltadefinizione": "https://www.popcornstream.best",
     "streamtime": "https://t.me/s/StreamTime",
     "tantifilm": "https://www.tantifilm.eu",
     "toonitalia": "https://toonitalia.org",
     "vedohd": "https://vedohd.video",
-    "vvvvid": "https://www.vvvvid.it",
-    "netfreex": "https://www.netfreex.online"
+    "vvvvid": "https://www.vvvvid.it"
 }
\ No newline at end of file
diff --git a/channels/cinemalibero.py b/channels/cinemalibero.py
index 36b56231..df6ede32 100644
--- a/channels/cinemalibero.py
+++ b/channels/cinemalibero.py
@@ -39,8 +39,7 @@ def mainlist(item):
     tvshow = ['/category/serie-tv/'
               ]
-##    Anime = [(support.typo('Anime', 'bullet bold'),['/category/anime-giapponesi/', 'peliculas', 'anime', 'tvshow'])
-##             ]
+    anime = ['/category/anime-giapponesi/']
 ##    Sport = [(support.typo('Sport', 'bullet bold'), ['/category/sport/', 'peliculas', 'sport', 'tvshow'])
 ##             ]
@@ -53,12 +52,7 @@ def mainlist(item):
 @support.scrape
 def peliculas(item):
-
-    data = httptools.downloadpage(item.url, headers=headers).data
-    data = re.sub('\n|\t', ' ', data)
-    data = re.sub(r'>\s+<', '> <', data)
-    GENERI = scrapertoolsV2.find_multiple_matches(data, r'.*?class="col-md-12[^"]*?">(?P.*?)'
     if item.args == 'newest':
         patron = r'[^>]+>[^>]+>\s[^\)]+)\)">[^>]+>(?P[^<]+)<[^>]+>[^>]+>(?:[^>]+>)?\s?(?P<rating>[\d\.]+)?[^>]+>.+?(?:[ ]\((?P<year>\d{4})\))?<[^>]+>[^>]+>(.?[\d\-x]+\s\(?(?P<lang>[sSuUbBiItTaA\-]+)?\)?\s?(?P<quality>[\w]+)?[|]?\s?(?:[fFiInNeE]+)?\s?\(?(?P<lang2>[sSuUbBiItTaA\-]+)?\)?)?'
@@ -69,49 +63,30 @@ def peliculas(item):
         patronBlock = r'<section id="slider">(?P<block>.*?)</section>'
         patron = r'<a href="(?P<url>(?:https:\/\/.+?\/(?P<title>[^\/]+[a-zA-Z0-9\-]+)(?P<year>\d{4})))/".+?url\((?P<thumb>[^\)]+)\)">'
     elif item.contentType == 'tvshow':
-        action = 'episodios'
-        patron = r'<a href="(?P<url>[^"]+)".+?url\((?P<thumb>[^\)]+)\)">[^>]+>[^>]+>[^>]+>(?:[^>]+>)?\s?(?P<rating>[\d\.]+)?[^>]+>(?P<title>.+?)(?:[ ]\((?P<year>\d{4})\))?<[^>]+>[^>]+>(.?[\d\-x]+\s\(?(?P<lang>[sSuUbBiItTaA\-]+)?\)?\s?(?P<quality>[\w]+)?[|]?\s?(?:[fFiInNeE]+)?\s?\(?(?P<lang2>[sSuUbBiItTaA\-]+)?\)?)?'
-        if item.args == 'anime':
-            anime = True
-            patron = r'<div class="col-lg-3">[^>]+>[^>]+>\s<a href="(?P<url>[^"]+)".+?url\((?P<thumb>[^\)]+)\)">[^>]+>[^>]+>[^>]+>\s?(?P<rating>[\d\.]+)?[^>]+>[^>]+>(?P<title>.+?)\(?(?P<year>\d+)?\)?<[^>]+>[^>]+>(?:.+?[^fFiInNeE]+?\(?(?P<lang>[sSuUbBiItTaA]+)\)?.+?)<'
-        elif item.args == 'update':
+        # patron = r'<a href="(?P<url>[^"]+)".+?url\((?P<thumb>[^\)]+)\)">[^>]+>[^>]+>[^>]+>(?:[^>]+>)?\s?(?P<rating>[\d\.]+)?[^>]+>(?P<title>.+?)(?:[ ]\((?P<year>\d{4})\))?<[^>]+>[^>]+>(.?[\d\-x]+\s\(?(?P<lang>[sSuUbBiItTaA\-]+)?\)?\s?(?P<quality>[\w]+)?[|]?\s?(?:[fFiInNeE]+)?\s?\(?(?P<lang2>[sSuUbBiItTaA\-]+)?\)?)?'
+        # if item.args == 'anime':
+        #     anime = True
+        #     patron = r'<div class="col-lg-3">[^>]+>[^>]+>\s<a href="(?P<url>[^"]+)".+?url\((?P<thumb>[^\)]+)\)">[^>]+>[^>]+>[^>]+>\s?(?P<rating>[\d\.]+)?[^>]+>[^>]+>(?P<title>.+?)\(?(?P<year>\d+)?\)?<[^>]+>[^>]+>(?:.+?[^fFiInNeE]+?\(?(?P<lang>[sSuUbBiItTaA]+)\)?.+?)<'
+        if item.args == 'update':
             patron = r'<a href="(?P<url>[^"]+)".+?url\((?P<thumb>.+?)\)">\s<div class="titolo">(?P<title>.+?)(?: – Serie TV)?(?:\([sSuUbBiItTaA\-]+\))?[ ]?(?P<year>\d{4})?</div>[ ]<div class="genere">(?:[\w]+?\.?\s?[\s|S]?[\dx\-S]+?\s\(?(?P<lang>[iItTaA]+|[sSuUbBiItTaA\-]+)\)?\s?(?P<quality>[HD]+)?|.+?\(?(?P<lang2>[sSuUbBiItTaA\-]+)?\)?</div>)'
             pagination = 25
+        else:
+            patron = r'<a href="(?P<url>[^"]+)"\s*title="(?P<title>[^"\(]+)(?:"|\()(?:(?P<year>\d+)[^"]+)?.*?url\((?P<thumb>[^\)]+)\)(?:.*?<div class="voto">[^>]+>[^>]+>\s*(?P<rating>[^<]+))?.*?<div class="titolo">[^>]+>(?:<div class="genere">[^ ]*(?:\s\d+)?\s*(?:\()?(?P<lang>[^\)< ]+))?'
     else: #search
         patron = r'<div class="col-lg-3">[^>]+>[^>]+>\s<a href="(?P<url>[^"]+)".+?url\((?P<thumb>[^\)]+)\)">[^>]+>[^>]+>[^>]+>(?:[^>]+>)?\s?(?P<rating>[\d\.]+)?[^>]+>(?P<title>.+?)(?:[ ]\((?P<year>\d{4})\))?<[^>]+>[^>]+>(.?[\d\-x]+\s\(?(?P<lang>[sSuUbBiItTaA\-]+)?\)?\s?(?P<quality>[\w]+)?[|]?\s?(?:[fFiInNeE]+)?\s?\(?(?P<lang2>[sSuUbBiItTaA\-]+)?\)?)?'
     def itemHook(item):
+        if 'sub' in item.contentLanguage.lower():
+            item.contentLanguage= 'Sub-ITA'
+            item.title = re.sub('[Ss]ub(?:-)?', item.contentLanguage, item.title)
         if item.lang2:
             if len(item.lang2)<3: item.lang2 = 'ITA'
             item.contentLanguage = item.lang2
             item.title += support.typo(item.lang2, '_ [] color kod')
-
-        dataBlock = httptools.downloadpage(item.url, headers=headers).data
-        genere = scrapertoolsV2.find_single_match(dataBlock, r'rel="category tag">([a-zA-Z0-9]+).+?<')
-
-        if genere.lower() in str(GENERI).lower():
-            item.contentType = 'movie'
-            action = 'findvideos'
-            if item.args == 'update':
-                item.title = item.title.replace('-',' ')
-        elif genere.lower() == 'serie':
-            item.action = 'episodios'
-            item.contentType = 'tvshow'
-        elif genere.lower() == 'anime':
-            blockAnime = scrapertoolsV2.find_single_match(dataBlock, r'<div id="container" class="container">(.+?)<div style="margin-left')
-            if 'stagione' in blockAnime.lower() or 'episodio' in blockAnime.lower() or 'saga' in blockAnime.lower():
-                anime = True
-                item.action = 'episodios'
-                item.contentType = 'tvshow'
-            else:
-                item.contentType = 'movie'
-                item.action = 'findvideos'
-                item.url = scrapertoolsV2.find_single_match(blockAnime, r'<span class="txt_dow">(?:.+?)?Streaming:(?:.+?)?</span>(.*?)<div style="margin-left:')
-        else:
-            # Tutto il resto!!
-            pass
+        if item.args == 'update':
+            item.title = item.title.replace('-', ' ')
         return item
@@ -121,22 +96,29 @@ def peliculas(item):
 @support.scrape
 def episodios(item):
-
+    data=item.data
+    # debug = True
+    support.log('EPISODIOS DATA',data)
     if item.args == 'anime':
         support.log("Anime :", item)
-        blacklist = ['Clipwatching', 'Verystream', 'Easybytez', 'Flix555', 'Cloudvideo']
-        patron = r'(Stagione (?P<season>\d+))?.+?<a target=(?P<url>[^>]+>(?P<title>.+?(?P<episode>\d+)))(?:[:]?.+?)(?:</a></p>|</a><br />)'
-        patronBlock = r'Stagione (?P<season>\d+)</span><br />(?P<block>.*?)(?:<div style="margin-left:|<span class="txt_dow">)'
+        # blacklist = ['Clipwatching', 'Verystream', 'Easybytez', 'Flix555', 'Cloudvideo']
+        patron = r'<a target=(?P<url>[^>]+>(?P<title>Episodio\s(?P<episode>\d+))(?::)?(?:(?P<title2>[^<]+))?.*?(?:<br|</p))'
+        patronBlock = r'(?:Stagione (?P<season>\d+))?(?:</span><br />|</span></p>|strong></p>)(?P<block>.*?)(?:<div style="margin-left|<span class="txt_dow">)'
         item.contentType = 'tvshow'
-        item.contentSerieName = item.fulltitle
+        # item.contentSerieName = item.fulltitle
     else:# item.extra == 'serie':
+        # debug = True
         support.log("Serie :", item)
         patron = r'(?P<episode>\d+(?:×|×)?\d+\-\d+|\d+(?:×|×)\d+)[;]?[ ]?(?:(?P<title>[^<]+)(?P<url>.*?)|(\2[ ])(?:<(\3.*?)))(?:</a><br />|</a></p>)'
-        patronBlock = r'<p><strong>(?P<block>(?:.+?[Ss]tagione.+?(?P<lang>iTA|ITA|Sub-ITA|Sub-iTA))?(?:|.+?|</strong>)(/?:</span>)?</p>.*?</p>)'
+        patronBlock = r'<p><strong>(?:.+?[Ss]tagione\s)?(?:(?P<lang>iTA|ITA|Sub-ITA|Sub-iTA))?.*?</strong>(?P<block>.+?)(?:</span|</p)'
         item.contentType = 'tvshow'
-        item.contentSerieName = item.fulltitle
+        # item.contentSerieName = item.fulltitle
+    def itemHook(item):
+        # support.dbg()
+        if not scrapertoolsV2.find_single_match(item.title, r'(\d+x\d+)'):
+            item.title = re.sub(r'(\d+) -', '1x\\1', item.title)
+        return item
 
-    #debug = True
     return locals()
@@ -145,7 +127,7 @@ def genres(item):
     action='peliculas'
     patron_block=r'<div id="bordobar" class="dropdown-menu(?P<block>.*?)</li>'
-    patron=r'<a class="dropdown-item" href="(?P<url>[^"]+)" title="(?P<title>[A-z]+)"'
+    patronMenu=r'<a class="dropdown-item" href="(?P<url>[^"]+)" title="(?P<title>[A-z]+)"'
     return locals()
@@ -154,7 +136,7 @@ def search(item, texto):
     support.log(item.url,texto)
     text = texto.replace(' ', '+')
     item.url = host + "/?s=" + texto
-    item.contentType = 'episode'
+    item.contentType = 'tv'
     item.args = 'search'
     try:
         return peliculas(item)
@@ -192,9 +174,41 @@ def newest(categoria):
     return itemlist
+def check(item):
+    data = httptools.downloadpage(item.url, headers=headers).data
+    # support.log('cinemalibero DATA=',data)
+    if data:
+        genere = scrapertoolsV2.find_single_match(data, r'rel="category tag">([a-zA-Z0-9]+).+?<')
+        blockAnime = scrapertoolsV2.find_single_match(data, r'<div id="container" class="container">(.+?<div style="margin-left)')
+        support.log('GENRE',genere)
+
+        if blockAnime:
+            support.log('È un ANIME')
+            if 'episodio' in blockAnime.lower() or 'saga' in blockAnime.lower():
+                item.contentType = 'tvshow'
+                item.args = 'anime'
+                item.data = blockAnime
+                return episodios(item)
+            elif scrapertoolsV2.find_single_match(blockAnime,r'\d+(?:×|×)?\d+\-\d+|\d+(?:×|×)\d+'):
+                item.contentType = 'tvshow'
+                item.data = blockAnime
+                return episodios(item)
+            else:
+                support.log('È un ANIME FILM')
+                item.contentType = 'movie'
+                item.url = data
+                return findvideos(item)
+        if genere.lower() == 'serie':
+            item.contentType = 'tvshow'
+            item.data = data
+            return episodios(item)
+        else:
+            support.log('È un FILM')
+            item.contentType = 'movie'
+            item.url = data
+            return findvideos(item)
+
 def findvideos(item):
     support.log('findvideos ->', item)
-    if item.contentType == 'movie' and item.args != 'anime':
-        return support.server(item)
-    else:
-        return support.server(item, data= item.url)
+    item.url = item.url.replace('http://rapidcrypt.net/verys/', '').replace('http://rapidcrypt.net/open/', '') #blocca la ricerca
+    return support.server(item, data= item.url)
diff --git a/channels/mondolunatico.json b/channels/mondolunatico.json
deleted file mode 100644
index 64b897ec..00000000
--- a/channels/mondolunatico.json
+++ /dev/null
@@ -1,37 +0,0 @@
-{
-    "id": "mondolunatico",
-    "name": "Mondo Lunatico",
-    "language": ["ita"],
-    "active": true,
-    "adult": false,
-    "thumbnail": "http://mondolunatico.org/wp-content/uploads/2016/02/images-111.jpg",
-    "banner": "http://mondolunatico.org/wp-content/uploads/2016/02/images-111.jpg",
-    "categories": ["movie"],
-    "settings": [
-        {
-            "id": "include_in_global_search",
-            "type": "bool",
-            "label": "Includi in Ricerca Globale",
-            "default": true,
-            "enabled": true,
-            "visible": true
-        },
-        {
-            "id": "include_in_newest_peliculas",
-            "type": "bool",
-            "label": "Includi in Novità - Film",
-            "default": true,
-            "enabled": true,
-            "visible": true
-        },
-        {
-            "id": "include_in_newest_italiano",
-            "type": "bool",
-            "label": "Includi in Novità - Italiano",
-            "default": true,
-            "enabled": true,
-            "visible": true
-        }
-    ]
-}
-
diff --git a/channels/mondolunatico.py b/channels/mondolunatico.py
deleted file mode 100644
index c6b1d485..00000000
--- a/channels/mondolunatico.py
+++ /dev/null
@@ -1,449 +0,0 @@
-# -*- coding: utf-8 -*-
-# ------------------------------------------------------------
-# Ringraziamo Icarus crew
-# Canale mondolunatico
-# ------------------------------------------------------------
-import os
-import re
-import time
-import urllib
-
-import urlparse
-
-from core import httptools
-from core import scrapertools
-from core import servertools
-from core import tmdb
-from core.item import Item
-from platformcode import config
-from platformcode import logger
-
-__channel__ = "mondolunatico"
-host = config.get_channel_url(__channel__)
-captcha_url = '%s/pass/CaptchaSecurityImages.php?width=100&height=40&characters=5' % host
-
-PERPAGE = 25
-
-
-def mainlist(item):
-    logger.info("kod.mondolunatico mainlist")
-    itemlist = [Item(channel=item.channel,
-                     title="[COLOR azure]Novità[/COLOR]",
-                     extra="movie",
-                     action="peliculas",
-                     url=host,
-                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
-                Item(channel=item.channel,
-                     title="[COLOR azure]Categorie[/COLOR]",
-                     extra="movie",
-                     action="categorias",
-                     url=host,
-                     thumbnail="http://xbmc-repo-ackbarr.googlecode.com/svn/trunk/dev/skin.cirrus%20extended%20v2/extras/moviegenres/All%20Movies%20by%20Genre.png"),
-                Item(channel=item.channel,
-                     title="[COLOR yellow]Cerca...[/COLOR]",
-                     extra="movie",
-                     action="search",
-                     thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
-
-    #Item(channel=item.channel,
-    #     title="[COLOR azure]Serie TV[/COLOR]",
-    #     extra="tvshow",
-    #     action="serietv",
-    #     url="%s/serietv/lista-alfabetica/" % host,
-    #     thumbnail="http://i.imgur.com/rO0ggX2.png"),
-    #Item(channel=item.channel,
-    #     title="[COLOR yellow]Cerca Serie TV...[/COLOR]",
-    #     extra="tvshow",
-    #     action="search",
-    #     thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"),]
-    return itemlist
-
-
-def categorias(item):
-    logger.info("kod.mondolunatico categorias")
-    itemlist = []
-
-    data = httptools.downloadpage(item.url).data
-
-    # Narrow search by selecting only the combo
-    bloque = scrapertools.find_single_match(data, '<option class="level-0" value="7">(.*?)<option class="level-0" value="8">')
-
-    # The categories are the options for the combo
-    patron = '<option class=[^=]+="([^"]+)">(.*?)<'
-    matches = re.compile(patron, re.DOTALL).findall(bloque)
-
-    for scrapedurl, scrapedtitle in matches:
-        scrapedtitle = scrapedtitle.replace(" ", "")
-        scrapedtitle = scrapedtitle.replace("(", "")
-        scrapedtitle = scrapedtitle.replace(")", "")
-        scrapedtitle = scrapedtitle.replace("0", "")
-        scrapedtitle = scrapedtitle.replace("1", "")
-        scrapedtitle = scrapedtitle.replace("2", "")
-        scrapedtitle = scrapedtitle.replace("3", "")
-        scrapedtitle = scrapedtitle.replace("4", "")
-        scrapedtitle = scrapedtitle.replace("5", "")
-        scrapedtitle = scrapedtitle.replace("6", "")
-        scrapedtitle = scrapedtitle.replace("7", "")
-        scrapedtitle = scrapedtitle.replace("8", "")
-        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.replace("9", ""))
-        scrapedurl = host + "/category/film-per-genere/" + scrapedtitle
-        scrapedthumbnail = ""
-        scrapedplot = ""
-        itemlist.append(
-            Item(channel=item.channel,
-                 extra=item.extra,
-                 action="peliculas",
-                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
-                 url=scrapedurl,
-                 thumbnail=scrapedthumbnail,
-                 plot=scrapedplot))
-
-    return itemlist
-
-
-def search(item, texto):
-    logger.info("[mondolunatico.py] " + item.url + " search " + texto)
-    item.url = host + "/?s=" + texto
-    try:
-        if item.extra == "movie":
-            return peliculas(item)
-        if item.extra == "tvshow":
-            item.url = "%s/serietv/lista-alfabetica/" % host
-            return search_serietv(item, texto)
-    # Continua la ricerca in caso di errore
-    except:
-        import sys
-        for line in sys.exc_info():
-            logger.error("%s" % line)
-        return []
-
-
-def peliculas(item):
-    logger.info("kod.mondolunatico peliculas")
-
-    itemlist = []
-
-    # Carica la pagina
-    data = httptools.downloadpage(item.url).data
-
-    # Estrae i contenuti
-    patron = '<div class="boxentry">\s*<a href="([^"]+)"[^>]+>\s*<img src="([^"]+)" alt="([^"]+)"[^>]+>'
-    matches = re.compile(patron, re.DOTALL).findall(data)
-
-    scrapedplot = ""
-    for scrapedurl, scrapedthumbnail, scrapedtitle, in matches:
-        title = scrapertools.decodeHtmlentities(scrapedtitle)
-        itemlist.append(
-            Item(channel=item.channel,
-                 extra=item.extra,
-                 action="findvideos",
-                 contentType="movie",
-                 title=title,
-                 url=scrapedurl,
-                 thumbnail=scrapedthumbnail,
-                 fulltitle=title,
-                 show=title,
-                 plot=scrapedplot,
-                 folder=True))
-
-    # Paginazione
-    patronvideos = '<a class="nextpostslink" rel="next" href="([^"]+)">'
-    matches = re.compile(patronvideos, re.DOTALL).findall(data)
-
-    if len(matches) > 0:
-        scrapedurl = urlparse.urljoin(item.url, matches[0])
-        itemlist.append(
-            Item(channel=item.channel,
-                 extra=item.extra,
-                 action="peliculas",
-                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
-                 url=scrapedurl,
-                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
-                 folder=True))
-
-    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
-    return itemlist
-
-
-def serietv(item):
-    logger.info("kod.mondolunatico serietv")
-
-    itemlist = []
-
-    p = 1
-    if '{}' in item.url:
-        item.url, p = item.url.split('{}')
-        p = int(p)
-
-    # Carica la pagina
-    data = httptools.downloadpage(item.url).data
-    data = scrapertools.find_single_match(data, '<h1>Lista Alfabetica</h1>(.*?)</div>')
-
-    # Estrae i contenuti
-    patron = '<li><a href="([^"]+)">([^<]+)</a></li>'
-    matches = re.compile(patron, re.DOTALL).findall(data)
-
-    scrapedplot = ""
-    scrapedthumbnail = ""
-    for i, (scrapedurl, scrapedtitle) in enumerate(matches):
-        if (p - 1) * PERPAGE > i: continue
-        if i >= p * PERPAGE: break
-        title = scrapertools.decodeHtmlentities(scrapedtitle)
-        itemlist.append(
-            Item(channel=item.channel,
-                 extra=item.extra,
-                 action="episodios",
-                 title=title,
-                 url=scrapedurl,
-                 thumbnail=scrapedthumbnail,
-                 fulltitle=title,
-                 show=title,
-                 plot=scrapedplot,
-                 folder=True))
-
-    if len(matches) >= p * PERPAGE:
-        scrapedurl = item.url + '{}' + str(p + 1)
-        itemlist.append(
-            Item(channel=item.channel,
-                 extra=item.extra,
-                 action="serietv",
-                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
-                 url=scrapedurl,
-                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
-                 folder=True))
-
-    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
-    return itemlist
-
-
-def search_serietv(item, texto):
-    logger.info("kod.mondolunatico serietv")
-
-    texto = urllib.unquote_plus(texto).lower()
-
-    itemlist = []
-
-    # Carica la pagina
-    data = httptools.downloadpage(item.url).data
-    data = scrapertools.find_single_match(data, '<h1>Lista Alfabetica</h1>(.*?)</div>')
-
-    # Estrae i contenuti
-    patron = '<li><a href="([^"]+)">([^<]+)</a></li>'
-    matches = re.compile(patron, re.DOTALL).findall(data)
-
-    scrapedplot = ""
-    scrapedthumbnail = ""
-    for i, (scrapedurl, scrapedtitle) in enumerate(matches):
-        title = scrapertools.decodeHtmlentities(scrapedtitle)
-        if texto not in title.lower(): continue
-        itemlist.append(
-            Item(channel=item.channel,
-                 extra=item.extra,
-                 action="episodios",
-                 title=title,
-                 url=scrapedurl,
-                 thumbnail=scrapedthumbnail,
-                 fulltitle=title,
-                 show=title,
-                 plot=scrapedplot,
-                 folder=True))
-
-    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
-    return itemlist
-
-def episodios(item):
-    logger.info("kod.mondolunatico episodios")
-
-    itemlist = []
-
-    # Carica la pagina
-    data = httptools.downloadpage(item.url).data
-
-    html = []
-
-    for i in range(2):
-        patron = 'href="(https?://www\.keeplinks\.eu/p92/([^"]+))"'
-        matches = re.compile(patron, re.DOTALL).findall(data)
-        for keeplinks, id in matches:
-            _headers = [['Cookie', 'flag[' + id + ']=1; defaults=1; nopopatall=' + str(int(time.time()))],
-                        ['Referer', keeplinks]]
-
-            html.append(httptools.downloadpage(keeplinks, headers=_headers).data)
-
-        patron = r'="(%s/pass/index\.php\?ID=[^"]+)"' % host
-        matches = re.compile(patron, re.DOTALL).findall(data)
-        for scrapedurl in matches:
-            tmp = httptools.downloadpage(scrapedurl).data
-
-            if 'CaptchaSecurityImages.php' in tmp:
-                # Descarga el captcha
-                img_content = httptools.downloadpage(captcha_url).data
-
-                captcha_fname = os.path.join(config.get_data_path(), __channel__ + "captcha.img")
-                with open(captcha_fname, 'wb') as ff:
-                    ff.write(img_content)
-
-                from platformcode import captcha
-
-                keyb = captcha.Keyboard(heading='', captcha=captcha_fname)
-                keyb.doModal()
-                if keyb.isConfirmed():
-                    captcha_text = keyb.getText()
-                    post_data = urllib.urlencode({'submit1': 'Invia', 'security_code': captcha_text})
-                    tmp = httptools.downloadpage(scrapedurl, post=post_data).data
-
-                try:
-                    os.remove(captcha_fname)
-                except:
-                    pass
-
-            html.append(tmp)
-
-    data = '\n'.join(html)
-
-    encontrados = set()
-
-    patron = '<p><a href="([^"]+?)">([^<]+?)</a></p>'
-    matches = re.compile(patron, re.DOTALL).findall(data)
-    for scrapedurl, scrapedtitle in matches:
-        scrapedtitle = scrapedtitle.split('/')[-1]
-        if not scrapedtitle or scrapedtitle in encontrados: continue
-        encontrados.add(scrapedtitle)
-        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
-        itemlist.append(
-            Item(channel=item.channel,
-                 extra=item.extra,
-                 action="findvideos",
-                 title=scrapedtitle,
-                 url=scrapedurl,
-                 thumbnail=item.thumbnail,
-                 fulltitle=item.fulltitle,
-                 show=item.show))
-
-    patron = '<a href="([^"]+)" target="_blank" class="selecttext live">([^<]+)</a>'
-    matches = re.compile(patron, re.DOTALL).findall(data)
-    for scrapedurl, scrapedtitle in matches:
-        scrapedtitle = scrapedtitle.split('/')[-1]
-        if not scrapedtitle or scrapedtitle in encontrados: continue
-        encontrados.add(scrapedtitle)
-        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
-        itemlist.append(
-            Item(channel=item.channel,
-                 extra=item.extra,
-                 action="findvideos",
-                 title=scrapedtitle,
-                 url=scrapedurl,
-                 thumbnail=item.thumbnail,
-                 fulltitle=item.fulltitle,
-                 show=item.show))
-
-    return itemlist
-
-
-def findvideos(item):
-    logger.info("kod.mondolunatico findvideos")
-
-    itemlist = []
-
-    # Carica la pagina
-    data = item.url if item.extra == "tvshow" else httptools.downloadpage(item.url).data
-
-    # Estrae i contenuti
-    patron = r'noshade>(.*?)<br>.*?<a href="(%s/pass/index\.php\?ID=[^"]+)"' % host
-    matches = re.compile(patron, re.DOTALL).findall(data)
-    for scrapedtitle, scrapedurl in matches:
-        scrapedtitle = scrapedtitle.replace('*', '').replace('Streaming', '').strip()
-        title = '%s - [%s]' % (item.title, scrapedtitle)
-        itemlist.append(
-            Item(channel=item.channel,
-                 action="play",
-                 title=title,
-                 url=scrapedurl,
-                 thumbnail=item.thumbnail,
-                 fulltitle=item.fulltitle,
-                 show=item.show,
-                 server='captcha',
-                 folder=False))
-
-    patron = 'href="(%s/stream/links/\d+/)"' % host
-    matches = re.compile(patron, re.DOTALL).findall(data)
-    for scrapedurl in matches:
-        data += httptools.downloadpage(scrapedurl).data
-
-    ### robalo fix obfuscator - start ####
-
-    patron = 'href="(https?://www\.keeplinks\.(?:co|eu)/p92/([^"]+))"'
-    matches = re.compile(patron, re.DOTALL).findall(data)
-    for keeplinks, id in matches:
-        headers = [['Cookie', 'flag[' + id + ']=1; defaults=1; nopopatall=' + str(int(time.time()))],
-                   ['Referer', keeplinks]]
-
-        html = httptools.downloadpage(keeplinks, headers=headers).data
-        data += str(scrapertools.find_multiple_matches(html, '</lable><a href="([^"]+)" target="_blank"'))
-
-    ### robalo fix obfuscator - end ####
-
-    patron = 'src="([^"]+)" frameborder="0"'
-    matches = re.compile(patron, re.DOTALL).findall(data)
-    for scrapedurl in matches:
-        data += httptools.downloadpage(scrapedurl).data
-
-    for videoitem in servertools.find_video_items(data=data):
-        videoitem.title = item.title + videoitem.title
-        videoitem.fulltitle = item.fulltitle
-        videoitem.thumbnail = item.thumbnail
-        videoitem.show = item.show
-        videoitem.plot = item.plot
-        videoitem.channel = item.channel
-        itemlist.append(videoitem)
-
-    return itemlist
-
-
-def play(item):
-    logger.info("kod.mondolunatico play")
-
-    itemlist = []
-
-    if item.server == 'captcha':
-        headers = [['Referer', item.url]]
-
-        # Carica la pagina
-        data = httptools.downloadpage(item.url, headers=headers).data
-
-        if 'CaptchaSecurityImages.php' in data:
-            # Descarga el captcha
-            img_content = httptools.downloadpage(captcha_url, headers=headers).data
-
-            captcha_fname = os.path.join(config.get_data_path(), __channel__ + "captcha.img")
-            with open(captcha_fname, 'wb') as ff:
-                ff.write(img_content)
-
-            from platformcode import captcha
-
-            keyb = captcha.Keyboard(heading='', captcha=captcha_fname)
-            keyb.doModal()
-            if keyb.isConfirmed():
-                captcha_text = keyb.getText()
-                post_data = urllib.urlencode({'submit1': 'Invia', 'security_code': captcha_text})
-                data = httptools.downloadpage(item.url, post=post_data, headers=headers).data
-
-            try:
-                os.remove(captcha_fname)
-            except:
-                pass
-
-        itemlist.extend(servertools.find_video_items(data=data))
-
-        for videoitem in itemlist:
-            videoitem.title = item.title
-            videoitem.fulltitle = item.fulltitle
-            videoitem.thumbnail = item.thumbnail
-            videoitem.show = item.show
-            videoitem.plot = item.plot
-            videoitem.channel = item.channel
-    else:
-        itemlist.append(item)
-
-    return itemlist
-
diff --git a/channels/mondolunatico2.json b/channels/mondolunatico2.json
deleted file mode 100644
index c07479c2..00000000
--- a/channels/mondolunatico2.json
+++ /dev/null
@@ -1,65 +0,0 @@
-{
-    "id": "mondolunatico2",
-    "name": "MondoLunatico 2.0",
-    "language": ["ita"],
-    "active": true,
-    "adult": false,
-    "thumbnail": "mondolunatico2.png",
-    "banner": "mondolunatico2.png",
-    "categories": ["tvshow", "movie", "vos", "anime"],
-    "settings": [
-        {
-            "id": "include_in_global_search",
-            "type": "bool",
-            "label": "Includi in Ricerca Globale",
-            "default": true,
-            "enabled": true,
-            "visible": true
-        },
-        {
-            "id": "include_in_newest_peliculas",
-            "type": "bool",
-            "label": "Includi in Novità - Film",
-            "default": true,
-            "enabled": true,
-            "visible": true
-        },
-        {
-            "id": "include_in_newest_italiano",
-            "type": "bool",
-            "label": "Includi in Novità - Italiano",
-            "default": true,
-            "enabled": true,
-            "visible": true
-        },
-        {
-            "id": "comprueba_enlaces",
-            "type": "bool",
-            "label": "Verifica se i link esistono",
-            "default": false,
-            "enabled": true,
-            "visible": true
-        },
-        {
-            "id": "comprueba_enlaces_num",
-            "type": "list",
-            "label": "Numero di link da verificare",
-            "default": 1,
-            "enabled": true,
-            "visible": "eq(-1,true)",
-            "lvalues": [ "5", "10", "15", "20" ]
-        },
-        {
-            "id": "filter_languages",
-            "type": "list",
-            "label": "Mostra link in lingua...",
-            "default": 0,
-            "enabled": true,
-            "visible": true,
-            "lvalues": [
-                "No filtrar",
-                "Italiano"
-            ]
-        }
-    ]
-}
\ No newline at end of file
diff --git a/channels/mondolunatico2.py b/channels/mondolunatico2.py
deleted file mode 100644
index 15598b02..00000000
--- a/channels/mondolunatico2.py
+++ /dev/null
@@ -1,447 +0,0 @@
-# -*- coding: utf-8 -*-
-# ------------------------------------------------------------
-# Canale per MondoLunatico 2.0
-# ------------------------------------------------------------
-"""
-    WARNING:
-    questo sito è una kakatura di kazz...incredibile!!!
-    Per renderlo compatibile con support ci vuole MOLTA PAZIENZA!!!
-
-    Problemi noti che non superano il test del canale:
-    Nelle pagine dei "FILM", film e serie nel sito sono mischiate,
-    I titoli quindi non saranno nello stesso numero nelle pagine del canale.
-    Alcuni Titoli sono pagine informative e NON devono apparire nel CANALE!!!
-    Controllare:
-    -che nelle varie sezioni dei FILM appaiano solo FILM, stessa cosa per le serie.
-    -che effettivamente vengano tagliati solo gli avvisi.
-
-    Nella TOP FILM non ci sono le voci lingua, anno ( quindi niente TMDB o vari ) e qualità
-    Nella pagina delle serie potreste trovare solo il titolo senza stagione ed episodio
-    Nel menu contestuale potreste non trovare le voci:
-    -"Aggiungi in Videoteca"
-    -"Scarica" di qualunque tipo: stagione, serie, etc...
-
-    AVVISO:
-    i link 1fichier hanno bisogno dei DNS modificati
-    il server woof potrebbe rispondere con "connettore assente"
-    I titoli nella sezione SUB-ITA che non riportano Sub-ITA sono in lingua originale senza sottotitoli
-
-"""
-
-import re
-import urlparse
-import urllib
-import urllib2
-import time
-
-from channelselector import thumb
-from specials import autoplay, filtertools
-from core import scrapertools, httptools, tmdb, servertools, support, scrapertoolsV2
-from core.item import Item
-from platformcode import config, platformtools #,logger
-
-__channel__ = "mondolunatico2"
-host = config.get_channel_url(__channel__)
-headers = [['Referer', host]]
-
-list_servers = ['verystream', 'wstream', 'openload', 'streamango']
-list_quality = ['HD', 'default']
-
-@support.menu
-def mainlist(item):
-    support.log()
-
-    top = [('Film', ['/genre/film-aggiornati/', 'peliculas', 'movies']),
-           ('Al Cinema', ['/genre/al-cinema/', 'peliculas', 'cinema']),
-           ('Ultimi Aggiunti', ['/movies/', 'peliculas', 'latest']),
-           ('Ultime Richieste', ['/genre/richieste/', 'peliculas', 'request']),
-           ('Top ImDb', ['/top-imdb/', 'peliculas', 'top']),
-           ('Sub-ITA', ['/genre/subita/', 'peliculas', 'sub']),
-           ('Serie TV', ['/tvshows/', 'peliculas', '', 'tvshow']),
-           ('Top ImDb', ['/top-imdb/', 'peliculas', 'top', 'tvshow']),
-           ('Search...',['', 'search', 'search'])
-           ]
-
-    return locals()
-
-
-@support.scrape
-def peliculas(item):
-    support.log()
-
-    action = 'findvideos'
-    blacklist = ['Avviso Agli Utenti',]
-
-    if item.args != 'search':
-        if item.contentType == 'movie':
-            action = 'findvideos'
-            patron = r'class="item movies"><div class="poster"><img src="(?P<thumb>[^"]+)"'\
-                     '[^>]+>(?:<div class="rating">)?[^>]+>.+?(?P<rating>\d+.\d+|\d+)'\
-                     '[^>]+>[^>]+>[^>]+>(:?(?P<lang>SubITA)?|(?P<quality>[^<]+)?)?'\
-                     '<.+?href="(?P<url>[^"]+)">[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>'\
-                     '[^>]+>(?P<title>.+?)</a>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>'\
-                     '[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<year>\d+)</span>'\
-                     '[^>]+>(?P<duration>\d+)?.+?<[^>]+>(?:[^>]+>[^>]+>[^>]+>[^>]+>)(?P<plot>.+?)<'
-            if item.args == 'movies':
-                patronBlock = r'<h1>\*Film Aggiornati</h1>(?P<block>.*?)<div class="pagination">'
-            elif item.args == 'cinema':
-                patronBlock = r'<h1>\*Al Cinema</h1>(?P<block>.*?)<div class="pagination">'
-            elif item.args == 'latest':
-                patronBlock = r'<h1>Film</h1>(?P<block>.*?)<div class="pagination">'
-            elif item.args == 'request':
-                patronBlock = r'<h1>\*Richieste</h1>(?P<block>.*?)<div class="pagination">'
-            elif item.args == 'sub':
-                patronBlock = r'<h1>\*SubITA</h1>(?P<block>.*?)<div class="pagination">'
-            elif item.args == 'top':
-                patronBlock = r'<h3>Film</h3>(?P<block>.*?)<div class="top-imdb-list tright">'
-                patron = r'<div class="image"><div class="[^"]+"><a href="(?P<url>[^"]+)"'\
-                         '[^"]+"(?P<thumb>[^"]+)"[^"]+alt="(?P<title>[^"]+)">[^>]+>[^>]+>'\
-                         '[^>]+>[^>]+>[^>]+>[^>]+>(?P<rating>[^<]+)<'
-                pagination = 25
-        else:
-            action = 'episodios'
-            if item.args == 'top':
-                patronBlock = r'<h3>TVShows</h3>(?P<block>.*?)<h2 class="widget-title">'
-                patron = r'<div class="image"><div class="[^"]+"><a href="(?P<url>[^"]+)"'\
-                         '[^"]+"(?P<thumb>[^"]+)"[^"]+alt="(?P<title>[^"]+)">[^>]+>[^>]+>'\
-                         '[^>]+>[^>]+>[^>]+>[^>]+>(?P<rating>[^<]+)<'
-            else:
-                patronBlock = r'<h1>Serie T[v|V]</h1>(?P<block>.*?)<div class="pagination">'
-                patron = r'class="item tvshows">[^>]+>.+?src="(?P<thumb>[^"]+)".+?>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+><a href="(?P<url>[^"]+)">[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<title>[^<]+)</h4>[^>]+>[^>]+> (?:<span class="imdb">IMDb: (?P<rating>\d+.\d+|\d+|N\/A)(?:</span>)?[^>]+>(?P<year>\d+))?<[^>]+>[^>]+>[^>]+>(?:[^>]+>[^>]+>(?P<plot>[^<]+)<)'
-    else:
-        patronBlock = r'<h1>Results found\:.+?</h1>(?P<block>.*?)<div class="sidebar scrolling">'
-        patron = r'<div class="result-item">[^>]+>[^>]+>[^>]+>.+?href="(?P<url>[^"]+)">.+?src="(?P<thumb>[^"]+)" alt="(?P<title>[^"]+)"[^>]+>[^>]+>(?P<type>[^>]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>IMDb (?P<rating>\d+.\d+|\d+)[^>]+>[^>]+>(?P<year>\d+)<[^>]+>[^>]+>[^>]+>[^>]+>(:?[^>]+>[^>]+>)?(?P<plot>[^<]+)<'
-
-    type_content_dict={'movie': ['film'], 'tvshow': ['tv']}
-    type_action_dict={'findvideos': ['film'], 'episodios': ['tv']}
-
-    patronNext = r'<span class="current">.*?href="([^"]+)" class="inactive">'
-
-##    debug = True
-    return locals()
-
-
-def search(item, texto):
-    support.log('s-> '+texto)
-
-    item.url = host + "/?s=" + texto
-
-    try:
-        return peliculas(item)
-    except:
-        import sys
-        for line in sys.exc_info():
-            logger.error("%s" % line)
-        return []
-
-
-def findvideos(item):
-    support.log()
-
-    if item.contentType == "tvshow":
-        ret=support.dooplay_get_links(item, host)
-
-        if ret == []:
-            return episodios(item)
-        else:
-            item.url = ret[0]["url"]
-            return videoplayer(item)
-
-    #if item.args == "movies" or "movie":
-    if item.contentType == 'movie':
-        return videoplayer(item)
-
-    else:
-        return halfplayer(item)
-
-
-def episodios(item):
-    support.log()
-    itemlist = []
-
-    data = httptools.downloadpage(item.url, headers=headers).data
-
-    if "<h2>Stagioni ed Episodi</h2>" in data:
-        # Se è presente direttamente la lista Stagioni con i relativi episodi
-        block = scrapertools.find_single_match(data, r'<h2>Stagioni ed Episodi</h2>(.*?)<div class=\'sbox\'>')
-        patron = r'episodiotitle.*?href=\'([^\']+)\'>([^<]+)'
-        matches = re.compile(patron, re.DOTALL).findall(block)
-        for scrapedurl, scrapedtitle in matches:
-            itemlist.append(
-                Item(channel=__channel__,
-                     action="videoplayer",
-                     contentType=item.contentType,
-                     title=scrapedtitle,
-                     thumbnail=item.thumbnail,
-                     fulltitle=scrapedtitle,
-                     url=scrapedurl,
-                     args=item.args,
-                     show=item.show))
-        support.videolibrary(itemlist, item, 'color kod')
-        return itemlist
-
-    if "File Unico..." in data:
-        #Se è direttamente un file unico
-        return dooplayer(item)
-
-    if "http://mondolunatico.org/stream/wp-content/uploads/2017/08/hand.gif" in data:
-        # Keeplinks
-        return keeplink(item)
-
-    else:
-        # Se nella lista è presente Dooplayer con elenco episodi
-        patron = r'<div class="sp-head" title="Espandi">([^<]+).*?<iframe.*?src="([^"]+)'
-        matches = re.compile(patron, re.DOTALL).findall(data)
-        if len(matches) > 1:
-            for scrapedtitle, scrapedurl in matches:
-                itemlist.append(
-                    Item(channel=__channel__,
-                         action="player_list",
-                         contentType=item.contentType,
-                         title=scrapedtitle,
-                         thumbnail=item.thumbnail,
-                         fulltitle=scrapedtitle,
-                         url=scrapedurl,
-                         show=item.show))
-            return itemlist
-        else:
-            return dooplayer(item)
-
-# ---------------------------------------------------------------------------------------------------------------------------------------------
-
-def player(item):
-    support.log()
-
-    data = httptools.downloadpage(item.url, headers=headers).data
-
-    item.url = scrapertools.find_single_match(item.url, r'([^/]+//[^/]+/[^/]+/[^/]+)')
-
-    if "https://mondolunatico.tk" in data:
-        data = httptools.downloadpage(item.url, headers=headers).data
-        link = scrapertools.find_single_match(data, r'<p><iframe src="(.*?/.*?)[A-Z]')
-        item.url = link
-        return halfplayer(item)
-
-    if "mondolunatico.tk" in item.url:
-        return halfplayer(item)
-
-    #Scarica il link del video integrato nella pagina
-    ret=support.dooplay_get_links(item, host)
-
-    #Prelevo il link del video integrato
-    url = ret[0]["url"]
-
-    data = httptools.downloadpage(url, headers=headers).data
-
-    if "zmdi zmdi-playlist-audio zmdi-hc-3x" in data:
-        return player_list(item)
-
-    else:
-        #Correggo il link con il lin del POST
-        url = url.replace("/v/", "/api/source/").replace("/p/", "/api/source/")
-
-        postData = urllib.urlencode({
-            "r": "",
-            "d": "modolunatico.tk",
-        })
-
-        block = httptools.downloadpage(url, post=postData).data
-
-        patron = r'"file":".*?\/(r[^"]+)'
-        matches = re.compile(patron, re.DOTALL).findall(block)
-
-        itemlist = []
-
-        for scrapedurl in matches:
-            scrapedurl = "https://fvs.io/" + scrapedurl
-            itemlist.append(
-                Item(channel=__channel__,
-                     action="play",
-                     contentType=item.contentType,
-                     title=item.title,
-                     thumbnail=item.thumbnail,
-                     fulltitle=item.title,
-                     url=scrapedurl,
-                     show=item.show))
-
-        autoplay.start(itemlist, item)
-
-        return itemlist
-
-# ---------------------------------------------------------------------------------------------------------------------------------------------
-
-def player_list(item):
-    support.log()
-    itemlist = []
-
-    # Scarico la pagina
-    data = httptools.downloadpage(item.url, headers=headers).data
-
-    if "panel_toggle toggleable" in data:
-        # Prelevo il blocco lista puntate
-        block = scrapertools.find_single_match(data, r'panel_toggle toggleable.*?(<div.*?)<!-- Javascript -->')
-
-        patron = r'data-url="([^"]+)">.*?([A-Z].*?) '
-        matches = re.compile(patron, re.DOTALL).findall(block)
-
-        for scrapedurl, scrapedtitle in matches:
-            scrapedtitle = re.sub('mp4|avi|mkv', '', scrapedtitle)
-            scrapedtitle = re.sub('WebRip|WEBRip|x264|AC3|1080p|DLMux|XviD-|BDRip|BluRay|HD|WEBMux|H264|BDMux|720p|TV|NFMux|DVDRip|DivX|DVDip|Ac3|Dvdrip|Mux|NovaRip|DVD|SAT|Divx', '', scrapedtitle)
-            scrapedtitle = re.sub('ITA|ENG|Italian|SubITA|SUBITA|iTALiAN|LiAN|Ita', '', scrapedtitle)
-            scrapedtitle = re.sub('Pir8|UBi|M L|BEDLAM|REPACK|DD5.1|bloody|SVU', '', scrapedtitle)
-            scrapedtitle = scrapedtitle.replace(".", " ").replace(" - ", " ").replace(" -", "").replace("  ", "")
-            itemlist.append(
-                Item(channel=__channel__,
-                     action="halfplayer",
-                     contentType=item.contentType,
-                     title=scrapedtitle,
-                     thumbnail=item.thumbnail,
-                     fulltitle=scrapedtitle,
-                     url="https://mondolunatico.tk" + scrapedurl,
-                     show=item.show))
-
-        support.videolibrary(itemlist, item, 'color kod')
-
-        return itemlist
-
-    else:
-        return player(item)
-
-# ---------------------------------------------------------------------------------------------------------------------------------------------
-
-def dooplayer(item):
-    support.log()
-    itemlist = []
-
-    url = item.url
-    data = httptools.downloadpage(url, headers=headers).data
-
-    link= scrapertools.find_single_match(data, r'(https://mondolunatico.tk/./[^"]+)')
-
-    data = httptools.downloadpage(link, headers=headers).data
-    if "panel_toggle toggleable" in data:
-        item.url = link
-        return player_list(item)
-
-    # Correggo il link con il lin del POST
-    link1 = link.replace("/v/", "/api/source/").replace("/p/", "/api/source/")
-
-    postData = urllib.urlencode({
-        "r": link,
-        "d": "modolunatico.tk",
-    })
-
-    block = httptools.downloadpage(link1, post=postData).data
-
-    patron = r'"file":".*?\/(r[^"]+)'
-    matches = re.compile(patron, re.DOTALL).findall(block)
-
-    for scrapedurl in matches:
-        scrapedurl = "https://fvs.io/" + scrapedurl
-        itemlist.append(
-            Item(channel=__channel__,
-                 action="play",
-                 contentType=item.contentType,
-                 title=item.title,
-                 thumbnail=item.thumbnail,
-                 fulltitle=item.title,
-                 url=scrapedurl,
-                 show=item.show))
-
-    autoplay.start(itemlist, item)
-    support.videolibrary(itemlist, item, 'color kod')
-
-    return itemlist
-
-# ---------------------------------------------------------------------------------------------------------------------------------------------
-
-def keeplink(item):
-    support.log()
-    itemlist = []
-
-    # Scarico la pagina
-    data = httptools.downloadpage(item.url).data
-
-    # Prendo url keeplink
-    patron = 'href="(https?://www\.keeplinks\.(?:co|eu)/p92/([^"]+))"'
-    matches = re.compile(patron, re.DOTALL).findall(data)
-
-    for keeplinks, id in matches:
-        headers = [['Cookie', 'flag[' + id + ']=1; defaults=1; nopopatall=' + str(int(time.time()))],
-                   ['Referer', keeplinks]]
-
-        html = httptools.downloadpage(keeplinks, headers=headers).data
-        data += str(scrapertools.find_multiple_matches(html, '</lable><a href="([^"]+)" target="_blank"'))
-
-    patron = 'src="([^"]+)" frameborder="0"'
-    matches = re.compile(patron, re.DOTALL).findall(data)
-    for scrapedurl in matches:
-        data += httptools.downloadpage(scrapedurl).data
-
-    for videoitem in servertools.find_video_items(data=data):
-        videoitem.title = item.title + " - " + videoitem.title
-        videoitem.fulltitle = item.fulltitle
-        videoitem.thumbnail = item.thumbnail
-        videoitem.show = item.show
-        videoitem.plot = item.plot
-        videoitem.channel = item.channel
-        itemlist.append(videoitem)
-
-    return itemlist
-
-# ---------------------------------------------------------------------------------------------------------------------------------------------
-
-def videoplayer(item):
-    support.log()
-    itemlist = []
-
-    for link in support.dooplay_get_links(item, host):
-        server = link['server'][:link['server'].find(".")]
-        if server == "":
-            server = "mondolunatico"
-
-        itemlist.append(
-            Item(channel=item.channel,
-                 action="player" if "mondolunatico" in server else "play",
-                 title=server + " [COLOR blue][" + link['title'] + "][/COLOR]",
-                 url=link['url'],
-                 server=server,
-                 fulltitle=item.fulltitle,
-                 thumbnail=item.thumbnail,
-                 show=item.show,
-                 quality=link['title'],
-                 contentType=item.contentType,
-                 folder=False))
-
-    support.videolibrary(itemlist, item, 'color kod', function_level=2)
-
-    autoplay.start(itemlist, item)
-
-    return itemlist
-
-# ---------------------------------------------------------------------------------------------------------------------------------------------
-
-def halfplayer(item):
-    support.log()
-
-    url=item.url
-
-    # Correggo il link con il lin del POST
-    url = url.replace("/v/", "/api/source/").replace("/p/", "/api/source/")
-
-    postData = urllib.urlencode({
-        "r": "",
-        "d": "modolunatico.tk",
-    })
-
-    block = httptools.downloadpage(url, post=postData).data
-
-    patron = r'"file":".*?\/(r[^"]+)'
-    matches = re.compile(patron, re.DOTALL).findall(block)
-
-    for scrapedurl in matches:
-        item.url = "https://fvs.io/" + scrapedurl
-        item.server = ""
-        itemlist = platformtools.play_video(item, force_direct=True, autoplay=True)
-
-    return itemlist
diff --git a/channels/seriehd.py b/channels/seriehd.py
index 66a8cab5..1c56836d 100644
--- a/channels/seriehd.py
+++ b/channels/seriehd.py
@@ -7,7 +7,10 @@
 from core import scrapertoolsV2, httptools, support
 from core.item import Item
 
-host = 'https://www.seriehd.moda'
+__channel__ = 'seriehd'
+host = support.config.get_channel_url(__channel__)
+
+# host = 'https://www.seriehd.watch'
 headers = ''
 
 def findhost():
diff --git a/channels/serietvu.py b/channels/serietvu.py
index a6f65459..35afa4bb 100644
--- a/channels/serietvu.py
+++ b/channels/serietvu.py
@@ -34,7 +34,7 @@ def mainlist(item):
 @support.scrape
 def peliculas(item):
 
-    patronBlock = r'<div class="wrap">\s+<h.>.*?</h.>(?P<block>.*?)<footer>'
+    patronBlock = r'<div class="wrap">\s*<h.>.*?</h.>(?P<block>.*?)<footer>'
 
     if item.args != 'update':
diff --git a/channels/tantifilm.py b/channels/tantifilm.py
index 226334e7..92929123 100644
--- a/channels/tantifilm.py
+++ b/channels/tantifilm.py
@@ -75,30 +75,6 @@ def peliculas(item):
     if item.args != 'all' and item.args != 'search':
         action = 'findvideos' if item.extra == 'movie' else 'episodios'
         item.contentType = 'movie' if item.extra == 'movie' else 'tvshow'
-    else:
-        def itemHook(item):
-            item.action = 'episodios'
-            item.contentType = 'tvshow'
-            data = httptools.downloadpage(item.url, headers=headers).data
-            data = re.sub('\n|\t', ' ', data)
-            data = re.sub(r'>\s+<', '> <', data)
-            check = scrapertoolsV2.find_single_match(data, r'<div class="category-film">\s+<h3>\s+(.*?)\s+</h3>\s+</div>')
-            if 'sub' in check.lower():
-                item.contentLanguage = 'Sub-ITA'
-                item.title += support.typo('Sub-ITA', '_ [] color kod')
-            support.log("CHECK : ", check)
-            if 'anime' in check.lower():
-                support.log('select = ### è una anime ###')
-                item.action = 'episodios'
-                anime = True
-                args='anime'
-            elif 'serie' in check.lower():
-                pass
-            else:
-                support.log('select ELSE = ### è un film ###')
-                item.action = 'findvideos'
-                item.contentType='movie'
-            return item
 
     #debug = True
     return locals()
@@ -107,30 +83,38 @@ def peliculas(item):
 def episodios(item):
     log()
     findhost()
-
-    data_check = httptools.downloadpage(item.url, headers=headers).data
-    data_check = re.sub('\n|\t', ' ', data_check)
-    data_check = re.sub(r'>\s+<', '> <', data_check)
+    if not item.data:
+        data_check = httptools.downloadpage(item.url, headers=headers).data
+        data_check = re.sub('\n|\t', ' ', data_check)
+        data_check = re.sub(r'>\s+<', '> <', data_check)
+    else:
+        data_check = item.data
     patron_check = r'<iframe src="([^"]+)" scrolling="no" frameborder="0" width="626" height="550" allowfullscreen="true" webkitallowfullscreen="true" mozallowfullscreen="true">'
     item.url = scrapertoolsV2.find_single_match(data_check, patron_check)
 
-    patronBlock = r'Episodio<\/a>.*?<ul class="nav navbar-nav">(?P<block>.*?)<\/ul>'
+    patronBlock = r'Stagioni<\/a>.*?<ul class="nav navbar-nav">(?P<block>.*?)<\/ul>'
     patron = r'<a href="(?P<url>[^"]+)"\s*>\s*<i[^>]+><\/i>\s*(?P<episode>\d+)<\/a>'
+    # debug = True
 
-    def itemHook(item):
-        item.contentType = 'tvshow'
-        url_season = item.url.rpartition('/')
-        support.log("ITEM URL: ", url_season[0])
-        seasons = support.match(item, r'<a href="([^"]+)"\s*>\s*<i[^>]+><\/i>\s*(\d+)<\/a>', r'Stagioni<\/a>.*?<ul class="nav navbar-nav">(.*?)<\/ul>', headers=headers, url=url_season[0])[0]
-        for season_url, season in seasons:
-            support.log("ITEM URL2: ", url_season[0],' - ', item.url)
-            if season_url[0] in item.url:
-                item.title = support.typo(season+'x'+unify.remove_format(item.title), 'bold')
-##                item.infoLabels['title'] = item.fulltitle if item.infoLabels['title'] == '' else item.infoLabels['title']
-##                item.infoLabels['tvshowtitle'] = item.fulltitle if item.infoLabels['tvshowtitle'] == '' else item.infoLabels['tvshowtitle']
-                break
+    def itemlistHook(itemlist):
+        retItemlist = []
+        for item in itemlist:
+            item.contentType = 'episode'
 
-        return item
+            season = unify.remove_format(item.title)
+            season_data = httptools.downloadpage(item.url).data
+            season_data = re.sub('\n|\t', ' ', season_data)
+            season_data = re.sub(r'>\s+<', '> <', season_data)
+            block = scrapertoolsV2.find_single_match(season_data, 'Episodi.*?<ul class="nav navbar-nav">(.*?)</ul>')
+            episodes = scrapertoolsV2.find_multiple_matches(block, '<a href="([^"]+)"\s*>\s*<i[^>]+><\/i>\s*(\d+)<\/a>')
+            for url, episode in episodes:
+                i = item.clone()
+                i.action = 'findvideos'
+                i.url = url
+                i.title = str(season) + 'x' + str(episode)
+                retItemlist.append(i)
+
+        return retItemlist
 
     #debug = True
     return locals()
@@ -262,7 +246,23 @@ def findvideos(item):
##        data = item.url
##    else:
##        data = httptools.downloadpage(item.url, headers=headers).data
-    data = item.url if item.contentType == "episode" else httptools.downloadpage(item.url, headers=headers).data
+    data = httptools.downloadpage(item.url, headers=headers).data
+
+    data = re.sub('\n|\t', ' ', data)
+    data = re.sub(r'>\s+<', '> <', data)
+    check = scrapertoolsV2.find_single_match(data, r'<div class="category-film">\s+<h3>\s+(.*?)\s+</h3>\s+</div>')
+    if 'sub' in check.lower():
+        item.contentLanguage = 'Sub-ITA'
+    support.log("CHECK : ", check)
+    if 'anime' in check.lower():
+        item.contentType = 'tvshow'
+        item.data = data
+        support.log('select = ### è una anime ###')
+        return episodios(item)
+    elif 'serie' in check.lower():
+        item.contentType = 'tvshow'
+        item.data = data
+        return episodios(item)
 
     if 'protectlink' in data:
         urls = scrapertoolsV2.find_multiple_matches(data, r'<iframe src="[^=]+=(.*?)"')
         for url in urls:
             url = url.decode('base64')
             # tiro via l'ultimo carattere perchè non c'entra
             url, c = unshorten_only(url)
-            data += '\t' + url
-            support.log("SONO QUI: ", url)
-    if 'nodmca' in data:
-        page = httptools.downloadpage(url, headers=headers).data
-        url += isturl.add('\t' + scrapertoolsV2.find_single_match(page,'<meta name="og:url" content="([^=]+)">'))
-
-    return support.server(item, data=listurl)#, headers=headers)
+            if 'nodmca' in url:
+                page = httptools.downloadpage(url, headers=headers).data
+                url = '\t' + scrapertoolsV2.find_single_match(page,'<meta name="og:url" content="([^=]+)">')
+            if url:
+                listurl.add(url)
+    support.dbg()
+    return support.server(item, data=listurl if listurl else data)#, headers=headers)
 # return itemlist
 
##def findvideos(item):
diff --git a/channelselector.py b/channelselector.py
index d7d0c4bd..13d7d22d 100644
--- a/channelselector.py
+++ b/channelselector.py
@@ -36,9 +36,7 @@ def getmainlist(view="thumb_"):
     if addon.getSetting('enable_search_menu') == "true":
         itemlist.append(Item(title=config.get_localized_string(30103), channel="search", path='special', action="mainlist", thumbnail=get_thumb("search.png", view),
-                             category=config.get_localized_string(30119), viewmode="list",
-                             context=[{"title": config.get_localized_string(70286), "channel": "search", "action": "opciones",
-                                       "goto": True}]))
+                             category=config.get_localized_string(30119), viewmode="list"))
 
     if addon.getSetting('enable_onair_menu') == "true":
         itemlist.append(Item(channel="filmontv", action="mainlist", title=config.get_localized_string(50001),
@@ -274,7 +272,10 @@ def get_thumb(thumb_name, view="thumb_", auto=False):
     if config.get_setting('enable_custom_theme') and config.get_setting('custom_theme') and os.path.isfile(config.get_setting('custom_theme') + view + thumb_name):
         media_path = config.get_setting('custom_theme')
 
-    thumbnail = os.path.join(media_path, view + thumb_name)
+    if thumb_name.startswith('http'):
+        thumbnail = thumb_name
+    else:
+        thumbnail = os.path.join(media_path, view + thumb_name)
     if 'http' in thumbnail:
         thumbnail = thumbnail.replace('\\','/')
     return thumbnail
diff --git a/core/channeltools.py b/core/channeltools.py
index abbfea73..bc4c0f6f 100644
--- a/core/channeltools.py
+++ b/core/channeltools.py
@@ -14,7 +14,6 @@ dict_channels_parameters = dict()
 
 remote_path = 'https://raw.githubusercontent.com/kodiondemand/media/master/'
 
-
 def is_adult(channel_name):
     logger.info("channel_name=" + channel_name)
     channel_parameters = get_channel_parameters(channel_name)
@@ -201,80 +200,68 @@ def get_lang(channel_name):
     return list_language
 
 def get_default_settings(channel_name):
-    import filetools, inspect
-
-    # Check if it is a real channel
-    try:
-        channel = __import__('channels.%s' % channel_name, fromlist=["channels.%s" % channel_name])
-    except:
-        return get_channel_json(channel_name).get('settings', list())
-
-    list_language = get_lang(channel_name)
-
-    # Check if the automatic renumbering function exists
-    renumber = False
-    if 'episodios' in dir(channel):
-        from core import scrapertoolsV2
-        if scrapertoolsV2.find_single_match(inspect.getsource(channel), r'(anime\s*=\s*True)') \
-                or scrapertoolsV2.find_single_match(inspect.getsource(channel), r'(autorenumber\()'):
-            renumber = True
-
-    # Collects configurations
-    channel_language = categories = get_channel_json(channel_name).get('language', list())
-    channel_controls = get_channel_json(channel_name).get('settings', list())
+    import filetools
 
     default_path = filetools.join(config.get_runtime_path(), 'default_channel_settings' + '.json')
-    default_controls = jsontools.load(filetools.read(default_path)).get('settings', list())
-    default_controls_renumber = jsontools.load(filetools.read(default_path)).get('renumber', list())
-    categories = get_channel_json(channel_name).get('categories', list())
-    not_active = get_channel_json(channel_name).get('not_active', list())
-    default_off = get_channel_json(channel_name).get('default_off', list())
+    default_file = jsontools.load(filetools.read(default_path))
 
-    # Apply default configurations if they do not exist
-    for control in default_controls:
-        if control['id'] not in str(channel_controls):
-            if 'include_in_newest' in control['id'] and 'include_in_newest' not in not_active and control['id'] not in not_active:
-                label = control['id'].split('_')
-                label = label[-1]
-                if label == 'peliculas':
-                    if 'movie' in categories:
-                        control['label'] = config.get_localized_string(70727) + ' - ' + config.get_localized_string(30122)
-                        control['default'] = False if ('include_in_newest' in default_off) or ('include_in_newest_peliculas' in default_off) else True
-                        channel_controls.append(control)
-                    else: pass
-                elif label == 'series':
-                    if 'tvshow' in categories:
-                        control['label'] = config.get_localized_string(70727) + ' - ' + config.get_localized_string(30123)
-                        control['default'] = False if ('include_in_newest' in default_off) or ('include_in_newest_series' in default_off) else True
-                        channel_controls.append(control)
-                    else: pass
-                elif label == 'anime':
-                    if 'anime' in categories:
-                        control['label'] = config.get_localized_string(70727) + ' - ' + config.get_localized_string(30124)
-                        control['default'] = False if ('include_in_newest' in default_off) or ('include_in_newest_anime' in default_off) else True
-                        channel_controls.append(control)
-                    else: pass
+    channel_path = filetools.join(config.get_runtime_path(),'channels',channel_name + '.json')
+    adult_path = filetools.join(config.get_runtime_path(),'channels', 'porn', channel_name + '.json')
 
-                else:
-                    control['label'] = config.get_localized_string(70727) + ' - ' + label.capitalize()
-                    control['default'] = control['default'] if control['id'] not in default_off else False
+    # from core.support import dbg; dbg()
+    if os.path.exists(channel_path) or os.path.exists(adult_path):
+        default_controls = default_file['settings']
+        default_controls_renumber = default_file['renumber']
+        channel_json = get_channel_json(channel_name)
+
+        # Collects configurations
+        channel_language = channel_json['language']
+        channel_controls = channel_json['settings']
+        categories = channel_json['categories']
+        not_active = channel_json['not_active'] if channel_json.has_key('not_active') else []
+        default_off = channel_json['default_off'] if channel_json.has_key('default_off') else []
+
+        # Apply default configurations if they do not exist
+        for control in default_controls:
+            if control['id'] not in str(channel_controls):
+                if 'include_in_newest' in control['id'] and 'include_in_newest' not in not_active and control['id'] not in not_active:
+                    label = control['id'].split('_')
+                    label = label[-1]
+                    if label == 'peliculas':
+                        if 'movie' in categories:
+                            control['label'] = config.get_localized_string(70727) + ' - ' + config.get_localized_string(30122)
+                            control['default'] = False if ('include_in_newest' in default_off) or ('include_in_newest_peliculas' in default_off) else True
+                            channel_controls.append(control)
+                        else: pass
+                    elif label == 'series':
+                        if 'tvshow' in categories:
+                            control['label'] = config.get_localized_string(70727) + ' - ' + config.get_localized_string(30123)
+                            control['default'] = False if ('include_in_newest' in default_off) or ('include_in_newest_series' in default_off) else True
+                            channel_controls.append(control)
+                        else: pass
+                    elif label == 'anime':
+                        if 'anime' in categories:
+                            control['label'] = config.get_localized_string(70727) + ' - ' + config.get_localized_string(30124)
+                            control['default'] = False if ('include_in_newest' in default_off) or ('include_in_newest_anime' in default_off) else True
+                            channel_controls.append(control)
+                        else: pass
+
+                    else:
+                        control['label'] = config.get_localized_string(70727) + ' - ' + label.capitalize()
+                        control['default'] = control['default'] if control['id'] not in default_off else False
                         channel_controls.append(control)
 
-            # elif control['id'] == 'filter_languages':
-            #     if len(channel_language) > 1:
-            #         control['lvalues'] = list_language
-            #         channel_controls.append(control)
-            #     else: pass
-
-            elif control['id'] not in not_active and 'include_in_newest' not in control['id']:
-                if type(control['default']) == bool:
-                    control['default'] = control['default'] if control['id'] not in default_off else False
-                channel_controls.append(control)
-
-    if renumber:
-        for control in default_controls_renumber:
-            if control['id'] not in str(channel_controls):
-                channel_controls.append(control)
-            else: pass
+                elif control['id'] not in not_active and 'include_in_newest' not in control['id']:
+                    if type(control['default']) == bool:
+                        control['default'] = control['default'] if control['id'] not in default_off else False
+                    channel_controls.append(control)
+
+        if 'anime' in categories:
+            for control in default_controls_renumber:
+                if control['id'] not in str(channel_controls):
+                    channel_controls.append(control)
+                else: pass
+    else:
+        return get_channel_json(channel_name).get('settings', list())
 
     return channel_controls
diff --git a/core/httptools.py b/core/httptools.py
index 00369812..136cc946 100755
--- a/core/httptools.py
+++ b/core/httptools.py
@@ -395,6 +395,8 @@ def downloadpage(url, **opt):
             @type ignore_response_code: bool
     @return: Result of the petition
     @rtype: HTTPResponse
+            @param use_requests: Use requests.session()
+            @type: bool
 
     Parameter Type Description
     -------------------------------------------------- -------------------------------------------------- ------------
@@ -409,15 +411,25 @@ def downloadpage(url, **opt):
     """
     load_cookies()
 
-    if scrapertoolsV2.get_domain_from_url(url) in ['www.seriehd.moda', 'wstream.video', 'www.guardaserie.media', 'akvideo.stream','www.piratestreaming.top']: # cloudflare urls
-        if opt.get('session', False):
-            session = opt['session'] # same session to speed up search
-        else:
-            from lib import cloudscraper
-            session = cloudscraper.create_scraper()
-    else:
+    # if scrapertoolsV2.get_domain_from_url(url) in ['www.seriehd.moda', 'wstream.video', 'www.guardaserie.media', 'akvideo.stream','www.piratestreaming.top']: # cloudflare urls
+    #     if opt.get('session', False):
+    #         session = opt['session'] # same session to speed up search
+    #     else:
+    #         from lib import cloudscraper
+    #         session = cloudscraper.create_scraper()
+    # else:
+    #     from lib import requests
+    #     session = requests.session()
+
+    if opt.get('session', False):
+        session = opt['session'] # same session to speed up search
+        logger.info('same session')
+    elif opt.get('use_requests', False):
         from lib import requests
         session = requests.session()
+    else:
+        from lib import cloudscraper
+        session = cloudscraper.create_scraper()
 
     # Headers by default, if nothing is specified
     req_headers = default_headers.copy()
@@ -445,7 +457,8 @@ def downloadpage(url, **opt):
     file_name = ''
     opt['proxy_retries_counter'] += 1
 
-    # session.verify = False
+    session.verify = opt.get('verify', True)
+
     if opt.get('cookies', True):
         session.cookies = cj
     session.headers.update(req_headers)
diff --git a/core/support.py b/core/support.py
index 606ea6ad..54941b8c 100755
--- a/core/support.py
+++ b/core/support.py
@@ -165,7 +165,6 @@ def scrapeLang(scraped, lang, longtitle):
     if not language: language = lang
     if language: longtitle += typo(language, '_ [] color kod')
-
     return language, longtitle
 
 def cleantitle(title):
@@ -221,8 +220,8 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t
     if scraped['season']:
         stagione = scraped['season']
         episode = scraped['season'] +'x'+ scraped['episode']
-    elif stagione:
-        episode = stagione +'x'+ scraped['episode']
+    elif item.season:
+        episode = item.season +'x'+ scraped['episode']
     elif item.contentType == 'tvshow' and (scraped['episode'] == '' and scraped['season'] == '' and stagione == ''):
         item.news = 'season_completed'
         episode = ''
@@ -357,8 +356,6 @@ def scrape(func):
     search = args['search'] if 'search' in args else ''
     blacklist = args['blacklist'] if 'blacklist' in args else []
     data = args['data'] if 'data' in args else ''
-    if not data and item.preloadedData:
-        data = item.preloadedData
     patron = args['patron'] if 'patron' in args else args['patronMenu'] if 'patronMenu' in args else ''
     if 'headers' in args:
         headers = args['headers']
@@ -371,6 +368,7 @@ def scrape(func):
     typeActionDict = args['typeActionDict'] if 'typeActionDict' in args else {}
     typeContentDict = args['typeContentDict'] if 'typeContentDict' in args else {}
     debug = args['debug'] if 'debug' in args else False
+    debugBlock = args['debugBlock'] if 'debugBlock' in args else False
     if 'pagination' in args and inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes']: pagination = args['pagination'] if args['pagination'] else 20
     else: pagination = ''
     lang = args['deflang'] if 'deflang' in args else ''
@@ -385,9 +383,14 @@ def scrape(func):
     # replace all ' with " and eliminate newline, so we don't need to worry about
     if patronBlock:
+        if debugBlock:
+            regexDbg(item, patronBlock, headers, data)
         blocks = scrapertoolsV2.find_multiple_matches_groups(data, patronBlock)
         block = ""
         for bl in blocks:
+            # log(len(blocks),bl)
+            if 'season' in bl and bl['season']:
+                item.season = bl['season']
             blockItemlist, blockMatches = scrapeBlock(item, args, bl['block'], patron, headers, action, pagination, debug, typeContentDict, typeActionDict, blacklist, search, pag, function, lang)
             for it in blockItemlist:
diff --git a/core/tmdb.py b/core/tmdb.py
index 60210b3e..65c6ff0b 100644
--- a/core/tmdb.py
+++ b/core/tmdb.py
@@ -4,6 +4,7 @@ import copy
 import re
 import sqlite3
 import time
+import urllib
 
 import xbmcaddon
 
@@ -156,9 +157,11 @@ def cache_response(fn):
             result = fn(*args)
 
         else:
-            conn = sqlite3.connect(fname)
+            conn = sqlite3.connect(fname, timeout=15)
             c = conn.cursor()
-            url_base64 = base64.b64encode(args[0])
+            url = re.sub('&year=-', '', args[0])
+            logger.error('la url %s' % url)
+            url_base64 = base64.b64encode(url)
             c.execute("SELECT response, added FROM tmdb_cache WHERE url=?", (url_base64,))
             row = c.fetchone()
@@ -189,7 +192,7 @@ def cache_response(fn):
     return wrapper
 
-def set_infoLabels(source, seekTmdb=True, idioma_busqueda=def_lang):
+def set_infoLabels(source, seekTmdb=True, idioma_busqueda=def_lang, forced=False):
     """
     Dependiendo del tipo de dato de source obtiene y fija (item.infoLabels) los datos
extras de una o varias series, capitulos o peliculas. @@ -205,6 +208,9 @@ def set_infoLabels(source, seekTmdb=True, idioma_busqueda=def_lang): @rtype: int, list """ + if not config.get_setting('tmdb_active') and not forced: + return + start_time = time.time() if type(source) == list: ret = set_infoLabels_itemlist(source, seekTmdb, idioma_busqueda) @@ -215,7 +221,7 @@ def set_infoLabels(source, seekTmdb=True, idioma_busqueda=def_lang): return ret -def set_infoLabels_itemlist(item_list, seekTmdb=False, idioma_busqueda=def_lang): +def set_infoLabels_itemlist(item_list, seekTmdb=False, idioma_busqueda=def_lang, forced=False): """ De manera concurrente, obtiene los datos de los items incluidos en la lista item_list. @@ -236,6 +242,8 @@ def set_infoLabels_itemlist(item_list, seekTmdb=False, idioma_busqueda=def_lang) negativo en caso contrario. @rtype: list """ + if not config.get_setting('tmdb_active') and not forced: + return import threading threads_num = config.get_setting("tmdb_threads", default=20) @@ -555,20 +563,22 @@ def completar_codigos(item): item.infoLabels['url_scraper'].append(url_scraper) -def discovery(item): - if item.search_type == 'discover': +def discovery(item, dict_=False, cast=False): + from core.item import Item + + if dict_: + listado = Tmdb(discover = dict_, cast=cast) + + elif item.search_type == 'discover': listado = Tmdb(discover={'url':'discover/%s' % item.type, 'with_genres':item.list_type, 'language':def_lang, 'page':item.page}) elif item.search_type == 'list': if item.page == '': item.page = '1' - listado = Tmdb(list={'url': item.list_type, 'language':def_lang, 'page':item.page}) + listado = Tmdb(discover={'url': item.list_type, 'language':def_lang, 'page':item.page}) - logger.debug(listado.get_list_resultados()) - result = listado.get_list_resultados() - - return result + return listado def get_genres(type): lang = def_lang @@ -788,6 +798,7 @@ class Tmdb(object): def __init__(self, **kwargs): self.page = kwargs.get('page', 1) self.index_results = 0 + self.cast = kwargs.get('cast', False) self.results = [] self.result = ResultDictDefault() self.total_pages = 0 @@ -804,7 +815,6 @@ class Tmdb(object): self.busqueda_year = kwargs.get('year', '') self.busqueda_filtro = kwargs.get('filtro', {}) self.discover = kwargs.get('discover', {}) - self.list = kwargs.get('list', {}) # Reellenar diccionario de generos si es necesario if (self.busqueda_tipo == 'movie' or self.busqueda_tipo == "tv") and \ @@ -836,9 +846,6 @@ class Tmdb(object): elif self.discover: self.__discover() - elif self.list: - self.__list() - else: logger.debug("Creado objeto vacio") @@ -847,20 +854,19 @@ class Tmdb(object): def get_json(url): try: - result = httptools.downloadpage(url, cookies=False) + result = httptools.downloadpage(url, cookies=False, ignore_response_code=True) res_headers = result.headers - # logger.debug("res_headers es %s" % res_headers) dict_data = jsontools.load(result.data) - # logger.debug("result_data es %s" % dict_data) + #logger.debug("result_data es %s" % dict_data) if "status_code" in dict_data: - logger.debug("\nError de tmdb: %s %s" % (dict_data["status_code"], dict_data["status_message"])) + #logger.debug("\nError de tmdb: %s %s" % (dict_data["status_code"], dict_data["status_message"])) if dict_data["status_code"] == 25: while "status_code" in dict_data and dict_data["status_code"] == 25: wait = int(res_headers['retry-after']) - logger.debug("Limite alcanzado, esperamos para volver a llamar en ...%s" % wait) + #logger.error("Limite alcanzado, esperamos para volver a 
llamar en ...%s" % wait) time.sleep(wait) # logger.debug("RE Llamada #%s" % d) result = httptools.downloadpage(url, cookies=False) @@ -941,6 +947,8 @@ class Tmdb(object): self.result = ResultDictDefault() results = [] total_results = 0 + text_simple = self.busqueda_texto.lower() + text_quote = urllib.quote(text_simple) total_pages = 0 buscando = "" @@ -948,7 +956,7 @@ class Tmdb(object): # http://api.themoviedb.org/3/search/movie?api_key=a1ab8b8669da03637a4b98fa39c39228&query=superman&language=es # &include_adult=false&page=1 url = ('http://api.themoviedb.org/3/search/%s?api_key=a1ab8b8669da03637a4b98fa39c39228&query=%s&language=%s' - '&include_adult=%s&page=%s' % (self.busqueda_tipo, self.busqueda_texto.replace(' ', '%20'), + '&include_adult=%s&page=%s' % (self.busqueda_tipo, text_quote.replace(' ', '%20'), self.busqueda_idioma, self.busqueda_include_adult, page)) if self.busqueda_year: @@ -993,64 +1001,6 @@ class Tmdb(object): logger.error(msg) return 0 - def __list(self, index_results=0): - self.result = ResultDictDefault() - results = [] - total_results = 0 - total_pages = 0 - - # Ejemplo self.discover: {'url': 'movie/', 'with_cast': '1'} - # url: Método de la api a ejecutar - # resto de claves: Parámetros de la búsqueda concatenados a la url - type_search = self.list.get('url', '') - if type_search: - params = [] - for key, value in self.list.items(): - if key != "url": - params.append("&"+key + "=" + str(value)) - # http://api.themoviedb.org/3/movie/popolar?api_key=a1ab8b8669da03637a4b98fa39c39228&&language=es - url = ('http://api.themoviedb.org/3/%s?api_key=a1ab8b8669da03637a4b98fa39c39228%s' - % (type_search, ''.join(params))) - - logger.info("[Tmdb.py] Buscando %s:\n%s" % (type_search, url)) - resultado = self.get_json(url) - - total_results = resultado.get("total_results", -1) - total_pages = resultado.get("total_pages", 1) - - if total_results > 0: - results = resultado["results"] - if self.busqueda_filtro and results: - # TODO documentar esta parte - for key, value in dict(self.busqueda_filtro).items(): - for r in results[:]: - if key not in r or r[key] != value: - results.remove(r) - total_results -= 1 - elif total_results == -1: - results = resultado - - if index_results >= len(results): - logger.error( - "La busqueda de '%s' no dio %s resultados" % (type_search, index_results)) - return 0 - - # Retornamos el numero de resultados de esta pagina - if results: - self.results = results - self.total_results = total_results - self.total_pages = total_pages - if total_results > 0: - self.result = ResultDictDefault(self.results[index_results]) - else: - self.result = results - return len(self.results) - else: - # No hay resultados de la busqueda - logger.error("La busqueda de '%s' no dio resultados" % type_search) - return 0 - - def __discover(self, index_results=0): self.result = ResultDictDefault() @@ -1077,8 +1027,12 @@ class Tmdb(object): total_results = resultado.get("total_results", -1) total_pages = resultado.get("total_pages", 1) - if total_results > 0: - results = resultado["results"] + if total_results > 0 or self.cast: + if self.cast: + results = resultado[self.cast] + total_results = len(results) + else: + results = resultado["results"] if self.busqueda_filtro and results: # TODO documentar esta parte for key, value in dict(self.busqueda_filtro).items(): @@ -1101,6 +1055,7 @@ class Tmdb(object): self.total_pages = total_pages if total_results > 0: self.result = ResultDictDefault(self.results[index_results]) + else: self.result = results return len(self.results) @@ 
-1147,8 +1102,10 @@ class Tmdb(object): result['thumbnail'] = self.get_poster(size="w300") result['fanart'] = self.get_backdrop() + res.append(result) cr += 1 + if cr >= num_result: return res except: @@ -1370,7 +1327,7 @@ class Tmdb(object): msg += "\nError de tmdb: %s %s" % ( self.temporada[numtemporada]["status_code"], self.temporada[numtemporada]["status_message"]) logger.debug(msg) - self.temporada[numtemporada] = {"episodes": {}} + self.temporada[numtemporada] = {} return self.temporada[numtemporada] diff --git a/default.py b/default.py index 6d810948..314f38af 100644 --- a/default.py +++ b/default.py @@ -8,6 +8,8 @@ import sys import xbmc from platformcode import config, logger +import ssl +logger.info(ssl.OPENSSL_VERSION) logger.info("init...") diff --git a/lib/cloudscraper/__init__.py b/lib/cloudscraper/__init__.py index ee27544a..541b32cf 100644 --- a/lib/cloudscraper/__init__.py +++ b/lib/cloudscraper/__init__.py @@ -1,3 +1,4 @@ +# https://github.com/VeNoMouS/cloudscraper import logging import re import sys @@ -37,7 +38,7 @@ except ImportError: # ------------------------------------------------------------------------------- # -__version__ = '1.2.15' +__version__ = '1.2.16' # ------------------------------------------------------------------------------- # @@ -85,7 +86,7 @@ class CloudScraper(Session): self.debug = kwargs.pop('debug', False) self.delay = kwargs.pop('delay', None) self.cipherSuite = kwargs.pop('cipherSuite', None) - self.interpreter = kwargs.pop('interpreter', 'js2py') + self.interpreter = kwargs.pop('interpreter', 'native') self.recaptcha = kwargs.pop('recaptcha', {}) self.allow_brotli = kwargs.pop( 'allow_brotli', @@ -162,6 +163,7 @@ class CloudScraper(Session): def request(self, method, url, *args, **kwargs): # pylint: disable=E0203 + if kwargs.get('proxies') and kwargs.get('proxies') != self.proxies: self.proxies = kwargs.get('proxies') @@ -194,9 +196,8 @@ class CloudScraper(Session): resp = self.Challenge_Response(resp, **kwargs) else: - if resp.status_code not in [302, 429, 503]: + if not resp.is_redirect and resp.status_code not in [429, 503]: self._solveDepthCnt = 0 - return resp # ------------------------------------------------------------------------------- # @@ -452,9 +453,7 @@ class CloudScraper(Session): cloudflare_kwargs['headers'] = updateAttr( cloudflare_kwargs, 'headers', - { - 'Referer': resp.url - } + {'Referer': resp.url} ) ret = self.request( @@ -463,11 +462,16 @@ class CloudScraper(Session): **cloudflare_kwargs ) - if ret.status_code != 302: + # ------------------------------------------------------------------------------- # + # Return response if Cloudflare is doing content pass through instead of 3xx + # ------------------------------------------------------------------------------- # + + if not ret.is_redirect: return ret # ------------------------------------------------------------------------------- # - # We shouldn't be here.... Re-request the original query and process again.... + # Cloudflare is doing http 3xx instead of pass through again.... + # Re-request the original query and/or process again.... 
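# Aside (editorial sketch, not from the patch): `is_redirect` is True for any
# 3xx response that carries a Location header, so the checks above now follow
# 301/303/307/308 challenges too, where the old `status_code != 302` test only
# caught a literal 302. A standalone illustration using the Response class from
# the vendored requests (the import path may be lib.requests inside the addon):

from requests.models import Response

def _fake_response(status, location=None):
    r = Response()
    r.status_code = status
    if location:
        r.headers['Location'] = location
    return r

assert _fake_response(302, '/next').is_redirect
assert _fake_response(307, '/next').is_redirect   # missed by a 302-only check
assert not _fake_response(200).is_redirect        # content pass-through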
# ------------------------------------------------------------------------------- # return self.request(resp.request.method, resp.url, **kwargs) diff --git a/lib/cloudscraper/user_agent/__init__.py b/lib/cloudscraper/user_agent/__init__.py index 2db03760..ccd3cb4c 100644 --- a/lib/cloudscraper/user_agent/__init__.py +++ b/lib/cloudscraper/user_agent/__init__.py @@ -5,7 +5,6 @@ import re import sys import ssl - from collections import OrderedDict # ------------------------------------------------------------------------------- # @@ -111,6 +110,7 @@ class User_Agent(): self.headers['User-Agent'] = random.SystemRandom().choice(filteredAgents[user_agent_version]) - if not kwargs.get('allow_brotli', False): - if 'br' in self.headers['Accept-Encoding']: - self.headers['Accept-Encoding'] = ','.join([encoding for encoding in self.headers['Accept-Encoding'].split(',') if encoding.strip() != 'br']).strip() + if not kwargs.get('allow_brotli', False) and 'br' in self.headers['Accept-Encoding']: + self.headers['Accept-Encoding'] = ','.join([ + encoding for encoding in self.headers['Accept-Encoding'].split(',') if encoding.strip() != 'br' + ]).strip() diff --git a/lib/concurrent/__init__.py b/lib/concurrent/__init__.py new file mode 100644 index 00000000..b36383a6 --- /dev/null +++ b/lib/concurrent/__init__.py @@ -0,0 +1,3 @@ +from pkgutil import extend_path + +__path__ = extend_path(__path__, __name__) diff --git a/lib/concurrent/futures/__init__.py b/lib/concurrent/futures/__init__.py new file mode 100644 index 00000000..428b14bd --- /dev/null +++ b/lib/concurrent/futures/__init__.py @@ -0,0 +1,23 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Execute computations asynchronously using threads or processes.""" + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +from concurrent.futures._base import (FIRST_COMPLETED, + FIRST_EXCEPTION, + ALL_COMPLETED, + CancelledError, + TimeoutError, + Future, + Executor, + wait, + as_completed) +from concurrent.futures.thread import ThreadPoolExecutor + +try: + from concurrent.futures.process import ProcessPoolExecutor +except ImportError: + # some platforms don't have multiprocessing + pass diff --git a/lib/concurrent/futures/_base.py b/lib/concurrent/futures/_base.py new file mode 100644 index 00000000..510ffa53 --- /dev/null +++ b/lib/concurrent/futures/_base.py @@ -0,0 +1,667 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +import collections +import logging +import threading +import itertools +import time +import types + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +FIRST_COMPLETED = 'FIRST_COMPLETED' +FIRST_EXCEPTION = 'FIRST_EXCEPTION' +ALL_COMPLETED = 'ALL_COMPLETED' +_AS_COMPLETED = '_AS_COMPLETED' + +# Possible future states (for internal use by the futures package). +PENDING = 'PENDING' +RUNNING = 'RUNNING' +# The future was cancelled by the user... +CANCELLED = 'CANCELLED' +# ...and _Waiter.add_cancelled() was called by a worker. +CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED' +FINISHED = 'FINISHED' + +_FUTURE_STATES = [ + PENDING, + RUNNING, + CANCELLED, + CANCELLED_AND_NOTIFIED, + FINISHED +] + +_STATE_TO_DESCRIPTION_MAP = { + PENDING: "pending", + RUNNING: "running", + CANCELLED: "cancelled", + CANCELLED_AND_NOTIFIED: "cancelled", + FINISHED: "finished" +} + +# Logger for internal use by the futures package. 
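# Aside (editorial note): the files added under lib/concurrent/ are the
# well-known "futures" backport of Python 3's concurrent.futures for the
# Python 2 interpreter that Kodi 18 ships. A minimal usage sketch, assuming
# lib/ is on sys.path as with the other bundled libraries; fetch() and the
# urls are made-up stand-ins:

from concurrent.futures import ThreadPoolExecutor, as_completed

def fetch(url):
    return url, len(url)  # stand-in for a real page download

urls = ['http://a.example', 'http://b.example']
with ThreadPoolExecutor(max_workers=4) as pool:
    futures = [pool.submit(fetch, u) for u in urls]
    for f in as_completed(futures):
        print(f.result())  # results arrive in completion order, not submit order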
+LOGGER = logging.getLogger("concurrent.futures") + +class Error(Exception): + """Base class for all future-related exceptions.""" + pass + +class CancelledError(Error): + """The Future was cancelled.""" + pass + +class TimeoutError(Error): + """The operation exceeded the given deadline.""" + pass + +class _Waiter(object): + """Provides the event that wait() and as_completed() block on.""" + def __init__(self): + self.event = threading.Event() + self.finished_futures = [] + + def add_result(self, future): + self.finished_futures.append(future) + + def add_exception(self, future): + self.finished_futures.append(future) + + def add_cancelled(self, future): + self.finished_futures.append(future) + +class _AsCompletedWaiter(_Waiter): + """Used by as_completed().""" + + def __init__(self): + super(_AsCompletedWaiter, self).__init__() + self.lock = threading.Lock() + + def add_result(self, future): + with self.lock: + super(_AsCompletedWaiter, self).add_result(future) + self.event.set() + + def add_exception(self, future): + with self.lock: + super(_AsCompletedWaiter, self).add_exception(future) + self.event.set() + + def add_cancelled(self, future): + with self.lock: + super(_AsCompletedWaiter, self).add_cancelled(future) + self.event.set() + +class _FirstCompletedWaiter(_Waiter): + """Used by wait(return_when=FIRST_COMPLETED).""" + + def add_result(self, future): + super(_FirstCompletedWaiter, self).add_result(future) + self.event.set() + + def add_exception(self, future): + super(_FirstCompletedWaiter, self).add_exception(future) + self.event.set() + + def add_cancelled(self, future): + super(_FirstCompletedWaiter, self).add_cancelled(future) + self.event.set() + +class _AllCompletedWaiter(_Waiter): + """Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED).""" + + def __init__(self, num_pending_calls, stop_on_exception): + self.num_pending_calls = num_pending_calls + self.stop_on_exception = stop_on_exception + self.lock = threading.Lock() + super(_AllCompletedWaiter, self).__init__() + + def _decrement_pending_calls(self): + with self.lock: + self.num_pending_calls -= 1 + if not self.num_pending_calls: + self.event.set() + + def add_result(self, future): + super(_AllCompletedWaiter, self).add_result(future) + self._decrement_pending_calls() + + def add_exception(self, future): + super(_AllCompletedWaiter, self).add_exception(future) + if self.stop_on_exception: + self.event.set() + else: + self._decrement_pending_calls() + + def add_cancelled(self, future): + super(_AllCompletedWaiter, self).add_cancelled(future) + self._decrement_pending_calls() + +class _AcquireFutures(object): + """A context manager that does an ordered acquire of Future conditions.""" + + def __init__(self, futures): + self.futures = sorted(futures, key=id) + + def __enter__(self): + for future in self.futures: + future._condition.acquire() + + def __exit__(self, *args): + for future in self.futures: + future._condition.release() + +def _create_and_install_waiters(fs, return_when): + if return_when == _AS_COMPLETED: + waiter = _AsCompletedWaiter() + elif return_when == FIRST_COMPLETED: + waiter = _FirstCompletedWaiter() + else: + pending_count = sum( + f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs) + + if return_when == FIRST_EXCEPTION: + waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True) + elif return_when == ALL_COMPLETED: + waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False) + else: + raise ValueError("Invalid return condition: %r" % return_when) + + for f in 
fs: + f._waiters.append(waiter) + + return waiter + + +def _yield_finished_futures(fs, waiter, ref_collect): + """ + Iterate on the list *fs*, yielding finished futures one by one in + reverse order. + Before yielding a future, *waiter* is removed from its waiters + and the future is removed from each set in the collection of sets + *ref_collect*. + + The aim of this function is to avoid keeping stale references after + the future is yielded and before the iterator resumes. + """ + while fs: + f = fs[-1] + for futures_set in ref_collect: + futures_set.remove(f) + with f._condition: + f._waiters.remove(waiter) + del f + # Careful not to keep a reference to the popped value + yield fs.pop() + + +def as_completed(fs, timeout=None): + """An iterator over the given futures that yields each as it completes. + + Args: + fs: The sequence of Futures (possibly created by different Executors) to + iterate over. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + + Returns: + An iterator that yields the given Futures as they complete (finished or + cancelled). If any given Futures are duplicated, they will be returned + once. + + Raises: + TimeoutError: If the entire result iterator could not be generated + before the given timeout. + """ + if timeout is not None: + end_time = timeout + time.time() + + fs = set(fs) + total_futures = len(fs) + with _AcquireFutures(fs): + finished = set( + f for f in fs + if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]) + pending = fs - finished + waiter = _create_and_install_waiters(fs, _AS_COMPLETED) + finished = list(finished) + try: + for f in _yield_finished_futures(finished, waiter, + ref_collect=(fs,)): + f = [f] + yield f.pop() + + while pending: + if timeout is None: + wait_timeout = None + else: + wait_timeout = end_time - time.time() + if wait_timeout < 0: + raise TimeoutError( + '%d (of %d) futures unfinished' % ( + len(pending), total_futures)) + + waiter.event.wait(wait_timeout) + + with waiter.lock: + finished = waiter.finished_futures + waiter.finished_futures = [] + waiter.event.clear() + + # reverse to keep finishing order + finished.reverse() + for f in _yield_finished_futures(finished, waiter, + ref_collect=(fs, pending)): + f = [f] + yield f.pop() + + finally: + # Remove waiter from unfinished futures + for f in fs: + with f._condition: + f._waiters.remove(waiter) + +DoneAndNotDoneFutures = collections.namedtuple( + 'DoneAndNotDoneFutures', 'done not_done') +def wait(fs, timeout=None, return_when=ALL_COMPLETED): + """Wait for the futures in the given sequence to complete. + + Args: + fs: The sequence of Futures (possibly created by different Executors) to + wait upon. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + return_when: Indicates when this function should return. The options + are: + + FIRST_COMPLETED - Return when any future finishes or is + cancelled. + FIRST_EXCEPTION - Return when any future finishes by raising an + exception. If no future raises an exception + then it is equivalent to ALL_COMPLETED. + ALL_COMPLETED - Return when all futures finish or are cancelled. + + Returns: + A named 2-tuple of sets. The first set, named 'done', contains the + futures that completed (is finished or cancelled) before the wait + completed. The second set, named 'not_done', contains uncompleted + futures. 
+ """ + with _AcquireFutures(fs): + done = set(f for f in fs + if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]) + not_done = set(fs) - done + + if (return_when == FIRST_COMPLETED) and done: + return DoneAndNotDoneFutures(done, not_done) + elif (return_when == FIRST_EXCEPTION) and done: + if any(f for f in done + if not f.cancelled() and f.exception() is not None): + return DoneAndNotDoneFutures(done, not_done) + + if len(done) == len(fs): + return DoneAndNotDoneFutures(done, not_done) + + waiter = _create_and_install_waiters(fs, return_when) + + waiter.event.wait(timeout) + for f in fs: + with f._condition: + f._waiters.remove(waiter) + + done.update(waiter.finished_futures) + return DoneAndNotDoneFutures(done, set(fs) - done) + +class Future(object): + """Represents the result of an asynchronous computation.""" + + def __init__(self): + """Initializes the future. Should not be called by clients.""" + self._condition = threading.Condition() + self._state = PENDING + self._result = None + self._exception = None + self._traceback = None + self._waiters = [] + self._done_callbacks = [] + + def _invoke_callbacks(self): + for callback in self._done_callbacks: + try: + callback(self) + except Exception: + LOGGER.exception('exception calling callback for %r', self) + except BaseException: + # Explicitly let all other new-style exceptions through so + # that we can catch all old-style exceptions with a simple + # "except:" clause below. + # + # All old-style exception objects are instances of + # types.InstanceType, but "except types.InstanceType:" does + # not catch old-style exceptions for some reason. Thus, the + # only way to catch all old-style exceptions without catching + # any new-style exceptions is to filter out the new-style + # exceptions, which all derive from BaseException. + raise + except: + # Because of the BaseException clause above, this handler only + # executes for old-style exception objects. + LOGGER.exception('exception calling callback for %r', self) + + def __repr__(self): + with self._condition: + if self._state == FINISHED: + if self._exception: + return '<%s at %#x state=%s raised %s>' % ( + self.__class__.__name__, + id(self), + _STATE_TO_DESCRIPTION_MAP[self._state], + self._exception.__class__.__name__) + else: + return '<%s at %#x state=%s returned %s>' % ( + self.__class__.__name__, + id(self), + _STATE_TO_DESCRIPTION_MAP[self._state], + self._result.__class__.__name__) + return '<%s at %#x state=%s>' % ( + self.__class__.__name__, + id(self), + _STATE_TO_DESCRIPTION_MAP[self._state]) + + def cancel(self): + """Cancel the future if possible. + + Returns True if the future was cancelled, False otherwise. A future + cannot be cancelled if it is running or has already completed. 
+ """ + with self._condition: + if self._state in [RUNNING, FINISHED]: + return False + + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + return True + + self._state = CANCELLED + self._condition.notify_all() + + self._invoke_callbacks() + return True + + def cancelled(self): + """Return True if the future was cancelled.""" + with self._condition: + return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED] + + def running(self): + """Return True if the future is currently executing.""" + with self._condition: + return self._state == RUNNING + + def done(self): + """Return True of the future was cancelled or finished executing.""" + with self._condition: + return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED] + + def __get_result(self): + if self._exception: + if isinstance(self._exception, types.InstanceType): + # The exception is an instance of an old-style class, which + # means type(self._exception) returns types.ClassType instead + # of the exception's actual class type. + exception_type = self._exception.__class__ + else: + exception_type = type(self._exception) + raise exception_type, self._exception, self._traceback + else: + return self._result + + def add_done_callback(self, fn): + """Attaches a callable that will be called when the future finishes. + + Args: + fn: A callable that will be called with this future as its only + argument when the future completes or is cancelled. The callable + will always be called by a thread in the same process in which + it was added. If the future has already completed or been + cancelled then the callable will be called immediately. These + callables are called in the order that they were added. + """ + with self._condition: + if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]: + self._done_callbacks.append(fn) + return + fn(self) + + def result(self, timeout=None): + """Return the result of the call that the future represents. + + Args: + timeout: The number of seconds to wait for the result if the future + isn't done. If None, then there is no limit on the wait time. + + Returns: + The result of the call that the future represents. + + Raises: + CancelledError: If the future was cancelled. + TimeoutError: If the future didn't finish executing before the given + timeout. + Exception: If the call raised then that exception will be raised. + """ + with self._condition: + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self.__get_result() + + self._condition.wait(timeout) + + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self.__get_result() + else: + raise TimeoutError() + + def exception_info(self, timeout=None): + """Return a tuple of (exception, traceback) raised by the call that the + future represents. + + Args: + timeout: The number of seconds to wait for the exception if the + future isn't done. If None, then there is no limit on the wait + time. + + Returns: + The exception raised by the call that the future represents or None + if the call completed without raising. + + Raises: + CancelledError: If the future was cancelled. + TimeoutError: If the future didn't finish executing before the given + timeout. 
+ """ + with self._condition: + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self._exception, self._traceback + + self._condition.wait(timeout) + + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self._exception, self._traceback + else: + raise TimeoutError() + + def exception(self, timeout=None): + """Return the exception raised by the call that the future represents. + + Args: + timeout: The number of seconds to wait for the exception if the + future isn't done. If None, then there is no limit on the wait + time. + + Returns: + The exception raised by the call that the future represents or None + if the call completed without raising. + + Raises: + CancelledError: If the future was cancelled. + TimeoutError: If the future didn't finish executing before the given + timeout. + """ + return self.exception_info(timeout)[0] + + # The following methods should only be used by Executors and in tests. + def set_running_or_notify_cancel(self): + """Mark the future as running or process any cancel notifications. + + Should only be used by Executor implementations and unit tests. + + If the future has been cancelled (cancel() was called and returned + True) then any threads waiting on the future completing (though calls + to as_completed() or wait()) are notified and False is returned. + + If the future was not cancelled then it is put in the running state + (future calls to running() will return True) and True is returned. + + This method should be called by Executor implementations before + executing the work associated with this future. If this method returns + False then the work should not be executed. + + Returns: + False if the Future was cancelled, True otherwise. + + Raises: + RuntimeError: if this method was already called or if set_result() + or set_exception() was called. + """ + with self._condition: + if self._state == CANCELLED: + self._state = CANCELLED_AND_NOTIFIED + for waiter in self._waiters: + waiter.add_cancelled(self) + # self._condition.notify_all() is not necessary because + # self.cancel() triggers a notification. + return False + elif self._state == PENDING: + self._state = RUNNING + return True + else: + LOGGER.critical('Future %s in unexpected state: %s', + id(self), + self._state) + raise RuntimeError('Future in unexpected state') + + def set_result(self, result): + """Sets the return value of work associated with the future. + + Should only be used by Executor implementations and unit tests. + """ + with self._condition: + self._result = result + self._state = FINISHED + for waiter in self._waiters: + waiter.add_result(self) + self._condition.notify_all() + self._invoke_callbacks() + + def set_exception_info(self, exception, traceback): + """Sets the result of the future as being the given exception + and traceback. + + Should only be used by Executor implementations and unit tests. + """ + with self._condition: + self._exception = exception + self._traceback = traceback + self._state = FINISHED + for waiter in self._waiters: + waiter.add_exception(self) + self._condition.notify_all() + self._invoke_callbacks() + + def set_exception(self, exception): + """Sets the result of the future as being the given exception. + + Should only be used by Executor implementations and unit tests. 
+ """ + self.set_exception_info(exception, None) + +class Executor(object): + """This is an abstract base class for concrete asynchronous executors.""" + + def submit(self, fn, *args, **kwargs): + """Submits a callable to be executed with the given arguments. + + Schedules the callable to be executed as fn(*args, **kwargs) and returns + a Future instance representing the execution of the callable. + + Returns: + A Future representing the given call. + """ + raise NotImplementedError() + + def map(self, fn, *iterables, **kwargs): + """Returns an iterator equivalent to map(fn, iter). + + Args: + fn: A callable that will take as many arguments as there are + passed iterables. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + + Returns: + An iterator equivalent to: map(func, *iterables) but the calls may + be evaluated out-of-order. + + Raises: + TimeoutError: If the entire result iterator could not be generated + before the given timeout. + Exception: If fn(*args) raises for any values. + """ + timeout = kwargs.get('timeout') + if timeout is not None: + end_time = timeout + time.time() + + fs = [self.submit(fn, *args) for args in itertools.izip(*iterables)] + + # Yield must be hidden in closure so that the futures are submitted + # before the first iterator value is required. + def result_iterator(): + try: + # reverse to keep finishing order + fs.reverse() + while fs: + # Careful not to keep a reference to the popped future + if timeout is None: + yield fs.pop().result() + else: + yield fs.pop().result(end_time - time.time()) + finally: + for future in fs: + future.cancel() + return result_iterator() + + def shutdown(self, wait=True): + """Clean-up the resources associated with the Executor. + + It is safe to call this method several times. Otherwise, no other + methods can be called after this one. + + Args: + wait: If True then shutdown will not return until all running + futures have finished executing and the resources used by the + executor have been reclaimed. + """ + pass + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.shutdown(wait=True) + return False diff --git a/lib/concurrent/futures/_compat.py b/lib/concurrent/futures/_compat.py new file mode 100644 index 00000000..e77cf0e5 --- /dev/null +++ b/lib/concurrent/futures/_compat.py @@ -0,0 +1,111 @@ +from keyword import iskeyword as _iskeyword +from operator import itemgetter as _itemgetter +import sys as _sys + + +def namedtuple(typename, field_names): + """Returns a new subclass of tuple with named fields. + + >>> Point = namedtuple('Point', 'x y') + >>> Point.__doc__ # docstring for the new class + 'Point(x, y)' + >>> p = Point(11, y=22) # instantiate with positional args or keywords + >>> p[0] + p[1] # indexable like a plain tuple + 33 + >>> x, y = p # unpack like a regular tuple + >>> x, y + (11, 22) + >>> p.x + p.y # fields also accessable by name + 33 + >>> d = p._asdict() # convert to a dictionary + >>> d['x'] + 11 + >>> Point(**d) # convert from a dictionary + Point(x=11, y=22) + >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields + Point(x=100, y=22) + + """ + + # Parse and validate the field names. Validation serves two purposes, + # generating informative error messages and preventing template injection attacks. 
+ if isinstance(field_names, basestring): + field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas + field_names = tuple(map(str, field_names)) + for name in (typename,) + field_names: + if not all(c.isalnum() or c=='_' for c in name): + raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name) + if _iskeyword(name): + raise ValueError('Type names and field names cannot be a keyword: %r' % name) + if name[0].isdigit(): + raise ValueError('Type names and field names cannot start with a number: %r' % name) + seen_names = set() + for name in field_names: + if name.startswith('_'): + raise ValueError('Field names cannot start with an underscore: %r' % name) + if name in seen_names: + raise ValueError('Encountered duplicate field name: %r' % name) + seen_names.add(name) + + # Create and fill-in the class template + numfields = len(field_names) + argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes + reprtxt = ', '.join('%s=%%r' % name for name in field_names) + dicttxt = ', '.join('%r: t[%d]' % (name, pos) for pos, name in enumerate(field_names)) + template = '''class %(typename)s(tuple): + '%(typename)s(%(argtxt)s)' \n + __slots__ = () \n + _fields = %(field_names)r \n + def __new__(_cls, %(argtxt)s): + return _tuple.__new__(_cls, (%(argtxt)s)) \n + @classmethod + def _make(cls, iterable, new=tuple.__new__, len=len): + 'Make a new %(typename)s object from a sequence or iterable' + result = new(cls, iterable) + if len(result) != %(numfields)d: + raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result)) + return result \n + def __repr__(self): + return '%(typename)s(%(reprtxt)s)' %% self \n + def _asdict(t): + 'Return a new dict which maps field names to their values' + return {%(dicttxt)s} \n + def _replace(_self, **kwds): + 'Return a new %(typename)s object replacing specified fields with new values' + result = _self._make(map(kwds.pop, %(field_names)r, _self)) + if kwds: + raise ValueError('Got unexpected field names: %%r' %% kwds.keys()) + return result \n + def __getnewargs__(self): + return tuple(self) \n\n''' % locals() + for i, name in enumerate(field_names): + template += ' %s = _property(_itemgetter(%d))\n' % (name, i) + + # Execute the template string in a temporary namespace and + # support tracing utilities by setting a value for frame.f_globals['__name__'] + namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename, + _property=property, _tuple=tuple) + try: + exec(template, namespace) + except SyntaxError: + e = _sys.exc_info()[1] + raise SyntaxError(e.message + ':\n' + template) + result = namespace[typename] + + # For pickling to work, the __module__ variable needs to be set to the frame + # where the named tuple is created. Bypass this step in enviroments where + # sys._getframe is not defined (Jython for example). 
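# Aside (editorial, illustrative): pickle resolves a class by module + name,
# so without the __module__ fix-up below a tuple class created here would
# claim to live in the temporary 'namedtuple_<typename>' namespace used for
# the exec above and could not be unpickled. Sketch, assuming module-level use:
#
#     import pickle
#     Point = namedtuple('Point', 'x y')
#     assert pickle.loads(pickle.dumps(Point(1, 2))) == Point(1, 2)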
+ if hasattr(_sys, '_getframe'): + result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__') + + return result + + +if _sys.version_info[0] < 3: + def reraise(exc, traceback): + locals_ = {'exc_type': type(exc), 'exc_value': exc, 'traceback': traceback} + exec('raise exc_type, exc_value, traceback', {}, locals_) +else: + def reraise(exc, traceback): + # Tracebacks are embedded in exceptions in Python 3 + raise exc diff --git a/lib/concurrent/futures/process.py b/lib/concurrent/futures/process.py new file mode 100644 index 00000000..fa5b96fd --- /dev/null +++ b/lib/concurrent/futures/process.py @@ -0,0 +1,363 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Implements ProcessPoolExecutor. + +The follow diagram and text describe the data-flow through the system: + +|======================= In-process =====================|== Out-of-process ==| + ++----------+ +----------+ +--------+ +-----------+ +---------+ +| | => | Work Ids | => | | => | Call Q | => | | +| | +----------+ | | +-----------+ | | +| | | ... | | | | ... | | | +| | | 6 | | | | 5, call() | | | +| | | 7 | | | | ... | | | +| Process | | ... | | Local | +-----------+ | Process | +| Pool | +----------+ | Worker | | #1..n | +| Executor | | Thread | | | +| | +----------- + | | +-----------+ | | +| | <=> | Work Items | <=> | | <= | Result Q | <= | | +| | +------------+ | | +-----------+ | | +| | | 6: call() | | | | ... | | | +| | | future | | | | 4, result | | | +| | | ... | | | | 3, except | | | ++----------+ +------------+ +--------+ +-----------+ +---------+ + +Executor.submit() called: +- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict +- adds the id of the _WorkItem to the "Work Ids" queue + +Local worker thread: +- reads work ids from the "Work Ids" queue and looks up the corresponding + WorkItem from the "Work Items" dict: if the work item has been cancelled then + it is simply removed from the dict, otherwise it is repackaged as a + _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q" + until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because + calls placed in the "Call Q" can no longer be cancelled with Future.cancel(). +- reads _ResultItems from "Result Q", updates the future stored in the + "Work Items" dict and deletes the dict entry + +Process #1..n: +- reads _CallItems from "Call Q", executes the calls, and puts the resulting + _ResultItems in "Request Q" +""" + +import atexit +from concurrent.futures import _base +import Queue as queue +import multiprocessing +import threading +import weakref +import sys + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +# Workers are created as daemon threads and processes. This is done to allow the +# interpreter to exit when there are still idle processes in a +# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However, +# allowing workers to die with the interpreter has two undesirable properties: +# - The workers would still be running during interpretor shutdown, +# meaning that they would fail in unpredictable ways. +# - The workers could be killed while evaluating a work item, which could +# be bad if the callable being evaluated has external side-effects e.g. +# writing to a file. +# +# To work around this problem, an exit handler is installed which tells the +# workers to exit when their work queues are empty and then waits until the +# threads/processes finish. 
+ +_threads_queues = weakref.WeakKeyDictionary() +_shutdown = False + +def _python_exit(): + global _shutdown + _shutdown = True + items = list(_threads_queues.items()) if _threads_queues else () + for t, q in items: + q.put(None) + for t, q in items: + t.join(sys.maxint) + +# Controls how many more calls than processes will be queued in the call queue. +# A smaller number will mean that processes spend more time idle waiting for +# work while a larger number will make Future.cancel() succeed less frequently +# (Futures in the call queue cannot be cancelled). +EXTRA_QUEUED_CALLS = 1 + +class _WorkItem(object): + def __init__(self, future, fn, args, kwargs): + self.future = future + self.fn = fn + self.args = args + self.kwargs = kwargs + +class _ResultItem(object): + def __init__(self, work_id, exception=None, result=None): + self.work_id = work_id + self.exception = exception + self.result = result + +class _CallItem(object): + def __init__(self, work_id, fn, args, kwargs): + self.work_id = work_id + self.fn = fn + self.args = args + self.kwargs = kwargs + +def _process_worker(call_queue, result_queue): + """Evaluates calls from call_queue and places the results in result_queue. + + This worker is run in a separate process. + + Args: + call_queue: A multiprocessing.Queue of _CallItems that will be read and + evaluated by the worker. + result_queue: A multiprocessing.Queue of _ResultItems that will written + to by the worker. + shutdown: A multiprocessing.Event that will be set as a signal to the + worker that it should exit when call_queue is empty. + """ + while True: + call_item = call_queue.get(block=True) + if call_item is None: + # Wake up queue management thread + result_queue.put(None) + return + try: + r = call_item.fn(*call_item.args, **call_item.kwargs) + except: + e = sys.exc_info()[1] + result_queue.put(_ResultItem(call_item.work_id, + exception=e)) + else: + result_queue.put(_ResultItem(call_item.work_id, + result=r)) + +def _add_call_item_to_queue(pending_work_items, + work_ids, + call_queue): + """Fills call_queue with _WorkItems from pending_work_items. + + This function never blocks. + + Args: + pending_work_items: A dict mapping work ids to _WorkItems e.g. + {5: <_WorkItem...>, 6: <_WorkItem...>, ...} + work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids + are consumed and the corresponding _WorkItems from + pending_work_items are transformed into _CallItems and put in + call_queue. + call_queue: A multiprocessing.Queue that will be filled with _CallItems + derived from _WorkItems. + """ + while True: + if call_queue.full(): + return + try: + work_id = work_ids.get(block=False) + except queue.Empty: + return + else: + work_item = pending_work_items[work_id] + + if work_item.future.set_running_or_notify_cancel(): + call_queue.put(_CallItem(work_id, + work_item.fn, + work_item.args, + work_item.kwargs), + block=True) + else: + del pending_work_items[work_id] + continue + +def _queue_management_worker(executor_reference, + processes, + pending_work_items, + work_ids_queue, + call_queue, + result_queue): + """Manages the communication between this process and the worker processes. + + This function is run in a local thread. + + Args: + executor_reference: A weakref.ref to the ProcessPoolExecutor that owns + this thread. Used to determine if the ProcessPoolExecutor has been + garbage collected and that this function can exit. + process: A list of the multiprocessing.Process instances used as + workers. 
+ pending_work_items: A dict mapping work ids to _WorkItems e.g. + {5: <_WorkItem...>, 6: <_WorkItem...>, ...} + work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]). + call_queue: A multiprocessing.Queue that will be filled with _CallItems + derived from _WorkItems for processing by the process workers. + result_queue: A multiprocessing.Queue of _ResultItems generated by the + process workers. + """ + nb_shutdown_processes = [0] + def shutdown_one_process(): + """Tell a worker to terminate, which will in turn wake us again""" + call_queue.put(None) + nb_shutdown_processes[0] += 1 + while True: + _add_call_item_to_queue(pending_work_items, + work_ids_queue, + call_queue) + + result_item = result_queue.get(block=True) + if result_item is not None: + work_item = pending_work_items[result_item.work_id] + del pending_work_items[result_item.work_id] + + if result_item.exception: + work_item.future.set_exception(result_item.exception) + else: + work_item.future.set_result(result_item.result) + # Delete references to object. See issue16284 + del work_item + # Check whether we should start shutting down. + executor = executor_reference() + # No more work items can be added if: + # - The interpreter is shutting down OR + # - The executor that owns this worker has been collected OR + # - The executor that owns this worker has been shutdown. + if _shutdown or executor is None or executor._shutdown_thread: + # Since no new work items can be added, it is safe to shutdown + # this thread if there are no pending work items. + if not pending_work_items: + while nb_shutdown_processes[0] < len(processes): + shutdown_one_process() + # If .join() is not called on the created processes then + # some multiprocessing.Queue methods may deadlock on Mac OS + # X. + for p in processes: + p.join() + call_queue.close() + return + del executor + +_system_limits_checked = False +_system_limited = None +def _check_system_limits(): + global _system_limits_checked, _system_limited + if _system_limits_checked: + if _system_limited: + raise NotImplementedError(_system_limited) + _system_limits_checked = True + try: + import os + nsems_max = os.sysconf("SC_SEM_NSEMS_MAX") + except (AttributeError, ValueError): + # sysconf not available or setting not available + return + if nsems_max == -1: + # indetermine limit, assume that limit is determined + # by available memory only + return + if nsems_max >= 256: + # minimum number of semaphores available + # according to POSIX + return + _system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max + raise NotImplementedError(_system_limited) + + +class ProcessPoolExecutor(_base.Executor): + def __init__(self, max_workers=None): + """Initializes a new ProcessPoolExecutor instance. + + Args: + max_workers: The maximum number of processes that can be used to + execute the given calls. If None or not given then as many + worker processes will be created as the machine has processors. + """ + _check_system_limits() + + if max_workers is None: + self._max_workers = multiprocessing.cpu_count() + else: + if max_workers <= 0: + raise ValueError("max_workers must be greater than 0") + + self._max_workers = max_workers + + # Make the call queue slightly larger than the number of processes to + # prevent the worker processes from idling. But don't make it too big + # because futures in the call queue cannot be cancelled. 
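# Aside (editorial sketch): the trade-off described above is visible in
# Future.cancel() -- a call that a worker has already picked up cannot be
# cancelled, which is why the call queue is capped at max_workers +
# EXTRA_QUEUED_CALLS. Standalone illustration (a thread pool is used for
# brevity; the Future state machine is shared with the process pool):

from concurrent.futures import ThreadPoolExecutor
import time

pool = ThreadPoolExecutor(max_workers=1)
a = pool.submit(time.sleep, 0.5)  # picked up by the worker -> usually RUNNING
b = pool.submit(time.sleep, 0.5)  # still queued            -> cancellable
print(a.cancel(), b.cancel())     # typically: False True (a small race is possible)
pool.shutdown()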
+ self._call_queue = multiprocessing.Queue(self._max_workers + + EXTRA_QUEUED_CALLS) + self._result_queue = multiprocessing.Queue() + self._work_ids = queue.Queue() + self._queue_management_thread = None + self._processes = set() + + # Shutdown is a two-step process. + self._shutdown_thread = False + self._shutdown_lock = threading.Lock() + self._queue_count = 0 + self._pending_work_items = {} + + def _start_queue_management_thread(self): + # When the executor gets lost, the weakref callback will wake up + # the queue management thread. + def weakref_cb(_, q=self._result_queue): + q.put(None) + if self._queue_management_thread is None: + self._queue_management_thread = threading.Thread( + target=_queue_management_worker, + args=(weakref.ref(self, weakref_cb), + self._processes, + self._pending_work_items, + self._work_ids, + self._call_queue, + self._result_queue)) + self._queue_management_thread.daemon = True + self._queue_management_thread.start() + _threads_queues[self._queue_management_thread] = self._result_queue + + def _adjust_process_count(self): + for _ in range(len(self._processes), self._max_workers): + p = multiprocessing.Process( + target=_process_worker, + args=(self._call_queue, + self._result_queue)) + p.start() + self._processes.add(p) + + def submit(self, fn, *args, **kwargs): + with self._shutdown_lock: + if self._shutdown_thread: + raise RuntimeError('cannot schedule new futures after shutdown') + + f = _base.Future() + w = _WorkItem(f, fn, args, kwargs) + + self._pending_work_items[self._queue_count] = w + self._work_ids.put(self._queue_count) + self._queue_count += 1 + # Wake up queue management thread + self._result_queue.put(None) + + self._start_queue_management_thread() + self._adjust_process_count() + return f + submit.__doc__ = _base.Executor.submit.__doc__ + + def shutdown(self, wait=True): + with self._shutdown_lock: + self._shutdown_thread = True + if self._queue_management_thread: + # Wake up queue management thread + self._result_queue.put(None) + if wait: + self._queue_management_thread.join(sys.maxint) + # To reduce the risk of openning too many files, remove references to + # objects that use file descriptors. + self._queue_management_thread = None + self._call_queue = None + self._result_queue = None + self._processes = None + shutdown.__doc__ = _base.Executor.shutdown.__doc__ + +atexit.register(_python_exit) diff --git a/lib/concurrent/futures/thread.py b/lib/concurrent/futures/thread.py new file mode 100644 index 00000000..b5f832ff --- /dev/null +++ b/lib/concurrent/futures/thread.py @@ -0,0 +1,170 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Implements ThreadPoolExecutor.""" + +import atexit +from concurrent.futures import _base +import itertools +import Queue as queue +import threading +import weakref +import sys + +try: + from multiprocessing import cpu_count +except ImportError: + # some platforms don't have multiprocessing + def cpu_count(): + return None + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +# Workers are created as daemon threads. This is done to allow the interpreter +# to exit when there are still idle threads in a ThreadPoolExecutor's thread +# pool (i.e. shutdown() was not called). However, allowing workers to die with +# the interpreter has two undesirable properties: +# - The workers would still be running during interpretor shutdown, +# meaning that they would fail in unpredictable ways. 
+# - The workers could be killed while evaluating a work item, which could +# be bad if the callable being evaluated has external side-effects e.g. +# writing to a file. +# +# To work around this problem, an exit handler is installed which tells the +# workers to exit when their work queues are empty and then waits until the +# threads finish. + +_threads_queues = weakref.WeakKeyDictionary() +_shutdown = False + +def _python_exit(): + global _shutdown + _shutdown = True + items = list(_threads_queues.items()) if _threads_queues else () + for t, q in items: + q.put(None) + for t, q in items: + t.join(sys.maxint) + +atexit.register(_python_exit) + +class _WorkItem(object): + def __init__(self, future, fn, args, kwargs): + self.future = future + self.fn = fn + self.args = args + self.kwargs = kwargs + + def run(self): + if not self.future.set_running_or_notify_cancel(): + return + + try: + result = self.fn(*self.args, **self.kwargs) + except: + e, tb = sys.exc_info()[1:] + self.future.set_exception_info(e, tb) + else: + self.future.set_result(result) + +def _worker(executor_reference, work_queue): + try: + while True: + work_item = work_queue.get(block=True) + if work_item is not None: + work_item.run() + # Delete references to object. See issue16284 + del work_item + + # attempt to increment idle count + executor = executor_reference() + if executor is not None: + executor._idle_semaphore.release() + del executor + continue + executor = executor_reference() + # Exit if: + # - The interpreter is shutting down OR + # - The executor that owns the worker has been collected OR + # - The executor that owns the worker has been shutdown. + if _shutdown or executor is None or executor._shutdown: + # Notice other workers + work_queue.put(None) + return + del executor + except: + _base.LOGGER.critical('Exception in worker', exc_info=True) + + +class ThreadPoolExecutor(_base.Executor): + + # Used to assign unique thread names when thread_name_prefix is not supplied. + _counter = itertools.count().next + + def __init__(self, max_workers=None, thread_name_prefix=''): + """Initializes a new ThreadPoolExecutor instance. + + Args: + max_workers: The maximum number of threads that can be used to + execute the given calls. + thread_name_prefix: An optional name prefix to give our threads. + """ + if max_workers is None: + # Use this number because ThreadPoolExecutor is often + # used to overlap I/O instead of CPU work. + max_workers = (cpu_count() or 1) * 5 + if max_workers <= 0: + raise ValueError("max_workers must be greater than 0") + + self._max_workers = max_workers + self._work_queue = queue.Queue() + self._idle_semaphore = threading.Semaphore(0) + self._threads = set() + self._shutdown = False + self._shutdown_lock = threading.Lock() + self._thread_name_prefix = (thread_name_prefix or + ("ThreadPoolExecutor-%d" % self._counter())) + + def submit(self, fn, *args, **kwargs): + with self._shutdown_lock: + if self._shutdown: + raise RuntimeError('cannot schedule new futures after shutdown') + + f = _base.Future() + w = _WorkItem(f, fn, args, kwargs) + + self._work_queue.put(w) + self._adjust_thread_count() + return f + submit.__doc__ = _base.Executor.submit.__doc__ + + def _adjust_thread_count(self): + # if idle threads are available, don't spin new threads + if self._idle_semaphore.acquire(False): + return + + # When the executor gets lost, the weakref callback will wake up + # the worker threads. 
+ def weakref_cb(_, q=self._work_queue): + q.put(None) + + num_threads = len(self._threads) + if num_threads < self._max_workers: + thread_name = '%s_%d' % (self._thread_name_prefix or self, + num_threads) + t = threading.Thread(name=thread_name, target=_worker, + args=(weakref.ref(self, weakref_cb), + self._work_queue)) + t.daemon = True + t.start() + self._threads.add(t) + _threads_queues[t] = self._work_queue + + def shutdown(self, wait=True): + with self._shutdown_lock: + self._shutdown = True + self._work_queue.put(None) + if wait: + for t in self._threads: + t.join(sys.maxint) + shutdown.__doc__ = _base.Executor.shutdown.__doc__ diff --git a/lib/requests/__init__.py b/lib/requests/__init__.py index a5b3c9c3..9a899df6 100644 --- a/lib/requests/__init__.py +++ b/lib/requests/__init__.py @@ -22,7 +22,7 @@ usage: ... or POST: >>> payload = dict(key1='value1', key2='value2') - >>> r = requests.post('http://httpbin.org/post', data=payload) + >>> r = requests.post('https://httpbin.org/post', data=payload) >>> print(r.text) { ... @@ -57,10 +57,10 @@ def check_compatibility(urllib3_version, chardet_version): # Check urllib3 for compatibility. major, minor, patch = urllib3_version # noqa: F811 major, minor, patch = int(major), int(minor), int(patch) - # urllib3 >= 1.21.1, <= 1.23 + # urllib3 >= 1.21.1, <= 1.25 assert major == 1 assert minor >= 21 - assert minor <= 23 + assert minor <= 25 # Check chardet for compatibility. major, minor, patch = chardet_version.split('.')[:3] @@ -79,14 +79,14 @@ def _check_cryptography(cryptography_version): return if cryptography_version < [1, 3, 4]: - warning = 'Old version of cryptography ({0}) may cause slowdown.'.format(cryptography_version) + warning = 'Old version of cryptography ({}) may cause slowdown.'.format(cryptography_version) warnings.warn(warning, RequestsDependencyWarning) # Check imported dependencies for compatibility. try: check_compatibility(urllib3.__version__, chardet.__version__) except (AssertionError, ValueError): - warnings.warn("urllib3 ({0}) or chardet ({1}) doesn't match a supported " + warnings.warn("urllib3 ({}) or chardet ({}) doesn't match a supported " "version!".format(urllib3.__version__, chardet.__version__), RequestsDependencyWarning) @@ -123,12 +123,7 @@ from .exceptions import ( # Set default logging handler to avoid "No handler found" warnings. import logging -try: # Python 2.7+ - from logging import NullHandler -except ImportError: - class NullHandler(logging.Handler): - def emit(self, record): - pass +from logging import NullHandler logging.getLogger(__name__).addHandler(NullHandler()) diff --git a/lib/requests/__version__.py b/lib/requests/__version__.py index ef61ec0f..9844f740 100644 --- a/lib/requests/__version__.py +++ b/lib/requests/__version__.py @@ -5,10 +5,10 @@ __title__ = 'requests' __description__ = 'Python HTTP for Humans.' 
__url__ = 'http://python-requests.org' -__version__ = '2.19.1' -__build__ = 0x021901 +__version__ = '2.22.0' +__build__ = 0x022200 __author__ = 'Kenneth Reitz' __author_email__ = 'me@kennethreitz.org' __license__ = 'Apache 2.0' -__copyright__ = 'Copyright 2018 Kenneth Reitz' +__copyright__ = 'Copyright 2019 Kenneth Reitz' __cake__ = u'\u2728 \U0001f370 \u2728' diff --git a/lib/requests/adapters.py b/lib/requests/adapters.py index a4b02842..fa4d9b3c 100644 --- a/lib/requests/adapters.py +++ b/lib/requests/adapters.py @@ -26,6 +26,7 @@ from urllib3.exceptions import ProtocolError from urllib3.exceptions import ReadTimeoutError from urllib3.exceptions import SSLError as _SSLError from urllib3.exceptions import ResponseError +from urllib3.exceptions import LocationValueError from .models import Response from .compat import urlparse, basestring @@ -35,7 +36,8 @@ from .utils import (DEFAULT_CA_BUNDLE_PATH, extract_zipped_paths, from .structures import CaseInsensitiveDict from .cookies import extract_cookies_to_jar from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError, - ProxyError, RetryError, InvalidSchema, InvalidProxyURL) + ProxyError, RetryError, InvalidSchema, InvalidProxyURL, + InvalidURL) from .auth import _basic_auth_str try: @@ -127,8 +129,7 @@ class HTTPAdapter(BaseAdapter): self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block) def __getstate__(self): - return dict((attr, getattr(self, attr, None)) for attr in - self.__attrs__) + return {attr: getattr(self, attr, None) for attr in self.__attrs__} def __setstate__(self, state): # Can't handle by adding 'proxy_manager' to self.__attrs__ because @@ -224,7 +225,7 @@ class HTTPAdapter(BaseAdapter): if not cert_loc or not os.path.exists(cert_loc): raise IOError("Could not find a suitable TLS CA certificate bundle, " - "invalid path: {0}".format(cert_loc)) + "invalid path: {}".format(cert_loc)) conn.cert_reqs = 'CERT_REQUIRED' @@ -246,10 +247,10 @@ class HTTPAdapter(BaseAdapter): conn.key_file = None if conn.cert_file and not os.path.exists(conn.cert_file): raise IOError("Could not find the TLS certificate file, " - "invalid path: {0}".format(conn.cert_file)) + "invalid path: {}".format(conn.cert_file)) if conn.key_file and not os.path.exists(conn.key_file): raise IOError("Could not find the TLS key file, " - "invalid path: {0}".format(conn.key_file)) + "invalid path: {}".format(conn.key_file)) def build_response(self, req, resp): """Builds a :class:`Response <requests.Response>` object from a urllib3 @@ -378,7 +379,7 @@ class HTTPAdapter(BaseAdapter): when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. - :param proxies: The url of the proxy being used for this request. + :param proxy: The url of the proxy being used for this request. :rtype: dict """ headers = {} @@ -407,7 +408,10 @@ class HTTPAdapter(BaseAdapter): :rtype: requests.Response """ - conn = self.get_connection(request.url, proxies) + try: + conn = self.get_connection(request.url, proxies) + except LocationValueError as e: + raise InvalidURL(e, request=request) self.cert_verify(conn, request.url, verify, cert) url = self.request_url(request, proxies) @@ -421,7 +425,7 @@ class HTTPAdapter(BaseAdapter): timeout = TimeoutSauce(connect=connect, read=read) except ValueError as e: # this may raise a string formatting error. - err = ("Invalid timeout {0}. Pass a (connect, read) " + err = ("Invalid timeout {}. 
Pass a (connect, read) " "timeout tuple, or a single float to set " "both timeouts to the same value".format(timeout)) raise ValueError(err) @@ -471,11 +475,10 @@ class HTTPAdapter(BaseAdapter): # Receive the response from the server try: - # For Python 2.7+ versions, use buffering of HTTP - # responses + # For Python 2.7, use buffering of HTTP responses r = low_conn.getresponse(buffering=True) except TypeError: - # For compatibility with Python 2.6 versions and back + # For compatibility with Python 3.3+ r = low_conn.getresponse() resp = HTTPResponse.from_httplib( diff --git a/lib/requests/api.py b/lib/requests/api.py index a2cc84d7..ef71d075 100644 --- a/lib/requests/api.py +++ b/lib/requests/api.py @@ -18,8 +18,10 @@ def request(method, url, **kwargs): :param method: method for the new :class:`Request` object. :param url: URL for the new :class:`Request` object. - :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. - :param data: (optional) Dictionary or list of tuples ``[(key, value)]`` (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`. + :param params: (optional) Dictionary, list of tuples or bytes to send + in the query string for the :class:`Request`. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`. :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. @@ -47,7 +49,7 @@ def request(method, url, **kwargs): Usage:: >>> import requests - >>> req = requests.request('GET', 'http://httpbin.org/get') + >>> req = requests.request('GET', 'https://httpbin.org/get') <Response [200]> """ @@ -62,7 +64,8 @@ def get(url, params=None, **kwargs): r"""Sends a GET request. :param url: URL for the new :class:`Request` object. - :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. + :param params: (optional) Dictionary, list of tuples or bytes to send + in the query string for the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response @@ -102,7 +105,8 @@ def post(url, data=None, json=None, **kwargs): r"""Sends a POST request. :param url: URL for the new :class:`Request` object. - :param data: (optional) Dictionary (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. :param json: (optional) json data to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object @@ -116,7 +120,8 @@ def put(url, data=None, **kwargs): r"""Sends a PUT request. :param url: URL for the new :class:`Request` object. - :param data: (optional) Dictionary (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. :param json: (optional) json data to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. 
:return: :class:`Response <Response>` object @@ -130,7 +135,8 @@ def patch(url, data=None, **kwargs): r"""Sends a PATCH request. :param url: URL for the new :class:`Request` object. - :param data: (optional) Dictionary (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. :param json: (optional) json data to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object diff --git a/lib/requests/auth.py b/lib/requests/auth.py index 4ae45947..bdde51c7 100644 --- a/lib/requests/auth.py +++ b/lib/requests/auth.py @@ -38,7 +38,7 @@ def _basic_auth_str(username, password): if not isinstance(username, basestring): warnings.warn( "Non-string usernames will no longer be supported in Requests " - "3.0.0. Please convert the object you've passed in ({0!r}) to " + "3.0.0. Please convert the object you've passed in ({!r}) to " "a string or bytes object in the near future to avoid " "problems.".format(username), category=DeprecationWarning, @@ -48,7 +48,7 @@ def _basic_auth_str(username, password): if not isinstance(password, basestring): warnings.warn( "Non-string passwords will no longer be supported in Requests " - "3.0.0. Please convert the object you've passed in ({0!r}) to " + "3.0.0. Please convert the object you've passed in ({!r}) to " "a string or bytes object in the near future to avoid " "problems.".format(password), category=DeprecationWarning, diff --git a/lib/requests/compat.py b/lib/requests/compat.py index 6b9c6fac..c44b35ef 100644 --- a/lib/requests/compat.py +++ b/lib/requests/compat.py @@ -43,9 +43,8 @@ if is_py2: import cookielib from Cookie import Morsel from StringIO import StringIO - from collections import Callable, Mapping, MutableMapping + from collections import Callable, Mapping, MutableMapping, OrderedDict - from urllib3.packages.ordered_dict import OrderedDict builtin_str = str bytes = str diff --git a/lib/requests/cookies.py b/lib/requests/cookies.py index 50883a84..56fccd9c 100644 --- a/lib/requests/cookies.py +++ b/lib/requests/cookies.py @@ -444,20 +444,21 @@ def create_cookie(name, value, **kwargs): By default, the pair of `name` and `value` will be set for the domain '' and sent on every request (this is sometimes called a "supercookie"). """ - result = dict( - version=0, - name=name, - value=value, - port=None, - domain='', - path='/', - secure=False, - expires=None, - discard=True, - comment=None, - comment_url=None, - rest={'HttpOnly': None}, - rfc2109=False,) + result = { + 'version': 0, + 'name': name, + 'value': value, + 'port': None, + 'domain': '', + 'path': '/', + 'secure': False, + 'expires': None, + 'discard': True, + 'comment': None, + 'comment_url': None, + 'rest': {'HttpOnly': None}, + 'rfc2109': False, + } badargs = set(kwargs) - set(result) if badargs: @@ -511,6 +512,7 @@ def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True): :param cookiejar: (optional) A cookiejar to add the cookies to. :param overwrite: (optional) If False, will not replace cookies already in the jar with new ones. + :rtype: CookieJar """ if cookiejar is None: cookiejar = RequestsCookieJar() @@ -529,6 +531,7 @@ def merge_cookies(cookiejar, cookies): :param cookiejar: CookieJar object to add the cookies to. :param cookies: Dictionary or CookieJar object to be added. 
+ :rtype: CookieJar """ if not isinstance(cookiejar, cookielib.CookieJar): raise ValueError('You can only merge into CookieJar') diff --git a/lib/requests/help.py b/lib/requests/help.py index 06e06b2a..e53d35ef 100644 --- a/lib/requests/help.py +++ b/lib/requests/help.py @@ -89,8 +89,7 @@ def info(): 'version': getattr(idna, '__version__', ''), } - # OPENSSL_VERSION_NUMBER doesn't exist in the Python 2.6 ssl module. - system_ssl = getattr(ssl, 'OPENSSL_VERSION_NUMBER', None) + system_ssl = ssl.OPENSSL_VERSION_NUMBER system_ssl_info = { 'version': '%x' % system_ssl if system_ssl is not None else '' } diff --git a/lib/requests/hooks.py b/lib/requests/hooks.py index 32b32de7..7a51f212 100644 --- a/lib/requests/hooks.py +++ b/lib/requests/hooks.py @@ -15,14 +15,14 @@ HOOKS = ['response'] def default_hooks(): - return dict((event, []) for event in HOOKS) + return {event: [] for event in HOOKS} # TODO: response is the only one def dispatch_hook(key, hooks, hook_data, **kwargs): """Dispatches a hook dictionary on a given piece of data.""" - hooks = hooks or dict() + hooks = hooks or {} hooks = hooks.get(key) if hooks: if hasattr(hooks, '__call__'): diff --git a/lib/requests/models.py b/lib/requests/models.py index 3d0e1f42..62dcd0b7 100644 --- a/lib/requests/models.py +++ b/lib/requests/models.py @@ -204,9 +204,13 @@ class Request(RequestHooksMixin): :param url: URL to send. :param headers: dictionary of headers to send. :param files: dictionary of {filename: fileobject} files to multipart upload. - :param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place. + :param data: the body to attach to the request. If a dictionary or + list of tuples ``[(key, value)]`` is provided, form-encoding will + take place. :param json: json for the body to attach to the request (if files or data is not specified). - :param params: dictionary of URL parameters to append to the URL. + :param params: URL parameters to append to the URL. If a dictionary or + list of tuples ``[(key, value)]`` is provided, form-encoding will + take place. :param auth: Auth handler or (user, pass) tuple. :param cookies: dictionary or CookieJar of cookies to attach to this request. :param hooks: dictionary of callback hooks, for internal usage. @@ -214,7 +218,7 @@ class Request(RequestHooksMixin): Usage:: >>> import requests - >>> req = requests.Request('GET', 'http://httpbin.org/get') + >>> req = requests.Request('GET', 'https://httpbin.org/get') >>> req.prepare() <PreparedRequest [GET]> """ @@ -274,7 +278,7 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin): Usage:: >>> import requests - >>> req = requests.Request('GET', 'http://httpbin.org/get') + >>> req = requests.Request('GET', 'https://httpbin.org/get') >>> r = req.prepare() <PreparedRequest [GET]> @@ -648,10 +652,7 @@ class Response(object): if not self._content_consumed: self.content - return dict( - (attr, getattr(self, attr, None)) - for attr in self.__attrs__ - ) + return {attr: getattr(self, attr, None) for attr in self.__attrs__} def __setstate__(self, state): for name, value in state.items(): @@ -780,7 +781,7 @@ class Response(object): return chunks - def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None, delimiter=None): + def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None): """Iterates over the response data, one line at a time. When stream=True is set on the request, this avoids reading the content at once into memory for large responses. 
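
Reviewer note, not part of the patch: the models.py and api.py docstring updates above track two behaviours of the vendored requests 2.22, namely that ``data``/``params`` accept lists of two-tuples and that ``iter_lines()`` now defaults ``decode_unicode`` to ``False`` instead of ``None``. A minimal sketch, assuming the bundled library matches upstream; the httpbin URLs are illustrative only:

import requests

# Repeated form fields are naturally a list of two-tuples; a dict would
# collapse the duplicate keys.
payload = [('tag', 'python'), ('tag', 'http')]
r = requests.post('https://httpbin.org/post', data=payload)
print(r.json()['form'])          # {'tag': ['python', 'http']}

# With stream=True, iter_lines() yields bytes unless decode_unicode=True
# is passed explicitly (the default changed from None to False).
r = requests.get('https://httpbin.org/stream/2', stream=True)
for line in r.iter_lines():
    if line:                     # skip keep-alive blank lines
        print(line)
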
diff --git a/lib/requests/sessions.py b/lib/requests/sessions.py index ba135268..d73d700f 100644 --- a/lib/requests/sessions.py +++ b/lib/requests/sessions.py @@ -19,7 +19,7 @@ from .cookies import ( from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT from .hooks import default_hooks, dispatch_hook from ._internal_utils import to_native_string -from .utils import to_key_val_list, default_headers +from .utils import to_key_val_list, default_headers, DEFAULT_PORTS from .exceptions import ( TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError) @@ -115,6 +115,31 @@ class SessionRedirectMixin(object): return to_native_string(location, 'utf8') return None + def should_strip_auth(self, old_url, new_url): + """Decide whether Authorization header should be removed when redirecting""" + old_parsed = urlparse(old_url) + new_parsed = urlparse(new_url) + if old_parsed.hostname != new_parsed.hostname: + return True + # Special case: allow http -> https redirect when using the standard + # ports. This isn't specified by RFC 7235, but is kept to avoid + # breaking backwards compatibility with older versions of requests + # that allowed any redirects on the same host. + if (old_parsed.scheme == 'http' and old_parsed.port in (80, None) + and new_parsed.scheme == 'https' and new_parsed.port in (443, None)): + return False + + # Handle default port usage corresponding to scheme. + changed_port = old_parsed.port != new_parsed.port + changed_scheme = old_parsed.scheme != new_parsed.scheme + default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None) + if (not changed_scheme and old_parsed.port in default_port + and new_parsed.port in default_port): + return False + + # Standard case: root URI must match + return changed_port or changed_scheme + def resolve_redirects(self, resp, req, stream=False, timeout=None, verify=True, cert=None, proxies=None, yield_requests=False, **adapter_kwargs): """Receives a Response. Returns a generator of Responses or Requests.""" @@ -236,14 +261,10 @@ class SessionRedirectMixin(object): headers = prepared_request.headers url = prepared_request.url - if 'Authorization' in headers: + if 'Authorization' in headers and self.should_strip_auth(response.request.url, url): # If we get redirected to a new host, we should strip out any # authentication headers. - original_parsed = urlparse(response.request.url) - redirect_parsed = urlparse(url) - - if (original_parsed.hostname != redirect_parsed.hostname): - del headers['Authorization'] + del headers['Authorization'] # .netrc might have more auth for us on our new host. new_auth = get_netrc_auth(url) if self.trust_env else None @@ -299,7 +320,7 @@ class SessionRedirectMixin(object): """ method = prepared_request.method - # http://tools.ietf.org/html/rfc7231#section-6.4.4 + # https://tools.ietf.org/html/rfc7231#section-6.4.4 if response.status_code == codes.see_other and method != 'HEAD': method = 'GET' @@ -325,13 +346,13 @@ class Session(SessionRedirectMixin): >>> import requests >>> s = requests.Session() - >>> s.get('http://httpbin.org/get') + >>> s.get('https://httpbin.org/get') <Response [200]> Or as a context manager:: >>> with requests.Session() as s: - >>> s.get('http://httpbin.org/get') + >>> s.get('https://httpbin.org/get') <Response [200]> """ @@ -453,8 +474,8 @@ class Session(SessionRedirectMixin): :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. 
-        :param data: (optional) Dictionary, bytes, or file-like object to send
-            in the body of the :class:`Request`.
+        :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+            object to send in the body of the :class:`Request`.
         :param json: (optional) json to send in the body of the
             :class:`Request`.
         :param headers: (optional) Dictionary of HTTP Headers to send with the
@@ -550,7 +571,8 @@ class Session(SessionRedirectMixin):
         r"""Sends a POST request. Returns :class:`Response` object.
 
         :param url: URL for the new :class:`Request` object.
-        :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
+        :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+            object to send in the body of the :class:`Request`.
         :param json: (optional) json to send in the body of the :class:`Request`.
         :param \*\*kwargs: Optional arguments that ``request`` takes.
         :rtype: requests.Response
@@ -562,7 +584,8 @@ class Session(SessionRedirectMixin):
         r"""Sends a PUT request. Returns :class:`Response` object.
 
         :param url: URL for the new :class:`Request` object.
-        :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
+        :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+            object to send in the body of the :class:`Request`.
         :param \*\*kwargs: Optional arguments that ``request`` takes.
         :rtype: requests.Response
         """
@@ -573,7 +596,8 @@ class Session(SessionRedirectMixin):
         r"""Sends a PATCH request. Returns :class:`Response` object.
 
         :param url: URL for the new :class:`Request` object.
-        :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
+        :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+            object to send in the body of the :class:`Request`.
         :param \*\*kwargs: Optional arguments that ``request`` takes.
         :rtype: requests.Response
         """
@@ -723,7 +747,7 @@ class Session(SessionRedirectMixin):
             self.adapters[key] = self.adapters.pop(key)
 
     def __getstate__(self):
-        state = dict((attr, getattr(self, attr, None)) for attr in self.__attrs__)
+        state = {attr: getattr(self, attr, None) for attr in self.__attrs__}
         return state
 
     def __setstate__(self, state):
@@ -735,7 +759,12 @@ def session():
     """
     Returns a :class:`Session` for context-management.
 
+    .. deprecated:: 1.0.0
+
+        This method has been deprecated since version 1.0.0 and is only kept for
+        backwards compatibility. New code should use :class:`~requests.sessions.Session`
+        to create a session. This may be removed at a future date.
+
     :rtype: Session
     """
-
     return Session()
diff --git a/lib/requests/status_codes.py b/lib/requests/status_codes.py
index ff462c6c..813e8c4e 100644
--- a/lib/requests/status_codes.py
+++ b/lib/requests/status_codes.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 
-"""
+r"""
 The ``codes`` object defines a mapping from common names for HTTP statuses
 to their numerical codes, accessible either as attributes or as dictionary
 items.
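
Reviewer note, not part of the patch: ``should_strip_auth``, added to lib/requests/sessions.py above, centralises the decision of when a redirect may keep the ``Authorization`` header. A quick sketch of the rule it encodes, assuming the vendored module imports as ``requests.sessions``; the hosts are made up:

from requests.sessions import SessionRedirectMixin

mixin = SessionRedirectMixin()
# A different host must never inherit the Authorization header.
assert mixin.should_strip_auth('http://example.com/a', 'http://evil.example/b')
# Same host, http -> https on the default ports keeps it (the documented
# backwards-compatibility special case).
assert not mixin.should_strip_auth('http://example.com/a', 'https://example.com/b')
# Same host but a non-default port is treated as a different origin.
assert mixin.should_strip_auth('https://example.com/a', 'https://example.com:8443/b')
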
diff --git a/lib/requests/utils.py b/lib/requests/utils.py index 431f6be0..8170a8d2 100644 --- a/lib/requests/utils.py +++ b/lib/requests/utils.py @@ -38,6 +38,8 @@ NETRC_FILES = ('.netrc', '_netrc') DEFAULT_CA_BUNDLE_PATH = certs.where() +DEFAULT_PORTS = {'http': 80, 'https': 443} + if sys.platform == 'win32': # provide a proxy_bypass version on Windows without DNS lookups @@ -173,10 +175,10 @@ def get_netrc_auth(url, raise_errors=False): for f in NETRC_FILES: try: - loc = os.path.expanduser('~/{0}'.format(f)) + loc = os.path.expanduser('~/{}'.format(f)) except KeyError: # os.path.expanduser can fail when $HOME is undefined and - # getpwuid fails. See http://bugs.python.org/issue20164 & + # getpwuid fails. See https://bugs.python.org/issue20164 & # https://github.com/requests/requests/issues/1846 return @@ -264,7 +266,7 @@ def from_key_val_list(value): >>> from_key_val_list([('key', 'val')]) OrderedDict([('key', 'val')]) >>> from_key_val_list('string') - ValueError: need more than 1 value to unpack + ValueError: cannot encode objects that are not 2-tuples >>> from_key_val_list({'key': 'val'}) OrderedDict([('key', 'val')]) @@ -466,7 +468,7 @@ def _parse_content_type_header(header): if index_of_equals != -1: key = param[:index_of_equals].strip(items_to_strip) value = param[index_of_equals + 1:].strip(items_to_strip) - params_dict[key] = value + params_dict[key.lower()] = value return content_type, params_dict @@ -706,6 +708,10 @@ def should_bypass_proxies(url, no_proxy): no_proxy = get_proxy('no_proxy') parsed = urlparse(url) + if parsed.hostname is None: + # URLs don't always have hostnames, e.g. file:/// urls. + return True + if no_proxy: # We need to check whether we match here. We need to see if we match # the end of the hostname, both with and without the port. @@ -725,7 +731,7 @@ def should_bypass_proxies(url, no_proxy): else: host_with_port = parsed.hostname if parsed.port: - host_with_port += ':{0}'.format(parsed.port) + host_with_port += ':{}'.format(parsed.port) for host in no_proxy: if parsed.hostname.endswith(host) or host_with_port.endswith(host): @@ -733,13 +739,8 @@ def should_bypass_proxies(url, no_proxy): # to apply the proxies on this URL. return True - # If the system proxy settings indicate that this URL should be bypassed, - # don't proxy. - # The proxy_bypass function is incredibly buggy on OS X in early versions - # of Python 2.6, so allow this call to fail. Only catch the specific - # exceptions we've seen, though: this call failing in other ways can reveal - # legitimate problems. with set_environ('no_proxy', no_proxy_arg): + # parsed.hostname can be `None` in cases such as a file URI. try: bypass = proxy_bypass(parsed.hostname) except (TypeError, socket.gaierror): diff --git a/lib/tornado/__init__.py b/lib/tornado/__init__.py deleted file mode 100755 index b269cf70..00000000 --- a/lib/tornado/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -# -# Copyright 2009 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""The Tornado web server and tools.""" - -from __future__ import absolute_import, division, print_function - -# version is a human-readable version number. - -# version_info is a four-tuple for programmatic comparison. The first -# three numbers are the components of the version number. The fourth -# is zero for an official release, positive for a development branch, -# or negative for a release candidate or beta (after the base version -# number has been incremented) -version = "5.1.1" -version_info = (5, 1, 1, 0) diff --git a/lib/tornado/_locale_data.py b/lib/tornado/_locale_data.py deleted file mode 100755 index a2c50390..00000000 --- a/lib/tornado/_locale_data.py +++ /dev/null @@ -1,84 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2012 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Data used by the tornado.locale module.""" - -from __future__ import absolute_import, division, print_function - -LOCALE_NAMES = { - "af_ZA": {"name_en": u"Afrikaans", "name": u"Afrikaans"}, - "am_ET": {"name_en": u"Amharic", "name": u"አማርኛ"}, - "ar_AR": {"name_en": u"Arabic", "name": u"العربية"}, - "bg_BG": {"name_en": u"Bulgarian", "name": u"Български"}, - "bn_IN": {"name_en": u"Bengali", "name": u"বাংলা"}, - "bs_BA": {"name_en": u"Bosnian", "name": u"Bosanski"}, - "ca_ES": {"name_en": u"Catalan", "name": u"Català"}, - "cs_CZ": {"name_en": u"Czech", "name": u"Čeština"}, - "cy_GB": {"name_en": u"Welsh", "name": u"Cymraeg"}, - "da_DK": {"name_en": u"Danish", "name": u"Dansk"}, - "de_DE": {"name_en": u"German", "name": u"Deutsch"}, - "el_GR": {"name_en": u"Greek", "name": u"Ελληνικά"}, - "en_GB": {"name_en": u"English (UK)", "name": u"English (UK)"}, - "en_US": {"name_en": u"English (US)", "name": u"English (US)"}, - "es_ES": {"name_en": u"Spanish (Spain)", "name": u"Español (España)"}, - "es_LA": {"name_en": u"Spanish", "name": u"Español"}, - "et_EE": {"name_en": u"Estonian", "name": u"Eesti"}, - "eu_ES": {"name_en": u"Basque", "name": u"Euskara"}, - "fa_IR": {"name_en": u"Persian", "name": u"فارسی"}, - "fi_FI": {"name_en": u"Finnish", "name": u"Suomi"}, - "fr_CA": {"name_en": u"French (Canada)", "name": u"Français (Canada)"}, - "fr_FR": {"name_en": u"French", "name": u"Français"}, - "ga_IE": {"name_en": u"Irish", "name": u"Gaeilge"}, - "gl_ES": {"name_en": u"Galician", "name": u"Galego"}, - "he_IL": {"name_en": u"Hebrew", "name": u"עברית"}, - "hi_IN": {"name_en": u"Hindi", "name": u"हिन्दी"}, - "hr_HR": {"name_en": u"Croatian", "name": u"Hrvatski"}, - "hu_HU": {"name_en": u"Hungarian", "name": u"Magyar"}, - "id_ID": {"name_en": u"Indonesian", "name": u"Bahasa Indonesia"}, - "is_IS": {"name_en": u"Icelandic", "name": u"Íslenska"}, - "it_IT": {"name_en": u"Italian", "name": u"Italiano"}, - "ja_JP": {"name_en": u"Japanese", "name": u"日本語"}, - "ko_KR": {"name_en": u"Korean", "name": u"한국어"}, - "lt_LT": {"name_en": u"Lithuanian", "name": u"Lietuvių"}, - "lv_LV": {"name_en": u"Latvian", "name": u"Latviešu"}, - "mk_MK": {"name_en": u"Macedonian", "name": u"Македонски"}, - "ml_IN": {"name_en": 
u"Malayalam", "name": u"മലയാളം"}, - "ms_MY": {"name_en": u"Malay", "name": u"Bahasa Melayu"}, - "nb_NO": {"name_en": u"Norwegian (bokmal)", "name": u"Norsk (bokmål)"}, - "nl_NL": {"name_en": u"Dutch", "name": u"Nederlands"}, - "nn_NO": {"name_en": u"Norwegian (nynorsk)", "name": u"Norsk (nynorsk)"}, - "pa_IN": {"name_en": u"Punjabi", "name": u"ਪੰਜਾਬੀ"}, - "pl_PL": {"name_en": u"Polish", "name": u"Polski"}, - "pt_BR": {"name_en": u"Portuguese (Brazil)", "name": u"Português (Brasil)"}, - "pt_PT": {"name_en": u"Portuguese (Portugal)", "name": u"Português (Portugal)"}, - "ro_RO": {"name_en": u"Romanian", "name": u"Română"}, - "ru_RU": {"name_en": u"Russian", "name": u"Русский"}, - "sk_SK": {"name_en": u"Slovak", "name": u"Slovenčina"}, - "sl_SI": {"name_en": u"Slovenian", "name": u"Slovenščina"}, - "sq_AL": {"name_en": u"Albanian", "name": u"Shqip"}, - "sr_RS": {"name_en": u"Serbian", "name": u"Српски"}, - "sv_SE": {"name_en": u"Swedish", "name": u"Svenska"}, - "sw_KE": {"name_en": u"Swahili", "name": u"Kiswahili"}, - "ta_IN": {"name_en": u"Tamil", "name": u"தமிழ்"}, - "te_IN": {"name_en": u"Telugu", "name": u"తెలుగు"}, - "th_TH": {"name_en": u"Thai", "name": u"ภาษาไทย"}, - "tl_PH": {"name_en": u"Filipino", "name": u"Filipino"}, - "tr_TR": {"name_en": u"Turkish", "name": u"Türkçe"}, - "uk_UA": {"name_en": u"Ukraini ", "name": u"Українська"}, - "vi_VN": {"name_en": u"Vietnamese", "name": u"Tiếng Việt"}, - "zh_CN": {"name_en": u"Chinese (Simplified)", "name": u"中文(简体)"}, - "zh_TW": {"name_en": u"Chinese (Traditional)", "name": u"中文(繁體)"}, -} diff --git a/lib/tornado/auth.py b/lib/tornado/auth.py deleted file mode 100755 index b79ad14b..00000000 --- a/lib/tornado/auth.py +++ /dev/null @@ -1,1236 +0,0 @@ -# -# Copyright 2009 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""This module contains implementations of various third-party -authentication schemes. - -All the classes in this file are class mixins designed to be used with -the `tornado.web.RequestHandler` class. They are used in two ways: - -* On a login handler, use methods such as ``authenticate_redirect()``, - ``authorize_redirect()``, and ``get_authenticated_user()`` to - establish the user's identity and store authentication tokens to your - database and/or cookies. -* In non-login handlers, use methods such as ``facebook_request()`` - or ``twitter_request()`` to use the authentication tokens to make - requests to the respective services. - -They all take slightly different arguments due to the fact all these -services implement authentication and authorization slightly differently. -See the individual service classes below for complete documentation. - -Example usage for Google OAuth: - -.. testcode:: - - class GoogleOAuth2LoginHandler(tornado.web.RequestHandler, - tornado.auth.GoogleOAuth2Mixin): - async def get(self): - if self.get_argument('code', False): - user = await self.get_authenticated_user( - redirect_uri='http://your.site.com/auth/google', - code=self.get_argument('code')) - # Save the user with e.g. 
set_secure_cookie - else: - await self.authorize_redirect( - redirect_uri='http://your.site.com/auth/google', - client_id=self.settings['google_oauth']['key'], - scope=['profile', 'email'], - response_type='code', - extra_params={'approval_prompt': 'auto'}) - -.. testoutput:: - :hide: - - -.. versionchanged:: 4.0 - All of the callback interfaces in this module are now guaranteed - to run their callback with an argument of ``None`` on error. - Previously some functions would do this while others would simply - terminate the request on their own. This change also ensures that - errors are more consistently reported through the ``Future`` interfaces. -""" - -from __future__ import absolute_import, division, print_function - -import base64 -import binascii -import functools -import hashlib -import hmac -import time -import uuid -import warnings - -from tornado.concurrent import (Future, _non_deprecated_return_future, - future_set_exc_info, chain_future, - future_set_result_unless_cancelled) -from tornado import gen -from tornado import httpclient -from tornado import escape -from tornado.httputil import url_concat -from tornado.log import gen_log -from tornado.stack_context import ExceptionStackContext, wrap -from tornado.util import unicode_type, ArgReplacer, PY3 - -if PY3: - import urllib.parse as urlparse - import urllib.parse as urllib_parse - long = int -else: - import urlparse - import urllib as urllib_parse - - -class AuthError(Exception): - pass - - -def _auth_future_to_callback(callback, future): - try: - result = future.result() - except AuthError as e: - gen_log.warning(str(e)) - result = None - callback(result) - - -def _auth_return_future(f): - """Similar to tornado.concurrent.return_future, but uses the auth - module's legacy callback interface. - - Note that when using this decorator the ``callback`` parameter - inside the function will actually be a future. - - .. deprecated:: 5.1 - Will be removed in 6.0. - """ - replacer = ArgReplacer(f, 'callback') - - @functools.wraps(f) - def wrapper(*args, **kwargs): - future = Future() - callback, args, kwargs = replacer.replace(future, args, kwargs) - if callback is not None: - warnings.warn("callback arguments are deprecated, use the returned Future instead", - DeprecationWarning) - future.add_done_callback( - wrap(functools.partial(_auth_future_to_callback, callback))) - - def handle_exception(typ, value, tb): - if future.done(): - return False - else: - future_set_exc_info(future, (typ, value, tb)) - return True - with ExceptionStackContext(handle_exception, delay_warning=True): - f(*args, **kwargs) - return future - return wrapper - - -class OpenIdMixin(object): - """Abstract implementation of OpenID and Attribute Exchange. - - Class attributes: - - * ``_OPENID_ENDPOINT``: the identity provider's URI. - """ - @_non_deprecated_return_future - def authenticate_redirect(self, callback_uri=None, - ax_attrs=["name", "email", "language", "username"], - callback=None): - """Redirects to the authentication URL for this service. - - After authentication, the service will redirect back to the given - callback URI with additional parameters including ``openid.mode``. - - We request the given attributes for the authenticated user by - default (name, email, language, and username). If you don't need - all those attributes for your app, you can request fewer with - the ax_attrs keyword argument. - - .. versionchanged:: 3.1 - Returns a `.Future` and takes an optional callback. 
These are - not strictly necessary as this method is synchronous, - but they are supplied for consistency with - `OAuthMixin.authorize_redirect`. - - .. deprecated:: 5.1 - - The ``callback`` argument and returned awaitable will be removed - in Tornado 6.0; this will be an ordinary synchronous function. - """ - callback_uri = callback_uri or self.request.uri - args = self._openid_args(callback_uri, ax_attrs=ax_attrs) - self.redirect(self._OPENID_ENDPOINT + "?" + urllib_parse.urlencode(args)) - callback() - - @_auth_return_future - def get_authenticated_user(self, callback, http_client=None): - """Fetches the authenticated user data upon redirect. - - This method should be called by the handler that receives the - redirect from the `authenticate_redirect()` method (which is - often the same as the one that calls it; in that case you would - call `get_authenticated_user` if the ``openid.mode`` parameter - is present and `authenticate_redirect` if it is not). - - The result of this method will generally be used to set a cookie. - - .. deprecated:: 5.1 - - The ``callback`` argument is deprecated and will be removed in 6.0. - Use the returned awaitable object instead. - """ - # Verify the OpenID response via direct request to the OP - args = dict((k, v[-1]) for k, v in self.request.arguments.items()) - args["openid.mode"] = u"check_authentication" - url = self._OPENID_ENDPOINT - if http_client is None: - http_client = self.get_auth_http_client() - fut = http_client.fetch(url, method="POST", body=urllib_parse.urlencode(args)) - fut.add_done_callback(wrap(functools.partial( - self._on_authentication_verified, callback))) - - def _openid_args(self, callback_uri, ax_attrs=[], oauth_scope=None): - url = urlparse.urljoin(self.request.full_url(), callback_uri) - args = { - "openid.ns": "http://specs.openid.net/auth/2.0", - "openid.claimed_id": - "http://specs.openid.net/auth/2.0/identifier_select", - "openid.identity": - "http://specs.openid.net/auth/2.0/identifier_select", - "openid.return_to": url, - "openid.realm": urlparse.urljoin(url, '/'), - "openid.mode": "checkid_setup", - } - if ax_attrs: - args.update({ - "openid.ns.ax": "http://openid.net/srv/ax/1.0", - "openid.ax.mode": "fetch_request", - }) - ax_attrs = set(ax_attrs) - required = [] - if "name" in ax_attrs: - ax_attrs -= set(["name", "firstname", "fullname", "lastname"]) - required += ["firstname", "fullname", "lastname"] - args.update({ - "openid.ax.type.firstname": - "http://axschema.org/namePerson/first", - "openid.ax.type.fullname": - "http://axschema.org/namePerson", - "openid.ax.type.lastname": - "http://axschema.org/namePerson/last", - }) - known_attrs = { - "email": "http://axschema.org/contact/email", - "language": "http://axschema.org/pref/language", - "username": "http://axschema.org/namePerson/friendly", - } - for name in ax_attrs: - args["openid.ax.type." 
+ name] = known_attrs[name] - required.append(name) - args["openid.ax.required"] = ",".join(required) - if oauth_scope: - args.update({ - "openid.ns.oauth": - "http://specs.openid.net/extensions/oauth/1.0", - "openid.oauth.consumer": self.request.host.split(":")[0], - "openid.oauth.scope": oauth_scope, - }) - return args - - def _on_authentication_verified(self, future, response_fut): - try: - response = response_fut.result() - except Exception as e: - future.set_exception(AuthError( - "Error response %s" % e)) - return - if b"is_valid:true" not in response.body: - future.set_exception(AuthError( - "Invalid OpenID response: %s" % response.body)) - return - - # Make sure we got back at least an email from attribute exchange - ax_ns = None - for name in self.request.arguments: - if name.startswith("openid.ns.") and \ - self.get_argument(name) == u"http://openid.net/srv/ax/1.0": - ax_ns = name[10:] - break - - def get_ax_arg(uri): - if not ax_ns: - return u"" - prefix = "openid." + ax_ns + ".type." - ax_name = None - for name in self.request.arguments.keys(): - if self.get_argument(name) == uri and name.startswith(prefix): - part = name[len(prefix):] - ax_name = "openid." + ax_ns + ".value." + part - break - if not ax_name: - return u"" - return self.get_argument(ax_name, u"") - - email = get_ax_arg("http://axschema.org/contact/email") - name = get_ax_arg("http://axschema.org/namePerson") - first_name = get_ax_arg("http://axschema.org/namePerson/first") - last_name = get_ax_arg("http://axschema.org/namePerson/last") - username = get_ax_arg("http://axschema.org/namePerson/friendly") - locale = get_ax_arg("http://axschema.org/pref/language").lower() - user = dict() - name_parts = [] - if first_name: - user["first_name"] = first_name - name_parts.append(first_name) - if last_name: - user["last_name"] = last_name - name_parts.append(last_name) - if name: - user["name"] = name - elif name_parts: - user["name"] = u" ".join(name_parts) - elif email: - user["name"] = email.split("@")[0] - if email: - user["email"] = email - if locale: - user["locale"] = locale - if username: - user["username"] = username - claimed_id = self.get_argument("openid.claimed_id", None) - if claimed_id: - user["claimed_id"] = claimed_id - future_set_result_unless_cancelled(future, user) - - def get_auth_http_client(self): - """Returns the `.AsyncHTTPClient` instance to be used for auth requests. - - May be overridden by subclasses to use an HTTP client other than - the default. - """ - return httpclient.AsyncHTTPClient() - - -class OAuthMixin(object): - """Abstract implementation of OAuth 1.0 and 1.0a. - - See `TwitterMixin` below for an example implementation. - - Class attributes: - - * ``_OAUTH_AUTHORIZE_URL``: The service's OAuth authorization url. - * ``_OAUTH_ACCESS_TOKEN_URL``: The service's OAuth access token url. - * ``_OAUTH_VERSION``: May be either "1.0" or "1.0a". - * ``_OAUTH_NO_CALLBACKS``: Set this to True if the service requires - advance registration of callbacks. - - Subclasses must also override the `_oauth_get_user_future` and - `_oauth_consumer_token` methods. - """ - @_non_deprecated_return_future - def authorize_redirect(self, callback_uri=None, extra_params=None, - http_client=None, callback=None): - """Redirects the user to obtain OAuth authorization for this service. - - The ``callback_uri`` may be omitted if you have previously - registered a callback URI with the third-party service. 
For - some services, you must use a previously-registered callback - URI and cannot specify a callback via this method. - - This method sets a cookie called ``_oauth_request_token`` which is - subsequently used (and cleared) in `get_authenticated_user` for - security purposes. - - This method is asynchronous and must be called with ``await`` - or ``yield`` (This is different from other ``auth*_redirect`` - methods defined in this module). It calls - `.RequestHandler.finish` for you so you should not write any - other response after it returns. - - .. versionchanged:: 3.1 - Now returns a `.Future` and takes an optional callback, for - compatibility with `.gen.coroutine`. - - .. deprecated:: 5.1 - - The ``callback`` argument is deprecated and will be removed in 6.0. - Use the returned awaitable object instead. - - """ - if callback_uri and getattr(self, "_OAUTH_NO_CALLBACKS", False): - raise Exception("This service does not support oauth_callback") - if http_client is None: - http_client = self.get_auth_http_client() - if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a": - fut = http_client.fetch( - self._oauth_request_token_url(callback_uri=callback_uri, - extra_params=extra_params)) - fut.add_done_callback(wrap(functools.partial( - self._on_request_token, - self._OAUTH_AUTHORIZE_URL, - callback_uri, - callback))) - else: - fut = http_client.fetch(self._oauth_request_token_url()) - fut.add_done_callback( - wrap(functools.partial( - self._on_request_token, self._OAUTH_AUTHORIZE_URL, - callback_uri, - callback))) - - @_auth_return_future - def get_authenticated_user(self, callback, http_client=None): - """Gets the OAuth authorized user and access token. - - This method should be called from the handler for your - OAuth callback URL to complete the registration process. We run the - callback with the authenticated user dictionary. This dictionary - will contain an ``access_key`` which can be used to make authorized - requests to this service on behalf of the user. The dictionary will - also contain other fields such as ``name``, depending on the service - used. - - .. deprecated:: 5.1 - - The ``callback`` argument is deprecated and will be removed in 6.0. - Use the returned awaitable object instead. 
- """ - future = callback - request_key = escape.utf8(self.get_argument("oauth_token")) - oauth_verifier = self.get_argument("oauth_verifier", None) - request_cookie = self.get_cookie("_oauth_request_token") - if not request_cookie: - future.set_exception(AuthError( - "Missing OAuth request token cookie")) - return - self.clear_cookie("_oauth_request_token") - cookie_key, cookie_secret = [ - base64.b64decode(escape.utf8(i)) for i in request_cookie.split("|")] - if cookie_key != request_key: - future.set_exception(AuthError( - "Request token does not match cookie")) - return - token = dict(key=cookie_key, secret=cookie_secret) - if oauth_verifier: - token["verifier"] = oauth_verifier - if http_client is None: - http_client = self.get_auth_http_client() - fut = http_client.fetch(self._oauth_access_token_url(token)) - fut.add_done_callback(wrap(functools.partial(self._on_access_token, callback))) - - def _oauth_request_token_url(self, callback_uri=None, extra_params=None): - consumer_token = self._oauth_consumer_token() - url = self._OAUTH_REQUEST_TOKEN_URL - args = dict( - oauth_consumer_key=escape.to_basestring(consumer_token["key"]), - oauth_signature_method="HMAC-SHA1", - oauth_timestamp=str(int(time.time())), - oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)), - oauth_version="1.0", - ) - if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a": - if callback_uri == "oob": - args["oauth_callback"] = "oob" - elif callback_uri: - args["oauth_callback"] = urlparse.urljoin( - self.request.full_url(), callback_uri) - if extra_params: - args.update(extra_params) - signature = _oauth10a_signature(consumer_token, "GET", url, args) - else: - signature = _oauth_signature(consumer_token, "GET", url, args) - - args["oauth_signature"] = signature - return url + "?" + urllib_parse.urlencode(args) - - def _on_request_token(self, authorize_url, callback_uri, callback, - response_fut): - try: - response = response_fut.result() - except Exception as e: - raise Exception("Could not get request token: %s" % e) - request_token = _oauth_parse_response(response.body) - data = (base64.b64encode(escape.utf8(request_token["key"])) + b"|" + - base64.b64encode(escape.utf8(request_token["secret"]))) - self.set_cookie("_oauth_request_token", data) - args = dict(oauth_token=request_token["key"]) - if callback_uri == "oob": - self.finish(authorize_url + "?" + urllib_parse.urlencode(args)) - callback() - return - elif callback_uri: - args["oauth_callback"] = urlparse.urljoin( - self.request.full_url(), callback_uri) - self.redirect(authorize_url + "?" + urllib_parse.urlencode(args)) - callback() - - def _oauth_access_token_url(self, request_token): - consumer_token = self._oauth_consumer_token() - url = self._OAUTH_ACCESS_TOKEN_URL - args = dict( - oauth_consumer_key=escape.to_basestring(consumer_token["key"]), - oauth_token=escape.to_basestring(request_token["key"]), - oauth_signature_method="HMAC-SHA1", - oauth_timestamp=str(int(time.time())), - oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)), - oauth_version="1.0", - ) - if "verifier" in request_token: - args["oauth_verifier"] = request_token["verifier"] - - if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a": - signature = _oauth10a_signature(consumer_token, "GET", url, args, - request_token) - else: - signature = _oauth_signature(consumer_token, "GET", url, args, - request_token) - - args["oauth_signature"] = signature - return url + "?" 
+ urllib_parse.urlencode(args) - - def _on_access_token(self, future, response_fut): - try: - response = response_fut.result() - except Exception: - future.set_exception(AuthError("Could not fetch access token")) - return - - access_token = _oauth_parse_response(response.body) - fut = self._oauth_get_user_future(access_token) - fut = gen.convert_yielded(fut) - fut.add_done_callback( - wrap(functools.partial(self._on_oauth_get_user, access_token, future))) - - def _oauth_consumer_token(self): - """Subclasses must override this to return their OAuth consumer keys. - - The return value should be a `dict` with keys ``key`` and ``secret``. - """ - raise NotImplementedError() - - @_non_deprecated_return_future - def _oauth_get_user_future(self, access_token, callback): - """Subclasses must override this to get basic information about the - user. - - Should return a `.Future` whose result is a dictionary - containing information about the user, which may have been - retrieved by using ``access_token`` to make a request to the - service. - - The access token will be added to the returned dictionary to make - the result of `get_authenticated_user`. - - For backwards compatibility, the callback-based ``_oauth_get_user`` - method is also supported. - - .. versionchanged:: 5.1 - - Subclasses may also define this method with ``async def``. - - .. deprecated:: 5.1 - - The ``_oauth_get_user`` fallback is deprecated and support for it - will be removed in 6.0. - """ - warnings.warn("_oauth_get_user is deprecated, override _oauth_get_user_future instead", - DeprecationWarning) - # By default, call the old-style _oauth_get_user, but new code - # should override this method instead. - self._oauth_get_user(access_token, callback) - - def _oauth_get_user(self, access_token, callback): - raise NotImplementedError() - - def _on_oauth_get_user(self, access_token, future, user_future): - if user_future.exception() is not None: - future.set_exception(user_future.exception()) - return - user = user_future.result() - if not user: - future.set_exception(AuthError("Error getting user")) - return - user["access_token"] = access_token - future_set_result_unless_cancelled(future, user) - - def _oauth_request_parameters(self, url, access_token, parameters={}, - method="GET"): - """Returns the OAuth parameters as a dict for the given request. - - parameters should include all POST arguments and query string arguments - that will be sent with the request. - """ - consumer_token = self._oauth_consumer_token() - base_args = dict( - oauth_consumer_key=escape.to_basestring(consumer_token["key"]), - oauth_token=escape.to_basestring(access_token["key"]), - oauth_signature_method="HMAC-SHA1", - oauth_timestamp=str(int(time.time())), - oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)), - oauth_version="1.0", - ) - args = {} - args.update(base_args) - args.update(parameters) - if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a": - signature = _oauth10a_signature(consumer_token, method, url, args, - access_token) - else: - signature = _oauth_signature(consumer_token, method, url, args, - access_token) - base_args["oauth_signature"] = escape.to_basestring(signature) - return base_args - - def get_auth_http_client(self): - """Returns the `.AsyncHTTPClient` instance to be used for auth requests. - - May be overridden by subclasses to use an HTTP client other than - the default. - """ - return httpclient.AsyncHTTPClient() - - -class OAuth2Mixin(object): - """Abstract implementation of OAuth 2.0. 
- - See `FacebookGraphMixin` or `GoogleOAuth2Mixin` below for example - implementations. - - Class attributes: - - * ``_OAUTH_AUTHORIZE_URL``: The service's authorization url. - * ``_OAUTH_ACCESS_TOKEN_URL``: The service's access token url. - """ - @_non_deprecated_return_future - def authorize_redirect(self, redirect_uri=None, client_id=None, - client_secret=None, extra_params=None, - callback=None, scope=None, response_type="code"): - """Redirects the user to obtain OAuth authorization for this service. - - Some providers require that you register a redirect URL with - your application instead of passing one via this method. You - should call this method to log the user in, and then call - ``get_authenticated_user`` in the handler for your - redirect URL to complete the authorization process. - - .. versionchanged:: 3.1 - Returns a `.Future` and takes an optional callback. These are - not strictly necessary as this method is synchronous, - but they are supplied for consistency with - `OAuthMixin.authorize_redirect`. - - .. deprecated:: 5.1 - - The ``callback`` argument and returned awaitable will be removed - in Tornado 6.0; this will be an ordinary synchronous function. - """ - args = { - "redirect_uri": redirect_uri, - "client_id": client_id, - "response_type": response_type - } - if extra_params: - args.update(extra_params) - if scope: - args['scope'] = ' '.join(scope) - self.redirect( - url_concat(self._OAUTH_AUTHORIZE_URL, args)) - callback() - - def _oauth_request_token_url(self, redirect_uri=None, client_id=None, - client_secret=None, code=None, - extra_params=None): - url = self._OAUTH_ACCESS_TOKEN_URL - args = dict( - redirect_uri=redirect_uri, - code=code, - client_id=client_id, - client_secret=client_secret, - ) - if extra_params: - args.update(extra_params) - return url_concat(url, args) - - @_auth_return_future - def oauth2_request(self, url, callback, access_token=None, - post_args=None, **args): - """Fetches the given URL auth an OAuth2 access token. - - If the request is a POST, ``post_args`` should be provided. Query - string arguments should be given as keyword arguments. - - Example usage: - - ..testcode:: - - class MainHandler(tornado.web.RequestHandler, - tornado.auth.FacebookGraphMixin): - @tornado.web.authenticated - async def get(self): - new_entry = await self.oauth2_request( - "https://graph.facebook.com/me/feed", - post_args={"message": "I am posting from my Tornado application!"}, - access_token=self.current_user["access_token"]) - - if not new_entry: - # Call failed; perhaps missing permission? - await self.authorize_redirect() - return - self.finish("Posted a message!") - - .. testoutput:: - :hide: - - .. versionadded:: 4.3 - - .. deprecated:: 5.1 - - The ``callback`` argument is deprecated and will be removed in 6.0. - Use the returned awaitable object instead. - """ - all_args = {} - if access_token: - all_args["access_token"] = access_token - all_args.update(args) - - if all_args: - url += "?" 
+ urllib_parse.urlencode(all_args) - callback = wrap(functools.partial(self._on_oauth2_request, callback)) - http = self.get_auth_http_client() - if post_args is not None: - fut = http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args)) - else: - fut = http.fetch(url) - fut.add_done_callback(callback) - - def _on_oauth2_request(self, future, response_fut): - try: - response = response_fut.result() - except Exception as e: - future.set_exception(AuthError("Error response %s" % e)) - return - - future_set_result_unless_cancelled(future, escape.json_decode(response.body)) - - def get_auth_http_client(self): - """Returns the `.AsyncHTTPClient` instance to be used for auth requests. - - May be overridden by subclasses to use an HTTP client other than - the default. - - .. versionadded:: 4.3 - """ - return httpclient.AsyncHTTPClient() - - -class TwitterMixin(OAuthMixin): - """Twitter OAuth authentication. - - To authenticate with Twitter, register your application with - Twitter at http://twitter.com/apps. Then copy your Consumer Key - and Consumer Secret to the application - `~tornado.web.Application.settings` ``twitter_consumer_key`` and - ``twitter_consumer_secret``. Use this mixin on the handler for the - URL you registered as your application's callback URL. - - When your application is set up, you can use this mixin like this - to authenticate the user with Twitter and get access to their stream: - - .. testcode:: - - class TwitterLoginHandler(tornado.web.RequestHandler, - tornado.auth.TwitterMixin): - async def get(self): - if self.get_argument("oauth_token", None): - user = await self.get_authenticated_user() - # Save the user using e.g. set_secure_cookie() - else: - await self.authorize_redirect() - - .. testoutput:: - :hide: - - The user object returned by `~OAuthMixin.get_authenticated_user` - includes the attributes ``username``, ``name``, ``access_token``, - and all of the custom Twitter user attributes described at - https://dev.twitter.com/docs/api/1.1/get/users/show - """ - _OAUTH_REQUEST_TOKEN_URL = "https://api.twitter.com/oauth/request_token" - _OAUTH_ACCESS_TOKEN_URL = "https://api.twitter.com/oauth/access_token" - _OAUTH_AUTHORIZE_URL = "https://api.twitter.com/oauth/authorize" - _OAUTH_AUTHENTICATE_URL = "https://api.twitter.com/oauth/authenticate" - _OAUTH_NO_CALLBACKS = False - _TWITTER_BASE_URL = "https://api.twitter.com/1.1" - - @_non_deprecated_return_future - def authenticate_redirect(self, callback_uri=None, callback=None): - """Just like `~OAuthMixin.authorize_redirect`, but - auto-redirects if authorized. - - This is generally the right interface to use if you are using - Twitter for single-sign on. - - .. versionchanged:: 3.1 - Now returns a `.Future` and takes an optional callback, for - compatibility with `.gen.coroutine`. - - .. deprecated:: 5.1 - - The ``callback`` argument is deprecated and will be removed in 6.0. - Use the returned awaitable object instead. - """ - http = self.get_auth_http_client() - fut = http.fetch(self._oauth_request_token_url(callback_uri=callback_uri)) - fut.add_done_callback(wrap(functools.partial( - self._on_request_token, self._OAUTH_AUTHENTICATE_URL, - None, callback))) - - @_auth_return_future - def twitter_request(self, path, callback=None, access_token=None, - post_args=None, **args): - """Fetches the given API path, e.g., ``statuses/user_timeline/btaylor`` - - The path should not include the format or API version number. - (we automatically use JSON format and API version 1). 
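On the OAuth2 side, the methods above split the flow into `authorize_redirect` (send the user to the provider) and `_oauth_request_token_url` (build the code-for-token exchange URL as query arguments). A hedged sketch of wiring them into a handler, following the docstring examples rather than any one provider; the endpoints, redirect URI, and credentials are placeholders:

```python
import tornado.ioloop
import tornado.web
from tornado import escape
from tornado.auth import OAuth2Mixin

class ExampleOAuth2Mixin(OAuth2Mixin):
    # Placeholder endpoints; real mixins hard-code their service's URLs.
    _OAUTH_AUTHORIZE_URL = "https://provider.example.com/oauth/authorize"
    _OAUTH_ACCESS_TOKEN_URL = "https://provider.example.com/oauth/token"

class LoginHandler(tornado.web.RequestHandler, ExampleOAuth2Mixin):
    async def get(self):
        if self.get_argument("code", None):
            # Exchange the authorization code for an access token (a GET
            # with query args, matching _oauth_request_token_url above);
            # assumes the provider returns JSON.
            response = await self.get_auth_http_client().fetch(
                self._oauth_request_token_url(
                    redirect_uri="https://my.site/auth",
                    client_id="client-id",
                    client_secret="client-secret",
                    code=self.get_argument("code")))
            self.finish(escape.json_decode(response.body))
        else:
            self.authorize_redirect(
                redirect_uri="https://my.site/auth",
                client_id="client-id",
                scope=["profile"],
                response_type="code")
```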
- - If the request is a POST, ``post_args`` should be provided. Query - string arguments should be given as keyword arguments. - - All the Twitter methods are documented at http://dev.twitter.com/ - - Many methods require an OAuth access token which you can - obtain through `~OAuthMixin.authorize_redirect` and - `~OAuthMixin.get_authenticated_user`. The user returned through that - process includes an 'access_token' attribute that can be used - to make authenticated requests via this method. Example - usage: - - .. testcode:: - - class MainHandler(tornado.web.RequestHandler, - tornado.auth.TwitterMixin): - @tornado.web.authenticated - async def get(self): - new_entry = await self.twitter_request( - "/statuses/update", - post_args={"status": "Testing Tornado Web Server"}, - access_token=self.current_user["access_token"]) - if not new_entry: - # Call failed; perhaps missing permission? - yield self.authorize_redirect() - return - self.finish("Posted a message!") - - .. testoutput:: - :hide: - - .. deprecated:: 5.1 - - The ``callback`` argument is deprecated and will be removed in 6.0. - Use the returned awaitable object instead. - """ - if path.startswith('http:') or path.startswith('https:'): - # Raw urls are useful for e.g. search which doesn't follow the - # usual pattern: http://search.twitter.com/search.json - url = path - else: - url = self._TWITTER_BASE_URL + path + ".json" - # Add the OAuth resource request signature if we have credentials - if access_token: - all_args = {} - all_args.update(args) - all_args.update(post_args or {}) - method = "POST" if post_args is not None else "GET" - oauth = self._oauth_request_parameters( - url, access_token, all_args, method=method) - args.update(oauth) - if args: - url += "?" + urllib_parse.urlencode(args) - http = self.get_auth_http_client() - http_callback = wrap(functools.partial(self._on_twitter_request, callback, url)) - if post_args is not None: - fut = http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args)) - else: - fut = http.fetch(url) - fut.add_done_callback(http_callback) - - def _on_twitter_request(self, future, url, response_fut): - try: - response = response_fut.result() - except Exception as e: - future.set_exception(AuthError( - "Error response %s fetching %s" % (e, url))) - return - future_set_result_unless_cancelled(future, escape.json_decode(response.body)) - - def _oauth_consumer_token(self): - self.require_setting("twitter_consumer_key", "Twitter OAuth") - self.require_setting("twitter_consumer_secret", "Twitter OAuth") - return dict( - key=self.settings["twitter_consumer_key"], - secret=self.settings["twitter_consumer_secret"]) - - @gen.coroutine - def _oauth_get_user_future(self, access_token): - user = yield self.twitter_request( - "/account/verify_credentials", - access_token=access_token) - if user: - user["username"] = user["screen_name"] - raise gen.Return(user) - - -class GoogleOAuth2Mixin(OAuth2Mixin): - """Google authentication using OAuth2. - - In order to use, register your application with Google and copy the - relevant parameters to your application settings. - - * Go to the Google Dev Console at http://console.developers.google.com - * Select a project, or create a new one. - * In the sidebar on the left, select APIs & Auth. - * In the list of APIs, find the Google+ API service and set it to ON. - * In the sidebar on the left, select Credentials. - * In the OAuth section of the page, select Create New Client ID. 
- * Set the Redirect URI to point to your auth handler - * Copy the "Client secret" and "Client ID" to the application settings as - {"google_oauth": {"key": CLIENT_ID, "secret": CLIENT_SECRET}} - - .. versionadded:: 3.2 - """ - _OAUTH_AUTHORIZE_URL = "https://accounts.google.com/o/oauth2/v2/auth" - _OAUTH_ACCESS_TOKEN_URL = "https://www.googleapis.com/oauth2/v4/token" - _OAUTH_USERINFO_URL = "https://www.googleapis.com/oauth2/v1/userinfo" - _OAUTH_NO_CALLBACKS = False - _OAUTH_SETTINGS_KEY = 'google_oauth' - - @_auth_return_future - def get_authenticated_user(self, redirect_uri, code, callback): - """Handles the login for the Google user, returning an access token. - - The result is a dictionary containing an ``access_token`` field - ([among others](https://developers.google.com/identity/protocols/OAuth2WebServer#handlingtheresponse)). - Unlike other ``get_authenticated_user`` methods in this package, - this method does not return any additional information about the user. - The returned access token can be used with `OAuth2Mixin.oauth2_request` - to request additional information (perhaps from - ``https://www.googleapis.com/oauth2/v2/userinfo``) - - Example usage: - - .. testcode:: - - class GoogleOAuth2LoginHandler(tornado.web.RequestHandler, - tornado.auth.GoogleOAuth2Mixin): - async def get(self): - if self.get_argument('code', False): - access = await self.get_authenticated_user( - redirect_uri='http://your.site.com/auth/google', - code=self.get_argument('code')) - user = await self.oauth2_request( - "https://www.googleapis.com/oauth2/v1/userinfo", - access_token=access["access_token"]) - # Save the user and access token with - # e.g. set_secure_cookie. - else: - await self.authorize_redirect( - redirect_uri='http://your.site.com/auth/google', - client_id=self.settings['google_oauth']['key'], - scope=['profile', 'email'], - response_type='code', - extra_params={'approval_prompt': 'auto'}) - - .. testoutput:: - :hide: - - .. deprecated:: 5.1 - - The ``callback`` argument is deprecated and will be removed in 6.0. - Use the returned awaitable object instead. - """ # noqa: E501 - http = self.get_auth_http_client() - body = urllib_parse.urlencode({ - "redirect_uri": redirect_uri, - "code": code, - "client_id": self.settings[self._OAUTH_SETTINGS_KEY]['key'], - "client_secret": self.settings[self._OAUTH_SETTINGS_KEY]['secret'], - "grant_type": "authorization_code", - }) - - fut = http.fetch(self._OAUTH_ACCESS_TOKEN_URL, - method="POST", - headers={'Content-Type': 'application/x-www-form-urlencoded'}, - body=body) - fut.add_done_callback(wrap(functools.partial(self._on_access_token, callback))) - - def _on_access_token(self, future, response_fut): - """Callback function for the exchange to the access token.""" - try: - response = response_fut.result() - except Exception as e: - future.set_exception(AuthError('Google auth error: %s' % str(e))) - return - - args = escape.json_decode(response.body) - future_set_result_unless_cancelled(future, args) - - -class FacebookGraphMixin(OAuth2Mixin): - """Facebook authentication using the new Graph API and OAuth2.""" - _OAUTH_ACCESS_TOKEN_URL = "https://graph.facebook.com/oauth/access_token?" - _OAUTH_AUTHORIZE_URL = "https://www.facebook.com/dialog/oauth?" - _OAUTH_NO_CALLBACKS = False - _FACEBOOK_BASE_URL = "https://graph.facebook.com" - - @_auth_return_future - def get_authenticated_user(self, redirect_uri, client_id, client_secret, - code, callback, extra_fields=None): - """Handles the login for the Facebook user, returning a user object. 
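Per the setup steps above, `GoogleOAuth2Mixin` reads its credentials from the `google_oauth` application setting (its `_OAUTH_SETTINGS_KEY`). A minimal sketch of that plumbing; `CLIENT_ID`/`CLIENT_SECRET` stand in for values from the Google console, and the handler body is elided in favor of the docstring example above:

```python
import tornado.ioloop
import tornado.web
from tornado.auth import GoogleOAuth2Mixin

class GoogleOAuth2LoginHandler(tornado.web.RequestHandler, GoogleOAuth2Mixin):
    async def get(self):
        # See the docstring example above for the full code/redirect dance.
        self.finish("login endpoint")

app = tornado.web.Application(
    [(r"/auth/google", GoogleOAuth2LoginHandler)],
    google_oauth={"key": "CLIENT_ID", "secret": "CLIENT_SECRET"},
    cookie_secret="change-me")
app.listen(8888)
tornado.ioloop.IOLoop.current().start()
```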
- - Example usage: - - .. testcode:: - - class FacebookGraphLoginHandler(tornado.web.RequestHandler, - tornado.auth.FacebookGraphMixin): - async def get(self): - if self.get_argument("code", False): - user = await self.get_authenticated_user( - redirect_uri='/auth/facebookgraph/', - client_id=self.settings["facebook_api_key"], - client_secret=self.settings["facebook_secret"], - code=self.get_argument("code")) - # Save the user with e.g. set_secure_cookie - else: - await self.authorize_redirect( - redirect_uri='/auth/facebookgraph/', - client_id=self.settings["facebook_api_key"], - extra_params={"scope": "read_stream,offline_access"}) - - .. testoutput:: - :hide: - - This method returns a dictionary which may contain the following fields: - - * ``access_token``, a string which may be passed to `facebook_request` - * ``session_expires``, an integer encoded as a string representing - the time until the access token expires in seconds. This field should - be used like ``int(user['session_expires'])``; in a future version of - Tornado it will change from a string to an integer. - * ``id``, ``name``, ``first_name``, ``last_name``, ``locale``, ``picture``, - ``link``, plus any fields named in the ``extra_fields`` argument. These - fields are copied from the Facebook graph API - `user object <https://developers.facebook.com/docs/graph-api/reference/user>`_ - - .. versionchanged:: 4.5 - The ``session_expires`` field was updated to support changes made to the - Facebook API in March 2017. - - .. deprecated:: 5.1 - - The ``callback`` argument is deprecated and will be removed in 6.0. - Use the returned awaitable object instead. - """ - http = self.get_auth_http_client() - args = { - "redirect_uri": redirect_uri, - "code": code, - "client_id": client_id, - "client_secret": client_secret, - } - - fields = set(['id', 'name', 'first_name', 'last_name', - 'locale', 'picture', 'link']) - if extra_fields: - fields.update(extra_fields) - - fut = http.fetch(self._oauth_request_token_url(**args)) - fut.add_done_callback(wrap(functools.partial(self._on_access_token, redirect_uri, client_id, - client_secret, callback, fields))) - - @gen.coroutine - def _on_access_token(self, redirect_uri, client_id, client_secret, - future, fields, response_fut): - try: - response = response_fut.result() - except Exception as e: - future.set_exception(AuthError('Facebook auth error: %s' % str(e))) - return - - args = escape.json_decode(response.body) - session = { - "access_token": args.get("access_token"), - "expires_in": args.get("expires_in") - } - - user = yield self.facebook_request( - path="/me", - access_token=session["access_token"], - appsecret_proof=hmac.new(key=client_secret.encode('utf8'), - msg=session["access_token"].encode('utf8'), - digestmod=hashlib.sha256).hexdigest(), - fields=",".join(fields) - ) - - if user is None: - future_set_result_unless_cancelled(future, None) - return - - fieldmap = {} - for field in fields: - fieldmap[field] = user.get(field) - - # session_expires is converted to str for compatibility with - # older versions in which the server used url-encoding and - # this code simply returned the string verbatim. - # This should change in Tornado 5.0. 
- fieldmap.update({"access_token": session["access_token"], - "session_expires": str(session.get("expires_in"))}) - future_set_result_unless_cancelled(future, fieldmap) - - @_auth_return_future - def facebook_request(self, path, callback, access_token=None, - post_args=None, **args): - """Fetches the given relative API path, e.g., "/btaylor/picture" - - If the request is a POST, ``post_args`` should be provided. Query - string arguments should be given as keyword arguments. - - An introduction to the Facebook Graph API can be found at - http://developers.facebook.com/docs/api - - Many methods require an OAuth access token which you can - obtain through `~OAuth2Mixin.authorize_redirect` and - `get_authenticated_user`. The user returned through that - process includes an ``access_token`` attribute that can be - used to make authenticated requests via this method. - - Example usage: - - .. testcode:: - - class MainHandler(tornado.web.RequestHandler, - tornado.auth.FacebookGraphMixin): - @tornado.web.authenticated - async def get(self): - new_entry = await self.facebook_request( - "/me/feed", - post_args={"message": "I am posting from my Tornado application!"}, - access_token=self.current_user["access_token"]) - - if not new_entry: - # Call failed; perhaps missing permission? - yield self.authorize_redirect() - return - self.finish("Posted a message!") - - .. testoutput:: - :hide: - - The given path is relative to ``self._FACEBOOK_BASE_URL``, - by default "https://graph.facebook.com". - - This method is a wrapper around `OAuth2Mixin.oauth2_request`; - the only difference is that this method takes a relative path, - while ``oauth2_request`` takes a complete url. - - .. versionchanged:: 3.1 - Added the ability to override ``self._FACEBOOK_BASE_URL``. - - .. deprecated:: 5.1 - - The ``callback`` argument is deprecated and will be removed in 6.0. - Use the returned awaitable object instead. - """ - url = self._FACEBOOK_BASE_URL + path - # Thanks to the _auth_return_future decorator, our "callback" - # argument is a Future, which we cannot pass as a callback to - # oauth2_request. Instead, have oauth2_request return a - # future and chain them together. - oauth_future = self.oauth2_request(url, access_token=access_token, - post_args=post_args, **args) - chain_future(oauth_future, callback) - - -def _oauth_signature(consumer_token, method, url, parameters={}, token=None): - """Calculates the HMAC-SHA1 OAuth signature for the given request. - - See http://oauth.net/core/1.0/#signing_process - """ - parts = urlparse.urlparse(url) - scheme, netloc, path = parts[:3] - normalized_url = scheme.lower() + "://" + netloc.lower() + path - - base_elems = [] - base_elems.append(method.upper()) - base_elems.append(normalized_url) - base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v))) - for k, v in sorted(parameters.items()))) - base_string = "&".join(_oauth_escape(e) for e in base_elems) - - key_elems = [escape.utf8(consumer_token["secret"])] - key_elems.append(escape.utf8(token["secret"] if token else "")) - key = b"&".join(key_elems) - - hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1) - return binascii.b2a_base64(hash.digest())[:-1] - - -def _oauth10a_signature(consumer_token, method, url, parameters={}, token=None): - """Calculates the HMAC-SHA1 OAuth 1.0a signature for the given request. 
- - See http://oauth.net/core/1.0a/#signing_process - """ - parts = urlparse.urlparse(url) - scheme, netloc, path = parts[:3] - normalized_url = scheme.lower() + "://" + netloc.lower() + path - - base_elems = [] - base_elems.append(method.upper()) - base_elems.append(normalized_url) - base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v))) - for k, v in sorted(parameters.items()))) - - base_string = "&".join(_oauth_escape(e) for e in base_elems) - key_elems = [escape.utf8(urllib_parse.quote(consumer_token["secret"], safe='~'))] - key_elems.append(escape.utf8(urllib_parse.quote(token["secret"], safe='~') if token else "")) - key = b"&".join(key_elems) - - hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1) - return binascii.b2a_base64(hash.digest())[:-1] - - -def _oauth_escape(val): - if isinstance(val, unicode_type): - val = val.encode("utf-8") - return urllib_parse.quote(val, safe="~") - - -def _oauth_parse_response(body): - # I can't find an officially-defined encoding for oauth responses and - # have never seen anyone use non-ascii. Leave the response in a byte - # string for python 2, and use utf8 on python 3. - body = escape.native_str(body) - p = urlparse.parse_qs(body, keep_blank_values=False) - token = dict(key=p["oauth_token"][0], secret=p["oauth_token_secret"][0]) - - # Add the extra parameters the Provider included to the token - special = ("oauth_token", "oauth_token_secret") - token.update((k, p[k][0]) for k in p if k not in special) - return token diff --git a/lib/tornado/autoreload.py b/lib/tornado/autoreload.py deleted file mode 100755 index 7d69474a..00000000 --- a/lib/tornado/autoreload.py +++ /dev/null @@ -1,356 +0,0 @@ -# -# Copyright 2009 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Automatically restart the server when a source file is modified. - -Most applications should not access this module directly. Instead, -pass the keyword argument ``autoreload=True`` to the -`tornado.web.Application` constructor (or ``debug=True``, which -enables this setting and several others). This will enable autoreload -mode as well as checking for changes to templates and static -resources. Note that restarting is a destructive operation and any -requests in progress will be aborted when the process restarts. (If -you want to disable autoreload while using other debug-mode features, -pass both ``debug=True`` and ``autoreload=False``). - -This module can also be used as a command-line wrapper around scripts -such as unit test runners. See the `main` method for details. - -The command-line wrapper and Application debug modes can be used together. -This combination is encouraged as the wrapper catches syntax errors and -other import-time failures, while debug mode catches changes once -the server has started. - -This module depends on `.IOLoop`, so it will not work in WSGI applications -and Google App Engine. It also will not work correctly when `.HTTPServer`'s -multi-process mode is used. 
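As the module docstring above says, most applications never import `tornado.autoreload` directly; they opt in through `tornado.web.Application`. A small sketch of both spellings (port and handler are placeholders):

```python
import tornado.ioloop
import tornado.web

class MainHandler(tornado.web.RequestHandler):
    def get(self):
        self.write("hello")

app = tornado.web.Application(
    [(r"/", MainHandler)],
    debug=True,          # enables autoreload plus the other debug features
    # autoreload=True,   # ...or opt in to reloading alone
)
app.listen(8888)
tornado.ioloop.IOLoop.current().start()
```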
- -Reloading loses any Python interpreter command-line arguments (e.g. ``-u``) -because it re-executes Python using ``sys.executable`` and ``sys.argv``. -Additionally, modifying these variables will cause reloading to behave -incorrectly. - -""" - -from __future__ import absolute_import, division, print_function - -import os -import sys - -# sys.path handling -# ----------------- -# -# If a module is run with "python -m", the current directory (i.e. "") -# is automatically prepended to sys.path, but not if it is run as -# "path/to/file.py". The processing for "-m" rewrites the former to -# the latter, so subsequent executions won't have the same path as the -# original. -# -# Conversely, when run as path/to/file.py, the directory containing -# file.py gets added to the path, which can cause confusion as imports -# may become relative in spite of the future import. -# -# We address the former problem by reconstructing the original command -# line (Python >= 3.4) or by setting the $PYTHONPATH environment -# variable (Python < 3.4) before re-execution so the new process will -# see the correct path. We attempt to address the latter problem when -# tornado.autoreload is run as __main__. - -if __name__ == "__main__": - # This sys.path manipulation must come before our imports (as much - # as possible - if we introduced a tornado.sys or tornado.os - # module we'd be in trouble), or else our imports would become - # relative again despite the future import. - # - # There is a separate __main__ block at the end of the file to call main(). - if sys.path[0] == os.path.dirname(__file__): - del sys.path[0] - -import functools -import logging -import os -import pkgutil # type: ignore -import sys -import traceback -import types -import subprocess -import weakref - -from tornado import ioloop -from tornado.log import gen_log -from tornado import process -from tornado.util import exec_in - -try: - import signal -except ImportError: - signal = None - -# os.execv is broken on Windows and can't properly parse command line -# arguments and executable name if they contain whitespaces. subprocess -# fixes that behavior. -_has_execv = sys.platform != 'win32' - -_watched_files = set() -_reload_hooks = [] -_reload_attempted = False -_io_loops = weakref.WeakKeyDictionary() # type: ignore -_autoreload_is_main = False -_original_argv = None -_original_spec = None - - -def start(check_time=500): - """Begins watching source files for changes. - - .. versionchanged:: 5.0 - The ``io_loop`` argument (deprecated since version 4.1) has been removed. - """ - io_loop = ioloop.IOLoop.current() - if io_loop in _io_loops: - return - _io_loops[io_loop] = True - if len(_io_loops) > 1: - gen_log.warning("tornado.autoreload started more than once in the same process") - modify_times = {} - callback = functools.partial(_reload_on_update, modify_times) - scheduler = ioloop.PeriodicCallback(callback, check_time) - scheduler.start() - - -def wait(): - """Wait for a watched file to change, then restart the process. - - Intended to be used at the end of scripts like unit test runners, - to run the tests again after any source file changes (but see also - the command-line interface in `main`) - """ - io_loop = ioloop.IOLoop() - io_loop.add_callback(start) - io_loop.start() - - -def watch(filename): - """Add a file to the watch list. - - All imported modules are watched by default. - """ - _watched_files.add(filename) - - -def add_reload_hook(fn): - """Add a function to be called before reloading the process. 
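`watch` and `wait` combine naturally at the end of a test runner, optionally with a hook registered via `add_reload_hook` (documented next). A hedged sketch; the module and file names are hypothetical:

```python
import unittest
import tornado.autoreload

def run_tests():
    # "myproject.tests" is a placeholder module name.
    unittest.main(module="myproject.tests", exit=False)

if __name__ == "__main__":
    tornado.autoreload.watch("myproject/config.yaml")   # a non-module file
    tornado.autoreload.add_reload_hook(lambda: print("reloading..."))
    run_tests()
    tornado.autoreload.wait()   # re-run whenever a watched file changes
```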
- - Note that for open file and socket handles it is generally - preferable to set the ``FD_CLOEXEC`` flag (using `fcntl` or - ``tornado.platform.auto.set_close_exec``) instead - of using a reload hook to close them. - """ - _reload_hooks.append(fn) - - -def _reload_on_update(modify_times): - if _reload_attempted: - # We already tried to reload and it didn't work, so don't try again. - return - if process.task_id() is not None: - # We're in a child process created by fork_processes. If child - # processes restarted themselves, they'd all restart and then - # all call fork_processes again. - return - for module in list(sys.modules.values()): - # Some modules play games with sys.modules (e.g. email/__init__.py - # in the standard library), and occasionally this can cause strange - # failures in getattr. Just ignore anything that's not an ordinary - # module. - if not isinstance(module, types.ModuleType): - continue - path = getattr(module, "__file__", None) - if not path: - continue - if path.endswith(".pyc") or path.endswith(".pyo"): - path = path[:-1] - _check_file(modify_times, path) - for path in _watched_files: - _check_file(modify_times, path) - - -def _check_file(modify_times, path): - try: - modified = os.stat(path).st_mtime - except Exception: - return - if path not in modify_times: - modify_times[path] = modified - return - if modify_times[path] != modified: - gen_log.info("%s modified; restarting server", path) - _reload() - - -def _reload(): - global _reload_attempted - _reload_attempted = True - for fn in _reload_hooks: - fn() - if hasattr(signal, "setitimer"): - # Clear the alarm signal set by - # ioloop.set_blocking_log_threshold so it doesn't fire - # after the exec. - signal.setitimer(signal.ITIMER_REAL, 0, 0) - # sys.path fixes: see comments at top of file. If __main__.__spec__ - # exists, we were invoked with -m and the effective path is about to - # change on re-exec. Reconstruct the original command line to - # ensure that the new process sees the same path we did. If - # __spec__ is not available (Python < 3.4), check instead if - # sys.path[0] is an empty string and add the current directory to - # $PYTHONPATH. - if _autoreload_is_main: - spec = _original_spec - argv = _original_argv - else: - spec = getattr(sys.modules['__main__'], '__spec__', None) - argv = sys.argv - if spec: - argv = ['-m', spec.name] + argv[1:] - else: - path_prefix = '.' + os.pathsep - if (sys.path[0] == '' and - not os.environ.get("PYTHONPATH", "").startswith(path_prefix)): - os.environ["PYTHONPATH"] = (path_prefix + - os.environ.get("PYTHONPATH", "")) - if not _has_execv: - subprocess.Popen([sys.executable] + argv) - os._exit(0) - else: - try: - os.execv(sys.executable, [sys.executable] + argv) - except OSError: - # Mac OS X versions prior to 10.6 do not support execv in - # a process that contains multiple threads. Instead of - # re-executing in the current process, start a new one - # and cause the current process to exit. This isn't - # ideal since the new process is detached from the parent - # terminal and thus cannot easily be killed with ctrl-C, - # but it's better than not being able to autoreload at - # all. - # Unfortunately the errno returned in this case does not - # appear to be consistent, so we can't easily check for - # this error specifically. - os.spawnv(os.P_NOWAIT, sys.executable, [sys.executable] + argv) - # At this point the IOLoop has been closed and finally - # blocks will experience errors if we allow the stack to - # unwind, so just exit uncleanly. 
- os._exit(0) - - -_USAGE = """\ -Usage: - python -m tornado.autoreload -m module.to.run [args...] - python -m tornado.autoreload path/to/script.py [args...] -""" - - -def main(): - """Command-line wrapper to re-run a script whenever its source changes. - - Scripts may be specified by filename or module name:: - - python -m tornado.autoreload -m tornado.test.runtests - python -m tornado.autoreload tornado/test/runtests.py - - Running a script with this wrapper is similar to calling - `tornado.autoreload.wait` at the end of the script, but this wrapper - can catch import-time problems like syntax errors that would otherwise - prevent the script from reaching its call to `wait`. - """ - # Remember that we were launched with autoreload as main. - # The main module can be tricky; set the variables both in our globals - # (which may be __main__) and the real importable version. - import tornado.autoreload - global _autoreload_is_main - global _original_argv, _original_spec - tornado.autoreload._autoreload_is_main = _autoreload_is_main = True - original_argv = sys.argv - tornado.autoreload._original_argv = _original_argv = original_argv - original_spec = getattr(sys.modules['__main__'], '__spec__', None) - tornado.autoreload._original_spec = _original_spec = original_spec - sys.argv = sys.argv[:] - if len(sys.argv) >= 3 and sys.argv[1] == "-m": - mode = "module" - module = sys.argv[2] - del sys.argv[1:3] - elif len(sys.argv) >= 2: - mode = "script" - script = sys.argv[1] - sys.argv = sys.argv[1:] - else: - print(_USAGE, file=sys.stderr) - sys.exit(1) - - try: - if mode == "module": - import runpy - runpy.run_module(module, run_name="__main__", alter_sys=True) - elif mode == "script": - with open(script) as f: - # Execute the script in our namespace instead of creating - # a new one so that something that tries to import __main__ - # (e.g. the unittest module) will see names defined in the - # script instead of just those defined in this module. - global __file__ - __file__ = script - # If __package__ is defined, imports may be incorrectly - # interpreted as relative to this module. - global __package__ - del __package__ - exec_in(f.read(), globals(), globals()) - except SystemExit as e: - logging.basicConfig() - gen_log.info("Script exited with status %s", e.code) - except Exception as e: - logging.basicConfig() - gen_log.warning("Script exited with uncaught exception", exc_info=True) - # If an exception occurred at import time, the file with the error - # never made it into sys.modules and so we won't know to watch it. - # Just to make sure we've covered everything, walk the stack trace - # from the exception and watch every file. - for (filename, lineno, name, line) in traceback.extract_tb(sys.exc_info()[2]): - watch(filename) - if isinstance(e, SyntaxError): - # SyntaxErrors are special: their innermost stack frame is fake - # so extract_tb won't see it and we have to get the filename - # from the exception object. - watch(e.filename) - else: - logging.basicConfig() - gen_log.info("Script exited normally") - # restore sys.argv so subsequent executions will include autoreload - sys.argv = original_argv - - if mode == 'module': - # runpy did a fake import of the module as __main__, but now it's - # no longer in sys.modules. Figure out where it is and watch it. 
- loader = pkgutil.get_loader(module) - if loader is not None: - watch(loader.get_filename()) - - wait() - - -if __name__ == "__main__": - # See also the other __main__ block at the top of the file, which modifies - # sys.path before our imports - main() diff --git a/lib/tornado/concurrent.py b/lib/tornado/concurrent.py deleted file mode 100755 index f7e6bccc..00000000 --- a/lib/tornado/concurrent.py +++ /dev/null @@ -1,660 +0,0 @@ -# -# Copyright 2012 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Utilities for working with ``Future`` objects. - -``Futures`` are a pattern for concurrent programming introduced in -Python 3.2 in the `concurrent.futures` package, and also adopted (in a -slightly different form) in Python 3.4's `asyncio` package. This -package defines a ``Future`` class that is an alias for `asyncio.Future` -when available, and a compatible implementation for older versions of -Python. It also includes some utility functions for interacting with -``Future`` objects. - -While this package is an important part of Tornado's internal -implementation, applications rarely need to interact with it -directly. -""" -from __future__ import absolute_import, division, print_function - -import functools -import platform -import textwrap -import traceback -import sys -import warnings - -from tornado.log import app_log -from tornado.stack_context import ExceptionStackContext, wrap -from tornado.util import raise_exc_info, ArgReplacer, is_finalizing - -try: - from concurrent import futures -except ImportError: - futures = None - -try: - import asyncio -except ImportError: - asyncio = None - -try: - import typing -except ImportError: - typing = None - - -# Can the garbage collector handle cycles that include __del__ methods? -# This is true in cpython beginning with version 3.4 (PEP 442). -_GC_CYCLE_FINALIZERS = (platform.python_implementation() == 'CPython' and - sys.version_info >= (3, 4)) - - -class ReturnValueIgnoredError(Exception): - pass - -# This class and associated code in the future object is derived -# from the Trollius project, a backport of asyncio to Python 2.x - 3.x - - -class _TracebackLogger(object): - """Helper to log a traceback upon destruction if not cleared. - - This solves a nasty problem with Futures and Tasks that have an - exception set: if nobody asks for the exception, the exception is - never logged. This violates the Zen of Python: 'Errors should - never pass silently. Unless explicitly silenced.' - - However, we don't want to log the exception as soon as - set_exception() is called: if the calling code is written - properly, it will get the exception and handle it properly. But - we *do* want to log it if result() or exception() was never called - -- otherwise developers waste a lot of time wondering why their - buggy code fails silently. - - An earlier attempt added a __del__() method to the Future class - itself, but this backfired because the presence of __del__() - prevents garbage collection from breaking cycles. 
A way out of - this catch-22 is to avoid having a __del__() method on the Future - class itself, but instead to have a reference to a helper object - with a __del__() method that logs the traceback, where we ensure - that the helper object doesn't participate in cycles, and only the - Future has a reference to it. - - The helper object is added when set_exception() is called. When - the Future is collected, and the helper is present, the helper - object is also collected, and its __del__() method will log the - traceback. When the Future's result() or exception() method is - called (and a helper object is present), it removes the the helper - object, after calling its clear() method to prevent it from - logging. - - One downside is that we do a fair amount of work to extract the - traceback from the exception, even when it is never logged. It - would seem cheaper to just store the exception object, but that - references the traceback, which references stack frames, which may - reference the Future, which references the _TracebackLogger, and - then the _TracebackLogger would be included in a cycle, which is - what we're trying to avoid! As an optimization, we don't - immediately format the exception; we only do the work when - activate() is called, which call is delayed until after all the - Future's callbacks have run. Since usually a Future has at least - one callback (typically set by 'yield From') and usually that - callback extracts the callback, thereby removing the need to - format the exception. - - PS. I don't claim credit for this solution. I first heard of it - in a discussion about closing files when they are collected. - """ - - __slots__ = ('exc_info', 'formatted_tb') - - def __init__(self, exc_info): - self.exc_info = exc_info - self.formatted_tb = None - - def activate(self): - exc_info = self.exc_info - if exc_info is not None: - self.exc_info = None - self.formatted_tb = traceback.format_exception(*exc_info) - - def clear(self): - self.exc_info = None - self.formatted_tb = None - - def __del__(self, is_finalizing=is_finalizing): - if not is_finalizing() and self.formatted_tb: - app_log.error('Future exception was never retrieved: %s', - ''.join(self.formatted_tb).rstrip()) - - -class Future(object): - """Placeholder for an asynchronous result. - - A ``Future`` encapsulates the result of an asynchronous - operation. In synchronous applications ``Futures`` are used - to wait for the result from a thread or process pool; in - Tornado they are normally used with `.IOLoop.add_future` or by - yielding them in a `.gen.coroutine`. - - `tornado.concurrent.Future` is an alias for `asyncio.Future` when - that package is available (Python 3.4+). Unlike - `concurrent.futures.Future`, the ``Futures`` used by Tornado and - `asyncio` are not thread-safe (and therefore faster for use with - single-threaded event loops). - - In addition to ``exception`` and ``set_exception``, Tornado's - ``Future`` implementation supports storing an ``exc_info`` triple - to support better tracebacks on Python 2. To set an ``exc_info`` - triple, use `future_set_exc_info`, and to retrieve one, call - `result()` (which will raise it). - - .. versionchanged:: 4.0 - `tornado.concurrent.Future` is always a thread-unsafe ``Future`` - with support for the ``exc_info`` methods. Previously it would - be an alias for the thread-safe `concurrent.futures.Future` - if that package was available and fall back to the thread-unsafe - implementation if it was not. - - .. 
versionchanged:: 4.1 - If a `.Future` contains an error but that error is never observed - (by calling ``result()``, ``exception()``, or ``exc_info()``), - a stack trace will be logged when the `.Future` is garbage collected. - This normally indicates an error in the application, but in cases - where it results in undesired logging it may be necessary to - suppress the logging by ensuring that the exception is observed: - ``f.add_done_callback(lambda f: f.exception())``. - - .. versionchanged:: 5.0 - - This class was previoiusly available under the name - ``TracebackFuture``. This name, which was deprecated since - version 4.0, has been removed. When `asyncio` is available - ``tornado.concurrent.Future`` is now an alias for - `asyncio.Future`. Like `asyncio.Future`, callbacks are now - always scheduled on the `.IOLoop` and are never run - synchronously. - - """ - def __init__(self): - self._done = False - self._result = None - self._exc_info = None - - self._log_traceback = False # Used for Python >= 3.4 - self._tb_logger = None # Used for Python <= 3.3 - - self._callbacks = [] - - # Implement the Python 3.5 Awaitable protocol if possible - # (we can't use return and yield together until py33). - if sys.version_info >= (3, 3): - exec(textwrap.dedent(""" - def __await__(self): - return (yield self) - """)) - else: - # Py2-compatible version for use with cython. - def __await__(self): - result = yield self - # StopIteration doesn't take args before py33, - # but Cython recognizes the args tuple. - e = StopIteration() - e.args = (result,) - raise e - - def cancel(self): - """Cancel the operation, if possible. - - Tornado ``Futures`` do not support cancellation, so this method always - returns False. - """ - return False - - def cancelled(self): - """Returns True if the operation has been cancelled. - - Tornado ``Futures`` do not support cancellation, so this method - always returns False. - """ - return False - - def running(self): - """Returns True if this operation is currently running.""" - return not self._done - - def done(self): - """Returns True if the future has finished running.""" - return self._done - - def _clear_tb_log(self): - self._log_traceback = False - if self._tb_logger is not None: - self._tb_logger.clear() - self._tb_logger = None - - def result(self, timeout=None): - """If the operation succeeded, return its result. If it failed, - re-raise its exception. - - This method takes a ``timeout`` argument for compatibility with - `concurrent.futures.Future` but it is an error to call it - before the `Future` is done, so the ``timeout`` is never used. - """ - self._clear_tb_log() - if self._result is not None: - return self._result - if self._exc_info is not None: - try: - raise_exc_info(self._exc_info) - finally: - self = None - self._check_done() - return self._result - - def exception(self, timeout=None): - """If the operation raised an exception, return the `Exception` - object. Otherwise returns None. - - This method takes a ``timeout`` argument for compatibility with - `concurrent.futures.Future` but it is an error to call it - before the `Future` is done, so the ``timeout`` is never used. - """ - self._clear_tb_log() - if self._exc_info is not None: - return self._exc_info[1] - else: - self._check_done() - return None - - def add_done_callback(self, fn): - """Attaches the given callback to the `Future`. - - It will be invoked with the `Future` as its argument when the Future - has finished running and its result is available. 
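A small sketch of that contract: done-callbacks attached to a `Future` are scheduled on the `IOLoop` rather than run synchronously, and the same future can also simply be awaited. This is illustrative usage, not library code:

```python
from tornado.concurrent import Future
from tornado.ioloop import IOLoop

async def main():
    fut = Future()
    fut.add_done_callback(lambda f: print("callback saw:", f.result()))
    IOLoop.current().add_callback(fut.set_result, 42)  # resolve on a later iteration
    print("awaited:", await fut)  # the same future is directly awaitable

IOLoop.current().run_sync(main)
```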
In Tornado - consider using `.IOLoop.add_future` instead of calling - `add_done_callback` directly. - """ - if self._done: - from tornado.ioloop import IOLoop - IOLoop.current().add_callback(fn, self) - else: - self._callbacks.append(fn) - - def set_result(self, result): - """Sets the result of a ``Future``. - - It is undefined to call any of the ``set`` methods more than once - on the same object. - """ - self._result = result - self._set_done() - - def set_exception(self, exception): - """Sets the exception of a ``Future.``""" - self.set_exc_info( - (exception.__class__, - exception, - getattr(exception, '__traceback__', None))) - - def exc_info(self): - """Returns a tuple in the same format as `sys.exc_info` or None. - - .. versionadded:: 4.0 - """ - self._clear_tb_log() - return self._exc_info - - def set_exc_info(self, exc_info): - """Sets the exception information of a ``Future.`` - - Preserves tracebacks on Python 2. - - .. versionadded:: 4.0 - """ - self._exc_info = exc_info - self._log_traceback = True - if not _GC_CYCLE_FINALIZERS: - self._tb_logger = _TracebackLogger(exc_info) - - try: - self._set_done() - finally: - # Activate the logger after all callbacks have had a - # chance to call result() or exception(). - if self._log_traceback and self._tb_logger is not None: - self._tb_logger.activate() - self._exc_info = exc_info - - def _check_done(self): - if not self._done: - raise Exception("DummyFuture does not support blocking for results") - - def _set_done(self): - self._done = True - if self._callbacks: - from tornado.ioloop import IOLoop - loop = IOLoop.current() - for cb in self._callbacks: - loop.add_callback(cb, self) - self._callbacks = None - - # On Python 3.3 or older, objects with a destructor part of a reference - # cycle are never destroyed. It's no longer the case on Python 3.4 thanks to - # the PEP 442. - if _GC_CYCLE_FINALIZERS: - def __del__(self, is_finalizing=is_finalizing): - if is_finalizing() or not self._log_traceback: - # set_exception() was not called, or result() or exception() - # has consumed the exception - return - - tb = traceback.format_exception(*self._exc_info) - - app_log.error('Future %r exception was never retrieved: %s', - self, ''.join(tb).rstrip()) - - -if asyncio is not None: - Future = asyncio.Future # noqa - -if futures is None: - FUTURES = Future # type: typing.Union[type, typing.Tuple[type, ...]] -else: - FUTURES = (futures.Future, Future) - - -def is_future(x): - return isinstance(x, FUTURES) - - -class DummyExecutor(object): - def submit(self, fn, *args, **kwargs): - future = Future() - try: - future_set_result_unless_cancelled(future, fn(*args, **kwargs)) - except Exception: - future_set_exc_info(future, sys.exc_info()) - return future - - def shutdown(self, wait=True): - pass - - -dummy_executor = DummyExecutor() - - -def run_on_executor(*args, **kwargs): - """Decorator to run a synchronous method asynchronously on an executor. - - The decorated method may be called with a ``callback`` keyword - argument and returns a future. - - The executor to be used is determined by the ``executor`` - attributes of ``self``. To use a different attribute name, pass a - keyword argument to the decorator:: - - @run_on_executor(executor='_thread_pool') - def foo(self): - pass - - This decorator should not be confused with the similarly-named - `.IOLoop.run_in_executor`. In general, using ``run_in_executor`` - when *calling* a blocking method is recommended instead of using - this decorator when *defining* a method. 
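A hedged sketch of the decorator as documented above, using the `executor` keyword to point at a custom attribute; the pool size and the `Worker` class are arbitrary:

```python
import time
from concurrent.futures import ThreadPoolExecutor
from tornado.concurrent import run_on_executor
from tornado.ioloop import IOLoop

class Worker(object):
    def __init__(self):
        self._thread_pool = ThreadPoolExecutor(max_workers=4)

    @run_on_executor(executor="_thread_pool")
    def crunch(self, n):
        time.sleep(0.1)   # stands in for blocking work
        return n * n

async def main():
    print(await Worker().crunch(7))   # -> 49

IOLoop.current().run_sync(main)
```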
If compatibility with older - versions of Tornado is required, consider defining an executor - and using ``executor.submit()`` at the call site. - - .. versionchanged:: 4.2 - Added keyword arguments to use alternative attributes. - - .. versionchanged:: 5.0 - Always uses the current IOLoop instead of ``self.io_loop``. - - .. versionchanged:: 5.1 - Returns a `.Future` compatible with ``await`` instead of a - `concurrent.futures.Future`. - - .. deprecated:: 5.1 - - The ``callback`` argument is deprecated and will be removed in - 6.0. The decorator itself is discouraged in new code but will - not be removed in 6.0. - """ - def run_on_executor_decorator(fn): - executor = kwargs.get("executor", "executor") - - @functools.wraps(fn) - def wrapper(self, *args, **kwargs): - callback = kwargs.pop("callback", None) - async_future = Future() - conc_future = getattr(self, executor).submit(fn, self, *args, **kwargs) - chain_future(conc_future, async_future) - if callback: - warnings.warn("callback arguments are deprecated, use the returned Future instead", - DeprecationWarning) - from tornado.ioloop import IOLoop - IOLoop.current().add_future( - async_future, lambda future: callback(future.result())) - return async_future - return wrapper - if args and kwargs: - raise ValueError("cannot combine positional and keyword args") - if len(args) == 1: - return run_on_executor_decorator(args[0]) - elif len(args) != 0: - raise ValueError("expected 1 argument, got %d", len(args)) - return run_on_executor_decorator - - -_NO_RESULT = object() - - -def return_future(f): - """Decorator to make a function that returns via callback return a - `Future`. - - This decorator was provided to ease the transition from - callback-oriented code to coroutines. It is not recommended for - new code. - - The wrapped function should take a ``callback`` keyword argument - and invoke it with one argument when it has finished. To signal failure, - the function can simply raise an exception (which will be - captured by the `.StackContext` and passed along to the ``Future``). - - From the caller's perspective, the callback argument is optional. - If one is given, it will be invoked when the function is complete - with ``Future.result()`` as an argument. If the function fails, the - callback will not be run and an exception will be raised into the - surrounding `.StackContext`. - - If no callback is given, the caller should use the ``Future`` to - wait for the function to complete (perhaps by yielding it in a - coroutine, or passing it to `.IOLoop.add_future`). - - Usage: - - .. testcode:: - - @return_future - def future_func(arg1, arg2, callback): - # Do stuff (possibly asynchronous) - callback(result) - - async def caller(): - await future_func(arg1, arg2) - - .. - - Note that ``@return_future`` and ``@gen.engine`` can be applied to the - same function, provided ``@return_future`` appears first. However, - consider using ``@gen.coroutine`` instead of this combination. - - .. versionchanged:: 5.1 - - Now raises a `.DeprecationWarning` if a callback argument is passed to - the decorated function and deprecation warnings are enabled. - - .. deprecated:: 5.1 - - This decorator will be removed in Tornado 6.0. New code should - use coroutines directly instead of wrapping callback-based code - with this decorator. Interactions with non-Tornado - callback-based code should be managed explicitly to avoid - relying on the `.ExceptionStackContext` built into this - decorator. 
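Since the deprecation note above points to plain coroutines as the replacement, a before/after sketch may help; `fetch_data` is a hypothetical function, not part of Tornado:

```python
from tornado.ioloop import IOLoop

# Before: callback style, the kind of function @return_future wrapped.
def fetch_data_cb(key, callback):
    IOLoop.current().add_callback(callback, {"key": key})

# After: a native coroutine the caller simply awaits.
async def fetch_data(key):
    return {"key": key}

async def main():
    print(await fetch_data("answer"))

IOLoop.current().run_sync(main)
```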
- """ - warnings.warn("@return_future is deprecated, use coroutines instead", - DeprecationWarning) - return _non_deprecated_return_future(f, warn=True) - - -def _non_deprecated_return_future(f, warn=False): - # Allow auth.py to use this decorator without triggering - # deprecation warnings. This will go away once auth.py has removed - # its legacy interfaces in 6.0. - replacer = ArgReplacer(f, 'callback') - - @functools.wraps(f) - def wrapper(*args, **kwargs): - future = Future() - callback, args, kwargs = replacer.replace( - lambda value=_NO_RESULT: future_set_result_unless_cancelled(future, value), - args, kwargs) - - def handle_error(typ, value, tb): - future_set_exc_info(future, (typ, value, tb)) - return True - exc_info = None - esc = ExceptionStackContext(handle_error, delay_warning=True) - with esc: - if not warn: - # HACK: In non-deprecated mode (only used in auth.py), - # suppress the warning entirely. Since this is added - # in a 5.1 patch release and already removed in 6.0 - # I'm prioritizing a minimial change instead of a - # clean solution. - esc.delay_warning = False - try: - result = f(*args, **kwargs) - if result is not None: - raise ReturnValueIgnoredError( - "@return_future should not be used with functions " - "that return values") - except: - exc_info = sys.exc_info() - raise - if exc_info is not None: - # If the initial synchronous part of f() raised an exception, - # go ahead and raise it to the caller directly without waiting - # for them to inspect the Future. - future.result() - - # If the caller passed in a callback, schedule it to be called - # when the future resolves. It is important that this happens - # just before we return the future, or else we risk confusing - # stack contexts with multiple exceptions (one here with the - # immediate exception, and again when the future resolves and - # the callback triggers its exception by calling future.result()). - if callback is not None: - warnings.warn("callback arguments are deprecated, use the returned Future instead", - DeprecationWarning) - - def run_callback(future): - result = future.result() - if result is _NO_RESULT: - callback() - else: - callback(future.result()) - future_add_done_callback(future, wrap(run_callback)) - return future - return wrapper - - -def chain_future(a, b): - """Chain two futures together so that when one completes, so does the other. - - The result (success or failure) of ``a`` will be copied to ``b``, unless - ``b`` has already been completed or cancelled by the time ``a`` finishes. - - .. versionchanged:: 5.0 - - Now accepts both Tornado/asyncio `Future` objects and - `concurrent.futures.Future`. - - """ - def copy(future): - assert future is a - if b.done(): - return - if (hasattr(a, 'exc_info') and - a.exc_info() is not None): - future_set_exc_info(b, a.exc_info()) - elif a.exception() is not None: - b.set_exception(a.exception()) - else: - b.set_result(a.result()) - if isinstance(a, Future): - future_add_done_callback(a, copy) - else: - # concurrent.futures.Future - from tornado.ioloop import IOLoop - IOLoop.current().add_future(a, copy) - - -def future_set_result_unless_cancelled(future, value): - """Set the given ``value`` as the `Future`'s result, if not cancelled. - - Avoids asyncio.InvalidStateError when calling set_result() on - a cancelled `asyncio.Future`. - - .. versionadded:: 5.0 - """ - if not future.cancelled(): - future.set_result(value) - - -def future_set_exc_info(future, exc_info): - """Set the given ``exc_info`` as the `Future`'s exception. 
- - Understands both `asyncio.Future` and Tornado's extensions to - enable better tracebacks on Python 2. - - .. versionadded:: 5.0 - """ - if hasattr(future, 'set_exc_info'): - # Tornado's Future - future.set_exc_info(exc_info) - else: - # asyncio.Future - future.set_exception(exc_info[1]) - - -def future_add_done_callback(future, callback): - """Arrange to call ``callback`` when ``future`` is complete. - - ``callback`` is invoked with one argument, the ``future``. - - If ``future`` is already done, ``callback`` is invoked immediately. - This may differ from the behavior of ``Future.add_done_callback``, - which makes no such guarantee. - - .. versionadded:: 5.0 - """ - if future.done(): - callback(future) - else: - future.add_done_callback(callback) diff --git a/lib/tornado/curl_httpclient.py b/lib/tornado/curl_httpclient.py deleted file mode 100755 index 7f5cb105..00000000 --- a/lib/tornado/curl_httpclient.py +++ /dev/null @@ -1,514 +0,0 @@ -# -# Copyright 2009 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Non-blocking HTTP client implementation using pycurl.""" - -from __future__ import absolute_import, division, print_function - -import collections -import functools -import logging -import pycurl # type: ignore -import threading -import time -from io import BytesIO - -from tornado import httputil -from tornado import ioloop -from tornado import stack_context - -from tornado.escape import utf8, native_str -from tornado.httpclient import HTTPResponse, HTTPError, AsyncHTTPClient, main - -curl_log = logging.getLogger('tornado.curl_httpclient') - - -class CurlAsyncHTTPClient(AsyncHTTPClient): - def initialize(self, max_clients=10, defaults=None): - super(CurlAsyncHTTPClient, self).initialize(defaults=defaults) - self._multi = pycurl.CurlMulti() - self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout) - self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket) - self._curls = [self._curl_create() for i in range(max_clients)] - self._free_list = self._curls[:] - self._requests = collections.deque() - self._fds = {} - self._timeout = None - - # libcurl has bugs that sometimes cause it to not report all - # relevant file descriptors and timeouts to TIMERFUNCTION/ - # SOCKETFUNCTION. Mitigate the effects of such bugs by - # forcing a periodic scan of all active requests. - self._force_timeout_callback = ioloop.PeriodicCallback( - self._handle_force_timeout, 1000) - self._force_timeout_callback.start() - - # Work around a bug in libcurl 7.29.0: Some fields in the curl - # multi object are initialized lazily, and its destructor will - # segfault if it is destroyed without having been used. Add - # and remove a dummy handle to make sure everything is - # initialized. 
- dummy_curl_handle = pycurl.Curl() - self._multi.add_handle(dummy_curl_handle) - self._multi.remove_handle(dummy_curl_handle) - - def close(self): - self._force_timeout_callback.stop() - if self._timeout is not None: - self.io_loop.remove_timeout(self._timeout) - for curl in self._curls: - curl.close() - self._multi.close() - super(CurlAsyncHTTPClient, self).close() - - # Set below properties to None to reduce the reference count of current - # instance, because those properties hold some methods of current - # instance that will case circular reference. - self._force_timeout_callback = None - self._multi = None - - def fetch_impl(self, request, callback): - self._requests.append((request, callback, self.io_loop.time())) - self._process_queue() - self._set_timeout(0) - - def _handle_socket(self, event, fd, multi, data): - """Called by libcurl when it wants to change the file descriptors - it cares about. - """ - event_map = { - pycurl.POLL_NONE: ioloop.IOLoop.NONE, - pycurl.POLL_IN: ioloop.IOLoop.READ, - pycurl.POLL_OUT: ioloop.IOLoop.WRITE, - pycurl.POLL_INOUT: ioloop.IOLoop.READ | ioloop.IOLoop.WRITE - } - if event == pycurl.POLL_REMOVE: - if fd in self._fds: - self.io_loop.remove_handler(fd) - del self._fds[fd] - else: - ioloop_event = event_map[event] - # libcurl sometimes closes a socket and then opens a new - # one using the same FD without giving us a POLL_NONE in - # between. This is a problem with the epoll IOLoop, - # because the kernel can tell when a socket is closed and - # removes it from the epoll automatically, causing future - # update_handler calls to fail. Since we can't tell when - # this has happened, always use remove and re-add - # instead of update. - if fd in self._fds: - self.io_loop.remove_handler(fd) - self.io_loop.add_handler(fd, self._handle_events, - ioloop_event) - self._fds[fd] = ioloop_event - - def _set_timeout(self, msecs): - """Called by libcurl to schedule a timeout.""" - if self._timeout is not None: - self.io_loop.remove_timeout(self._timeout) - self._timeout = self.io_loop.add_timeout( - self.io_loop.time() + msecs / 1000.0, self._handle_timeout) - - def _handle_events(self, fd, events): - """Called by IOLoop when there is activity on one of our - file descriptors. - """ - action = 0 - if events & ioloop.IOLoop.READ: - action |= pycurl.CSELECT_IN - if events & ioloop.IOLoop.WRITE: - action |= pycurl.CSELECT_OUT - while True: - try: - ret, num_handles = self._multi.socket_action(fd, action) - except pycurl.error as e: - ret = e.args[0] - if ret != pycurl.E_CALL_MULTI_PERFORM: - break - self._finish_pending_requests() - - def _handle_timeout(self): - """Called by IOLoop when the requested timeout has passed.""" - with stack_context.NullContext(): - self._timeout = None - while True: - try: - ret, num_handles = self._multi.socket_action( - pycurl.SOCKET_TIMEOUT, 0) - except pycurl.error as e: - ret = e.args[0] - if ret != pycurl.E_CALL_MULTI_PERFORM: - break - self._finish_pending_requests() - - # In theory, we shouldn't have to do this because curl will - # call _set_timeout whenever the timeout changes. However, - # sometimes after _handle_timeout we will need to reschedule - # immediately even though nothing has changed from curl's - # perspective. This is because when socket_action is - # called with SOCKET_TIMEOUT, libcurl decides internally which - # timeouts need to be processed by using a monotonic clock - # (where available) while tornado uses python's time.time() - # to decide when timeouts have occurred. 
When those clocks - # disagree on elapsed time (as they will whenever there is an - # NTP adjustment), tornado might call _handle_timeout before - # libcurl is ready. After each timeout, resync the scheduled - # timeout with libcurl's current state. - new_timeout = self._multi.timeout() - if new_timeout >= 0: - self._set_timeout(new_timeout) - - def _handle_force_timeout(self): - """Called by IOLoop periodically to ask libcurl to process any - events it may have forgotten about. - """ - with stack_context.NullContext(): - while True: - try: - ret, num_handles = self._multi.socket_all() - except pycurl.error as e: - ret = e.args[0] - if ret != pycurl.E_CALL_MULTI_PERFORM: - break - self._finish_pending_requests() - - def _finish_pending_requests(self): - """Process any requests that were completed by the last - call to multi.socket_action. - """ - while True: - num_q, ok_list, err_list = self._multi.info_read() - for curl in ok_list: - self._finish(curl) - for curl, errnum, errmsg in err_list: - self._finish(curl, errnum, errmsg) - if num_q == 0: - break - self._process_queue() - - def _process_queue(self): - with stack_context.NullContext(): - while True: - started = 0 - while self._free_list and self._requests: - started += 1 - curl = self._free_list.pop() - (request, callback, queue_start_time) = self._requests.popleft() - curl.info = { - "headers": httputil.HTTPHeaders(), - "buffer": BytesIO(), - "request": request, - "callback": callback, - "queue_start_time": queue_start_time, - "curl_start_time": time.time(), - "curl_start_ioloop_time": self.io_loop.current().time(), - } - try: - self._curl_setup_request( - curl, request, curl.info["buffer"], - curl.info["headers"]) - except Exception as e: - # If there was an error in setup, pass it on - # to the callback. Note that allowing the - # error to escape here will appear to work - # most of the time since we are still in the - # caller's original stack frame, but when - # _process_queue() is called from - # _finish_pending_requests the exceptions have - # nowhere to go. 
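# Sketch of the caller-visible contract (assumed application code): setup
# failures surface as a synthetic response with code 599 and the exception
# attached, so a fetch callback can treat them like any other failed request.
def on_response(response):
    if response.error:
        print("fetch failed: %s %s" % (response.code, response.error))
    else:
        print("fetched %d bytes" % len(response.body))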
- self._free_list.append(curl) - callback(HTTPResponse( - request=request, - code=599, - error=e)) - else: - self._multi.add_handle(curl) - - if not started: - break - - def _finish(self, curl, curl_error=None, curl_message=None): - info = curl.info - curl.info = None - self._multi.remove_handle(curl) - self._free_list.append(curl) - buffer = info["buffer"] - if curl_error: - error = CurlError(curl_error, curl_message) - code = error.code - effective_url = None - buffer.close() - buffer = None - else: - error = None - code = curl.getinfo(pycurl.HTTP_CODE) - effective_url = curl.getinfo(pycurl.EFFECTIVE_URL) - buffer.seek(0) - # the various curl timings are documented at - # http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html - time_info = dict( - queue=info["curl_start_ioloop_time"] - info["queue_start_time"], - namelookup=curl.getinfo(pycurl.NAMELOOKUP_TIME), - connect=curl.getinfo(pycurl.CONNECT_TIME), - appconnect=curl.getinfo(pycurl.APPCONNECT_TIME), - pretransfer=curl.getinfo(pycurl.PRETRANSFER_TIME), - starttransfer=curl.getinfo(pycurl.STARTTRANSFER_TIME), - total=curl.getinfo(pycurl.TOTAL_TIME), - redirect=curl.getinfo(pycurl.REDIRECT_TIME), - ) - try: - info["callback"](HTTPResponse( - request=info["request"], code=code, headers=info["headers"], - buffer=buffer, effective_url=effective_url, error=error, - reason=info['headers'].get("X-Http-Reason", None), - request_time=self.io_loop.time() - info["curl_start_ioloop_time"], - start_time=info["curl_start_time"], - time_info=time_info)) - except Exception: - self.handle_callback_exception(info["callback"]) - - def handle_callback_exception(self, callback): - self.io_loop.handle_callback_exception(callback) - - def _curl_create(self): - curl = pycurl.Curl() - if curl_log.isEnabledFor(logging.DEBUG): - curl.setopt(pycurl.VERBOSE, 1) - curl.setopt(pycurl.DEBUGFUNCTION, self._curl_debug) - if hasattr(pycurl, 'PROTOCOLS'): # PROTOCOLS first appeared in pycurl 7.19.5 (2014-07-12) - curl.setopt(pycurl.PROTOCOLS, pycurl.PROTO_HTTP | pycurl.PROTO_HTTPS) - curl.setopt(pycurl.REDIR_PROTOCOLS, pycurl.PROTO_HTTP | pycurl.PROTO_HTTPS) - return curl - - def _curl_setup_request(self, curl, request, buffer, headers): - curl.setopt(pycurl.URL, native_str(request.url)) - - # libcurl's magic "Expect: 100-continue" behavior causes delays - # with servers that don't support it (which include, among others, - # Google's OpenID endpoint). Additionally, this behavior has - # a bug in conjunction with the curl_multi_socket_action API - # (https://sourceforge.net/tracker/?func=detail&atid=100976&aid=3039744&group_id=976), - # which increases the delays. 
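# (Illustrative aside, assumed caller code: the timings gathered in
# _finish() above are exposed on the response object, e.g.
#     response = yield client.fetch("http://example.com")
#     print(response.time_info["namelookup"], response.time_info["total"])
#     print(response.request_time)  # wall-clock time measured on the IOLoop
# the time_info keys mirror curl_easy_getinfo and are reported in seconds.)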
It's more trouble than it's worth, - # so just turn off the feature (yes, setting Expect: to an empty - # value is the official way to disable this) - if "Expect" not in request.headers: - request.headers["Expect"] = "" - - # libcurl adds Pragma: no-cache by default; disable that too - if "Pragma" not in request.headers: - request.headers["Pragma"] = "" - - curl.setopt(pycurl.HTTPHEADER, - ["%s: %s" % (native_str(k), native_str(v)) - for k, v in request.headers.get_all()]) - - curl.setopt(pycurl.HEADERFUNCTION, - functools.partial(self._curl_header_callback, - headers, request.header_callback)) - if request.streaming_callback: - def write_function(chunk): - self.io_loop.add_callback(request.streaming_callback, chunk) - else: - write_function = buffer.write - curl.setopt(pycurl.WRITEFUNCTION, write_function) - curl.setopt(pycurl.FOLLOWLOCATION, request.follow_redirects) - curl.setopt(pycurl.MAXREDIRS, request.max_redirects) - curl.setopt(pycurl.CONNECTTIMEOUT_MS, int(1000 * request.connect_timeout)) - curl.setopt(pycurl.TIMEOUT_MS, int(1000 * request.request_timeout)) - if request.user_agent: - curl.setopt(pycurl.USERAGENT, native_str(request.user_agent)) - else: - curl.setopt(pycurl.USERAGENT, "Mozilla/5.0 (compatible; pycurl)") - if request.network_interface: - curl.setopt(pycurl.INTERFACE, request.network_interface) - if request.decompress_response: - curl.setopt(pycurl.ENCODING, "gzip,deflate") - else: - curl.setopt(pycurl.ENCODING, "none") - if request.proxy_host and request.proxy_port: - curl.setopt(pycurl.PROXY, request.proxy_host) - curl.setopt(pycurl.PROXYPORT, request.proxy_port) - if request.proxy_username: - credentials = httputil.encode_username_password(request.proxy_username, - request.proxy_password) - curl.setopt(pycurl.PROXYUSERPWD, credentials) - - if (request.proxy_auth_mode is None or - request.proxy_auth_mode == "basic"): - curl.setopt(pycurl.PROXYAUTH, pycurl.HTTPAUTH_BASIC) - elif request.proxy_auth_mode == "digest": - curl.setopt(pycurl.PROXYAUTH, pycurl.HTTPAUTH_DIGEST) - else: - raise ValueError( - "Unsupported proxy_auth_mode %s" % request.proxy_auth_mode) - else: - curl.setopt(pycurl.PROXY, '') - curl.unsetopt(pycurl.PROXYUSERPWD) - if request.validate_cert: - curl.setopt(pycurl.SSL_VERIFYPEER, 1) - curl.setopt(pycurl.SSL_VERIFYHOST, 2) - else: - curl.setopt(pycurl.SSL_VERIFYPEER, 0) - curl.setopt(pycurl.SSL_VERIFYHOST, 0) - if request.ca_certs is not None: - curl.setopt(pycurl.CAINFO, request.ca_certs) - else: - # There is no way to restore pycurl.CAINFO to its default value - # (Using unsetopt makes it reject all certificates). - # I don't see any way to read the default value from python so it - # can be restored later. We'll have to just leave CAINFO untouched - # if no ca_certs file was specified, and require that if any - # request uses a custom ca_certs file, they all must. - pass - - if request.allow_ipv6 is False: - # Curl behaves reasonably when DNS resolution gives an ipv6 address - # that we can't reach, so allow ipv6 unless the user asks to disable. 
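# (Illustrative aside, assumed caller code: the proxy branch above maps
# HTTPRequest fields straight onto pycurl options, e.g.
#     HTTPRequest("http://example.com",
#                 proxy_host="127.0.0.1", proxy_port=3128,
#                 proxy_username="user", proxy_password="secret",
#                 proxy_auth_mode="digest")  # "basic" is the default
# )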
- curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4) - else: - curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_WHATEVER) - - # Set the request method through curl's irritating interface which makes - # up names for almost every single method - curl_options = { - "GET": pycurl.HTTPGET, - "POST": pycurl.POST, - "PUT": pycurl.UPLOAD, - "HEAD": pycurl.NOBODY, - } - custom_methods = set(["DELETE", "OPTIONS", "PATCH"]) - for o in curl_options.values(): - curl.setopt(o, False) - if request.method in curl_options: - curl.unsetopt(pycurl.CUSTOMREQUEST) - curl.setopt(curl_options[request.method], True) - elif request.allow_nonstandard_methods or request.method in custom_methods: - curl.setopt(pycurl.CUSTOMREQUEST, request.method) - else: - raise KeyError('unknown method ' + request.method) - - body_expected = request.method in ("POST", "PATCH", "PUT") - body_present = request.body is not None - if not request.allow_nonstandard_methods: - # Some HTTP methods nearly always have bodies while others - # almost never do. Fail in this case unless the user has - # opted out of sanity checks with allow_nonstandard_methods. - if ((body_expected and not body_present) or - (body_present and not body_expected)): - raise ValueError( - 'Body must %sbe None for method %s (unless ' - 'allow_nonstandard_methods is true)' % - ('not ' if body_expected else '', request.method)) - - if body_expected or body_present: - if request.method == "GET": - # Even with `allow_nonstandard_methods` we disallow - # GET with a body (because libcurl doesn't allow it - # unless we use CUSTOMREQUEST). While the spec doesn't - # forbid clients from sending a body, it arguably - # disallows the server from doing anything with them. - raise ValueError('Body must be None for GET request') - request_buffer = BytesIO(utf8(request.body or '')) - - def ioctl(cmd): - if cmd == curl.IOCMD_RESTARTREAD: - request_buffer.seek(0) - curl.setopt(pycurl.READFUNCTION, request_buffer.read) - curl.setopt(pycurl.IOCTLFUNCTION, ioctl) - if request.method == "POST": - curl.setopt(pycurl.POSTFIELDSIZE, len(request.body or '')) - else: - curl.setopt(pycurl.UPLOAD, True) - curl.setopt(pycurl.INFILESIZE, len(request.body or '')) - - if request.auth_username is not None: - if request.auth_mode is None or request.auth_mode == "basic": - curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC) - elif request.auth_mode == "digest": - curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_DIGEST) - else: - raise ValueError("Unsupported auth_mode %s" % request.auth_mode) - - userpwd = httputil.encode_username_password(request.auth_username, - request.auth_password) - curl.setopt(pycurl.USERPWD, userpwd) - curl_log.debug("%s %s (username: %r)", request.method, request.url, - request.auth_username) - else: - curl.unsetopt(pycurl.USERPWD) - curl_log.debug("%s %s", request.method, request.url) - - if request.client_cert is not None: - curl.setopt(pycurl.SSLCERT, request.client_cert) - - if request.client_key is not None: - curl.setopt(pycurl.SSLKEY, request.client_key) - - if request.ssl_options is not None: - raise ValueError("ssl_options not supported in curl_httpclient") - - if threading.activeCount() > 1: - # libcurl/pycurl is not thread-safe by default. When multiple threads - # are used, signals should be disabled. This has the side effect - # of disabling DNS timeouts in some environments (when libcurl is - # not linked against ares), so we don't do it when there is only one - # thread. 
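# (Illustrative aside, assumed caller code: the prepare_curl_callback hook
# referenced just below receives the raw pycurl handle after all of the
# options above have been applied, e.g.
#     def _prepare(curl):
#         curl.setopt(pycurl.NOSIGNAL, 1)
#     HTTPRequest("http://example.com", prepare_curl_callback=_prepare)
# )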
Applications that use many short-lived threads may need - # to set NOSIGNAL manually in a prepare_curl_callback since - # there may not be any other threads running at the time we call - # threading.activeCount. - curl.setopt(pycurl.NOSIGNAL, 1) - if request.prepare_curl_callback is not None: - request.prepare_curl_callback(curl) - - def _curl_header_callback(self, headers, header_callback, header_line): - header_line = native_str(header_line.decode('latin1')) - if header_callback is not None: - self.io_loop.add_callback(header_callback, header_line) - # header_line as returned by curl includes the end-of-line characters. - # whitespace at the start should be preserved to allow multi-line headers - header_line = header_line.rstrip() - if header_line.startswith("HTTP/"): - headers.clear() - try: - (__, __, reason) = httputil.parse_response_start_line(header_line) - header_line = "X-Http-Reason: %s" % reason - except httputil.HTTPInputError: - return - if not header_line: - return - headers.parse_line(header_line) - - def _curl_debug(self, debug_type, debug_msg): - debug_types = ('I', '<', '>', '<', '>') - if debug_type == 0: - debug_msg = native_str(debug_msg) - curl_log.debug('%s', debug_msg.strip()) - elif debug_type in (1, 2): - debug_msg = native_str(debug_msg) - for line in debug_msg.splitlines(): - curl_log.debug('%s %s', debug_types[debug_type], line) - elif debug_type == 4: - curl_log.debug('%s %r', debug_types[debug_type], debug_msg) - - -class CurlError(HTTPError): - def __init__(self, errno, message): - HTTPError.__init__(self, 599, message) - self.errno = errno - - -if __name__ == "__main__": - AsyncHTTPClient.configure(CurlAsyncHTTPClient) - main() diff --git a/lib/tornado/escape.py b/lib/tornado/escape.py deleted file mode 100755 index a79ece66..00000000 --- a/lib/tornado/escape.py +++ /dev/null @@ -1,399 +0,0 @@ -# -# Copyright 2009 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Escaping/unescaping methods for HTML, JSON, URLs, and others. - -Also includes a few other miscellaneous string manipulation functions that -have crept in over time. -""" - -from __future__ import absolute_import, division, print_function - -import json -import re - -from tornado.util import PY3, unicode_type, basestring_type - -if PY3: - from urllib.parse import parse_qs as _parse_qs - import html.entities as htmlentitydefs - import urllib.parse as urllib_parse - unichr = chr -else: - from urlparse import parse_qs as _parse_qs - import htmlentitydefs - import urllib as urllib_parse - -try: - import typing # noqa -except ImportError: - pass - - -_XHTML_ESCAPE_RE = re.compile('[&<>"\']') -_XHTML_ESCAPE_DICT = {'&': '&', '<': '<', '>': '>', '"': '"', - '\'': '''} - - -def xhtml_escape(value): - """Escapes a string so it is valid within HTML or XML. - - Escapes the characters ``<``, ``>``, ``"``, ``'``, and ``&``. - When used in attribute values the escaped strings must be enclosed - in quotes. - - .. versionchanged:: 3.2 - - Added the single quote to the list of escaped characters. 
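A doctest-style sketch (illustrative)::

    >>> xhtml_escape("1 < 2 & 4 > 3")
    '1 &lt; 2 &amp; 4 &gt; 3'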
- """ - return _XHTML_ESCAPE_RE.sub(lambda match: _XHTML_ESCAPE_DICT[match.group(0)], - to_basestring(value)) - - -def xhtml_unescape(value): - """Un-escapes an XML-escaped string.""" - return re.sub(r"&(#?)(\w+?);", _convert_entity, _unicode(value)) - - -# The fact that json_encode wraps json.dumps is an implementation detail. -# Please see https://github.com/tornadoweb/tornado/pull/706 -# before sending a pull request that adds **kwargs to this function. -def json_encode(value): - """JSON-encodes the given Python object.""" - # JSON permits but does not require forward slashes to be escaped. - # This is useful when json data is emitted in a <script> tag - # in HTML, as it prevents </script> tags from prematurely terminating - # the javascript. Some json libraries do this escaping by default, - # although python's standard library does not, so we do it here. - # http://stackoverflow.com/questions/1580647/json-why-are-forward-slashes-escaped - return json.dumps(value).replace("</", "<\\/") - - -def json_decode(value): - """Returns Python objects for the given JSON string.""" - return json.loads(to_basestring(value)) - - -def squeeze(value): - """Replace all sequences of whitespace chars with a single space.""" - return re.sub(r"[\x00-\x20]+", " ", value).strip() - - -def url_escape(value, plus=True): - """Returns a URL-encoded version of the given value. - - If ``plus`` is true (the default), spaces will be represented - as "+" instead of "%20". This is appropriate for query strings - but not for the path component of a URL. Note that this default - is the reverse of Python's urllib module. - - .. versionadded:: 3.1 - The ``plus`` argument - """ - quote = urllib_parse.quote_plus if plus else urllib_parse.quote - return quote(utf8(value)) - - -# python 3 changed things around enough that we need two separate -# implementations of url_unescape. We also need our own implementation -# of parse_qs since python 3's version insists on decoding everything. -if not PY3: - def url_unescape(value, encoding='utf-8', plus=True): - """Decodes the given value from a URL. - - The argument may be either a byte or unicode string. - - If encoding is None, the result will be a byte string. Otherwise, - the result is a unicode string in the specified encoding. - - If ``plus`` is true (the default), plus signs will be interpreted - as spaces (literal plus signs must be represented as "%2B"). This - is appropriate for query strings and form-encoded values but not - for the path component of a URL. Note that this default is the - reverse of Python's urllib module. - - .. versionadded:: 3.1 - The ``plus`` argument - """ - unquote = (urllib_parse.unquote_plus if plus else urllib_parse.unquote) - if encoding is None: - return unquote(utf8(value)) - else: - return unicode_type(unquote(utf8(value)), encoding) - - parse_qs_bytes = _parse_qs -else: - def url_unescape(value, encoding='utf-8', plus=True): - """Decodes the given value from a URL. - - The argument may be either a byte or unicode string. - - If encoding is None, the result will be a byte string. Otherwise, - the result is a unicode string in the specified encoding. - - If ``plus`` is true (the default), plus signs will be interpreted - as spaces (literal plus signs must be represented as "%2B"). This - is appropriate for query strings and form-encoded values but not - for the path component of a URL. Note that this default is the - reverse of Python's urllib module. - - .. 
versionadded:: 3.1 - The ``plus`` argument - """ - if encoding is None: - if plus: - # unquote_to_bytes doesn't have a _plus variant - value = to_basestring(value).replace('+', ' ') - return urllib_parse.unquote_to_bytes(value) - else: - unquote = (urllib_parse.unquote_plus if plus - else urllib_parse.unquote) - return unquote(to_basestring(value), encoding=encoding) - - def parse_qs_bytes(qs, keep_blank_values=False, strict_parsing=False): - """Parses a query string like urlparse.parse_qs, but returns the - values as byte strings. - - Keys still become type str (interpreted as latin1 in python3!) - because it's too painful to keep them as byte strings in - python3 and in practice they're nearly always ascii anyway. - """ - # This is gross, but python3 doesn't give us another way. - # Latin1 is the universal donor of character encodings. - result = _parse_qs(qs, keep_blank_values, strict_parsing, - encoding='latin1', errors='strict') - encoded = {} - for k, v in result.items(): - encoded[k] = [i.encode('latin1') for i in v] - return encoded - - -_UTF8_TYPES = (bytes, type(None)) - - -def utf8(value): - # type: (typing.Union[bytes,unicode_type,None])->typing.Union[bytes,None] - """Converts a string argument to a byte string. - - If the argument is already a byte string or None, it is returned unchanged. - Otherwise it must be a unicode string and is encoded as utf8. - """ - if isinstance(value, _UTF8_TYPES): - return value - if not isinstance(value, unicode_type): - raise TypeError( - "Expected bytes, unicode, or None; got %r" % type(value) - ) - return value.encode("utf-8") - - -_TO_UNICODE_TYPES = (unicode_type, type(None)) - - -def to_unicode(value): - """Converts a string argument to a unicode string. - - If the argument is already a unicode string or None, it is returned - unchanged. Otherwise it must be a byte string and is decoded as utf8. - """ - if isinstance(value, _TO_UNICODE_TYPES): - return value - if not isinstance(value, bytes): - raise TypeError( - "Expected bytes, unicode, or None; got %r" % type(value) - ) - return value.decode("utf-8") - - -# to_unicode was previously named _unicode not because it was private, -# but to avoid conflicts with the built-in unicode() function/type -_unicode = to_unicode - -# When dealing with the standard library across python 2 and 3 it is -# sometimes useful to have a direct conversion to the native string type -if str is unicode_type: - native_str = to_unicode -else: - native_str = utf8 - -_BASESTRING_TYPES = (basestring_type, type(None)) - - -def to_basestring(value): - """Converts a string argument to a subclass of basestring. - - In python2, byte and unicode strings are mostly interchangeable, - so functions that deal with a user-supplied argument in combination - with ascii string constants can use either and should return the type - the user supplied. In python3, the two types are not interchangeable, - so this method is needed to convert byte strings to unicode. - """ - if isinstance(value, _BASESTRING_TYPES): - return value - if not isinstance(value, bytes): - raise TypeError( - "Expected bytes, unicode, or None; got %r" % type(value) - ) - return value.decode("utf-8") - - -def recursive_unicode(obj): - """Walks a simple data structure, converting byte strings to unicode. - - Supports lists, tuples, and dictionaries. 
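A doctest-style sketch (illustrative)::

    >>> recursive_unicode([b"a", {b"b": (b"c",)}]) == ["a", {"b": ("c",)}]
    True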
- """ - if isinstance(obj, dict): - return dict((recursive_unicode(k), recursive_unicode(v)) for (k, v) in obj.items()) - elif isinstance(obj, list): - return list(recursive_unicode(i) for i in obj) - elif isinstance(obj, tuple): - return tuple(recursive_unicode(i) for i in obj) - elif isinstance(obj, bytes): - return to_unicode(obj) - else: - return obj - - -# I originally used the regex from -# http://daringfireball.net/2010/07/improved_regex_for_matching_urls -# but it gets all exponential on certain patterns (such as too many trailing -# dots), causing the regex matcher to never return. -# This regex should avoid those problems. -# Use to_unicode instead of tornado.util.u - we don't want backslashes getting -# processed as escapes. -_URL_RE = re.compile(to_unicode( - r"""\b((?:([\w-]+):(/{1,3})|www[.])(?:(?:(?:[^\s&()]|&|")*(?:[^!"#$%&'()*+,.:;<=>?@\[\]^`{|}~\s]))|(?:\((?:[^\s&()]|&|")*\)))+)""" # noqa: E501 -)) - - -def linkify(text, shorten=False, extra_params="", - require_protocol=False, permitted_protocols=["http", "https"]): - """Converts plain text into HTML with links. - - For example: ``linkify("Hello http://tornadoweb.org!")`` would return - ``Hello <a href="http://tornadoweb.org">http://tornadoweb.org</a>!`` - - Parameters: - - * ``shorten``: Long urls will be shortened for display. - - * ``extra_params``: Extra text to include in the link tag, or a callable - taking the link as an argument and returning the extra text - e.g. ``linkify(text, extra_params='rel="nofollow" class="external"')``, - or:: - - def extra_params_cb(url): - if url.startswith("http://example.com"): - return 'class="internal"' - else: - return 'class="external" rel="nofollow"' - linkify(text, extra_params=extra_params_cb) - - * ``require_protocol``: Only linkify urls which include a protocol. If - this is False, urls such as www.facebook.com will also be linkified. - - * ``permitted_protocols``: List (or set) of protocols which should be - linkified, e.g. ``linkify(text, permitted_protocols=["http", "ftp", - "mailto"])``. It is very unsafe to include protocols such as - ``javascript``. - """ - if extra_params and not callable(extra_params): - extra_params = " " + extra_params.strip() - - def make_link(m): - url = m.group(1) - proto = m.group(2) - if require_protocol and not proto: - return url # not protocol, no linkify - - if proto and proto not in permitted_protocols: - return url # bad protocol, no linkify - - href = m.group(1) - if not proto: - href = "http://" + href # no proto specified, use http - - if callable(extra_params): - params = " " + extra_params(href).strip() - else: - params = extra_params - - # clip long urls. max_len is just an approximation - max_len = 30 - if shorten and len(url) > max_len: - before_clip = url - if proto: - proto_len = len(proto) + 1 + len(m.group(3) or "") # +1 for : - else: - proto_len = 0 - - parts = url[proto_len:].split("/") - if len(parts) > 1: - # Grab the whole host part plus the first bit of the path - # The path is usually not that interesting once shortened - # (no more slug, etc), so it really just provides a little - # extra indication of shortening. - url = url[:proto_len] + parts[0] + "/" + \ - parts[1][:8].split('?')[0].split('.')[0] - - if len(url) > max_len * 1.5: # still too long - url = url[:max_len] - - if url != before_clip: - amp = url.rfind('&') - # avoid splitting html char entities - if amp > max_len - 5: - url = url[:amp] - url += "..." 
- - if len(url) >= len(before_clip): - url = before_clip - else: - # full url is visible on mouse-over (for those who don't - # have a status bar, such as Safari by default) - params += ' title="%s"' % href - - return u'<a href="%s"%s>%s</a>' % (href, params, url) - - # First HTML-escape so that our strings are all safe. - # The regex is modified to avoid character entites other than & so - # that we won't pick up ", etc. - text = _unicode(xhtml_escape(text)) - return _URL_RE.sub(make_link, text) - - -def _convert_entity(m): - if m.group(1) == "#": - try: - if m.group(2)[:1].lower() == 'x': - return unichr(int(m.group(2)[1:], 16)) - else: - return unichr(int(m.group(2))) - except ValueError: - return "&#%s;" % m.group(2) - try: - return _HTML_UNICODE_MAP[m.group(2)] - except KeyError: - return "&%s;" % m.group(2) - - -def _build_unicode_map(): - unicode_map = {} - for name, value in htmlentitydefs.name2codepoint.items(): - unicode_map[name] = unichr(value) - return unicode_map - - -_HTML_UNICODE_MAP = _build_unicode_map() diff --git a/lib/tornado/gen.py b/lib/tornado/gen.py deleted file mode 100755 index 3556374d..00000000 --- a/lib/tornado/gen.py +++ /dev/null @@ -1,1367 +0,0 @@ -"""``tornado.gen`` implements generator-based coroutines. - -.. note:: - - The "decorator and generator" approach in this module is a - precursor to native coroutines (using ``async def`` and ``await``) - which were introduced in Python 3.5. Applications that do not - require compatibility with older versions of Python should use - native coroutines instead. Some parts of this module are still - useful with native coroutines, notably `multi`, `sleep`, - `WaitIterator`, and `with_timeout`. Some of these functions have - counterparts in the `asyncio` module which may be used as well, - although the two may not necessarily be 100% compatible. - -Coroutines provide an easier way to work in an asynchronous -environment than chaining callbacks. Code using coroutines is -technically asynchronous, but it is written as a single generator -instead of a collection of separate functions. - -For example, the following callback-based asynchronous handler: - -.. testcode:: - - class AsyncHandler(RequestHandler): - @asynchronous - def get(self): - http_client = AsyncHTTPClient() - http_client.fetch("http://example.com", - callback=self.on_fetch) - - def on_fetch(self, response): - do_something_with_response(response) - self.render("template.html") - -.. testoutput:: - :hide: - -could be written with ``gen`` as: - -.. testcode:: - - class GenAsyncHandler(RequestHandler): - @gen.coroutine - def get(self): - http_client = AsyncHTTPClient() - response = yield http_client.fetch("http://example.com") - do_something_with_response(response) - self.render("template.html") - -.. testoutput:: - :hide: - -Most asynchronous functions in Tornado return a `.Future`; -yielding this object returns its ``Future.result``. - -You can also yield a list or dict of ``Futures``, which will be -started at the same time and run in parallel; a list or dict of results will -be returned when they are all finished: - -.. testcode:: - - @gen.coroutine - def get(self): - http_client = AsyncHTTPClient() - response1, response2 = yield [http_client.fetch(url1), - http_client.fetch(url2)] - response_dict = yield dict(response3=http_client.fetch(url3), - response4=http_client.fetch(url4)) - response3 = response_dict['response3'] - response4 = response_dict['response4'] - -.. 
testoutput:: - :hide: - -If the `~functools.singledispatch` library is available (standard in -Python 3.4, available via the `singledispatch -<https://pypi.python.org/pypi/singledispatch>`_ package on older -versions), additional types of objects may be yielded. Tornado includes -support for ``asyncio.Future`` and Twisted's ``Deferred`` class when -``tornado.platform.asyncio`` and ``tornado.platform.twisted`` are imported. -See the `convert_yielded` function to extend this mechanism. - -.. versionchanged:: 3.2 - Dict support added. - -.. versionchanged:: 4.1 - Support added for yielding ``asyncio`` Futures and Twisted Deferreds - via ``singledispatch``. - -""" -from __future__ import absolute_import, division, print_function - -import collections -import functools -import itertools -import os -import sys -import types -import warnings - -from tornado.concurrent import (Future, is_future, chain_future, future_set_exc_info, - future_add_done_callback, future_set_result_unless_cancelled) -from tornado.ioloop import IOLoop -from tornado.log import app_log -from tornado import stack_context -from tornado.util import PY3, raise_exc_info, TimeoutError - -try: - try: - # py34+ - from functools import singledispatch # type: ignore - except ImportError: - from singledispatch import singledispatch # backport -except ImportError: - # In most cases, singledispatch is required (to avoid - # difficult-to-diagnose problems in which the functionality - # available differs depending on which invisble packages are - # installed). However, in Google App Engine third-party - # dependencies are more trouble so we allow this module to be - # imported without it. - if 'APPENGINE_RUNTIME' not in os.environ: - raise - singledispatch = None - -try: - try: - # py35+ - from collections.abc import Generator as GeneratorType # type: ignore - except ImportError: - from backports_abc import Generator as GeneratorType # type: ignore - - try: - # py35+ - from inspect import isawaitable # type: ignore - except ImportError: - from backports_abc import isawaitable -except ImportError: - if 'APPENGINE_RUNTIME' not in os.environ: - raise - from types import GeneratorType - - def isawaitable(x): # type: ignore - return False - -if PY3: - import builtins -else: - import __builtin__ as builtins - - -class KeyReuseError(Exception): - pass - - -class UnknownKeyError(Exception): - pass - - -class LeakedCallbackError(Exception): - pass - - -class BadYieldError(Exception): - pass - - -class ReturnValueIgnoredError(Exception): - pass - - -def _value_from_stopiteration(e): - try: - # StopIteration has a value attribute beginning in py33. - # So does our Return class. - return e.value - except AttributeError: - pass - try: - # Cython backports coroutine functionality by putting the value in - # e.args[0]. - return e.args[0] - except (AttributeError, IndexError): - return None - - -def _create_future(): - future = Future() - # Fixup asyncio debug info by removing extraneous stack entries - source_traceback = getattr(future, "_source_traceback", ()) - while source_traceback: - # Each traceback entry is equivalent to a - # (filename, self.lineno, self.name, self.line) tuple - filename = source_traceback[-1][0] - if filename == __file__: - del source_traceback[-1] - else: - break - return future - - -def engine(func): - """Callback-oriented decorator for asynchronous generators. 
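(An illustrative sketch of extending ``convert_yielded`` via
``singledispatch``, using an assumed custom placeholder type)::

    from tornado import gen
    from tornado.concurrent import Future

    class Ready(object):
        def __init__(self, value):
            self.value = value

    @gen.convert_yielded.register(Ready)
    def _convert_ready(obj):
        f = Future()
        f.set_result(obj.value)
        return f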
- - This is an older interface; for new code that does not need to be - compatible with versions of Tornado older than 3.0 the - `coroutine` decorator is recommended instead. - - This decorator is similar to `coroutine`, except it does not - return a `.Future` and the ``callback`` argument is not treated - specially. - - In most cases, functions decorated with `engine` should take - a ``callback`` argument and invoke it with their result when - they are finished. One notable exception is the - `~tornado.web.RequestHandler` :ref:`HTTP verb methods <verbs>`, - which use ``self.finish()`` in place of a callback argument. - - .. deprecated:: 5.1 - - This decorator will be removed in 6.0. Use `coroutine` or - ``async def`` instead. - """ - warnings.warn("gen.engine is deprecated, use gen.coroutine or async def instead", - DeprecationWarning) - func = _make_coroutine_wrapper(func, replace_callback=False) - - @functools.wraps(func) - def wrapper(*args, **kwargs): - future = func(*args, **kwargs) - - def final_callback(future): - if future.result() is not None: - raise ReturnValueIgnoredError( - "@gen.engine functions cannot return values: %r" % - (future.result(),)) - # The engine interface doesn't give us any way to return - # errors but to raise them into the stack context. - # Save the stack context here to use when the Future has resolved. - future_add_done_callback(future, stack_context.wrap(final_callback)) - return wrapper - - -def coroutine(func): - """Decorator for asynchronous generators. - - Any generator that yields objects from this module must be wrapped - in either this decorator or `engine`. - - Coroutines may "return" by raising the special exception - `Return(value) <Return>`. In Python 3.3+, it is also possible for - the function to simply use the ``return value`` statement (prior to - Python 3.3 generators were not allowed to also return values). - In all versions of Python a coroutine that simply wishes to exit - early may use the ``return`` statement without a value. - - Functions with this decorator return a `.Future`. Additionally, - they may be called with a ``callback`` keyword argument, which - will be invoked with the future's result when it resolves. If the - coroutine fails, the callback will not be run and an exception - will be raised into the surrounding `.StackContext`. The - ``callback`` argument is not visible inside the decorated - function; it is handled by the decorator itself. - - .. warning:: - - When exceptions occur inside a coroutine, the exception - information will be stored in the `.Future` object. You must - examine the result of the `.Future` object, or the exception - may go unnoticed by your code. This means yielding the function - if called from another coroutine, using something like - `.IOLoop.run_sync` for top-level calls, or passing the `.Future` - to `.IOLoop.add_future`. - - .. deprecated:: 5.1 - - The ``callback`` argument is deprecated and will be removed in 6.0. - Use the returned awaitable object instead. - """ - return _make_coroutine_wrapper(func, replace_callback=True) - - -def _make_coroutine_wrapper(func, replace_callback): - """The inner workings of ``@gen.coroutine`` and ``@gen.engine``. - - The two decorators differ in their treatment of the ``callback`` - argument, so we cannot simply implement ``@engine`` in terms of - ``@coroutine``. - """ - # On Python 3.5, set the coroutine flag on our generator, to allow it - # to be used with 'await'. 
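# (Illustrative aside, assumed caller code: a decorated coroutine returns a
# Future, so top-level callers drive it with IOLoop.run_sync instead of
# calling it and discarding the result, e.g.
#     @gen.coroutine
#     def main():
#         yield gen.sleep(0.1)
#         raise gen.Return(42)
#     result = IOLoop.current().run_sync(main)  # -> 42
# )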
- wrapped = func - if hasattr(types, 'coroutine'): - func = types.coroutine(func) - - @functools.wraps(wrapped) - def wrapper(*args, **kwargs): - future = _create_future() - - if replace_callback and 'callback' in kwargs: - warnings.warn("callback arguments are deprecated, use the returned Future instead", - DeprecationWarning, stacklevel=2) - callback = kwargs.pop('callback') - IOLoop.current().add_future( - future, lambda future: callback(future.result())) - - try: - result = func(*args, **kwargs) - except (Return, StopIteration) as e: - result = _value_from_stopiteration(e) - except Exception: - future_set_exc_info(future, sys.exc_info()) - try: - return future - finally: - # Avoid circular references - future = None - else: - if isinstance(result, GeneratorType): - # Inline the first iteration of Runner.run. This lets us - # avoid the cost of creating a Runner when the coroutine - # never actually yields, which in turn allows us to - # use "optional" coroutines in critical path code without - # performance penalty for the synchronous case. - try: - orig_stack_contexts = stack_context._state.contexts - yielded = next(result) - if stack_context._state.contexts is not orig_stack_contexts: - yielded = _create_future() - yielded.set_exception( - stack_context.StackContextInconsistentError( - 'stack_context inconsistency (probably caused ' - 'by yield within a "with StackContext" block)')) - except (StopIteration, Return) as e: - future_set_result_unless_cancelled(future, _value_from_stopiteration(e)) - except Exception: - future_set_exc_info(future, sys.exc_info()) - else: - # Provide strong references to Runner objects as long - # as their result future objects also have strong - # references (typically from the parent coroutine's - # Runner). This keeps the coroutine's Runner alive. - # We do this by exploiting the public API - # add_done_callback() instead of putting a private - # attribute on the Future. - # (Github issues #1769, #2229). - runner = Runner(result, future, yielded) - future.add_done_callback(lambda _: runner) - yielded = None - try: - return future - finally: - # Subtle memory optimization: if next() raised an exception, - # the future's exc_info contains a traceback which - # includes this stack frame. This creates a cycle, - # which will be collected at the next full GC but has - # been shown to greatly increase memory usage of - # benchmarks (relative to the refcount-based scheme - # used in the absence of cycles). We can avoid the - # cycle by clearing the local variable after we return it. - future = None - future_set_result_unless_cancelled(future, result) - return future - - wrapper.__wrapped__ = wrapped - wrapper.__tornado_coroutine__ = True - return wrapper - - -def is_coroutine_function(func): - """Return whether *func* is a coroutine function, i.e. a function - wrapped with `~.gen.coroutine`. - - .. versionadded:: 4.5 - """ - return getattr(func, '__tornado_coroutine__', False) - - -class Return(Exception): - """Special exception to return a value from a `coroutine`. - - If this exception is raised, its value argument is used as the - result of the coroutine:: - - @gen.coroutine - def fetch_json(url): - response = yield AsyncHTTPClient().fetch(url) - raise gen.Return(json_decode(response.body)) - - In Python 3.3, this exception is no longer necessary: the ``return`` - statement can be used directly to return a value (previously - ``yield`` and ``return`` with a value could not be combined in the - same function). 
- - By analogy with the return statement, the value argument is optional, - but it is never necessary to ``raise gen.Return()``. The ``return`` - statement can be used with no arguments instead. - """ - def __init__(self, value=None): - super(Return, self).__init__() - self.value = value - # Cython recognizes subclasses of StopIteration with a .args tuple. - self.args = (value,) - - -class WaitIterator(object): - """Provides an iterator to yield the results of futures as they finish. - - Yielding a set of futures like this: - - ``results = yield [future1, future2]`` - - pauses the coroutine until both ``future1`` and ``future2`` - return, and then restarts the coroutine with the results of both - futures. If either future is an exception, the expression will - raise that exception and all the results will be lost. - - If you need to get the result of each future as soon as possible, - or if you need the result of some futures even if others produce - errors, you can use ``WaitIterator``:: - - wait_iterator = gen.WaitIterator(future1, future2) - while not wait_iterator.done(): - try: - result = yield wait_iterator.next() - except Exception as e: - print("Error {} from {}".format(e, wait_iterator.current_future)) - else: - print("Result {} received from {} at {}".format( - result, wait_iterator.current_future, - wait_iterator.current_index)) - - Because results are returned as soon as they are available the - output from the iterator *will not be in the same order as the - input arguments*. If you need to know which future produced the - current result, you can use the attributes - ``WaitIterator.current_future``, or ``WaitIterator.current_index`` - to get the index of the future from the input list. (if keyword - arguments were used in the construction of the `WaitIterator`, - ``current_index`` will use the corresponding keyword). - - On Python 3.5, `WaitIterator` implements the async iterator - protocol, so it can be used with the ``async for`` statement (note - that in this version the entire iteration is aborted if any value - raises an exception, while the previous example can continue past - individual errors):: - - async for result in gen.WaitIterator(future1, future2): - print("Result {} received from {} at {}".format( - result, wait_iterator.current_future, - wait_iterator.current_index)) - - .. versionadded:: 4.1 - - .. versionchanged:: 4.3 - Added ``async for`` support in Python 3.5. - - """ - def __init__(self, *args, **kwargs): - if args and kwargs: - raise ValueError( - "You must provide args or kwargs, not both") - - if kwargs: - self._unfinished = dict((f, k) for (k, f) in kwargs.items()) - futures = list(kwargs.values()) - else: - self._unfinished = dict((f, i) for (i, f) in enumerate(args)) - futures = args - - self._finished = collections.deque() - self.current_index = self.current_future = None - self._running_future = None - - for future in futures: - future_add_done_callback(future, self._done_callback) - - def done(self): - """Returns True if this iterator has no more results.""" - if self._finished or self._unfinished: - return False - # Clear the 'current' values when iteration is done. - self.current_index = self.current_future = None - return True - - def next(self): - """Returns a `.Future` that will yield the next available result. - - Note that this `.Future` will not be the same object as any of - the inputs. 
- """ - self._running_future = Future() - - if self._finished: - self._return_result(self._finished.popleft()) - - return self._running_future - - def _done_callback(self, done): - if self._running_future and not self._running_future.done(): - self._return_result(done) - else: - self._finished.append(done) - - def _return_result(self, done): - """Called set the returned future's state that of the future - we yielded, and set the current future for the iterator. - """ - chain_future(done, self._running_future) - - self.current_future = done - self.current_index = self._unfinished.pop(done) - - def __aiter__(self): - return self - - def __anext__(self): - if self.done(): - # Lookup by name to silence pyflakes on older versions. - raise getattr(builtins, 'StopAsyncIteration')() - return self.next() - - -class YieldPoint(object): - """Base class for objects that may be yielded from the generator. - - .. deprecated:: 4.0 - Use `Futures <.Future>` instead. This class and all its subclasses - will be removed in 6.0 - """ - def __init__(self): - warnings.warn("YieldPoint is deprecated, use Futures instead", - DeprecationWarning) - - def start(self, runner): - """Called by the runner after the generator has yielded. - - No other methods will be called on this object before ``start``. - """ - raise NotImplementedError() - - def is_ready(self): - """Called by the runner to determine whether to resume the generator. - - Returns a boolean; may be called more than once. - """ - raise NotImplementedError() - - def get_result(self): - """Returns the value to use as the result of the yield expression. - - This method will only be called once, and only after `is_ready` - has returned true. - """ - raise NotImplementedError() - - -class Callback(YieldPoint): - """Returns a callable object that will allow a matching `Wait` to proceed. - - The key may be any value suitable for use as a dictionary key, and is - used to match ``Callbacks`` to their corresponding ``Waits``. The key - must be unique among outstanding callbacks within a single run of the - generator function, but may be reused across different runs of the same - function (so constants generally work fine). - - The callback may be called with zero or one arguments; if an argument - is given it will be returned by `Wait`. - - .. deprecated:: 4.0 - Use `Futures <.Future>` instead. This class will be removed in 6.0. - """ - def __init__(self, key): - warnings.warn("gen.Callback is deprecated, use Futures instead", - DeprecationWarning) - self.key = key - - def start(self, runner): - self.runner = runner - runner.register_callback(self.key) - - def is_ready(self): - return True - - def get_result(self): - return self.runner.result_callback(self.key) - - -class Wait(YieldPoint): - """Returns the argument passed to the result of a previous `Callback`. - - .. deprecated:: 4.0 - Use `Futures <.Future>` instead. This class will be removed in 6.0. - """ - def __init__(self, key): - warnings.warn("gen.Wait is deprecated, use Futures instead", - DeprecationWarning) - self.key = key - - def start(self, runner): - self.runner = runner - - def is_ready(self): - return self.runner.is_ready(self.key) - - def get_result(self): - return self.runner.pop_result(self.key) - - -class WaitAll(YieldPoint): - """Returns the results of multiple previous `Callbacks <Callback>`. - - The argument is a sequence of `Callback` keys, and the result is - a list of results in the same order. - - `WaitAll` is equivalent to yielding a list of `Wait` objects. - - .. 
deprecated:: 4.0 - Use `Futures <.Future>` instead. This class will be removed in 6.0. - """ - def __init__(self, keys): - warnings.warn("gen.WaitAll is deprecated, use gen.multi instead", - DeprecationWarning) - self.keys = keys - - def start(self, runner): - self.runner = runner - - def is_ready(self): - return all(self.runner.is_ready(key) for key in self.keys) - - def get_result(self): - return [self.runner.pop_result(key) for key in self.keys] - - -def Task(func, *args, **kwargs): - """Adapts a callback-based asynchronous function for use in coroutines. - - Takes a function (and optional additional arguments) and runs it with - those arguments plus a ``callback`` keyword argument. The argument passed - to the callback is returned as the result of the yield expression. - - .. versionchanged:: 4.0 - ``gen.Task`` is now a function that returns a `.Future`, instead of - a subclass of `YieldPoint`. It still behaves the same way when - yielded. - - .. deprecated:: 5.1 - This function is deprecated and will be removed in 6.0. - """ - warnings.warn("gen.Task is deprecated, use Futures instead", - DeprecationWarning) - future = _create_future() - - def handle_exception(typ, value, tb): - if future.done(): - return False - future_set_exc_info(future, (typ, value, tb)) - return True - - def set_result(result): - if future.done(): - return - future_set_result_unless_cancelled(future, result) - with stack_context.ExceptionStackContext(handle_exception): - func(*args, callback=_argument_adapter(set_result), **kwargs) - return future - - -class YieldFuture(YieldPoint): - def __init__(self, future): - """Adapts a `.Future` to the `YieldPoint` interface. - - .. versionchanged:: 5.0 - The ``io_loop`` argument (deprecated since version 4.1) has been removed. - - .. deprecated:: 5.1 - This class will be removed in 6.0. - """ - warnings.warn("YieldFuture is deprecated, use Futures instead", - DeprecationWarning) - self.future = future - self.io_loop = IOLoop.current() - - def start(self, runner): - if not self.future.done(): - self.runner = runner - self.key = object() - runner.register_callback(self.key) - self.io_loop.add_future(self.future, runner.result_callback(self.key)) - else: - self.runner = None - self.result_fn = self.future.result - - def is_ready(self): - if self.runner is not None: - return self.runner.is_ready(self.key) - else: - return True - - def get_result(self): - if self.runner is not None: - return self.runner.pop_result(self.key).result() - else: - return self.result_fn() - - -def _contains_yieldpoint(children): - """Returns True if ``children`` contains any YieldPoints. - - ``children`` may be a dict or a list, as used by `MultiYieldPoint` - and `multi_future`. - """ - if isinstance(children, dict): - return any(isinstance(i, YieldPoint) for i in children.values()) - if isinstance(children, list): - return any(isinstance(i, YieldPoint) for i in children) - return False - - -def multi(children, quiet_exceptions=()): - """Runs multiple asynchronous operations in parallel. - - ``children`` may either be a list or a dict whose values are - yieldable objects. ``multi()`` returns a new yieldable - object that resolves to a parallel structure containing their - results. If ``children`` is a list, the result is a list of - results in the same order; if it is a dict, the result is a dict - with the same keys. 
- - That is, ``results = yield multi(list_of_futures)`` is equivalent - to:: - - results = [] - for future in list_of_futures: - results.append(yield future) - - If any children raise exceptions, ``multi()`` will raise the first - one. All others will be logged, unless they are of types - contained in the ``quiet_exceptions`` argument. - - If any of the inputs are `YieldPoints <YieldPoint>`, the returned - yieldable object is a `YieldPoint`. Otherwise, returns a `.Future`. - This means that the result of `multi` can be used in a native - coroutine if and only if all of its children can be. - - In a ``yield``-based coroutine, it is not normally necessary to - call this function directly, since the coroutine runner will - do it automatically when a list or dict is yielded. However, - it is necessary in ``await``-based coroutines, or to pass - the ``quiet_exceptions`` argument. - - This function is available under the names ``multi()`` and ``Multi()`` - for historical reasons. - - Cancelling a `.Future` returned by ``multi()`` does not cancel its - children. `asyncio.gather` is similar to ``multi()``, but it does - cancel its children. - - .. versionchanged:: 4.2 - If multiple yieldables fail, any exceptions after the first - (which is raised) will be logged. Added the ``quiet_exceptions`` - argument to suppress this logging for selected exception types. - - .. versionchanged:: 4.3 - Replaced the class ``Multi`` and the function ``multi_future`` - with a unified function ``multi``. Added support for yieldables - other than `YieldPoint` and `.Future`. - - """ - if _contains_yieldpoint(children): - return MultiYieldPoint(children, quiet_exceptions=quiet_exceptions) - else: - return multi_future(children, quiet_exceptions=quiet_exceptions) - - -Multi = multi - - -class MultiYieldPoint(YieldPoint): - """Runs multiple asynchronous operations in parallel. - - This class is similar to `multi`, but it always creates a stack - context even when no children require it. It is not compatible with - native coroutines. - - .. versionchanged:: 4.2 - If multiple ``YieldPoints`` fail, any exceptions after the first - (which is raised) will be logged. Added the ``quiet_exceptions`` - argument to suppress this logging for selected exception types. - - .. versionchanged:: 4.3 - Renamed from ``Multi`` to ``MultiYieldPoint``. The name ``Multi`` - remains as an alias for the equivalent `multi` function. - - .. deprecated:: 4.3 - Use `multi` instead. This class will be removed in 6.0. 
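A sketch of the modern replacement (illustrative; assumes
``from tornado.httpclient import AsyncHTTPClient``)::

    @gen.coroutine
    def fetch_all(urls):
        client = AsyncHTTPClient()
        responses = yield gen.multi([client.fetch(u) for u in urls],
                                    quiet_exceptions=(IOError,))
        raise gen.Return(responses)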
- """ - def __init__(self, children, quiet_exceptions=()): - warnings.warn("MultiYieldPoint is deprecated, use Futures instead", - DeprecationWarning) - self.keys = None - if isinstance(children, dict): - self.keys = list(children.keys()) - children = children.values() - self.children = [] - for i in children: - if not isinstance(i, YieldPoint): - i = convert_yielded(i) - if is_future(i): - i = YieldFuture(i) - self.children.append(i) - assert all(isinstance(i, YieldPoint) for i in self.children) - self.unfinished_children = set(self.children) - self.quiet_exceptions = quiet_exceptions - - def start(self, runner): - for i in self.children: - i.start(runner) - - def is_ready(self): - finished = list(itertools.takewhile( - lambda i: i.is_ready(), self.unfinished_children)) - self.unfinished_children.difference_update(finished) - return not self.unfinished_children - - def get_result(self): - result_list = [] - exc_info = None - for f in self.children: - try: - result_list.append(f.get_result()) - except Exception as e: - if exc_info is None: - exc_info = sys.exc_info() - else: - if not isinstance(e, self.quiet_exceptions): - app_log.error("Multiple exceptions in yield list", - exc_info=True) - if exc_info is not None: - raise_exc_info(exc_info) - if self.keys is not None: - return dict(zip(self.keys, result_list)) - else: - return list(result_list) - - -def multi_future(children, quiet_exceptions=()): - """Wait for multiple asynchronous futures in parallel. - - This function is similar to `multi`, but does not support - `YieldPoints <YieldPoint>`. - - .. versionadded:: 4.0 - - .. versionchanged:: 4.2 - If multiple ``Futures`` fail, any exceptions after the first (which is - raised) will be logged. Added the ``quiet_exceptions`` - argument to suppress this logging for selected exception types. - - .. deprecated:: 4.3 - Use `multi` instead. - """ - if isinstance(children, dict): - keys = list(children.keys()) - children = children.values() - else: - keys = None - children = list(map(convert_yielded, children)) - assert all(is_future(i) or isinstance(i, _NullFuture) for i in children) - unfinished_children = set(children) - - future = _create_future() - if not children: - future_set_result_unless_cancelled(future, - {} if keys is not None else []) - - def callback(f): - unfinished_children.remove(f) - if not unfinished_children: - result_list = [] - for f in children: - try: - result_list.append(f.result()) - except Exception as e: - if future.done(): - if not isinstance(e, quiet_exceptions): - app_log.error("Multiple exceptions in yield list", - exc_info=True) - else: - future_set_exc_info(future, sys.exc_info()) - if not future.done(): - if keys is not None: - future_set_result_unless_cancelled(future, - dict(zip(keys, result_list))) - else: - future_set_result_unless_cancelled(future, result_list) - - listening = set() - for f in children: - if f not in listening: - listening.add(f) - future_add_done_callback(f, callback) - return future - - -def maybe_future(x): - """Converts ``x`` into a `.Future`. - - If ``x`` is already a `.Future`, it is simply returned; otherwise - it is wrapped in a new `.Future`. This is suitable for use as - ``result = yield gen.maybe_future(f())`` when you don't know whether - ``f()`` returns a `.Future` or not. - - .. deprecated:: 4.3 - This function only handles ``Futures``, not other yieldable objects. - Instead of `maybe_future`, check for the non-future result types - you expect (often just ``None``), and ``yield`` anything unknown. 
- """ - if is_future(x): - return x - else: - fut = _create_future() - fut.set_result(x) - return fut - - -def with_timeout(timeout, future, quiet_exceptions=()): - """Wraps a `.Future` (or other yieldable object) in a timeout. - - Raises `tornado.util.TimeoutError` if the input future does not - complete before ``timeout``, which may be specified in any form - allowed by `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or - an absolute time relative to `.IOLoop.time`) - - If the wrapped `.Future` fails after it has timed out, the exception - will be logged unless it is of a type contained in ``quiet_exceptions`` - (which may be an exception type or a sequence of types). - - Does not support `YieldPoint` subclasses. - - The wrapped `.Future` is not canceled when the timeout expires, - permitting it to be reused. `asyncio.wait_for` is similar to this - function but it does cancel the wrapped `.Future` on timeout. - - .. versionadded:: 4.0 - - .. versionchanged:: 4.1 - Added the ``quiet_exceptions`` argument and the logging of unhandled - exceptions. - - .. versionchanged:: 4.4 - Added support for yieldable objects other than `.Future`. - - """ - # TODO: allow YieldPoints in addition to other yieldables? - # Tricky to do with stack_context semantics. - # - # It's tempting to optimize this by cancelling the input future on timeout - # instead of creating a new one, but A) we can't know if we are the only - # one waiting on the input future, so cancelling it might disrupt other - # callers and B) concurrent futures can only be cancelled while they are - # in the queue, so cancellation cannot reliably bound our waiting time. - future = convert_yielded(future) - result = _create_future() - chain_future(future, result) - io_loop = IOLoop.current() - - def error_callback(future): - try: - future.result() - except Exception as e: - if not isinstance(e, quiet_exceptions): - app_log.error("Exception in Future %r after timeout", - future, exc_info=True) - - def timeout_callback(): - if not result.done(): - result.set_exception(TimeoutError("Timeout")) - # In case the wrapped future goes on to fail, log it. - future_add_done_callback(future, error_callback) - timeout_handle = io_loop.add_timeout( - timeout, timeout_callback) - if isinstance(future, Future): - # We know this future will resolve on the IOLoop, so we don't - # need the extra thread-safety of IOLoop.add_future (and we also - # don't care about StackContext here. - future_add_done_callback( - future, lambda future: io_loop.remove_timeout(timeout_handle)) - else: - # concurrent.futures.Futures may resolve on any thread, so we - # need to route them back to the IOLoop. - io_loop.add_future( - future, lambda future: io_loop.remove_timeout(timeout_handle)) - return result - - -def sleep(duration): - """Return a `.Future` that resolves after the given number of seconds. - - When used with ``yield`` in a coroutine, this is a non-blocking - analogue to `time.sleep` (which should not be used in coroutines - because it is blocking):: - - yield gen.sleep(0.5) - - Note that calling this function on its own does nothing; you must - wait on the `.Future` it returns (usually by yielding it). - - .. versionadded:: 4.1 - """ - f = _create_future() - IOLoop.current().call_later(duration, - lambda: future_set_result_unless_cancelled(f, None)) - return f - - -class _NullFuture(object): - """_NullFuture resembles a Future that finished with a result of None. - - It's not actually a `Future` to avoid depending on a particular event loop. 
- Handled as a special case in the coroutine runner. - """ - def result(self): - return None - - def done(self): - return True - - -# _null_future is used as a dummy value in the coroutine runner. It differs -# from moment in that moment always adds a delay of one IOLoop iteration -# while _null_future is processed as soon as possible. -_null_future = _NullFuture() - -moment = _NullFuture() -moment.__doc__ = \ - """A special object which may be yielded to allow the IOLoop to run for -one iteration. - -This is not needed in normal use but it can be helpful in long-running -coroutines that are likely to yield Futures that are ready instantly. - -Usage: ``yield gen.moment`` - -.. versionadded:: 4.0 - -.. deprecated:: 4.5 - ``yield None`` (or ``yield`` with no argument) is now equivalent to - ``yield gen.moment``. -""" - - -class Runner(object): - """Internal implementation of `tornado.gen.engine`. - - Maintains information about pending callbacks and their results. - - The results of the generator are stored in ``result_future`` (a - `.Future`) - """ - def __init__(self, gen, result_future, first_yielded): - self.gen = gen - self.result_future = result_future - self.future = _null_future - self.yield_point = None - self.pending_callbacks = None - self.results = None - self.running = False - self.finished = False - self.had_exception = False - self.io_loop = IOLoop.current() - # For efficiency, we do not create a stack context until we - # reach a YieldPoint (stack contexts are required for the historical - # semantics of YieldPoints, but not for Futures). When we have - # done so, this field will be set and must be called at the end - # of the coroutine. - self.stack_context_deactivate = None - if self.handle_yield(first_yielded): - gen = result_future = first_yielded = None - self.run() - - def register_callback(self, key): - """Adds ``key`` to the list of callbacks.""" - if self.pending_callbacks is None: - # Lazily initialize the old-style YieldPoint data structures. - self.pending_callbacks = set() - self.results = {} - if key in self.pending_callbacks: - raise KeyReuseError("key %r is already pending" % (key,)) - self.pending_callbacks.add(key) - - def is_ready(self, key): - """Returns true if a result is available for ``key``.""" - if self.pending_callbacks is None or key not in self.pending_callbacks: - raise UnknownKeyError("key %r is not pending" % (key,)) - return key in self.results - - def set_result(self, key, result): - """Sets the result for ``key`` and attempts to resume the generator.""" - self.results[key] = result - if self.yield_point is not None and self.yield_point.is_ready(): - try: - future_set_result_unless_cancelled(self.future, - self.yield_point.get_result()) - except: - future_set_exc_info(self.future, sys.exc_info()) - self.yield_point = None - self.run() - - def pop_result(self, key): - """Returns the result for ``key`` and unregisters it.""" - self.pending_callbacks.remove(key) - return self.results.pop(key) - - def run(self): - """Starts or resumes the generator, running until it reaches a - yield point that is not ready. 
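`gen.moment` is easiest to see in a long-running coroutine that would otherwise starve the IOLoop; a sketch (the every-100-items cadence and the squaring step are arbitrary stand-ins):

```python
from tornado import gen, ioloop

def process(item):
    return item * item  # stand-in for a small CPU-bound step

@gen.coroutine
def busy_loop(items):
    results = []
    for i, item in enumerate(items):
        results.append(process(item))
        if i % 100 == 0:
            # Give the IOLoop one iteration so other callbacks can run;
            # per the deprecation note above, a bare ``yield`` (or
            # ``yield None``) is equivalent in 4.5+.
            yield gen.moment
    raise gen.Return(results)

print(len(ioloop.IOLoop.current().run_sync(lambda: busy_loop(range(1000)))))
```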
- """ - if self.running or self.finished: - return - try: - self.running = True - while True: - future = self.future - if not future.done(): - return - self.future = None - try: - orig_stack_contexts = stack_context._state.contexts - exc_info = None - - try: - value = future.result() - except Exception: - self.had_exception = True - exc_info = sys.exc_info() - future = None - - if exc_info is not None: - try: - yielded = self.gen.throw(*exc_info) - finally: - # Break up a reference to itself - # for faster GC on CPython. - exc_info = None - else: - yielded = self.gen.send(value) - - if stack_context._state.contexts is not orig_stack_contexts: - self.gen.throw( - stack_context.StackContextInconsistentError( - 'stack_context inconsistency (probably caused ' - 'by yield within a "with StackContext" block)')) - except (StopIteration, Return) as e: - self.finished = True - self.future = _null_future - if self.pending_callbacks and not self.had_exception: - # If we ran cleanly without waiting on all callbacks - # raise an error (really more of a warning). If we - # had an exception then some callbacks may have been - # orphaned, so skip the check in that case. - raise LeakedCallbackError( - "finished without waiting for callbacks %r" % - self.pending_callbacks) - future_set_result_unless_cancelled(self.result_future, - _value_from_stopiteration(e)) - self.result_future = None - self._deactivate_stack_context() - return - except Exception: - self.finished = True - self.future = _null_future - future_set_exc_info(self.result_future, sys.exc_info()) - self.result_future = None - self._deactivate_stack_context() - return - if not self.handle_yield(yielded): - return - yielded = None - finally: - self.running = False - - def handle_yield(self, yielded): - # Lists containing YieldPoints require stack contexts; - # other lists are handled in convert_yielded. - if _contains_yieldpoint(yielded): - yielded = multi(yielded) - - if isinstance(yielded, YieldPoint): - # YieldPoints are too closely coupled to the Runner to go - # through the generic convert_yielded mechanism. - self.future = Future() - - def start_yield_point(): - try: - yielded.start(self) - if yielded.is_ready(): - future_set_result_unless_cancelled(self.future, yielded.get_result()) - else: - self.yield_point = yielded - except Exception: - self.future = Future() - future_set_exc_info(self.future, sys.exc_info()) - - if self.stack_context_deactivate is None: - # Start a stack context if this is the first - # YieldPoint we've seen. - with stack_context.ExceptionStackContext( - self.handle_exception) as deactivate: - self.stack_context_deactivate = deactivate - - def cb(): - start_yield_point() - self.run() - self.io_loop.add_callback(cb) - return False - else: - start_yield_point() - else: - try: - self.future = convert_yielded(yielded) - except BadYieldError: - self.future = Future() - future_set_exc_info(self.future, sys.exc_info()) - - if self.future is moment: - self.io_loop.add_callback(self.run) - return False - elif not self.future.done(): - def inner(f): - # Break a reference cycle to speed GC. 
- f = None # noqa - self.run() - self.io_loop.add_future( - self.future, inner) - return False - return True - - def result_callback(self, key): - return stack_context.wrap(_argument_adapter( - functools.partial(self.set_result, key))) - - def handle_exception(self, typ, value, tb): - if not self.running and not self.finished: - self.future = Future() - future_set_exc_info(self.future, (typ, value, tb)) - self.run() - return True - else: - return False - - def _deactivate_stack_context(self): - if self.stack_context_deactivate is not None: - self.stack_context_deactivate() - self.stack_context_deactivate = None - - -Arguments = collections.namedtuple('Arguments', ['args', 'kwargs']) - - -def _argument_adapter(callback): - """Returns a function that when invoked runs ``callback`` with one arg. - - If the function returned by this function is called with exactly - one argument, that argument is passed to ``callback``. Otherwise - the args tuple and kwargs dict are wrapped in an `Arguments` object. - """ - def wrapper(*args, **kwargs): - if kwargs or len(args) > 1: - callback(Arguments(args, kwargs)) - elif args: - callback(args[0]) - else: - callback(None) - return wrapper - - -# Convert Awaitables into Futures. -try: - import asyncio -except ImportError: - # Py2-compatible version for use with Cython. - # Copied from PEP 380. - @coroutine - def _wrap_awaitable(x): - if hasattr(x, '__await__'): - _i = x.__await__() - else: - _i = iter(x) - try: - _y = next(_i) - except StopIteration as _e: - _r = _value_from_stopiteration(_e) - else: - while 1: - try: - _s = yield _y - except GeneratorExit as _e: - try: - _m = _i.close - except AttributeError: - pass - else: - _m() - raise _e - except BaseException as _e: - _x = sys.exc_info() - try: - _m = _i.throw - except AttributeError: - raise _e - else: - try: - _y = _m(*_x) - except StopIteration as _e: - _r = _value_from_stopiteration(_e) - break - else: - try: - if _s is None: - _y = next(_i) - else: - _y = _i.send(_s) - except StopIteration as _e: - _r = _value_from_stopiteration(_e) - break - raise Return(_r) -else: - try: - _wrap_awaitable = asyncio.ensure_future - except AttributeError: - # asyncio.ensure_future was introduced in Python 3.4.4, but - # Debian jessie still ships with 3.4.2 so try the old name. - _wrap_awaitable = getattr(asyncio, 'async') - - -def convert_yielded(yielded): - """Convert a yielded object into a `.Future`. - - The default implementation accepts lists, dictionaries, and Futures. - - If the `~functools.singledispatch` library is available, this function - may be extended to support additional types. For example:: - - @convert_yielded.register(asyncio.Future) - def _(asyncio_future): - return tornado.platform.asyncio.to_tornado_future(asyncio_future) - - .. versionadded:: 4.1 - """ - # Lists and dicts containing YieldPoints were handled earlier. 
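Beyond the registration example in the docstring above, `convert_yielded` can be extended for application-specific types on Python 3, where it is wrapped in `singledispatch`; the `LazyValue` type here is hypothetical:

```python
from tornado import gen, ioloop
from tornado.concurrent import Future

class LazyValue(object):
    """Hypothetical application type wrapping an already-known value."""
    def __init__(self, value):
        self.value = value

@gen.convert_yielded.register(LazyValue)
def _(lazy):
    # Adapt LazyValue to a Future so coroutines can ``yield`` it directly.
    f = Future()
    f.set_result(lazy.value)
    return f

@gen.coroutine
def demo():
    x = yield LazyValue(42)
    raise gen.Return(x)

print(ioloop.IOLoop.current().run_sync(demo))
```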
-    if yielded is None or yielded is moment:
-        return moment
-    elif yielded is _null_future:
-        return _null_future
-    elif isinstance(yielded, (list, dict)):
-        return multi(yielded)
-    elif is_future(yielded):
-        return yielded
-    elif isawaitable(yielded):
-        return _wrap_awaitable(yielded)
-    else:
-        raise BadYieldError("yielded unknown object %r" % (yielded,))
-
-
-if singledispatch is not None:
-    convert_yielded = singledispatch(convert_yielded)
diff --git a/lib/tornado/http1connection.py b/lib/tornado/http1connection.py
deleted file mode 100755
index 6cc4071c..00000000
--- a/lib/tornado/http1connection.py
+++ /dev/null
@@ -1,751 +0,0 @@
-#
-# Copyright 2014 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Client and server implementations of HTTP/1.x.
-
-.. versionadded:: 4.0
-"""
-
-from __future__ import absolute_import, division, print_function
-
-import re
-import warnings
-
-from tornado.concurrent import (Future, future_add_done_callback,
-                                future_set_result_unless_cancelled)
-from tornado.escape import native_str, utf8
-from tornado import gen
-from tornado import httputil
-from tornado import iostream
-from tornado.log import gen_log, app_log
-from tornado import stack_context
-from tornado.util import GzipDecompressor, PY3
-
-
-class _QuietException(Exception):
-    def __init__(self):
-        pass
-
-
-class _ExceptionLoggingContext(object):
-    """Used with the ``with`` statement when calling delegate methods to
-    log any exceptions with the given logger.  Any exceptions caught are
-    converted to _QuietException
-    """
-    def __init__(self, logger):
-        self.logger = logger
-
-    def __enter__(self):
-        pass
-
-    def __exit__(self, typ, value, tb):
-        if value is not None:
-            self.logger.error("Uncaught exception", exc_info=(typ, value, tb))
-            raise _QuietException
-
-
-class HTTP1ConnectionParameters(object):
-    """Parameters for `.HTTP1Connection` and `.HTTP1ServerConnection`.
-    """
-    def __init__(self, no_keep_alive=False, chunk_size=None,
-                 max_header_size=None, header_timeout=None, max_body_size=None,
-                 body_timeout=None, decompress=False):
-        """
-        :arg bool no_keep_alive: If true, always close the connection after
-            one request.
-        :arg int chunk_size: how much data to read into memory at once
-        :arg int max_header_size: maximum amount of data for HTTP headers
-        :arg float header_timeout: how long to wait for all headers (seconds)
-        :arg int max_body_size: maximum amount of data for body
-        :arg float body_timeout: how long to wait while reading body (seconds)
-        :arg bool decompress: if true, decode incoming
-            ``Content-Encoding: gzip``
-        """
-        self.no_keep_alive = no_keep_alive
-        self.chunk_size = chunk_size or 65536
-        self.max_header_size = max_header_size or 65536
-        self.header_timeout = header_timeout
-        self.max_body_size = max_body_size
-        self.body_timeout = body_timeout
-        self.decompress = decompress
-
-
-class HTTP1Connection(httputil.HTTPConnection):
-    """Implements the HTTP/1.x protocol.
-
-    This class can be used on its own for clients, or via
-    `HTTP1ServerConnection` for servers.
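A sketch of how the knobs on `HTTP1ConnectionParameters` fit together, with illustrative values only (assuming the upstream `tornado` package is installed):

```python
from tornado.http1connection import HTTP1ConnectionParameters

# Illustrative limits: 64 KiB header cap, 10 MiB body cap, and
# transparent decoding of gzipped request/response bodies.
params = HTTP1ConnectionParameters(
    no_keep_alive=False,
    chunk_size=64 * 1024,
    max_header_size=64 * 1024,
    header_timeout=10.0,
    max_body_size=10 * 1024 * 1024,
    body_timeout=60.0,
    decompress=True,
)
```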
- """ - def __init__(self, stream, is_client, params=None, context=None): - """ - :arg stream: an `.IOStream` - :arg bool is_client: client or server - :arg params: a `.HTTP1ConnectionParameters` instance or ``None`` - :arg context: an opaque application-defined object that can be accessed - as ``connection.context``. - """ - self.is_client = is_client - self.stream = stream - if params is None: - params = HTTP1ConnectionParameters() - self.params = params - self.context = context - self.no_keep_alive = params.no_keep_alive - # The body limits can be altered by the delegate, so save them - # here instead of just referencing self.params later. - self._max_body_size = (self.params.max_body_size or - self.stream.max_buffer_size) - self._body_timeout = self.params.body_timeout - # _write_finished is set to True when finish() has been called, - # i.e. there will be no more data sent. Data may still be in the - # stream's write buffer. - self._write_finished = False - # True when we have read the entire incoming body. - self._read_finished = False - # _finish_future resolves when all data has been written and flushed - # to the IOStream. - self._finish_future = Future() - # If true, the connection should be closed after this request - # (after the response has been written in the server side, - # and after it has been read in the client) - self._disconnect_on_finish = False - self._clear_callbacks() - # Save the start lines after we read or write them; they - # affect later processing (e.g. 304 responses and HEAD methods - # have content-length but no bodies) - self._request_start_line = None - self._response_start_line = None - self._request_headers = None - # True if we are writing output with chunked encoding. - self._chunking_output = None - # While reading a body with a content-length, this is the - # amount left to read. - self._expected_content_remaining = None - # A Future for our outgoing writes, returned by IOStream.write. - self._pending_write = None - - def read_response(self, delegate): - """Read a single HTTP response. - - Typical client-mode usage is to write a request using `write_headers`, - `write`, and `finish`, and then call ``read_response``. - - :arg delegate: a `.HTTPMessageDelegate` - - Returns a `.Future` that resolves to None after the full response has - been read. 
- """ - if self.params.decompress: - delegate = _GzipMessageDelegate(delegate, self.params.chunk_size) - return self._read_message(delegate) - - @gen.coroutine - def _read_message(self, delegate): - need_delegate_close = False - try: - header_future = self.stream.read_until_regex( - b"\r?\n\r?\n", - max_bytes=self.params.max_header_size) - if self.params.header_timeout is None: - header_data = yield header_future - else: - try: - header_data = yield gen.with_timeout( - self.stream.io_loop.time() + self.params.header_timeout, - header_future, - quiet_exceptions=iostream.StreamClosedError) - except gen.TimeoutError: - self.close() - raise gen.Return(False) - start_line, headers = self._parse_headers(header_data) - if self.is_client: - start_line = httputil.parse_response_start_line(start_line) - self._response_start_line = start_line - else: - start_line = httputil.parse_request_start_line(start_line) - self._request_start_line = start_line - self._request_headers = headers - - self._disconnect_on_finish = not self._can_keep_alive( - start_line, headers) - need_delegate_close = True - with _ExceptionLoggingContext(app_log): - header_future = delegate.headers_received(start_line, headers) - if header_future is not None: - yield header_future - if self.stream is None: - # We've been detached. - need_delegate_close = False - raise gen.Return(False) - skip_body = False - if self.is_client: - if (self._request_start_line is not None and - self._request_start_line.method == 'HEAD'): - skip_body = True - code = start_line.code - if code == 304: - # 304 responses may include the content-length header - # but do not actually have a body. - # http://tools.ietf.org/html/rfc7230#section-3.3 - skip_body = True - if code >= 100 and code < 200: - # 1xx responses should never indicate the presence of - # a body. - if ('Content-Length' in headers or - 'Transfer-Encoding' in headers): - raise httputil.HTTPInputError( - "Response code %d cannot have body" % code) - # TODO: client delegates will get headers_received twice - # in the case of a 100-continue. Document or change? 
-                    yield self._read_message(delegate)
-            else:
-                if (headers.get("Expect") == "100-continue" and
-                        not self._write_finished):
-                    self.stream.write(b"HTTP/1.1 100 (Continue)\r\n\r\n")
-            if not skip_body:
-                body_future = self._read_body(
-                    start_line.code if self.is_client else 0, headers, delegate)
-                if body_future is not None:
-                    if self._body_timeout is None:
-                        yield body_future
-                    else:
-                        try:
-                            yield gen.with_timeout(
-                                self.stream.io_loop.time() + self._body_timeout,
-                                body_future,
-                                quiet_exceptions=iostream.StreamClosedError)
-                        except gen.TimeoutError:
-                            gen_log.info("Timeout reading body from %s",
-                                         self.context)
-                            self.stream.close()
-                            raise gen.Return(False)
-            self._read_finished = True
-            if not self._write_finished or self.is_client:
-                need_delegate_close = False
-                with _ExceptionLoggingContext(app_log):
-                    delegate.finish()
-            # If we're waiting for the application to produce an asynchronous
-            # response, and we're not detached, register a close callback
-            # on the stream (we didn't need one while we were reading)
-            if (not self._finish_future.done() and
-                    self.stream is not None and
-                    not self.stream.closed()):
-                self.stream.set_close_callback(self._on_connection_close)
-                yield self._finish_future
-            if self.is_client and self._disconnect_on_finish:
-                self.close()
-            if self.stream is None:
-                raise gen.Return(False)
-        except httputil.HTTPInputError as e:
-            gen_log.info("Malformed HTTP message from %s: %s",
-                         self.context, e)
-            if not self.is_client:
-                yield self.stream.write(b'HTTP/1.1 400 Bad Request\r\n\r\n')
-            self.close()
-            raise gen.Return(False)
-        finally:
-            if need_delegate_close:
-                with _ExceptionLoggingContext(app_log):
-                    delegate.on_connection_close()
-            header_future = None
-            self._clear_callbacks()
-        raise gen.Return(True)
-
-    def _clear_callbacks(self):
-        """Clears the callback attributes.
-
-        This allows the request handler to be garbage collected more
-        quickly in CPython by breaking up reference cycles.
-        """
-        self._write_callback = None
-        self._write_future = None
-        self._close_callback = None
-        if self.stream is not None:
-            self.stream.set_close_callback(None)
-
-    def set_close_callback(self, callback):
-        """Sets a callback that will be run when the connection is closed.
-
-        Note that this callback is slightly different from
-        `.HTTPMessageDelegate.on_connection_close`: The
-        `.HTTPMessageDelegate` method is called when the connection is
-        closed while receiving a message.  This callback is used when
-        there is not an active delegate (for example, on the server
-        side this callback is used if the client closes the connection
-        after sending its request but before receiving all of the
-        response.)
-        """
-        self._close_callback = stack_context.wrap(callback)
-
-    def _on_connection_close(self):
-        # Note that this callback is only registered on the IOStream
-        # when we have finished reading the request and are waiting for
-        # the application to produce its response.
-        if self._close_callback is not None:
-            callback = self._close_callback
-            self._close_callback = None
-            callback()
-        if not self._finish_future.done():
-            future_set_result_unless_cancelled(self._finish_future, None)
-        self._clear_callbacks()
-
-    def close(self):
-        if self.stream is not None:
-            self.stream.close()
-        self._clear_callbacks()
-        if not self._finish_future.done():
-            future_set_result_unless_cancelled(self._finish_future, None)
-
-    def detach(self):
-        """Take control of the underlying stream.
-
-        Returns the underlying `.IOStream` object and stops all further
-        HTTP processing.
May only be called during - `.HTTPMessageDelegate.headers_received`. Intended for implementing - protocols like websockets that tunnel over an HTTP handshake. - """ - self._clear_callbacks() - stream = self.stream - self.stream = None - if not self._finish_future.done(): - future_set_result_unless_cancelled(self._finish_future, None) - return stream - - def set_body_timeout(self, timeout): - """Sets the body timeout for a single request. - - Overrides the value from `.HTTP1ConnectionParameters`. - """ - self._body_timeout = timeout - - def set_max_body_size(self, max_body_size): - """Sets the body size limit for a single request. - - Overrides the value from `.HTTP1ConnectionParameters`. - """ - self._max_body_size = max_body_size - - def write_headers(self, start_line, headers, chunk=None, callback=None): - """Implements `.HTTPConnection.write_headers`.""" - lines = [] - if self.is_client: - self._request_start_line = start_line - lines.append(utf8('%s %s HTTP/1.1' % (start_line[0], start_line[1]))) - # Client requests with a non-empty body must have either a - # Content-Length or a Transfer-Encoding. - self._chunking_output = ( - start_line.method in ('POST', 'PUT', 'PATCH') and - 'Content-Length' not in headers and - 'Transfer-Encoding' not in headers) - else: - self._response_start_line = start_line - lines.append(utf8('HTTP/1.1 %d %s' % (start_line[1], start_line[2]))) - self._chunking_output = ( - # TODO: should this use - # self._request_start_line.version or - # start_line.version? - self._request_start_line.version == 'HTTP/1.1' and - # 1xx, 204 and 304 responses have no body (not even a zero-length - # body), and so should not have either Content-Length or - # Transfer-Encoding headers. - start_line.code not in (204, 304) and - (start_line.code < 100 or start_line.code >= 200) and - # No need to chunk the output if a Content-Length is specified. - 'Content-Length' not in headers and - # Applications are discouraged from touching Transfer-Encoding, - # but if they do, leave it alone. - 'Transfer-Encoding' not in headers) - # If connection to a 1.1 client will be closed, inform client - if (self._request_start_line.version == 'HTTP/1.1' and self._disconnect_on_finish): - headers['Connection'] = 'close' - # If a 1.0 client asked for keep-alive, add the header. - if (self._request_start_line.version == 'HTTP/1.0' and - self._request_headers.get('Connection', '').lower() == 'keep-alive'): - headers['Connection'] = 'Keep-Alive' - if self._chunking_output: - headers['Transfer-Encoding'] = 'chunked' - if (not self.is_client and - (self._request_start_line.method == 'HEAD' or - start_line.code == 304)): - self._expected_content_remaining = 0 - elif 'Content-Length' in headers: - self._expected_content_remaining = int(headers['Content-Length']) - else: - self._expected_content_remaining = None - # TODO: headers are supposed to be of type str, but we still have some - # cases that let bytes slip through. Remove these native_str calls when those - # are fixed. 
- header_lines = (native_str(n) + ": " + native_str(v) for n, v in headers.get_all()) - if PY3: - lines.extend(l.encode('latin1') for l in header_lines) - else: - lines.extend(header_lines) - for line in lines: - if b'\n' in line: - raise ValueError('Newline in header: ' + repr(line)) - future = None - if self.stream.closed(): - future = self._write_future = Future() - future.set_exception(iostream.StreamClosedError()) - future.exception() - else: - if callback is not None: - warnings.warn("callback argument is deprecated, use returned Future instead", - DeprecationWarning) - self._write_callback = stack_context.wrap(callback) - else: - future = self._write_future = Future() - data = b"\r\n".join(lines) + b"\r\n\r\n" - if chunk: - data += self._format_chunk(chunk) - self._pending_write = self.stream.write(data) - future_add_done_callback(self._pending_write, self._on_write_complete) - return future - - def _format_chunk(self, chunk): - if self._expected_content_remaining is not None: - self._expected_content_remaining -= len(chunk) - if self._expected_content_remaining < 0: - # Close the stream now to stop further framing errors. - self.stream.close() - raise httputil.HTTPOutputError( - "Tried to write more data than Content-Length") - if self._chunking_output and chunk: - # Don't write out empty chunks because that means END-OF-STREAM - # with chunked encoding - return utf8("%x" % len(chunk)) + b"\r\n" + chunk + b"\r\n" - else: - return chunk - - def write(self, chunk, callback=None): - """Implements `.HTTPConnection.write`. - - For backwards compatibility it is allowed but deprecated to - skip `write_headers` and instead call `write()` with a - pre-encoded header block. - """ - future = None - if self.stream.closed(): - future = self._write_future = Future() - self._write_future.set_exception(iostream.StreamClosedError()) - self._write_future.exception() - else: - if callback is not None: - warnings.warn("callback argument is deprecated, use returned Future instead", - DeprecationWarning) - self._write_callback = stack_context.wrap(callback) - else: - future = self._write_future = Future() - self._pending_write = self.stream.write(self._format_chunk(chunk)) - self._pending_write.add_done_callback(self._on_write_complete) - return future - - def finish(self): - """Implements `.HTTPConnection.finish`.""" - if (self._expected_content_remaining is not None and - self._expected_content_remaining != 0 and - not self.stream.closed()): - self.stream.close() - raise httputil.HTTPOutputError( - "Tried to write %d bytes less than Content-Length" % - self._expected_content_remaining) - if self._chunking_output: - if not self.stream.closed(): - self._pending_write = self.stream.write(b"0\r\n\r\n") - self._pending_write.add_done_callback(self._on_write_complete) - self._write_finished = True - # If the app finished the request while we're still reading, - # divert any remaining data away from the delegate and - # close the connection when we're done sending our response. - # Closing the connection is the only way to avoid reading the - # whole input body. - if not self._read_finished: - self._disconnect_on_finish = True - # No more data is coming, so instruct TCP to send any remaining - # data immediately instead of waiting for a full packet or ack. 
- self.stream.set_nodelay(True) - if self._pending_write is None: - self._finish_request(None) - else: - future_add_done_callback(self._pending_write, self._finish_request) - - def _on_write_complete(self, future): - exc = future.exception() - if exc is not None and not isinstance(exc, iostream.StreamClosedError): - future.result() - if self._write_callback is not None: - callback = self._write_callback - self._write_callback = None - self.stream.io_loop.add_callback(callback) - if self._write_future is not None: - future = self._write_future - self._write_future = None - future_set_result_unless_cancelled(future, None) - - def _can_keep_alive(self, start_line, headers): - if self.params.no_keep_alive: - return False - connection_header = headers.get("Connection") - if connection_header is not None: - connection_header = connection_header.lower() - if start_line.version == "HTTP/1.1": - return connection_header != "close" - elif ("Content-Length" in headers or - headers.get("Transfer-Encoding", "").lower() == "chunked" or - getattr(start_line, 'method', None) in ("HEAD", "GET")): - # start_line may be a request or response start line; only - # the former has a method attribute. - return connection_header == "keep-alive" - return False - - def _finish_request(self, future): - self._clear_callbacks() - if not self.is_client and self._disconnect_on_finish: - self.close() - return - # Turn Nagle's algorithm back on, leaving the stream in its - # default state for the next request. - self.stream.set_nodelay(False) - if not self._finish_future.done(): - future_set_result_unless_cancelled(self._finish_future, None) - - def _parse_headers(self, data): - # The lstrip removes newlines that some implementations sometimes - # insert between messages of a reused connection. Per RFC 7230, - # we SHOULD ignore at least one empty line before the request. - # http://tools.ietf.org/html/rfc7230#section-3.5 - data = native_str(data.decode('latin1')).lstrip("\r\n") - # RFC 7230 section allows for both CRLF and bare LF. - eol = data.find("\n") - start_line = data[:eol].rstrip("\r") - headers = httputil.HTTPHeaders.parse(data[eol:]) - return start_line, headers - - def _read_body(self, code, headers, delegate): - if "Content-Length" in headers: - if "Transfer-Encoding" in headers: - # Response cannot contain both Content-Length and - # Transfer-Encoding headers. - # http://tools.ietf.org/html/rfc7230#section-3.3.3 - raise httputil.HTTPInputError( - "Response with both Transfer-Encoding and Content-Length") - if "," in headers["Content-Length"]: - # Proxies sometimes cause Content-Length headers to get - # duplicated. If all the values are identical then we can - # use them but if they differ it's an error. - pieces = re.split(r',\s*', headers["Content-Length"]) - if any(i != pieces[0] for i in pieces): - raise httputil.HTTPInputError( - "Multiple unequal Content-Lengths: %r" % - headers["Content-Length"]) - headers["Content-Length"] = pieces[0] - - try: - content_length = int(headers["Content-Length"]) - except ValueError: - # Handles non-integer Content-Length value. - raise httputil.HTTPInputError( - "Only integer Content-Length is allowed: %s" % headers["Content-Length"]) - - if content_length > self._max_body_size: - raise httputil.HTTPInputError("Content-Length too long") - else: - content_length = None - - if code == 204: - # This response code is not allowed to have a non-empty body, - # and has an implicit length of zero instead of read-until-close. 
- # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3 - if ("Transfer-Encoding" in headers or - content_length not in (None, 0)): - raise httputil.HTTPInputError( - "Response with code %d should not have body" % code) - content_length = 0 - - if content_length is not None: - return self._read_fixed_body(content_length, delegate) - if headers.get("Transfer-Encoding", "").lower() == "chunked": - return self._read_chunked_body(delegate) - if self.is_client: - return self._read_body_until_close(delegate) - return None - - @gen.coroutine - def _read_fixed_body(self, content_length, delegate): - while content_length > 0: - body = yield self.stream.read_bytes( - min(self.params.chunk_size, content_length), partial=True) - content_length -= len(body) - if not self._write_finished or self.is_client: - with _ExceptionLoggingContext(app_log): - ret = delegate.data_received(body) - if ret is not None: - yield ret - - @gen.coroutine - def _read_chunked_body(self, delegate): - # TODO: "chunk extensions" http://tools.ietf.org/html/rfc2616#section-3.6.1 - total_size = 0 - while True: - chunk_len = yield self.stream.read_until(b"\r\n", max_bytes=64) - chunk_len = int(chunk_len.strip(), 16) - if chunk_len == 0: - crlf = yield self.stream.read_bytes(2) - if crlf != b'\r\n': - raise httputil.HTTPInputError("improperly terminated chunked request") - return - total_size += chunk_len - if total_size > self._max_body_size: - raise httputil.HTTPInputError("chunked body too large") - bytes_to_read = chunk_len - while bytes_to_read: - chunk = yield self.stream.read_bytes( - min(bytes_to_read, self.params.chunk_size), partial=True) - bytes_to_read -= len(chunk) - if not self._write_finished or self.is_client: - with _ExceptionLoggingContext(app_log): - ret = delegate.data_received(chunk) - if ret is not None: - yield ret - # chunk ends with \r\n - crlf = yield self.stream.read_bytes(2) - assert crlf == b"\r\n" - - @gen.coroutine - def _read_body_until_close(self, delegate): - body = yield self.stream.read_until_close() - if not self._write_finished or self.is_client: - with _ExceptionLoggingContext(app_log): - delegate.data_received(body) - - -class _GzipMessageDelegate(httputil.HTTPMessageDelegate): - """Wraps an `HTTPMessageDelegate` to decode ``Content-Encoding: gzip``. - """ - def __init__(self, delegate, chunk_size): - self._delegate = delegate - self._chunk_size = chunk_size - self._decompressor = None - - def headers_received(self, start_line, headers): - if headers.get("Content-Encoding") == "gzip": - self._decompressor = GzipDecompressor() - # Downstream delegates will only see uncompressed data, - # so rename the content-encoding header. - # (but note that curl_httpclient doesn't do this). - headers.add("X-Consumed-Content-Encoding", - headers["Content-Encoding"]) - del headers["Content-Encoding"] - return self._delegate.headers_received(start_line, headers) - - @gen.coroutine - def data_received(self, chunk): - if self._decompressor: - compressed_data = chunk - while compressed_data: - decompressed = self._decompressor.decompress( - compressed_data, self._chunk_size) - if decompressed: - ret = self._delegate.data_received(decompressed) - if ret is not None: - yield ret - compressed_data = self._decompressor.unconsumed_tail - else: - ret = self._delegate.data_received(chunk) - if ret is not None: - yield ret - - def finish(self): - if self._decompressor is not None: - tail = self._decompressor.flush() - if tail: - # I believe the tail will always be empty (i.e. 
- # decompress will return all it can). The purpose - # of the flush call is to detect errors such - # as truncated input. But in case it ever returns - # anything, treat it as an extra chunk - self._delegate.data_received(tail) - return self._delegate.finish() - - def on_connection_close(self): - return self._delegate.on_connection_close() - - -class HTTP1ServerConnection(object): - """An HTTP/1.x server.""" - def __init__(self, stream, params=None, context=None): - """ - :arg stream: an `.IOStream` - :arg params: a `.HTTP1ConnectionParameters` or None - :arg context: an opaque application-defined object that is accessible - as ``connection.context`` - """ - self.stream = stream - if params is None: - params = HTTP1ConnectionParameters() - self.params = params - self.context = context - self._serving_future = None - - @gen.coroutine - def close(self): - """Closes the connection. - - Returns a `.Future` that resolves after the serving loop has exited. - """ - self.stream.close() - # Block until the serving loop is done, but ignore any exceptions - # (start_serving is already responsible for logging them). - try: - yield self._serving_future - except Exception: - pass - - def start_serving(self, delegate): - """Starts serving requests on this connection. - - :arg delegate: a `.HTTPServerConnectionDelegate` - """ - assert isinstance(delegate, httputil.HTTPServerConnectionDelegate) - self._serving_future = self._server_request_loop(delegate) - # Register the future on the IOLoop so its errors get logged. - self.stream.io_loop.add_future(self._serving_future, - lambda f: f.result()) - - @gen.coroutine - def _server_request_loop(self, delegate): - try: - while True: - conn = HTTP1Connection(self.stream, False, - self.params, self.context) - request_delegate = delegate.start_request(self, conn) - try: - ret = yield conn.read_response(request_delegate) - except (iostream.StreamClosedError, - iostream.UnsatisfiableReadError): - return - except _QuietException: - # This exception was already logged. - conn.close() - return - except Exception: - gen_log.error("Uncaught exception", exc_info=True) - conn.close() - return - if not ret: - return - yield gen.moment - finally: - delegate.on_close(self) diff --git a/lib/tornado/httpclient.py b/lib/tornado/httpclient.py deleted file mode 100755 index 5ed2ee67..00000000 --- a/lib/tornado/httpclient.py +++ /dev/null @@ -1,748 +0,0 @@ -"""Blocking and non-blocking HTTP client interfaces. - -This module defines a common interface shared by two implementations, -``simple_httpclient`` and ``curl_httpclient``. Applications may either -instantiate their chosen implementation class directly or use the -`AsyncHTTPClient` class from this module, which selects an implementation -that can be overridden with the `AsyncHTTPClient.configure` method. - -The default implementation is ``simple_httpclient``, and this is expected -to be suitable for most users' needs. However, some applications may wish -to switch to ``curl_httpclient`` for reasons such as the following: - -* ``curl_httpclient`` has some features not found in ``simple_httpclient``, - including support for HTTP proxies and the ability to use a specified - network interface. - -* ``curl_httpclient`` is more likely to be compatible with sites that are - not-quite-compliant with the HTTP spec, or sites that use little-exercised - features of HTTP. - -* ``curl_httpclient`` is faster. - -* ``curl_httpclient`` was the default prior to Tornado 2.0. 
-
-Note that if you are using ``curl_httpclient``, it is highly
-recommended that you use a recent version of ``libcurl`` and
-``pycurl``.  Currently the minimum supported version of libcurl is
-7.22.0, and the minimum version of pycurl is 7.18.2.  It is highly
-recommended that your ``libcurl`` installation is built with
-asynchronous DNS resolver (threaded or c-ares), otherwise you may
-encounter various problems with request timeouts (for more
-information, see
-http://curl.haxx.se/libcurl/c/curl_easy_setopt.html#CURLOPTCONNECTTIMEOUTMS
-and comments in curl_httpclient.py).
-
-To select ``curl_httpclient``, call `AsyncHTTPClient.configure` at startup::
-
-    AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
-"""
-
-from __future__ import absolute_import, division, print_function
-
-import functools
-import time
-import warnings
-import weakref
-
-from tornado.concurrent import Future, future_set_result_unless_cancelled
-from tornado.escape import utf8, native_str
-from tornado import gen, httputil, stack_context
-from tornado.ioloop import IOLoop
-from tornado.util import Configurable
-
-
-class HTTPClient(object):
-    """A blocking HTTP client.
-
-    This interface is provided to make it easier to share code between
-    synchronous and asynchronous applications.  Applications that are
-    running an `.IOLoop` must use `AsyncHTTPClient` instead.
-
-    Typical usage looks like this::
-
-        http_client = httpclient.HTTPClient()
-        try:
-            response = http_client.fetch("http://www.google.com/")
-            print(response.body)
-        except httpclient.HTTPError as e:
-            # HTTPError is raised for non-200 responses; the response
-            # can be found in e.response.
-            print("Error: " + str(e))
-        except Exception as e:
-            # Other errors are possible, such as IOError.
-            print("Error: " + str(e))
-        http_client.close()
-
-    .. versionchanged:: 5.0
-
-       Due to limitations in `asyncio`, it is no longer possible to
-       use the synchronous ``HTTPClient`` while an `.IOLoop` is running.
-       Use `AsyncHTTPClient` instead.
-
-    """
-    def __init__(self, async_client_class=None, **kwargs):
-        # Initialize self._closed at the beginning of the constructor
-        # so that an exception raised here doesn't lead to confusing
-        # failures in __del__.
-        self._closed = True
-        self._io_loop = IOLoop(make_current=False)
-        if async_client_class is None:
-            async_client_class = AsyncHTTPClient
-        # Create the client while our IOLoop is "current", without
-        # clobbering the thread's real current IOLoop (if any).
-        self._async_client = self._io_loop.run_sync(
-            gen.coroutine(lambda: async_client_class(**kwargs)))
-        self._closed = False
-
-    def __del__(self):
-        self.close()
-
-    def close(self):
-        """Closes the HTTPClient, freeing any resources used."""
-        if not self._closed:
-            self._async_client.close()
-            self._io_loop.close()
-            self._closed = True
-
-    def fetch(self, request, **kwargs):
-        """Executes a request, returning an `HTTPResponse`.
-
-        The request may be either a string URL or an `HTTPRequest` object.
-        If it is a string, we construct an `HTTPRequest` using any additional
-        kwargs: ``HTTPRequest(request, **kwargs)``
-
-        If an error occurs during the fetch, we raise an `HTTPError` unless
-        the ``raise_error`` keyword argument is set to False.
-        """
-        response = self._io_loop.run_sync(functools.partial(
-            self._async_client.fetch, request, **kwargs))
-        return response
-
-
-class AsyncHTTPClient(Configurable):
-    """A non-blocking HTTP client.
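Given the 5.0 note above that the blocking `HTTPClient` can no longer run alongside an `.IOLoop`, a rough equivalent can be built on `run_sync` and `AsyncHTTPClient`; this is a sketch, not a drop-in replacement:

```python
from tornado.httpclient import AsyncHTTPClient, HTTPError
from tornado.ioloop import IOLoop

def fetch_sync(url):
    # Spins the current IOLoop just long enough to complete one fetch;
    # a stand-in for the HTTPClient pattern shown in the docstring above.
    return IOLoop.current().run_sync(lambda: AsyncHTTPClient().fetch(url))

try:
    response = fetch_sync("http://www.google.com/")
    print(response.body)
except HTTPError as e:
    print("Error: " + str(e))
```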
- - Example usage:: - - async def f(): - http_client = AsyncHTTPClient() - try: - response = await http_client.fetch("http://www.google.com") - except Exception as e: - print("Error: %s" % e) - else: - print(response.body) - - The constructor for this class is magic in several respects: It - actually creates an instance of an implementation-specific - subclass, and instances are reused as a kind of pseudo-singleton - (one per `.IOLoop`). The keyword argument ``force_instance=True`` - can be used to suppress this singleton behavior. Unless - ``force_instance=True`` is used, no arguments should be passed to - the `AsyncHTTPClient` constructor. The implementation subclass as - well as arguments to its constructor can be set with the static - method `configure()` - - All `AsyncHTTPClient` implementations support a ``defaults`` - keyword argument, which can be used to set default values for - `HTTPRequest` attributes. For example:: - - AsyncHTTPClient.configure( - None, defaults=dict(user_agent="MyUserAgent")) - # or with force_instance: - client = AsyncHTTPClient(force_instance=True, - defaults=dict(user_agent="MyUserAgent")) - - .. versionchanged:: 5.0 - The ``io_loop`` argument (deprecated since version 4.1) has been removed. - - """ - @classmethod - def configurable_base(cls): - return AsyncHTTPClient - - @classmethod - def configurable_default(cls): - from tornado.simple_httpclient import SimpleAsyncHTTPClient - return SimpleAsyncHTTPClient - - @classmethod - def _async_clients(cls): - attr_name = '_async_client_dict_' + cls.__name__ - if not hasattr(cls, attr_name): - setattr(cls, attr_name, weakref.WeakKeyDictionary()) - return getattr(cls, attr_name) - - def __new__(cls, force_instance=False, **kwargs): - io_loop = IOLoop.current() - if force_instance: - instance_cache = None - else: - instance_cache = cls._async_clients() - if instance_cache is not None and io_loop in instance_cache: - return instance_cache[io_loop] - instance = super(AsyncHTTPClient, cls).__new__(cls, **kwargs) - # Make sure the instance knows which cache to remove itself from. - # It can't simply call _async_clients() because we may be in - # __new__(AsyncHTTPClient) but instance.__class__ may be - # SimpleAsyncHTTPClient. - instance._instance_cache = instance_cache - if instance_cache is not None: - instance_cache[instance.io_loop] = instance - return instance - - def initialize(self, defaults=None): - self.io_loop = IOLoop.current() - self.defaults = dict(HTTPRequest._DEFAULTS) - if defaults is not None: - self.defaults.update(defaults) - self._closed = False - - def close(self): - """Destroys this HTTP client, freeing any file descriptors used. - - This method is **not needed in normal use** due to the way - that `AsyncHTTPClient` objects are transparently reused. - ``close()`` is generally only necessary when either the - `.IOLoop` is also being closed, or the ``force_instance=True`` - argument was used when creating the `AsyncHTTPClient`. - - No other methods may be called on the `AsyncHTTPClient` after - ``close()``. - - """ - if self._closed: - return - self._closed = True - if self._instance_cache is not None: - if self._instance_cache.get(self.io_loop) is not self: - raise RuntimeError("inconsistent AsyncHTTPClient cache") - del self._instance_cache[self.io_loop] - - def fetch(self, request, callback=None, raise_error=True, **kwargs): - """Executes a request, asynchronously returning an `HTTPResponse`. - - The request may be either a string URL or an `HTTPRequest` object. 
- If it is a string, we construct an `HTTPRequest` using any additional - kwargs: ``HTTPRequest(request, **kwargs)`` - - This method returns a `.Future` whose result is an - `HTTPResponse`. By default, the ``Future`` will raise an - `HTTPError` if the request returned a non-200 response code - (other errors may also be raised if the server could not be - contacted). Instead, if ``raise_error`` is set to False, the - response will always be returned regardless of the response - code. - - If a ``callback`` is given, it will be invoked with the `HTTPResponse`. - In the callback interface, `HTTPError` is not automatically raised. - Instead, you must check the response's ``error`` attribute or - call its `~HTTPResponse.rethrow` method. - - .. deprecated:: 5.1 - - The ``callback`` argument is deprecated and will be removed - in 6.0. Use the returned `.Future` instead. - - The ``raise_error=False`` argument currently suppresses - *all* errors, encapsulating them in `HTTPResponse` objects - with a 599 response code. This will change in Tornado 6.0: - ``raise_error=False`` will only affect the `HTTPError` - raised when a non-200 response code is used. - - """ - if self._closed: - raise RuntimeError("fetch() called on closed AsyncHTTPClient") - if not isinstance(request, HTTPRequest): - request = HTTPRequest(url=request, **kwargs) - else: - if kwargs: - raise ValueError("kwargs can't be used if request is an HTTPRequest object") - # We may modify this (to add Host, Accept-Encoding, etc), - # so make sure we don't modify the caller's object. This is also - # where normal dicts get converted to HTTPHeaders objects. - request.headers = httputil.HTTPHeaders(request.headers) - request = _RequestProxy(request, self.defaults) - future = Future() - if callback is not None: - warnings.warn("callback arguments are deprecated, use the returned Future instead", - DeprecationWarning) - callback = stack_context.wrap(callback) - - def handle_future(future): - exc = future.exception() - if isinstance(exc, HTTPError) and exc.response is not None: - response = exc.response - elif exc is not None: - response = HTTPResponse( - request, 599, error=exc, - request_time=time.time() - request.start_time) - else: - response = future.result() - self.io_loop.add_callback(callback, response) - future.add_done_callback(handle_future) - - def handle_response(response): - if raise_error and response.error: - if isinstance(response.error, HTTPError): - response.error.response = response - future.set_exception(response.error) - else: - if response.error and not response._error_is_response_code: - warnings.warn("raise_error=False will allow '%s' to be raised in the future" % - response.error, DeprecationWarning) - future_set_result_unless_cancelled(future, response) - self.fetch_impl(request, handle_response) - return future - - def fetch_impl(self, request, callback): - raise NotImplementedError() - - @classmethod - def configure(cls, impl, **kwargs): - """Configures the `AsyncHTTPClient` subclass to use. - - ``AsyncHTTPClient()`` actually creates an instance of a subclass. - This method may be called with either a class object or the - fully-qualified name of such a class (or ``None`` to use the default, - ``SimpleAsyncHTTPClient``) - - If additional keyword arguments are given, they will be passed - to the constructor of each subclass instance created. The - keyword argument ``max_clients`` determines the maximum number - of simultaneous `~AsyncHTTPClient.fetch()` operations that can - execute in parallel on each `.IOLoop`. 
Additional arguments - may be supported depending on the implementation class in use. - - Example:: - - AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient") - """ - super(AsyncHTTPClient, cls).configure(impl, **kwargs) - - -class HTTPRequest(object): - """HTTP client request object.""" - - # Default values for HTTPRequest parameters. - # Merged with the values on the request object by AsyncHTTPClient - # implementations. - _DEFAULTS = dict( - connect_timeout=20.0, - request_timeout=20.0, - follow_redirects=True, - max_redirects=5, - decompress_response=True, - proxy_password='', - allow_nonstandard_methods=False, - validate_cert=True) - - def __init__(self, url, method="GET", headers=None, body=None, - auth_username=None, auth_password=None, auth_mode=None, - connect_timeout=None, request_timeout=None, - if_modified_since=None, follow_redirects=None, - max_redirects=None, user_agent=None, use_gzip=None, - network_interface=None, streaming_callback=None, - header_callback=None, prepare_curl_callback=None, - proxy_host=None, proxy_port=None, proxy_username=None, - proxy_password=None, proxy_auth_mode=None, - allow_nonstandard_methods=None, validate_cert=None, - ca_certs=None, allow_ipv6=None, client_key=None, - client_cert=None, body_producer=None, - expect_100_continue=False, decompress_response=None, - ssl_options=None): - r"""All parameters except ``url`` are optional. - - :arg str url: URL to fetch - :arg str method: HTTP method, e.g. "GET" or "POST" - :arg headers: Additional HTTP headers to pass on the request - :type headers: `~tornado.httputil.HTTPHeaders` or `dict` - :arg body: HTTP request body as a string (byte or unicode; if unicode - the utf-8 encoding will be used) - :arg body_producer: Callable used for lazy/asynchronous request bodies. - It is called with one argument, a ``write`` function, and should - return a `.Future`. It should call the write function with new - data as it becomes available. The write function returns a - `.Future` which can be used for flow control. - Only one of ``body`` and ``body_producer`` may - be specified. ``body_producer`` is not supported on - ``curl_httpclient``. When using ``body_producer`` it is recommended - to pass a ``Content-Length`` in the headers as otherwise chunked - encoding will be used, and many servers do not support chunked - encoding on requests. New in Tornado 4.0 - :arg str auth_username: Username for HTTP authentication - :arg str auth_password: Password for HTTP authentication - :arg str auth_mode: Authentication mode; default is "basic". - Allowed values are implementation-defined; ``curl_httpclient`` - supports "basic" and "digest"; ``simple_httpclient`` only supports - "basic" - :arg float connect_timeout: Timeout for initial connection in seconds, - default 20 seconds - :arg float request_timeout: Timeout for entire request in seconds, - default 20 seconds - :arg if_modified_since: Timestamp for ``If-Modified-Since`` header - :type if_modified_since: `datetime` or `float` - :arg bool follow_redirects: Should redirects be followed automatically - or return the 3xx response? Default True. - :arg int max_redirects: Limit for ``follow_redirects``, default 5. - :arg str user_agent: String to send as ``User-Agent`` header - :arg bool decompress_response: Request a compressed response from - the server and decompress it after downloading. Default is True. - New in Tornado 4.0. - :arg bool use_gzip: Deprecated alias for ``decompress_response`` - since Tornado 4.0. 
-        :arg str network_interface: Network interface to use for request.
-            ``curl_httpclient`` only; see note below.
-        :arg collections.abc.Callable streaming_callback: If set, ``streaming_callback`` will
-            be run with each chunk of data as it is received, and
-            ``HTTPResponse.body`` and ``HTTPResponse.buffer`` will be empty in
-            the final response.
-        :arg collections.abc.Callable header_callback: If set, ``header_callback`` will
-            be run with each header line as it is received (including the
-            first line, e.g. ``HTTP/1.0 200 OK\r\n``, and a final line
-            containing only ``\r\n``.  All lines include the trailing newline
-            characters).  ``HTTPResponse.headers`` will be empty in the final
-            response.  This is most useful in conjunction with
-            ``streaming_callback``, because it's the only way to get access to
-            header data while the request is in progress.
-        :arg collections.abc.Callable prepare_curl_callback: If set, will be called with
-            a ``pycurl.Curl`` object to allow the application to make additional
-            ``setopt`` calls.
-        :arg str proxy_host: HTTP proxy hostname.  To use proxies,
-            ``proxy_host`` and ``proxy_port`` must be set; ``proxy_username``,
-            ``proxy_password`` and ``proxy_auth_mode`` are optional.  Proxies
-            are currently only supported with ``curl_httpclient``.
-        :arg int proxy_port: HTTP proxy port
-        :arg str proxy_username: HTTP proxy username
-        :arg str proxy_password: HTTP proxy password
-        :arg str proxy_auth_mode: HTTP proxy authentication mode;
-            default is "basic".  Supports "basic" and "digest".
-        :arg bool allow_nonstandard_methods: Allow unknown values for ``method``
-            argument? Default is False.
-        :arg bool validate_cert: For HTTPS requests, validate the server's
-            certificate? Default is True.
-        :arg str ca_certs: filename of CA certificates in PEM format,
-            or None to use defaults.  See note below when used with
-            ``curl_httpclient``.
-        :arg str client_key: Filename for client SSL key, if any.  See
-            note below when used with ``curl_httpclient``.
-        :arg str client_cert: Filename for client SSL certificate, if any.
-            See note below when used with ``curl_httpclient``.
-        :arg ssl.SSLContext ssl_options: `ssl.SSLContext` object for use in
-            ``simple_httpclient`` (unsupported by ``curl_httpclient``).
-            Overrides ``validate_cert``, ``ca_certs``, ``client_key``,
-            and ``client_cert``.
-        :arg bool allow_ipv6: Use IPv6 when available? Default is true.
-        :arg bool expect_100_continue: If true, send the
-            ``Expect: 100-continue`` header and wait for a continue response
-            before sending the request body.  Only supported with
-            simple_httpclient.
-
-        .. note::
-
-            When using ``curl_httpclient`` certain options may be
-            inherited by subsequent fetches because ``pycurl`` does
-            not allow them to be cleanly reset.  This applies to the
-            ``ca_certs``, ``client_key``, ``client_cert``, and
-            ``network_interface`` arguments.  If you use these
-            options, you should pass them on every request (you don't
-            have to always use the same values, but it's not possible
-            to mix requests that specify these options with ones that
-            use the defaults).
-
-        .. versionadded:: 3.1
-           The ``auth_mode`` argument.
-
-        .. versionadded:: 4.0
-           The ``body_producer`` and ``expect_100_continue`` arguments.
-
-        .. versionadded:: 4.2
-           The ``ssl_options`` argument.
-
-        .. versionadded:: 4.5
-           The ``proxy_auth_mode`` argument.
-        """
-        # Note that some of these attributes go through property setters
-        # defined below.
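A sketch of constructing an `HTTPRequest` from the arguments documented above; the URL, body, and timeout values are placeholders:

```python
import json

from tornado.httpclient import HTTPRequest

# Illustrative values only.
request = HTTPRequest(
    url="http://example.com/api/items",
    method="POST",
    headers={"Content-Type": "application/json"},
    body=json.dumps({"name": "widget"}),
    connect_timeout=5.0,    # seconds; falls back to the 20s default if None
    request_timeout=30.0,
    follow_redirects=False,
    validate_cert=True,
)
```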
- self.headers = headers - if if_modified_since: - self.headers["If-Modified-Since"] = httputil.format_timestamp( - if_modified_since) - self.proxy_host = proxy_host - self.proxy_port = proxy_port - self.proxy_username = proxy_username - self.proxy_password = proxy_password - self.proxy_auth_mode = proxy_auth_mode - self.url = url - self.method = method - self.body = body - self.body_producer = body_producer - self.auth_username = auth_username - self.auth_password = auth_password - self.auth_mode = auth_mode - self.connect_timeout = connect_timeout - self.request_timeout = request_timeout - self.follow_redirects = follow_redirects - self.max_redirects = max_redirects - self.user_agent = user_agent - if decompress_response is not None: - self.decompress_response = decompress_response - else: - self.decompress_response = use_gzip - self.network_interface = network_interface - self.streaming_callback = streaming_callback - self.header_callback = header_callback - self.prepare_curl_callback = prepare_curl_callback - self.allow_nonstandard_methods = allow_nonstandard_methods - self.validate_cert = validate_cert - self.ca_certs = ca_certs - self.allow_ipv6 = allow_ipv6 - self.client_key = client_key - self.client_cert = client_cert - self.ssl_options = ssl_options - self.expect_100_continue = expect_100_continue - self.start_time = time.time() - - @property - def headers(self): - return self._headers - - @headers.setter - def headers(self, value): - if value is None: - self._headers = httputil.HTTPHeaders() - else: - self._headers = value - - @property - def body(self): - return self._body - - @body.setter - def body(self, value): - self._body = utf8(value) - - @property - def body_producer(self): - return self._body_producer - - @body_producer.setter - def body_producer(self, value): - self._body_producer = stack_context.wrap(value) - - @property - def streaming_callback(self): - return self._streaming_callback - - @streaming_callback.setter - def streaming_callback(self, value): - self._streaming_callback = stack_context.wrap(value) - - @property - def header_callback(self): - return self._header_callback - - @header_callback.setter - def header_callback(self, value): - self._header_callback = stack_context.wrap(value) - - @property - def prepare_curl_callback(self): - return self._prepare_curl_callback - - @prepare_curl_callback.setter - def prepare_curl_callback(self, value): - self._prepare_curl_callback = stack_context.wrap(value) - - -class HTTPResponse(object): - """HTTP Response object. - - Attributes: - - * request: HTTPRequest object - - * code: numeric HTTP status code, e.g. 200 or 404 - - * reason: human-readable reason phrase describing the status code - - * headers: `tornado.httputil.HTTPHeaders` object - - * effective_url: final location of the resource after following any - redirects - - * buffer: ``cStringIO`` object for response body - - * body: response body as bytes (created on demand from ``self.buffer``) - - * error: Exception object, if any - - * request_time: seconds from request start to finish. Includes all network - operations from DNS resolution to receiving the last byte of data. - Does not include time spent in the queue (due to the ``max_clients`` option). - If redirects were followed, only includes the final request. - - * start_time: Time at which the HTTP operation started, based on `time.time` - (not the monotonic clock used by `.IOLoop.time`). May be ``None`` if the request - timed out while in the queue. 
- - * time_info: dictionary of diagnostic timing information from the request. - Available data are subject to change, but currently uses timings - available from http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html, - plus ``queue``, which is the delay (if any) introduced by waiting for - a slot under `AsyncHTTPClient`'s ``max_clients`` setting. - - .. versionadded:: 5.1 - - Added the ``start_time`` attribute. - - .. versionchanged:: 5.1 - - The ``request_time`` attribute previously included time spent in the queue - for ``simple_httpclient``, but not in ``curl_httpclient``. Now queueing time - is excluded in both implementations. ``request_time`` is now more accurate for - ``curl_httpclient`` because it uses a monotonic clock when available. - """ - def __init__(self, request, code, headers=None, buffer=None, - effective_url=None, error=None, request_time=None, - time_info=None, reason=None, start_time=None): - if isinstance(request, _RequestProxy): - self.request = request.request - else: - self.request = request - self.code = code - self.reason = reason or httputil.responses.get(code, "Unknown") - if headers is not None: - self.headers = headers - else: - self.headers = httputil.HTTPHeaders() - self.buffer = buffer - self._body = None - if effective_url is None: - self.effective_url = request.url - else: - self.effective_url = effective_url - self._error_is_response_code = False - if error is None: - if self.code < 200 or self.code >= 300: - self._error_is_response_code = True - self.error = HTTPError(self.code, message=self.reason, - response=self) - else: - self.error = None - else: - self.error = error - self.start_time = start_time - self.request_time = request_time - self.time_info = time_info or {} - - @property - def body(self): - if self.buffer is None: - return None - elif self._body is None: - self._body = self.buffer.getvalue() - - return self._body - - def rethrow(self): - """If there was an error on the request, raise an `HTTPError`.""" - if self.error: - raise self.error - - def __repr__(self): - args = ",".join("%s=%r" % i for i in sorted(self.__dict__.items())) - return "%s(%s)" % (self.__class__.__name__, args) - - -class HTTPClientError(Exception): - """Exception thrown for an unsuccessful HTTP request. - - Attributes: - - * ``code`` - HTTP error integer error code, e.g. 404. Error code 599 is - used when no HTTP response was received, e.g. for a timeout. - - * ``response`` - `HTTPResponse` object, if any. - - Note that if ``follow_redirects`` is False, redirects become HTTPErrors, - and you can look at ``error.response.headers['Location']`` to see the - destination of the redirect. - - .. versionchanged:: 5.1 - - Renamed from ``HTTPError`` to ``HTTPClientError`` to avoid collisions with - `tornado.web.HTTPError`. The name ``tornado.httpclient.HTTPError`` remains - as an alias. - """ - def __init__(self, code, message=None, response=None): - self.code = code - self.message = message or httputil.responses.get(code, "Unknown") - self.response = response - super(HTTPClientError, self).__init__(code, message, response) - - def __str__(self): - return "HTTP %d: %s" % (self.code, self.message) - - # There is a cyclic reference between self and self.response, - # which breaks the default __repr__ implementation. - # (especially on pypy, which doesn't have the same recursion - # detection as cpython). - __repr__ = __str__ - - -HTTPError = HTTPClientError - - -class _RequestProxy(object): - """Combines an object with a dictionary of defaults. 
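The redirect note above can be sketched as follows, assuming a hypothetical URL that answers with a 3xx status::

    from tornado.httpclient import HTTPClient, HTTPError

    client = HTTPClient()
    try:
        client.fetch("http://example.com/old-path", follow_redirects=False)
    except HTTPError as e:
        # With follow_redirects=False a 3xx response surfaces as an HTTPError.
        if e.response is not None and 300 <= e.code < 400:
            print("redirects to", e.response.headers.get("Location"))
        else:
            raise
    finally:
        client.close()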
- - Used internally by AsyncHTTPClient implementations. - """ - def __init__(self, request, defaults): - self.request = request - self.defaults = defaults - - def __getattr__(self, name): - request_attr = getattr(self.request, name) - if request_attr is not None: - return request_attr - elif self.defaults is not None: - return self.defaults.get(name, None) - else: - return None - - -def main(): - from tornado.options import define, options, parse_command_line - define("print_headers", type=bool, default=False) - define("print_body", type=bool, default=True) - define("follow_redirects", type=bool, default=True) - define("validate_cert", type=bool, default=True) - define("proxy_host", type=str) - define("proxy_port", type=int) - args = parse_command_line() - client = HTTPClient() - for arg in args: - try: - response = client.fetch(arg, - follow_redirects=options.follow_redirects, - validate_cert=options.validate_cert, - proxy_host=options.proxy_host, - proxy_port=options.proxy_port, - ) - except HTTPError as e: - if e.response is not None: - response = e.response - else: - raise - if options.print_headers: - print(response.headers) - if options.print_body: - print(native_str(response.body)) - client.close() - - -if __name__ == "__main__": - main() diff --git a/lib/tornado/httpserver.py b/lib/tornado/httpserver.py deleted file mode 100755 index 3498d71f..00000000 --- a/lib/tornado/httpserver.py +++ /dev/null @@ -1,330 +0,0 @@ -# -# Copyright 2009 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""A non-blocking, single-threaded HTTP server. - -Typical applications have little direct interaction with the `HTTPServer` -class except to start a server at the beginning of the process -(and even that is often done indirectly via `tornado.web.Application.listen`). - -.. versionchanged:: 4.0 - - The ``HTTPRequest`` class that used to live in this module has been moved - to `tornado.httputil.HTTPServerRequest`. The old name remains as an alias. -""" - -from __future__ import absolute_import, division, print_function - -import socket - -from tornado.escape import native_str -from tornado.http1connection import HTTP1ServerConnection, HTTP1ConnectionParameters -from tornado import gen -from tornado import httputil -from tornado import iostream -from tornado import netutil -from tornado.tcpserver import TCPServer -from tornado.util import Configurable - - -class HTTPServer(TCPServer, Configurable, - httputil.HTTPServerConnectionDelegate): - r"""A non-blocking, single-threaded HTTP server. - - A server is defined by a subclass of `.HTTPServerConnectionDelegate`, - or, for backwards compatibility, a callback that takes an - `.HTTPServerRequest` as an argument. The delegate is usually a - `tornado.web.Application`. - - `HTTPServer` supports keep-alive connections by default - (automatically for HTTP/1.1, or for HTTP/1.0 when the client - requests ``Connection: keep-alive``). 
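The ``main()`` function above also makes the module usable as a small command-line HTTP client; for example (the URL is a placeholder)::

    python -m tornado.httpclient --print_headers http://example.com/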
- - If ``xheaders`` is ``True``, we support the - ``X-Real-Ip``/``X-Forwarded-For`` and - ``X-Scheme``/``X-Forwarded-Proto`` headers, which override the - remote IP and URI scheme/protocol for all requests. These headers - are useful when running Tornado behind a reverse proxy or load - balancer. The ``protocol`` argument can also be set to ``https`` - if Tornado is run behind an SSL-decoding proxy that does not set one of - the supported ``xheaders``. - - By default, when parsing the ``X-Forwarded-For`` header, Tornado will - select the last (i.e., the closest) address on the list of hosts as the - remote host IP address. To select the next server in the chain, a list of - trusted downstream hosts may be passed as the ``trusted_downstream`` - argument. These hosts will be skipped when parsing the ``X-Forwarded-For`` - header. - - To make this server serve SSL traffic, send the ``ssl_options`` keyword - argument with an `ssl.SSLContext` object. For compatibility with older - versions of Python ``ssl_options`` may also be a dictionary of keyword - arguments for the `ssl.wrap_socket` method.:: - - ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) - ssl_ctx.load_cert_chain(os.path.join(data_dir, "mydomain.crt"), - os.path.join(data_dir, "mydomain.key")) - HTTPServer(application, ssl_options=ssl_ctx) - - `HTTPServer` initialization follows one of three patterns (the - initialization methods are defined on `tornado.tcpserver.TCPServer`): - - 1. `~tornado.tcpserver.TCPServer.listen`: simple single-process:: - - server = HTTPServer(app) - server.listen(8888) - IOLoop.current().start() - - In many cases, `tornado.web.Application.listen` can be used to avoid - the need to explicitly create the `HTTPServer`. - - 2. `~tornado.tcpserver.TCPServer.bind`/`~tornado.tcpserver.TCPServer.start`: - simple multi-process:: - - server = HTTPServer(app) - server.bind(8888) - server.start(0) # Forks multiple sub-processes - IOLoop.current().start() - - When using this interface, an `.IOLoop` must *not* be passed - to the `HTTPServer` constructor. `~.TCPServer.start` will always start - the server on the default singleton `.IOLoop`. - - 3. `~tornado.tcpserver.TCPServer.add_sockets`: advanced multi-process:: - - sockets = tornado.netutil.bind_sockets(8888) - tornado.process.fork_processes(0) - server = HTTPServer(app) - server.add_sockets(sockets) - IOLoop.current().start() - - The `~.TCPServer.add_sockets` interface is more complicated, - but it can be used with `tornado.process.fork_processes` to - give you more flexibility in when the fork happens. - `~.TCPServer.add_sockets` can also be used in single-process - servers if you want to create your listening sockets in some - way other than `tornado.netutil.bind_sockets`. - - .. versionchanged:: 4.0 - Added ``decompress_request``, ``chunk_size``, ``max_header_size``, - ``idle_connection_timeout``, ``body_timeout``, ``max_body_size`` - arguments. Added support for `.HTTPServerConnectionDelegate` - instances as ``request_callback``. - - .. versionchanged:: 4.1 - `.HTTPServerConnectionDelegate.start_request` is now called with - two arguments ``(server_conn, request_conn)`` (in accordance with the - documentation) instead of one ``(request_conn)``. - - .. versionchanged:: 4.2 - `HTTPServer` is now a subclass of `tornado.util.Configurable`. - - .. versionchanged:: 4.5 - Added the ``trusted_downstream`` argument. - - .. versionchanged:: 5.0 - The ``io_loop`` argument has been removed. 
- """ - def __init__(self, *args, **kwargs): - # Ignore args to __init__; real initialization belongs in - # initialize since we're Configurable. (there's something - # weird in initialization order between this class, - # Configurable, and TCPServer so we can't leave __init__ out - # completely) - pass - - def initialize(self, request_callback, no_keep_alive=False, - xheaders=False, ssl_options=None, protocol=None, - decompress_request=False, - chunk_size=None, max_header_size=None, - idle_connection_timeout=None, body_timeout=None, - max_body_size=None, max_buffer_size=None, - trusted_downstream=None): - self.request_callback = request_callback - self.xheaders = xheaders - self.protocol = protocol - self.conn_params = HTTP1ConnectionParameters( - decompress=decompress_request, - chunk_size=chunk_size, - max_header_size=max_header_size, - header_timeout=idle_connection_timeout or 3600, - max_body_size=max_body_size, - body_timeout=body_timeout, - no_keep_alive=no_keep_alive) - TCPServer.__init__(self, ssl_options=ssl_options, - max_buffer_size=max_buffer_size, - read_chunk_size=chunk_size) - self._connections = set() - self.trusted_downstream = trusted_downstream - - @classmethod - def configurable_base(cls): - return HTTPServer - - @classmethod - def configurable_default(cls): - return HTTPServer - - @gen.coroutine - def close_all_connections(self): - while self._connections: - # Peek at an arbitrary element of the set - conn = next(iter(self._connections)) - yield conn.close() - - def handle_stream(self, stream, address): - context = _HTTPRequestContext(stream, address, - self.protocol, - self.trusted_downstream) - conn = HTTP1ServerConnection( - stream, self.conn_params, context) - self._connections.add(conn) - conn.start_serving(self) - - def start_request(self, server_conn, request_conn): - if isinstance(self.request_callback, httputil.HTTPServerConnectionDelegate): - delegate = self.request_callback.start_request(server_conn, request_conn) - else: - delegate = _CallableAdapter(self.request_callback, request_conn) - - if self.xheaders: - delegate = _ProxyAdapter(delegate, request_conn) - - return delegate - - def on_close(self, server_conn): - self._connections.remove(server_conn) - - -class _CallableAdapter(httputil.HTTPMessageDelegate): - def __init__(self, request_callback, request_conn): - self.connection = request_conn - self.request_callback = request_callback - self.request = None - self.delegate = None - self._chunks = [] - - def headers_received(self, start_line, headers): - self.request = httputil.HTTPServerRequest( - connection=self.connection, start_line=start_line, - headers=headers) - - def data_received(self, chunk): - self._chunks.append(chunk) - - def finish(self): - self.request.body = b''.join(self._chunks) - self.request._parse_body() - self.request_callback(self.request) - - def on_connection_close(self): - self._chunks = None - - -class _HTTPRequestContext(object): - def __init__(self, stream, address, protocol, trusted_downstream=None): - self.address = address - # Save the socket's address family now so we know how to - # interpret self.address even after the stream is closed - # and its socket attribute replaced with None. - if stream.socket is not None: - self.address_family = stream.socket.family - else: - self.address_family = None - # In HTTPServerRequest we want an IP, not a full socket address. 
- if (self.address_family in (socket.AF_INET, socket.AF_INET6) and - address is not None): - self.remote_ip = address[0] - else: - # Unix (or other) socket; fake the remote address. - self.remote_ip = '0.0.0.0' - if protocol: - self.protocol = protocol - elif isinstance(stream, iostream.SSLIOStream): - self.protocol = "https" - else: - self.protocol = "http" - self._orig_remote_ip = self.remote_ip - self._orig_protocol = self.protocol - self.trusted_downstream = set(trusted_downstream or []) - - def __str__(self): - if self.address_family in (socket.AF_INET, socket.AF_INET6): - return self.remote_ip - elif isinstance(self.address, bytes): - # Python 3 with the -bb option warns about str(bytes), - # so convert it explicitly. - # Unix socket addresses are str on mac but bytes on linux. - return native_str(self.address) - else: - return str(self.address) - - def _apply_xheaders(self, headers): - """Rewrite the ``remote_ip`` and ``protocol`` fields.""" - # Squid uses X-Forwarded-For, others use X-Real-Ip - ip = headers.get("X-Forwarded-For", self.remote_ip) - # Skip trusted downstream hosts in X-Forwarded-For list - for ip in (cand.strip() for cand in reversed(ip.split(','))): - if ip not in self.trusted_downstream: - break - ip = headers.get("X-Real-Ip", ip) - if netutil.is_valid_ip(ip): - self.remote_ip = ip - # AWS uses X-Forwarded-Proto - proto_header = headers.get( - "X-Scheme", headers.get("X-Forwarded-Proto", - self.protocol)) - if proto_header: - # use only the last proto entry if there is more than one - # TODO: support trusting mutiple layers of proxied protocol - proto_header = proto_header.split(',')[-1].strip() - if proto_header in ("http", "https"): - self.protocol = proto_header - - def _unapply_xheaders(self): - """Undo changes from `_apply_xheaders`. - - Xheaders are per-request so they should not leak to the next - request on the same connection. - """ - self.remote_ip = self._orig_remote_ip - self.protocol = self._orig_protocol - - -class _ProxyAdapter(httputil.HTTPMessageDelegate): - def __init__(self, delegate, request_conn): - self.connection = request_conn - self.delegate = delegate - - def headers_received(self, start_line, headers): - self.connection.context._apply_xheaders(headers) - return self.delegate.headers_received(start_line, headers) - - def data_received(self, chunk): - return self.delegate.data_received(chunk) - - def finish(self): - self.delegate.finish() - self._cleanup() - - def on_connection_close(self): - self.delegate.on_connection_close() - self._cleanup() - - def _cleanup(self): - self.connection.context._unapply_xheaders() - - -HTTPRequest = httputil.HTTPServerRequest diff --git a/lib/tornado/httputil.py b/lib/tornado/httputil.py deleted file mode 100755 index 39614466..00000000 --- a/lib/tornado/httputil.py +++ /dev/null @@ -1,1095 +0,0 @@ -# -# Copyright 2009 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""HTTP utility code shared by clients and servers. 
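A sketch of the ``xheaders``/``trusted_downstream`` machinery above in use; the proxy address ``10.0.0.1`` and the port are assumptions, not values from this patch::

    import tornado.web
    from tornado.httpserver import HTTPServer
    from tornado.ioloop import IOLoop

    class EchoIP(tornado.web.RequestHandler):
        def get(self):
            # With xheaders=True this reflects X-Real-Ip/X-Forwarded-For,
            # skipping the trusted downstream hop.
            self.write(self.request.remote_ip)

    app = tornado.web.Application([(r"/", EchoIP)])
    server = HTTPServer(app, xheaders=True, trusted_downstream=["10.0.0.1"])
    server.listen(8888)
    IOLoop.current().start()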
- -This module also defines the `HTTPServerRequest` class which is exposed -via `tornado.web.RequestHandler.request`. -""" - -from __future__ import absolute_import, division, print_function - -import calendar -import collections -import copy -import datetime -import email.utils -import numbers -import re -import time -import unicodedata -import warnings - -from tornado.escape import native_str, parse_qs_bytes, utf8 -from tornado.log import gen_log -from tornado.util import ObjectDict, PY3, unicode_type - -if PY3: - import http.cookies as Cookie - from http.client import responses - from urllib.parse import urlencode, urlparse, urlunparse, parse_qsl -else: - import Cookie - from httplib import responses - from urllib import urlencode - from urlparse import urlparse, urlunparse, parse_qsl - - -# responses is unused in this file, but we re-export it to other files. -# Reference it so pyflakes doesn't complain. -responses - -try: - from ssl import SSLError -except ImportError: - # ssl is unavailable on app engine. - class _SSLError(Exception): - pass - # Hack around a mypy limitation. We can't simply put "type: ignore" - # on the class definition itself; must go through an assignment. - SSLError = _SSLError # type: ignore - -try: - import typing # noqa: F401 -except ImportError: - pass - - -# RFC 7230 section 3.5: a recipient MAY recognize a single LF as a line -# terminator and ignore any preceding CR. -_CRLF_RE = re.compile(r'\r?\n') - - -class _NormalizedHeaderCache(dict): - """Dynamic cached mapping of header names to Http-Header-Case. - - Implemented as a dict subclass so that cache hits are as fast as a - normal dict lookup, without the overhead of a python function - call. - - >>> normalized_headers = _NormalizedHeaderCache(10) - >>> normalized_headers["coNtent-TYPE"] - 'Content-Type' - """ - def __init__(self, size): - super(_NormalizedHeaderCache, self).__init__() - self.size = size - self.queue = collections.deque() - - def __missing__(self, key): - normalized = "-".join([w.capitalize() for w in key.split("-")]) - self[key] = normalized - self.queue.append(key) - if len(self.queue) > self.size: - # Limit the size of the cache. LRU would be better, but this - # simpler approach should be fine. In Python 2.7+ we could - # use OrderedDict (or in 3.2+, @functools.lru_cache). - old_key = self.queue.popleft() - del self[old_key] - return normalized - - -_normalized_headers = _NormalizedHeaderCache(1000) - - -class HTTPHeaders(collections.MutableMapping): - """A dictionary that maintains ``Http-Header-Case`` for all keys. - - Supports multiple values per key via a pair of new methods, - `add()` and `get_list()`. The regular dictionary interface - returns a single value per key, with multiple values joined by a - comma. - - >>> h = HTTPHeaders({"content-type": "text/html"}) - >>> list(h.keys()) - ['Content-Type'] - >>> h["Content-Type"] - 'text/html' - - >>> h.add("Set-Cookie", "A=B") - >>> h.add("Set-Cookie", "C=D") - >>> h["set-cookie"] - 'A=B,C=D' - >>> h.get_list("set-cookie") - ['A=B', 'C=D'] - - >>> for (k,v) in sorted(h.get_all()): - ... print('%s: %s' % (k,v)) - ... 
- Content-Type: text/html - Set-Cookie: A=B - Set-Cookie: C=D - """ - def __init__(self, *args, **kwargs): - self._dict = {} # type: typing.Dict[str, str] - self._as_list = {} # type: typing.Dict[str, typing.List[str]] - self._last_key = None - if (len(args) == 1 and len(kwargs) == 0 and - isinstance(args[0], HTTPHeaders)): - # Copy constructor - for k, v in args[0].get_all(): - self.add(k, v) - else: - # Dict-style initialization - self.update(*args, **kwargs) - - # new public methods - - def add(self, name, value): - # type: (str, str) -> None - """Adds a new value for the given key.""" - norm_name = _normalized_headers[name] - self._last_key = norm_name - if norm_name in self: - self._dict[norm_name] = (native_str(self[norm_name]) + ',' + - native_str(value)) - self._as_list[norm_name].append(value) - else: - self[norm_name] = value - - def get_list(self, name): - """Returns all values for the given header as a list.""" - norm_name = _normalized_headers[name] - return self._as_list.get(norm_name, []) - - def get_all(self): - # type: () -> typing.Iterable[typing.Tuple[str, str]] - """Returns an iterable of all (name, value) pairs. - - If a header has multiple values, multiple pairs will be - returned with the same name. - """ - for name, values in self._as_list.items(): - for value in values: - yield (name, value) - - def parse_line(self, line): - """Updates the dictionary with a single header line. - - >>> h = HTTPHeaders() - >>> h.parse_line("Content-Type: text/html") - >>> h.get('content-type') - 'text/html' - """ - if line[0].isspace(): - # continuation of a multi-line header - if self._last_key is None: - raise HTTPInputError("first header line cannot start with whitespace") - new_part = ' ' + line.lstrip() - self._as_list[self._last_key][-1] += new_part - self._dict[self._last_key] += new_part - else: - try: - name, value = line.split(":", 1) - except ValueError: - raise HTTPInputError("no colon in header line") - self.add(name, value.strip()) - - @classmethod - def parse(cls, headers): - """Returns a dictionary from HTTP header text. - - >>> h = HTTPHeaders.parse("Content-Type: text/html\\r\\nContent-Length: 42\\r\\n") - >>> sorted(h.items()) - [('Content-Length', '42'), ('Content-Type', 'text/html')] - - .. versionchanged:: 5.1 - - Raises `HTTPInputError` on malformed headers instead of a - mix of `KeyError`, and `ValueError`. - - """ - h = cls() - for line in _CRLF_RE.split(headers): - if line: - h.parse_line(line) - return h - - # MutableMapping abstract method implementations. - - def __setitem__(self, name, value): - norm_name = _normalized_headers[name] - self._dict[norm_name] = value - self._as_list[norm_name] = [value] - - def __getitem__(self, name): - # type: (str) -> str - return self._dict[_normalized_headers[name]] - - def __delitem__(self, name): - norm_name = _normalized_headers[name] - del self._dict[norm_name] - del self._as_list[norm_name] - - def __len__(self): - return len(self._dict) - - def __iter__(self): - return iter(self._dict) - - def copy(self): - # defined in dict but not in MutableMapping. - return HTTPHeaders(self) - - # Use our overridden copy method for the copy.copy module. - # This makes shallow copies one level deeper, but preserves - # the appearance that HTTPHeaders is a single container. - __copy__ = copy - - def __str__(self): - lines = [] - for name, value in self.get_all(): - lines.append("%s: %s\n" % (name, value)) - return "".join(lines) - - __unicode__ = __str__ - - -class HTTPServerRequest(object): - """A single HTTP request. 
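A quick illustration of the multi-value behaviour documented above, mirroring the doctests::

    from tornado.httputil import HTTPHeaders

    h = HTTPHeaders.parse(
        "Content-Type: text/html\r\nSet-Cookie: A=B\r\nSet-Cookie: C=D\r\n")
    assert h["set-cookie"] == "A=B,C=D"                 # joined view
    assert h.get_list("Set-Cookie") == ["A=B", "C=D"]   # per-value view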
- - All attributes are type `str` unless otherwise noted. - - .. attribute:: method - - HTTP request method, e.g. "GET" or "POST" - - .. attribute:: uri - - The requested uri. - - .. attribute:: path - - The path portion of `uri` - - .. attribute:: query - - The query portion of `uri` - - .. attribute:: version - - HTTP version specified in request, e.g. "HTTP/1.1" - - .. attribute:: headers - - `.HTTPHeaders` dictionary-like object for request headers. Acts like - a case-insensitive dictionary with additional methods for repeated - headers. - - .. attribute:: body - - Request body, if present, as a byte string. - - .. attribute:: remote_ip - - Client's IP address as a string. If ``HTTPServer.xheaders`` is set, - will pass along the real IP address provided by a load balancer - in the ``X-Real-Ip`` or ``X-Forwarded-For`` header. - - .. versionchanged:: 3.1 - The list format of ``X-Forwarded-For`` is now supported. - - .. attribute:: protocol - - The protocol used, either "http" or "https". If ``HTTPServer.xheaders`` - is set, will pass along the protocol used by a load balancer if - reported via an ``X-Scheme`` header. - - .. attribute:: host - - The requested hostname, usually taken from the ``Host`` header. - - .. attribute:: arguments - - GET/POST arguments are available in the arguments property, which - maps arguments names to lists of values (to support multiple values - for individual names). Names are of type `str`, while arguments - are byte strings. Note that this is different from - `.RequestHandler.get_argument`, which returns argument values as - unicode strings. - - .. attribute:: query_arguments - - Same format as ``arguments``, but contains only arguments extracted - from the query string. - - .. versionadded:: 3.2 - - .. attribute:: body_arguments - - Same format as ``arguments``, but contains only arguments extracted - from the request body. - - .. versionadded:: 3.2 - - .. attribute:: files - - File uploads are available in the files property, which maps file - names to lists of `.HTTPFile`. - - .. attribute:: connection - - An HTTP request is attached to a single HTTP connection, which can - be accessed through the "connection" attribute. Since connections - are typically kept open in HTTP/1.1, multiple requests can be handled - sequentially on a single connection. - - .. versionchanged:: 4.0 - Moved from ``tornado.httpserver.HTTPRequest``. 
- """ - def __init__(self, method=None, uri=None, version="HTTP/1.0", headers=None, - body=None, host=None, files=None, connection=None, - start_line=None, server_connection=None): - if start_line is not None: - method, uri, version = start_line - self.method = method - self.uri = uri - self.version = version - self.headers = headers or HTTPHeaders() - self.body = body or b"" - - # set remote IP and protocol - context = getattr(connection, 'context', None) - self.remote_ip = getattr(context, 'remote_ip', None) - self.protocol = getattr(context, 'protocol', "http") - - self.host = host or self.headers.get("Host") or "127.0.0.1" - self.host_name = split_host_and_port(self.host.lower())[0] - self.files = files or {} - self.connection = connection - self.server_connection = server_connection - self._start_time = time.time() - self._finish_time = None - - self.path, sep, self.query = uri.partition('?') - self.arguments = parse_qs_bytes(self.query, keep_blank_values=True) - self.query_arguments = copy.deepcopy(self.arguments) - self.body_arguments = {} - - def supports_http_1_1(self): - """Returns True if this request supports HTTP/1.1 semantics. - - .. deprecated:: 4.0 - - Applications are less likely to need this information with - the introduction of `.HTTPConnection`. If you still need - it, access the ``version`` attribute directly. This method - will be removed in Tornado 6.0. - - """ - warnings.warn("supports_http_1_1() is deprecated, use request.version instead", - DeprecationWarning) - return self.version == "HTTP/1.1" - - @property - def cookies(self): - """A dictionary of Cookie.Morsel objects.""" - if not hasattr(self, "_cookies"): - self._cookies = Cookie.SimpleCookie() - if "Cookie" in self.headers: - try: - parsed = parse_cookie(self.headers["Cookie"]) - except Exception: - pass - else: - for k, v in parsed.items(): - try: - self._cookies[k] = v - except Exception: - # SimpleCookie imposes some restrictions on keys; - # parse_cookie does not. Discard any cookies - # with disallowed keys. - pass - return self._cookies - - def write(self, chunk, callback=None): - """Writes the given chunk to the response stream. - - .. deprecated:: 4.0 - Use ``request.connection`` and the `.HTTPConnection` methods - to write the response. This method will be removed in Tornado 6.0. - """ - warnings.warn("req.write deprecated, use req.connection.write and write_headers instead", - DeprecationWarning) - assert isinstance(chunk, bytes) - assert self.version.startswith("HTTP/1."), \ - "deprecated interface only supported in HTTP/1.x" - self.connection.write(chunk, callback=callback) - - def finish(self): - """Finishes this HTTP request on the open connection. - - .. deprecated:: 4.0 - Use ``request.connection`` and the `.HTTPConnection` methods - to write the response. This method will be removed in Tornado 6.0. - """ - warnings.warn("req.finish deprecated, use req.connection.finish instead", - DeprecationWarning) - self.connection.finish() - self._finish_time = time.time() - - def full_url(self): - """Reconstructs the full URL for this request.""" - return self.protocol + "://" + self.host + self.uri - - def request_time(self): - """Returns the amount of time it took for this request to execute.""" - if self._finish_time is None: - return time.time() - self._start_time - else: - return self._finish_time - self._start_time - - def get_ssl_certificate(self, binary_form=False): - """Returns the client's SSL certificate, if any. 
- - To use client certificates, the HTTPServer's - `ssl.SSLContext.verify_mode` field must be set, e.g.:: - - ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) - ssl_ctx.load_cert_chain("foo.crt", "foo.key") - ssl_ctx.load_verify_locations("cacerts.pem") - ssl_ctx.verify_mode = ssl.CERT_REQUIRED - server = HTTPServer(app, ssl_options=ssl_ctx) - - By default, the return value is a dictionary (or None, if no - client certificate is present). If ``binary_form`` is true, a - DER-encoded form of the certificate is returned instead. See - SSLSocket.getpeercert() in the standard library for more - details. - http://docs.python.org/library/ssl.html#sslsocket-objects - """ - try: - return self.connection.stream.socket.getpeercert( - binary_form=binary_form) - except SSLError: - return None - - def _parse_body(self): - parse_body_arguments( - self.headers.get("Content-Type", ""), self.body, - self.body_arguments, self.files, - self.headers) - - for k, v in self.body_arguments.items(): - self.arguments.setdefault(k, []).extend(v) - - def __repr__(self): - attrs = ("protocol", "host", "method", "uri", "version", "remote_ip") - args = ", ".join(["%s=%r" % (n, getattr(self, n)) for n in attrs]) - return "%s(%s)" % (self.__class__.__name__, args) - - -class HTTPInputError(Exception): - """Exception class for malformed HTTP requests or responses - from remote sources. - - .. versionadded:: 4.0 - """ - pass - - -class HTTPOutputError(Exception): - """Exception class for errors in HTTP output. - - .. versionadded:: 4.0 - """ - pass - - -class HTTPServerConnectionDelegate(object): - """Implement this interface to handle requests from `.HTTPServer`. - - .. versionadded:: 4.0 - """ - def start_request(self, server_conn, request_conn): - """This method is called by the server when a new request has started. - - :arg server_conn: is an opaque object representing the long-lived - (e.g. tcp-level) connection. - :arg request_conn: is a `.HTTPConnection` object for a single - request/response exchange. - - This method should return a `.HTTPMessageDelegate`. - """ - raise NotImplementedError() - - def on_close(self, server_conn): - """This method is called when a connection has been closed. - - :arg server_conn: is a server connection that has previously been - passed to ``start_request``. - """ - pass - - -class HTTPMessageDelegate(object): - """Implement this interface to handle an HTTP request or response. - - .. versionadded:: 4.0 - """ - def headers_received(self, start_line, headers): - """Called when the HTTP headers have been received and parsed. - - :arg start_line: a `.RequestStartLine` or `.ResponseStartLine` - depending on whether this is a client or server message. - :arg headers: a `.HTTPHeaders` instance. - - Some `.HTTPConnection` methods can only be called during - ``headers_received``. - - May return a `.Future`; if it does the body will not be read - until it is done. - """ - pass - - def data_received(self, chunk): - """Called when a chunk of data has been received. - - May return a `.Future` for flow control. - """ - pass - - def finish(self): - """Called after the last chunk of data has been received.""" - pass - - def on_connection_close(self): - """Called if the connection is closed without finishing the request. - - If ``headers_received`` is called, either ``finish`` or - ``on_connection_close`` will be called, but not both. - """ - pass - - -class HTTPConnection(object): - """Applications use this interface to write their responses. - - .. 
versionadded:: 4.0 - """ - def write_headers(self, start_line, headers, chunk=None, callback=None): - """Write an HTTP header block. - - :arg start_line: a `.RequestStartLine` or `.ResponseStartLine`. - :arg headers: a `.HTTPHeaders` instance. - :arg chunk: the first (optional) chunk of data. This is an optimization - so that small responses can be written in the same call as their - headers. - :arg callback: a callback to be run when the write is complete. - - The ``version`` field of ``start_line`` is ignored. - - Returns a `.Future` if no callback is given. - - .. deprecated:: 5.1 - - The ``callback`` argument is deprecated and will be removed - in Tornado 6.0. - """ - raise NotImplementedError() - - def write(self, chunk, callback=None): - """Writes a chunk of body data. - - The callback will be run when the write is complete. If no callback - is given, returns a Future. - - .. deprecated:: 5.1 - - The ``callback`` argument is deprecated and will be removed - in Tornado 6.0. - """ - raise NotImplementedError() - - def finish(self): - """Indicates that the last body data has been written. - """ - raise NotImplementedError() - - -def url_concat(url, args): - """Concatenate url and arguments regardless of whether - url has existing query parameters. - - ``args`` may be either a dictionary or a list of key-value pairs - (the latter allows for multiple values with the same key. - - >>> url_concat("http://example.com/foo", dict(c="d")) - 'http://example.com/foo?c=d' - >>> url_concat("http://example.com/foo?a=b", dict(c="d")) - 'http://example.com/foo?a=b&c=d' - >>> url_concat("http://example.com/foo?a=b", [("c", "d"), ("c", "d2")]) - 'http://example.com/foo?a=b&c=d&c=d2' - """ - if args is None: - return url - parsed_url = urlparse(url) - if isinstance(args, dict): - parsed_query = parse_qsl(parsed_url.query, keep_blank_values=True) - parsed_query.extend(args.items()) - elif isinstance(args, list) or isinstance(args, tuple): - parsed_query = parse_qsl(parsed_url.query, keep_blank_values=True) - parsed_query.extend(args) - else: - err = "'args' parameter should be dict, list or tuple. Not {0}".format( - type(args)) - raise TypeError(err) - final_query = urlencode(parsed_query) - url = urlunparse(( - parsed_url[0], - parsed_url[1], - parsed_url[2], - parsed_url[3], - final_query, - parsed_url[5])) - return url - - -class HTTPFile(ObjectDict): - """Represents a file uploaded via a form. - - For backwards compatibility, its instance attributes are also - accessible as dictionary keys. - - * ``filename`` - * ``body`` - * ``content_type`` - """ - pass - - -def _parse_request_range(range_header): - """Parses a Range header. - - Returns either ``None`` or tuple ``(start, end)``. - Note that while the HTTP headers use inclusive byte positions, - this method returns indexes suitable for use in slices. - - >>> start, end = _parse_request_range("bytes=1-2") - >>> start, end - (1, 3) - >>> [0, 1, 2, 3, 4][start:end] - [1, 2] - >>> _parse_request_range("bytes=6-") - (6, None) - >>> _parse_request_range("bytes=-6") - (-6, None) - >>> _parse_request_range("bytes=-0") - (None, 0) - >>> _parse_request_range("bytes=") - (None, None) - >>> _parse_request_range("foo=42") - >>> _parse_request_range("bytes=1-2,6-10") - - Note: only supports one range (ex, ``bytes=1-2,6-10`` is not allowed). - - See [0] for the details of the range header. 
- - [0]: http://greenbytes.de/tech/webdav/draft-ietf-httpbis-p5-range-latest.html#byte.ranges - """ - unit, _, value = range_header.partition("=") - unit, value = unit.strip(), value.strip() - if unit != "bytes": - return None - start_b, _, end_b = value.partition("-") - try: - start = _int_or_none(start_b) - end = _int_or_none(end_b) - except ValueError: - return None - if end is not None: - if start is None: - if end != 0: - start = -end - end = None - else: - end += 1 - return (start, end) - - -def _get_content_range(start, end, total): - """Returns a suitable Content-Range header: - - >>> print(_get_content_range(None, 1, 4)) - bytes 0-0/4 - >>> print(_get_content_range(1, 3, 4)) - bytes 1-2/4 - >>> print(_get_content_range(None, None, 4)) - bytes 0-3/4 - """ - start = start or 0 - end = (end or total) - 1 - return "bytes %s-%s/%s" % (start, end, total) - - -def _int_or_none(val): - val = val.strip() - if val == "": - return None - return int(val) - - -def parse_body_arguments(content_type, body, arguments, files, headers=None): - """Parses a form request body. - - Supports ``application/x-www-form-urlencoded`` and - ``multipart/form-data``. The ``content_type`` parameter should be - a string and ``body`` should be a byte string. The ``arguments`` - and ``files`` parameters are dictionaries that will be updated - with the parsed contents. - """ - if headers and 'Content-Encoding' in headers: - gen_log.warning("Unsupported Content-Encoding: %s", - headers['Content-Encoding']) - return - if content_type.startswith("application/x-www-form-urlencoded"): - try: - uri_arguments = parse_qs_bytes(native_str(body), keep_blank_values=True) - except Exception as e: - gen_log.warning('Invalid x-www-form-urlencoded body: %s', e) - uri_arguments = {} - for name, values in uri_arguments.items(): - if values: - arguments.setdefault(name, []).extend(values) - elif content_type.startswith("multipart/form-data"): - try: - fields = content_type.split(";") - for field in fields: - k, sep, v = field.strip().partition("=") - if k == "boundary" and v: - parse_multipart_form_data(utf8(v), body, arguments, files) - break - else: - raise ValueError("multipart boundary not found") - except Exception as e: - gen_log.warning("Invalid multipart/form-data: %s", e) - - -def parse_multipart_form_data(boundary, data, arguments, files): - """Parses a ``multipart/form-data`` body. - - The ``boundary`` and ``data`` parameters are both byte strings. - The dictionaries given in the arguments and files parameters - will be updated with the contents of the body. - - .. versionchanged:: 5.1 - - Now recognizes non-ASCII filenames in RFC 2231/5987 - (``filename*=``) format. - """ - # The standard allows for the boundary to be quoted in the header, - # although it's rare (it happens at least for google app engine - # xmpp). I think we're also supposed to handle backslash-escapes - # here but I'll save that until we see a client that uses them - # in the wild. 
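A minimal sketch of ``parse_body_arguments`` on a urlencoded body; the field names are arbitrary::

    from tornado.httputil import parse_body_arguments

    arguments, files = {}, {}
    parse_body_arguments("application/x-www-form-urlencoded",
                         b"a=1&a=2&b=3", arguments, files)
    # Values are byte strings; repeated names accumulate.
    assert arguments == {"a": [b"1", b"2"], "b": [b"3"]}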
- if boundary.startswith(b'"') and boundary.endswith(b'"'): - boundary = boundary[1:-1] - final_boundary_index = data.rfind(b"--" + boundary + b"--") - if final_boundary_index == -1: - gen_log.warning("Invalid multipart/form-data: no final boundary") - return - parts = data[:final_boundary_index].split(b"--" + boundary + b"\r\n") - for part in parts: - if not part: - continue - eoh = part.find(b"\r\n\r\n") - if eoh == -1: - gen_log.warning("multipart/form-data missing headers") - continue - headers = HTTPHeaders.parse(part[:eoh].decode("utf-8")) - disp_header = headers.get("Content-Disposition", "") - disposition, disp_params = _parse_header(disp_header) - if disposition != "form-data" or not part.endswith(b"\r\n"): - gen_log.warning("Invalid multipart/form-data") - continue - value = part[eoh + 4:-2] - if not disp_params.get("name"): - gen_log.warning("multipart/form-data value missing name") - continue - name = disp_params["name"] - if disp_params.get("filename"): - ctype = headers.get("Content-Type", "application/unknown") - files.setdefault(name, []).append(HTTPFile( # type: ignore - filename=disp_params["filename"], body=value, - content_type=ctype)) - else: - arguments.setdefault(name, []).append(value) - - -def format_timestamp(ts): - """Formats a timestamp in the format used by HTTP. - - The argument may be a numeric timestamp as returned by `time.time`, - a time tuple as returned by `time.gmtime`, or a `datetime.datetime` - object. - - >>> format_timestamp(1359312200) - 'Sun, 27 Jan 2013 18:43:20 GMT' - """ - if isinstance(ts, numbers.Real): - pass - elif isinstance(ts, (tuple, time.struct_time)): - ts = calendar.timegm(ts) - elif isinstance(ts, datetime.datetime): - ts = calendar.timegm(ts.utctimetuple()) - else: - raise TypeError("unknown timestamp type: %r" % ts) - return email.utils.formatdate(ts, usegmt=True) - - -RequestStartLine = collections.namedtuple( - 'RequestStartLine', ['method', 'path', 'version']) - - -def parse_request_start_line(line): - """Returns a (method, path, version) tuple for an HTTP 1.x request line. - - The response is a `collections.namedtuple`. - - >>> parse_request_start_line("GET /foo HTTP/1.1") - RequestStartLine(method='GET', path='/foo', version='HTTP/1.1') - """ - try: - method, path, version = line.split(" ") - except ValueError: - # https://tools.ietf.org/html/rfc7230#section-3.1.1 - # invalid request-line SHOULD respond with a 400 (Bad Request) - raise HTTPInputError("Malformed HTTP request line") - if not re.match(r"^HTTP/1\.[0-9]$", version): - raise HTTPInputError( - "Malformed HTTP version in HTTP Request-Line: %r" % version) - return RequestStartLine(method, path, version) - - -ResponseStartLine = collections.namedtuple( - 'ResponseStartLine', ['version', 'code', 'reason']) - - -def parse_response_start_line(line): - """Returns a (version, code, reason) tuple for an HTTP 1.x response line. - - The response is a `collections.namedtuple`. - - >>> parse_response_start_line("HTTP/1.1 200 OK") - ResponseStartLine(version='HTTP/1.1', code=200, reason='OK') - """ - line = native_str(line) - match = re.match("(HTTP/1.[0-9]) ([0-9]+) ([^\r]*)", line) - if not match: - raise HTTPInputError("Error parsing response start line") - return ResponseStartLine(match.group(1), int(match.group(2)), - match.group(3)) - -# _parseparam and _parse_header are copied and modified from python2.7's cgi.py -# The original 2.7 version of this code did not correctly support some -# combinations of semicolons and double quotes. 
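A self-contained sketch of ``parse_multipart_form_data`` with a hand-built body; the boundary ``b`` and the field/file names are arbitrary::

    from tornado.httputil import parse_multipart_form_data

    body = (b"--b\r\n"
            b'Content-Disposition: form-data; name="f"; filename="t.txt"\r\n'
            b"Content-Type: text/plain\r\n"
            b"\r\n"
            b"hello\r\n"
            b"--b--\r\n")
    arguments, files = {}, {}
    parse_multipart_form_data(b"b", body, arguments, files)
    assert files["f"][0].filename == "t.txt"
    assert files["f"][0].body == b"hello"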
-# It has also been modified to support valueless parameters as seen in -# websocket extension negotiations, and to support non-ascii values in -# RFC 2231/5987 format. - - -def _parseparam(s): - while s[:1] == ';': - s = s[1:] - end = s.find(';') - while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2: - end = s.find(';', end + 1) - if end < 0: - end = len(s) - f = s[:end] - yield f.strip() - s = s[end:] - - -def _parse_header(line): - r"""Parse a Content-type like header. - - Return the main content-type and a dictionary of options. - - >>> d = "form-data; foo=\"b\\\\a\\\"r\"; file*=utf-8''T%C3%A4st" - >>> ct, d = _parse_header(d) - >>> ct - 'form-data' - >>> d['file'] == r'T\u00e4st'.encode('ascii').decode('unicode_escape') - True - >>> d['foo'] - 'b\\a"r' - """ - parts = _parseparam(';' + line) - key = next(parts) - # decode_params treats first argument special, but we already stripped key - params = [('Dummy', 'value')] - for p in parts: - i = p.find('=') - if i >= 0: - name = p[:i].strip().lower() - value = p[i + 1:].strip() - params.append((name, native_str(value))) - params = email.utils.decode_params(params) - params.pop(0) # get rid of the dummy again - pdict = {} - for name, value in params: - value = email.utils.collapse_rfc2231_value(value) - if len(value) >= 2 and value[0] == '"' and value[-1] == '"': - value = value[1:-1] - pdict[name] = value - return key, pdict - - -def _encode_header(key, pdict): - """Inverse of _parse_header. - - >>> _encode_header('permessage-deflate', - ... {'client_max_window_bits': 15, 'client_no_context_takeover': None}) - 'permessage-deflate; client_max_window_bits=15; client_no_context_takeover' - """ - if not pdict: - return key - out = [key] - # Sort the parameters just to make it easy to test. - for k, v in sorted(pdict.items()): - if v is None: - out.append(k) - else: - # TODO: quote if necessary. - out.append('%s=%s' % (k, v)) - return '; '.join(out) - - -def encode_username_password(username, password): - """Encodes a username/password pair in the format used by HTTP auth. - - The return value is a byte string in the form ``username:password``. - - .. versionadded:: 5.1 - """ - if isinstance(username, unicode_type): - username = unicodedata.normalize('NFC', username) - if isinstance(password, unicode_type): - password = unicodedata.normalize('NFC', password) - return utf8(username) + b":" + utf8(password) - - -def doctests(): - import doctest - return doctest.DocTestSuite() - - -def split_host_and_port(netloc): - """Returns ``(host, port)`` tuple from ``netloc``. - - Returned ``port`` will be ``None`` if not present. - - .. versionadded:: 4.1 - """ - match = re.match(r'^(.+):(\d+)$', netloc) - if match: - host = match.group(1) - port = int(match.group(2)) - else: - host = netloc - port = None - return (host, port) - - -def qs_to_qsl(qs): - """Generator converting a result of ``parse_qs`` back to name-value pairs. - - .. versionadded:: 5.0 - """ - for k, vs in qs.items(): - for v in vs: - yield (k, v) - - -_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]") -_QuotePatt = re.compile(r"[\\].") -_nulljoin = ''.join - - -def _unquote_cookie(str): - """Handle double quotes and escaping in cookie values. - - This method is copied verbatim from the Python 3.5 standard - library (http.cookies._unquote) so we don't have to depend on - non-public interfaces. - """ - # If there aren't any doublequotes, - # then there can't be any special characters. See RFC 2109. 
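``split_host_and_port`` in brief (the hostnames are placeholders)::

    from tornado.httputil import split_host_and_port

    assert split_host_and_port("example.com:8080") == ("example.com", 8080)
    assert split_host_and_port("example.com") == ("example.com", None)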
- if str is None or len(str) < 2: - return str - if str[0] != '"' or str[-1] != '"': - return str - - # We have to assume that we must decode this string. - # Down to work. - - # Remove the "s - str = str[1:-1] - - # Check for special sequences. Examples: - # \012 --> \n - # \" --> " - # - i = 0 - n = len(str) - res = [] - while 0 <= i < n: - o_match = _OctalPatt.search(str, i) - q_match = _QuotePatt.search(str, i) - if not o_match and not q_match: # Neither matched - res.append(str[i:]) - break - # else: - j = k = -1 - if o_match: - j = o_match.start(0) - if q_match: - k = q_match.start(0) - if q_match and (not o_match or k < j): # QuotePatt matched - res.append(str[i:k]) - res.append(str[k + 1]) - i = k + 2 - else: # OctalPatt matched - res.append(str[i:j]) - res.append(chr(int(str[j + 1:j + 4], 8))) - i = j + 4 - return _nulljoin(res) - - -def parse_cookie(cookie): - """Parse a ``Cookie`` HTTP header into a dict of name/value pairs. - - This function attempts to mimic browser cookie parsing behavior; - it specifically does not follow any of the cookie-related RFCs - (because browsers don't either). - - The algorithm used is identical to that used by Django version 1.9.10. - - .. versionadded:: 4.4.2 - """ - cookiedict = {} - for chunk in cookie.split(str(';')): - if str('=') in chunk: - key, val = chunk.split(str('='), 1) - else: - # Assume an empty name per - # https://bugzilla.mozilla.org/show_bug.cgi?id=169091 - key, val = str(''), chunk - key, val = key.strip(), val.strip() - if key or val: - # unquote using Python's algorithm. - cookiedict[key] = _unquote_cookie(val) - return cookiedict diff --git a/lib/tornado/ioloop.py b/lib/tornado/ioloop.py deleted file mode 100755 index 889153af..00000000 --- a/lib/tornado/ioloop.py +++ /dev/null @@ -1,1267 +0,0 @@ -# -# Copyright 2009 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""An I/O event loop for non-blocking sockets. - -On Python 3, `.IOLoop` is a wrapper around the `asyncio` event loop. - -Typical applications will use a single `IOLoop` object, accessed via -`IOLoop.current` class method. The `IOLoop.start` method (or -equivalently, `asyncio.AbstractEventLoop.run_forever`) should usually -be called at the end of the ``main()`` function. Atypical applications -may use more than one `IOLoop`, such as one `IOLoop` per thread, or -per `unittest` case. - -In addition to I/O events, the `IOLoop` can also schedule time-based -events. `IOLoop.add_timeout` is a non-blocking alternative to -`time.sleep`. 
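``parse_cookie`` in brief (the cookie names and values are arbitrary)::

    from tornado.httputil import parse_cookie

    assert parse_cookie("session=abc123; theme=dark") == {
        "session": "abc123", "theme": "dark"}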
- -""" - -from __future__ import absolute_import, division, print_function - -import collections -import datetime -import errno -import functools -import heapq -import itertools -import logging -import numbers -import os -import select -import sys -import threading -import time -import traceback -import math -import random - -from tornado.concurrent import Future, is_future, chain_future, future_set_exc_info, future_add_done_callback # noqa: E501 -from tornado.log import app_log, gen_log -from tornado.platform.auto import set_close_exec, Waker -from tornado import stack_context -from tornado.util import ( - PY3, Configurable, errno_from_exception, timedelta_to_seconds, - TimeoutError, unicode_type, import_object, -) - -try: - import signal -except ImportError: - signal = None - -try: - from concurrent.futures import ThreadPoolExecutor -except ImportError: - ThreadPoolExecutor = None - -if PY3: - import _thread as thread -else: - import thread - -try: - import asyncio -except ImportError: - asyncio = None - - -_POLL_TIMEOUT = 3600.0 - - -class IOLoop(Configurable): - """A level-triggered I/O loop. - - On Python 3, `IOLoop` is a wrapper around the `asyncio` event - loop. On Python 2, it uses ``epoll`` (Linux) or ``kqueue`` (BSD - and Mac OS X) if they are available, or else we fall back on - select(). If you are implementing a system that needs to handle - thousands of simultaneous connections, you should use a system - that supports either ``epoll`` or ``kqueue``. - - Example usage for a simple TCP server: - - .. testcode:: - - import errno - import functools - import socket - - import tornado.ioloop - from tornado.iostream import IOStream - - async def handle_connection(connection, address): - stream = IOStream(connection) - message = await stream.read_until_close() - print("message from client:", message.decode().strip()) - - def connection_ready(sock, fd, events): - while True: - try: - connection, address = sock.accept() - except socket.error as e: - if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN): - raise - return - connection.setblocking(0) - io_loop = tornado.ioloop.IOLoop.current() - io_loop.spawn_callback(handle_connection, connection, address) - - if __name__ == '__main__': - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - sock.setblocking(0) - sock.bind(("", 8888)) - sock.listen(128) - - io_loop = tornado.ioloop.IOLoop.current() - callback = functools.partial(connection_ready, sock) - io_loop.add_handler(sock.fileno(), callback, io_loop.READ) - io_loop.start() - - .. testoutput:: - :hide: - - By default, a newly-constructed `IOLoop` becomes the thread's current - `IOLoop`, unless there already is a current `IOLoop`. This behavior - can be controlled with the ``make_current`` argument to the `IOLoop` - constructor: if ``make_current=True``, the new `IOLoop` will always - try to become current and it raises an error if there is already a - current instance. If ``make_current=False``, the new `IOLoop` will - not try to become current. - - In general, an `IOLoop` cannot survive a fork or be shared across - processes in any way. When multiple processes are being used, each - process should create its own `IOLoop`, which also implies that - any objects which depend on the `IOLoop` (such as - `.AsyncHTTPClient`) must also be created in the child processes. 
- As a guideline, anything that starts processes (including the - `tornado.process` and `multiprocessing` modules) should do so as - early as possible, ideally the first thing the application does - after loading its configuration in ``main()``. - - .. versionchanged:: 4.2 - Added the ``make_current`` keyword argument to the `IOLoop` - constructor. - - .. versionchanged:: 5.0 - - Uses the `asyncio` event loop by default. The - ``IOLoop.configure`` method cannot be used on Python 3 except - to redundantly specify the `asyncio` event loop. - - """ - # Constants from the epoll module - _EPOLLIN = 0x001 - _EPOLLPRI = 0x002 - _EPOLLOUT = 0x004 - _EPOLLERR = 0x008 - _EPOLLHUP = 0x010 - _EPOLLRDHUP = 0x2000 - _EPOLLONESHOT = (1 << 30) - _EPOLLET = (1 << 31) - - # Our events map exactly to the epoll events - NONE = 0 - READ = _EPOLLIN - WRITE = _EPOLLOUT - ERROR = _EPOLLERR | _EPOLLHUP - - # In Python 2, _current.instance points to the current IOLoop. - _current = threading.local() - - # In Python 3, _ioloop_for_asyncio maps from asyncio loops to IOLoops. - _ioloop_for_asyncio = dict() - - @classmethod - def configure(cls, impl, **kwargs): - if asyncio is not None: - from tornado.platform.asyncio import BaseAsyncIOLoop - - if isinstance(impl, (str, unicode_type)): - impl = import_object(impl) - if not issubclass(impl, BaseAsyncIOLoop): - raise RuntimeError( - "only AsyncIOLoop is allowed when asyncio is available") - super(IOLoop, cls).configure(impl, **kwargs) - - @staticmethod - def instance(): - """Deprecated alias for `IOLoop.current()`. - - .. versionchanged:: 5.0 - - Previously, this method returned a global singleton - `IOLoop`, in contrast with the per-thread `IOLoop` returned - by `current()`. In nearly all cases the two were the same - (when they differed, it was generally used from non-Tornado - threads to communicate back to the main thread's `IOLoop`). - This distinction is not present in `asyncio`, so in order - to facilitate integration with that package `instance()` - was changed to be an alias to `current()`. Applications - using the cross-thread communications aspect of - `instance()` should instead set their own global variable - to point to the `IOLoop` they want to use. - - .. deprecated:: 5.0 - """ - return IOLoop.current() - - def install(self): - """Deprecated alias for `make_current()`. - - .. versionchanged:: 5.0 - - Previously, this method would set this `IOLoop` as the - global singleton used by `IOLoop.instance()`. Now that - `instance()` is an alias for `current()`, `install()` - is an alias for `make_current()`. - - .. deprecated:: 5.0 - """ - self.make_current() - - @staticmethod - def clear_instance(): - """Deprecated alias for `clear_current()`. - - .. versionchanged:: 5.0 - - Previously, this method would clear the `IOLoop` used as - the global singleton by `IOLoop.instance()`. Now that - `instance()` is an alias for `current()`, - `clear_instance()` is an alias for `clear_current()`. - - .. deprecated:: 5.0 - - """ - IOLoop.clear_current() - - @staticmethod - def current(instance=True): - """Returns the current thread's `IOLoop`. - - If an `IOLoop` is currently running or has been marked as - current by `make_current`, returns that instance. If there is - no current `IOLoop` and ``instance`` is true, creates one. - - .. versionchanged:: 4.1 - Added ``instance`` argument to control the fallback to - `IOLoop.instance()`. - .. 
versionchanged:: 5.0 - On Python 3, control of the current `IOLoop` is delegated - to `asyncio`, with this and other methods as pass-through accessors. - The ``instance`` argument now controls whether an `IOLoop` - is created automatically when there is none, instead of - whether we fall back to `IOLoop.instance()` (which is now - an alias for this method). ``instance=False`` is deprecated, - since even if we do not create an `IOLoop`, this method - may initialize the asyncio loop. - """ - if asyncio is None: - current = getattr(IOLoop._current, "instance", None) - if current is None and instance: - current = IOLoop() - if IOLoop._current.instance is not current: - raise RuntimeError("new IOLoop did not become current") - else: - try: - loop = asyncio.get_event_loop() - except (RuntimeError, AssertionError): - if not instance: - return None - raise - try: - return IOLoop._ioloop_for_asyncio[loop] - except KeyError: - if instance: - from tornado.platform.asyncio import AsyncIOMainLoop - current = AsyncIOMainLoop(make_current=True) - else: - current = None - return current - - def make_current(self): - """Makes this the `IOLoop` for the current thread. - - An `IOLoop` automatically becomes current for its thread - when it is started, but it is sometimes useful to call - `make_current` explicitly before starting the `IOLoop`, - so that code run at startup time can find the right - instance. - - .. versionchanged:: 4.1 - An `IOLoop` created while there is no current `IOLoop` - will automatically become current. - - .. versionchanged:: 5.0 - This method also sets the current `asyncio` event loop. - """ - # The asyncio event loops override this method. - assert asyncio is None - old = getattr(IOLoop._current, "instance", None) - if old is not None: - old.clear_current() - IOLoop._current.instance = self - - @staticmethod - def clear_current(): - """Clears the `IOLoop` for the current thread. - - Intended primarily for use by test frameworks in between tests. - - .. versionchanged:: 5.0 - This method also clears the current `asyncio` event loop. - """ - old = IOLoop.current(instance=False) - if old is not None: - old._clear_current_hook() - if asyncio is None: - IOLoop._current.instance = None - - def _clear_current_hook(self): - """Instance method called when an IOLoop ceases to be current. - - May be overridden by subclasses as a counterpart to make_current. - """ - pass - - @classmethod - def configurable_base(cls): - return IOLoop - - @classmethod - def configurable_default(cls): - if asyncio is not None: - from tornado.platform.asyncio import AsyncIOLoop - return AsyncIOLoop - return PollIOLoop - - def initialize(self, make_current=None): - if make_current is None: - if IOLoop.current(instance=False) is None: - self.make_current() - elif make_current: - current = IOLoop.current(instance=False) - # AsyncIO loops can already be current by this point. - if current is not None and current is not self: - raise RuntimeError("current IOLoop already exists") - self.make_current() - - def close(self, all_fds=False): - """Closes the `IOLoop`, freeing any resources used. - - If ``all_fds`` is true, all file descriptors registered on the - IOLoop will be closed (not just the ones created by the - `IOLoop` itself). - - Many applications will only use a single `IOLoop` that runs for the - entire lifetime of the process. In that case closing the `IOLoop` - is not necessary since everything will be cleaned up when the - process exits. 
`IOLoop.close` is provided mainly for scenarios
-        such as unit tests, which create and destroy a large number of
-        ``IOLoops``.
-
-        An `IOLoop` must be completely stopped before it can be closed. This
-        means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
-        be allowed to return before attempting to call `IOLoop.close()`.
-        Therefore the call to `close` will usually appear just after
-        the call to `start` rather than near the call to `stop`.
-
-        .. versionchanged:: 3.1
-           If the `IOLoop` implementation supports non-integer objects
-           for "file descriptors", those objects will have their
-           ``close`` method called when ``all_fds`` is true.
-        """
-        raise NotImplementedError()
-
-    def add_handler(self, fd, handler, events):
-        """Registers the given handler to receive the given events for ``fd``.
-
-        The ``fd`` argument may either be an integer file descriptor or
-        a file-like object with a ``fileno()`` method (and optionally a
-        ``close()`` method, which may be called when the `IOLoop` is shut
-        down).
-
-        The ``events`` argument is a bitwise or of the constants
-        ``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.
-
-        When an event occurs, ``handler(fd, events)`` will be run.
-
-        .. versionchanged:: 4.0
-           Added the ability to pass file-like objects in addition to
-           raw file descriptors.
-        """
-        raise NotImplementedError()
-
-    def update_handler(self, fd, events):
-        """Changes the events we listen for on ``fd``.
-
-        .. versionchanged:: 4.0
-           Added the ability to pass file-like objects in addition to
-           raw file descriptors.
-        """
-        raise NotImplementedError()
-
-    def remove_handler(self, fd):
-        """Stop listening for events on ``fd``.
-
-        .. versionchanged:: 4.0
-           Added the ability to pass file-like objects in addition to
-           raw file descriptors.
-        """
-        raise NotImplementedError()
-
-    def set_blocking_signal_threshold(self, seconds, action):
-        """Sends a signal if the `IOLoop` is blocked for more than
-        ``seconds`` seconds.
-
-        Pass ``seconds=None`` to disable. Requires Python 2.6 on a unixy
-        platform.
-
-        The action parameter is a Python signal handler. Read the
-        documentation for the `signal` module for more information.
-        If ``action`` is None, the process will be killed if it is
-        blocked for too long.
-
-        .. deprecated:: 5.0
-
-           Not implemented on the `asyncio` event loop. Use the environment
-           variable ``PYTHONASYNCIODEBUG=1`` instead. This method will be
-           removed in Tornado 6.0.
-        """
-        raise NotImplementedError()
-
-    def set_blocking_log_threshold(self, seconds):
-        """Logs a stack trace if the `IOLoop` is blocked for more than
-        ``seconds`` seconds.
-
-        Equivalent to ``set_blocking_signal_threshold(seconds,
-        self.log_stack)``
-
-        .. deprecated:: 5.0
-
-           Not implemented on the `asyncio` event loop. Use the environment
-           variable ``PYTHONASYNCIODEBUG=1`` instead. This method will be
-           removed in Tornado 6.0.
-        """
-        self.set_blocking_signal_threshold(seconds, self.log_stack)
-
-    def log_stack(self, signal, frame):
-        """Signal handler to log the stack trace of the current thread.
-
-        For use with `set_blocking_signal_threshold`.
-
-        .. deprecated:: 5.1
-
-           This method will be removed in Tornado 6.0.
-        """
-        gen_log.warning('IOLoop blocked for %f seconds in\n%s',
-                        self._blocking_signal_threshold,
-                        ''.join(traceback.format_stack(frame)))
-
-    def start(self):
-        """Starts the I/O loop.
-
-        The loop will run until one of the callbacks calls `stop()`, which
-        will make the loop stop after the current event iteration completes.
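# A minimal usage sketch (not from the vendored file) of the add_handler/start
# contract documented above, assuming this Tornado 5.x: register a READ
# handler on a listening socket, run the loop briefly, then clean up.
# The port and timings are illustrative.
import socket

from tornado.ioloop import IOLoop

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(False)
sock.bind(("127.0.0.1", 8888))
sock.listen(128)

def on_readable(fd, events):
    # Invoked as handler(fd, events) when the registered events fire.
    conn, addr = sock.accept()
    conn.close()

io_loop = IOLoop.current()
io_loop.add_handler(sock.fileno(), on_readable, IOLoop.READ)
io_loop.call_later(0.5, io_loop.stop)  # run the loop briefly for the demo
io_loop.start()
io_loop.remove_handler(sock.fileno())
sock.close()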
- """ - raise NotImplementedError() - - def _setup_logging(self): - """The IOLoop catches and logs exceptions, so it's - important that log output be visible. However, python's - default behavior for non-root loggers (prior to python - 3.2) is to print an unhelpful "no handlers could be - found" message rather than the actual log entry, so we - must explicitly configure logging if we've made it this - far without anything. - - This method should be called from start() in subclasses. - """ - if not any([logging.getLogger().handlers, - logging.getLogger('tornado').handlers, - logging.getLogger('tornado.application').handlers]): - logging.basicConfig() - - def stop(self): - """Stop the I/O loop. - - If the event loop is not currently running, the next call to `start()` - will return immediately. - - Note that even after `stop` has been called, the `IOLoop` is not - completely stopped until `IOLoop.start` has also returned. - Some work that was scheduled before the call to `stop` may still - be run before the `IOLoop` shuts down. - """ - raise NotImplementedError() - - def run_sync(self, func, timeout=None): - """Starts the `IOLoop`, runs the given function, and stops the loop. - - The function must return either an awaitable object or - ``None``. If the function returns an awaitable object, the - `IOLoop` will run until the awaitable is resolved (and - `run_sync()` will return the awaitable's result). If it raises - an exception, the `IOLoop` will stop and the exception will be - re-raised to the caller. - - The keyword-only argument ``timeout`` may be used to set - a maximum duration for the function. If the timeout expires, - a `tornado.util.TimeoutError` is raised. - - This method is useful to allow asynchronous calls in a - ``main()`` function:: - - async def main(): - # do stuff... - - if __name__ == '__main__': - IOLoop.current().run_sync(main) - - .. versionchanged:: 4.3 - Returning a non-``None``, non-awaitable value is now an error. - - .. versionchanged:: 5.0 - If a timeout occurs, the ``func`` coroutine will be cancelled. - - """ - future_cell = [None] - - def run(): - try: - result = func() - if result is not None: - from tornado.gen import convert_yielded - result = convert_yielded(result) - except Exception: - future_cell[0] = Future() - future_set_exc_info(future_cell[0], sys.exc_info()) - else: - if is_future(result): - future_cell[0] = result - else: - future_cell[0] = Future() - future_cell[0].set_result(result) - self.add_future(future_cell[0], lambda future: self.stop()) - self.add_callback(run) - if timeout is not None: - def timeout_callback(): - # If we can cancel the future, do so and wait on it. If not, - # Just stop the loop and return with the task still pending. - # (If we neither cancel nor wait for the task, a warning - # will be logged). - if not future_cell[0].cancel(): - self.stop() - timeout_handle = self.add_timeout(self.time() + timeout, timeout_callback) - self.start() - if timeout is not None: - self.remove_timeout(timeout_handle) - if future_cell[0].cancelled() or not future_cell[0].done(): - raise TimeoutError('Operation timed out after %s seconds' % timeout) - return future_cell[0].result() - - def time(self): - """Returns the current time according to the `IOLoop`'s clock. - - The return value is a floating-point number relative to an - unspecified time in the past. - - By default, the `IOLoop`'s time function is `time.time`. However, - it may be configured to use e.g. `time.monotonic` instead. 
- Calls to `add_timeout` that pass a number instead of a - `datetime.timedelta` should use this function to compute the - appropriate time, so they can work no matter what time function - is chosen. - """ - return time.time() - - def add_timeout(self, deadline, callback, *args, **kwargs): - """Runs the ``callback`` at the time ``deadline`` from the I/O loop. - - Returns an opaque handle that may be passed to - `remove_timeout` to cancel. - - ``deadline`` may be a number denoting a time (on the same - scale as `IOLoop.time`, normally `time.time`), or a - `datetime.timedelta` object for a deadline relative to the - current time. Since Tornado 4.0, `call_later` is a more - convenient alternative for the relative case since it does not - require a timedelta object. - - Note that it is not safe to call `add_timeout` from other threads. - Instead, you must use `add_callback` to transfer control to the - `IOLoop`'s thread, and then call `add_timeout` from there. - - Subclasses of IOLoop must implement either `add_timeout` or - `call_at`; the default implementations of each will call - the other. `call_at` is usually easier to implement, but - subclasses that wish to maintain compatibility with Tornado - versions prior to 4.0 must use `add_timeout` instead. - - .. versionchanged:: 4.0 - Now passes through ``*args`` and ``**kwargs`` to the callback. - """ - if isinstance(deadline, numbers.Real): - return self.call_at(deadline, callback, *args, **kwargs) - elif isinstance(deadline, datetime.timedelta): - return self.call_at(self.time() + timedelta_to_seconds(deadline), - callback, *args, **kwargs) - else: - raise TypeError("Unsupported deadline %r" % deadline) - - def call_later(self, delay, callback, *args, **kwargs): - """Runs the ``callback`` after ``delay`` seconds have passed. - - Returns an opaque handle that may be passed to `remove_timeout` - to cancel. Note that unlike the `asyncio` method of the same - name, the returned object does not have a ``cancel()`` method. - - See `add_timeout` for comments on thread-safety and subclassing. - - .. versionadded:: 4.0 - """ - return self.call_at(self.time() + delay, callback, *args, **kwargs) - - def call_at(self, when, callback, *args, **kwargs): - """Runs the ``callback`` at the absolute time designated by ``when``. - - ``when`` must be a number using the same reference point as - `IOLoop.time`. - - Returns an opaque handle that may be passed to `remove_timeout` - to cancel. Note that unlike the `asyncio` method of the same - name, the returned object does not have a ``cancel()`` method. - - See `add_timeout` for comments on thread-safety and subclassing. - - .. versionadded:: 4.0 - """ - return self.add_timeout(when, callback, *args, **kwargs) - - def remove_timeout(self, timeout): - """Cancels a pending timeout. - - The argument is a handle as returned by `add_timeout`. It is - safe to call `remove_timeout` even if the callback has already - been run. - """ - raise NotImplementedError() - - def add_callback(self, callback, *args, **kwargs): - """Calls the given callback on the next I/O loop iteration. - - It is safe to call this method from any thread at any time, - except from a signal handler. Note that this is the **only** - method in `IOLoop` that makes this thread-safety guarantee; all - other interaction with the `IOLoop` must be done from that - `IOLoop`'s thread. `add_callback()` may be used to transfer - control from other threads to the `IOLoop`'s thread. - - To add a callback from a signal handler, see - `add_callback_from_signal`. 
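# A sketch of the one thread-safety guarantee documented above, assuming
# Tornado 5.x: add_callback hops from a helper thread onto the IOLoop
# thread, where call_later is then safe to use. Names are illustrative.
import threading

from tornado.ioloop import IOLoop

io_loop = IOLoop.current()

def on_loop_thread(msg):
    print("on the IOLoop thread:", msg)
    # Safe here: this runs on the loop's own thread.
    io_loop.call_later(0.1, io_loop.stop)

def background():
    # add_callback is the only IOLoop method safe to call from other threads.
    io_loop.add_callback(on_loop_thread, "hello")

threading.Thread(target=background).start()
io_loop.start()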
- """ - raise NotImplementedError() - - def add_callback_from_signal(self, callback, *args, **kwargs): - """Calls the given callback on the next I/O loop iteration. - - Safe for use from a Python signal handler; should not be used - otherwise. - - Callbacks added with this method will be run without any - `.stack_context`, to avoid picking up the context of the function - that was interrupted by the signal. - """ - raise NotImplementedError() - - def spawn_callback(self, callback, *args, **kwargs): - """Calls the given callback on the next IOLoop iteration. - - Unlike all other callback-related methods on IOLoop, - ``spawn_callback`` does not associate the callback with its caller's - ``stack_context``, so it is suitable for fire-and-forget callbacks - that should not interfere with the caller. - - .. versionadded:: 4.0 - """ - with stack_context.NullContext(): - self.add_callback(callback, *args, **kwargs) - - def add_future(self, future, callback): - """Schedules a callback on the ``IOLoop`` when the given - `.Future` is finished. - - The callback is invoked with one argument, the - `.Future`. - - This method only accepts `.Future` objects and not other - awaitables (unlike most of Tornado where the two are - interchangeable). - """ - assert is_future(future) - callback = stack_context.wrap(callback) - future_add_done_callback( - future, lambda future: self.add_callback(callback, future)) - - def run_in_executor(self, executor, func, *args): - """Runs a function in a ``concurrent.futures.Executor``. If - ``executor`` is ``None``, the IO loop's default executor will be used. - - Use `functools.partial` to pass keyword arguments to ``func``. - - .. versionadded:: 5.0 - """ - if ThreadPoolExecutor is None: - raise RuntimeError( - "concurrent.futures is required to use IOLoop.run_in_executor") - - if executor is None: - if not hasattr(self, '_executor'): - from tornado.process import cpu_count - self._executor = ThreadPoolExecutor(max_workers=(cpu_count() * 5)) - executor = self._executor - c_future = executor.submit(func, *args) - # Concurrent Futures are not usable with await. Wrap this in a - # Tornado Future instead, using self.add_future for thread-safety. - t_future = Future() - self.add_future(c_future, lambda f: chain_future(f, t_future)) - return t_future - - def set_default_executor(self, executor): - """Sets the default executor to use with :meth:`run_in_executor`. - - .. versionadded:: 5.0 - """ - self._executor = executor - - def _run_callback(self, callback): - """Runs a callback with error handling. - - For use in subclasses. - """ - try: - ret = callback() - if ret is not None: - from tornado import gen - # Functions that return Futures typically swallow all - # exceptions and store them in the Future. If a Future - # makes it out to the IOLoop, ensure its exception (if any) - # gets logged too. - try: - ret = gen.convert_yielded(ret) - except gen.BadYieldError: - # It's not unusual for add_callback to be used with - # methods returning a non-None and non-yieldable - # result, which should just be ignored. - pass - else: - self.add_future(ret, self._discard_future_result) - except Exception: - self.handle_callback_exception(callback) - - def _discard_future_result(self, future): - """Avoid unhandled-exception warnings from spawned coroutines.""" - future.result() - - def handle_callback_exception(self, callback): - """This method is called whenever a callback run by the `IOLoop` - throws an exception. - - By default simply logs the exception as an error. 
Subclasses
-        may override this method to customize reporting of exceptions.
-
-        The exception itself is not passed explicitly, but is available
-        in `sys.exc_info`.
-
-        .. versionchanged:: 5.0
-
-           When the `asyncio` event loop is used (which is now the
-           default on Python 3), some callback errors will be handled by
-           `asyncio` instead of this method.
-
-        .. deprecated:: 5.1
-
-           Support for this method will be removed in Tornado 6.0.
-        """
-        app_log.error("Exception in callback %r", callback, exc_info=True)
-
-    def split_fd(self, fd):
-        """Returns an (fd, obj) pair from an ``fd`` parameter.
-
-        We accept both raw file descriptors and file-like objects as
-        input to `add_handler` and related methods. When a file-like
-        object is passed, we must retain the object itself so we can
-        close it correctly when the `IOLoop` shuts down, but the
-        poller interfaces favor file descriptors (they will accept
-        file-like objects and call ``fileno()`` for you, but they
-        always return the descriptor itself).
-
-        This method is provided for use by `IOLoop` subclasses and should
-        not generally be used by application code.
-
-        .. versionadded:: 4.0
-        """
-        try:
-            return fd.fileno(), fd
-        except AttributeError:
-            return fd, fd
-
-    def close_fd(self, fd):
-        """Utility method to close an ``fd``.
-
-        If ``fd`` is a file-like object, we close it directly; otherwise
-        we use `os.close`.
-
-        This method is provided for use by `IOLoop` subclasses (in
-        implementations of ``IOLoop.close(all_fds=True)``) and should
-        not generally be used by application code.
-
-        .. versionadded:: 4.0
-        """
-        try:
-            try:
-                fd.close()
-            except AttributeError:
-                os.close(fd)
-        except OSError:
-            pass
-
-
-class PollIOLoop(IOLoop):
-    """Base class for IOLoops built around a select-like function.
-
-    For concrete implementations, see `tornado.platform.epoll.EPollIOLoop`
-    (Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or
-    `tornado.platform.select.SelectIOLoop` (all platforms).
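# A hedged sketch of run_in_executor() as documented above, assuming
# Tornado 5.x on Python 3: offload a blocking call and await the wrapped
# result. fetch_blocking is a hypothetical stand-in for blocking I/O.
import time

from tornado.ioloop import IOLoop

def fetch_blocking(seconds):
    time.sleep(seconds)  # stands in for a blocking library call
    return seconds

async def main():
    # Passing None selects the loop's default ThreadPoolExecutor.
    result = await IOLoop.current().run_in_executor(None, fetch_blocking, 0.1)
    print("blocking call returned", result)

IOLoop.current().run_sync(main)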
- """ - def initialize(self, impl, time_func=None, **kwargs): - super(PollIOLoop, self).initialize(**kwargs) - self._impl = impl - if hasattr(self._impl, 'fileno'): - set_close_exec(self._impl.fileno()) - self.time_func = time_func or time.time - self._handlers = {} - self._events = {} - self._callbacks = collections.deque() - self._timeouts = [] - self._cancellations = 0 - self._running = False - self._stopped = False - self._closing = False - self._thread_ident = None - self._pid = os.getpid() - self._blocking_signal_threshold = None - self._timeout_counter = itertools.count() - - # Create a pipe that we send bogus data to when we want to wake - # the I/O loop when it is idle - self._waker = Waker() - self.add_handler(self._waker.fileno(), - lambda fd, events: self._waker.consume(), - self.READ) - - @classmethod - def configurable_base(cls): - return PollIOLoop - - @classmethod - def configurable_default(cls): - if hasattr(select, "epoll"): - from tornado.platform.epoll import EPollIOLoop - return EPollIOLoop - if hasattr(select, "kqueue"): - # Python 2.6+ on BSD or Mac - from tornado.platform.kqueue import KQueueIOLoop - return KQueueIOLoop - from tornado.platform.select import SelectIOLoop - return SelectIOLoop - - def close(self, all_fds=False): - self._closing = True - self.remove_handler(self._waker.fileno()) - if all_fds: - for fd, handler in list(self._handlers.values()): - self.close_fd(fd) - self._waker.close() - self._impl.close() - self._callbacks = None - self._timeouts = None - if hasattr(self, '_executor'): - self._executor.shutdown() - - def add_handler(self, fd, handler, events): - fd, obj = self.split_fd(fd) - self._handlers[fd] = (obj, stack_context.wrap(handler)) - self._impl.register(fd, events | self.ERROR) - - def update_handler(self, fd, events): - fd, obj = self.split_fd(fd) - self._impl.modify(fd, events | self.ERROR) - - def remove_handler(self, fd): - fd, obj = self.split_fd(fd) - self._handlers.pop(fd, None) - self._events.pop(fd, None) - try: - self._impl.unregister(fd) - except Exception: - gen_log.debug("Error deleting fd from IOLoop", exc_info=True) - - def set_blocking_signal_threshold(self, seconds, action): - if not hasattr(signal, "setitimer"): - gen_log.error("set_blocking_signal_threshold requires a signal module " - "with the setitimer method") - return - self._blocking_signal_threshold = seconds - if seconds is not None: - signal.signal(signal.SIGALRM, - action if action is not None else signal.SIG_DFL) - - def start(self): - if self._running: - raise RuntimeError("IOLoop is already running") - if os.getpid() != self._pid: - raise RuntimeError("Cannot share PollIOLoops across processes") - self._setup_logging() - if self._stopped: - self._stopped = False - return - old_current = IOLoop.current(instance=False) - if old_current is not self: - self.make_current() - self._thread_ident = thread.get_ident() - self._running = True - - # signal.set_wakeup_fd closes a race condition in event loops: - # a signal may arrive at the beginning of select/poll/etc - # before it goes into its interruptible sleep, so the signal - # will be consumed without waking the select. The solution is - # for the (C, synchronous) signal handler to write to a pipe, - # which will then be seen by select. - # - # In python's signal handling semantics, this only matters on the - # main thread (fortunately, set_wakeup_fd only works on the main - # thread and will raise a ValueError otherwise). - # - # If someone has already set a wakeup fd, we don't want to - # disturb it. 
This is an issue for twisted, which does its - # SIGCHLD processing in response to its own wakeup fd being - # written to. As long as the wakeup fd is registered on the IOLoop, - # the loop will still wake up and everything should work. - old_wakeup_fd = None - if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix': - # requires python 2.6+, unix. set_wakeup_fd exists but crashes - # the python process on windows. - try: - old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno()) - if old_wakeup_fd != -1: - # Already set, restore previous value. This is a little racy, - # but there's no clean get_wakeup_fd and in real use the - # IOLoop is just started once at the beginning. - signal.set_wakeup_fd(old_wakeup_fd) - old_wakeup_fd = None - except ValueError: - # Non-main thread, or the previous value of wakeup_fd - # is no longer valid. - old_wakeup_fd = None - - try: - while True: - # Prevent IO event starvation by delaying new callbacks - # to the next iteration of the event loop. - ncallbacks = len(self._callbacks) - - # Add any timeouts that have come due to the callback list. - # Do not run anything until we have determined which ones - # are ready, so timeouts that call add_timeout cannot - # schedule anything in this iteration. - due_timeouts = [] - if self._timeouts: - now = self.time() - while self._timeouts: - if self._timeouts[0].callback is None: - # The timeout was cancelled. Note that the - # cancellation check is repeated below for timeouts - # that are cancelled by another timeout or callback. - heapq.heappop(self._timeouts) - self._cancellations -= 1 - elif self._timeouts[0].deadline <= now: - due_timeouts.append(heapq.heappop(self._timeouts)) - else: - break - if (self._cancellations > 512 and - self._cancellations > (len(self._timeouts) >> 1)): - # Clean up the timeout queue when it gets large and it's - # more than half cancellations. - self._cancellations = 0 - self._timeouts = [x for x in self._timeouts - if x.callback is not None] - heapq.heapify(self._timeouts) - - for i in range(ncallbacks): - self._run_callback(self._callbacks.popleft()) - for timeout in due_timeouts: - if timeout.callback is not None: - self._run_callback(timeout.callback) - # Closures may be holding on to a lot of memory, so allow - # them to be freed before we go into our poll wait. - due_timeouts = timeout = None - - if self._callbacks: - # If any callbacks or timeouts called add_callback, - # we don't want to wait in poll() before we run them. - poll_timeout = 0.0 - elif self._timeouts: - # If there are any timeouts, schedule the first one. - # Use self.time() instead of 'now' to account for time - # spent running callbacks. - poll_timeout = self._timeouts[0].deadline - self.time() - poll_timeout = max(0, min(poll_timeout, _POLL_TIMEOUT)) - else: - # No timeouts and no callbacks, so use the default. - poll_timeout = _POLL_TIMEOUT - - if not self._running: - break - - if self._blocking_signal_threshold is not None: - # clear alarm so it doesn't fire while poll is waiting for - # events. 
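# The poll-timeout decision made by the loop body above, restated as a small
# standalone function for clarity. This is a sketch, not code from the
# vendored file; `timeouts` stands in for the _Timeout heap and _POLL_TIMEOUT
# mirrors the module constant (3600 seconds in stock Tornado).
_POLL_TIMEOUT = 3600.0

def choose_poll_timeout(callbacks, timeouts, now):
    if callbacks:
        # Callbacks are pending: poll() must not block at all.
        return 0.0
    if timeouts:
        # Sleep no longer than the earliest deadline, clamped to >= 0.
        return max(0, min(timeouts[0].deadline - now, _POLL_TIMEOUT))
    # Nothing scheduled: fall back to the default long timeout.
    return _POLL_TIMEOUT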
- signal.setitimer(signal.ITIMER_REAL, 0, 0) - - try: - event_pairs = self._impl.poll(poll_timeout) - except Exception as e: - # Depending on python version and IOLoop implementation, - # different exception types may be thrown and there are - # two ways EINTR might be signaled: - # * e.errno == errno.EINTR - # * e.args is like (errno.EINTR, 'Interrupted system call') - if errno_from_exception(e) == errno.EINTR: - continue - else: - raise - - if self._blocking_signal_threshold is not None: - signal.setitimer(signal.ITIMER_REAL, - self._blocking_signal_threshold, 0) - - # Pop one fd at a time from the set of pending fds and run - # its handler. Since that handler may perform actions on - # other file descriptors, there may be reentrant calls to - # this IOLoop that modify self._events - self._events.update(event_pairs) - while self._events: - fd, events = self._events.popitem() - try: - fd_obj, handler_func = self._handlers[fd] - handler_func(fd_obj, events) - except (OSError, IOError) as e: - if errno_from_exception(e) == errno.EPIPE: - # Happens when the client closes the connection - pass - else: - self.handle_callback_exception(self._handlers.get(fd)) - except Exception: - self.handle_callback_exception(self._handlers.get(fd)) - fd_obj = handler_func = None - - finally: - # reset the stopped flag so another start/stop pair can be issued - self._stopped = False - if self._blocking_signal_threshold is not None: - signal.setitimer(signal.ITIMER_REAL, 0, 0) - if old_current is None: - IOLoop.clear_current() - elif old_current is not self: - old_current.make_current() - if old_wakeup_fd is not None: - signal.set_wakeup_fd(old_wakeup_fd) - - def stop(self): - self._running = False - self._stopped = True - self._waker.wake() - - def time(self): - return self.time_func() - - def call_at(self, deadline, callback, *args, **kwargs): - timeout = _Timeout( - deadline, - functools.partial(stack_context.wrap(callback), *args, **kwargs), - self) - heapq.heappush(self._timeouts, timeout) - return timeout - - def remove_timeout(self, timeout): - # Removing from a heap is complicated, so just leave the defunct - # timeout object in the queue (see discussion in - # http://docs.python.org/library/heapq.html). - # If this turns out to be a problem, we could add a garbage - # collection pass whenever there are too many dead timeouts. - timeout.callback = None - self._cancellations += 1 - - def add_callback(self, callback, *args, **kwargs): - if self._closing: - return - # Blindly insert into self._callbacks. This is safe even - # from signal handlers because deque.append is atomic. - self._callbacks.append(functools.partial( - stack_context.wrap(callback), *args, **kwargs)) - if thread.get_ident() != self._thread_ident: - # This will write one byte but Waker.consume() reads many - # at once, so it's ok to write even when not strictly - # necessary. - self._waker.wake() - else: - # If we're on the IOLoop's thread, we don't need to wake anyone. 
- pass - - def add_callback_from_signal(self, callback, *args, **kwargs): - with stack_context.NullContext(): - self.add_callback(callback, *args, **kwargs) - - -class _Timeout(object): - """An IOLoop timeout, a UNIX timestamp and a callback""" - - # Reduce memory overhead when there are lots of pending callbacks - __slots__ = ['deadline', 'callback', 'tdeadline'] - - def __init__(self, deadline, callback, io_loop): - if not isinstance(deadline, numbers.Real): - raise TypeError("Unsupported deadline %r" % deadline) - self.deadline = deadline - self.callback = callback - self.tdeadline = (deadline, next(io_loop._timeout_counter)) - - # Comparison methods to sort by deadline, with object id as a tiebreaker - # to guarantee a consistent ordering. The heapq module uses __le__ - # in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons - # use __lt__). - def __lt__(self, other): - return self.tdeadline < other.tdeadline - - def __le__(self, other): - return self.tdeadline <= other.tdeadline - - -class PeriodicCallback(object): - """Schedules the given callback to be called periodically. - - The callback is called every ``callback_time`` milliseconds. - Note that the timeout is given in milliseconds, while most other - time-related functions in Tornado use seconds. - - If ``jitter`` is specified, each callback time will be randomly selected - within a window of ``jitter * callback_time`` milliseconds. - Jitter can be used to reduce alignment of events with similar periods. - A jitter of 0.1 means allowing a 10% variation in callback time. - The window is centered on ``callback_time`` so the total number of calls - within a given interval should not be significantly affected by adding - jitter. - - If the callback runs for longer than ``callback_time`` milliseconds, - subsequent invocations will be skipped to get back on schedule. - - `start` must be called after the `PeriodicCallback` is created. - - .. versionchanged:: 5.0 - The ``io_loop`` argument (deprecated since version 4.1) has been removed. - - .. versionchanged:: 5.1 - The ``jitter`` argument is added. - """ - def __init__(self, callback, callback_time, jitter=0): - self.callback = callback - if callback_time <= 0: - raise ValueError("Periodic callback must have a positive callback_time") - self.callback_time = callback_time - self.jitter = jitter - self._running = False - self._timeout = None - - def start(self): - """Starts the timer.""" - # Looking up the IOLoop here allows to first instantiate the - # PeriodicCallback in another thread, then start it using - # IOLoop.add_callback(). - self.io_loop = IOLoop.current() - self._running = True - self._next_timeout = self.io_loop.time() - self._schedule_next() - - def stop(self): - """Stops the timer.""" - self._running = False - if self._timeout is not None: - self.io_loop.remove_timeout(self._timeout) - self._timeout = None - - def is_running(self): - """Return True if this `.PeriodicCallback` has been started. - - .. 
versionadded:: 4.1 - """ - return self._running - - def _run(self): - if not self._running: - return - try: - return self.callback() - except Exception: - self.io_loop.handle_callback_exception(self.callback) - finally: - self._schedule_next() - - def _schedule_next(self): - if self._running: - self._update_next(self.io_loop.time()) - self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run) - - def _update_next(self, current_time): - callback_time_sec = self.callback_time / 1000.0 - if self.jitter: - # apply jitter fraction - callback_time_sec *= 1 + (self.jitter * (random.random() - 0.5)) - if self._next_timeout <= current_time: - # The period should be measured from the start of one call - # to the start of the next. If one call takes too long, - # skip cycles to get back to a multiple of the original - # schedule. - self._next_timeout += (math.floor((current_time - self._next_timeout) / - callback_time_sec) + 1) * callback_time_sec - else: - # If the clock moved backwards, ensure we advance the next - # timeout instead of recomputing the same value again. - # This may result in long gaps between callbacks if the - # clock jumps backwards by a lot, but the far more common - # scenario is a small NTP adjustment that should just be - # ignored. - # - # Note that on some systems if time.time() runs slower - # than time.monotonic() (most common on windows), we - # effectively experience a small backwards time jump on - # every iteration because PeriodicCallback uses - # time.time() while asyncio schedules callbacks using - # time.monotonic(). - # https://github.com/tornadoweb/tornado/issues/2333 - self._next_timeout += callback_time_sec diff --git a/lib/tornado/iostream.py b/lib/tornado/iostream.py deleted file mode 100755 index 89e1e234..00000000 --- a/lib/tornado/iostream.py +++ /dev/null @@ -1,1757 +0,0 @@ -# -# Copyright 2009 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Utility classes to write to and read from non-blocking files and sockets. - -Contents: - -* `BaseIOStream`: Generic interface for reading and writing. -* `IOStream`: Implementation of BaseIOStream using non-blocking sockets. -* `SSLIOStream`: SSL-aware version of IOStream. -* `PipeIOStream`: Pipe-based IOStream implementation. 
-""" - -from __future__ import absolute_import, division, print_function - -import collections -import errno -import io -import numbers -import os -import socket -import sys -import re -import warnings - -from tornado.concurrent import Future -from tornado import ioloop -from tornado.log import gen_log, app_log -from tornado.netutil import ssl_wrap_socket, _client_ssl_defaults, _server_ssl_defaults -from tornado import stack_context -from tornado.util import errno_from_exception - -try: - from tornado.platform.posix import _set_nonblocking -except ImportError: - _set_nonblocking = None - -try: - import ssl -except ImportError: - # ssl is not available on Google App Engine - ssl = None - -# These errnos indicate that a non-blocking operation must be retried -# at a later time. On most platforms they're the same value, but on -# some they differ. -_ERRNO_WOULDBLOCK = (errno.EWOULDBLOCK, errno.EAGAIN) - -if hasattr(errno, "WSAEWOULDBLOCK"): - _ERRNO_WOULDBLOCK += (errno.WSAEWOULDBLOCK,) # type: ignore - -# These errnos indicate that a connection has been abruptly terminated. -# They should be caught and handled less noisily than other errors. -_ERRNO_CONNRESET = (errno.ECONNRESET, errno.ECONNABORTED, errno.EPIPE, - errno.ETIMEDOUT) - -if hasattr(errno, "WSAECONNRESET"): - _ERRNO_CONNRESET += (errno.WSAECONNRESET, errno.WSAECONNABORTED, errno.WSAETIMEDOUT) # type: ignore # noqa: E501 - -if sys.platform == 'darwin': - # OSX appears to have a race condition that causes send(2) to return - # EPROTOTYPE if called while a socket is being torn down: - # http://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/ - # Since the socket is being closed anyway, treat this as an ECONNRESET - # instead of an unexpected error. - _ERRNO_CONNRESET += (errno.EPROTOTYPE,) # type: ignore - -# More non-portable errnos: -_ERRNO_INPROGRESS = (errno.EINPROGRESS,) - -if hasattr(errno, "WSAEINPROGRESS"): - _ERRNO_INPROGRESS += (errno.WSAEINPROGRESS,) # type: ignore - -_WINDOWS = sys.platform.startswith('win') - - -class StreamClosedError(IOError): - """Exception raised by `IOStream` methods when the stream is closed. - - Note that the close callback is scheduled to run *after* other - callbacks on the stream (to allow for buffered data to be processed), - so you may see this error before you see the close callback. - - The ``real_error`` attribute contains the underlying error that caused - the stream to close (if any). - - .. versionchanged:: 4.3 - Added the ``real_error`` attribute. - """ - def __init__(self, real_error=None): - super(StreamClosedError, self).__init__('Stream is closed') - self.real_error = real_error - - -class UnsatisfiableReadError(Exception): - """Exception raised when a read cannot be satisfied. - - Raised by ``read_until`` and ``read_until_regex`` with a ``max_bytes`` - argument. - """ - pass - - -class StreamBufferFullError(Exception): - """Exception raised by `IOStream` methods when the buffer is full. - """ - - -class _StreamBuffer(object): - """ - A specialized buffer that tries to avoid copies when large pieces - of data are encountered. 
- """ - - def __init__(self): - # A sequence of (False, bytearray) and (True, memoryview) objects - self._buffers = collections.deque() - # Position in the first buffer - self._first_pos = 0 - self._size = 0 - - def __len__(self): - return self._size - - # Data above this size will be appended separately instead - # of extending an existing bytearray - _large_buf_threshold = 2048 - - def append(self, data): - """ - Append the given piece of data (should be a buffer-compatible object). - """ - size = len(data) - if size > self._large_buf_threshold: - if not isinstance(data, memoryview): - data = memoryview(data) - self._buffers.append((True, data)) - elif size > 0: - if self._buffers: - is_memview, b = self._buffers[-1] - new_buf = is_memview or len(b) >= self._large_buf_threshold - else: - new_buf = True - if new_buf: - self._buffers.append((False, bytearray(data))) - else: - b += data - - self._size += size - - def peek(self, size): - """ - Get a view over at most ``size`` bytes (possibly fewer) at the - current buffer position. - """ - assert size > 0 - try: - is_memview, b = self._buffers[0] - except IndexError: - return memoryview(b'') - - pos = self._first_pos - if is_memview: - return b[pos:pos + size] - else: - return memoryview(b)[pos:pos + size] - - def advance(self, size): - """ - Advance the current buffer position by ``size`` bytes. - """ - assert 0 < size <= self._size - self._size -= size - pos = self._first_pos - - buffers = self._buffers - while buffers and size > 0: - is_large, b = buffers[0] - b_remain = len(b) - size - pos - if b_remain <= 0: - buffers.popleft() - size -= len(b) - pos - pos = 0 - elif is_large: - pos += size - size = 0 - else: - # Amortized O(1) shrink for Python 2 - pos += size - if len(b) <= 2 * pos: - del b[:pos] - pos = 0 - size = 0 - - assert size == 0 - self._first_pos = pos - - -class BaseIOStream(object): - """A utility class to write to and read from a non-blocking file or socket. - - We support a non-blocking ``write()`` and a family of ``read_*()`` methods. - All of the methods take an optional ``callback`` argument and return a - `.Future` only if no callback is given. When the operation completes, - the callback will be run or the `.Future` will resolve with the data - read (or ``None`` for ``write()``). All outstanding ``Futures`` will - resolve with a `StreamClosedError` when the stream is closed; users - of the callback interface will be notified via - `.BaseIOStream.set_close_callback` instead. - - When a stream is closed due to an error, the IOStream's ``error`` - attribute contains the exception object. - - Subclasses must implement `fileno`, `close_fd`, `write_to_fd`, - `read_from_fd`, and optionally `get_fd_error`. - """ - def __init__(self, max_buffer_size=None, - read_chunk_size=None, max_write_buffer_size=None): - """`BaseIOStream` constructor. - - :arg max_buffer_size: Maximum amount of incoming data to buffer; - defaults to 100MB. - :arg read_chunk_size: Amount of data to read at one time from the - underlying transport; defaults to 64KB. - :arg max_write_buffer_size: Amount of outgoing data to buffer; - defaults to unlimited. - - .. versionchanged:: 4.0 - Add the ``max_write_buffer_size`` parameter. Changed default - ``read_chunk_size`` to 64KB. - .. versionchanged:: 5.0 - The ``io_loop`` argument (deprecated since version 4.1) has been - removed. 
- """ - self.io_loop = ioloop.IOLoop.current() - self.max_buffer_size = max_buffer_size or 104857600 - # A chunk size that is too close to max_buffer_size can cause - # spurious failures. - self.read_chunk_size = min(read_chunk_size or 65536, - self.max_buffer_size // 2) - self.max_write_buffer_size = max_write_buffer_size - self.error = None - self._read_buffer = bytearray() - self._read_buffer_pos = 0 - self._read_buffer_size = 0 - self._user_read_buffer = False - self._after_user_read_buffer = None - self._write_buffer = _StreamBuffer() - self._total_write_index = 0 - self._total_write_done_index = 0 - self._read_delimiter = None - self._read_regex = None - self._read_max_bytes = None - self._read_bytes = None - self._read_partial = False - self._read_until_close = False - self._read_callback = None - self._read_future = None - self._streaming_callback = None - self._write_callback = None - self._write_futures = collections.deque() - self._close_callback = None - self._connect_callback = None - self._connect_future = None - # _ssl_connect_future should be defined in SSLIOStream - # but it's here so we can clean it up in maybe_run_close_callback. - # TODO: refactor that so subclasses can add additional futures - # to be cancelled. - self._ssl_connect_future = None - self._connecting = False - self._state = None - self._pending_callbacks = 0 - self._closed = False - - def fileno(self): - """Returns the file descriptor for this stream.""" - raise NotImplementedError() - - def close_fd(self): - """Closes the file underlying this stream. - - ``close_fd`` is called by `BaseIOStream` and should not be called - elsewhere; other users should call `close` instead. - """ - raise NotImplementedError() - - def write_to_fd(self, data): - """Attempts to write ``data`` to the underlying file. - - Returns the number of bytes written. - """ - raise NotImplementedError() - - def read_from_fd(self, buf): - """Attempts to read from the underlying file. - - Reads up to ``len(buf)`` bytes, storing them in the buffer. - Returns the number of bytes read. Returns None if there was - nothing to read (the socket returned `~errno.EWOULDBLOCK` or - equivalent), and zero on EOF. - - .. versionchanged:: 5.0 - - Interface redesigned to take a buffer and return a number - of bytes instead of a freshly-allocated object. - """ - raise NotImplementedError() - - def get_fd_error(self): - """Returns information about any error on the underlying file. - - This method is called after the `.IOLoop` has signaled an error on the - file descriptor, and should return an Exception (such as `socket.error` - with additional information, or None if no such information is - available. - """ - return None - - def read_until_regex(self, regex, callback=None, max_bytes=None): - """Asynchronously read until we have matched the given regex. - - The result includes the data that matches the regex and anything - that came before it. If a callback is given, it will be run - with the data as an argument; if not, this method returns a - `.Future`. - - If ``max_bytes`` is not None, the connection will be closed - if more than ``max_bytes`` bytes have been read and the regex is - not satisfied. - - .. versionchanged:: 4.0 - Added the ``max_bytes`` argument. The ``callback`` argument is - now optional and a `.Future` will be returned if it is omitted. - - .. deprecated:: 5.1 - - The ``callback`` argument is deprecated and will be removed - in Tornado 6.0. Use the returned `.Future` instead. 
- - """ - future = self._set_read_callback(callback) - self._read_regex = re.compile(regex) - self._read_max_bytes = max_bytes - try: - self._try_inline_read() - except UnsatisfiableReadError as e: - # Handle this the same way as in _handle_events. - gen_log.info("Unsatisfiable read, closing connection: %s" % e) - self.close(exc_info=e) - return future - except: - if future is not None: - # Ensure that the future doesn't log an error because its - # failure was never examined. - future.add_done_callback(lambda f: f.exception()) - raise - return future - - def read_until(self, delimiter, callback=None, max_bytes=None): - """Asynchronously read until we have found the given delimiter. - - The result includes all the data read including the delimiter. - If a callback is given, it will be run with the data as an argument; - if not, this method returns a `.Future`. - - If ``max_bytes`` is not None, the connection will be closed - if more than ``max_bytes`` bytes have been read and the delimiter - is not found. - - .. versionchanged:: 4.0 - Added the ``max_bytes`` argument. The ``callback`` argument is - now optional and a `.Future` will be returned if it is omitted. - - .. deprecated:: 5.1 - - The ``callback`` argument is deprecated and will be removed - in Tornado 6.0. Use the returned `.Future` instead. - """ - future = self._set_read_callback(callback) - self._read_delimiter = delimiter - self._read_max_bytes = max_bytes - try: - self._try_inline_read() - except UnsatisfiableReadError as e: - # Handle this the same way as in _handle_events. - gen_log.info("Unsatisfiable read, closing connection: %s" % e) - self.close(exc_info=e) - return future - except: - if future is not None: - future.add_done_callback(lambda f: f.exception()) - raise - return future - - def read_bytes(self, num_bytes, callback=None, streaming_callback=None, - partial=False): - """Asynchronously read a number of bytes. - - If a ``streaming_callback`` is given, it will be called with chunks - of data as they become available, and the final result will be empty. - Otherwise, the result is all the data that was read. - If a callback is given, it will be run with the data as an argument; - if not, this method returns a `.Future`. - - If ``partial`` is true, the callback is run as soon as we have - any bytes to return (but never more than ``num_bytes``) - - .. versionchanged:: 4.0 - Added the ``partial`` argument. The callback argument is now - optional and a `.Future` will be returned if it is omitted. - - .. deprecated:: 5.1 - - The ``callback`` and ``streaming_callback`` arguments are - deprecated and will be removed in Tornado 6.0. Use the - returned `.Future` (and ``partial=True`` for - ``streaming_callback``) instead. - - """ - future = self._set_read_callback(callback) - assert isinstance(num_bytes, numbers.Integral) - self._read_bytes = num_bytes - self._read_partial = partial - if streaming_callback is not None: - warnings.warn("streaming_callback is deprecated, use partial instead", - DeprecationWarning) - self._streaming_callback = stack_context.wrap(streaming_callback) - try: - self._try_inline_read() - except: - if future is not None: - future.add_done_callback(lambda f: f.exception()) - raise - return future - - def read_into(self, buf, callback=None, partial=False): - """Asynchronously read a number of bytes. - - ``buf`` must be a writable buffer into which data will be read. - If a callback is given, it will be run with the number of read - bytes as an argument; if not, this method returns a `.Future`. 
- - If ``partial`` is true, the callback is run as soon as any bytes - have been read. Otherwise, it is run when the ``buf`` has been - entirely filled with read data. - - .. versionadded:: 5.0 - - .. deprecated:: 5.1 - - The ``callback`` argument is deprecated and will be removed - in Tornado 6.0. Use the returned `.Future` instead. - - """ - future = self._set_read_callback(callback) - - # First copy data already in read buffer - available_bytes = self._read_buffer_size - n = len(buf) - if available_bytes >= n: - end = self._read_buffer_pos + n - buf[:] = memoryview(self._read_buffer)[self._read_buffer_pos:end] - del self._read_buffer[:end] - self._after_user_read_buffer = self._read_buffer - elif available_bytes > 0: - buf[:available_bytes] = memoryview(self._read_buffer)[self._read_buffer_pos:] - - # Set up the supplied buffer as our temporary read buffer. - # The original (if it had any data remaining) has been - # saved for later. - self._user_read_buffer = True - self._read_buffer = buf - self._read_buffer_pos = 0 - self._read_buffer_size = available_bytes - self._read_bytes = n - self._read_partial = partial - - try: - self._try_inline_read() - except: - if future is not None: - future.add_done_callback(lambda f: f.exception()) - raise - return future - - def read_until_close(self, callback=None, streaming_callback=None): - """Asynchronously reads all data from the socket until it is closed. - - If a ``streaming_callback`` is given, it will be called with chunks - of data as they become available, and the final result will be empty. - Otherwise, the result is all the data that was read. - If a callback is given, it will be run with the data as an argument; - if not, this method returns a `.Future`. - - Note that if a ``streaming_callback`` is used, data will be - read from the socket as quickly as it becomes available; there - is no way to apply backpressure or cancel the reads. If flow - control or cancellation are desired, use a loop with - `read_bytes(partial=True) <.read_bytes>` instead. - - .. versionchanged:: 4.0 - The callback argument is now optional and a `.Future` will - be returned if it is omitted. - - .. deprecated:: 5.1 - - The ``callback`` and ``streaming_callback`` arguments are - deprecated and will be removed in Tornado 6.0. Use the - returned `.Future` (and `read_bytes` with ``partial=True`` - for ``streaming_callback``) instead. - - """ - future = self._set_read_callback(callback) - if streaming_callback is not None: - warnings.warn("streaming_callback is deprecated, use read_bytes(partial=True) instead", - DeprecationWarning) - self._streaming_callback = stack_context.wrap(streaming_callback) - if self.closed(): - if self._streaming_callback is not None: - self._run_read_callback(self._read_buffer_size, True) - self._run_read_callback(self._read_buffer_size, False) - return future - self._read_until_close = True - try: - self._try_inline_read() - except: - if future is not None: - future.add_done_callback(lambda f: f.exception()) - raise - return future - - def write(self, data, callback=None): - """Asynchronously write the given data to this stream. - - If ``callback`` is given, we call it when all of the buffered write - data has been successfully written to the stream. If there was - previously buffered write data and an old write callback, that - callback is simply overwritten with this new callback. - - If no ``callback`` is given, this method returns a `.Future` that - resolves (with a result of ``None``) when the write has been - completed. 
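# A sketch of read_into() from above, assuming Tornado 5.x: the caller
# supplies the buffer and the Future resolves with the byte count. Host and
# port are illustrative.
from tornado.ioloop import IOLoop
from tornado.tcpclient import TCPClient

async def main():
    stream = await TCPClient().connect("127.0.0.1", 8888)
    buf = bytearray(4096)  # preallocated and reused, avoiding per-read copies
    # partial=True resolves as soon as any data arrives instead of waiting
    # for the buffer to fill completely.
    n = await stream.read_into(buf, partial=True)
    print("filled", n, "of", len(buf), "bytes")
    stream.close()

IOLoop.current().run_sync(main)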
- - The ``data`` argument may be of type `bytes` or `memoryview`. - - .. versionchanged:: 4.0 - Now returns a `.Future` if no callback is given. - - .. versionchanged:: 4.5 - Added support for `memoryview` arguments. - - .. deprecated:: 5.1 - - The ``callback`` argument is deprecated and will be removed - in Tornado 6.0. Use the returned `.Future` instead. - - """ - self._check_closed() - if data: - if (self.max_write_buffer_size is not None and - len(self._write_buffer) + len(data) > self.max_write_buffer_size): - raise StreamBufferFullError("Reached maximum write buffer size") - self._write_buffer.append(data) - self._total_write_index += len(data) - if callback is not None: - warnings.warn("callback argument is deprecated, use returned Future instead", - DeprecationWarning) - self._write_callback = stack_context.wrap(callback) - future = None - else: - future = Future() - future.add_done_callback(lambda f: f.exception()) - self._write_futures.append((self._total_write_index, future)) - if not self._connecting: - self._handle_write() - if self._write_buffer: - self._add_io_state(self.io_loop.WRITE) - self._maybe_add_error_listener() - return future - - def set_close_callback(self, callback): - """Call the given callback when the stream is closed. - - This mostly is not necessary for applications that use the - `.Future` interface; all outstanding ``Futures`` will resolve - with a `StreamClosedError` when the stream is closed. However, - it is still useful as a way to signal that the stream has been - closed while no other read or write is in progress. - - Unlike other callback-based interfaces, ``set_close_callback`` - will not be removed in Tornado 6.0. - """ - self._close_callback = stack_context.wrap(callback) - self._maybe_add_error_listener() - - def close(self, exc_info=False): - """Close this stream. - - If ``exc_info`` is true, set the ``error`` attribute to the current - exception from `sys.exc_info` (or if ``exc_info`` is a tuple, - use that instead of `sys.exc_info`). 
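# A sketch of the Future form of write() documented above, assuming Tornado
# 5.x: awaiting the returned Future applies backpressure, resuming only once
# the buffered data has been flushed. Host, port, and payload are illustrative.
from tornado.ioloop import IOLoop
from tornado.tcpclient import TCPClient

async def main():
    stream = await TCPClient().connect("127.0.0.1", 8888)
    payload = memoryview(b"x" * (1 << 20))  # memoryview accepted since 4.5
    await stream.write(payload)  # resolves with None when fully written
    stream.close()

IOLoop.current().run_sync(main)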
- """ - if not self.closed(): - if exc_info: - if isinstance(exc_info, tuple): - self.error = exc_info[1] - elif isinstance(exc_info, BaseException): - self.error = exc_info - else: - exc_info = sys.exc_info() - if any(exc_info): - self.error = exc_info[1] - if self._read_until_close: - if (self._streaming_callback is not None and - self._read_buffer_size): - self._run_read_callback(self._read_buffer_size, True) - self._read_until_close = False - self._run_read_callback(self._read_buffer_size, False) - if self._state is not None: - self.io_loop.remove_handler(self.fileno()) - self._state = None - self.close_fd() - self._closed = True - self._maybe_run_close_callback() - - def _maybe_run_close_callback(self): - # If there are pending callbacks, don't run the close callback - # until they're done (see _maybe_add_error_handler) - if self.closed() and self._pending_callbacks == 0: - futures = [] - if self._read_future is not None: - futures.append(self._read_future) - self._read_future = None - futures += [future for _, future in self._write_futures] - self._write_futures.clear() - if self._connect_future is not None: - futures.append(self._connect_future) - self._connect_future = None - if self._ssl_connect_future is not None: - futures.append(self._ssl_connect_future) - self._ssl_connect_future = None - for future in futures: - future.set_exception(StreamClosedError(real_error=self.error)) - future.exception() - if self._close_callback is not None: - cb = self._close_callback - self._close_callback = None - self._run_callback(cb) - # Delete any unfinished callbacks to break up reference cycles. - self._read_callback = self._write_callback = None - # Clear the buffers so they can be cleared immediately even - # if the IOStream object is kept alive by a reference cycle. - # TODO: Clear the read buffer too; it currently breaks some tests. - self._write_buffer = None - - def reading(self): - """Returns true if we are currently reading from the stream.""" - return self._read_callback is not None or self._read_future is not None - - def writing(self): - """Returns true if we are currently writing to the stream.""" - return bool(self._write_buffer) - - def closed(self): - """Returns true if the stream has been closed.""" - return self._closed - - def set_nodelay(self, value): - """Sets the no-delay flag for this stream. - - By default, data written to TCP streams may be held for a time - to make the most efficient use of bandwidth (according to - Nagle's algorithm). The no-delay flag requests that data be - written as soon as possible, even if doing so would consume - additional bandwidth. - - This flag is currently defined only for TCP-based ``IOStreams``. - - .. versionadded:: 3.1 - """ - pass - - def _handle_events(self, fd, events): - if self.closed(): - gen_log.warning("Got events for closed stream %s", fd) - return - try: - if self._connecting: - # Most IOLoops will report a write failed connect - # with the WRITE event, but SelectIOLoop reports a - # READ as well so we must check for connecting before - # either. - self._handle_connect() - if self.closed(): - return - if events & self.io_loop.READ: - self._handle_read() - if self.closed(): - return - if events & self.io_loop.WRITE: - self._handle_write() - if self.closed(): - return - if events & self.io_loop.ERROR: - self.error = self.get_fd_error() - # We may have queued up a user callback in _handle_read or - # _handle_write, so don't close the IOStream until those - # callbacks have had a chance to run. 
- self.io_loop.add_callback(self.close) - return - state = self.io_loop.ERROR - if self.reading(): - state |= self.io_loop.READ - if self.writing(): - state |= self.io_loop.WRITE - if state == self.io_loop.ERROR and self._read_buffer_size == 0: - # If the connection is idle, listen for reads too so - # we can tell if the connection is closed. If there is - # data in the read buffer we won't run the close callback - # yet anyway, so we don't need to listen in this case. - state |= self.io_loop.READ - if state != self._state: - assert self._state is not None, \ - "shouldn't happen: _handle_events without self._state" - self._state = state - self.io_loop.update_handler(self.fileno(), self._state) - except UnsatisfiableReadError as e: - gen_log.info("Unsatisfiable read, closing connection: %s" % e) - self.close(exc_info=e) - except Exception as e: - gen_log.error("Uncaught exception, closing connection.", - exc_info=True) - self.close(exc_info=e) - raise - - def _run_callback(self, callback, *args): - def wrapper(): - self._pending_callbacks -= 1 - try: - return callback(*args) - except Exception as e: - app_log.error("Uncaught exception, closing connection.", - exc_info=True) - # Close the socket on an uncaught exception from a user callback - # (It would eventually get closed when the socket object is - # gc'd, but we don't want to rely on gc happening before we - # run out of file descriptors) - self.close(exc_info=e) - # Re-raise the exception so that IOLoop.handle_callback_exception - # can see it and log the error - raise - finally: - self._maybe_add_error_listener() - # We schedule callbacks to be run on the next IOLoop iteration - # rather than running them directly for several reasons: - # * Prevents unbounded stack growth when a callback calls an - # IOLoop operation that immediately runs another callback - # * Provides a predictable execution context for e.g. - # non-reentrant mutexes - # * Ensures that the try/except in wrapper() is run outside - # of the application's StackContexts - with stack_context.NullContext(): - # stack_context was already captured in callback, we don't need to - # capture it again for IOStream's wrapper. This is especially - # important if the callback was pre-wrapped before entry to - # IOStream (as in HTTPConnection._header_callback), as we could - # capture and leak the wrong context here. - self._pending_callbacks += 1 - self.io_loop.add_callback(wrapper) - - def _read_to_buffer_loop(self): - # This method is called from _handle_read and _try_inline_read. - try: - if self._read_bytes is not None: - target_bytes = self._read_bytes - elif self._read_max_bytes is not None: - target_bytes = self._read_max_bytes - elif self.reading(): - # For read_until without max_bytes, or - # read_until_close, read as much as we can before - # scanning for the delimiter. - target_bytes = None - else: - target_bytes = 0 - next_find_pos = 0 - # Pretend to have a pending callback so that an EOF in - # _read_to_buffer doesn't trigger an immediate close - # callback. At the end of this method we'll either - # establish a real pending callback via - # _read_from_buffer or run the close callback. - # - # We need two try statements here so that - # pending_callbacks is decremented before the `except` - # clause below (which calls `close` and does need to - # trigger the callback) - self._pending_callbacks += 1 - while not self.closed(): - # Read from the socket until we get EWOULDBLOCK or equivalent. 
-                # SSL sockets do some internal buffering, and if the data is
-                # sitting in the SSL object's buffer select() and friends
-                # can't see it; the only way to find out if it's there is to
-                # try to read it.
-                if self._read_to_buffer() == 0:
-                    break
-
-                self._run_streaming_callback()
-
-                # If we've read all the bytes we can use, break out of
-                # this loop. We can't just call read_from_buffer here
-                # because of subtle interactions with the
-                # pending_callback and error_listener mechanisms.
-                #
-                # If we've reached target_bytes, we know we're done.
-                if (target_bytes is not None and
-                        self._read_buffer_size >= target_bytes):
-                    break
-
-                # Otherwise, we need to call the more expensive find_read_pos.
-                # It's inefficient to do this on every read, so instead
-                # do it on the first read and whenever the read buffer
-                # size has doubled.
-                if self._read_buffer_size >= next_find_pos:
-                    pos = self._find_read_pos()
-                    if pos is not None:
-                        return pos
-                    next_find_pos = self._read_buffer_size * 2
-            return self._find_read_pos()
-        finally:
-            self._pending_callbacks -= 1
-
-    def _handle_read(self):
-        try:
-            pos = self._read_to_buffer_loop()
-        except UnsatisfiableReadError:
-            raise
-        except Exception as e:
-            gen_log.warning("error on read: %s" % e)
-            self.close(exc_info=e)
-            return
-        if pos is not None:
-            self._read_from_buffer(pos)
-            return
-        else:
-            self._maybe_run_close_callback()
-
-    def _set_read_callback(self, callback):
-        assert self._read_callback is None, "Already reading"
-        assert self._read_future is None, "Already reading"
-        if callback is not None:
-            warnings.warn("callbacks are deprecated, use returned Future instead",
-                          DeprecationWarning)
-            self._read_callback = stack_context.wrap(callback)
-        else:
-            self._read_future = Future()
-        return self._read_future
-
-    def _run_read_callback(self, size, streaming):
-        if self._user_read_buffer:
-            self._read_buffer = self._after_user_read_buffer or bytearray()
-            self._after_user_read_buffer = None
-            self._read_buffer_pos = 0
-            self._read_buffer_size = len(self._read_buffer)
-            self._user_read_buffer = False
-            result = size
-        else:
-            result = self._consume(size)
-        if streaming:
-            callback = self._streaming_callback
-        else:
-            callback = self._read_callback
-            self._read_callback = self._streaming_callback = None
-            if self._read_future is not None:
-                assert callback is None
-                future = self._read_future
-                self._read_future = None
-
-                future.set_result(result)
-        if callback is not None:
-            assert (self._read_future is None) or streaming
-            self._run_callback(callback, result)
-        else:
-            # If we scheduled a callback, we will add the error listener
-            # afterwards. If we didn't, we have to do it now.
-            self._maybe_add_error_listener()
-
-    def _try_inline_read(self):
-        """Attempt to complete the current read operation from buffered data.
-
-        If the read can be completed without blocking, schedules the
-        read callback on the next IOLoop iteration; otherwise starts
-        listening for reads on the socket.
-        """
-        # See if we've already got the data from a previous read
-        self._run_streaming_callback()
-        pos = self._find_read_pos()
-        if pos is not None:
-            self._read_from_buffer(pos)
-            return
-        self._check_closed()
-        try:
-            pos = self._read_to_buffer_loop()
-        except Exception:
-            # If there was an error in _read_to_buffer, we called close()
-            # already, but couldn't run the close callback because of
-            # _pending_callbacks. Before we escape from this function,
-            # run the close callback if applicable.
- self._maybe_run_close_callback() - raise - if pos is not None: - self._read_from_buffer(pos) - return - # We couldn't satisfy the read inline, so either close the stream - # or listen for new data. - if self.closed(): - self._maybe_run_close_callback() - else: - self._add_io_state(ioloop.IOLoop.READ) - - def _read_to_buffer(self): - """Reads from the socket and appends the result to the read buffer. - - Returns the number of bytes read. Returns 0 if there is nothing - to read (i.e. the read returns EWOULDBLOCK or equivalent). On - error closes the socket and raises an exception. - """ - try: - while True: - try: - if self._user_read_buffer: - buf = memoryview(self._read_buffer)[self._read_buffer_size:] - else: - buf = bytearray(self.read_chunk_size) - bytes_read = self.read_from_fd(buf) - except (socket.error, IOError, OSError) as e: - if errno_from_exception(e) == errno.EINTR: - continue - # ssl.SSLError is a subclass of socket.error - if self._is_connreset(e): - # Treat ECONNRESET as a connection close rather than - # an error to minimize log spam (the exception will - # be available on self.error for apps that care). - self.close(exc_info=e) - return - self.close(exc_info=e) - raise - break - if bytes_read is None: - return 0 - elif bytes_read == 0: - self.close() - return 0 - if not self._user_read_buffer: - self._read_buffer += memoryview(buf)[:bytes_read] - self._read_buffer_size += bytes_read - finally: - # Break the reference to buf so we don't waste a chunk's worth of - # memory in case an exception hangs on to our stack frame. - buf = None - if self._read_buffer_size > self.max_buffer_size: - gen_log.error("Reached maximum read buffer size") - self.close() - raise StreamBufferFullError("Reached maximum read buffer size") - return bytes_read - - def _run_streaming_callback(self): - if self._streaming_callback is not None and self._read_buffer_size: - bytes_to_consume = self._read_buffer_size - if self._read_bytes is not None: - bytes_to_consume = min(self._read_bytes, bytes_to_consume) - self._read_bytes -= bytes_to_consume - self._run_read_callback(bytes_to_consume, True) - - def _read_from_buffer(self, pos): - """Attempts to complete the currently-pending read from the buffer. - - The argument is either a position in the read buffer or None, - as returned by _find_read_pos. - """ - self._read_bytes = self._read_delimiter = self._read_regex = None - self._read_partial = False - self._run_read_callback(pos, False) - - def _find_read_pos(self): - """Attempts to find a position in the read buffer that satisfies - the currently-pending read. - - Returns a position in the buffer if the current read can be satisfied, - or None if it cannot. - """ - if (self._read_bytes is not None and - (self._read_buffer_size >= self._read_bytes or - (self._read_partial and self._read_buffer_size > 0))): - num_bytes = min(self._read_bytes, self._read_buffer_size) - return num_bytes - elif self._read_delimiter is not None: - # Multi-byte delimiters (e.g. '\r\n') may straddle two - # chunks in the read buffer, so we can't easily find them - # without collapsing the buffer. However, since protocols - # using delimited reads (as opposed to reads of a known - # length) tend to be "line" oriented, the delimiter is likely - # to be in the first few chunks. Merge the buffer gradually - # since large merges are relatively expensive and get undone in - # _consume(). 
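[Editor's note: the comments above describe how _find_read_pos scans for a
delimiter that may straddle chunk boundaries and how max_bytes bounds the
scan. A simplified, stdlib-only sketch of that logic; the names here are
illustrative, not tornado's:]

    def find_delimiter(buffer, pos, delimiter, max_bytes=None):
        # Mirrors _find_read_pos for the delimiter case: return the number
        # of bytes to consume, None if more data is needed, or raise once
        # the scanned region exceeds max_bytes (tornado raises
        # UnsatisfiableReadError at this point).
        loc = buffer.find(delimiter, pos)
        if loc != -1:
            consumed = loc - pos + len(delimiter)
            if max_bytes is not None and consumed > max_bytes:
                raise ValueError("delimiter %r not found within %d bytes"
                                 % (delimiter, max_bytes))
            return consumed
        if max_bytes is not None and len(buffer) - pos > max_bytes:
            raise ValueError("delimiter %r not found within %d bytes"
                             % (delimiter, max_bytes))
        return None

    assert find_delimiter(bytearray(b"GET / HTTP/1.0\r\n\r\n"), 0, b"\r\n\r\n") == 18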
-            if self._read_buffer:
-                loc = self._read_buffer.find(self._read_delimiter,
-                                             self._read_buffer_pos)
-                if loc != -1:
-                    loc -= self._read_buffer_pos
-                    delimiter_len = len(self._read_delimiter)
-                    self._check_max_bytes(self._read_delimiter,
-                                          loc + delimiter_len)
-                    return loc + delimiter_len
-                self._check_max_bytes(self._read_delimiter,
-                                      self._read_buffer_size)
-        elif self._read_regex is not None:
-            if self._read_buffer:
-                m = self._read_regex.search(self._read_buffer,
-                                            self._read_buffer_pos)
-                if m is not None:
-                    loc = m.end() - self._read_buffer_pos
-                    self._check_max_bytes(self._read_regex, loc)
-                    return loc
-                self._check_max_bytes(self._read_regex, self._read_buffer_size)
-        return None
-
-    def _check_max_bytes(self, delimiter, size):
-        if (self._read_max_bytes is not None and
-                size > self._read_max_bytes):
-            raise UnsatisfiableReadError(
-                "delimiter %r not found within %d bytes" % (
-                    delimiter, self._read_max_bytes))
-
-    def _handle_write(self):
-        while True:
-            size = len(self._write_buffer)
-            if not size:
-                break
-            assert size > 0
-            try:
-                if _WINDOWS:
-                    # On windows, socket.send blows up if given a
-                    # write buffer that's too large, instead of just
-                    # returning the number of bytes it was able to
-                    # process. Therefore we must not call socket.send
-                    # with more than 128KB at a time.
-                    size = 128 * 1024
-
-                num_bytes = self.write_to_fd(self._write_buffer.peek(size))
-                if num_bytes == 0:
-                    break
-                self._write_buffer.advance(num_bytes)
-                self._total_write_done_index += num_bytes
-            except (socket.error, IOError, OSError) as e:
-                if e.args[0] in _ERRNO_WOULDBLOCK:
-                    break
-                else:
-                    if not self._is_connreset(e):
-                        # Broken pipe errors are usually caused by connection
-                        # reset, and it's better not to log EPIPE errors to
-                        # minimize log spam
-                        gen_log.warning("Write error on %s: %s",
-                                        self.fileno(), e)
-                    self.close(exc_info=e)
-                    return
-
-        while self._write_futures:
-            index, future = self._write_futures[0]
-            if index > self._total_write_done_index:
-                break
-            self._write_futures.popleft()
-            future.set_result(None)
-
-        if not len(self._write_buffer):
-            if self._write_callback:
-                callback = self._write_callback
-                self._write_callback = None
-                self._run_callback(callback)
-
-    def _consume(self, loc):
-        # Consume loc bytes from the read buffer and return them
-        if loc == 0:
-            return b""
-        assert loc <= self._read_buffer_size
-        # Slice the bytearray buffer into bytes, without intermediate copying
-        b = (memoryview(self._read_buffer)
-             [self._read_buffer_pos:self._read_buffer_pos + loc]
-             ).tobytes()
-        self._read_buffer_pos += loc
-        self._read_buffer_size -= loc
-        # Amortized O(1) shrink
-        # (this heuristic is implemented natively in Python 3.4+
-        #  but is replicated here for Python 2)
-        if self._read_buffer_pos > self._read_buffer_size:
-            del self._read_buffer[:self._read_buffer_pos]
-            self._read_buffer_pos = 0
-        return b
-
-    def _check_closed(self):
-        if self.closed():
-            raise StreamClosedError(real_error=self.error)
-
-    def _maybe_add_error_listener(self):
-        # This method is part of an optimization: to detect a connection that
-        # is closed when we're not actively reading or writing, we must listen
-        # for read events. However, it is inefficient to do this when the
-        # connection is first established because we are going to read or write
-        # immediately anyway. Instead, we insert checks at various times to
-        # see if the connection is idle and add the read listener then.
-        if self._pending_callbacks != 0:
-            return
-        if self._state is None or self._state == ioloop.IOLoop.ERROR:
-            if self.closed():
-                self._maybe_run_close_callback()
-            elif (self._read_buffer_size == 0 and
-                  self._close_callback is not None):
-                self._add_io_state(ioloop.IOLoop.READ)
-
-    def _add_io_state(self, state):
-        """Adds `state` (IOLoop.{READ,WRITE} flags) to our event handler.
-
-        Implementation notes: Reads and writes have a fast path and a
-        slow path. The fast path reads synchronously from socket
-        buffers, while the slow path uses `_add_io_state` to schedule
-        an IOLoop callback. Note that in both cases, the callback is
-        run asynchronously with `_run_callback`.
-
-        To detect closed connections, we must have called
-        `_add_io_state` at some point, but we want to delay this as
-        much as possible so we don't have to set an `IOLoop.ERROR`
-        listener that will be overwritten by the next slow-path
-        operation. As long as there are callbacks scheduled for
-        fast-path ops, those callbacks may do more reads.
-        If a sequence of fast-path ops do not end in a slow-path op
-        (e.g. for an @asynchronous long-poll request), we must add
-        the error handler. This is done in `_run_callback` and `write`
-        (since the write callback is optional so we can have a
-        fast-path write with no `_run_callback`)
-        """
-        if self.closed():
-            # connection has been closed, so there can be no future events
-            return
-        if self._state is None:
-            self._state = ioloop.IOLoop.ERROR | state
-            with stack_context.NullContext():
-                self.io_loop.add_handler(
-                    self.fileno(), self._handle_events, self._state)
-        elif not self._state & state:
-            self._state = self._state | state
-            self.io_loop.update_handler(self.fileno(), self._state)
-
-    def _is_connreset(self, exc):
-        """Return true if exc is ECONNRESET or equivalent.
-
-        May be overridden in subclasses.
-        """
-        return (isinstance(exc, (socket.error, IOError)) and
-                errno_from_exception(exc) in _ERRNO_CONNRESET)
-
-
-class IOStream(BaseIOStream):
-    r"""Socket-based `IOStream` implementation.
-
-    This class supports the read and write methods from `BaseIOStream`
-    plus a `connect` method.
-
-    The ``socket`` parameter may either be connected or unconnected.
-    For server operations the socket is the result of calling
-    `socket.accept <socket.socket.accept>`. For client operations the
-    socket is created with `socket.socket`, and may either be
-    connected before passing it to the `IOStream` or connected with
-    `IOStream.connect`.
-
-    A very simple (and broken) HTTP client using this class:
-
-    .. testcode::
-
-        import tornado.ioloop
-        import tornado.iostream
-        import socket
-
-        async def main():
-            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
-            stream = tornado.iostream.IOStream(s)
-            await stream.connect(("friendfeed.com", 80))
-            await stream.write(b"GET / HTTP/1.0\r\nHost: friendfeed.com\r\n\r\n")
-            header_data = await stream.read_until(b"\r\n\r\n")
-            headers = {}
-            for line in header_data.split(b"\r\n"):
-                parts = line.split(b":")
-                if len(parts) == 2:
-                    headers[parts[0].strip()] = parts[1].strip()
-            body_data = await stream.read_bytes(int(headers[b"Content-Length"]))
-            print(body_data)
-            stream.close()
-
-        if __name__ == '__main__':
-            tornado.ioloop.IOLoop.current().run_sync(main)
-
-    ..
testoutput:: - :hide: - - """ - def __init__(self, socket, *args, **kwargs): - self.socket = socket - self.socket.setblocking(False) - super(IOStream, self).__init__(*args, **kwargs) - - def fileno(self): - return self.socket - - def close_fd(self): - self.socket.close() - self.socket = None - - def get_fd_error(self): - errno = self.socket.getsockopt(socket.SOL_SOCKET, - socket.SO_ERROR) - return socket.error(errno, os.strerror(errno)) - - def read_from_fd(self, buf): - try: - return self.socket.recv_into(buf) - except socket.error as e: - if e.args[0] in _ERRNO_WOULDBLOCK: - return None - else: - raise - finally: - buf = None - - def write_to_fd(self, data): - try: - return self.socket.send(data) - finally: - # Avoid keeping to data, which can be a memoryview. - # See https://github.com/tornadoweb/tornado/pull/2008 - del data - - def connect(self, address, callback=None, server_hostname=None): - """Connects the socket to a remote address without blocking. - - May only be called if the socket passed to the constructor was - not previously connected. The address parameter is in the - same format as for `socket.connect <socket.socket.connect>` for - the type of socket passed to the IOStream constructor, - e.g. an ``(ip, port)`` tuple. Hostnames are accepted here, - but will be resolved synchronously and block the IOLoop. - If you have a hostname instead of an IP address, the `.TCPClient` - class is recommended instead of calling this method directly. - `.TCPClient` will do asynchronous DNS resolution and handle - both IPv4 and IPv6. - - If ``callback`` is specified, it will be called with no - arguments when the connection is completed; if not this method - returns a `.Future` (whose result after a successful - connection will be the stream itself). - - In SSL mode, the ``server_hostname`` parameter will be used - for certificate validation (unless disabled in the - ``ssl_options``) and SNI (if supported; requires Python - 2.7.9+). - - Note that it is safe to call `IOStream.write - <BaseIOStream.write>` while the connection is pending, in - which case the data will be written as soon as the connection - is ready. Calling `IOStream` read methods before the socket is - connected works on some platforms but is non-portable. - - .. versionchanged:: 4.0 - If no callback is given, returns a `.Future`. - - .. versionchanged:: 4.2 - SSL certificates are validated by default; pass - ``ssl_options=dict(cert_reqs=ssl.CERT_NONE)`` or a - suitably-configured `ssl.SSLContext` to the - `SSLIOStream` constructor to disable. - - .. deprecated:: 5.1 - - The ``callback`` argument is deprecated and will be removed - in Tornado 6.0. Use the returned `.Future` instead. - - """ - self._connecting = True - if callback is not None: - warnings.warn("callback argument is deprecated, use returned Future instead", - DeprecationWarning) - self._connect_callback = stack_context.wrap(callback) - future = None - else: - future = self._connect_future = Future() - try: - self.socket.connect(address) - except socket.error as e: - # In non-blocking mode we expect connect() to raise an - # exception with EINPROGRESS or EWOULDBLOCK. - # - # On freebsd, other errors such as ECONNREFUSED may be - # returned immediately when attempting to connect to - # localhost, so handle them the same way as an error - # reported later in _handle_connect. 
- if (errno_from_exception(e) not in _ERRNO_INPROGRESS and - errno_from_exception(e) not in _ERRNO_WOULDBLOCK): - if future is None: - gen_log.warning("Connect error on fd %s: %s", - self.socket.fileno(), e) - self.close(exc_info=e) - return future - self._add_io_state(self.io_loop.WRITE) - return future - - def start_tls(self, server_side, ssl_options=None, server_hostname=None): - """Convert this `IOStream` to an `SSLIOStream`. - - This enables protocols that begin in clear-text mode and - switch to SSL after some initial negotiation (such as the - ``STARTTLS`` extension to SMTP and IMAP). - - This method cannot be used if there are outstanding reads - or writes on the stream, or if there is any data in the - IOStream's buffer (data in the operating system's socket - buffer is allowed). This means it must generally be used - immediately after reading or writing the last clear-text - data. It can also be used immediately after connecting, - before any reads or writes. - - The ``ssl_options`` argument may be either an `ssl.SSLContext` - object or a dictionary of keyword arguments for the - `ssl.wrap_socket` function. The ``server_hostname`` argument - will be used for certificate validation unless disabled - in the ``ssl_options``. - - This method returns a `.Future` whose result is the new - `SSLIOStream`. After this method has been called, - any other operation on the original stream is undefined. - - If a close callback is defined on this stream, it will be - transferred to the new stream. - - .. versionadded:: 4.0 - - .. versionchanged:: 4.2 - SSL certificates are validated by default; pass - ``ssl_options=dict(cert_reqs=ssl.CERT_NONE)`` or a - suitably-configured `ssl.SSLContext` to disable. - """ - if (self._read_callback or self._read_future or - self._write_callback or self._write_futures or - self._connect_callback or self._connect_future or - self._pending_callbacks or self._closed or - self._read_buffer or self._write_buffer): - raise ValueError("IOStream is not idle; cannot convert to SSL") - if ssl_options is None: - if server_side: - ssl_options = _server_ssl_defaults - else: - ssl_options = _client_ssl_defaults - - socket = self.socket - self.io_loop.remove_handler(socket) - self.socket = None - socket = ssl_wrap_socket(socket, ssl_options, - server_hostname=server_hostname, - server_side=server_side, - do_handshake_on_connect=False) - orig_close_callback = self._close_callback - self._close_callback = None - - future = Future() - ssl_stream = SSLIOStream(socket, ssl_options=ssl_options) - # Wrap the original close callback so we can fail our Future as well. - # If we had an "unwrap" counterpart to this method we would need - # to restore the original callback after our Future resolves - # so that repeated wrap/unwrap calls don't build up layers. - - def close_callback(): - if not future.done(): - # Note that unlike most Futures returned by IOStream, - # this one passes the underlying error through directly - # instead of wrapping everything in a StreamClosedError - # with a real_error attribute. This is because once the - # connection is established it's more helpful to raise - # the SSLError directly than to hide it behind a - # StreamClosedError (and the client is expecting SSL - # issues rather than network issues since this method is - # named start_tls). 
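[Editor's note: start_tls above enables in-band TLS upgrades. A hedged
sketch of the STARTTLS flow the docstring mentions; the address and the
protocol lines are illustrative only (a real SMTP client would also send
EHLO and check response codes):]

    import socket
    import ssl

    from tornado.iostream import IOStream

    async def starttls_client(host, port):
        stream = IOStream(socket.socket())
        await stream.connect((host, port))
        await stream.read_until(b"\r\n")      # server greeting
        await stream.write(b"STARTTLS\r\n")
        await stream.read_until(b"\r\n")      # e.g. "220 ready for TLS"
        # The stream must be idle here; start_tls returns a new SSLIOStream
        # and the original IOStream must not be used afterwards.
        return await stream.start_tls(
            server_side=False,
            ssl_options=ssl.create_default_context(),
            server_hostname=host)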
- future.set_exception(ssl_stream.error or StreamClosedError()) - if orig_close_callback is not None: - orig_close_callback() - ssl_stream.set_close_callback(close_callback) - ssl_stream._ssl_connect_callback = lambda: future.set_result(ssl_stream) - ssl_stream.max_buffer_size = self.max_buffer_size - ssl_stream.read_chunk_size = self.read_chunk_size - return future - - def _handle_connect(self): - try: - err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) - except socket.error as e: - # Hurd doesn't allow SO_ERROR for loopback sockets because all - # errors for such sockets are reported synchronously. - if errno_from_exception(e) == errno.ENOPROTOOPT: - err = 0 - if err != 0: - self.error = socket.error(err, os.strerror(err)) - # IOLoop implementations may vary: some of them return - # an error state before the socket becomes writable, so - # in that case a connection failure would be handled by the - # error path in _handle_events instead of here. - if self._connect_future is None: - gen_log.warning("Connect error on fd %s: %s", - self.socket.fileno(), errno.errorcode[err]) - self.close() - return - if self._connect_callback is not None: - callback = self._connect_callback - self._connect_callback = None - self._run_callback(callback) - if self._connect_future is not None: - future = self._connect_future - self._connect_future = None - future.set_result(self) - self._connecting = False - - def set_nodelay(self, value): - if (self.socket is not None and - self.socket.family in (socket.AF_INET, socket.AF_INET6)): - try: - self.socket.setsockopt(socket.IPPROTO_TCP, - socket.TCP_NODELAY, 1 if value else 0) - except socket.error as e: - # Sometimes setsockopt will fail if the socket is closed - # at the wrong time. This can happen with HTTPServer - # resetting the value to false between requests. - if e.errno != errno.EINVAL and not self._is_connreset(e): - raise - - -class SSLIOStream(IOStream): - """A utility class to write to and read from a non-blocking SSL socket. - - If the socket passed to the constructor is already connected, - it should be wrapped with:: - - ssl.wrap_socket(sock, do_handshake_on_connect=False, **kwargs) - - before constructing the `SSLIOStream`. Unconnected sockets will be - wrapped when `IOStream.connect` is finished. - """ - def __init__(self, *args, **kwargs): - """The ``ssl_options`` keyword argument may either be an - `ssl.SSLContext` object or a dictionary of keywords arguments - for `ssl.wrap_socket` - """ - self._ssl_options = kwargs.pop('ssl_options', _client_ssl_defaults) - super(SSLIOStream, self).__init__(*args, **kwargs) - self._ssl_accepting = True - self._handshake_reading = False - self._handshake_writing = False - self._ssl_connect_callback = None - self._server_hostname = None - - # If the socket is already connected, attempt to start the handshake. - try: - self.socket.getpeername() - except socket.error: - pass - else: - # Indirectly start the handshake, which will run on the next - # IOLoop iteration and then the real IO state will be set in - # _handle_events. 
- self._add_io_state(self.io_loop.WRITE) - - def reading(self): - return self._handshake_reading or super(SSLIOStream, self).reading() - - def writing(self): - return self._handshake_writing or super(SSLIOStream, self).writing() - - def _do_ssl_handshake(self): - # Based on code from test_ssl.py in the python stdlib - try: - self._handshake_reading = False - self._handshake_writing = False - self.socket.do_handshake() - except ssl.SSLError as err: - if err.args[0] == ssl.SSL_ERROR_WANT_READ: - self._handshake_reading = True - return - elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE: - self._handshake_writing = True - return - elif err.args[0] in (ssl.SSL_ERROR_EOF, - ssl.SSL_ERROR_ZERO_RETURN): - return self.close(exc_info=err) - elif err.args[0] == ssl.SSL_ERROR_SSL: - try: - peer = self.socket.getpeername() - except Exception: - peer = '(not connected)' - gen_log.warning("SSL Error on %s %s: %s", - self.socket.fileno(), peer, err) - return self.close(exc_info=err) - raise - except socket.error as err: - # Some port scans (e.g. nmap in -sT mode) have been known - # to cause do_handshake to raise EBADF and ENOTCONN, so make - # those errors quiet as well. - # https://groups.google.com/forum/?fromgroups#!topic/python-tornado/ApucKJat1_0 - if (self._is_connreset(err) or - err.args[0] in (errno.EBADF, errno.ENOTCONN)): - return self.close(exc_info=err) - raise - except AttributeError as err: - # On Linux, if the connection was reset before the call to - # wrap_socket, do_handshake will fail with an - # AttributeError. - return self.close(exc_info=err) - else: - self._ssl_accepting = False - if not self._verify_cert(self.socket.getpeercert()): - self.close() - return - self._run_ssl_connect_callback() - - def _run_ssl_connect_callback(self): - if self._ssl_connect_callback is not None: - callback = self._ssl_connect_callback - self._ssl_connect_callback = None - self._run_callback(callback) - if self._ssl_connect_future is not None: - future = self._ssl_connect_future - self._ssl_connect_future = None - future.set_result(self) - - def _verify_cert(self, peercert): - """Returns True if peercert is valid according to the configured - validation mode and hostname. - - The ssl handshake already tested the certificate for a valid - CA signature; the only thing that remains is to check - the hostname. - """ - if isinstance(self._ssl_options, dict): - verify_mode = self._ssl_options.get('cert_reqs', ssl.CERT_NONE) - elif isinstance(self._ssl_options, ssl.SSLContext): - verify_mode = self._ssl_options.verify_mode - assert verify_mode in (ssl.CERT_NONE, ssl.CERT_REQUIRED, ssl.CERT_OPTIONAL) - if verify_mode == ssl.CERT_NONE or self._server_hostname is None: - return True - cert = self.socket.getpeercert() - if cert is None and verify_mode == ssl.CERT_REQUIRED: - gen_log.warning("No SSL certificate given") - return False - try: - ssl.match_hostname(peercert, self._server_hostname) - except ssl.CertificateError as e: - gen_log.warning("Invalid SSL certificate: %s" % e) - return False - else: - return True - - def _handle_read(self): - if self._ssl_accepting: - self._do_ssl_handshake() - return - super(SSLIOStream, self)._handle_read() - - def _handle_write(self): - if self._ssl_accepting: - self._do_ssl_handshake() - return - super(SSLIOStream, self)._handle_write() - - def connect(self, address, callback=None, server_hostname=None): - self._server_hostname = server_hostname - # Ignore the result of connect(). If it fails, - # wait_for_handshake will raise an error too. 
This is - # necessary for the old semantics of the connect callback - # (which takes no arguments). In 6.0 this can be refactored to - # be a regular coroutine. - fut = super(SSLIOStream, self).connect(address) - fut.add_done_callback(lambda f: f.exception()) - return self.wait_for_handshake(callback) - - def _handle_connect(self): - # Call the superclass method to check for errors. - super(SSLIOStream, self)._handle_connect() - if self.closed(): - return - # When the connection is complete, wrap the socket for SSL - # traffic. Note that we do this by overriding _handle_connect - # instead of by passing a callback to super().connect because - # user callbacks are enqueued asynchronously on the IOLoop, - # but since _handle_events calls _handle_connect immediately - # followed by _handle_write we need this to be synchronous. - # - # The IOLoop will get confused if we swap out self.socket while the - # fd is registered, so remove it now and re-register after - # wrap_socket(). - self.io_loop.remove_handler(self.socket) - old_state = self._state - self._state = None - self.socket = ssl_wrap_socket(self.socket, self._ssl_options, - server_hostname=self._server_hostname, - do_handshake_on_connect=False) - self._add_io_state(old_state) - - def wait_for_handshake(self, callback=None): - """Wait for the initial SSL handshake to complete. - - If a ``callback`` is given, it will be called with no - arguments once the handshake is complete; otherwise this - method returns a `.Future` which will resolve to the - stream itself after the handshake is complete. - - Once the handshake is complete, information such as - the peer's certificate and NPN/ALPN selections may be - accessed on ``self.socket``. - - This method is intended for use on server-side streams - or after using `IOStream.start_tls`; it should not be used - with `IOStream.connect` (which already waits for the - handshake to complete). It may only be called once per stream. - - .. versionadded:: 4.2 - - .. deprecated:: 5.1 - - The ``callback`` argument is deprecated and will be removed - in Tornado 6.0. Use the returned `.Future` instead. - - """ - if (self._ssl_connect_callback is not None or - self._ssl_connect_future is not None): - raise RuntimeError("Already waiting") - if callback is not None: - warnings.warn("callback argument is deprecated, use returned Future instead", - DeprecationWarning) - self._ssl_connect_callback = stack_context.wrap(callback) - future = None - else: - future = self._ssl_connect_future = Future() - if not self._ssl_accepting: - self._run_ssl_connect_callback() - return future - - def write_to_fd(self, data): - try: - return self.socket.send(data) - except ssl.SSLError as e: - if e.args[0] == ssl.SSL_ERROR_WANT_WRITE: - # In Python 3.5+, SSLSocket.send raises a WANT_WRITE error if - # the socket is not writeable; we need to transform this into - # an EWOULDBLOCK socket.error or a zero return value, - # either of which will be recognized by the caller of this - # method. Prior to Python 3.5, an unwriteable socket would - # simply return 0 bytes written. - return 0 - raise - finally: - # Avoid keeping to data, which can be a memoryview. 
- # See https://github.com/tornadoweb/tornado/pull/2008 - del data - - def read_from_fd(self, buf): - try: - if self._ssl_accepting: - # If the handshake hasn't finished yet, there can't be anything - # to read (attempting to read may or may not raise an exception - # depending on the SSL version) - return None - try: - return self.socket.recv_into(buf) - except ssl.SSLError as e: - # SSLError is a subclass of socket.error, so this except - # block must come first. - if e.args[0] == ssl.SSL_ERROR_WANT_READ: - return None - else: - raise - except socket.error as e: - if e.args[0] in _ERRNO_WOULDBLOCK: - return None - else: - raise - finally: - buf = None - - def _is_connreset(self, e): - if isinstance(e, ssl.SSLError) and e.args[0] == ssl.SSL_ERROR_EOF: - return True - return super(SSLIOStream, self)._is_connreset(e) - - -class PipeIOStream(BaseIOStream): - """Pipe-based `IOStream` implementation. - - The constructor takes an integer file descriptor (such as one returned - by `os.pipe`) rather than an open file object. Pipes are generally - one-way, so a `PipeIOStream` can be used for reading or writing but not - both. - """ - def __init__(self, fd, *args, **kwargs): - self.fd = fd - self._fio = io.FileIO(self.fd, "r+") - _set_nonblocking(fd) - super(PipeIOStream, self).__init__(*args, **kwargs) - - def fileno(self): - return self.fd - - def close_fd(self): - self._fio.close() - - def write_to_fd(self, data): - try: - return os.write(self.fd, data) - finally: - # Avoid keeping to data, which can be a memoryview. - # See https://github.com/tornadoweb/tornado/pull/2008 - del data - - def read_from_fd(self, buf): - try: - return self._fio.readinto(buf) - except (IOError, OSError) as e: - if errno_from_exception(e) == errno.EBADF: - # If the writing half of a pipe is closed, select will - # report it as readable but reads will fail with EBADF. - self.close(exc_info=e) - return None - else: - raise - finally: - buf = None - - -def doctests(): - import doctest - return doctest.DocTestSuite() diff --git a/lib/tornado/locale.py b/lib/tornado/locale.py deleted file mode 100755 index d45172f3..00000000 --- a/lib/tornado/locale.py +++ /dev/null @@ -1,521 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2009 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Translation methods for generating localized strings. - -To load a locale and generate a translated string:: - - user_locale = tornado.locale.get("es_LA") - print(user_locale.translate("Sign out")) - -`tornado.locale.get()` returns the closest matching locale, not necessarily the -specific locale you requested. You can support pluralization with -additional arguments to `~Locale.translate()`, e.g.:: - - people = [...] - message = user_locale.translate( - "%(list)s is online", "%(list)s are online", len(people)) - print(message % {"list": user_locale.list(people)}) - -The first string is chosen if ``len(people) == 1``, otherwise the second -string is chosen. 
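[Editor's note: a runnable sketch of the pluralized translate() call from
the docstring above; with no translation files loaded it falls back to the
en_US pass-through:]

    import tornado.locale

    # tornado.locale.load_translations("translations")  # if CSV files exist
    user_locale = tornado.locale.get("es_LA")
    people = ["Ana", "Luis"]
    message = user_locale.translate(
        "%(list)s is online", "%(list)s are online", len(people))
    print(message % {"list": user_locale.list(people)})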
- -Applications should call one of `load_translations` (which uses a simple -CSV format) or `load_gettext_translations` (which uses the ``.mo`` format -supported by `gettext` and related tools). If neither method is called, -the `Locale.translate` method will simply return the original string. -""" - -from __future__ import absolute_import, division, print_function - -import codecs -import csv -import datetime -from io import BytesIO -import numbers -import os -import re - -from tornado import escape -from tornado.log import gen_log -from tornado.util import PY3 - -from tornado._locale_data import LOCALE_NAMES - -_default_locale = "en_US" -_translations = {} # type: dict -_supported_locales = frozenset([_default_locale]) -_use_gettext = False -CONTEXT_SEPARATOR = "\x04" - - -def get(*locale_codes): - """Returns the closest match for the given locale codes. - - We iterate over all given locale codes in order. If we have a tight - or a loose match for the code (e.g., "en" for "en_US"), we return - the locale. Otherwise we move to the next code in the list. - - By default we return ``en_US`` if no translations are found for any of - the specified locales. You can change the default locale with - `set_default_locale()`. - """ - return Locale.get_closest(*locale_codes) - - -def set_default_locale(code): - """Sets the default locale. - - The default locale is assumed to be the language used for all strings - in the system. The translations loaded from disk are mappings from - the default locale to the destination locale. Consequently, you don't - need to create a translation file for the default locale. - """ - global _default_locale - global _supported_locales - _default_locale = code - _supported_locales = frozenset(list(_translations.keys()) + [_default_locale]) - - -def load_translations(directory, encoding=None): - """Loads translations from CSV files in a directory. - - Translations are strings with optional Python-style named placeholders - (e.g., ``My name is %(name)s``) and their associated translations. - - The directory should have translation files of the form ``LOCALE.csv``, - e.g. ``es_GT.csv``. The CSV files should have two or three columns: string, - translation, and an optional plural indicator. Plural indicators should - be one of "plural" or "singular". A given string can have both singular - and plural forms. For example ``%(name)s liked this`` may have a - different verb conjugation depending on whether %(name)s is one - name or a list of names. There should be two rows in the CSV file for - that string, one with plural indicator "singular", and one "plural". - For strings with no verbs that would change on translation, simply - use "unknown" or the empty string (or don't include the column at all). - - The file is read using the `csv` module in the default "excel" dialect. - In this format there should not be spaces after the commas. - - If no ``encoding`` parameter is given, the encoding will be - detected automatically (among UTF-8 and UTF-16) if the file - contains a byte-order marker (BOM), defaulting to UTF-8 if no BOM - is present. - - Example translation ``es_LA.csv``:: - - "I love you","Te amo" - "%(name)s liked this","A %(name)s les gustó esto","plural" - "%(name)s liked this","A %(name)s le gustó esto","singular" - - .. versionchanged:: 4.3 - Added ``encoding`` parameter. Added support for BOM-based encoding - detection, UTF-16, and UTF-8-with-BOM. 
- """ - global _translations - global _supported_locales - _translations = {} - for path in os.listdir(directory): - if not path.endswith(".csv"): - continue - locale, extension = path.split(".") - if not re.match("[a-z]+(_[A-Z]+)?$", locale): - gen_log.error("Unrecognized locale %r (path: %s)", locale, - os.path.join(directory, path)) - continue - full_path = os.path.join(directory, path) - if encoding is None: - # Try to autodetect encoding based on the BOM. - with open(full_path, 'rb') as f: - data = f.read(len(codecs.BOM_UTF16_LE)) - if data in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE): - encoding = 'utf-16' - else: - # utf-8-sig is "utf-8 with optional BOM". It's discouraged - # in most cases but is common with CSV files because Excel - # cannot read utf-8 files without a BOM. - encoding = 'utf-8-sig' - if PY3: - # python 3: csv.reader requires a file open in text mode. - # Force utf8 to avoid dependence on $LANG environment variable. - f = open(full_path, "r", encoding=encoding) - else: - # python 2: csv can only handle byte strings (in ascii-compatible - # encodings), which we decode below. Transcode everything into - # utf8 before passing it to csv.reader. - f = BytesIO() - with codecs.open(full_path, "r", encoding=encoding) as infile: - f.write(escape.utf8(infile.read())) - f.seek(0) - _translations[locale] = {} - for i, row in enumerate(csv.reader(f)): - if not row or len(row) < 2: - continue - row = [escape.to_unicode(c).strip() for c in row] - english, translation = row[:2] - if len(row) > 2: - plural = row[2] or "unknown" - else: - plural = "unknown" - if plural not in ("plural", "singular", "unknown"): - gen_log.error("Unrecognized plural indicator %r in %s line %d", - plural, path, i + 1) - continue - _translations[locale].setdefault(plural, {})[english] = translation - f.close() - _supported_locales = frozenset(list(_translations.keys()) + [_default_locale]) - gen_log.debug("Supported locales: %s", sorted(_supported_locales)) - - -def load_gettext_translations(directory, domain): - """Loads translations from `gettext`'s locale tree - - Locale tree is similar to system's ``/usr/share/locale``, like:: - - {directory}/{lang}/LC_MESSAGES/{domain}.mo - - Three steps are required to have your app translated: - - 1. Generate POT translation file:: - - xgettext --language=Python --keyword=_:1,2 -d mydomain file1.py file2.html etc - - 2. Merge against existing POT file:: - - msgmerge old.po mydomain.po > new.po - - 3. Compile:: - - msgfmt mydomain.po -o {directory}/pt_BR/LC_MESSAGES/mydomain.mo - """ - import gettext - global _translations - global _supported_locales - global _use_gettext - _translations = {} - for lang in os.listdir(directory): - if lang.startswith('.'): - continue # skip .svn, etc - if os.path.isfile(os.path.join(directory, lang)): - continue - try: - os.stat(os.path.join(directory, lang, "LC_MESSAGES", domain + ".mo")) - _translations[lang] = gettext.translation(domain, directory, - languages=[lang]) - except Exception as e: - gen_log.error("Cannot load translation for '%s': %s", lang, str(e)) - continue - _supported_locales = frozenset(list(_translations.keys()) + [_default_locale]) - _use_gettext = True - gen_log.debug("Supported locales: %s", sorted(_supported_locales)) - - -def get_supported_locales(): - """Returns a list of all the supported locale codes.""" - return _supported_locales - - -class Locale(object): - """Object representing a locale. 
-
-    After calling one of `load_translations` or `load_gettext_translations`,
-    call `get` or `get_closest` to get a Locale object.
-    """
-    @classmethod
-    def get_closest(cls, *locale_codes):
-        """Returns the closest match for the given locale code."""
-        for code in locale_codes:
-            if not code:
-                continue
-            code = code.replace("-", "_")
-            parts = code.split("_")
-            if len(parts) > 2:
-                continue
-            elif len(parts) == 2:
-                code = parts[0].lower() + "_" + parts[1].upper()
-            if code in _supported_locales:
-                return cls.get(code)
-            if parts[0].lower() in _supported_locales:
-                return cls.get(parts[0].lower())
-        return cls.get(_default_locale)
-
-    @classmethod
-    def get(cls, code):
-        """Returns the Locale for the given locale code.
-
-        If it is not supported, we raise an exception.
-        """
-        if not hasattr(cls, "_cache"):
-            cls._cache = {}
-        if code not in cls._cache:
-            assert code in _supported_locales
-            translations = _translations.get(code, None)
-            if translations is None:
-                locale = CSVLocale(code, {})
-            elif _use_gettext:
-                locale = GettextLocale(code, translations)
-            else:
-                locale = CSVLocale(code, translations)
-            cls._cache[code] = locale
-        return cls._cache[code]
-
-    def __init__(self, code, translations):
-        self.code = code
-        self.name = LOCALE_NAMES.get(code, {}).get("name", u"Unknown")
-        self.rtl = False
-        for prefix in ["fa", "ar", "he"]:
-            if self.code.startswith(prefix):
-                self.rtl = True
-                break
-        self.translations = translations
-
-        # Initialize strings for date formatting
-        _ = self.translate
-        self._months = [
-            _("January"), _("February"), _("March"), _("April"),
-            _("May"), _("June"), _("July"), _("August"),
-            _("September"), _("October"), _("November"), _("December")]
-        self._weekdays = [
-            _("Monday"), _("Tuesday"), _("Wednesday"), _("Thursday"),
-            _("Friday"), _("Saturday"), _("Sunday")]
-
-    def translate(self, message, plural_message=None, count=None):
-        """Returns the translation for the given message for this locale.
-
-        If ``plural_message`` is given, you must also provide
-        ``count``. We return ``plural_message`` when ``count != 1``,
-        and we return the singular form for the given message when
-        ``count == 1``.
-        """
-        raise NotImplementedError()
-
-    def pgettext(self, context, message, plural_message=None, count=None):
-        raise NotImplementedError()
-
-    def format_date(self, date, gmt_offset=0, relative=True, shorter=False,
-                    full_format=False):
-        """Formats the given date (which should be GMT).
-
-        By default, we return a relative time (e.g., "2 minutes ago"). You
-        can return an absolute date string with ``relative=False``.
-
-        You can force a full format date ("July 10, 1980") with
-        ``full_format=True``.
-
-        This method is primarily intended for dates in the past.
-        For dates in the future, we fall back to full format.
-        """
-        if isinstance(date, numbers.Real):
-            date = datetime.datetime.utcfromtimestamp(date)
-        now = datetime.datetime.utcnow()
-        if date > now:
-            if relative and (date - now).seconds < 60:
-                # Due to clock skew, some things are slightly
-                # in the future. Round timestamps in the immediate
-                # future down to now in relative mode.
-                date = now
-            else:
-                # Otherwise, future dates always use the full format.
- full_format = True - local_date = date - datetime.timedelta(minutes=gmt_offset) - local_now = now - datetime.timedelta(minutes=gmt_offset) - local_yesterday = local_now - datetime.timedelta(hours=24) - difference = now - date - seconds = difference.seconds - days = difference.days - - _ = self.translate - format = None - if not full_format: - if relative and days == 0: - if seconds < 50: - return _("1 second ago", "%(seconds)d seconds ago", - seconds) % {"seconds": seconds} - - if seconds < 50 * 60: - minutes = round(seconds / 60.0) - return _("1 minute ago", "%(minutes)d minutes ago", - minutes) % {"minutes": minutes} - - hours = round(seconds / (60.0 * 60)) - return _("1 hour ago", "%(hours)d hours ago", - hours) % {"hours": hours} - - if days == 0: - format = _("%(time)s") - elif days == 1 and local_date.day == local_yesterday.day and \ - relative: - format = _("yesterday") if shorter else \ - _("yesterday at %(time)s") - elif days < 5: - format = _("%(weekday)s") if shorter else \ - _("%(weekday)s at %(time)s") - elif days < 334: # 11mo, since confusing for same month last year - format = _("%(month_name)s %(day)s") if shorter else \ - _("%(month_name)s %(day)s at %(time)s") - - if format is None: - format = _("%(month_name)s %(day)s, %(year)s") if shorter else \ - _("%(month_name)s %(day)s, %(year)s at %(time)s") - - tfhour_clock = self.code not in ("en", "en_US", "zh_CN") - if tfhour_clock: - str_time = "%d:%02d" % (local_date.hour, local_date.minute) - elif self.code == "zh_CN": - str_time = "%s%d:%02d" % ( - (u'\u4e0a\u5348', u'\u4e0b\u5348')[local_date.hour >= 12], - local_date.hour % 12 or 12, local_date.minute) - else: - str_time = "%d:%02d %s" % ( - local_date.hour % 12 or 12, local_date.minute, - ("am", "pm")[local_date.hour >= 12]) - - return format % { - "month_name": self._months[local_date.month - 1], - "weekday": self._weekdays[local_date.weekday()], - "day": str(local_date.day), - "year": str(local_date.year), - "time": str_time - } - - def format_day(self, date, gmt_offset=0, dow=True): - """Formats the given date as a day of week. - - Example: "Monday, January 22". You can remove the day of week with - ``dow=False``. - """ - local_date = date - datetime.timedelta(minutes=gmt_offset) - _ = self.translate - if dow: - return _("%(weekday)s, %(month_name)s %(day)s") % { - "month_name": self._months[local_date.month - 1], - "weekday": self._weekdays[local_date.weekday()], - "day": str(local_date.day), - } - else: - return _("%(month_name)s %(day)s") % { - "month_name": self._months[local_date.month - 1], - "day": str(local_date.day), - } - - def list(self, parts): - """Returns a comma-separated list for the given list of parts. - - The format is, e.g., "A, B and C", "A and B" or just "A" for lists - of size 1. 
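[Editor's note: a small usage sketch for the formatting helpers documented
above, using the default en_US locale so no translation files are needed:]

    import datetime

    import tornado.locale

    locale = tornado.locale.get("en_US")
    five_min_ago = datetime.datetime.utcnow() - datetime.timedelta(minutes=5)
    print(locale.format_date(five_min_ago))                # "5 minutes ago"
    print(locale.format_date(five_min_ago, relative=False))
    print(locale.list(["A", "B", "C"]))                    # "A, B and C"
    print(locale.friendly_number(1234567))                 # "1,234,567"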
- """ - _ = self.translate - if len(parts) == 0: - return "" - if len(parts) == 1: - return parts[0] - comma = u' \u0648 ' if self.code.startswith("fa") else u", " - return _("%(commas)s and %(last)s") % { - "commas": comma.join(parts[:-1]), - "last": parts[len(parts) - 1], - } - - def friendly_number(self, value): - """Returns a comma-separated number for the given integer.""" - if self.code not in ("en", "en_US"): - return str(value) - value = str(value) - parts = [] - while value: - parts.append(value[-3:]) - value = value[:-3] - return ",".join(reversed(parts)) - - -class CSVLocale(Locale): - """Locale implementation using tornado's CSV translation format.""" - def translate(self, message, plural_message=None, count=None): - if plural_message is not None: - assert count is not None - if count != 1: - message = plural_message - message_dict = self.translations.get("plural", {}) - else: - message_dict = self.translations.get("singular", {}) - else: - message_dict = self.translations.get("unknown", {}) - return message_dict.get(message, message) - - def pgettext(self, context, message, plural_message=None, count=None): - if self.translations: - gen_log.warning('pgettext is not supported by CSVLocale') - return self.translate(message, plural_message, count) - - -class GettextLocale(Locale): - """Locale implementation using the `gettext` module.""" - def __init__(self, code, translations): - try: - # python 2 - self.ngettext = translations.ungettext - self.gettext = translations.ugettext - except AttributeError: - # python 3 - self.ngettext = translations.ngettext - self.gettext = translations.gettext - # self.gettext must exist before __init__ is called, since it - # calls into self.translate - super(GettextLocale, self).__init__(code, translations) - - def translate(self, message, plural_message=None, count=None): - if plural_message is not None: - assert count is not None - return self.ngettext(message, plural_message, count) - else: - return self.gettext(message) - - def pgettext(self, context, message, plural_message=None, count=None): - """Allows to set context for translation, accepts plural forms. - - Usage example:: - - pgettext("law", "right") - pgettext("good", "right") - - Plural message example:: - - pgettext("organization", "club", "clubs", len(clubs)) - pgettext("stick", "club", "clubs", len(clubs)) - - To generate POT file with context, add following options to step 1 - of `load_gettext_translations` sequence:: - - xgettext [basic options] --keyword=pgettext:1c,2 --keyword=pgettext:1c,2,3 - - .. versionadded:: 4.2 - """ - if plural_message is not None: - assert count is not None - msgs_with_ctxt = ("%s%s%s" % (context, CONTEXT_SEPARATOR, message), - "%s%s%s" % (context, CONTEXT_SEPARATOR, plural_message), - count) - result = self.ngettext(*msgs_with_ctxt) - if CONTEXT_SEPARATOR in result: - # Translation not found - result = self.ngettext(message, plural_message, count) - return result - else: - msg_with_ctxt = "%s%s%s" % (context, CONTEXT_SEPARATOR, message) - result = self.gettext(msg_with_ctxt) - if CONTEXT_SEPARATOR in result: - # Translation not found - result = message - return result diff --git a/lib/tornado/locks.py b/lib/tornado/locks.py deleted file mode 100755 index 9566a457..00000000 --- a/lib/tornado/locks.py +++ /dev/null @@ -1,526 +0,0 @@ -# Copyright 2015 The Tornado Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import absolute_import, division, print_function - -import collections -from concurrent.futures import CancelledError - -from tornado import gen, ioloop -from tornado.concurrent import Future, future_set_result_unless_cancelled - -__all__ = ['Condition', 'Event', 'Semaphore', 'BoundedSemaphore', 'Lock'] - - -class _TimeoutGarbageCollector(object): - """Base class for objects that periodically clean up timed-out waiters. - - Avoids memory leak in a common pattern like: - - while True: - yield condition.wait(short_timeout) - print('looping....') - """ - def __init__(self): - self._waiters = collections.deque() # Futures. - self._timeouts = 0 - - def _garbage_collect(self): - # Occasionally clear timed-out waiters. - self._timeouts += 1 - if self._timeouts > 100: - self._timeouts = 0 - self._waiters = collections.deque( - w for w in self._waiters if not w.done()) - - -class Condition(_TimeoutGarbageCollector): - """A condition allows one or more coroutines to wait until notified. - - Like a standard `threading.Condition`, but does not need an underlying lock - that is acquired and released. - - With a `Condition`, coroutines can wait to be notified by other coroutines: - - .. testcode:: - - from tornado import gen - from tornado.ioloop import IOLoop - from tornado.locks import Condition - - condition = Condition() - - async def waiter(): - print("I'll wait right here") - await condition.wait() - print("I'm done waiting") - - async def notifier(): - print("About to notify") - condition.notify() - print("Done notifying") - - async def runner(): - # Wait for waiter() and notifier() in parallel - await gen.multi([waiter(), notifier()]) - - IOLoop.current().run_sync(runner) - - .. testoutput:: - - I'll wait right here - About to notify - Done notifying - I'm done waiting - - `wait` takes an optional ``timeout`` argument, which is either an absolute - timestamp:: - - io_loop = IOLoop.current() - - # Wait up to 1 second for a notification. - await condition.wait(timeout=io_loop.time() + 1) - - ...or a `datetime.timedelta` for a timeout relative to the current time:: - - # Wait up to 1 second. - await condition.wait(timeout=datetime.timedelta(seconds=1)) - - The method returns False if there's no notification before the deadline. - - .. versionchanged:: 5.0 - Previously, waiters could be notified synchronously from within - `notify`. Now, the notification will always be received on the - next iteration of the `.IOLoop`. - """ - - def __init__(self): - super(Condition, self).__init__() - self.io_loop = ioloop.IOLoop.current() - - def __repr__(self): - result = '<%s' % (self.__class__.__name__, ) - if self._waiters: - result += ' waiters[%s]' % len(self._waiters) - return result + '>' - - def wait(self, timeout=None): - """Wait for `.notify`. - - Returns a `.Future` that resolves ``True`` if the condition is notified, - or ``False`` after a timeout. 
- """ - waiter = Future() - self._waiters.append(waiter) - if timeout: - def on_timeout(): - if not waiter.done(): - future_set_result_unless_cancelled(waiter, False) - self._garbage_collect() - io_loop = ioloop.IOLoop.current() - timeout_handle = io_loop.add_timeout(timeout, on_timeout) - waiter.add_done_callback( - lambda _: io_loop.remove_timeout(timeout_handle)) - return waiter - - def notify(self, n=1): - """Wake ``n`` waiters.""" - waiters = [] # Waiters we plan to run right now. - while n and self._waiters: - waiter = self._waiters.popleft() - if not waiter.done(): # Might have timed out. - n -= 1 - waiters.append(waiter) - - for waiter in waiters: - future_set_result_unless_cancelled(waiter, True) - - def notify_all(self): - """Wake all waiters.""" - self.notify(len(self._waiters)) - - -class Event(object): - """An event blocks coroutines until its internal flag is set to True. - - Similar to `threading.Event`. - - A coroutine can wait for an event to be set. Once it is set, calls to - ``yield event.wait()`` will not block unless the event has been cleared: - - .. testcode:: - - from tornado import gen - from tornado.ioloop import IOLoop - from tornado.locks import Event - - event = Event() - - async def waiter(): - print("Waiting for event") - await event.wait() - print("Not waiting this time") - await event.wait() - print("Done") - - async def setter(): - print("About to set the event") - event.set() - - async def runner(): - await gen.multi([waiter(), setter()]) - - IOLoop.current().run_sync(runner) - - .. testoutput:: - - Waiting for event - About to set the event - Not waiting this time - Done - """ - def __init__(self): - self._value = False - self._waiters = set() - - def __repr__(self): - return '<%s %s>' % ( - self.__class__.__name__, 'set' if self.is_set() else 'clear') - - def is_set(self): - """Return ``True`` if the internal flag is true.""" - return self._value - - def set(self): - """Set the internal flag to ``True``. All waiters are awakened. - - Calling `.wait` once the flag is set will not block. - """ - if not self._value: - self._value = True - - for fut in self._waiters: - if not fut.done(): - fut.set_result(None) - - def clear(self): - """Reset the internal flag to ``False``. - - Calls to `.wait` will block until `.set` is called. - """ - self._value = False - - def wait(self, timeout=None): - """Block until the internal flag is true. - - Returns a Future, which raises `tornado.util.TimeoutError` after a - timeout. - """ - fut = Future() - if self._value: - fut.set_result(None) - return fut - self._waiters.add(fut) - fut.add_done_callback(lambda fut: self._waiters.remove(fut)) - if timeout is None: - return fut - else: - timeout_fut = gen.with_timeout(timeout, fut, quiet_exceptions=(CancelledError,)) - # This is a slightly clumsy workaround for the fact that - # gen.with_timeout doesn't cancel its futures. Cancelling - # fut will remove it from the waiters list. - timeout_fut.add_done_callback(lambda tf: fut.cancel() if not fut.done() else None) - return timeout_fut - - -class _ReleasingContextManager(object): - """Releases a Lock or Semaphore at the end of a "with" statement. - - with (yield semaphore.acquire()): - pass - - # Now semaphore.release() has been called. - """ - def __init__(self, obj): - self._obj = obj - - def __enter__(self): - pass - - def __exit__(self, exc_type, exc_val, exc_tb): - self._obj.release() - - -class Semaphore(_TimeoutGarbageCollector): - """A lock that can be acquired a fixed number of times before blocking. 
- - A Semaphore manages a counter representing the number of `.release` calls - minus the number of `.acquire` calls, plus an initial value. The `.acquire` - method blocks if necessary until it can return without making the counter - negative. - - Semaphores limit access to a shared resource. To allow access for two - workers at a time: - - .. testsetup:: semaphore - - from collections import deque - - from tornado import gen - from tornado.ioloop import IOLoop - from tornado.concurrent import Future - - # Ensure reliable doctest output: resolve Futures one at a time. - futures_q = deque([Future() for _ in range(3)]) - - async def simulator(futures): - for f in futures: - # simulate the asynchronous passage of time - await gen.sleep(0) - await gen.sleep(0) - f.set_result(None) - - IOLoop.current().add_callback(simulator, list(futures_q)) - - def use_some_resource(): - return futures_q.popleft() - - .. testcode:: semaphore - - from tornado import gen - from tornado.ioloop import IOLoop - from tornado.locks import Semaphore - - sem = Semaphore(2) - - async def worker(worker_id): - await sem.acquire() - try: - print("Worker %d is working" % worker_id) - await use_some_resource() - finally: - print("Worker %d is done" % worker_id) - sem.release() - - async def runner(): - # Join all workers. - await gen.multi([worker(i) for i in range(3)]) - - IOLoop.current().run_sync(runner) - - .. testoutput:: semaphore - - Worker 0 is working - Worker 1 is working - Worker 0 is done - Worker 2 is working - Worker 1 is done - Worker 2 is done - - Workers 0 and 1 are allowed to run concurrently, but worker 2 waits until - the semaphore has been released once, by worker 0. - - The semaphore can be used as an async context manager:: - - async def worker(worker_id): - async with sem: - print("Worker %d is working" % worker_id) - await use_some_resource() - - # Now the semaphore has been released. - print("Worker %d is done" % worker_id) - - For compatibility with older versions of Python, `.acquire` is a - context manager, so ``worker`` could also be written as:: - - @gen.coroutine - def worker(worker_id): - with (yield sem.acquire()): - print("Worker %d is working" % worker_id) - yield use_some_resource() - - # Now the semaphore has been released. - print("Worker %d is done" % worker_id) - - .. versionchanged:: 4.3 - Added ``async with`` support in Python 3.5. - - """ - def __init__(self, value=1): - super(Semaphore, self).__init__() - if value < 0: - raise ValueError('semaphore initial value must be >= 0') - - self._value = value - - def __repr__(self): - res = super(Semaphore, self).__repr__() - extra = 'locked' if self._value == 0 else 'unlocked,value:{0}'.format( - self._value) - if self._waiters: - extra = '{0},waiters:{1}'.format(extra, len(self._waiters)) - return '<{0} [{1}]>'.format(res[1:-1], extra) - - def release(self): - """Increment the counter and wake one waiter.""" - self._value += 1 - while self._waiters: - waiter = self._waiters.popleft() - if not waiter.done(): - self._value -= 1 - - # If the waiter is a coroutine paused at - # - # with (yield semaphore.acquire()): - # - # then the context manager's __exit__ calls release() at the end - # of the "with" block. - waiter.set_result(_ReleasingContextManager(self)) - break - - def acquire(self, timeout=None): - """Decrement the counter. Returns a Future. - - Block if the counter is zero and wait for a `.release`. The Future - raises `.TimeoutError` after the deadline. 
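
# Sketch (demo names assumed) of the acquire-with-deadline behavior
# documented above: when the semaphore stays exhausted past the timeout,
# the returned Future raises TimeoutError instead of resolving.
import datetime

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.locks import Semaphore

sem = Semaphore(1)

async def demo():
    await sem.acquire()  # counter drops to 0; nothing left to hand out
    try:
        await sem.acquire(timeout=datetime.timedelta(seconds=0.2))
    except gen.TimeoutError:
        print("semaphore still held after 0.2s")
    finally:
        sem.release()

IOLoop.current().run_sync(demo)
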
- """ - waiter = Future() - if self._value > 0: - self._value -= 1 - waiter.set_result(_ReleasingContextManager(self)) - else: - self._waiters.append(waiter) - if timeout: - def on_timeout(): - if not waiter.done(): - waiter.set_exception(gen.TimeoutError()) - self._garbage_collect() - io_loop = ioloop.IOLoop.current() - timeout_handle = io_loop.add_timeout(timeout, on_timeout) - waiter.add_done_callback( - lambda _: io_loop.remove_timeout(timeout_handle)) - return waiter - - def __enter__(self): - raise RuntimeError( - "Use Semaphore like 'with (yield semaphore.acquire())', not like" - " 'with semaphore'") - - __exit__ = __enter__ - - @gen.coroutine - def __aenter__(self): - yield self.acquire() - - @gen.coroutine - def __aexit__(self, typ, value, tb): - self.release() - - -class BoundedSemaphore(Semaphore): - """A semaphore that prevents release() being called too many times. - - If `.release` would increment the semaphore's value past the initial - value, it raises `ValueError`. Semaphores are mostly used to guard - resources with limited capacity, so a semaphore released too many times - is a sign of a bug. - """ - def __init__(self, value=1): - super(BoundedSemaphore, self).__init__(value=value) - self._initial_value = value - - def release(self): - """Increment the counter and wake one waiter.""" - if self._value >= self._initial_value: - raise ValueError("Semaphore released too many times") - super(BoundedSemaphore, self).release() - - -class Lock(object): - """A lock for coroutines. - - A Lock begins unlocked, and `acquire` locks it immediately. While it is - locked, a coroutine that yields `acquire` waits until another coroutine - calls `release`. - - Releasing an unlocked lock raises `RuntimeError`. - - A Lock can be used as an async context manager with the ``async - with`` statement: - - >>> from tornado import locks - >>> lock = locks.Lock() - >>> - >>> async def f(): - ... async with lock: - ... # Do something holding the lock. - ... pass - ... - ... # Now the lock is released. - - For compatibility with older versions of Python, the `.acquire` - method asynchronously returns a regular context manager: - - >>> async def f2(): - ... with (yield lock.acquire()): - ... # Do something holding the lock. - ... pass - ... - ... # Now the lock is released. - - .. versionchanged:: 4.3 - Added ``async with`` support in Python 3.5. - - """ - def __init__(self): - self._block = BoundedSemaphore(value=1) - - def __repr__(self): - return "<%s _block=%s>" % ( - self.__class__.__name__, - self._block) - - def acquire(self, timeout=None): - """Attempt to lock. Returns a Future. - - Returns a Future, which raises `tornado.util.TimeoutError` after a - timeout. - """ - return self._block.acquire(timeout) - - def release(self): - """Unlock. - - The first coroutine in line waiting for `acquire` gets the lock. - - If not locked, raise a `RuntimeError`. 
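
# Sketch of the release() contract described above: releasing an
# unlocked Lock raises RuntimeError, while normal use pairs acquire and
# release automatically via ``async with``.
from tornado.ioloop import IOLoop
from tornado.locks import Lock

lock = Lock()

async def demo():
    async with lock:
        print("holding the lock")
    # The context manager already released the lock, so this raises.
    try:
        lock.release()
    except RuntimeError:
        print("lock was already unlocked")

IOLoop.current().run_sync(demo)
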
- """ - try: - self._block.release() - except ValueError: - raise RuntimeError('release unlocked lock') - - def __enter__(self): - raise RuntimeError( - "Use Lock like 'with (yield lock)', not like 'with lock'") - - __exit__ = __enter__ - - @gen.coroutine - def __aenter__(self): - yield self.acquire() - - @gen.coroutine - def __aexit__(self, typ, value, tb): - self.release() diff --git a/lib/tornado/log.py b/lib/tornado/log.py deleted file mode 100755 index cda905c9..00000000 --- a/lib/tornado/log.py +++ /dev/null @@ -1,290 +0,0 @@ -# -# Copyright 2012 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Logging support for Tornado. - -Tornado uses three logger streams: - -* ``tornado.access``: Per-request logging for Tornado's HTTP servers (and - potentially other servers in the future) -* ``tornado.application``: Logging of errors from application code (i.e. - uncaught exceptions from callbacks) -* ``tornado.general``: General-purpose logging, including any errors - or warnings from Tornado itself. - -These streams may be configured independently using the standard library's -`logging` module. For example, you may wish to send ``tornado.access`` logs -to a separate file for analysis. -""" -from __future__ import absolute_import, division, print_function - -import logging -import logging.handlers -import sys - -from tornado.escape import _unicode -from tornado.util import unicode_type, basestring_type - -try: - import colorama -except ImportError: - colorama = None - -try: - import curses # type: ignore -except ImportError: - curses = None - -# Logger objects for internal tornado use -access_log = logging.getLogger("tornado.access") -app_log = logging.getLogger("tornado.application") -gen_log = logging.getLogger("tornado.general") - - -def _stderr_supports_color(): - try: - if hasattr(sys.stderr, 'isatty') and sys.stderr.isatty(): - if curses: - curses.setupterm() - if curses.tigetnum("colors") > 0: - return True - elif colorama: - if sys.stderr is getattr(colorama.initialise, 'wrapped_stderr', - object()): - return True - except Exception: - # Very broad exception handling because it's always better to - # fall back to non-colored logs than to break at startup. - pass - return False - - -def _safe_unicode(s): - try: - return _unicode(s) - except UnicodeDecodeError: - return repr(s) - - -class LogFormatter(logging.Formatter): - """Log formatter used in Tornado. - - Key features of this formatter are: - - * Color support when logging to a terminal that supports it. - * Timestamps on every log line. - * Robust against str/bytes encoding problems. - - This formatter is enabled automatically by - `tornado.options.parse_command_line` or `tornado.options.parse_config_file` - (unless ``--logging=none`` is used). - - Color support on Windows versions that do not support ANSI color codes is - enabled by use of the colorama__ library. Applications that wish to use - this must first initialize colorama with a call to ``colorama.init``. - See the colorama documentation for details. 
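
# Sketch (hand-wired handler, assumed for illustration): LogFormatter can
# also be attached directly, without parse_command_line, when an
# application manages its own logging configuration.
import logging

from tornado.log import LogFormatter

channel = logging.StreamHandler()
channel.setFormatter(LogFormatter())  # color is auto-detected on a tty

root = logging.getLogger()
root.setLevel(logging.INFO)
root.addHandler(channel)
root.info("formatted with a level letter, timestamp and module:lineno prefix")
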
- - __ https://pypi.python.org/pypi/colorama - - .. versionchanged:: 4.5 - Added support for ``colorama``. Changed the constructor - signature to be compatible with `logging.config.dictConfig`. - """ - DEFAULT_FORMAT = \ - '%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s' - DEFAULT_DATE_FORMAT = '%y%m%d %H:%M:%S' - DEFAULT_COLORS = { - logging.DEBUG: 4, # Blue - logging.INFO: 2, # Green - logging.WARNING: 3, # Yellow - logging.ERROR: 1, # Red - } - - def __init__(self, fmt=DEFAULT_FORMAT, datefmt=DEFAULT_DATE_FORMAT, - style='%', color=True, colors=DEFAULT_COLORS): - r""" - :arg bool color: Enables color support. - :arg str fmt: Log message format. - It will be applied to the attributes dict of log records. The - text between ``%(color)s`` and ``%(end_color)s`` will be colored - depending on the level if color support is on. - :arg dict colors: color mappings from logging level to terminal color - code - :arg str datefmt: Datetime format. - Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``. - - .. versionchanged:: 3.2 - - Added ``fmt`` and ``datefmt`` arguments. - """ - logging.Formatter.__init__(self, datefmt=datefmt) - self._fmt = fmt - - self._colors = {} - if color and _stderr_supports_color(): - if curses is not None: - # The curses module has some str/bytes confusion in - # python3. Until version 3.2.3, most methods return - # bytes, but only accept strings. In addition, we want to - # output these strings with the logging module, which - # works with unicode strings. The explicit calls to - # unicode() below are harmless in python2 but will do the - # right conversion in python 3. - fg_color = (curses.tigetstr("setaf") or - curses.tigetstr("setf") or "") - if (3, 0) < sys.version_info < (3, 2, 3): - fg_color = unicode_type(fg_color, "ascii") - - for levelno, code in colors.items(): - self._colors[levelno] = unicode_type(curses.tparm(fg_color, code), "ascii") - self._normal = unicode_type(curses.tigetstr("sgr0"), "ascii") - else: - # If curses is not present (currently we'll only get here for - # colorama on windows), assume hard-coded ANSI color codes. - for levelno, code in colors.items(): - self._colors[levelno] = '\033[2;3%dm' % code - self._normal = '\033[0m' - else: - self._normal = '' - - def format(self, record): - try: - message = record.getMessage() - assert isinstance(message, basestring_type) # guaranteed by logging - # Encoding notes: The logging module prefers to work with character - # strings, but only enforces that log messages are instances of - # basestring. In python 2, non-ascii bytestrings will make - # their way through the logging framework until they blow up with - # an unhelpful decoding error (with this formatter it happens - # when we attach the prefix, but there are other opportunities for - # exceptions further along in the framework). - # - # If a byte string makes it this far, convert it to unicode to - # ensure it will make it out to the logs. Use repr() as a fallback - # to ensure that all byte strings can be converted successfully, - # but don't do it by default so we don't add extra quotes to ascii - # bytestrings. This is a bit of a hacky place to do this, but - # it's worth it since the encoding errors that would otherwise - # result are so useless (and tornado is fond of using utf8-encoded - # byte strings wherever possible). 
- record.message = _safe_unicode(message) - except Exception as e: - record.message = "Bad message (%r): %r" % (e, record.__dict__) - - record.asctime = self.formatTime(record, self.datefmt) - - if record.levelno in self._colors: - record.color = self._colors[record.levelno] - record.end_color = self._normal - else: - record.color = record.end_color = '' - - formatted = self._fmt % record.__dict__ - - if record.exc_info: - if not record.exc_text: - record.exc_text = self.formatException(record.exc_info) - if record.exc_text: - # exc_text contains multiple lines. We need to _safe_unicode - # each line separately so that non-utf8 bytes don't cause - # all the newlines to turn into '\n'. - lines = [formatted.rstrip()] - lines.extend(_safe_unicode(ln) for ln in record.exc_text.split('\n')) - formatted = '\n'.join(lines) - return formatted.replace("\n", "\n ") - - -def enable_pretty_logging(options=None, logger=None): - """Turns on formatted logging output as configured. - - This is called automatically by `tornado.options.parse_command_line` - and `tornado.options.parse_config_file`. - """ - if options is None: - import tornado.options - options = tornado.options.options - if options.logging is None or options.logging.lower() == 'none': - return - if logger is None: - logger = logging.getLogger() - logger.setLevel(getattr(logging, options.logging.upper())) - if options.log_file_prefix: - rotate_mode = options.log_rotate_mode - if rotate_mode == 'size': - channel = logging.handlers.RotatingFileHandler( - filename=options.log_file_prefix, - maxBytes=options.log_file_max_size, - backupCount=options.log_file_num_backups) - elif rotate_mode == 'time': - channel = logging.handlers.TimedRotatingFileHandler( - filename=options.log_file_prefix, - when=options.log_rotate_when, - interval=options.log_rotate_interval, - backupCount=options.log_file_num_backups) - else: - error_message = 'The value of log_rotate_mode option should be ' +\ - '"size" or "time", not "%s".' % rotate_mode - raise ValueError(error_message) - channel.setFormatter(LogFormatter(color=False)) - logger.addHandler(channel) - - if (options.log_to_stderr or - (options.log_to_stderr is None and not logger.handlers)): - # Set up color if we are in a tty and curses is installed - channel = logging.StreamHandler() - channel.setFormatter(LogFormatter()) - logger.addHandler(channel) - - -def define_logging_options(options=None): - """Add logging-related flags to ``options``. - - These options are present automatically on the default options instance; - this method is only necessary if you have created your own `.OptionParser`. - - .. versionadded:: 4.2 - This function existed in prior versions but was broken and undocumented until 4.2. - """ - if options is None: - # late import to prevent cycle - import tornado.options - options = tornado.options.options - options.define("logging", default="info", - help=("Set the Python log level. If 'none', tornado won't touch the " - "logging configuration."), - metavar="debug|info|warning|error|none") - options.define("log_to_stderr", type=bool, default=None, - help=("Send log output to stderr (colorized if possible). " - "By default use stderr if --log_file_prefix is not set and " - "no other logging is configured.")) - options.define("log_file_prefix", type=str, default=None, metavar="PATH", - help=("Path prefix for log files. " - "Note that if you are running multiple tornado processes, " - "log_file_prefix must be different for each of them (e.g. 
" - "include the port number)")) - options.define("log_file_max_size", type=int, default=100 * 1000 * 1000, - help="max size of log files before rollover") - options.define("log_file_num_backups", type=int, default=10, - help="number of log files to keep") - - options.define("log_rotate_when", type=str, default='midnight', - help=("specify the type of TimedRotatingFileHandler interval " - "other options:('S', 'M', 'H', 'D', 'W0'-'W6')")) - options.define("log_rotate_interval", type=int, default=1, - help="The interval value of timed rotating") - - options.define("log_rotate_mode", type=str, default='size', - help="The mode of rotating files(time or size)") - - options.add_parse_callback(lambda: enable_pretty_logging(options)) diff --git a/lib/tornado/netutil.py b/lib/tornado/netutil.py deleted file mode 100755 index e63683ad..00000000 --- a/lib/tornado/netutil.py +++ /dev/null @@ -1,575 +0,0 @@ -# -# Copyright 2011 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Miscellaneous network utility code.""" - -from __future__ import absolute_import, division, print_function - -import errno -import os -import sys -import socket -import stat - -from tornado.concurrent import dummy_executor, run_on_executor -from tornado import gen -from tornado.ioloop import IOLoop -from tornado.platform.auto import set_close_exec -from tornado.util import PY3, Configurable, errno_from_exception - -try: - import ssl -except ImportError: - # ssl is not available on Google App Engine - ssl = None - -if PY3: - xrange = range - -if ssl is not None: - # Note that the naming of ssl.Purpose is confusing; the purpose - # of a context is to authentiate the opposite side of the connection. - _client_ssl_defaults = ssl.create_default_context( - ssl.Purpose.SERVER_AUTH) - _server_ssl_defaults = ssl.create_default_context( - ssl.Purpose.CLIENT_AUTH) - if hasattr(ssl, 'OP_NO_COMPRESSION'): - # See netutil.ssl_options_to_context - _client_ssl_defaults.options |= ssl.OP_NO_COMPRESSION - _server_ssl_defaults.options |= ssl.OP_NO_COMPRESSION -else: - # Google App Engine - _client_ssl_defaults = dict(cert_reqs=None, - ca_certs=None) - _server_ssl_defaults = {} - -# ThreadedResolver runs getaddrinfo on a thread. If the hostname is unicode, -# getaddrinfo attempts to import encodings.idna. If this is done at -# module-import time, the import lock is already held by the main thread, -# leading to deadlock. Avoid it by caching the idna encoder on the main -# thread now. -u'foo'.encode('idna') - -# For undiagnosed reasons, 'latin1' codec may also need to be preloaded. -u'foo'.encode('latin1') - -# These errnos indicate that a non-blocking operation must be retried -# at a later time. On most platforms they're the same value, but on -# some they differ. 
-_ERRNO_WOULDBLOCK = (errno.EWOULDBLOCK, errno.EAGAIN) - -if hasattr(errno, "WSAEWOULDBLOCK"): - _ERRNO_WOULDBLOCK += (errno.WSAEWOULDBLOCK,) # type: ignore - -# Default backlog used when calling sock.listen() -_DEFAULT_BACKLOG = 128 - - -def bind_sockets(port, address=None, family=socket.AF_UNSPEC, - backlog=_DEFAULT_BACKLOG, flags=None, reuse_port=False): - """Creates listening sockets bound to the given port and address. - - Returns a list of socket objects (multiple sockets are returned if - the given address maps to multiple IP addresses, which is most common - for mixed IPv4 and IPv6 use). - - Address may be either an IP address or hostname. If it's a hostname, - the server will listen on all IP addresses associated with the - name. Address may be an empty string or None to listen on all - available interfaces. Family may be set to either `socket.AF_INET` - or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise - both will be used if available. - - The ``backlog`` argument has the same meaning as for - `socket.listen() <socket.socket.listen>`. - - ``flags`` is a bitmask of AI_* flags to `~socket.getaddrinfo`, like - ``socket.AI_PASSIVE | socket.AI_NUMERICHOST``. - - ``reuse_port`` option sets ``SO_REUSEPORT`` option for every socket - in the list. If your platform doesn't support this option ValueError will - be raised. - """ - if reuse_port and not hasattr(socket, "SO_REUSEPORT"): - raise ValueError("the platform doesn't support SO_REUSEPORT") - - sockets = [] - if address == "": - address = None - if not socket.has_ipv6 and family == socket.AF_UNSPEC: - # Python can be compiled with --disable-ipv6, which causes - # operations on AF_INET6 sockets to fail, but does not - # automatically exclude those results from getaddrinfo - # results. - # http://bugs.python.org/issue16208 - family = socket.AF_INET - if flags is None: - flags = socket.AI_PASSIVE - bound_port = None - for res in set(socket.getaddrinfo(address, port, family, socket.SOCK_STREAM, - 0, flags)): - af, socktype, proto, canonname, sockaddr = res - if (sys.platform == 'darwin' and address == 'localhost' and - af == socket.AF_INET6 and sockaddr[3] != 0): - # Mac OS X includes a link-local address fe80::1%lo0 in the - # getaddrinfo results for 'localhost'. However, the firewall - # doesn't understand that this is a local address and will - # prompt for access (often repeatedly, due to an apparent - # bug in its ability to remember granting access to an - # application). Skip these addresses. - continue - try: - sock = socket.socket(af, socktype, proto) - except socket.error as e: - if errno_from_exception(e) == errno.EAFNOSUPPORT: - continue - raise - set_close_exec(sock.fileno()) - if os.name != 'nt': - try: - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - except socket.error as e: - if errno_from_exception(e) != errno.ENOPROTOOPT: - # Hurd doesn't support SO_REUSEADDR. - raise - if reuse_port: - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) - if af == socket.AF_INET6: - # On linux, ipv6 sockets accept ipv4 too by default, - # but this makes it impossible to bind to both - # 0.0.0.0 in ipv4 and :: in ipv6. On other systems, - # separate sockets *must* be used to listen for both ipv4 - # and ipv6. For consistency, always disable ipv4 on our - # ipv6 sockets and use a separate ipv4 socket when needed. - # - # Python 2.x on windows doesn't have IPPROTO_IPV6. 
- if hasattr(socket, "IPPROTO_IPV6"): - sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1) - - # automatic port allocation with port=None - # should bind on the same port on IPv4 and IPv6 - host, requested_port = sockaddr[:2] - if requested_port == 0 and bound_port is not None: - sockaddr = tuple([host, bound_port] + list(sockaddr[2:])) - - sock.setblocking(0) - sock.bind(sockaddr) - bound_port = sock.getsockname()[1] - sock.listen(backlog) - sockets.append(sock) - return sockets - - -if hasattr(socket, 'AF_UNIX'): - def bind_unix_socket(file, mode=0o600, backlog=_DEFAULT_BACKLOG): - """Creates a listening unix socket. - - If a socket with the given name already exists, it will be deleted. - If any other file with that name exists, an exception will be - raised. - - Returns a socket object (not a list of socket objects like - `bind_sockets`) - """ - sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - set_close_exec(sock.fileno()) - try: - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - except socket.error as e: - if errno_from_exception(e) != errno.ENOPROTOOPT: - # Hurd doesn't support SO_REUSEADDR - raise - sock.setblocking(0) - try: - st = os.stat(file) - except OSError as err: - if errno_from_exception(err) != errno.ENOENT: - raise - else: - if stat.S_ISSOCK(st.st_mode): - os.remove(file) - else: - raise ValueError("File %s exists and is not a socket", file) - sock.bind(file) - os.chmod(file, mode) - sock.listen(backlog) - return sock - - -def add_accept_handler(sock, callback): - """Adds an `.IOLoop` event handler to accept new connections on ``sock``. - - When a connection is accepted, ``callback(connection, address)`` will - be run (``connection`` is a socket object, and ``address`` is the - address of the other end of the connection). Note that this signature - is different from the ``callback(fd, events)`` signature used for - `.IOLoop` handlers. - - A callable is returned which, when called, will remove the `.IOLoop` - event handler and stop processing further incoming connections. - - .. versionchanged:: 5.0 - The ``io_loop`` argument (deprecated since version 4.1) has been removed. - - .. versionchanged:: 5.0 - A callable is returned (``None`` was returned before). - """ - io_loop = IOLoop.current() - removed = [False] - - def accept_handler(fd, events): - # More connections may come in while we're handling callbacks; - # to prevent starvation of other tasks we must limit the number - # of connections we accept at a time. Ideally we would accept - # up to the number of connections that were waiting when we - # entered this method, but this information is not available - # (and rearranging this method to call accept() as many times - # as possible before running any callbacks would have adverse - # effects on load balancing in multiprocess configurations). - # Instead, we use the (default) listen backlog as a rough - # heuristic for the number of connections we can reasonably - # accept at once. - for i in xrange(_DEFAULT_BACKLOG): - if removed[0]: - # The socket was probably closed - return - try: - connection, address = sock.accept() - except socket.error as e: - # _ERRNO_WOULDBLOCK indicate we have accepted every - # connection that is available. - if errno_from_exception(e) in _ERRNO_WOULDBLOCK: - return - # ECONNABORTED indicates that there was a connection - # but it was closed while still in the accept queue. - # (observed on FreeBSD). 
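
# Usage sketch for the two helpers above (port and handler names are
# assumed): bind listening sockets, then accept connections on the IOLoop.
# add_accept_handler returns a callable that stops further accepting.
from tornado.ioloop import IOLoop
from tornado.netutil import add_accept_handler, bind_sockets

def on_connect(connection, address):
    print("connection from %r" % (address,))
    connection.close()

sockets = bind_sockets(8888, address="127.0.0.1")
removers = [add_accept_handler(sock, on_connect) for sock in sockets]
IOLoop.current().start()  # call removers[i]() later to stop accepting
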
- if errno_from_exception(e) == errno.ECONNABORTED: - continue - raise - set_close_exec(connection.fileno()) - callback(connection, address) - - def remove_handler(): - io_loop.remove_handler(sock) - removed[0] = True - - io_loop.add_handler(sock, accept_handler, IOLoop.READ) - return remove_handler - - -def is_valid_ip(ip): - """Returns true if the given string is a well-formed IP address. - - Supports IPv4 and IPv6. - """ - if not ip or '\x00' in ip: - # getaddrinfo resolves empty strings to localhost, and truncates - # on zero bytes. - return False - try: - res = socket.getaddrinfo(ip, 0, socket.AF_UNSPEC, - socket.SOCK_STREAM, - 0, socket.AI_NUMERICHOST) - return bool(res) - except socket.gaierror as e: - if e.args[0] == socket.EAI_NONAME: - return False - raise - return True - - -class Resolver(Configurable): - """Configurable asynchronous DNS resolver interface. - - By default, a blocking implementation is used (which simply calls - `socket.getaddrinfo`). An alternative implementation can be - chosen with the `Resolver.configure <.Configurable.configure>` - class method:: - - Resolver.configure('tornado.netutil.ThreadedResolver') - - The implementations of this interface included with Tornado are - - * `tornado.netutil.DefaultExecutorResolver` - * `tornado.netutil.BlockingResolver` (deprecated) - * `tornado.netutil.ThreadedResolver` (deprecated) - * `tornado.netutil.OverrideResolver` - * `tornado.platform.twisted.TwistedResolver` - * `tornado.platform.caresresolver.CaresResolver` - - .. versionchanged:: 5.0 - The default implementation has changed from `BlockingResolver` to - `DefaultExecutorResolver`. - """ - @classmethod - def configurable_base(cls): - return Resolver - - @classmethod - def configurable_default(cls): - return DefaultExecutorResolver - - def resolve(self, host, port, family=socket.AF_UNSPEC, callback=None): - """Resolves an address. - - The ``host`` argument is a string which may be a hostname or a - literal IP address. - - Returns a `.Future` whose result is a list of (family, - address) pairs, where address is a tuple suitable to pass to - `socket.connect <socket.socket.connect>` (i.e. a ``(host, - port)`` pair for IPv4; additional fields may be present for - IPv6). If a ``callback`` is passed, it will be run with the - result as an argument when it is complete. - - :raises IOError: if the address cannot be resolved. - - .. versionchanged:: 4.4 - Standardized all implementations to raise `IOError`. - - .. deprecated:: 5.1 - The ``callback`` argument is deprecated and will be removed in 6.0. - Use the returned awaitable object instead. - """ - raise NotImplementedError() - - def close(self): - """Closes the `Resolver`, freeing any resources used. - - .. versionadded:: 3.1 - - """ - pass - - -def _resolve_addr(host, port, family=socket.AF_UNSPEC): - # On Solaris, getaddrinfo fails if the given port is not found - # in /etc/services and no socket type is given, so we must pass - # one here. The socket type used here doesn't seem to actually - # matter (we discard the one we get back in the results), - # so the addresses we return should still be usable with SOCK_DGRAM. - addrinfo = socket.getaddrinfo(host, port, family, socket.SOCK_STREAM) - results = [] - for family, socktype, proto, canonname, address in addrinfo: - results.append((family, address)) - return results - - -class DefaultExecutorResolver(Resolver): - """Resolver implementation using `.IOLoop.run_in_executor`. - - .. 
versionadded:: 5.0 - """ - @gen.coroutine - def resolve(self, host, port, family=socket.AF_UNSPEC): - result = yield IOLoop.current().run_in_executor( - None, _resolve_addr, host, port, family) - raise gen.Return(result) - - -class ExecutorResolver(Resolver): - """Resolver implementation using a `concurrent.futures.Executor`. - - Use this instead of `ThreadedResolver` when you require additional - control over the executor being used. - - The executor will be shut down when the resolver is closed unless - ``close_resolver=False``; use this if you want to reuse the same - executor elsewhere. - - .. versionchanged:: 5.0 - The ``io_loop`` argument (deprecated since version 4.1) has been removed. - - .. deprecated:: 5.0 - The default `Resolver` now uses `.IOLoop.run_in_executor`; use that instead - of this class. - """ - def initialize(self, executor=None, close_executor=True): - self.io_loop = IOLoop.current() - if executor is not None: - self.executor = executor - self.close_executor = close_executor - else: - self.executor = dummy_executor - self.close_executor = False - - def close(self): - if self.close_executor: - self.executor.shutdown() - self.executor = None - - @run_on_executor - def resolve(self, host, port, family=socket.AF_UNSPEC): - return _resolve_addr(host, port, family) - - -class BlockingResolver(ExecutorResolver): - """Default `Resolver` implementation, using `socket.getaddrinfo`. - - The `.IOLoop` will be blocked during the resolution, although the - callback will not be run until the next `.IOLoop` iteration. - - .. deprecated:: 5.0 - The default `Resolver` now uses `.IOLoop.run_in_executor`; use that instead - of this class. - """ - def initialize(self): - super(BlockingResolver, self).initialize() - - -class ThreadedResolver(ExecutorResolver): - """Multithreaded non-blocking `Resolver` implementation. - - Requires the `concurrent.futures` package to be installed - (available in the standard library since Python 3.2, - installable with ``pip install futures`` in older versions). - - The thread pool size can be configured with:: - - Resolver.configure('tornado.netutil.ThreadedResolver', - num_threads=10) - - .. versionchanged:: 3.1 - All ``ThreadedResolvers`` share a single thread pool, whose - size is set by the first one to be created. - - .. deprecated:: 5.0 - The default `Resolver` now uses `.IOLoop.run_in_executor`; use that instead - of this class. - """ - _threadpool = None # type: ignore - _threadpool_pid = None # type: int - - def initialize(self, num_threads=10): - threadpool = ThreadedResolver._create_threadpool(num_threads) - super(ThreadedResolver, self).initialize( - executor=threadpool, close_executor=False) - - @classmethod - def _create_threadpool(cls, num_threads): - pid = os.getpid() - if cls._threadpool_pid != pid: - # Threads cannot survive after a fork, so if our pid isn't what it - # was when we created the pool then delete it. - cls._threadpool = None - if cls._threadpool is None: - from concurrent.futures import ThreadPoolExecutor - cls._threadpool = ThreadPoolExecutor(num_threads) - cls._threadpool_pid = pid - return cls._threadpool - - -class OverrideResolver(Resolver): - """Wraps a resolver with a mapping of overrides. - - This can be used to make local DNS changes (e.g. for testing) - without modifying system-wide settings. 
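
# Sketch (mapping values assumed) of the override behavior described
# above: wrap the configured resolver so selected hosts resolve locally
# without touching system DNS.
import socket

from tornado.ioloop import IOLoop
from tornado.netutil import OverrideResolver, Resolver

async def demo():
    resolver = OverrideResolver(Resolver(), {"example.com": "127.0.0.1"})
    addrinfo = await resolver.resolve("example.com", 80, socket.AF_INET)
    print(addrinfo)  # [(AddressFamily.AF_INET, ('127.0.0.1', 80))]

IOLoop.current().run_sync(demo)
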
- - The mapping can be in three formats:: - - { - # Hostname to host or ip - "example.com": "127.0.1.1", - - # Host+port to host+port - ("login.example.com", 443): ("localhost", 1443), - - # Host+port+address family to host+port - ("login.example.com", 443, socket.AF_INET6): ("::1", 1443), - } - - .. versionchanged:: 5.0 - Added support for host-port-family triplets. - """ - def initialize(self, resolver, mapping): - self.resolver = resolver - self.mapping = mapping - - def close(self): - self.resolver.close() - - def resolve(self, host, port, family=socket.AF_UNSPEC, *args, **kwargs): - if (host, port, family) in self.mapping: - host, port = self.mapping[(host, port, family)] - elif (host, port) in self.mapping: - host, port = self.mapping[(host, port)] - elif host in self.mapping: - host = self.mapping[host] - return self.resolver.resolve(host, port, family, *args, **kwargs) - - -# These are the keyword arguments to ssl.wrap_socket that must be translated -# to their SSLContext equivalents (the other arguments are still passed -# to SSLContext.wrap_socket). -_SSL_CONTEXT_KEYWORDS = frozenset(['ssl_version', 'certfile', 'keyfile', - 'cert_reqs', 'ca_certs', 'ciphers']) - - -def ssl_options_to_context(ssl_options): - """Try to convert an ``ssl_options`` dictionary to an - `~ssl.SSLContext` object. - - The ``ssl_options`` dictionary contains keywords to be passed to - `ssl.wrap_socket`. In Python 2.7.9+, `ssl.SSLContext` objects can - be used instead. This function converts the dict form to its - `~ssl.SSLContext` equivalent, and may be used when a component which - accepts both forms needs to upgrade to the `~ssl.SSLContext` version - to use features like SNI or NPN. - """ - if isinstance(ssl_options, ssl.SSLContext): - return ssl_options - assert isinstance(ssl_options, dict) - assert all(k in _SSL_CONTEXT_KEYWORDS for k in ssl_options), ssl_options - # Can't use create_default_context since this interface doesn't - # tell us client vs server. - context = ssl.SSLContext( - ssl_options.get('ssl_version', ssl.PROTOCOL_SSLv23)) - if 'certfile' in ssl_options: - context.load_cert_chain(ssl_options['certfile'], ssl_options.get('keyfile', None)) - if 'cert_reqs' in ssl_options: - context.verify_mode = ssl_options['cert_reqs'] - if 'ca_certs' in ssl_options: - context.load_verify_locations(ssl_options['ca_certs']) - if 'ciphers' in ssl_options: - context.set_ciphers(ssl_options['ciphers']) - if hasattr(ssl, 'OP_NO_COMPRESSION'): - # Disable TLS compression to avoid CRIME and related attacks. - # This constant depends on openssl version 1.0. - # TODO: Do we need to do this ourselves or can we trust - # the defaults? - context.options |= ssl.OP_NO_COMPRESSION - return context - - -def ssl_wrap_socket(socket, ssl_options, server_hostname=None, **kwargs): - """Returns an ``ssl.SSLSocket`` wrapping the given socket. - - ``ssl_options`` may be either an `ssl.SSLContext` object or a - dictionary (as accepted by `ssl_options_to_context`). Additional - keyword arguments are passed to ``wrap_socket`` (either the - `~ssl.SSLContext` method or the `ssl` module function as - appropriate). - """ - context = ssl_options_to_context(ssl_options) - if ssl.HAS_SNI: - # In python 3.4, wrap_socket only accepts the server_hostname - # argument if HAS_SNI is true. 
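
# Sketch (certificate paths are hypothetical): upgrading an ssl_options
# dictionary to an SSLContext with the helper above, e.g. to gain SNI
# support from a component that accepts both forms.
import ssl

from tornado.netutil import ssl_options_to_context

context = ssl_options_to_context({
    "certfile": "server.crt",  # hypothetical paths
    "keyfile": "server.key",
    "cert_reqs": ssl.CERT_NONE,
})
assert isinstance(context, ssl.SSLContext)
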
- # TODO: add a unittest (python added server-side SNI support in 3.4) - # In the meantime it can be manually tested with - # python3 -m tornado.httpclient https://sni.velox.ch - return context.wrap_socket(socket, server_hostname=server_hostname, - **kwargs) - else: - return context.wrap_socket(socket, **kwargs) diff --git a/lib/tornado/options.py b/lib/tornado/options.py deleted file mode 100755 index 0a4b965f..00000000 --- a/lib/tornado/options.py +++ /dev/null @@ -1,654 +0,0 @@ -# -# Copyright 2009 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""A command line parsing module that lets modules define their own options. - -This module is inspired by Google's `gflags -<https://github.com/google/python-gflags>`_. The primary difference -with libraries such as `argparse` is that a global registry is used so -that options may be defined in any module (it also enables -`tornado.log` by default). The rest of Tornado does not depend on this -module, so feel free to use `argparse` or other configuration -libraries if you prefer them. - -Options must be defined with `tornado.options.define` before use, -generally at the top level of a module. The options are then -accessible as attributes of `tornado.options.options`:: - - # myapp/db.py - from tornado.options import define, options - - define("mysql_host", default="127.0.0.1:3306", help="Main user DB") - define("memcache_hosts", default="127.0.0.1:11011", multiple=True, - help="Main user memcache servers") - - def connect(): - db = database.Connection(options.mysql_host) - ... - - # myapp/server.py - from tornado.options import define, options - - define("port", default=8080, help="port to listen on") - - def start_server(): - app = make_app() - app.listen(options.port) - -The ``main()`` method of your application does not need to be aware of all of -the options used throughout your program; they are all automatically loaded -when the modules are loaded. However, all modules that define options -must have been imported before the command line is parsed. - -Your ``main()`` method can parse the command line or parse a config file with -either `parse_command_line` or `parse_config_file`:: - - import myapp.db, myapp.server - import tornado.options - - if __name__ == '__main__': - tornado.options.parse_command_line() - # or - tornado.options.parse_config_file("/etc/server.conf") - -.. note:: - - When using multiple ``parse_*`` functions, pass ``final=False`` to all - but the last one, or side effects may occur twice (in particular, - this can result in log messages being doubled). - -`tornado.options.options` is a singleton instance of `OptionParser`, and -the top-level functions in this module (`define`, `parse_command_line`, etc) -simply call methods on it. You may create additional `OptionParser` -instances to define isolated sets of options, such as for subcommands. - -.. note:: - - By default, several options are defined that will configure the - standard `logging` module when `parse_command_line` or `parse_config_file` - are called. 
If you want Tornado to leave the logging configuration - alone so you can manage it yourself, either pass ``--logging=none`` - on the command line or do the following to disable it in code:: - - from tornado.options import options, parse_command_line - options.logging = None - parse_command_line() - -.. versionchanged:: 4.3 - Dashes and underscores are fully interchangeable in option names; - options can be defined, set, and read with any mix of the two. - Dashes are typical for command-line usage while config files require - underscores. -""" - -from __future__ import absolute_import, division, print_function - -import datetime -import numbers -import re -import sys -import os -import textwrap - -from tornado.escape import _unicode, native_str -from tornado.log import define_logging_options -from tornado import stack_context -from tornado.util import basestring_type, exec_in - - -class Error(Exception): - """Exception raised by errors in the options module.""" - pass - - -class OptionParser(object): - """A collection of options, a dictionary with object-like access. - - Normally accessed via static functions in the `tornado.options` module, - which reference a global instance. - """ - def __init__(self): - # we have to use self.__dict__ because we override setattr. - self.__dict__['_options'] = {} - self.__dict__['_parse_callbacks'] = [] - self.define("help", type=bool, help="show this help information", - callback=self._help_callback) - - def _normalize_name(self, name): - return name.replace('_', '-') - - def __getattr__(self, name): - name = self._normalize_name(name) - if isinstance(self._options.get(name), _Option): - return self._options[name].value() - raise AttributeError("Unrecognized option %r" % name) - - def __setattr__(self, name, value): - name = self._normalize_name(name) - if isinstance(self._options.get(name), _Option): - return self._options[name].set(value) - raise AttributeError("Unrecognized option %r" % name) - - def __iter__(self): - return (opt.name for opt in self._options.values()) - - def __contains__(self, name): - name = self._normalize_name(name) - return name in self._options - - def __getitem__(self, name): - return self.__getattr__(name) - - def __setitem__(self, name, value): - return self.__setattr__(name, value) - - def items(self): - """A sequence of (name, value) pairs. - - .. versionadded:: 3.1 - """ - return [(opt.name, opt.value()) for name, opt in self._options.items()] - - def groups(self): - """The set of option-groups created by ``define``. - - .. versionadded:: 3.1 - """ - return set(opt.group_name for opt in self._options.values()) - - def group_dict(self, group): - """The names and values of options in a group. - - Useful for copying options into Application settings:: - - from tornado.options import define, parse_command_line, options - - define('template_path', group='application') - define('static_path', group='application') - - parse_command_line() - - application = Application( - handlers, **options.group_dict('application')) - - .. versionadded:: 3.1 - """ - return dict( - (opt.name, opt.value()) for name, opt in self._options.items() - if not group or group == opt.group_name) - - def as_dict(self): - """The names and values of all options. - - .. versionadded:: 3.1 - """ - return dict( - (opt.name, opt.value()) for name, opt in self._options.items()) - - def define(self, name, default=None, type=None, help=None, metavar=None, - multiple=False, group=None, callback=None): - """Defines a new command line option. 
- - ``type`` can be any of `str`, `int`, `float`, `bool`, - `~datetime.datetime`, or `~datetime.timedelta`. If no ``type`` - is given but a ``default`` is, ``type`` is the type of - ``default``. Otherwise, ``type`` defaults to `str`. - - If ``multiple`` is True, the option value is a list of ``type`` - instead of an instance of ``type``. - - ``help`` and ``metavar`` are used to construct the - automatically generated command line help string. The help - message is formatted like:: - - --name=METAVAR help string - - ``group`` is used to group the defined options in logical - groups. By default, command line options are grouped by the - file in which they are defined. - - Command line option names must be unique globally. - - If a ``callback`` is given, it will be run with the new value whenever - the option is changed. This can be used to combine command-line - and file-based options:: - - define("config", type=str, help="path to config file", - callback=lambda path: parse_config_file(path, final=False)) - - With this definition, options in the file specified by ``--config`` will - override options set earlier on the command line, but can be overridden - by later flags. - - """ - normalized = self._normalize_name(name) - if normalized in self._options: - raise Error("Option %r already defined in %s" % - (normalized, self._options[normalized].file_name)) - frame = sys._getframe(0) - options_file = frame.f_code.co_filename - - # Can be called directly, or through top level define() fn, in which - # case, step up above that frame to look for real caller. - if (frame.f_back.f_code.co_filename == options_file and - frame.f_back.f_code.co_name == 'define'): - frame = frame.f_back - - file_name = frame.f_back.f_code.co_filename - if file_name == options_file: - file_name = "" - if type is None: - if not multiple and default is not None: - type = default.__class__ - else: - type = str - if group: - group_name = group - else: - group_name = file_name - option = _Option(name, file_name=file_name, - default=default, type=type, help=help, - metavar=metavar, multiple=multiple, - group_name=group_name, - callback=callback) - self._options[normalized] = option - - def parse_command_line(self, args=None, final=True): - """Parses all options given on the command line (defaults to - `sys.argv`). - - Options look like ``--option=value`` and are parsed according - to their ``type``. For boolean options, ``--option`` is - equivalent to ``--option=true`` - - If the option has ``multiple=True``, comma-separated values - are accepted. For multi-value integer options, the syntax - ``x:y`` is also accepted and equivalent to ``range(x, y)``. - - Note that ``args[0]`` is ignored since it is the program name - in `sys.argv`. - - We return a list of all arguments that are not parsed as options. - - If ``final`` is ``False``, parse callbacks will not be run. - This is useful for applications that wish to combine configurations - from multiple sources. 
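
# Sketch of the parsing rules just described: bare boolean flags,
# comma-separated multi-values, and X:Y integer ranges (names assumed).
from tornado.options import OptionParser

parser = OptionParser()
parser.define("debug", type=bool, default=False)
parser.define("ports", type=int, multiple=True)

remaining = parser.parse_command_line(
    ["prog", "--debug", "--ports=8000,8003:8005", "extra_arg"])
print(parser.debug)  # True
print(parser.ports)  # [8000, 8003, 8004, 8005]
print(remaining)     # ['extra_arg']
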
- - """ - if args is None: - args = sys.argv - remaining = [] - for i in range(1, len(args)): - # All things after the last option are command line arguments - if not args[i].startswith("-"): - remaining = args[i:] - break - if args[i] == "--": - remaining = args[i + 1:] - break - arg = args[i].lstrip("-") - name, equals, value = arg.partition("=") - name = self._normalize_name(name) - if name not in self._options: - self.print_help() - raise Error('Unrecognized command line option: %r' % name) - option = self._options[name] - if not equals: - if option.type == bool: - value = "true" - else: - raise Error('Option %r requires a value' % name) - option.parse(value) - - if final: - self.run_parse_callbacks() - - return remaining - - def parse_config_file(self, path, final=True): - """Parses and loads the config file at the given path. - - The config file contains Python code that will be executed (so - it is **not safe** to use untrusted config files). Anything in - the global namespace that matches a defined option will be - used to set that option's value. - - Options may either be the specified type for the option or - strings (in which case they will be parsed the same way as in - `.parse_command_line`) - - Example (using the options defined in the top-level docs of - this module):: - - port = 80 - mysql_host = 'mydb.example.com:3306' - # Both lists and comma-separated strings are allowed for - # multiple=True. - memcache_hosts = ['cache1.example.com:11011', - 'cache2.example.com:11011'] - memcache_hosts = 'cache1.example.com:11011,cache2.example.com:11011' - - If ``final`` is ``False``, parse callbacks will not be run. - This is useful for applications that wish to combine configurations - from multiple sources. - - .. note:: - - `tornado.options` is primarily a command-line library. - Config file support is provided for applications that wish - to use it, but applications that prefer config files may - wish to look at other libraries instead. - - .. versionchanged:: 4.1 - Config files are now always interpreted as utf-8 instead of - the system default encoding. - - .. versionchanged:: 4.4 - The special variable ``__file__`` is available inside config - files, specifying the absolute path to the config file itself. - - .. versionchanged:: 5.1 - Added the ability to set options via strings in config files. 
- - """ - config = {'__file__': os.path.abspath(path)} - with open(path, 'rb') as f: - exec_in(native_str(f.read()), config, config) - for name in config: - normalized = self._normalize_name(name) - if normalized in self._options: - option = self._options[normalized] - if option.multiple: - if not isinstance(config[name], (list, str)): - raise Error("Option %r is required to be a list of %s " - "or a comma-separated string" % - (option.name, option.type.__name__)) - - if type(config[name]) == str and option.type != str: - option.parse(config[name]) - else: - option.set(config[name]) - - if final: - self.run_parse_callbacks() - - def print_help(self, file=None): - """Prints all the command line options to stderr (or another file).""" - if file is None: - file = sys.stderr - print("Usage: %s [OPTIONS]" % sys.argv[0], file=file) - print("\nOptions:\n", file=file) - by_group = {} - for option in self._options.values(): - by_group.setdefault(option.group_name, []).append(option) - - for filename, o in sorted(by_group.items()): - if filename: - print("\n%s options:\n" % os.path.normpath(filename), file=file) - o.sort(key=lambda option: option.name) - for option in o: - # Always print names with dashes in a CLI context. - prefix = self._normalize_name(option.name) - if option.metavar: - prefix += "=" + option.metavar - description = option.help or "" - if option.default is not None and option.default != '': - description += " (default %s)" % option.default - lines = textwrap.wrap(description, 79 - 35) - if len(prefix) > 30 or len(lines) == 0: - lines.insert(0, '') - print(" --%-30s %s" % (prefix, lines[0]), file=file) - for line in lines[1:]: - print("%-34s %s" % (' ', line), file=file) - print(file=file) - - def _help_callback(self, value): - if value: - self.print_help() - sys.exit(0) - - def add_parse_callback(self, callback): - """Adds a parse callback, to be invoked when option parsing is done.""" - self._parse_callbacks.append(stack_context.wrap(callback)) - - def run_parse_callbacks(self): - for callback in self._parse_callbacks: - callback() - - def mockable(self): - """Returns a wrapper around self that is compatible with - `mock.patch <unittest.mock.patch>`. - - The `mock.patch <unittest.mock.patch>` function (included in - the standard library `unittest.mock` package since Python 3.3, - or in the third-party ``mock`` package for older versions of - Python) is incompatible with objects like ``options`` that - override ``__getattr__`` and ``__setattr__``. This function - returns an object that can be used with `mock.patch.object - <unittest.mock.patch.object>` to modify option values:: - - with mock.patch.object(options.mockable(), 'name', value): - assert options.name == value - """ - return _Mockable(self) - - -class _Mockable(object): - """`mock.patch` compatible wrapper for `OptionParser`. - - As of ``mock`` version 1.0.1, when an object uses ``__getattr__`` - hooks instead of ``__dict__``, ``patch.__exit__`` tries to delete - the attribute it set instead of setting a new one (assuming that - the object does not catpure ``__setattr__``, so the patch - created a new attribute in ``__dict__``). - - _Mockable's getattr and setattr pass through to the underlying - OptionParser, and delattr undoes the effect of a previous setattr. 
- """ - def __init__(self, options): - # Modify __dict__ directly to bypass __setattr__ - self.__dict__['_options'] = options - self.__dict__['_originals'] = {} - - def __getattr__(self, name): - return getattr(self._options, name) - - def __setattr__(self, name, value): - assert name not in self._originals, "don't reuse mockable objects" - self._originals[name] = getattr(self._options, name) - setattr(self._options, name, value) - - def __delattr__(self, name): - setattr(self._options, name, self._originals.pop(name)) - - -class _Option(object): - UNSET = object() - - def __init__(self, name, default=None, type=basestring_type, help=None, - metavar=None, multiple=False, file_name=None, group_name=None, - callback=None): - if default is None and multiple: - default = [] - self.name = name - self.type = type - self.help = help - self.metavar = metavar - self.multiple = multiple - self.file_name = file_name - self.group_name = group_name - self.callback = callback - self.default = default - self._value = _Option.UNSET - - def value(self): - return self.default if self._value is _Option.UNSET else self._value - - def parse(self, value): - _parse = { - datetime.datetime: self._parse_datetime, - datetime.timedelta: self._parse_timedelta, - bool: self._parse_bool, - basestring_type: self._parse_string, - }.get(self.type, self.type) - if self.multiple: - self._value = [] - for part in value.split(","): - if issubclass(self.type, numbers.Integral): - # allow ranges of the form X:Y (inclusive at both ends) - lo, _, hi = part.partition(":") - lo = _parse(lo) - hi = _parse(hi) if hi else lo - self._value.extend(range(lo, hi + 1)) - else: - self._value.append(_parse(part)) - else: - self._value = _parse(value) - if self.callback is not None: - self.callback(self._value) - return self.value() - - def set(self, value): - if self.multiple: - if not isinstance(value, list): - raise Error("Option %r is required to be a list of %s" % - (self.name, self.type.__name__)) - for item in value: - if item is not None and not isinstance(item, self.type): - raise Error("Option %r is required to be a list of %s" % - (self.name, self.type.__name__)) - else: - if value is not None and not isinstance(value, self.type): - raise Error("Option %r is required to be a %s (%s given)" % - (self.name, self.type.__name__, type(value))) - self._value = value - if self.callback is not None: - self.callback(self._value) - - # Supported date/time formats in our options - _DATETIME_FORMATS = [ - "%a %b %d %H:%M:%S %Y", - "%Y-%m-%d %H:%M:%S", - "%Y-%m-%d %H:%M", - "%Y-%m-%dT%H:%M", - "%Y%m%d %H:%M:%S", - "%Y%m%d %H:%M", - "%Y-%m-%d", - "%Y%m%d", - "%H:%M:%S", - "%H:%M", - ] - - def _parse_datetime(self, value): - for format in self._DATETIME_FORMATS: - try: - return datetime.datetime.strptime(value, format) - except ValueError: - pass - raise Error('Unrecognized date/time format: %r' % value) - - _TIMEDELTA_ABBREV_DICT = { - 'h': 'hours', - 'm': 'minutes', - 'min': 'minutes', - 's': 'seconds', - 'sec': 'seconds', - 'ms': 'milliseconds', - 'us': 'microseconds', - 'd': 'days', - 'w': 'weeks', - } - - _FLOAT_PATTERN = r'[-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?' 
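
# Sketch of the timedelta grammar implied by the table above: a float
# followed by one of the unit abbreviations, e.g. "90m", "45s", "2 days"
# (option name assumed for illustration).
import datetime

from tornado.options import OptionParser

parser = OptionParser()
parser.define("interval", type=datetime.timedelta)
parser.parse_command_line(["prog", "--interval=90m"])
assert parser.interval == datetime.timedelta(minutes=90)
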
- - _TIMEDELTA_PATTERN = re.compile( - r'\s*(%s)\s*(\w*)\s*' % _FLOAT_PATTERN, re.IGNORECASE) - - def _parse_timedelta(self, value): - try: - sum = datetime.timedelta() - start = 0 - while start < len(value): - m = self._TIMEDELTA_PATTERN.match(value, start) - if not m: - raise Exception() - num = float(m.group(1)) - units = m.group(2) or 'seconds' - units = self._TIMEDELTA_ABBREV_DICT.get(units, units) - sum += datetime.timedelta(**{units: num}) - start = m.end() - return sum - except Exception: - raise - - def _parse_bool(self, value): - return value.lower() not in ("false", "0", "f") - - def _parse_string(self, value): - return _unicode(value) - - -options = OptionParser() -"""Global options object. - -All defined options are available as attributes on this object. -""" - - -def define(name, default=None, type=None, help=None, metavar=None, - multiple=False, group=None, callback=None): - """Defines an option in the global namespace. - - See `OptionParser.define`. - """ - return options.define(name, default=default, type=type, help=help, - metavar=metavar, multiple=multiple, group=group, - callback=callback) - - -def parse_command_line(args=None, final=True): - """Parses global options from the command line. - - See `OptionParser.parse_command_line`. - """ - return options.parse_command_line(args, final=final) - - -def parse_config_file(path, final=True): - """Parses global options from a config file. - - See `OptionParser.parse_config_file`. - """ - return options.parse_config_file(path, final=final) - - -def print_help(file=None): - """Prints all the command line options to stderr (or another file). - - See `OptionParser.print_help`. - """ - return options.print_help(file) - - -def add_parse_callback(callback): - """Adds a parse callback, to be invoked when option parsing is done. - - See `OptionParser.add_parse_callback` - """ - options.add_parse_callback(callback) - - -# Default options -define_logging_options(options) diff --git a/lib/tornado/platform/asyncio.py b/lib/tornado/platform/asyncio.py deleted file mode 100755 index e0042e1d..00000000 --- a/lib/tornado/platform/asyncio.py +++ /dev/null @@ -1,299 +0,0 @@ -"""Bridges between the `asyncio` module and Tornado IOLoop. - -.. versionadded:: 3.2 - -This module integrates Tornado with the ``asyncio`` module introduced -in Python 3.4. This makes it possible to combine the two libraries on -the same event loop. - -.. deprecated:: 5.0 - - While the code in this module is still used, it is now enabled - automatically when `asyncio` is available, so applications should - no longer need to refer to this module directly. - -.. note:: - - Tornado requires the `~asyncio.AbstractEventLoop.add_reader` family of - methods, so it is not compatible with the `~asyncio.ProactorEventLoop` on - Windows. Use the `~asyncio.SelectorEventLoop` instead. 
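
# Sketch (assumes Python 3.5+ with Tornado 5): the bridge is installed
# automatically, so IOLoop.current() inside a running asyncio loop
# returns an AsyncIOMainLoop wrapping that same loop.
import asyncio

from tornado.ioloop import IOLoop

async def main():
    io_loop = IOLoop.current()  # the AsyncIOMainLoop for the running loop
    fut = asyncio.Future()
    io_loop.add_callback(fut.set_result, "scheduled through the bridge")
    print(await fut)

asyncio.get_event_loop().run_until_complete(main())
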
-""" - -from __future__ import absolute_import, division, print_function -import functools - -from tornado.gen import convert_yielded -from tornado.ioloop import IOLoop -from tornado import stack_context - -import asyncio - - -class BaseAsyncIOLoop(IOLoop): - def initialize(self, asyncio_loop, **kwargs): - self.asyncio_loop = asyncio_loop - # Maps fd to (fileobj, handler function) pair (as in IOLoop.add_handler) - self.handlers = {} - # Set of fds listening for reads/writes - self.readers = set() - self.writers = set() - self.closing = False - # If an asyncio loop was closed through an asyncio interface - # instead of IOLoop.close(), we'd never hear about it and may - # have left a dangling reference in our map. In case an - # application (or, more likely, a test suite) creates and - # destroys a lot of event loops in this way, check here to - # ensure that we don't have a lot of dead loops building up in - # the map. - # - # TODO(bdarnell): consider making self.asyncio_loop a weakref - # for AsyncIOMainLoop and make _ioloop_for_asyncio a - # WeakKeyDictionary. - for loop in list(IOLoop._ioloop_for_asyncio): - if loop.is_closed(): - del IOLoop._ioloop_for_asyncio[loop] - IOLoop._ioloop_for_asyncio[asyncio_loop] = self - super(BaseAsyncIOLoop, self).initialize(**kwargs) - - def close(self, all_fds=False): - self.closing = True - for fd in list(self.handlers): - fileobj, handler_func = self.handlers[fd] - self.remove_handler(fd) - if all_fds: - self.close_fd(fileobj) - # Remove the mapping before closing the asyncio loop. If this - # happened in the other order, we could race against another - # initialize() call which would see the closed asyncio loop, - # assume it was closed from the asyncio side, and do this - # cleanup for us, leading to a KeyError. 
- del IOLoop._ioloop_for_asyncio[self.asyncio_loop] - self.asyncio_loop.close() - - def add_handler(self, fd, handler, events): - fd, fileobj = self.split_fd(fd) - if fd in self.handlers: - raise ValueError("fd %s added twice" % fd) - self.handlers[fd] = (fileobj, stack_context.wrap(handler)) - if events & IOLoop.READ: - self.asyncio_loop.add_reader( - fd, self._handle_events, fd, IOLoop.READ) - self.readers.add(fd) - if events & IOLoop.WRITE: - self.asyncio_loop.add_writer( - fd, self._handle_events, fd, IOLoop.WRITE) - self.writers.add(fd) - - def update_handler(self, fd, events): - fd, fileobj = self.split_fd(fd) - if events & IOLoop.READ: - if fd not in self.readers: - self.asyncio_loop.add_reader( - fd, self._handle_events, fd, IOLoop.READ) - self.readers.add(fd) - else: - if fd in self.readers: - self.asyncio_loop.remove_reader(fd) - self.readers.remove(fd) - if events & IOLoop.WRITE: - if fd not in self.writers: - self.asyncio_loop.add_writer( - fd, self._handle_events, fd, IOLoop.WRITE) - self.writers.add(fd) - else: - if fd in self.writers: - self.asyncio_loop.remove_writer(fd) - self.writers.remove(fd) - - def remove_handler(self, fd): - fd, fileobj = self.split_fd(fd) - if fd not in self.handlers: - return - if fd in self.readers: - self.asyncio_loop.remove_reader(fd) - self.readers.remove(fd) - if fd in self.writers: - self.asyncio_loop.remove_writer(fd) - self.writers.remove(fd) - del self.handlers[fd] - - def _handle_events(self, fd, events): - fileobj, handler_func = self.handlers[fd] - handler_func(fileobj, events) - - def start(self): - try: - old_loop = asyncio.get_event_loop() - except (RuntimeError, AssertionError): - old_loop = None - try: - self._setup_logging() - asyncio.set_event_loop(self.asyncio_loop) - self.asyncio_loop.run_forever() - finally: - asyncio.set_event_loop(old_loop) - - def stop(self): - self.asyncio_loop.stop() - - def call_at(self, when, callback, *args, **kwargs): - # asyncio.call_at supports *args but not **kwargs, so bind them here. - # We do not synchronize self.time and asyncio_loop.time, so - # convert from absolute to relative. - return self.asyncio_loop.call_later( - max(0, when - self.time()), self._run_callback, - functools.partial(stack_context.wrap(callback), *args, **kwargs)) - - def remove_timeout(self, timeout): - timeout.cancel() - - def add_callback(self, callback, *args, **kwargs): - try: - self.asyncio_loop.call_soon_threadsafe( - self._run_callback, - functools.partial(stack_context.wrap(callback), *args, **kwargs)) - except RuntimeError: - # "Event loop is closed". Swallow the exception for - # consistency with PollIOLoop (and logical consistency - # with the fact that we can't guarantee that an - # add_callback that completes without error will - # eventually execute). - pass - - add_callback_from_signal = add_callback - - def run_in_executor(self, executor, func, *args): - return self.asyncio_loop.run_in_executor(executor, func, *args) - - def set_default_executor(self, executor): - return self.asyncio_loop.set_default_executor(executor) - - -class AsyncIOMainLoop(BaseAsyncIOLoop): - """``AsyncIOMainLoop`` creates an `.IOLoop` that corresponds to the - current ``asyncio`` event loop (i.e. the one returned by - ``asyncio.get_event_loop()``). - - .. deprecated:: 5.0 - - Now used automatically when appropriate; it is no longer necessary - to refer to this class directly. - - .. versionchanged:: 5.0 - - Closing an `AsyncIOMainLoop` now closes the underlying asyncio loop. 
- """ - def initialize(self, **kwargs): - super(AsyncIOMainLoop, self).initialize(asyncio.get_event_loop(), **kwargs) - - def make_current(self): - # AsyncIOMainLoop already refers to the current asyncio loop so - # nothing to do here. - pass - - -class AsyncIOLoop(BaseAsyncIOLoop): - """``AsyncIOLoop`` is an `.IOLoop` that runs on an ``asyncio`` event loop. - This class follows the usual Tornado semantics for creating new - ``IOLoops``; these loops are not necessarily related to the - ``asyncio`` default event loop. - - Each ``AsyncIOLoop`` creates a new ``asyncio.EventLoop``; this object - can be accessed with the ``asyncio_loop`` attribute. - - .. versionchanged:: 5.0 - - When an ``AsyncIOLoop`` becomes the current `.IOLoop`, it also sets - the current `asyncio` event loop. - - .. deprecated:: 5.0 - - Now used automatically when appropriate; it is no longer necessary - to refer to this class directly. - """ - def initialize(self, **kwargs): - self.is_current = False - loop = asyncio.new_event_loop() - try: - super(AsyncIOLoop, self).initialize(loop, **kwargs) - except Exception: - # If initialize() does not succeed (taking ownership of the loop), - # we have to close it. - loop.close() - raise - - def close(self, all_fds=False): - if self.is_current: - self.clear_current() - super(AsyncIOLoop, self).close(all_fds=all_fds) - - def make_current(self): - if not self.is_current: - try: - self.old_asyncio = asyncio.get_event_loop() - except (RuntimeError, AssertionError): - self.old_asyncio = None - self.is_current = True - asyncio.set_event_loop(self.asyncio_loop) - - def _clear_current_hook(self): - if self.is_current: - asyncio.set_event_loop(self.old_asyncio) - self.is_current = False - - -def to_tornado_future(asyncio_future): - """Convert an `asyncio.Future` to a `tornado.concurrent.Future`. - - .. versionadded:: 4.1 - - .. deprecated:: 5.0 - Tornado ``Futures`` have been merged with `asyncio.Future`, - so this method is now a no-op. - """ - return asyncio_future - - -def to_asyncio_future(tornado_future): - """Convert a Tornado yieldable object to an `asyncio.Future`. - - .. versionadded:: 4.1 - - .. versionchanged:: 4.3 - Now accepts any yieldable object, not just - `tornado.concurrent.Future`. - - .. deprecated:: 5.0 - Tornado ``Futures`` have been merged with `asyncio.Future`, - so this method is now equivalent to `tornado.gen.convert_yielded`. - """ - return convert_yielded(tornado_future) - - -class AnyThreadEventLoopPolicy(asyncio.DefaultEventLoopPolicy): - """Event loop policy that allows loop creation on any thread. - - The default `asyncio` event loop policy only automatically creates - event loops in the main threads. Other threads must create event - loops explicitly or `asyncio.get_event_loop` (and therefore - `.IOLoop.current`) will fail. Installing this policy allows event - loops to be created automatically on any thread, matching the - behavior of Tornado versions prior to 5.0 (or 5.0 on Python 2). - - Usage:: - - asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy()) - - .. versionadded:: 5.0 - - """ - def get_event_loop(self): - try: - return super().get_event_loop() - except (RuntimeError, AssertionError): - # This was an AssertionError in python 3.4.2 (which ships with debian jessie) - # and changed to a RuntimeError in 3.4.3. 
- # "There is no current event loop in thread %r" - loop = self.new_event_loop() - self.set_event_loop(loop) - return loop diff --git a/lib/tornado/platform/auto.py b/lib/tornado/platform/auto.py deleted file mode 100755 index 1a9133fa..00000000 --- a/lib/tornado/platform/auto.py +++ /dev/null @@ -1,58 +0,0 @@ -# -# Copyright 2011 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Implementation of platform-specific functionality. - -For each function or class described in `tornado.platform.interface`, -the appropriate platform-specific implementation exists in this module. -Most code that needs access to this functionality should do e.g.:: - - from tornado.platform.auto import set_close_exec -""" - -from __future__ import absolute_import, division, print_function - -import os - -if 'APPENGINE_RUNTIME' in os.environ: - from tornado.platform.common import Waker - - def set_close_exec(fd): - pass -elif os.name == 'nt': - from tornado.platform.common import Waker - from tornado.platform.windows import set_close_exec -else: - from tornado.platform.posix import set_close_exec, Waker - -try: - # monotime monkey-patches the time module to have a monotonic function - # in versions of python before 3.3. - import monotime - # Silence pyflakes warning about this unused import - monotime -except ImportError: - pass -try: - # monotonic can provide a monotonic function in versions of python before - # 3.3, too. - from monotonic import monotonic as monotonic_time -except ImportError: - try: - from time import monotonic as monotonic_time - except ImportError: - monotonic_time = None - -__all__ = ['Waker', 'set_close_exec', 'monotonic_time'] diff --git a/lib/tornado/platform/auto.pyi b/lib/tornado/platform/auto.pyi deleted file mode 100755 index a1c97228..00000000 --- a/lib/tornado/platform/auto.pyi +++ /dev/null @@ -1,4 +0,0 @@ -# auto.py is full of patterns mypy doesn't like, so for type checking -# purposes we replace it with interface.py. - -from .interface import * diff --git a/lib/tornado/platform/caresresolver.py b/lib/tornado/platform/caresresolver.py deleted file mode 100755 index 768cb624..00000000 --- a/lib/tornado/platform/caresresolver.py +++ /dev/null @@ -1,79 +0,0 @@ -from __future__ import absolute_import, division, print_function -import pycares # type: ignore -import socket - -from tornado.concurrent import Future -from tornado import gen -from tornado.ioloop import IOLoop -from tornado.netutil import Resolver, is_valid_ip - - -class CaresResolver(Resolver): - """Name resolver based on the c-ares library. - - This is a non-blocking and non-threaded resolver. It may not produce - the same results as the system resolver, but can be used for non-blocking - resolution when threads cannot be used. - - c-ares fails to resolve some names when ``family`` is ``AF_UNSPEC``, - so it is only recommended for use in ``AF_INET`` (i.e. IPv4). This is - the default for ``tornado.simple_httpclient``, but other libraries - may default to ``AF_UNSPEC``. - - .. 
versionchanged:: 5.0 - The ``io_loop`` argument (deprecated since version 4.1) has been removed. - """ - def initialize(self): - self.io_loop = IOLoop.current() - self.channel = pycares.Channel(sock_state_cb=self._sock_state_cb) - self.fds = {} - - def _sock_state_cb(self, fd, readable, writable): - state = ((IOLoop.READ if readable else 0) | - (IOLoop.WRITE if writable else 0)) - if not state: - self.io_loop.remove_handler(fd) - del self.fds[fd] - elif fd in self.fds: - self.io_loop.update_handler(fd, state) - self.fds[fd] = state - else: - self.io_loop.add_handler(fd, self._handle_events, state) - self.fds[fd] = state - - def _handle_events(self, fd, events): - read_fd = pycares.ARES_SOCKET_BAD - write_fd = pycares.ARES_SOCKET_BAD - if events & IOLoop.READ: - read_fd = fd - if events & IOLoop.WRITE: - write_fd = fd - self.channel.process_fd(read_fd, write_fd) - - @gen.coroutine - def resolve(self, host, port, family=0): - if is_valid_ip(host): - addresses = [host] - else: - # gethostbyname doesn't take callback as a kwarg - fut = Future() - self.channel.gethostbyname(host, family, - lambda result, error: fut.set_result((result, error))) - result, error = yield fut - if error: - raise IOError('C-Ares returned error %s: %s while resolving %s' % - (error, pycares.errno.strerror(error), host)) - addresses = result.addresses - addrinfo = [] - for address in addresses: - if '.' in address: - address_family = socket.AF_INET - elif ':' in address: - address_family = socket.AF_INET6 - else: - address_family = socket.AF_UNSPEC - if family != socket.AF_UNSPEC and family != address_family: - raise IOError('Requested socket family %d but got %d' % - (family, address_family)) - addrinfo.append((address_family, (address, port))) - raise gen.Return(addrinfo) diff --git a/lib/tornado/platform/common.py b/lib/tornado/platform/common.py deleted file mode 100755 index b597748d..00000000 --- a/lib/tornado/platform/common.py +++ /dev/null @@ -1,113 +0,0 @@ -"""Lowest-common-denominator implementations of platform functionality.""" -from __future__ import absolute_import, division, print_function - -import errno -import socket -import time - -from tornado.platform import interface -from tornado.util import errno_from_exception - - -def try_close(f): - # Avoid issue #875 (race condition when using the file in another - # thread). - for i in range(10): - try: - f.close() - except IOError: - # Yield to another thread - time.sleep(1e-3) - else: - break - # Try a last time and let raise - f.close() - - -class Waker(interface.Waker): - """Create an OS independent asynchronous pipe. - - For use on platforms that don't have os.pipe() (or where pipes cannot - be passed to select()), but do have sockets. This includes Windows - and Jython. - """ - def __init__(self): - from .auto import set_close_exec - # Based on Zope select_trigger.py: - # https://github.com/zopefoundation/Zope/blob/master/src/ZServer/medusa/thread/select_trigger.py - - self.writer = socket.socket() - set_close_exec(self.writer.fileno()) - # Disable buffering -- pulling the trigger sends 1 byte, - # and we want that sent immediately, to wake up ASAP. - self.writer.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) - - count = 0 - while 1: - count += 1 - # Bind to a local port; for efficiency, let the OS pick - # a free port for us. - # Unfortunately, stress tests showed that we may not - # be able to connect to that port ("Address already in - # use") despite that the OS picked it. 
This appears - # to be a race bug in the Windows socket implementation. - # So we loop until a connect() succeeds (almost always - # on the first try). See the long thread at - # http://mail.zope.org/pipermail/zope/2005-July/160433.html - # for hideous details. - a = socket.socket() - set_close_exec(a.fileno()) - a.bind(("127.0.0.1", 0)) - a.listen(1) - connect_address = a.getsockname() # assigned (host, port) pair - try: - self.writer.connect(connect_address) - break # success - except socket.error as detail: - if (not hasattr(errno, 'WSAEADDRINUSE') or - errno_from_exception(detail) != errno.WSAEADDRINUSE): - # "Address already in use" is the only error - # I've seen on two WinXP Pro SP2 boxes, under - # Pythons 2.3.5 and 2.4.1. - raise - # (10048, 'Address already in use') - # assert count <= 2 # never triggered in Tim's tests - if count >= 10: # I've never seen it go above 2 - a.close() - self.writer.close() - raise socket.error("Cannot bind trigger!") - # Close `a` and try again. Note: I originally put a short - # sleep() here, but it didn't appear to help or hurt. - a.close() - - self.reader, addr = a.accept() - set_close_exec(self.reader.fileno()) - self.reader.setblocking(0) - self.writer.setblocking(0) - a.close() - self.reader_fd = self.reader.fileno() - - def fileno(self): - return self.reader.fileno() - - def write_fileno(self): - return self.writer.fileno() - - def wake(self): - try: - self.writer.send(b"x") - except (IOError, socket.error, ValueError): - pass - - def consume(self): - try: - while True: - result = self.reader.recv(1024) - if not result: - break - except (IOError, socket.error): - pass - - def close(self): - self.reader.close() - try_close(self.writer) diff --git a/lib/tornado/platform/epoll.py b/lib/tornado/platform/epoll.py deleted file mode 100755 index 4e346174..00000000 --- a/lib/tornado/platform/epoll.py +++ /dev/null @@ -1,25 +0,0 @@ -# -# Copyright 2012 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""EPoll-based IOLoop implementation for Linux systems.""" -from __future__ import absolute_import, division, print_function - -import select - -from tornado.ioloop import PollIOLoop - - -class EPollIOLoop(PollIOLoop): - def initialize(self, **kwargs): - super(EPollIOLoop, self).initialize(impl=select.epoll(), **kwargs) diff --git a/lib/tornado/platform/interface.py b/lib/tornado/platform/interface.py deleted file mode 100755 index cac53264..00000000 --- a/lib/tornado/platform/interface.py +++ /dev/null @@ -1,66 +0,0 @@ -# -# Copyright 2011 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Interfaces for platform-specific functionality.
-
-This module exists primarily for documentation purposes and as base classes
-for other tornado.platform modules. Most code should import the appropriate
-implementation from `tornado.platform.auto`.
-"""
-
-from __future__ import absolute_import, division, print_function
-
-
-def set_close_exec(fd):
-    """Sets the close-on-exec bit (``FD_CLOEXEC``) for a file descriptor."""
-    raise NotImplementedError()
-
-
-class Waker(object):
-    """A socket-like object that can wake another thread from ``select()``.
-
-    The `~tornado.ioloop.IOLoop` will add the Waker's `fileno()` to
-    its ``select`` (or ``epoll`` or ``kqueue``) calls. When another
-    thread wants to wake up the loop, it calls `wake`. Once it has woken
-    up, it will call `consume` to do any necessary per-wake cleanup. When
-    the ``IOLoop`` is closed, it closes its waker too.
-    """
-    def fileno(self):
-        """Returns the read file descriptor for this waker.
-
-        Must be suitable for use with ``select()`` or equivalent on the
-        local platform.
-        """
-        raise NotImplementedError()
-
-    def write_fileno(self):
-        """Returns the write file descriptor for this waker."""
-        raise NotImplementedError()
-
-    def wake(self):
-        """Triggers activity on the waker's file descriptor."""
-        raise NotImplementedError()
-
-    def consume(self):
-        """Called after the loop has woken up to do any necessary cleanup."""
-        raise NotImplementedError()
-
-    def close(self):
-        """Closes the waker's file descriptor(s)."""
-        raise NotImplementedError()
-
-
-def monotonic_time():
-    raise NotImplementedError()
diff --git a/lib/tornado/platform/kqueue.py b/lib/tornado/platform/kqueue.py
deleted file mode 100755
index 4e0aee02..00000000
--- a/lib/tornado/platform/kqueue.py
+++ /dev/null
@@ -1,90 +0,0 @@
-#
-# Copyright 2012 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
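The ``Waker`` contract documented in ``interface.py`` above can be satisfied with an ordinary pipe on POSIX systems. A minimal sketch, with error handling and non-blocking setup elided (an illustration only, not the module's actual implementation, which lives in ``tornado.platform.posix`` below)::

    import os

    class PipeWaker(object):
        def __init__(self):
            self._r, self._w = os.pipe()   # read end is what the loop polls

        def fileno(self):
            return self._r

        def write_fileno(self):
            return self._w

        def wake(self):
            os.write(self._w, b"x")        # any byte makes fileno() readable

        def consume(self):
            os.read(self._r, 1024)         # drain so the fd stops reporting readable

        def close(self):
            os.close(self._r)
            os.close(self._w)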
-"""KQueue-based IOLoop implementation for BSD/Mac systems.""" -from __future__ import absolute_import, division, print_function - -import select - -from tornado.ioloop import IOLoop, PollIOLoop - -assert hasattr(select, 'kqueue'), 'kqueue not supported' - - -class _KQueue(object): - """A kqueue-based event loop for BSD/Mac systems.""" - def __init__(self): - self._kqueue = select.kqueue() - self._active = {} - - def fileno(self): - return self._kqueue.fileno() - - def close(self): - self._kqueue.close() - - def register(self, fd, events): - if fd in self._active: - raise IOError("fd %s already registered" % fd) - self._control(fd, events, select.KQ_EV_ADD) - self._active[fd] = events - - def modify(self, fd, events): - self.unregister(fd) - self.register(fd, events) - - def unregister(self, fd): - events = self._active.pop(fd) - self._control(fd, events, select.KQ_EV_DELETE) - - def _control(self, fd, events, flags): - kevents = [] - if events & IOLoop.WRITE: - kevents.append(select.kevent( - fd, filter=select.KQ_FILTER_WRITE, flags=flags)) - if events & IOLoop.READ: - kevents.append(select.kevent( - fd, filter=select.KQ_FILTER_READ, flags=flags)) - # Even though control() takes a list, it seems to return EINVAL - # on Mac OS X (10.6) when there is more than one event in the list. - for kevent in kevents: - self._kqueue.control([kevent], 0) - - def poll(self, timeout): - kevents = self._kqueue.control(None, 1000, timeout) - events = {} - for kevent in kevents: - fd = kevent.ident - if kevent.filter == select.KQ_FILTER_READ: - events[fd] = events.get(fd, 0) | IOLoop.READ - if kevent.filter == select.KQ_FILTER_WRITE: - if kevent.flags & select.KQ_EV_EOF: - # If an asynchronous connection is refused, kqueue - # returns a write event with the EOF flag set. - # Turn this into an error for consistency with the - # other IOLoop implementations. - # Note that for read events, EOF may be returned before - # all data has been consumed from the socket buffer, - # so we only check for EOF on write events. - events[fd] = IOLoop.ERROR - else: - events[fd] = events.get(fd, 0) | IOLoop.WRITE - if kevent.flags & select.KQ_EV_ERROR: - events[fd] = events.get(fd, 0) | IOLoop.ERROR - return events.items() - - -class KQueueIOLoop(PollIOLoop): - def initialize(self, **kwargs): - super(KQueueIOLoop, self).initialize(impl=_KQueue(), **kwargs) diff --git a/lib/tornado/platform/posix.py b/lib/tornado/platform/posix.py deleted file mode 100755 index 6fe1fa83..00000000 --- a/lib/tornado/platform/posix.py +++ /dev/null @@ -1,69 +0,0 @@ -# -# Copyright 2011 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
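For reference, the raw ``select.kqueue`` pattern that ``_KQueue`` above wraps can be exercised directly on BSD/macOS. A small self-contained sketch (guarded, since ``kqueue`` does not exist on Linux or Windows; the pipe is only there to make the example runnable)::

    import os
    import select

    if hasattr(select, 'kqueue'):
        r, w = os.pipe()
        kq = select.kqueue()
        # Register interest in reads on r, as _KQueue._control does
        # for IOLoop.READ.
        kq.control([select.kevent(r, filter=select.KQ_FILTER_READ,
                                  flags=select.KQ_EV_ADD)], 0)
        os.write(w, b"x")                    # make r readable
        events = kq.control(None, 1000, 0)   # poll with timeout 0
        assert events and events[0].ident == r
        kq.close()
        os.close(r)
        os.close(w)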
- -"""Posix implementations of platform-specific functionality.""" - -from __future__ import absolute_import, division, print_function - -import fcntl -import os - -from tornado.platform import common, interface - - -def set_close_exec(fd): - flags = fcntl.fcntl(fd, fcntl.F_GETFD) - fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC) - - -def _set_nonblocking(fd): - flags = fcntl.fcntl(fd, fcntl.F_GETFL) - fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK) - - -class Waker(interface.Waker): - def __init__(self): - r, w = os.pipe() - _set_nonblocking(r) - _set_nonblocking(w) - set_close_exec(r) - set_close_exec(w) - self.reader = os.fdopen(r, "rb", 0) - self.writer = os.fdopen(w, "wb", 0) - - def fileno(self): - return self.reader.fileno() - - def write_fileno(self): - return self.writer.fileno() - - def wake(self): - try: - self.writer.write(b"x") - except (IOError, ValueError): - pass - - def consume(self): - try: - while True: - result = self.reader.read() - if not result: - break - except IOError: - pass - - def close(self): - self.reader.close() - common.try_close(self.writer) diff --git a/lib/tornado/platform/select.py b/lib/tornado/platform/select.py deleted file mode 100755 index 14e8a474..00000000 --- a/lib/tornado/platform/select.py +++ /dev/null @@ -1,75 +0,0 @@ -# -# Copyright 2012 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Select-based IOLoop implementation. - -Used as a fallback for systems that don't support epoll or kqueue. -""" -from __future__ import absolute_import, division, print_function - -import select - -from tornado.ioloop import IOLoop, PollIOLoop - - -class _Select(object): - """A simple, select()-based IOLoop implementation for non-Linux systems""" - def __init__(self): - self.read_fds = set() - self.write_fds = set() - self.error_fds = set() - self.fd_sets = (self.read_fds, self.write_fds, self.error_fds) - - def close(self): - pass - - def register(self, fd, events): - if fd in self.read_fds or fd in self.write_fds or fd in self.error_fds: - raise IOError("fd %s already registered" % fd) - if events & IOLoop.READ: - self.read_fds.add(fd) - if events & IOLoop.WRITE: - self.write_fds.add(fd) - if events & IOLoop.ERROR: - self.error_fds.add(fd) - # Closed connections are reported as errors by epoll and kqueue, - # but as zero-byte reads by select, so when errors are requested - # we need to listen for both read and error. 
- # self.read_fds.add(fd) - - def modify(self, fd, events): - self.unregister(fd) - self.register(fd, events) - - def unregister(self, fd): - self.read_fds.discard(fd) - self.write_fds.discard(fd) - self.error_fds.discard(fd) - - def poll(self, timeout): - readable, writeable, errors = select.select( - self.read_fds, self.write_fds, self.error_fds, timeout) - events = {} - for fd in readable: - events[fd] = events.get(fd, 0) | IOLoop.READ - for fd in writeable: - events[fd] = events.get(fd, 0) | IOLoop.WRITE - for fd in errors: - events[fd] = events.get(fd, 0) | IOLoop.ERROR - return events.items() - - -class SelectIOLoop(PollIOLoop): - def initialize(self, **kwargs): - super(SelectIOLoop, self).initialize(impl=_Select(), **kwargs) diff --git a/lib/tornado/platform/twisted.py b/lib/tornado/platform/twisted.py deleted file mode 100755 index b38a755c..00000000 --- a/lib/tornado/platform/twisted.py +++ /dev/null @@ -1,609 +0,0 @@ -# Author: Ovidiu Predescu -# Date: July 2011 -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Bridges between the Twisted reactor and Tornado IOLoop. - -This module lets you run applications and libraries written for -Twisted in a Tornado application. It can be used in two modes, -depending on which library's underlying event loop you want to use. - -This module has been tested with Twisted versions 11.0.0 and newer. 
-""" - -from __future__ import absolute_import, division, print_function - -import datetime -import functools -import numbers -import socket -import sys - -import twisted.internet.abstract # type: ignore -from twisted.internet.defer import Deferred # type: ignore -from twisted.internet.posixbase import PosixReactorBase # type: ignore -from twisted.internet.interfaces import IReactorFDSet, IDelayedCall, IReactorTime, IReadDescriptor, IWriteDescriptor # type: ignore # noqa: E501 -from twisted.python import failure, log # type: ignore -from twisted.internet import error # type: ignore -import twisted.names.cache # type: ignore -import twisted.names.client # type: ignore -import twisted.names.hosts # type: ignore -import twisted.names.resolve # type: ignore - -from zope.interface import implementer # type: ignore - -from tornado.concurrent import Future, future_set_exc_info -from tornado.escape import utf8 -from tornado import gen -import tornado.ioloop -from tornado.log import app_log -from tornado.netutil import Resolver -from tornado.stack_context import NullContext, wrap -from tornado.ioloop import IOLoop -from tornado.util import timedelta_to_seconds - - -@implementer(IDelayedCall) -class TornadoDelayedCall(object): - """DelayedCall object for Tornado.""" - def __init__(self, reactor, seconds, f, *args, **kw): - self._reactor = reactor - self._func = functools.partial(f, *args, **kw) - self._time = self._reactor.seconds() + seconds - self._timeout = self._reactor._io_loop.add_timeout(self._time, - self._called) - self._active = True - - def _called(self): - self._active = False - self._reactor._removeDelayedCall(self) - try: - self._func() - except: - app_log.error("_called caught exception", exc_info=True) - - def getTime(self): - return self._time - - def cancel(self): - self._active = False - self._reactor._io_loop.remove_timeout(self._timeout) - self._reactor._removeDelayedCall(self) - - def delay(self, seconds): - self._reactor._io_loop.remove_timeout(self._timeout) - self._time += seconds - self._timeout = self._reactor._io_loop.add_timeout(self._time, - self._called) - - def reset(self, seconds): - self._reactor._io_loop.remove_timeout(self._timeout) - self._time = self._reactor.seconds() + seconds - self._timeout = self._reactor._io_loop.add_timeout(self._time, - self._called) - - def active(self): - return self._active - - -@implementer(IReactorTime, IReactorFDSet) -class TornadoReactor(PosixReactorBase): - """Twisted reactor built on the Tornado IOLoop. - - `TornadoReactor` implements the Twisted reactor interface on top of - the Tornado IOLoop. To use it, simply call `install` at the beginning - of the application:: - - import tornado.platform.twisted - tornado.platform.twisted.install() - from twisted.internet import reactor - - When the app is ready to start, call ``IOLoop.current().start()`` - instead of ``reactor.run()``. - - It is also possible to create a non-global reactor by calling - ``tornado.platform.twisted.TornadoReactor()``. However, if - the `.IOLoop` and reactor are to be short-lived (such as those used in - unit tests), additional cleanup may be required. Specifically, it is - recommended to call:: - - reactor.fireSystemEvent('shutdown') - reactor.disconnectAll() - - before closing the `.IOLoop`. - - .. versionchanged:: 5.0 - The ``io_loop`` argument (deprecated since version 4.1) has been removed. - - .. deprecated:: 5.1 - - This class will be removed in Tornado 6.0. Use - ``twisted.internet.asyncioreactor.AsyncioSelectorReactor`` - instead. 
- - """ - def __init__(self): - self._io_loop = tornado.ioloop.IOLoop.current() - self._readers = {} # map of reader objects to fd - self._writers = {} # map of writer objects to fd - self._fds = {} # a map of fd to a (reader, writer) tuple - self._delayedCalls = {} - PosixReactorBase.__init__(self) - self.addSystemEventTrigger('during', 'shutdown', self.crash) - - # IOLoop.start() bypasses some of the reactor initialization. - # Fire off the necessary events if they weren't already triggered - # by reactor.run(). - def start_if_necessary(): - if not self._started: - self.fireSystemEvent('startup') - self._io_loop.add_callback(start_if_necessary) - - # IReactorTime - def seconds(self): - return self._io_loop.time() - - def callLater(self, seconds, f, *args, **kw): - dc = TornadoDelayedCall(self, seconds, f, *args, **kw) - self._delayedCalls[dc] = True - return dc - - def getDelayedCalls(self): - return [x for x in self._delayedCalls if x._active] - - def _removeDelayedCall(self, dc): - if dc in self._delayedCalls: - del self._delayedCalls[dc] - - # IReactorThreads - def callFromThread(self, f, *args, **kw): - assert callable(f), "%s is not callable" % f - with NullContext(): - # This NullContext is mainly for an edge case when running - # TwistedIOLoop on top of a TornadoReactor. - # TwistedIOLoop.add_callback uses reactor.callFromThread and - # should not pick up additional StackContexts along the way. - self._io_loop.add_callback(f, *args, **kw) - - # We don't need the waker code from the super class, Tornado uses - # its own waker. - def installWaker(self): - pass - - def wakeUp(self): - pass - - # IReactorFDSet - def _invoke_callback(self, fd, events): - if fd not in self._fds: - return - (reader, writer) = self._fds[fd] - if reader: - err = None - if reader.fileno() == -1: - err = error.ConnectionLost() - elif events & IOLoop.READ: - err = log.callWithLogger(reader, reader.doRead) - if err is None and events & IOLoop.ERROR: - err = error.ConnectionLost() - if err is not None: - self.removeReader(reader) - reader.readConnectionLost(failure.Failure(err)) - if writer: - err = None - if writer.fileno() == -1: - err = error.ConnectionLost() - elif events & IOLoop.WRITE: - err = log.callWithLogger(writer, writer.doWrite) - if err is None and events & IOLoop.ERROR: - err = error.ConnectionLost() - if err is not None: - self.removeWriter(writer) - writer.writeConnectionLost(failure.Failure(err)) - - def addReader(self, reader): - if reader in self._readers: - # Don't add the reader if it's already there - return - fd = reader.fileno() - self._readers[reader] = fd - if fd in self._fds: - (_, writer) = self._fds[fd] - self._fds[fd] = (reader, writer) - if writer: - # We already registered this fd for write events, - # update it for read events as well. - self._io_loop.update_handler(fd, IOLoop.READ | IOLoop.WRITE) - else: - with NullContext(): - self._fds[fd] = (reader, None) - self._io_loop.add_handler(fd, self._invoke_callback, - IOLoop.READ) - - def addWriter(self, writer): - if writer in self._writers: - return - fd = writer.fileno() - self._writers[writer] = fd - if fd in self._fds: - (reader, _) = self._fds[fd] - self._fds[fd] = (reader, writer) - if reader: - # We already registered this fd for read events, - # update it for write events as well. 
- self._io_loop.update_handler(fd, IOLoop.READ | IOLoop.WRITE) - else: - with NullContext(): - self._fds[fd] = (None, writer) - self._io_loop.add_handler(fd, self._invoke_callback, - IOLoop.WRITE) - - def removeReader(self, reader): - if reader in self._readers: - fd = self._readers.pop(reader) - (_, writer) = self._fds[fd] - if writer: - # We have a writer so we need to update the IOLoop for - # write events only. - self._fds[fd] = (None, writer) - self._io_loop.update_handler(fd, IOLoop.WRITE) - else: - # Since we have no writer registered, we remove the - # entry from _fds and unregister the handler from the - # IOLoop - del self._fds[fd] - self._io_loop.remove_handler(fd) - - def removeWriter(self, writer): - if writer in self._writers: - fd = self._writers.pop(writer) - (reader, _) = self._fds[fd] - if reader: - # We have a reader so we need to update the IOLoop for - # read events only. - self._fds[fd] = (reader, None) - self._io_loop.update_handler(fd, IOLoop.READ) - else: - # Since we have no reader registered, we remove the - # entry from the _fds and unregister the handler from - # the IOLoop. - del self._fds[fd] - self._io_loop.remove_handler(fd) - - def removeAll(self): - return self._removeAll(self._readers, self._writers) - - def getReaders(self): - return self._readers.keys() - - def getWriters(self): - return self._writers.keys() - - # The following functions are mainly used in twisted-style test cases; - # it is expected that most users of the TornadoReactor will call - # IOLoop.start() instead of Reactor.run(). - def stop(self): - PosixReactorBase.stop(self) - fire_shutdown = functools.partial(self.fireSystemEvent, "shutdown") - self._io_loop.add_callback(fire_shutdown) - - def crash(self): - PosixReactorBase.crash(self) - self._io_loop.stop() - - def doIteration(self, delay): - raise NotImplementedError("doIteration") - - def mainLoop(self): - # Since this class is intended to be used in applications - # where the top-level event loop is ``io_loop.start()`` rather - # than ``reactor.run()``, it is implemented a little - # differently than other Twisted reactors. We override - # ``mainLoop`` instead of ``doIteration`` and must implement - # timed call functionality on top of `.IOLoop.add_timeout` - # rather than using the implementation in - # ``PosixReactorBase``. - self._io_loop.start() - - -class _TestReactor(TornadoReactor): - """Subclass of TornadoReactor for use in unittests. - - This can't go in the test.py file because of import-order dependencies - with the Twisted reactor test builder. - """ - def __init__(self): - # always use a new ioloop - IOLoop.clear_current() - IOLoop(make_current=True) - super(_TestReactor, self).__init__() - IOLoop.clear_current() - - def listenTCP(self, port, factory, backlog=50, interface=''): - # default to localhost to avoid firewall prompts on the mac - if not interface: - interface = '127.0.0.1' - return super(_TestReactor, self).listenTCP( - port, factory, backlog=backlog, interface=interface) - - def listenUDP(self, port, protocol, interface='', maxPacketSize=8192): - if not interface: - interface = '127.0.0.1' - return super(_TestReactor, self).listenUDP( - port, protocol, interface=interface, maxPacketSize=maxPacketSize) - - -def install(): - """Install this package as the default Twisted reactor. - - ``install()`` must be called very early in the startup process, - before most other twisted-related imports. 
Conversely, because it
-    initializes the `.IOLoop`, it cannot be called before
-    `.fork_processes` or multi-process `~.TCPServer.start`. These
-    conflicting requirements make it difficult to use `.TornadoReactor`
-    in multi-process mode, and an external process manager such as
-    ``supervisord`` is recommended instead.
-
-    .. versionchanged:: 5.0
-       The ``io_loop`` argument (deprecated since version 4.1) has been removed.
-
-    .. deprecated:: 5.1
-
-        This function will be removed in Tornado 6.0. Use
-        ``twisted.internet.asyncioreactor.install`` instead.
-    """
-    reactor = TornadoReactor()
-    from twisted.internet.main import installReactor  # type: ignore
-    installReactor(reactor)
-    return reactor
-
-
-@implementer(IReadDescriptor, IWriteDescriptor)
-class _FD(object):
-    def __init__(self, fd, fileobj, handler):
-        self.fd = fd
-        self.fileobj = fileobj
-        self.handler = handler
-        self.reading = False
-        self.writing = False
-        self.lost = False
-
-    def fileno(self):
-        return self.fd
-
-    def doRead(self):
-        if not self.lost:
-            self.handler(self.fileobj, tornado.ioloop.IOLoop.READ)
-
-    def doWrite(self):
-        if not self.lost:
-            self.handler(self.fileobj, tornado.ioloop.IOLoop.WRITE)
-
-    def connectionLost(self, reason):
-        if not self.lost:
-            self.handler(self.fileobj, tornado.ioloop.IOLoop.ERROR)
-            self.lost = True
-
-    writeConnectionLost = readConnectionLost = connectionLost
-
-    def logPrefix(self):
-        return ''
-
-
-class TwistedIOLoop(tornado.ioloop.IOLoop):
-    """IOLoop implementation that runs on Twisted.
-
-    `TwistedIOLoop` implements the Tornado IOLoop interface on top of
-    the Twisted reactor. Recommended usage::
-
-        from tornado.platform.twisted import TwistedIOLoop
-        from twisted.internet import reactor
-        TwistedIOLoop().install()
-        # Set up your tornado application as usual using `IOLoop.instance`
-        reactor.run()
-
-    Uses the global Twisted reactor by default. To create multiple
-    ``TwistedIOLoops`` in the same process, you must pass a unique reactor
-    when constructing each one.
-
-    Not compatible with `tornado.process.Subprocess.set_exit_callback`
-    because the ``SIGCHLD`` handlers used by Tornado and Twisted conflict
-    with each other.
-
-    See also :meth:`tornado.ioloop.IOLoop.install` for general notes on
-    installing alternative IOLoops.
-
-    .. deprecated:: 5.1
-
-        The `asyncio` event loop will be the only available implementation in
-        Tornado 6.0.
-    """
-    def initialize(self, reactor=None, **kwargs):
-        super(TwistedIOLoop, self).initialize(**kwargs)
-        if reactor is None:
-            import twisted.internet.reactor  # type: ignore
-            reactor = twisted.internet.reactor
-        self.reactor = reactor
-        self.fds = {}
-
-    def close(self, all_fds=False):
-        fds = self.fds
-        self.reactor.removeAll()
-        for c in self.reactor.getDelayedCalls():
-            c.cancel()
-        if all_fds:
-            for fd in fds.values():
-                self.close_fd(fd.fileobj)
-
-    def add_handler(self, fd, handler, events):
-        if fd in self.fds:
-            raise ValueError('fd %s added twice' % fd)
-        fd, fileobj = self.split_fd(fd)
-        self.fds[fd] = _FD(fd, fileobj, wrap(handler))
-        if events & tornado.ioloop.IOLoop.READ:
-            self.fds[fd].reading = True
-            self.reactor.addReader(self.fds[fd])
-        if events & tornado.ioloop.IOLoop.WRITE:
-            self.fds[fd].writing = True
-            self.reactor.addWriter(self.fds[fd])
-
-    def update_handler(self, fd, events):
-        fd, fileobj = self.split_fd(fd)
-        if events & tornado.ioloop.IOLoop.READ:
-            if not self.fds[fd].reading:
-                self.fds[fd].reading = True
-                self.reactor.addReader(self.fds[fd])
-        else:
-            if self.fds[fd].reading:
-                self.fds[fd].reading = False
-                self.reactor.removeReader(self.fds[fd])
-        if events & tornado.ioloop.IOLoop.WRITE:
-            if not self.fds[fd].writing:
-                self.fds[fd].writing = True
-                self.reactor.addWriter(self.fds[fd])
-        else:
-            if self.fds[fd].writing:
-                self.fds[fd].writing = False
-                self.reactor.removeWriter(self.fds[fd])
-
-    def remove_handler(self, fd):
-        fd, fileobj = self.split_fd(fd)
-        if fd not in self.fds:
-            return
-        self.fds[fd].lost = True
-        if self.fds[fd].reading:
-            self.reactor.removeReader(self.fds[fd])
-        if self.fds[fd].writing:
-            self.reactor.removeWriter(self.fds[fd])
-        del self.fds[fd]
-
-    def start(self):
-        old_current = IOLoop.current(instance=False)
-        try:
-            self._setup_logging()
-            self.make_current()
-            self.reactor.run()
-        finally:
-            if old_current is None:
-                IOLoop.clear_current()
-            else:
-                old_current.make_current()
-
-    def stop(self):
-        self.reactor.crash()
-
-    def add_timeout(self, deadline, callback, *args, **kwargs):
-        # This method could be simplified (since tornado 4.0) by
-        # overriding call_at instead of add_timeout, but we leave it
-        # for now as a test of backwards-compatibility.
-        if isinstance(deadline, numbers.Real):
-            delay = max(deadline - self.time(), 0)
-        elif isinstance(deadline, datetime.timedelta):
-            delay = timedelta_to_seconds(deadline)
-        else:
-            raise TypeError("Unsupported deadline %r" % deadline)
-        return self.reactor.callLater(
-            delay, self._run_callback,
-            functools.partial(wrap(callback), *args, **kwargs))
-
-    def remove_timeout(self, timeout):
-        if timeout.active():
-            timeout.cancel()
-
-    def add_callback(self, callback, *args, **kwargs):
-        self.reactor.callFromThread(
-            self._run_callback,
-            functools.partial(wrap(callback), *args, **kwargs))
-
-    def add_callback_from_signal(self, callback, *args, **kwargs):
-        self.add_callback(callback, *args, **kwargs)
-
-
-class TwistedResolver(Resolver):
-    """Twisted-based asynchronous resolver.
-
-    This is a non-blocking and non-threaded resolver. It is
-    recommended only when threads cannot be used, since it has
-    limitations compared to the standard ``getaddrinfo``-based
-    `~tornado.netutil.Resolver` and
-    `~tornado.netutil.DefaultExecutorResolver`. Specifically, it returns at
-    most one result, and arguments other than ``host`` and ``family``
-    are ignored. It may fail to resolve when ``family`` is not
-    ``socket.AF_UNSPEC``.
-
-    Requires Twisted 12.1 or newer.
-
-    ..
versionchanged:: 5.0 - The ``io_loop`` argument (deprecated since version 4.1) has been removed. - """ - def initialize(self): - # partial copy of twisted.names.client.createResolver, which doesn't - # allow for a reactor to be passed in. - self.reactor = tornado.platform.twisted.TornadoReactor() - - host_resolver = twisted.names.hosts.Resolver('/etc/hosts') - cache_resolver = twisted.names.cache.CacheResolver(reactor=self.reactor) - real_resolver = twisted.names.client.Resolver('/etc/resolv.conf', - reactor=self.reactor) - self.resolver = twisted.names.resolve.ResolverChain( - [host_resolver, cache_resolver, real_resolver]) - - @gen.coroutine - def resolve(self, host, port, family=0): - # getHostByName doesn't accept IP addresses, so if the input - # looks like an IP address just return it immediately. - if twisted.internet.abstract.isIPAddress(host): - resolved = host - resolved_family = socket.AF_INET - elif twisted.internet.abstract.isIPv6Address(host): - resolved = host - resolved_family = socket.AF_INET6 - else: - deferred = self.resolver.getHostByName(utf8(host)) - fut = Future() - deferred.addBoth(fut.set_result) - resolved = yield fut - if isinstance(resolved, failure.Failure): - try: - resolved.raiseException() - except twisted.names.error.DomainError as e: - raise IOError(e) - elif twisted.internet.abstract.isIPAddress(resolved): - resolved_family = socket.AF_INET - elif twisted.internet.abstract.isIPv6Address(resolved): - resolved_family = socket.AF_INET6 - else: - resolved_family = socket.AF_UNSPEC - if family != socket.AF_UNSPEC and family != resolved_family: - raise Exception('Requested socket family %d but got %d' % - (family, resolved_family)) - result = [ - (resolved_family, (resolved, port)), - ] - raise gen.Return(result) - - -if hasattr(gen.convert_yielded, 'register'): - @gen.convert_yielded.register(Deferred) # type: ignore - def _(d): - f = Future() - - def errback(failure): - try: - failure.raiseException() - # Should never happen, but just in case - raise Exception("errback called without error") - except: - future_set_exc_info(f, sys.exc_info()) - d.addCallbacks(f.set_result, errback) - return f diff --git a/lib/tornado/platform/windows.py b/lib/tornado/platform/windows.py deleted file mode 100755 index 41277006..00000000 --- a/lib/tornado/platform/windows.py +++ /dev/null @@ -1,20 +0,0 @@ -# NOTE: win32 support is currently experimental, and not recommended -# for production use. - - -from __future__ import absolute_import, division, print_function -import ctypes # type: ignore -import ctypes.wintypes # type: ignore - -# See: http://msdn.microsoft.com/en-us/library/ms724935(VS.85).aspx -SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation -SetHandleInformation.argtypes = (ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD) # noqa: E501 -SetHandleInformation.restype = ctypes.wintypes.BOOL - -HANDLE_FLAG_INHERIT = 0x00000001 - - -def set_close_exec(fd): - success = SetHandleInformation(fd, HANDLE_FLAG_INHERIT, 0) - if not success: - raise ctypes.WinError() diff --git a/lib/tornado/process.py b/lib/tornado/process.py deleted file mode 100755 index 122fd7e1..00000000 --- a/lib/tornado/process.py +++ /dev/null @@ -1,361 +0,0 @@ -# -# Copyright 2011 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Utilities for working with multiple processes, including both forking -the server into multiple processes and managing subprocesses. -""" - -from __future__ import absolute_import, division, print_function - -import errno -import os -import signal -import subprocess -import sys -import time - -from binascii import hexlify - -from tornado.concurrent import Future, future_set_result_unless_cancelled -from tornado import ioloop -from tornado.iostream import PipeIOStream -from tornado.log import gen_log -from tornado.platform.auto import set_close_exec -from tornado import stack_context -from tornado.util import errno_from_exception, PY3 - -try: - import multiprocessing -except ImportError: - # Multiprocessing is not available on Google App Engine. - multiprocessing = None - -if PY3: - long = int - -# Re-export this exception for convenience. -try: - CalledProcessError = subprocess.CalledProcessError -except AttributeError: - # The subprocess module exists in Google App Engine, but is empty. - # This module isn't very useful in that case, but it should - # at least be importable. - if 'APPENGINE_RUNTIME' not in os.environ: - raise - - -def cpu_count(): - """Returns the number of processors on this machine.""" - if multiprocessing is None: - return 1 - try: - return multiprocessing.cpu_count() - except NotImplementedError: - pass - try: - return os.sysconf("SC_NPROCESSORS_CONF") - except (AttributeError, ValueError): - pass - gen_log.error("Could not detect number of processors; assuming 1") - return 1 - - -def _reseed_random(): - if 'random' not in sys.modules: - return - import random - # If os.urandom is available, this method does the same thing as - # random.seed (at least as of python 2.6). If os.urandom is not - # available, we mix in the pid in addition to a timestamp. - try: - seed = long(hexlify(os.urandom(16)), 16) - except NotImplementedError: - seed = int(time.time() * 1000) ^ os.getpid() - random.seed(seed) - - -def _pipe_cloexec(): - r, w = os.pipe() - set_close_exec(r) - set_close_exec(w) - return r, w - - -_task_id = None - - -def fork_processes(num_processes, max_restarts=100): - """Starts multiple worker processes. - - If ``num_processes`` is None or <= 0, we detect the number of cores - available on this machine and fork that number of child - processes. If ``num_processes`` is given and > 0, we fork that - specific number of sub-processes. - - Since we use processes and not threads, there is no shared memory - between any server code. - - Note that multiple processes are not compatible with the autoreload - module (or the ``autoreload=True`` option to `tornado.web.Application` - which defaults to True when ``debug=True``). - When using multiple processes, no IOLoops can be created or - referenced until after the call to ``fork_processes``. - - In each child process, ``fork_processes`` returns its *task id*, a - number between 0 and ``num_processes``. Processes that exit - abnormally (due to a signal or non-zero exit status) are restarted - with the same id (up to ``max_restarts`` times). 
In the parent - process, ``fork_processes`` returns None if all child processes - have exited normally, but will otherwise only exit by throwing an - exception. - """ - global _task_id - assert _task_id is None - if num_processes is None or num_processes <= 0: - num_processes = cpu_count() - gen_log.info("Starting %d processes", num_processes) - children = {} - - def start_child(i): - pid = os.fork() - if pid == 0: - # child process - _reseed_random() - global _task_id - _task_id = i - return i - else: - children[pid] = i - return None - - for i in range(num_processes): - id = start_child(i) - if id is not None: - return id - num_restarts = 0 - while children: - try: - pid, status = os.wait() - except OSError as e: - if errno_from_exception(e) == errno.EINTR: - continue - raise - if pid not in children: - continue - id = children.pop(pid) - if os.WIFSIGNALED(status): - gen_log.warning("child %d (pid %d) killed by signal %d, restarting", - id, pid, os.WTERMSIG(status)) - elif os.WEXITSTATUS(status) != 0: - gen_log.warning("child %d (pid %d) exited with status %d, restarting", - id, pid, os.WEXITSTATUS(status)) - else: - gen_log.info("child %d (pid %d) exited normally", id, pid) - continue - num_restarts += 1 - if num_restarts > max_restarts: - raise RuntimeError("Too many child restarts, giving up") - new_id = start_child(id) - if new_id is not None: - return new_id - # All child processes exited cleanly, so exit the master process - # instead of just returning to right after the call to - # fork_processes (which will probably just start up another IOLoop - # unless the caller checks the return value). - sys.exit(0) - - -def task_id(): - """Returns the current task id, if any. - - Returns None if this process was not created by `fork_processes`. - """ - global _task_id - return _task_id - - -class Subprocess(object): - """Wraps ``subprocess.Popen`` with IOStream support. - - The constructor is the same as ``subprocess.Popen`` with the following - additions: - - * ``stdin``, ``stdout``, and ``stderr`` may have the value - ``tornado.process.Subprocess.STREAM``, which will make the corresponding - attribute of the resulting Subprocess a `.PipeIOStream`. If this option - is used, the caller is responsible for closing the streams when done - with them. - - The ``Subprocess.STREAM`` option and the ``set_exit_callback`` and - ``wait_for_exit`` methods do not work on Windows. There is - therefore no reason to use this class instead of - ``subprocess.Popen`` on that platform. - - .. versionchanged:: 5.0 - The ``io_loop`` argument (deprecated since version 4.1) has been removed. - - """ - STREAM = object() - - _initialized = False - _waiting = {} # type: ignore - - def __init__(self, *args, **kwargs): - self.io_loop = ioloop.IOLoop.current() - # All FDs we create should be closed on error; those in to_close - # should be closed in the parent process on success. 
- pipe_fds = [] - to_close = [] - if kwargs.get('stdin') is Subprocess.STREAM: - in_r, in_w = _pipe_cloexec() - kwargs['stdin'] = in_r - pipe_fds.extend((in_r, in_w)) - to_close.append(in_r) - self.stdin = PipeIOStream(in_w) - if kwargs.get('stdout') is Subprocess.STREAM: - out_r, out_w = _pipe_cloexec() - kwargs['stdout'] = out_w - pipe_fds.extend((out_r, out_w)) - to_close.append(out_w) - self.stdout = PipeIOStream(out_r) - if kwargs.get('stderr') is Subprocess.STREAM: - err_r, err_w = _pipe_cloexec() - kwargs['stderr'] = err_w - pipe_fds.extend((err_r, err_w)) - to_close.append(err_w) - self.stderr = PipeIOStream(err_r) - try: - self.proc = subprocess.Popen(*args, **kwargs) - except: - for fd in pipe_fds: - os.close(fd) - raise - for fd in to_close: - os.close(fd) - for attr in ['stdin', 'stdout', 'stderr', 'pid']: - if not hasattr(self, attr): # don't clobber streams set above - setattr(self, attr, getattr(self.proc, attr)) - self._exit_callback = None - self.returncode = None - - def set_exit_callback(self, callback): - """Runs ``callback`` when this process exits. - - The callback takes one argument, the return code of the process. - - This method uses a ``SIGCHLD`` handler, which is a global setting - and may conflict if you have other libraries trying to handle the - same signal. If you are using more than one ``IOLoop`` it may - be necessary to call `Subprocess.initialize` first to designate - one ``IOLoop`` to run the signal handlers. - - In many cases a close callback on the stdout or stderr streams - can be used as an alternative to an exit callback if the - signal handler is causing a problem. - """ - self._exit_callback = stack_context.wrap(callback) - Subprocess.initialize() - Subprocess._waiting[self.pid] = self - Subprocess._try_cleanup_process(self.pid) - - def wait_for_exit(self, raise_error=True): - """Returns a `.Future` which resolves when the process exits. - - Usage:: - - ret = yield proc.wait_for_exit() - - This is a coroutine-friendly alternative to `set_exit_callback` - (and a replacement for the blocking `subprocess.Popen.wait`). - - By default, raises `subprocess.CalledProcessError` if the process - has a non-zero exit status. Use ``wait_for_exit(raise_error=False)`` - to suppress this behavior and return the exit status without raising. - - .. versionadded:: 4.2 - """ - future = Future() - - def callback(ret): - if ret != 0 and raise_error: - # Unfortunately we don't have the original args any more. - future.set_exception(CalledProcessError(ret, None)) - else: - future_set_result_unless_cancelled(future, ret) - self.set_exit_callback(callback) - return future - - @classmethod - def initialize(cls): - """Initializes the ``SIGCHLD`` handler. - - The signal handler is run on an `.IOLoop` to avoid locking issues. - Note that the `.IOLoop` used for signal handling need not be the - same one used by individual Subprocess objects (as long as the - ``IOLoops`` are each running in separate threads). - - .. versionchanged:: 5.0 - The ``io_loop`` argument (deprecated since version 4.1) has been - removed. 
- """ - if cls._initialized: - return - io_loop = ioloop.IOLoop.current() - cls._old_sigchld = signal.signal( - signal.SIGCHLD, - lambda sig, frame: io_loop.add_callback_from_signal(cls._cleanup)) - cls._initialized = True - - @classmethod - def uninitialize(cls): - """Removes the ``SIGCHLD`` handler.""" - if not cls._initialized: - return - signal.signal(signal.SIGCHLD, cls._old_sigchld) - cls._initialized = False - - @classmethod - def _cleanup(cls): - for pid in list(cls._waiting.keys()): # make a copy - cls._try_cleanup_process(pid) - - @classmethod - def _try_cleanup_process(cls, pid): - try: - ret_pid, status = os.waitpid(pid, os.WNOHANG) - except OSError as e: - if errno_from_exception(e) == errno.ECHILD: - return - if ret_pid == 0: - return - assert ret_pid == pid - subproc = cls._waiting.pop(pid) - subproc.io_loop.add_callback_from_signal( - subproc._set_returncode, status) - - def _set_returncode(self, status): - if os.WIFSIGNALED(status): - self.returncode = -os.WTERMSIG(status) - else: - assert os.WIFEXITED(status) - self.returncode = os.WEXITSTATUS(status) - # We've taken over wait() duty from the subprocess.Popen - # object. If we don't inform it of the process's return code, - # it will log a warning at destruction in python 3.6+. - self.proc.returncode = self.returncode - if self._exit_callback: - callback = self._exit_callback - self._exit_callback = None - callback(self.returncode) diff --git a/lib/tornado/queues.py b/lib/tornado/queues.py deleted file mode 100755 index 7cb96bfc..00000000 --- a/lib/tornado/queues.py +++ /dev/null @@ -1,379 +0,0 @@ -# Copyright 2015 The Tornado Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Asynchronous queues for coroutines. These classes are very similar -to those provided in the standard library's `asyncio package -<https://docs.python.org/3/library/asyncio-queue.html>`_. - -.. warning:: - - Unlike the standard library's `queue` module, the classes defined here - are *not* thread-safe. To use these queues from another thread, - use `.IOLoop.add_callback` to transfer control to the `.IOLoop` thread - before calling any queue methods. 
- -""" - -from __future__ import absolute_import, division, print_function - -import collections -import heapq - -from tornado import gen, ioloop -from tornado.concurrent import Future, future_set_result_unless_cancelled -from tornado.locks import Event - -__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty'] - - -class QueueEmpty(Exception): - """Raised by `.Queue.get_nowait` when the queue has no items.""" - pass - - -class QueueFull(Exception): - """Raised by `.Queue.put_nowait` when a queue is at its maximum size.""" - pass - - -def _set_timeout(future, timeout): - if timeout: - def on_timeout(): - if not future.done(): - future.set_exception(gen.TimeoutError()) - io_loop = ioloop.IOLoop.current() - timeout_handle = io_loop.add_timeout(timeout, on_timeout) - future.add_done_callback( - lambda _: io_loop.remove_timeout(timeout_handle)) - - -class _QueueIterator(object): - def __init__(self, q): - self.q = q - - def __anext__(self): - return self.q.get() - - -class Queue(object): - """Coordinate producer and consumer coroutines. - - If maxsize is 0 (the default) the queue size is unbounded. - - .. testcode:: - - from tornado import gen - from tornado.ioloop import IOLoop - from tornado.queues import Queue - - q = Queue(maxsize=2) - - async def consumer(): - async for item in q: - try: - print('Doing work on %s' % item) - await gen.sleep(0.01) - finally: - q.task_done() - - async def producer(): - for item in range(5): - await q.put(item) - print('Put %s' % item) - - async def main(): - # Start consumer without waiting (since it never finishes). - IOLoop.current().spawn_callback(consumer) - await producer() # Wait for producer to put all tasks. - await q.join() # Wait for consumer to finish all tasks. - print('Done') - - IOLoop.current().run_sync(main) - - .. testoutput:: - - Put 0 - Put 1 - Doing work on 0 - Put 2 - Doing work on 1 - Put 3 - Doing work on 2 - Put 4 - Doing work on 3 - Doing work on 4 - Done - - - In versions of Python without native coroutines (before 3.5), - ``consumer()`` could be written as:: - - @gen.coroutine - def consumer(): - while True: - item = yield q.get() - try: - print('Doing work on %s' % item) - yield gen.sleep(0.01) - finally: - q.task_done() - - .. versionchanged:: 4.3 - Added ``async for`` support in Python 3.5. - - """ - def __init__(self, maxsize=0): - if maxsize is None: - raise TypeError("maxsize can't be None") - - if maxsize < 0: - raise ValueError("maxsize can't be negative") - - self._maxsize = maxsize - self._init() - self._getters = collections.deque([]) # Futures. - self._putters = collections.deque([]) # Pairs of (item, Future). - self._unfinished_tasks = 0 - self._finished = Event() - self._finished.set() - - @property - def maxsize(self): - """Number of items allowed in the queue.""" - return self._maxsize - - def qsize(self): - """Number of items in the queue.""" - return len(self._queue) - - def empty(self): - return not self._queue - - def full(self): - if self.maxsize == 0: - return False - else: - return self.qsize() >= self.maxsize - - def put(self, item, timeout=None): - """Put an item into the queue, perhaps waiting until there is room. - - Returns a Future, which raises `tornado.util.TimeoutError` after a - timeout. - - ``timeout`` may be a number denoting a time (on the same - scale as `tornado.ioloop.IOLoop.time`, normally `time.time`), or a - `datetime.timedelta` object for a deadline relative to the - current time. 
- """ - future = Future() - try: - self.put_nowait(item) - except QueueFull: - self._putters.append((item, future)) - _set_timeout(future, timeout) - else: - future.set_result(None) - return future - - def put_nowait(self, item): - """Put an item into the queue without blocking. - - If no free slot is immediately available, raise `QueueFull`. - """ - self._consume_expired() - if self._getters: - assert self.empty(), "queue non-empty, why are getters waiting?" - getter = self._getters.popleft() - self.__put_internal(item) - future_set_result_unless_cancelled(getter, self._get()) - elif self.full(): - raise QueueFull - else: - self.__put_internal(item) - - def get(self, timeout=None): - """Remove and return an item from the queue. - - Returns a Future which resolves once an item is available, or raises - `tornado.util.TimeoutError` after a timeout. - - ``timeout`` may be a number denoting a time (on the same - scale as `tornado.ioloop.IOLoop.time`, normally `time.time`), or a - `datetime.timedelta` object for a deadline relative to the - current time. - """ - future = Future() - try: - future.set_result(self.get_nowait()) - except QueueEmpty: - self._getters.append(future) - _set_timeout(future, timeout) - return future - - def get_nowait(self): - """Remove and return an item from the queue without blocking. - - Return an item if one is immediately available, else raise - `QueueEmpty`. - """ - self._consume_expired() - if self._putters: - assert self.full(), "queue not full, why are putters waiting?" - item, putter = self._putters.popleft() - self.__put_internal(item) - future_set_result_unless_cancelled(putter, None) - return self._get() - elif self.qsize(): - return self._get() - else: - raise QueueEmpty - - def task_done(self): - """Indicate that a formerly enqueued task is complete. - - Used by queue consumers. For each `.get` used to fetch a task, a - subsequent call to `.task_done` tells the queue that the processing - on the task is complete. - - If a `.join` is blocking, it resumes when all items have been - processed; that is, when every `.put` is matched by a `.task_done`. - - Raises `ValueError` if called more times than `.put`. - """ - if self._unfinished_tasks <= 0: - raise ValueError('task_done() called too many times') - self._unfinished_tasks -= 1 - if self._unfinished_tasks == 0: - self._finished.set() - - def join(self, timeout=None): - """Block until all items in the queue are processed. - - Returns a Future, which raises `tornado.util.TimeoutError` after a - timeout. - """ - return self._finished.wait(timeout) - - def __aiter__(self): - return _QueueIterator(self) - - # These three are overridable in subclasses. - def _init(self): - self._queue = collections.deque() - - def _get(self): - return self._queue.popleft() - - def _put(self, item): - self._queue.append(item) - # End of the overridable methods. - - def __put_internal(self, item): - self._unfinished_tasks += 1 - self._finished.clear() - self._put(item) - - def _consume_expired(self): - # Remove timed-out waiters. 
- while self._putters and self._putters[0][1].done(): - self._putters.popleft() - - while self._getters and self._getters[0].done(): - self._getters.popleft() - - def __repr__(self): - return '<%s at %s %s>' % ( - type(self).__name__, hex(id(self)), self._format()) - - def __str__(self): - return '<%s %s>' % (type(self).__name__, self._format()) - - def _format(self): - result = 'maxsize=%r' % (self.maxsize, ) - if getattr(self, '_queue', None): - result += ' queue=%r' % self._queue - if self._getters: - result += ' getters[%s]' % len(self._getters) - if self._putters: - result += ' putters[%s]' % len(self._putters) - if self._unfinished_tasks: - result += ' tasks=%s' % self._unfinished_tasks - return result - - -class PriorityQueue(Queue): - """A `.Queue` that retrieves entries in priority order, lowest first. - - Entries are typically tuples like ``(priority number, data)``. - - .. testcode:: - - from tornado.queues import PriorityQueue - - q = PriorityQueue() - q.put((1, 'medium-priority item')) - q.put((0, 'high-priority item')) - q.put((10, 'low-priority item')) - - print(q.get_nowait()) - print(q.get_nowait()) - print(q.get_nowait()) - - .. testoutput:: - - (0, 'high-priority item') - (1, 'medium-priority item') - (10, 'low-priority item') - """ - def _init(self): - self._queue = [] - - def _put(self, item): - heapq.heappush(self._queue, item) - - def _get(self): - return heapq.heappop(self._queue) - - -class LifoQueue(Queue): - """A `.Queue` that retrieves the most recently put items first. - - .. testcode:: - - from tornado.queues import LifoQueue - - q = LifoQueue() - q.put(3) - q.put(2) - q.put(1) - - print(q.get_nowait()) - print(q.get_nowait()) - print(q.get_nowait()) - - .. testoutput:: - - 1 - 2 - 3 - """ - def _init(self): - self._queue = [] - - def _put(self, item): - self._queue.append(item) - - def _get(self): - return self._queue.pop() diff --git a/lib/tornado/routing.py b/lib/tornado/routing.py deleted file mode 100755 index e56d1a75..00000000 --- a/lib/tornado/routing.py +++ /dev/null @@ -1,641 +0,0 @@ -# Copyright 2015 The Tornado Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Flexible routing implementation. - -Tornado routes HTTP requests to appropriate handlers using `Router` -class implementations. The `tornado.web.Application` class is a -`Router` implementation and may be used directly, or the classes in -this module may be used for additional flexibility. The `RuleRouter` -class can match on more criteria than `.Application`, or the `Router` -interface can be subclassed for maximum customization. - -`Router` interface extends `~.httputil.HTTPServerConnectionDelegate` -to provide additional routing capabilities. This also means that any -`Router` implementation can be used directly as a ``request_callback`` -for `~.httpserver.HTTPServer` constructor. - -`Router` subclass must implement a ``find_handler`` method to provide -a suitable `~.httputil.HTTPMessageDelegate` instance to handle the -request: - -.. 
code-block:: python - - class CustomRouter(Router): - def find_handler(self, request, **kwargs): - # some routing logic providing a suitable HTTPMessageDelegate instance - return MessageDelegate(request.connection) - - class MessageDelegate(HTTPMessageDelegate): - def __init__(self, connection): - self.connection = connection - - def finish(self): - self.connection.write_headers( - ResponseStartLine("HTTP/1.1", 200, "OK"), - HTTPHeaders({"Content-Length": "2"}), - b"OK") - self.connection.finish() - - router = CustomRouter() - server = HTTPServer(router) - -The main responsibility of `Router` implementation is to provide a -mapping from a request to `~.httputil.HTTPMessageDelegate` instance -that will handle this request. In the example above we can see that -routing is possible even without instantiating an `~.web.Application`. - -For routing to `~.web.RequestHandler` implementations we need an -`~.web.Application` instance. `~.web.Application.get_handler_delegate` -provides a convenient way to create `~.httputil.HTTPMessageDelegate` -for a given request and `~.web.RequestHandler`. - -Here is a simple example of how we can we route to -`~.web.RequestHandler` subclasses by HTTP method: - -.. code-block:: python - - resources = {} - - class GetResource(RequestHandler): - def get(self, path): - if path not in resources: - raise HTTPError(404) - - self.finish(resources[path]) - - class PostResource(RequestHandler): - def post(self, path): - resources[path] = self.request.body - - class HTTPMethodRouter(Router): - def __init__(self, app): - self.app = app - - def find_handler(self, request, **kwargs): - handler = GetResource if request.method == "GET" else PostResource - return self.app.get_handler_delegate(request, handler, path_args=[request.path]) - - router = HTTPMethodRouter(Application()) - server = HTTPServer(router) - -`ReversibleRouter` interface adds the ability to distinguish between -the routes and reverse them to the original urls using route's name -and additional arguments. `~.web.Application` is itself an -implementation of `ReversibleRouter` class. - -`RuleRouter` and `ReversibleRuleRouter` are implementations of -`Router` and `ReversibleRouter` interfaces and can be used for -creating rule-based routing configurations. - -Rules are instances of `Rule` class. They contain a `Matcher`, which -provides the logic for determining whether the rule is a match for a -particular request and a target, which can be one of the following. - -1) An instance of `~.httputil.HTTPServerConnectionDelegate`: - -.. code-block:: python - - router = RuleRouter([ - Rule(PathMatches("/handler"), ConnectionDelegate()), - # ... more rules - ]) - - class ConnectionDelegate(HTTPServerConnectionDelegate): - def start_request(self, server_conn, request_conn): - return MessageDelegate(request_conn) - -2) A callable accepting a single argument of `~.httputil.HTTPServerRequest` type: - -.. code-block:: python - - router = RuleRouter([ - Rule(PathMatches("/callable"), request_callable) - ]) - - def request_callable(request): - request.write(b"HTTP/1.1 200 OK\\r\\nContent-Length: 2\\r\\n\\r\\nOK") - request.finish() - -3) Another `Router` instance: - -.. code-block:: python - - router = RuleRouter([ - Rule(PathMatches("/router.*"), CustomRouter()) - ]) - -Of course a nested `RuleRouter` or a `~.web.Application` is allowed: - -.. 
code-block:: python - - router = RuleRouter([ - Rule(HostMatches("example.com"), RuleRouter([ - Rule(PathMatches("/app1/.*"), Application([(r"/app1/handler", Handler)]))), - ])) - ]) - - server = HTTPServer(router) - -In the example below `RuleRouter` is used to route between applications: - -.. code-block:: python - - app1 = Application([ - (r"/app1/handler", Handler1), - # other handlers ... - ]) - - app2 = Application([ - (r"/app2/handler", Handler2), - # other handlers ... - ]) - - router = RuleRouter([ - Rule(PathMatches("/app1.*"), app1), - Rule(PathMatches("/app2.*"), app2) - ]) - - server = HTTPServer(router) - -For more information on application-level routing see docs for `~.web.Application`. - -.. versionadded:: 4.5 - -""" - -from __future__ import absolute_import, division, print_function - -import re -from functools import partial - -from tornado import httputil -from tornado.httpserver import _CallableAdapter -from tornado.escape import url_escape, url_unescape, utf8 -from tornado.log import app_log -from tornado.util import basestring_type, import_object, re_unescape, unicode_type - -try: - import typing # noqa -except ImportError: - pass - - -class Router(httputil.HTTPServerConnectionDelegate): - """Abstract router interface.""" - - def find_handler(self, request, **kwargs): - # type: (httputil.HTTPServerRequest, typing.Any)->httputil.HTTPMessageDelegate - """Must be implemented to return an appropriate instance of `~.httputil.HTTPMessageDelegate` - that can serve the request. - Routing implementations may pass additional kwargs to extend the routing logic. - - :arg httputil.HTTPServerRequest request: current HTTP request. - :arg kwargs: additional keyword arguments passed by routing implementation. - :returns: an instance of `~.httputil.HTTPMessageDelegate` that will be used to - process the request. - """ - raise NotImplementedError() - - def start_request(self, server_conn, request_conn): - return _RoutingDelegate(self, server_conn, request_conn) - - -class ReversibleRouter(Router): - """Abstract router interface for routers that can handle named routes - and support reversing them to original urls. - """ - - def reverse_url(self, name, *args): - """Returns url string for a given route name and arguments - or ``None`` if no match is found. - - :arg str name: route name. - :arg args: url parameters. - :returns: parametrized url string for a given route name (or ``None``). 
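Concretely, with the rule-based implementation defined later in this module, a named rule can be reversed back into a url (the pattern and name are illustrative):

.. code-block:: python

    from tornado.routing import ReversibleRuleRouter, Rule, PathMatches
    from tornado.web import Application

    router = ReversibleRuleRouter([
        Rule(PathMatches(r"/user/(\d+)"), Application(), name="user"),
    ])
    assert router.reverse_url("user", 42) == "/user/42"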
- """ - raise NotImplementedError() - - -class _RoutingDelegate(httputil.HTTPMessageDelegate): - def __init__(self, router, server_conn, request_conn): - self.server_conn = server_conn - self.request_conn = request_conn - self.delegate = None - self.router = router # type: Router - - def headers_received(self, start_line, headers): - request = httputil.HTTPServerRequest( - connection=self.request_conn, - server_connection=self.server_conn, - start_line=start_line, headers=headers) - - self.delegate = self.router.find_handler(request) - if self.delegate is None: - app_log.debug("Delegate for %s %s request not found", - start_line.method, start_line.path) - self.delegate = _DefaultMessageDelegate(self.request_conn) - - return self.delegate.headers_received(start_line, headers) - - def data_received(self, chunk): - return self.delegate.data_received(chunk) - - def finish(self): - self.delegate.finish() - - def on_connection_close(self): - self.delegate.on_connection_close() - - -class _DefaultMessageDelegate(httputil.HTTPMessageDelegate): - def __init__(self, connection): - self.connection = connection - - def finish(self): - self.connection.write_headers( - httputil.ResponseStartLine("HTTP/1.1", 404, "Not Found"), httputil.HTTPHeaders()) - self.connection.finish() - - -class RuleRouter(Router): - """Rule-based router implementation.""" - - def __init__(self, rules=None): - """Constructs a router from an ordered list of rules:: - - RuleRouter([ - Rule(PathMatches("/handler"), Target), - # ... more rules - ]) - - You can also omit explicit `Rule` constructor and use tuples of arguments:: - - RuleRouter([ - (PathMatches("/handler"), Target), - ]) - - `PathMatches` is a default matcher, so the example above can be simplified:: - - RuleRouter([ - ("/handler", Target), - ]) - - In the examples above, ``Target`` can be a nested `Router` instance, an instance of - `~.httputil.HTTPServerConnectionDelegate` or an old-style callable, - accepting a request argument. - - :arg rules: a list of `Rule` instances or tuples of `Rule` - constructor arguments. - """ - self.rules = [] # type: typing.List[Rule] - if rules: - self.add_rules(rules) - - def add_rules(self, rules): - """Appends new rules to the router. - - :arg rules: a list of Rule instances (or tuples of arguments, which are - passed to Rule constructor). - """ - for rule in rules: - if isinstance(rule, (tuple, list)): - assert len(rule) in (2, 3, 4) - if isinstance(rule[0], basestring_type): - rule = Rule(PathMatches(rule[0]), *rule[1:]) - else: - rule = Rule(*rule) - - self.rules.append(self.process_rule(rule)) - - def process_rule(self, rule): - """Override this method for additional preprocessing of each rule. - - :arg Rule rule: a rule to be processed. - :returns: the same or modified Rule instance. - """ - return rule - - def find_handler(self, request, **kwargs): - for rule in self.rules: - target_params = rule.matcher.match(request) - if target_params is not None: - if rule.target_kwargs: - target_params['target_kwargs'] = rule.target_kwargs - - delegate = self.get_target_delegate( - rule.target, request, **target_params) - - if delegate is not None: - return delegate - - return None - - def get_target_delegate(self, target, request, **target_params): - """Returns an instance of `~.httputil.HTTPMessageDelegate` for a - Rule's target. This method is called by `~.find_handler` and can be - extended to provide additional target types. - - :arg target: a Rule's target. - :arg httputil.HTTPServerRequest request: current request. 
- :arg target_params: additional parameters that can be useful - for `~.httputil.HTTPMessageDelegate` creation. - """ - if isinstance(target, Router): - return target.find_handler(request, **target_params) - - elif isinstance(target, httputil.HTTPServerConnectionDelegate): - return target.start_request(request.server_connection, request.connection) - - elif callable(target): - return _CallableAdapter( - partial(target, **target_params), request.connection - ) - - return None - - -class ReversibleRuleRouter(ReversibleRouter, RuleRouter): - """A rule-based router that implements ``reverse_url`` method. - - Each rule added to this router may have a ``name`` attribute that can be - used to reconstruct an original uri. The actual reconstruction takes place - in a rule's matcher (see `Matcher.reverse`). - """ - - def __init__(self, rules=None): - self.named_rules = {} # type: typing.Dict[str] - super(ReversibleRuleRouter, self).__init__(rules) - - def process_rule(self, rule): - rule = super(ReversibleRuleRouter, self).process_rule(rule) - - if rule.name: - if rule.name in self.named_rules: - app_log.warning( - "Multiple handlers named %s; replacing previous value", - rule.name) - self.named_rules[rule.name] = rule - - return rule - - def reverse_url(self, name, *args): - if name in self.named_rules: - return self.named_rules[name].matcher.reverse(*args) - - for rule in self.rules: - if isinstance(rule.target, ReversibleRouter): - reversed_url = rule.target.reverse_url(name, *args) - if reversed_url is not None: - return reversed_url - - return None - - -class Rule(object): - """A routing rule.""" - - def __init__(self, matcher, target, target_kwargs=None, name=None): - """Constructs a Rule instance. - - :arg Matcher matcher: a `Matcher` instance used for determining - whether the rule should be considered a match for a specific - request. - :arg target: a Rule's target (typically a ``RequestHandler`` or - `~.httputil.HTTPServerConnectionDelegate` subclass or even a nested `Router`, - depending on routing implementation). - :arg dict target_kwargs: a dict of parameters that can be useful - at the moment of target instantiation (for example, ``status_code`` - for a ``RequestHandler`` subclass). They end up in - ``target_params['target_kwargs']`` of `RuleRouter.get_target_delegate` - method. - :arg str name: the name of the rule that can be used to find it - in `ReversibleRouter.reverse_url` implementation. - """ - if isinstance(target, str): - # import the Module and instantiate the class - # Must be a fully qualified name (module.ClassName) - target = import_object(target) - - self.matcher = matcher # type: Matcher - self.target = target - self.target_kwargs = target_kwargs if target_kwargs else {} - self.name = name - - def reverse(self, *args): - return self.matcher.reverse(*args) - - def __repr__(self): - return '%s(%r, %s, kwargs=%r, name=%r)' % \ - (self.__class__.__name__, self.matcher, - self.target, self.target_kwargs, self.name) - - -class Matcher(object): - """Represents a matcher for request features.""" - - def match(self, request): - """Matches current instance against the request. - - :arg httputil.HTTPServerRequest request: current HTTP request - :returns: a dict of parameters to be passed to the target handler - (for example, ``handler_kwargs``, ``path_args``, ``path_kwargs`` - can be passed for proper `~.web.RequestHandler` instantiation). - An empty dict is a valid (and common) return value to indicate a match - when the argument-passing features are not used. 
- ``None`` must be returned to indicate that there is no match.""" - raise NotImplementedError() - - def reverse(self, *args): - """Reconstructs full url from matcher instance and additional arguments.""" - return None - - -class AnyMatches(Matcher): - """Matches any request.""" - - def match(self, request): - return {} - - -class HostMatches(Matcher): - """Matches requests from hosts specified by ``host_pattern`` regex.""" - - def __init__(self, host_pattern): - if isinstance(host_pattern, basestring_type): - if not host_pattern.endswith("$"): - host_pattern += "$" - self.host_pattern = re.compile(host_pattern) - else: - self.host_pattern = host_pattern - - def match(self, request): - if self.host_pattern.match(request.host_name): - return {} - - return None - - -class DefaultHostMatches(Matcher): - """Matches requests from host that is equal to application's default_host. - Always returns no match if ``X-Real-Ip`` header is present. - """ - - def __init__(self, application, host_pattern): - self.application = application - self.host_pattern = host_pattern - - def match(self, request): - # Look for default host if not behind load balancer (for debugging) - if "X-Real-Ip" not in request.headers: - if self.host_pattern.match(self.application.default_host): - return {} - return None - - -class PathMatches(Matcher): - """Matches requests with paths specified by ``path_pattern`` regex.""" - - def __init__(self, path_pattern): - if isinstance(path_pattern, basestring_type): - if not path_pattern.endswith('$'): - path_pattern += '$' - self.regex = re.compile(path_pattern) - else: - self.regex = path_pattern - - assert len(self.regex.groupindex) in (0, self.regex.groups), \ - ("groups in url regexes must either be all named or all " - "positional: %r" % self.regex.pattern) - - self._path, self._group_count = self._find_groups() - - def match(self, request): - match = self.regex.match(request.path) - if match is None: - return None - if not self.regex.groups: - return {} - - path_args, path_kwargs = [], {} - - # Pass matched groups to the handler. Since - # match.groups() includes both named and - # unnamed groups, we want to use either groups - # or groupdict but not both. - if self.regex.groupindex: - path_kwargs = dict( - (str(k), _unquote_or_none(v)) - for (k, v) in match.groupdict().items()) - else: - path_args = [_unquote_or_none(s) for s in match.groups()] - - return dict(path_args=path_args, path_kwargs=path_kwargs) - - def reverse(self, *args): - if self._path is None: - raise ValueError("Cannot reverse url regex " + self.regex.pattern) - assert len(args) == self._group_count, "required number of arguments " \ - "not found" - if not len(args): - return self._path - converted_args = [] - for a in args: - if not isinstance(a, (unicode_type, bytes)): - a = str(a) - converted_args.append(url_escape(utf8(a), plus=False)) - return self._path % tuple(converted_args) - - def _find_groups(self): - """Returns a tuple (reverse string, group count) for a url. - - For example: Given the url pattern /([0-9]{4})/([a-z-]+)/, this method - would return ('/%s/%s/', 2). - """ - pattern = self.regex.pattern - if pattern.startswith('^'): - pattern = pattern[1:] - if pattern.endswith('$'): - pattern = pattern[:-1] - - if self.regex.groups != pattern.count('('): - # The pattern is too complicated for our simplistic matching, - # so we can't support reversing it. 
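The transformation performed here is what makes ``reverse`` work; reusing the pattern from the ``_find_groups`` docstring:

.. code-block:: python

    from tornado.routing import PathMatches

    matcher = PathMatches(r"/([0-9]{4})/([a-z-]+)/")
    # _find_groups turns this pattern into ('/%s/%s/', 2), so:
    assert matcher.reverse(2024, "post") == "/2024/post/"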
- return None, None - - pieces = [] - for fragment in pattern.split('('): - if ')' in fragment: - paren_loc = fragment.index(')') - if paren_loc >= 0: - pieces.append('%s' + fragment[paren_loc + 1:]) - else: - try: - unescaped_fragment = re_unescape(fragment) - except ValueError: - # If we can't unescape part of it, we can't - # reverse this url. - return (None, None) - pieces.append(unescaped_fragment) - - return ''.join(pieces), self.regex.groups - - -class URLSpec(Rule): - """Specifies mappings between URLs and handlers. - - .. versionchanged: 4.5 - `URLSpec` is now a subclass of a `Rule` with `PathMatches` matcher and is preserved for - backwards compatibility. - """ - def __init__(self, pattern, handler, kwargs=None, name=None): - """Parameters: - - * ``pattern``: Regular expression to be matched. Any capturing - groups in the regex will be passed in to the handler's - get/post/etc methods as arguments (by keyword if named, by - position if unnamed. Named and unnamed capturing groups - may not be mixed in the same rule). - - * ``handler``: `~.web.RequestHandler` subclass to be invoked. - - * ``kwargs`` (optional): A dictionary of additional arguments - to be passed to the handler's constructor. - - * ``name`` (optional): A name for this handler. Used by - `~.web.Application.reverse_url`. - - """ - super(URLSpec, self).__init__(PathMatches(pattern), handler, kwargs, name) - - self.regex = self.matcher.regex - self.handler_class = self.target - self.kwargs = kwargs - - def __repr__(self): - return '%s(%r, %s, kwargs=%r, name=%r)' % \ - (self.__class__.__name__, self.regex.pattern, - self.handler_class, self.kwargs, self.name) - - -def _unquote_or_none(s): - """None-safe wrapper around url_unescape to handle unmatched optional - groups correctly. - - Note that args are passed as bytes so the handler can decide what - encoding to use. - """ - if s is None: - return s - return url_unescape(s, encoding=None, plus=False) diff --git a/lib/tornado/simple_httpclient.py b/lib/tornado/simple_httpclient.py deleted file mode 100755 index 60b7956f..00000000 --- a/lib/tornado/simple_httpclient.py +++ /dev/null @@ -1,566 +0,0 @@ -from __future__ import absolute_import, division, print_function - -from tornado.escape import _unicode -from tornado import gen -from tornado.httpclient import HTTPResponse, HTTPError, AsyncHTTPClient, main, _RequestProxy -from tornado import httputil -from tornado.http1connection import HTTP1Connection, HTTP1ConnectionParameters -from tornado.ioloop import IOLoop -from tornado.iostream import StreamClosedError -from tornado.netutil import Resolver, OverrideResolver, _client_ssl_defaults -from tornado.log import gen_log -from tornado import stack_context -from tornado.tcpclient import TCPClient -from tornado.util import PY3 - -import base64 -import collections -import copy -import functools -import re -import socket -import sys -import time -from io import BytesIO - - -if PY3: - import urllib.parse as urlparse -else: - import urlparse - -try: - import ssl -except ImportError: - # ssl is not available on Google App Engine. - ssl = None - - -class HTTPTimeoutError(HTTPError): - """Error raised by SimpleAsyncHTTPClient on timeout. - - For historical reasons, this is a subclass of `.HTTPClientError` - which simulates a response code of 599. - - .. 
versionadded:: 5.1 - """ - def __init__(self, message): - super(HTTPTimeoutError, self).__init__(599, message=message) - - def __str__(self): - return self.message - - -class HTTPStreamClosedError(HTTPError): - """Error raised by SimpleAsyncHTTPClient when the underlying stream is closed. - - When a more specific exception is available (such as `ConnectionResetError`), - it may be raised instead of this one. - - For historical reasons, this is a subclass of `.HTTPClientError` - which simulates a response code of 599. - - .. versionadded:: 5.1 - """ - def __init__(self, message): - super(HTTPStreamClosedError, self).__init__(599, message=message) - - def __str__(self): - return self.message - - -class SimpleAsyncHTTPClient(AsyncHTTPClient): - """Non-blocking HTTP client with no external dependencies. - - This class implements an HTTP 1.1 client on top of Tornado's IOStreams. - Some features found in the curl-based AsyncHTTPClient are not yet - supported. In particular, proxies are not supported, connections - are not reused, and callers cannot select the network interface to be - used. - """ - def initialize(self, max_clients=10, - hostname_mapping=None, max_buffer_size=104857600, - resolver=None, defaults=None, max_header_size=None, - max_body_size=None): - """Creates a AsyncHTTPClient. - - Only a single AsyncHTTPClient instance exists per IOLoop - in order to provide limitations on the number of pending connections. - ``force_instance=True`` may be used to suppress this behavior. - - Note that because of this implicit reuse, unless ``force_instance`` - is used, only the first call to the constructor actually uses - its arguments. It is recommended to use the ``configure`` method - instead of the constructor to ensure that arguments take effect. - - ``max_clients`` is the number of concurrent requests that can be - in progress; when this limit is reached additional requests will be - queued. Note that time spent waiting in this queue still counts - against the ``request_timeout``. - - ``hostname_mapping`` is a dictionary mapping hostnames to IP addresses. - It can be used to make local DNS changes when modifying system-wide - settings like ``/etc/hosts`` is not possible or desirable (e.g. in - unittests). - - ``max_buffer_size`` (default 100MB) is the number of bytes - that can be read into memory at once. ``max_body_size`` - (defaults to ``max_buffer_size``) is the largest response body - that the client will accept. Without a - ``streaming_callback``, the smaller of these two limits - applies; with a ``streaming_callback`` only ``max_body_size`` - does. - - .. versionchanged:: 4.2 - Added the ``max_body_size`` argument. - """ - super(SimpleAsyncHTTPClient, self).initialize(defaults=defaults) - self.max_clients = max_clients - self.queue = collections.deque() - self.active = {} - self.waiting = {} - self.max_buffer_size = max_buffer_size - self.max_header_size = max_header_size - self.max_body_size = max_body_size - # TCPClient could create a Resolver for us, but we have to do it - # ourselves to support hostname_mapping. 
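As the ``initialize`` docstring above recommends, arguments are normally supplied through ``configure`` rather than the constructor; a sketch (the hostname mapping is illustrative):

.. code-block:: python

    from tornado.httpclient import AsyncHTTPClient

    AsyncHTTPClient.configure(
        "tornado.simple_httpclient.SimpleAsyncHTTPClient",
        max_clients=20,
        hostname_mapping={"api.example.com": "127.0.0.1"},
    )
    client = AsyncHTTPClient()  # shared per-IOLoop instance; the kwargs above now apply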
- if resolver: - self.resolver = resolver - self.own_resolver = False - else: - self.resolver = Resolver() - self.own_resolver = True - if hostname_mapping is not None: - self.resolver = OverrideResolver(resolver=self.resolver, - mapping=hostname_mapping) - self.tcp_client = TCPClient(resolver=self.resolver) - - def close(self): - super(SimpleAsyncHTTPClient, self).close() - if self.own_resolver: - self.resolver.close() - self.tcp_client.close() - - def fetch_impl(self, request, callback): - key = object() - self.queue.append((key, request, callback)) - if not len(self.active) < self.max_clients: - timeout_handle = self.io_loop.add_timeout( - self.io_loop.time() + min(request.connect_timeout, - request.request_timeout), - functools.partial(self._on_timeout, key, "in request queue")) - else: - timeout_handle = None - self.waiting[key] = (request, callback, timeout_handle) - self._process_queue() - if self.queue: - gen_log.debug("max_clients limit reached, request queued. " - "%d active, %d queued requests." % ( - len(self.active), len(self.queue))) - - def _process_queue(self): - with stack_context.NullContext(): - while self.queue and len(self.active) < self.max_clients: - key, request, callback = self.queue.popleft() - if key not in self.waiting: - continue - self._remove_timeout(key) - self.active[key] = (request, callback) - release_callback = functools.partial(self._release_fetch, key) - self._handle_request(request, release_callback, callback) - - def _connection_class(self): - return _HTTPConnection - - def _handle_request(self, request, release_callback, final_callback): - self._connection_class()( - self, request, release_callback, - final_callback, self.max_buffer_size, self.tcp_client, - self.max_header_size, self.max_body_size) - - def _release_fetch(self, key): - del self.active[key] - self._process_queue() - - def _remove_timeout(self, key): - if key in self.waiting: - request, callback, timeout_handle = self.waiting[key] - if timeout_handle is not None: - self.io_loop.remove_timeout(timeout_handle) - del self.waiting[key] - - def _on_timeout(self, key, info=None): - """Timeout callback of request. - - Construct a timeout HTTPResponse when a timeout occurs. - - :arg object key: A simple object to mark the request. - :info string key: More detailed timeout information. 
- """ - request, callback, timeout_handle = self.waiting[key] - self.queue.remove((key, request, callback)) - - error_message = "Timeout {0}".format(info) if info else "Timeout" - timeout_response = HTTPResponse( - request, 599, error=HTTPTimeoutError(error_message), - request_time=self.io_loop.time() - request.start_time) - self.io_loop.add_callback(callback, timeout_response) - del self.waiting[key] - - -class _HTTPConnection(httputil.HTTPMessageDelegate): - _SUPPORTED_METHODS = set(["GET", "HEAD", "POST", "PUT", "DELETE", "PATCH", "OPTIONS"]) - - def __init__(self, client, request, release_callback, - final_callback, max_buffer_size, tcp_client, - max_header_size, max_body_size): - self.io_loop = IOLoop.current() - self.start_time = self.io_loop.time() - self.start_wall_time = time.time() - self.client = client - self.request = request - self.release_callback = release_callback - self.final_callback = final_callback - self.max_buffer_size = max_buffer_size - self.tcp_client = tcp_client - self.max_header_size = max_header_size - self.max_body_size = max_body_size - self.code = None - self.headers = None - self.chunks = [] - self._decompressor = None - # Timeout handle returned by IOLoop.add_timeout - self._timeout = None - self._sockaddr = None - IOLoop.current().add_callback(self.run) - - @gen.coroutine - def run(self): - try: - self.parsed = urlparse.urlsplit(_unicode(self.request.url)) - if self.parsed.scheme not in ("http", "https"): - raise ValueError("Unsupported url scheme: %s" % - self.request.url) - # urlsplit results have hostname and port results, but they - # didn't support ipv6 literals until python 2.7. - netloc = self.parsed.netloc - if "@" in netloc: - userpass, _, netloc = netloc.rpartition("@") - host, port = httputil.split_host_and_port(netloc) - if port is None: - port = 443 if self.parsed.scheme == "https" else 80 - if re.match(r'^\[.*\]$', host): - # raw ipv6 addresses in urls are enclosed in brackets - host = host[1:-1] - self.parsed_hostname = host # save final host for _on_connect - - if self.request.allow_ipv6 is False: - af = socket.AF_INET - else: - af = socket.AF_UNSPEC - - ssl_options = self._get_ssl_options(self.parsed.scheme) - - timeout = min(self.request.connect_timeout, self.request.request_timeout) - if timeout: - self._timeout = self.io_loop.add_timeout( - self.start_time + timeout, - stack_context.wrap(functools.partial(self._on_timeout, "while connecting"))) - stream = yield self.tcp_client.connect( - host, port, af=af, - ssl_options=ssl_options, - max_buffer_size=self.max_buffer_size) - - if self.final_callback is None: - # final_callback is cleared if we've hit our timeout. 
- stream.close() - return - self.stream = stream - self.stream.set_close_callback(self.on_connection_close) - self._remove_timeout() - if self.final_callback is None: - return - if self.request.request_timeout: - self._timeout = self.io_loop.add_timeout( - self.start_time + self.request.request_timeout, - stack_context.wrap(functools.partial(self._on_timeout, "during request"))) - if (self.request.method not in self._SUPPORTED_METHODS and - not self.request.allow_nonstandard_methods): - raise KeyError("unknown method %s" % self.request.method) - for key in ('network_interface', - 'proxy_host', 'proxy_port', - 'proxy_username', 'proxy_password', - 'proxy_auth_mode'): - if getattr(self.request, key, None): - raise NotImplementedError('%s not supported' % key) - if "Connection" not in self.request.headers: - self.request.headers["Connection"] = "close" - if "Host" not in self.request.headers: - if '@' in self.parsed.netloc: - self.request.headers["Host"] = self.parsed.netloc.rpartition('@')[-1] - else: - self.request.headers["Host"] = self.parsed.netloc - username, password = None, None - if self.parsed.username is not None: - username, password = self.parsed.username, self.parsed.password - elif self.request.auth_username is not None: - username = self.request.auth_username - password = self.request.auth_password or '' - if username is not None: - if self.request.auth_mode not in (None, "basic"): - raise ValueError("unsupported auth_mode %s", - self.request.auth_mode) - self.request.headers["Authorization"] = ( - b"Basic " + base64.b64encode( - httputil.encode_username_password(username, password))) - if self.request.user_agent: - self.request.headers["User-Agent"] = self.request.user_agent - if not self.request.allow_nonstandard_methods: - # Some HTTP methods nearly always have bodies while others - # almost never do. Fail in this case unless the user has - # opted out of sanity checks with allow_nonstandard_methods. - body_expected = self.request.method in ("POST", "PATCH", "PUT") - body_present = (self.request.body is not None or - self.request.body_producer is not None) - if ((body_expected and not body_present) or - (body_present and not body_expected)): - raise ValueError( - 'Body must %sbe None for method %s (unless ' - 'allow_nonstandard_methods is true)' % - ('not ' if body_expected else '', self.request.method)) - if self.request.expect_100_continue: - self.request.headers["Expect"] = "100-continue" - if self.request.body is not None: - # When body_producer is used the caller is responsible for - # setting Content-Length (or else chunked encoding will be used). - self.request.headers["Content-Length"] = str(len( - self.request.body)) - if (self.request.method == "POST" and - "Content-Type" not in self.request.headers): - self.request.headers["Content-Type"] = "application/x-www-form-urlencoded" - if self.request.decompress_response: - self.request.headers["Accept-Encoding"] = "gzip" - req_path = ((self.parsed.path or '/') + - (('?' 
+ self.parsed.query) if self.parsed.query else '')) - self.connection = self._create_connection(stream) - start_line = httputil.RequestStartLine(self.request.method, - req_path, '') - self.connection.write_headers(start_line, self.request.headers) - if self.request.expect_100_continue: - yield self.connection.read_response(self) - else: - yield self._write_body(True) - except Exception: - if not self._handle_exception(*sys.exc_info()): - raise - - def _get_ssl_options(self, scheme): - if scheme == "https": - if self.request.ssl_options is not None: - return self.request.ssl_options - # If we are using the defaults, don't construct a - # new SSLContext. - if (self.request.validate_cert and - self.request.ca_certs is None and - self.request.client_cert is None and - self.request.client_key is None): - return _client_ssl_defaults - ssl_ctx = ssl.create_default_context( - ssl.Purpose.SERVER_AUTH, - cafile=self.request.ca_certs) - if not self.request.validate_cert: - ssl_ctx.check_hostname = False - ssl_ctx.verify_mode = ssl.CERT_NONE - if self.request.client_cert is not None: - ssl_ctx.load_cert_chain(self.request.client_cert, - self.request.client_key) - if hasattr(ssl, 'OP_NO_COMPRESSION'): - # See netutil.ssl_options_to_context - ssl_ctx.options |= ssl.OP_NO_COMPRESSION - return ssl_ctx - return None - - def _on_timeout(self, info=None): - """Timeout callback of _HTTPConnection instance. - - Raise a `HTTPTimeoutError` when a timeout occurs. - - :info string key: More detailed timeout information. - """ - self._timeout = None - error_message = "Timeout {0}".format(info) if info else "Timeout" - if self.final_callback is not None: - self._handle_exception(HTTPTimeoutError, HTTPTimeoutError(error_message), - None) - - def _remove_timeout(self): - if self._timeout is not None: - self.io_loop.remove_timeout(self._timeout) - self._timeout = None - - def _create_connection(self, stream): - stream.set_nodelay(True) - connection = HTTP1Connection( - stream, True, - HTTP1ConnectionParameters( - no_keep_alive=True, - max_header_size=self.max_header_size, - max_body_size=self.max_body_size, - decompress=self.request.decompress_response), - self._sockaddr) - return connection - - @gen.coroutine - def _write_body(self, start_read): - if self.request.body is not None: - self.connection.write(self.request.body) - elif self.request.body_producer is not None: - fut = self.request.body_producer(self.connection.write) - if fut is not None: - yield fut - self.connection.finish() - if start_read: - try: - yield self.connection.read_response(self) - except StreamClosedError: - if not self._handle_exception(*sys.exc_info()): - raise - - def _release(self): - if self.release_callback is not None: - release_callback = self.release_callback - self.release_callback = None - release_callback() - - def _run_callback(self, response): - self._release() - if self.final_callback is not None: - final_callback = self.final_callback - self.final_callback = None - self.io_loop.add_callback(final_callback, response) - - def _handle_exception(self, typ, value, tb): - if self.final_callback: - self._remove_timeout() - if isinstance(value, StreamClosedError): - if value.real_error is None: - value = HTTPStreamClosedError("Stream closed") - else: - value = value.real_error - self._run_callback(HTTPResponse(self.request, 599, error=value, - request_time=self.io_loop.time() - self.start_time, - start_time=self.start_wall_time, - )) - - if hasattr(self, "stream"): - # TODO: this may cause a StreamClosedError to be raised - # by the 
connection's Future. Should we cancel the - # connection more gracefully? - self.stream.close() - return True - else: - # If our callback has already been called, we are probably - # catching an exception that is not caused by us but rather - # some child of our callback. Rather than drop it on the floor, - # pass it along, unless it's just the stream being closed. - return isinstance(value, StreamClosedError) - - def on_connection_close(self): - if self.final_callback is not None: - message = "Connection closed" - if self.stream.error: - raise self.stream.error - try: - raise HTTPStreamClosedError(message) - except HTTPStreamClosedError: - self._handle_exception(*sys.exc_info()) - - def headers_received(self, first_line, headers): - if self.request.expect_100_continue and first_line.code == 100: - self._write_body(False) - return - self.code = first_line.code - self.reason = first_line.reason - self.headers = headers - - if self._should_follow_redirect(): - return - - if self.request.header_callback is not None: - # Reassemble the start line. - self.request.header_callback('%s %s %s\r\n' % first_line) - for k, v in self.headers.get_all(): - self.request.header_callback("%s: %s\r\n" % (k, v)) - self.request.header_callback('\r\n') - - def _should_follow_redirect(self): - return (self.request.follow_redirects and - self.request.max_redirects > 0 and - self.code in (301, 302, 303, 307, 308)) - - def finish(self): - data = b''.join(self.chunks) - self._remove_timeout() - original_request = getattr(self.request, "original_request", - self.request) - if self._should_follow_redirect(): - assert isinstance(self.request, _RequestProxy) - new_request = copy.copy(self.request.request) - new_request.url = urlparse.urljoin(self.request.url, - self.headers["Location"]) - new_request.max_redirects = self.request.max_redirects - 1 - del new_request.headers["Host"] - # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.4 - # Client SHOULD make a GET request after a 303. - # According to the spec, 302 should be followed by the same - # method as the original request, but in practice browsers - # treat 302 the same as 303, and many servers use 302 for - # compatibility with pre-HTTP/1.1 user agents which don't - # understand the 303 status. - if self.code in (302, 303): - new_request.method = "GET" - new_request.body = None - for h in ["Content-Length", "Content-Type", - "Content-Encoding", "Transfer-Encoding"]: - try: - del self.request.headers[h] - except KeyError: - pass - new_request.original_request = original_request - final_callback = self.final_callback - self.final_callback = None - self._release() - fut = self.client.fetch(new_request, raise_error=False) - fut.add_done_callback(lambda f: final_callback(f.result())) - self._on_end_request() - return - if self.request.streaming_callback: - buffer = BytesIO() - else: - buffer = BytesIO(data) # TODO: don't require one big string? - response = HTTPResponse(original_request, - self.code, reason=getattr(self, 'reason', None), - headers=self.headers, - request_time=self.io_loop.time() - self.start_time, - start_time=self.start_wall_time, - buffer=buffer, - effective_url=self.request.url) - self._run_callback(response) - self._on_end_request() - - def _on_end_request(self): - self.stream.close() - - def data_received(self, chunk): - if self._should_follow_redirect(): - # We're going to follow a redirect so just discard the body. 
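A fetch that streams its body through the ``data_received`` path above might look like this (the url argument is a placeholder):

.. code-block:: python

    from tornado import gen
    from tornado.httpclient import AsyncHTTPClient

    @gen.coroutine
    def stream_body(url):
        chunks = []

        def on_chunk(chunk):
            chunks.append(chunk)  # called once per chunk; the final response body is empty

        yield AsyncHTTPClient().fetch(url, streaming_callback=on_chunk,
                                      follow_redirects=True, max_redirects=5)
        raise gen.Return(b"".join(chunks))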
- return - if self.request.streaming_callback is not None: - self.request.streaming_callback(chunk) - else: - self.chunks.append(chunk) - - -if __name__ == "__main__": - AsyncHTTPClient.configure(SimpleAsyncHTTPClient) - main() diff --git a/lib/tornado/speedups.c b/lib/tornado/speedups.c deleted file mode 100755 index b714268a..00000000 --- a/lib/tornado/speedups.c +++ /dev/null @@ -1,77 +0,0 @@ -#define PY_SSIZE_T_CLEAN -#include <Python.h> -#include <stdint.h> - -static PyObject* websocket_mask(PyObject* self, PyObject* args) { - const char* mask; - Py_ssize_t mask_len; - uint32_t uint32_mask; - uint64_t uint64_mask; - const char* data; - Py_ssize_t data_len; - Py_ssize_t i; - PyObject* result; - char* buf; - - if (!PyArg_ParseTuple(args, "s#s#", &mask, &mask_len, &data, &data_len)) { - return NULL; - } - - uint32_mask = ((uint32_t*)mask)[0]; - - result = PyBytes_FromStringAndSize(NULL, data_len); - if (!result) { - return NULL; - } - buf = PyBytes_AsString(result); - - if (sizeof(size_t) >= 8) { - uint64_mask = uint32_mask; - uint64_mask = (uint64_mask << 32) | uint32_mask; - - while (data_len >= 8) { - ((uint64_t*)buf)[0] = ((uint64_t*)data)[0] ^ uint64_mask; - data += 8; - buf += 8; - data_len -= 8; - } - } - - while (data_len >= 4) { - ((uint32_t*)buf)[0] = ((uint32_t*)data)[0] ^ uint32_mask; - data += 4; - buf += 4; - data_len -= 4; - } - - for (i = 0; i < data_len; i++) { - buf[i] = data[i] ^ mask[i]; - } - - return result; -} - -static PyMethodDef methods[] = { - {"websocket_mask", websocket_mask, METH_VARARGS, ""}, - {NULL, NULL, 0, NULL} -}; - -#if PY_MAJOR_VERSION >= 3 -static struct PyModuleDef speedupsmodule = { - PyModuleDef_HEAD_INIT, - "speedups", - NULL, - -1, - methods -}; - -PyMODINIT_FUNC -PyInit_speedups(void) { - return PyModule_Create(&speedupsmodule); -} -#else // Python 2.x -PyMODINIT_FUNC -initspeedups(void) { - Py_InitModule("tornado.speedups", methods); -} -#endif diff --git a/lib/tornado/speedups.pyi b/lib/tornado/speedups.pyi deleted file mode 100755 index 9e8def48..00000000 --- a/lib/tornado/speedups.pyi +++ /dev/null @@ -1 +0,0 @@ -def websocket_mask(mask: bytes, data: bytes) -> bytes: ... diff --git a/lib/tornado/stack_context.py b/lib/tornado/stack_context.py deleted file mode 100755 index a1eca4c7..00000000 --- a/lib/tornado/stack_context.py +++ /dev/null @@ -1,413 +0,0 @@ -# -# Copyright 2010 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""`StackContext` allows applications to maintain threadlocal-like state -that follows execution as it moves to other execution contexts. - -The motivating examples are to eliminate the need for explicit -``async_callback`` wrappers (as in `tornado.web.RequestHandler`), and to -allow some additional context to be kept for logging. - -This is slightly magic, but it's an extension of the idea that an -exception handler is a kind of stack-local state and when that stack -is suspended and resumed in a new context that state needs to be -preserved. 
`StackContext` shifts the burden of restoring that state -from each call site (e.g. wrapping each `.AsyncHTTPClient` callback -in ``async_callback``) to the mechanisms that transfer control from -one context to another (e.g. `.AsyncHTTPClient` itself, `.IOLoop`, -thread pools, etc). - -Example usage:: - - @contextlib.contextmanager - def die_on_error(): - try: - yield - except Exception: - logging.error("exception in asynchronous operation",exc_info=True) - sys.exit(1) - - with StackContext(die_on_error): - # Any exception thrown here *or in callback and its descendants* - # will cause the process to exit instead of spinning endlessly - # in the ioloop. - http_client.fetch(url, callback) - ioloop.start() - -Most applications shouldn't have to work with `StackContext` directly. -Here are a few rules of thumb for when it's necessary: - -* If you're writing an asynchronous library that doesn't rely on a - stack_context-aware library like `tornado.ioloop` or `tornado.iostream` - (for example, if you're writing a thread pool), use - `.stack_context.wrap()` before any asynchronous operations to capture the - stack context from where the operation was started. - -* If you're writing an asynchronous library that has some shared - resources (such as a connection pool), create those shared resources - within a ``with stack_context.NullContext():`` block. This will prevent - ``StackContexts`` from leaking from one request to another. - -* If you want to write something like an exception handler that will - persist across asynchronous calls, create a new `StackContext` (or - `ExceptionStackContext`), and make your asynchronous calls in a ``with`` - block that references your `StackContext`. - -.. deprecated:: 5.1 - - The ``stack_context`` package is deprecated and will be removed in - Tornado 6.0. -""" - -from __future__ import absolute_import, division, print_function - -import sys -import threading -import warnings - -from tornado.util import raise_exc_info - - -class StackContextInconsistentError(Exception): - pass - - -class _State(threading.local): - def __init__(self): - self.contexts = (tuple(), None) - - -_state = _State() - - -class StackContext(object): - """Establishes the given context as a StackContext that will be transferred. - - Note that the parameter is a callable that returns a context - manager, not the context itself. That is, where for a - non-transferable context manager you would say:: - - with my_context(): - - StackContext takes the function itself rather than its result:: - - with StackContext(my_context): - - The result of ``with StackContext() as cb:`` is a deactivation - callback. Run this callback when the StackContext is no longer - needed to ensure that it is not propagated any further (note that - deactivating a context does not affect any instances of that - context that are currently pending). This is an advanced feature - and not necessary in most applications. 
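The deactivation callback mentioned above is used like this (``catch_errors`` is an assumed context-manager factory in the spirit of ``die_on_error`` from the module docstring):

.. code-block:: python

    import contextlib
    from tornado.stack_context import StackContext

    @contextlib.contextmanager
    def catch_errors():
        try:
            yield
        except Exception:
            print("error in asynchronous operation")

    with StackContext(catch_errors) as deactivate:
        pass  # schedule asynchronous work here; its callbacks re-enter catch_errors

    deactivate()  # the context should not propagate any further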
- """ - def __init__(self, context_factory): - warnings.warn("StackContext is deprecated and will be removed in Tornado 6.0", - DeprecationWarning) - self.context_factory = context_factory - self.contexts = [] - self.active = True - - def _deactivate(self): - self.active = False - - # StackContext protocol - def enter(self): - context = self.context_factory() - self.contexts.append(context) - context.__enter__() - - def exit(self, type, value, traceback): - context = self.contexts.pop() - context.__exit__(type, value, traceback) - - # Note that some of this code is duplicated in ExceptionStackContext - # below. ExceptionStackContext is more common and doesn't need - # the full generality of this class. - def __enter__(self): - self.old_contexts = _state.contexts - self.new_contexts = (self.old_contexts[0] + (self,), self) - _state.contexts = self.new_contexts - - try: - self.enter() - except: - _state.contexts = self.old_contexts - raise - - return self._deactivate - - def __exit__(self, type, value, traceback): - try: - self.exit(type, value, traceback) - finally: - final_contexts = _state.contexts - _state.contexts = self.old_contexts - - # Generator coroutines and with-statements with non-local - # effects interact badly. Check here for signs of - # the stack getting out of sync. - # Note that this check comes after restoring _state.context - # so that if it fails things are left in a (relatively) - # consistent state. - if final_contexts is not self.new_contexts: - raise StackContextInconsistentError( - 'stack_context inconsistency (may be caused by yield ' - 'within a "with StackContext" block)') - - # Break up a reference to itself to allow for faster GC on CPython. - self.new_contexts = None - - -class ExceptionStackContext(object): - """Specialization of StackContext for exception handling. - - The supplied ``exception_handler`` function will be called in the - event of an uncaught exception in this context. The semantics are - similar to a try/finally clause, and intended use cases are to log - an error, close a socket, or similar cleanup actions. The - ``exc_info`` triple ``(type, value, traceback)`` will be passed to the - exception_handler function. - - If the exception handler returns true, the exception will be - consumed and will not be propagated to other exception handlers. - - .. versionadded:: 5.1 - - The ``delay_warning`` argument can be used to delay the emission - of DeprecationWarnings until an exception is caught by the - ``ExceptionStackContext``, which facilitates certain transitional - use cases. 
- """ - def __init__(self, exception_handler, delay_warning=False): - self.delay_warning = delay_warning - if not self.delay_warning: - warnings.warn( - "StackContext is deprecated and will be removed in Tornado 6.0", - DeprecationWarning) - self.exception_handler = exception_handler - self.active = True - - def _deactivate(self): - self.active = False - - def exit(self, type, value, traceback): - if type is not None: - if self.delay_warning: - warnings.warn( - "StackContext is deprecated and will be removed in Tornado 6.0", - DeprecationWarning) - return self.exception_handler(type, value, traceback) - - def __enter__(self): - self.old_contexts = _state.contexts - self.new_contexts = (self.old_contexts[0], self) - _state.contexts = self.new_contexts - - return self._deactivate - - def __exit__(self, type, value, traceback): - try: - if type is not None: - return self.exception_handler(type, value, traceback) - finally: - final_contexts = _state.contexts - _state.contexts = self.old_contexts - - if final_contexts is not self.new_contexts: - raise StackContextInconsistentError( - 'stack_context inconsistency (may be caused by yield ' - 'within a "with StackContext" block)') - - # Break up a reference to itself to allow for faster GC on CPython. - self.new_contexts = None - - -class NullContext(object): - """Resets the `StackContext`. - - Useful when creating a shared resource on demand (e.g. an - `.AsyncHTTPClient`) where the stack that caused the creating is - not relevant to future operations. - """ - def __enter__(self): - self.old_contexts = _state.contexts - _state.contexts = (tuple(), None) - - def __exit__(self, type, value, traceback): - _state.contexts = self.old_contexts - - -def _remove_deactivated(contexts): - """Remove deactivated handlers from the chain""" - # Clean ctx handlers - stack_contexts = tuple([h for h in contexts[0] if h.active]) - - # Find new head - head = contexts[1] - while head is not None and not head.active: - head = head.old_contexts[1] - - # Process chain - ctx = head - while ctx is not None: - parent = ctx.old_contexts[1] - - while parent is not None: - if parent.active: - break - ctx.old_contexts = parent.old_contexts - parent = parent.old_contexts[1] - - ctx = parent - - return (stack_contexts, head) - - -def wrap(fn): - """Returns a callable object that will restore the current `StackContext` - when executed. - - Use this whenever saving a callback to be executed later in a - different execution context (either in a different thread or - asynchronously in the same thread). - """ - # Check if function is already wrapped - if fn is None or hasattr(fn, '_wrapped'): - return fn - - # Capture current stack head - # TODO: Any other better way to store contexts and update them in wrapped function? - cap_contexts = [_state.contexts] - - if not cap_contexts[0][0] and not cap_contexts[0][1]: - # Fast path when there are no active contexts. 
- def null_wrapper(*args, **kwargs): - try: - current_state = _state.contexts - _state.contexts = cap_contexts[0] - return fn(*args, **kwargs) - finally: - _state.contexts = current_state - null_wrapper._wrapped = True - return null_wrapper - - def wrapped(*args, **kwargs): - ret = None - try: - # Capture old state - current_state = _state.contexts - - # Remove deactivated items - cap_contexts[0] = contexts = _remove_deactivated(cap_contexts[0]) - - # Force new state - _state.contexts = contexts - - # Current exception - exc = (None, None, None) - top = None - - # Apply stack contexts - last_ctx = 0 - stack = contexts[0] - - # Apply state - for n in stack: - try: - n.enter() - last_ctx += 1 - except: - # Exception happened. Record exception info and store top-most handler - exc = sys.exc_info() - top = n.old_contexts[1] - - # Execute callback if no exception happened while restoring state - if top is None: - try: - ret = fn(*args, **kwargs) - except: - exc = sys.exc_info() - top = contexts[1] - - # If there was exception, try to handle it by going through the exception chain - if top is not None: - exc = _handle_exception(top, exc) - else: - # Otherwise take shorter path and run stack contexts in reverse order - while last_ctx > 0: - last_ctx -= 1 - c = stack[last_ctx] - - try: - c.exit(*exc) - except: - exc = sys.exc_info() - top = c.old_contexts[1] - break - else: - top = None - - # If if exception happened while unrolling, take longer exception handler path - if top is not None: - exc = _handle_exception(top, exc) - - # If exception was not handled, raise it - if exc != (None, None, None): - raise_exc_info(exc) - finally: - _state.contexts = current_state - return ret - - wrapped._wrapped = True - return wrapped - - -def _handle_exception(tail, exc): - while tail is not None: - try: - if tail.exit(*exc): - exc = (None, None, None) - except: - exc = sys.exc_info() - - tail = tail.old_contexts[1] - - return exc - - -def run_with_stack_context(context, func): - """Run a coroutine ``func`` in the given `StackContext`. - - It is not safe to have a ``yield`` statement within a ``with StackContext`` - block, so it is difficult to use stack context with `.gen.coroutine`. - This helper function runs the function in the correct context while - keeping the ``yield`` and ``with`` statements syntactically separate. - - Example:: - - @gen.coroutine - def incorrect(): - with StackContext(ctx): - # ERROR: this will raise StackContextInconsistentError - yield other_coroutine() - - @gen.coroutine - def correct(): - yield run_with_stack_context(StackContext(ctx), other_coroutine) - - .. versionadded:: 3.1 - """ - with context: - return func() diff --git a/lib/tornado/tcpclient.py b/lib/tornado/tcpclient.py deleted file mode 100755 index 3a1b58ca..00000000 --- a/lib/tornado/tcpclient.py +++ /dev/null @@ -1,276 +0,0 @@ -# -# Copyright 2014 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""A non-blocking TCP connection factory. 
-""" -from __future__ import absolute_import, division, print_function - -import functools -import socket -import numbers -import datetime - -from tornado.concurrent import Future, future_add_done_callback -from tornado.ioloop import IOLoop -from tornado.iostream import IOStream -from tornado import gen -from tornado.netutil import Resolver -from tornado.platform.auto import set_close_exec -from tornado.gen import TimeoutError -from tornado.util import timedelta_to_seconds - -_INITIAL_CONNECT_TIMEOUT = 0.3 - - -class _Connector(object): - """A stateless implementation of the "Happy Eyeballs" algorithm. - - "Happy Eyeballs" is documented in RFC6555 as the recommended practice - for when both IPv4 and IPv6 addresses are available. - - In this implementation, we partition the addresses by family, and - make the first connection attempt to whichever address was - returned first by ``getaddrinfo``. If that connection fails or - times out, we begin a connection in parallel to the first address - of the other family. If there are additional failures we retry - with other addresses, keeping one connection attempt per family - in flight at a time. - - http://tools.ietf.org/html/rfc6555 - - """ - def __init__(self, addrinfo, connect): - self.io_loop = IOLoop.current() - self.connect = connect - - self.future = Future() - self.timeout = None - self.connect_timeout = None - self.last_error = None - self.remaining = len(addrinfo) - self.primary_addrs, self.secondary_addrs = self.split(addrinfo) - self.streams = set() - - @staticmethod - def split(addrinfo): - """Partition the ``addrinfo`` list by address family. - - Returns two lists. The first list contains the first entry from - ``addrinfo`` and all others with the same family, and the - second list contains all other addresses (normally one list will - be AF_INET and the other AF_INET6, although non-standard resolvers - may return additional families). - """ - primary = [] - secondary = [] - primary_af = addrinfo[0][0] - for af, addr in addrinfo: - if af == primary_af: - primary.append((af, addr)) - else: - secondary.append((af, addr)) - return primary, secondary - - def start(self, timeout=_INITIAL_CONNECT_TIMEOUT, connect_timeout=None): - self.try_connect(iter(self.primary_addrs)) - self.set_timeout(timeout) - if connect_timeout is not None: - self.set_connect_timeout(connect_timeout) - return self.future - - def try_connect(self, addrs): - try: - af, addr = next(addrs) - except StopIteration: - # We've reached the end of our queue, but the other queue - # might still be working. Send a final error on the future - # only when both queues are finished. - if self.remaining == 0 and not self.future.done(): - self.future.set_exception(self.last_error or - IOError("connection failed")) - return - stream, future = self.connect(af, addr) - self.streams.add(stream) - future_add_done_callback( - future, functools.partial(self.on_connect_done, addrs, af, addr)) - - def on_connect_done(self, addrs, af, addr, future): - self.remaining -= 1 - try: - stream = future.result() - except Exception as e: - if self.future.done(): - return - # Error: try again (but remember what happened so we have an - # error to raise in the end) - self.last_error = e - self.try_connect(addrs) - if self.timeout is not None: - # If the first attempt failed, don't wait for the - # timeout to try an address from the secondary queue. 
- self.io_loop.remove_timeout(self.timeout) - self.on_timeout() - return - self.clear_timeouts() - if self.future.done(): - # This is a late arrival; just drop it. - stream.close() - else: - self.streams.discard(stream) - self.future.set_result((af, addr, stream)) - self.close_streams() - - def set_timeout(self, timeout): - self.timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout, - self.on_timeout) - - def on_timeout(self): - self.timeout = None - if not self.future.done(): - self.try_connect(iter(self.secondary_addrs)) - - def clear_timeout(self): - if self.timeout is not None: - self.io_loop.remove_timeout(self.timeout) - - def set_connect_timeout(self, connect_timeout): - self.connect_timeout = self.io_loop.add_timeout( - connect_timeout, self.on_connect_timeout) - - def on_connect_timeout(self): - if not self.future.done(): - self.future.set_exception(TimeoutError()) - self.close_streams() - - def clear_timeouts(self): - if self.timeout is not None: - self.io_loop.remove_timeout(self.timeout) - if self.connect_timeout is not None: - self.io_loop.remove_timeout(self.connect_timeout) - - def close_streams(self): - for stream in self.streams: - stream.close() - - -class TCPClient(object): - """A non-blocking TCP connection factory. - - .. versionchanged:: 5.0 - The ``io_loop`` argument (deprecated since version 4.1) has been removed. - """ - def __init__(self, resolver=None): - if resolver is not None: - self.resolver = resolver - self._own_resolver = False - else: - self.resolver = Resolver() - self._own_resolver = True - - def close(self): - if self._own_resolver: - self.resolver.close() - - @gen.coroutine - def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None, - max_buffer_size=None, source_ip=None, source_port=None, - timeout=None): - """Connect to the given host and port. - - Asynchronously returns an `.IOStream` (or `.SSLIOStream` if - ``ssl_options`` is not None). - - Using the ``source_ip`` kwarg, one can specify the source - IP address to use when establishing the connection. - In case the user needs to resolve and - use a specific interface, it has to be handled outside - of Tornado as this depends very much on the platform. - - Raises `TimeoutError` if the input future does not complete before - ``timeout``, which may be specified in any form allowed by - `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time - relative to `.IOLoop.time`) - - Similarly, when the user requires a certain source port, it can - be specified using the ``source_port`` arg. - - .. versionchanged:: 4.5 - Added the ``source_ip`` and ``source_port`` arguments. - - .. versionchanged:: 5.0 - Added the ``timeout`` argument. - """ - if timeout is not None: - if isinstance(timeout, numbers.Real): - timeout = IOLoop.current().time() + timeout - elif isinstance(timeout, datetime.timedelta): - timeout = IOLoop.current().time() + timedelta_to_seconds(timeout) - else: - raise TypeError("Unsupported timeout %r" % timeout) - if timeout is not None: - addrinfo = yield gen.with_timeout( - timeout, self.resolver.resolve(host, port, af)) - else: - addrinfo = yield self.resolver.resolve(host, port, af) - connector = _Connector( - addrinfo, - functools.partial(self._create_stream, max_buffer_size, - source_ip=source_ip, source_port=source_port) - ) - af, addr, stream = yield connector.start(connect_timeout=timeout) - # TODO: For better performance we could cache the (af, addr) - # information here and re-use it on subsequent connections to - # the same host. 
(http://tools.ietf.org/html/rfc6555#section-4.2) - if ssl_options is not None: - if timeout is not None: - stream = yield gen.with_timeout(timeout, stream.start_tls( - False, ssl_options=ssl_options, server_hostname=host)) - else: - stream = yield stream.start_tls(False, ssl_options=ssl_options, - server_hostname=host) - raise gen.Return(stream) - - def _create_stream(self, max_buffer_size, af, addr, source_ip=None, - source_port=None): - # Always connect in plaintext; we'll convert to ssl if necessary - # after one connection has completed. - source_port_bind = source_port if isinstance(source_port, int) else 0 - source_ip_bind = source_ip - if source_port_bind and not source_ip: - # User required a specific port, but did not specify - # a certain source IP, will bind to the default loopback. - source_ip_bind = '::1' if af == socket.AF_INET6 else '127.0.0.1' - # Trying to use the same address family as the requested af socket: - # - 127.0.0.1 for IPv4 - # - ::1 for IPv6 - socket_obj = socket.socket(af) - set_close_exec(socket_obj.fileno()) - if source_port_bind or source_ip_bind: - # If the user requires binding also to a specific IP/port. - try: - socket_obj.bind((source_ip_bind, source_port_bind)) - except socket.error: - socket_obj.close() - # Fail loudly if unable to use the IP/port. - raise - try: - stream = IOStream(socket_obj, - max_buffer_size=max_buffer_size) - except socket.error as e: - fu = Future() - fu.set_exception(e) - return fu - else: - return stream, stream.connect(addr) diff --git a/lib/tornado/tcpserver.py b/lib/tornado/tcpserver.py deleted file mode 100755 index 4f5d6f03..00000000 --- a/lib/tornado/tcpserver.py +++ /dev/null @@ -1,299 +0,0 @@ -# -# Copyright 2011 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""A non-blocking, single-threaded TCP server.""" -from __future__ import absolute_import, division, print_function - -import errno -import os -import socket - -from tornado import gen -from tornado.log import app_log -from tornado.ioloop import IOLoop -from tornado.iostream import IOStream, SSLIOStream -from tornado.netutil import bind_sockets, add_accept_handler, ssl_wrap_socket -from tornado import process -from tornado.util import errno_from_exception - -try: - import ssl -except ImportError: - # ssl is not available on Google App Engine. - ssl = None - - -class TCPServer(object): - r"""A non-blocking, single-threaded TCP server. - - To use `TCPServer`, define a subclass which overrides the `handle_stream` - method. For example, a simple echo server could be defined like this:: - - from tornado.tcpserver import TCPServer - from tornado.iostream import StreamClosedError - from tornado import gen - - class EchoServer(TCPServer): - async def handle_stream(self, stream, address): - while True: - try: - data = await stream.read_until(b"\n") - await stream.write(data) - except StreamClosedError: - break - - To make this server serve SSL traffic, send the ``ssl_options`` keyword - argument with an `ssl.SSLContext` object. 
For compatibility with older - versions of Python ``ssl_options`` may also be a dictionary of keyword - arguments for the `ssl.wrap_socket` method.:: - - ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) - ssl_ctx.load_cert_chain(os.path.join(data_dir, "mydomain.crt"), - os.path.join(data_dir, "mydomain.key")) - TCPServer(ssl_options=ssl_ctx) - - `TCPServer` initialization follows one of three patterns: - - 1. `listen`: simple single-process:: - - server = TCPServer() - server.listen(8888) - IOLoop.current().start() - - 2. `bind`/`start`: simple multi-process:: - - server = TCPServer() - server.bind(8888) - server.start(0) # Forks multiple sub-processes - IOLoop.current().start() - - When using this interface, an `.IOLoop` must *not* be passed - to the `TCPServer` constructor. `start` will always start - the server on the default singleton `.IOLoop`. - - 3. `add_sockets`: advanced multi-process:: - - sockets = bind_sockets(8888) - tornado.process.fork_processes(0) - server = TCPServer() - server.add_sockets(sockets) - IOLoop.current().start() - - The `add_sockets` interface is more complicated, but it can be - used with `tornado.process.fork_processes` to give you more - flexibility in when the fork happens. `add_sockets` can - also be used in single-process servers if you want to create - your listening sockets in some way other than - `~tornado.netutil.bind_sockets`. - - .. versionadded:: 3.1 - The ``max_buffer_size`` argument. - - .. versionchanged:: 5.0 - The ``io_loop`` argument has been removed. - """ - def __init__(self, ssl_options=None, max_buffer_size=None, - read_chunk_size=None): - self.ssl_options = ssl_options - self._sockets = {} # fd -> socket object - self._handlers = {} # fd -> remove_handler callable - self._pending_sockets = [] - self._started = False - self._stopped = False - self.max_buffer_size = max_buffer_size - self.read_chunk_size = read_chunk_size - - # Verify the SSL options. Otherwise we don't get errors until clients - # connect. This doesn't verify that the keys are legitimate, but - # the SSL module doesn't do that until there is a connected socket - # which seems like too much work - if self.ssl_options is not None and isinstance(self.ssl_options, dict): - # Only certfile is required: it can contain both keys - if 'certfile' not in self.ssl_options: - raise KeyError('missing key "certfile" in ssl_options') - - if not os.path.exists(self.ssl_options['certfile']): - raise ValueError('certfile "%s" does not exist' % - self.ssl_options['certfile']) - if ('keyfile' in self.ssl_options and - not os.path.exists(self.ssl_options['keyfile'])): - raise ValueError('keyfile "%s" does not exist' % - self.ssl_options['keyfile']) - - def listen(self, port, address=""): - """Starts accepting connections on the given port. - - This method may be called more than once to listen on multiple ports. - `listen` takes effect immediately; it is not necessary to call - `TCPServer.start` afterwards. It is, however, necessary to start - the `.IOLoop`. - """ - sockets = bind_sockets(port, address=address) - self.add_sockets(sockets) - - def add_sockets(self, sockets): - """Makes this server start accepting connections on the given sockets. - - The ``sockets`` parameter is a list of socket objects such as - those returned by `~tornado.netutil.bind_sockets`. - `add_sockets` is typically used in combination with that - method and `tornado.process.fork_processes` to provide greater - control over the initialization of a multi-process server. 
- """ - for sock in sockets: - self._sockets[sock.fileno()] = sock - self._handlers[sock.fileno()] = add_accept_handler( - sock, self._handle_connection) - - def add_socket(self, socket): - """Singular version of `add_sockets`. Takes a single socket object.""" - self.add_sockets([socket]) - - def bind(self, port, address=None, family=socket.AF_UNSPEC, backlog=128, - reuse_port=False): - """Binds this server to the given port on the given address. - - To start the server, call `start`. If you want to run this server - in a single process, you can call `listen` as a shortcut to the - sequence of `bind` and `start` calls. - - Address may be either an IP address or hostname. If it's a hostname, - the server will listen on all IP addresses associated with the - name. Address may be an empty string or None to listen on all - available interfaces. Family may be set to either `socket.AF_INET` - or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise - both will be used if available. - - The ``backlog`` argument has the same meaning as for - `socket.listen <socket.socket.listen>`. The ``reuse_port`` argument - has the same meaning as for `.bind_sockets`. - - This method may be called multiple times prior to `start` to listen - on multiple ports or interfaces. - - .. versionchanged:: 4.4 - Added the ``reuse_port`` argument. - """ - sockets = bind_sockets(port, address=address, family=family, - backlog=backlog, reuse_port=reuse_port) - if self._started: - self.add_sockets(sockets) - else: - self._pending_sockets.extend(sockets) - - def start(self, num_processes=1): - """Starts this server in the `.IOLoop`. - - By default, we run the server in this process and do not fork any - additional child process. - - If num_processes is ``None`` or <= 0, we detect the number of cores - available on this machine and fork that number of child - processes. If num_processes is given and > 1, we fork that - specific number of sub-processes. - - Since we use processes and not threads, there is no shared memory - between any server code. - - Note that multiple processes are not compatible with the autoreload - module (or the ``autoreload=True`` option to `tornado.web.Application` - which defaults to True when ``debug=True``). - When using multiple processes, no IOLoops can be created or - referenced until after the call to ``TCPServer.start(n)``. - """ - assert not self._started - self._started = True - if num_processes != 1: - process.fork_processes(num_processes) - sockets = self._pending_sockets - self._pending_sockets = [] - self.add_sockets(sockets) - - def stop(self): - """Stops listening for new connections. - - Requests currently in progress may still continue after the - server is stopped. - """ - if self._stopped: - return - self._stopped = True - for fd, sock in self._sockets.items(): - assert sock.fileno() == fd - # Unregister socket from IOLoop - self._handlers.pop(fd)() - sock.close() - - def handle_stream(self, stream, address): - """Override to handle a new `.IOStream` from an incoming connection. - - This method may be a coroutine; if so any exceptions it raises - asynchronously will be logged. Accepting of incoming connections - will not be blocked by this coroutine. - - If this `TCPServer` is configured for SSL, ``handle_stream`` - may be called before the SSL handshake has completed. Use - `.SSLIOStream.wait_for_handshake` if you need to verify the client's - certificate or use NPN/ALPN. - - .. versionchanged:: 4.2 - Added the option for this method to be a coroutine. 
- """ - raise NotImplementedError() - - def _handle_connection(self, connection, address): - if self.ssl_options is not None: - assert ssl, "Python 2.6+ and OpenSSL required for SSL" - try: - connection = ssl_wrap_socket(connection, - self.ssl_options, - server_side=True, - do_handshake_on_connect=False) - except ssl.SSLError as err: - if err.args[0] == ssl.SSL_ERROR_EOF: - return connection.close() - else: - raise - except socket.error as err: - # If the connection is closed immediately after it is created - # (as in a port scan), we can get one of several errors. - # wrap_socket makes an internal call to getpeername, - # which may return either EINVAL (Mac OS X) or ENOTCONN - # (Linux). If it returns ENOTCONN, this error is - # silently swallowed by the ssl module, so we need to - # catch another error later on (AttributeError in - # SSLIOStream._do_ssl_handshake). - # To test this behavior, try nmap with the -sT flag. - # https://github.com/tornadoweb/tornado/pull/750 - if errno_from_exception(err) in (errno.ECONNABORTED, errno.EINVAL): - return connection.close() - else: - raise - try: - if self.ssl_options is not None: - stream = SSLIOStream(connection, - max_buffer_size=self.max_buffer_size, - read_chunk_size=self.read_chunk_size) - else: - stream = IOStream(connection, - max_buffer_size=self.max_buffer_size, - read_chunk_size=self.read_chunk_size) - - future = self.handle_stream(stream, address) - if future is not None: - IOLoop.current().add_future(gen.convert_yielded(future), - lambda f: f.result()) - except Exception: - app_log.error("Error in connection callback", exc_info=True) diff --git a/lib/tornado/template.py b/lib/tornado/template.py deleted file mode 100755 index 61b98746..00000000 --- a/lib/tornado/template.py +++ /dev/null @@ -1,976 +0,0 @@ -# -# Copyright 2009 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""A simple template system that compiles templates to Python code. - -Basic usage looks like:: - - t = template.Template("<html>{{ myvalue }}</html>") - print(t.generate(myvalue="XXX")) - -`Loader` is a class that loads templates from a root directory and caches -the compiled templates:: - - loader = template.Loader("/home/btaylor") - print(loader.load("test.html").generate(myvalue="XXX")) - -We compile all templates to raw Python. Error-reporting is currently... uh, -interesting. Syntax for the templates:: - - ### base.html - <html> - <head> - <title>{% block title %}Default title{% end %} - - -
-      </head>
-      <body>
-        <ul>
-          {% for student in students %}
-            {% block student %}
-              <li>{{ escape(student.name) }}</li>
-            {% end %}
-          {% end %}
-        </ul>
-      </body>
-    </html>
-
-    ### bold.html
-    {% extends "base.html" %}
-
-    {% block title %}A bolder title{% end %}
-
-    {% block student %}
-      <li><span style="bold">{{ escape(student.name) }}</span></li>
-    {% end %}
-
-Unlike most other template systems, we do not put any restrictions on the
-expressions you can include in your statements. ``if`` and ``for`` blocks get
-translated exactly into Python, so you can do complex expressions like::
-
-    {% for student in [p for p in people if p.student and p.age > 23] %}
-      <li>{{ escape(student.name) }}</li>
  • - {% end %} - -Translating directly to Python means you can apply functions to expressions -easily, like the ``escape()`` function in the examples above. You can pass -functions in to your template just like any other variable -(In a `.RequestHandler`, override `.RequestHandler.get_template_namespace`):: - - ### Python code - def add(x, y): - return x + y - template.execute(add=add) - - ### The template - {{ add(1, 2) }} - -We provide the functions `escape() <.xhtml_escape>`, `.url_escape()`, -`.json_encode()`, and `.squeeze()` to all templates by default. - -Typical applications do not create `Template` or `Loader` instances by -hand, but instead use the `~.RequestHandler.render` and -`~.RequestHandler.render_string` methods of -`tornado.web.RequestHandler`, which load templates automatically based -on the ``template_path`` `.Application` setting. - -Variable names beginning with ``_tt_`` are reserved by the template -system and should not be used by application code. - -Syntax Reference ----------------- - -Template expressions are surrounded by double curly braces: ``{{ ... }}``. -The contents may be any python expression, which will be escaped according -to the current autoescape setting and inserted into the output. Other -template directives use ``{% %}``. - -To comment out a section so that it is omitted from the output, surround it -with ``{# ... #}``. - -These tags may be escaped as ``{{!``, ``{%!``, and ``{#!`` -if you need to include a literal ``{{``, ``{%``, or ``{#`` in the output. - - -``{% apply *function* %}...{% end %}`` - Applies a function to the output of all template code between ``apply`` - and ``end``:: - - {% apply linkify %}{{name}} said: {{message}}{% end %} - - Note that as an implementation detail apply blocks are implemented - as nested functions and thus may interact strangely with variables - set via ``{% set %}``, or the use of ``{% break %}`` or ``{% continue %}`` - within loops. - -``{% autoescape *function* %}`` - Sets the autoescape mode for the current file. This does not affect - other files, even those referenced by ``{% include %}``. Note that - autoescaping can also be configured globally, at the `.Application` - or `Loader`.:: - - {% autoescape xhtml_escape %} - {% autoescape None %} - -``{% block *name* %}...{% end %}`` - Indicates a named, replaceable block for use with ``{% extends %}``. - Blocks in the parent template will be replaced with the contents of - the same-named block in a child template.:: - - - {% block title %}Default title{% end %} - - - {% extends "base.html" %} - {% block title %}My page title{% end %} - -``{% comment ... %}`` - A comment which will be removed from the template output. Note that - there is no ``{% end %}`` tag; the comment goes from the word ``comment`` - to the closing ``%}`` tag. - -``{% extends *filename* %}`` - Inherit from another template. Templates that use ``extends`` should - contain one or more ``block`` tags to replace content from the parent - template. Anything in the child template not contained in a ``block`` - tag will be ignored. For an example, see the ``{% block %}`` tag. - -``{% for *var* in *expr* %}...{% end %}`` - Same as the python ``for`` statement. ``{% break %}`` and - ``{% continue %}`` may be used inside the loop. - -``{% from *x* import *y* %}`` - Same as the python ``import`` statement. - -``{% if *condition* %}...{% elif *condition* %}...{% else %}...{% end %}`` - Conditional statement - outputs the first section whose condition is - true. 
(The ``elif`` and ``else`` sections are optional) - -``{% import *module* %}`` - Same as the python ``import`` statement. - -``{% include *filename* %}`` - Includes another template file. The included file can see all the local - variables as if it were copied directly to the point of the ``include`` - directive (the ``{% autoescape %}`` directive is an exception). - Alternately, ``{% module Template(filename, **kwargs) %}`` may be used - to include another template with an isolated namespace. - -``{% module *expr* %}`` - Renders a `~tornado.web.UIModule`. The output of the ``UIModule`` is - not escaped:: - - {% module Template("foo.html", arg=42) %} - - ``UIModules`` are a feature of the `tornado.web.RequestHandler` - class (and specifically its ``render`` method) and will not work - when the template system is used on its own in other contexts. - -``{% raw *expr* %}`` - Outputs the result of the given expression without autoescaping. - -``{% set *x* = *y* %}`` - Sets a local variable. - -``{% try %}...{% except %}...{% else %}...{% finally %}...{% end %}`` - Same as the python ``try`` statement. - -``{% while *condition* %}... {% end %}`` - Same as the python ``while`` statement. ``{% break %}`` and - ``{% continue %}`` may be used inside the loop. - -``{% whitespace *mode* %}`` - Sets the whitespace mode for the remainder of the current file - (or until the next ``{% whitespace %}`` directive). See - `filter_whitespace` for available options. New in Tornado 4.3. -""" - -from __future__ import absolute_import, division, print_function - -import datetime -import linecache -import os.path -import posixpath -import re -import threading - -from tornado import escape -from tornado.log import app_log -from tornado.util import ObjectDict, exec_in, unicode_type, PY3 - -if PY3: - from io import StringIO -else: - from cStringIO import StringIO - -_DEFAULT_AUTOESCAPE = "xhtml_escape" -_UNSET = object() - - -def filter_whitespace(mode, text): - """Transform whitespace in ``text`` according to ``mode``. - - Available modes are: - - * ``all``: Return all whitespace unmodified. - * ``single``: Collapse consecutive whitespace with a single whitespace - character, preserving newlines. - * ``oneline``: Collapse all runs of whitespace into a single space - character, removing all newlines in the process. - - .. versionadded:: 4.3 - """ - if mode == 'all': - return text - elif mode == 'single': - text = re.sub(r"([\t ]+)", " ", text) - text = re.sub(r"(\s*\n\s*)", "\n", text) - return text - elif mode == 'oneline': - return re.sub(r"(\s+)", " ", text) - else: - raise Exception("invalid whitespace mode %s" % mode) - - -class Template(object): - """A compiled template. - - We compile into Python from the given template_string. You can generate - the template from variables with generate(). - """ - # note that the constructor's signature is not extracted with - # autodoc because _UNSET looks like garbage. When changing - # this signature update website/sphinx/template.rst too. - def __init__(self, template_string, name="", loader=None, - compress_whitespace=_UNSET, autoescape=_UNSET, - whitespace=None): - """Construct a Template. - - :arg str template_string: the contents of the template file. - :arg str name: the filename from which the template was loaded - (used for error message). - :arg tornado.template.BaseLoader loader: the `~tornado.template.BaseLoader` responsible - for this template, used to resolve ``{% include %}`` and ``{% extend %}`` directives. 
- :arg bool compress_whitespace: Deprecated since Tornado 4.3. - Equivalent to ``whitespace="single"`` if true and - ``whitespace="all"`` if false. - :arg str autoescape: The name of a function in the template - namespace, or ``None`` to disable escaping by default. - :arg str whitespace: A string specifying treatment of whitespace; - see `filter_whitespace` for options. - - .. versionchanged:: 4.3 - Added ``whitespace`` parameter; deprecated ``compress_whitespace``. - """ - self.name = escape.native_str(name) - - if compress_whitespace is not _UNSET: - # Convert deprecated compress_whitespace (bool) to whitespace (str). - if whitespace is not None: - raise Exception("cannot set both whitespace and compress_whitespace") - whitespace = "single" if compress_whitespace else "all" - if whitespace is None: - if loader and loader.whitespace: - whitespace = loader.whitespace - else: - # Whitespace defaults by filename. - if name.endswith(".html") or name.endswith(".js"): - whitespace = "single" - else: - whitespace = "all" - # Validate the whitespace setting. - filter_whitespace(whitespace, '') - - if autoescape is not _UNSET: - self.autoescape = autoescape - elif loader: - self.autoescape = loader.autoescape - else: - self.autoescape = _DEFAULT_AUTOESCAPE - - self.namespace = loader.namespace if loader else {} - reader = _TemplateReader(name, escape.native_str(template_string), - whitespace) - self.file = _File(self, _parse(reader, self)) - self.code = self._generate_python(loader) - self.loader = loader - try: - # Under python2.5, the fake filename used here must match - # the module name used in __name__ below. - # The dont_inherit flag prevents template.py's future imports - # from being applied to the generated code. - self.compiled = compile( - escape.to_unicode(self.code), - "%s.generated.py" % self.name.replace('.', '_'), - "exec", dont_inherit=True) - except Exception: - formatted_code = _format_code(self.code).rstrip() - app_log.error("%s code:\n%s", self.name, formatted_code) - raise - - def generate(self, **kwargs): - """Generate this template with the given arguments.""" - namespace = { - "escape": escape.xhtml_escape, - "xhtml_escape": escape.xhtml_escape, - "url_escape": escape.url_escape, - "json_encode": escape.json_encode, - "squeeze": escape.squeeze, - "linkify": escape.linkify, - "datetime": datetime, - "_tt_utf8": escape.utf8, # for internal use - "_tt_string_types": (unicode_type, bytes), - # __name__ and __loader__ allow the traceback mechanism to find - # the generated source code. - "__name__": self.name.replace('.', '_'), - "__loader__": ObjectDict(get_source=lambda name: self.code), - } - namespace.update(self.namespace) - namespace.update(kwargs) - exec_in(self.compiled, namespace) - execute = namespace["_tt_execute"] - # Clear the traceback module's cache of source data now that - # we've generated a new template (mainly for this module's - # unittests, where different tests reuse the same name). 
- linecache.clearcache() - return execute() - - def _generate_python(self, loader): - buffer = StringIO() - try: - # named_blocks maps from names to _NamedBlock objects - named_blocks = {} - ancestors = self._get_ancestors(loader) - ancestors.reverse() - for ancestor in ancestors: - ancestor.find_named_blocks(loader, named_blocks) - writer = _CodeWriter(buffer, named_blocks, loader, - ancestors[0].template) - ancestors[0].generate(writer) - return buffer.getvalue() - finally: - buffer.close() - - def _get_ancestors(self, loader): - ancestors = [self.file] - for chunk in self.file.body.chunks: - if isinstance(chunk, _ExtendsBlock): - if not loader: - raise ParseError("{% extends %} block found, but no " - "template loader") - template = loader.load(chunk.name, self.name) - ancestors.extend(template._get_ancestors(loader)) - return ancestors - - -class BaseLoader(object): - """Base class for template loaders. - - You must use a template loader to use template constructs like - ``{% extends %}`` and ``{% include %}``. The loader caches all - templates after they are loaded the first time. - """ - def __init__(self, autoescape=_DEFAULT_AUTOESCAPE, namespace=None, - whitespace=None): - """Construct a template loader. - - :arg str autoescape: The name of a function in the template - namespace, such as "xhtml_escape", or ``None`` to disable - autoescaping by default. - :arg dict namespace: A dictionary to be added to the default template - namespace, or ``None``. - :arg str whitespace: A string specifying default behavior for - whitespace in templates; see `filter_whitespace` for options. - Default is "single" for files ending in ".html" and ".js" and - "all" for other files. - - .. versionchanged:: 4.3 - Added ``whitespace`` parameter. - """ - self.autoescape = autoescape - self.namespace = namespace or {} - self.whitespace = whitespace - self.templates = {} - # self.lock protects self.templates. It's a reentrant lock - # because templates may load other templates via `include` or - # `extends`. Note that thanks to the GIL this code would be safe - # even without the lock, but could lead to wasted work as multiple - # threads tried to compile the same template simultaneously. - self.lock = threading.RLock() - - def reset(self): - """Resets the cache of compiled templates.""" - with self.lock: - self.templates = {} - - def resolve_path(self, name, parent_path=None): - """Converts a possibly-relative path to absolute (used internally).""" - raise NotImplementedError() - - def load(self, name, parent_path=None): - """Loads a template.""" - name = self.resolve_path(name, parent_path=parent_path) - with self.lock: - if name not in self.templates: - self.templates[name] = self._create_template(name) - return self.templates[name] - - def _create_template(self, name): - raise NotImplementedError() - - -class Loader(BaseLoader): - """A template loader that loads from a single root directory. 
- """ - def __init__(self, root_directory, **kwargs): - super(Loader, self).__init__(**kwargs) - self.root = os.path.abspath(root_directory) - - def resolve_path(self, name, parent_path=None): - if parent_path and not parent_path.startswith("<") and \ - not parent_path.startswith("/") and \ - not name.startswith("/"): - current_path = os.path.join(self.root, parent_path) - file_dir = os.path.dirname(os.path.abspath(current_path)) - relative_path = os.path.abspath(os.path.join(file_dir, name)) - if relative_path.startswith(self.root): - name = relative_path[len(self.root) + 1:] - return name - - def _create_template(self, name): - path = os.path.join(self.root, name) - with open(path, "rb") as f: - template = Template(f.read(), name=name, loader=self) - return template - - -class DictLoader(BaseLoader): - """A template loader that loads from a dictionary.""" - def __init__(self, dict, **kwargs): - super(DictLoader, self).__init__(**kwargs) - self.dict = dict - - def resolve_path(self, name, parent_path=None): - if parent_path and not parent_path.startswith("<") and \ - not parent_path.startswith("/") and \ - not name.startswith("/"): - file_dir = posixpath.dirname(parent_path) - name = posixpath.normpath(posixpath.join(file_dir, name)) - return name - - def _create_template(self, name): - return Template(self.dict[name], name=name, loader=self) - - -class _Node(object): - def each_child(self): - return () - - def generate(self, writer): - raise NotImplementedError() - - def find_named_blocks(self, loader, named_blocks): - for child in self.each_child(): - child.find_named_blocks(loader, named_blocks) - - -class _File(_Node): - def __init__(self, template, body): - self.template = template - self.body = body - self.line = 0 - - def generate(self, writer): - writer.write_line("def _tt_execute():", self.line) - with writer.indent(): - writer.write_line("_tt_buffer = []", self.line) - writer.write_line("_tt_append = _tt_buffer.append", self.line) - self.body.generate(writer) - writer.write_line("return _tt_utf8('').join(_tt_buffer)", self.line) - - def each_child(self): - return (self.body,) - - -class _ChunkList(_Node): - def __init__(self, chunks): - self.chunks = chunks - - def generate(self, writer): - for chunk in self.chunks: - chunk.generate(writer) - - def each_child(self): - return self.chunks - - -class _NamedBlock(_Node): - def __init__(self, name, body, template, line): - self.name = name - self.body = body - self.template = template - self.line = line - - def each_child(self): - return (self.body,) - - def generate(self, writer): - block = writer.named_blocks[self.name] - with writer.include(block.template, self.line): - block.body.generate(writer) - - def find_named_blocks(self, loader, named_blocks): - named_blocks[self.name] = self - _Node.find_named_blocks(self, loader, named_blocks) - - -class _ExtendsBlock(_Node): - def __init__(self, name): - self.name = name - - -class _IncludeBlock(_Node): - def __init__(self, name, reader, line): - self.name = name - self.template_name = reader.name - self.line = line - - def find_named_blocks(self, loader, named_blocks): - included = loader.load(self.name, self.template_name) - included.file.find_named_blocks(loader, named_blocks) - - def generate(self, writer): - included = writer.loader.load(self.name, self.template_name) - with writer.include(included, self.line): - included.file.body.generate(writer) - - -class _ApplyBlock(_Node): - def __init__(self, method, line, body=None): - self.method = method - self.line = line - self.body = 
body - - def each_child(self): - return (self.body,) - - def generate(self, writer): - method_name = "_tt_apply%d" % writer.apply_counter - writer.apply_counter += 1 - writer.write_line("def %s():" % method_name, self.line) - with writer.indent(): - writer.write_line("_tt_buffer = []", self.line) - writer.write_line("_tt_append = _tt_buffer.append", self.line) - self.body.generate(writer) - writer.write_line("return _tt_utf8('').join(_tt_buffer)", self.line) - writer.write_line("_tt_append(_tt_utf8(%s(%s())))" % ( - self.method, method_name), self.line) - - -class _ControlBlock(_Node): - def __init__(self, statement, line, body=None): - self.statement = statement - self.line = line - self.body = body - - def each_child(self): - return (self.body,) - - def generate(self, writer): - writer.write_line("%s:" % self.statement, self.line) - with writer.indent(): - self.body.generate(writer) - # Just in case the body was empty - writer.write_line("pass", self.line) - - -class _IntermediateControlBlock(_Node): - def __init__(self, statement, line): - self.statement = statement - self.line = line - - def generate(self, writer): - # In case the previous block was empty - writer.write_line("pass", self.line) - writer.write_line("%s:" % self.statement, self.line, writer.indent_size() - 1) - - -class _Statement(_Node): - def __init__(self, statement, line): - self.statement = statement - self.line = line - - def generate(self, writer): - writer.write_line(self.statement, self.line) - - -class _Expression(_Node): - def __init__(self, expression, line, raw=False): - self.expression = expression - self.line = line - self.raw = raw - - def generate(self, writer): - writer.write_line("_tt_tmp = %s" % self.expression, self.line) - writer.write_line("if isinstance(_tt_tmp, _tt_string_types):" - " _tt_tmp = _tt_utf8(_tt_tmp)", self.line) - writer.write_line("else: _tt_tmp = _tt_utf8(str(_tt_tmp))", self.line) - if not self.raw and writer.current_template.autoescape is not None: - # In python3 functions like xhtml_escape return unicode, - # so we have to convert to utf8 again. - writer.write_line("_tt_tmp = _tt_utf8(%s(_tt_tmp))" % - writer.current_template.autoescape, self.line) - writer.write_line("_tt_append(_tt_tmp)", self.line) - - -class _Module(_Expression): - def __init__(self, expression, line): - super(_Module, self).__init__("_tt_modules." + expression, line, - raw=True) - - -class _Text(_Node): - def __init__(self, value, line, whitespace): - self.value = value - self.line = line - self.whitespace = whitespace - - def generate(self, writer): - value = self.value - - # Compress whitespace if requested, with a crude heuristic to avoid - # altering preformatted whitespace. - if "
    " not in value:
    -            value = filter_whitespace(self.whitespace, value)
    -
    -        if value:
    -            writer.write_line('_tt_append(%r)' % escape.utf8(value), self.line)
    -
    -
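For context on the ``whitespace`` modes consumed by ``_Text.generate`` above: ``filter_whitespace`` (defined earlier in this deleted module, and present in any Tornado 4.3+ install) transforms text as follows. A minimal sketch, for illustration only:

    from tornado.template import filter_whitespace

    text = "hello \t world\n\n   next"
    filter_whitespace("all", text)      # returned unchanged
    filter_whitespace("single", text)   # tabs/spaces collapsed, newlines kept: "hello world\nnext"
    filter_whitespace("oneline", text)  # every whitespace run -> one space: "hello world next"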
    -class ParseError(Exception):
    -    """Raised for template syntax errors.
    -
    -    ``ParseError`` instances have ``filename`` and ``lineno`` attributes
    -    indicating the position of the error.
    -
    -    .. versionchanged:: 4.3
    -       Added ``filename`` and ``lineno`` attributes.
    -    """
    -    def __init__(self, message, filename=None, lineno=0):
    -        self.message = message
    -        # The names "filename" and "lineno" are chosen for consistency
    -        # with python SyntaxError.
    -        self.filename = filename
    -        self.lineno = lineno
    -
    -    def __str__(self):
    -        return '%s at %s:%d' % (self.message, self.filename, self.lineno)
    -
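A quick illustration of how ``ParseError`` surfaces to callers (hypothetical template text; any Tornado carrying this module behaves the same):

    from tornado.template import Template, ParseError

    try:
        Template("{% if x %}no end tag", name="broken.html")
    except ParseError as e:
        print(e.filename, e.lineno)  # broken.html 1
        print(e)                     # Missing {% end %} block for if at broken.html:1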
    -
    -class _CodeWriter(object):
    -    def __init__(self, file, named_blocks, loader, current_template):
    -        self.file = file
    -        self.named_blocks = named_blocks
    -        self.loader = loader
    -        self.current_template = current_template
    -        self.apply_counter = 0
    -        self.include_stack = []
    -        self._indent = 0
    -
    -    def indent_size(self):
    -        return self._indent
    -
    -    def indent(self):
    -        class Indenter(object):
    -            def __enter__(_):
    -                self._indent += 1
    -                return self
    -
    -            def __exit__(_, *args):
    -                assert self._indent > 0
    -                self._indent -= 1
    -
    -        return Indenter()
    -
    -    def include(self, template, line):
    -        self.include_stack.append((self.current_template, line))
    -        self.current_template = template
    -
    -        class IncludeTemplate(object):
    -            def __enter__(_):
    -                return self
    -
    -            def __exit__(_, *args):
    -                self.current_template = self.include_stack.pop()[0]
    -
    -        return IncludeTemplate()
    -
    -    def write_line(self, line, line_number, indent=None):
    -        if indent is None:
    -            indent = self._indent
    -        line_comment = '  # %s:%d' % (self.current_template.name, line_number)
    -        if self.include_stack:
    -            ancestors = ["%s:%d" % (tmpl.name, lineno)
    -                         for (tmpl, lineno) in self.include_stack]
    -            line_comment += ' (via %s)' % ', '.join(reversed(ancestors))
    -        print("    " * indent + line + line_comment, file=self.file)
    -
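The ``# template:line`` trailers emitted by ``write_line`` can be inspected on any compiled template via its ``code`` attribute; a small sketch:

    from tornado.template import Template

    t = Template("{{ 1 + 1 }}", name="demo.html")
    print(t.code)  # generated Python; each line carries a "# demo.html:<lineno>" comment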
    -
    -class _TemplateReader(object):
    -    def __init__(self, name, text, whitespace):
    -        self.name = name
    -        self.text = text
    -        self.whitespace = whitespace
    -        self.line = 1
    -        self.pos = 0
    -
    -    def find(self, needle, start=0, end=None):
    -        assert start >= 0, start
    -        pos = self.pos
    -        start += pos
    -        if end is None:
    -            index = self.text.find(needle, start)
    -        else:
    -            end += pos
    -            assert end >= start
    -            index = self.text.find(needle, start, end)
    -        if index != -1:
    -            index -= pos
    -        return index
    -
    -    def consume(self, count=None):
    -        if count is None:
    -            count = len(self.text) - self.pos
    -        newpos = self.pos + count
    -        self.line += self.text.count("\n", self.pos, newpos)
    -        s = self.text[self.pos:newpos]
    -        self.pos = newpos
    -        return s
    -
    -    def remaining(self):
    -        return len(self.text) - self.pos
    -
    -    def __len__(self):
    -        return self.remaining()
    -
    -    def __getitem__(self, key):
    -        if type(key) is slice:
    -            size = len(self)
    -            start, stop, step = key.indices(size)
    -            if start is None:
    -                start = self.pos
    -            else:
    -                start += self.pos
    -            if stop is not None:
    -                stop += self.pos
    -            return self.text[slice(start, stop, step)]
    -        elif key < 0:
    -            return self.text[key]
    -        else:
    -            return self.text[self.pos + key]
    -
    -    def __str__(self):
    -        return self.text[self.pos:]
    -
    -    def raise_parse_error(self, msg):
    -        raise ParseError(msg, self.name, self.line)
    -
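``_TemplateReader`` keeps a cursor (``pos``/``line``) and reports ``find`` offsets relative to it; the parser advances the cursor with ``consume``. A sketch using the private class directly, for illustration only:

    r = _TemplateReader("demo.html", "hello {{ name }}", "all")
    r.find("{{")   # 6 -- offset from the current cursor
    r.consume(6)   # "hello "; the cursor advances (and r.line tracks newlines)
    r.find("{{")   # 0 -- the directive now sits at the cursor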
    -
    -def _format_code(code):
    -    lines = code.splitlines()
    -    format = "%%%dd  %%s\n" % len(repr(len(lines) + 1))
    -    return "".join([format % (i + 1, line) for (i, line) in enumerate(lines)])
    -
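``_format_code`` produces the numbered listing that gets logged when a generated template fails to compile:

    print(_format_code("a = 1\nb = 2"), end="")
    # 1  a = 1
    # 2  b = 2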
    -
    -def _parse(reader, template, in_block=None, in_loop=None):
    -    body = _ChunkList([])
    -    while True:
    -        # Find next template directive
    -        curly = 0
    -        while True:
    -            curly = reader.find("{", curly)
    -            if curly == -1 or curly + 1 == reader.remaining():
    -                # EOF
    -                if in_block:
    -                    reader.raise_parse_error(
    -                        "Missing {%% end %%} block for %s" % in_block)
    -                body.chunks.append(_Text(reader.consume(), reader.line,
    -                                         reader.whitespace))
    -                return body
    -            # If the first curly brace is not the start of a special token,
    -            # start searching from the character after it
    -            if reader[curly + 1] not in ("{", "%", "#"):
    -                curly += 1
    -                continue
    -            # When there are more than 2 curlies in a row, use the
    -            # innermost ones.  This is useful when generating languages
    -            # like latex where curlies are also meaningful
    -            if (curly + 2 < reader.remaining() and
    -                    reader[curly + 1] == '{' and reader[curly + 2] == '{'):
    -                curly += 1
    -                continue
    -            break
    -
    -        # Append any text before the special token
    -        if curly > 0:
    -            cons = reader.consume(curly)
    -            body.chunks.append(_Text(cons, reader.line,
    -                                     reader.whitespace))
    -
    -        start_brace = reader.consume(2)
    -        line = reader.line
    -
    -        # Template directives may be escaped as "{{!" or "{%!".
    -        # In this case output the braces and consume the "!".
    -        # This is especially useful in conjunction with jquery templates,
    -        # which also use double braces.
    -        if reader.remaining() and reader[0] == "!":
    -            reader.consume(1)
    -            body.chunks.append(_Text(start_brace, line,
    -                                     reader.whitespace))
    -            continue
    -
    -        # Comment
    -        if start_brace == "{#":
    -            end = reader.find("#}")
    -            if end == -1:
    -                reader.raise_parse_error("Missing end comment #}")
    -            contents = reader.consume(end).strip()
    -            reader.consume(2)
    -            continue
    -
    -        # Expression
    -        if start_brace == "{{":
    -            end = reader.find("}}")
    -            if end == -1:
    -                reader.raise_parse_error("Missing end expression }}")
    -            contents = reader.consume(end).strip()
    -            reader.consume(2)
    -            if not contents:
    -                reader.raise_parse_error("Empty expression")
    -            body.chunks.append(_Expression(contents, line))
    -            continue
    -
    -        # Block
    -        assert start_brace == "{%", start_brace
    -        end = reader.find("%}")
    -        if end == -1:
    -            reader.raise_parse_error("Missing end block %}")
    -        contents = reader.consume(end).strip()
    -        reader.consume(2)
    -        if not contents:
    -            reader.raise_parse_error("Empty block tag ({% %})")
    -
    -        operator, space, suffix = contents.partition(" ")
    -        suffix = suffix.strip()
    -
    -        # Intermediate ("else", "elif", etc) blocks
    -        intermediate_blocks = {
    -            "else": set(["if", "for", "while", "try"]),
    -            "elif": set(["if"]),
    -            "except": set(["try"]),
    -            "finally": set(["try"]),
    -        }
    -        allowed_parents = intermediate_blocks.get(operator)
    -        if allowed_parents is not None:
    -            if not in_block:
    -                reader.raise_parse_error("%s outside %s block" %
    -                                         (operator, allowed_parents))
    -            if in_block not in allowed_parents:
    -                reader.raise_parse_error(
    -                    "%s block cannot be attached to %s block" %
    -                    (operator, in_block))
    -            body.chunks.append(_IntermediateControlBlock(contents, line))
    -            continue
    -
    -        # End tag
    -        elif operator == "end":
    -            if not in_block:
    -                reader.raise_parse_error("Extra {% end %} block")
    -            return body
    -
    -        elif operator in ("extends", "include", "set", "import", "from",
    -                          "comment", "autoescape", "whitespace", "raw",
    -                          "module"):
    -            if operator == "comment":
    -                continue
    -            if operator == "extends":
    -                suffix = suffix.strip('"').strip("'")
    -                if not suffix:
    -                    reader.raise_parse_error("extends missing file path")
    -                block = _ExtendsBlock(suffix)
    -            elif operator in ("import", "from"):
    -                if not suffix:
    -                    reader.raise_parse_error("import missing statement")
    -                block = _Statement(contents, line)
    -            elif operator == "include":
    -                suffix = suffix.strip('"').strip("'")
    -                if not suffix:
    -                    reader.raise_parse_error("include missing file path")
    -                block = _IncludeBlock(suffix, reader, line)
    -            elif operator == "set":
    -                if not suffix:
    -                    reader.raise_parse_error("set missing statement")
    -                block = _Statement(suffix, line)
    -            elif operator == "autoescape":
    -                fn = suffix.strip()
    -                if fn == "None":
    -                    fn = None
    -                template.autoescape = fn
    -                continue
    -            elif operator == "whitespace":
    -                mode = suffix.strip()
    -                # Validate the selected mode
    -                filter_whitespace(mode, '')
    -                reader.whitespace = mode
    -                continue
    -            elif operator == "raw":
    -                block = _Expression(suffix, line, raw=True)
    -            elif operator == "module":
    -                block = _Module(suffix, line)
    -            body.chunks.append(block)
    -            continue
    -
    -        elif operator in ("apply", "block", "try", "if", "for", "while"):
    -            # parse inner body recursively
    -            if operator in ("for", "while"):
    -                block_body = _parse(reader, template, operator, operator)
    -            elif operator == "apply":
    -                # apply creates a nested function so syntactically it's not
    -                # in the loop.
    -                block_body = _parse(reader, template, operator, None)
    -            else:
    -                block_body = _parse(reader, template, operator, in_loop)
    -
    -            if operator == "apply":
    -                if not suffix:
    -                    reader.raise_parse_error("apply missing method name")
    -                block = _ApplyBlock(suffix, line, block_body)
    -            elif operator == "block":
    -                if not suffix:
    -                    reader.raise_parse_error("block missing name")
    -                block = _NamedBlock(suffix, block_body, template, line)
    -            else:
    -                block = _ControlBlock(contents, line, block_body)
    -            body.chunks.append(block)
    -            continue
    -
    -        elif operator in ("break", "continue"):
    -            if not in_loop:
    -                reader.raise_parse_error("%s outside %s block" %
    -                                         (operator, set(["for", "while"])))
    -            body.chunks.append(_Statement(contents, line))
    -            continue
    -
    -        else:
    -            reader.raise_parse_error("unknown operator: %r" % operator)
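The deleted `_parse` loop above is the whole template grammar: escaped braces (`{{!`), `{# #}` comments, `{{ }}` expressions, and `{% %}` blocks closed by `{% end %}`. A minimal sketch of those directive forms, assuming a standalone `tornado` installation rather than this vendored copy:

```python
# Minimal sketch of the directive forms handled by the removed _parse loop.
from tornado.template import Template

t = Template(
    "{{! raw }} "                 # "{{!" emits literal braces: "{{ raw }}"
    "{# dropped comment #}"       # comments produce no output
    "{{ 1 + 1 }} "                # expressions are evaluated and inserted
    "{% if flag %}on{% else %}off{% end %}"  # blocks end with {% end %}
)
print(t.generate(flag=True).decode())  # -> {{ raw }} 2 on
```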
    diff --git a/lib/tornado/test/__main__.py b/lib/tornado/test/__main__.py
    deleted file mode 100755
    index c78478cb..00000000
    --- a/lib/tornado/test/__main__.py
    +++ /dev/null
    @@ -1,14 +0,0 @@
    -"""Shim to allow python -m tornado.test.
    -
-This only works in Python 2.7+.
    -"""
    -from __future__ import absolute_import, division, print_function
    -
    -from tornado.test.runtests import all, main
    -
    -# tornado.testing.main autodiscovery relies on 'all' being present in
    -# the main module, so import it here even though it is not used directly.
    -# The following line prevents a pyflakes warning.
    -all = all
    -
    -main()
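The `all = all` line above is a general trick worth noting. A hedged, generic illustration with an arbitrary stdlib import (nothing here is from the deleted file):

```python
# Generic form of the trick used above: a no-op self-assignment marks an
# imported name as "used", silencing pyflakes' unused-import warning while
# keeping the name re-exported from this module.
from math import pi  # arbitrary example import

pi = pi  # no effect at runtime; pyflakes no longer flags the import
```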
    diff --git a/lib/tornado/test/asyncio_test.py b/lib/tornado/test/asyncio_test.py
    deleted file mode 100755
    index a7c75649..00000000
    --- a/lib/tornado/test/asyncio_test.py
    +++ /dev/null
    @@ -1,206 +0,0 @@
    -# Licensed under the Apache License, Version 2.0 (the "License"); you may
    -# not use this file except in compliance with the License. You may obtain
    -# a copy of the License at
    -#
    -#     http://www.apache.org/licenses/LICENSE-2.0
    -#
    -# Unless required by applicable law or agreed to in writing, software
    -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    -# License for the specific language governing permissions and limitations
    -# under the License.
    -
    -from __future__ import absolute_import, division, print_function
    -
    -from concurrent.futures import ThreadPoolExecutor
    -from tornado import gen
    -from tornado.ioloop import IOLoop
    -from tornado.testing import AsyncTestCase, gen_test
    -from tornado.test.util import unittest, skipBefore33, skipBefore35, exec_test
    -
    -try:
    -    from tornado.platform.asyncio import asyncio
    -except ImportError:
    -    asyncio = None
    -else:
    -    from tornado.platform.asyncio import AsyncIOLoop, to_asyncio_future, AnyThreadEventLoopPolicy
    -    # This is used in dynamically-evaluated code, so silence pyflakes.
    -    to_asyncio_future
    -
    -
    -@unittest.skipIf(asyncio is None, "asyncio module not present")
    -class AsyncIOLoopTest(AsyncTestCase):
    -    def get_new_ioloop(self):
    -        io_loop = AsyncIOLoop()
    -        return io_loop
    -
    -    def test_asyncio_callback(self):
    -        # Basic test that the asyncio loop is set up correctly.
    -        asyncio.get_event_loop().call_soon(self.stop)
    -        self.wait()
    -
    -    @gen_test
    -    def test_asyncio_future(self):
    -        # Test that we can yield an asyncio future from a tornado coroutine.
-        # Without 'yield from', we must wrap coroutines in ensure_future,
-        # which was introduced in Python 3.4.4 as a replacement for the
-        # older asyncio.async().
    -        if hasattr(asyncio, 'ensure_future'):
    -            ensure_future = asyncio.ensure_future
    -        else:
    -            # async is a reserved word in Python 3.7
    -            ensure_future = getattr(asyncio, 'async')
    -
    -        x = yield ensure_future(
    -            asyncio.get_event_loop().run_in_executor(None, lambda: 42))
    -        self.assertEqual(x, 42)
    -
    -    @skipBefore33
    -    @gen_test
    -    def test_asyncio_yield_from(self):
    -        # Test that we can use asyncio coroutines with 'yield from'
-        # instead of asyncio.async(). This requires Python 3.3 syntax.
    -        namespace = exec_test(globals(), locals(), """
    -        @gen.coroutine
    -        def f():
    -            event_loop = asyncio.get_event_loop()
    -            x = yield from event_loop.run_in_executor(None, lambda: 42)
    -            return x
    -        """)
    -        result = yield namespace['f']()
    -        self.assertEqual(result, 42)
    -
    -    @skipBefore35
    -    def test_asyncio_adapter(self):
    -        # This test demonstrates that when using the asyncio coroutine
    -        # runner (i.e. run_until_complete), the to_asyncio_future
    -        # adapter is needed. No adapter is needed in the other direction,
    -        # as demonstrated by other tests in the package.
    -        @gen.coroutine
    -        def tornado_coroutine():
    -            yield gen.moment
    -            raise gen.Return(42)
    -        native_coroutine_without_adapter = exec_test(globals(), locals(), """
    -        async def native_coroutine_without_adapter():
    -            return await tornado_coroutine()
    -        """)["native_coroutine_without_adapter"]
    -
    -        native_coroutine_with_adapter = exec_test(globals(), locals(), """
    -        async def native_coroutine_with_adapter():
    -            return await to_asyncio_future(tornado_coroutine())
    -        """)["native_coroutine_with_adapter"]
    -
    -        # Use the adapter, but two degrees from the tornado coroutine.
    -        native_coroutine_with_adapter2 = exec_test(globals(), locals(), """
    -        async def native_coroutine_with_adapter2():
    -            return await to_asyncio_future(native_coroutine_without_adapter())
    -        """)["native_coroutine_with_adapter2"]
    -
    -        # Tornado supports native coroutines both with and without adapters
    -        self.assertEqual(
    -            self.io_loop.run_sync(native_coroutine_without_adapter),
    -            42)
    -        self.assertEqual(
    -            self.io_loop.run_sync(native_coroutine_with_adapter),
    -            42)
    -        self.assertEqual(
    -            self.io_loop.run_sync(native_coroutine_with_adapter2),
    -            42)
    -
    -        # Asyncio only supports coroutines that yield asyncio-compatible
    -        # Futures (which our Future is since 5.0).
    -        self.assertEqual(
    -            asyncio.get_event_loop().run_until_complete(
    -                native_coroutine_without_adapter()),
    -            42)
    -        self.assertEqual(
    -            asyncio.get_event_loop().run_until_complete(
    -                native_coroutine_with_adapter()),
    -            42)
    -        self.assertEqual(
    -            asyncio.get_event_loop().run_until_complete(
    -                native_coroutine_with_adapter2()),
    -            42)
    -
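Stripped of the harness, the bridge `test_asyncio_adapter` exercises looks like this. A sketch under tornado 5.x semantics (where, as the test notes, tornado Futures are already asyncio-compatible); the explicit adapter is kept for clarity:

```python
# Sketch: asyncio's runner driving a tornado coroutine via to_asyncio_future.
import asyncio
from tornado import gen
from tornado.platform.asyncio import to_asyncio_future

@gen.coroutine
def answer():
    yield gen.moment
    raise gen.Return(42)

loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
assert loop.run_until_complete(to_asyncio_future(answer())) == 42
loop.close()
```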
    -
    -@unittest.skipIf(asyncio is None, "asyncio module not present")
    -class LeakTest(unittest.TestCase):
    -    def setUp(self):
    -        # Trigger a cleanup of the mapping so we start with a clean slate.
    -        AsyncIOLoop().close()
-        # If we don't clean up after ourselves, other tests may fail on
-        # Python 3.4.
    -        self.orig_policy = asyncio.get_event_loop_policy()
    -        asyncio.set_event_loop_policy(asyncio.DefaultEventLoopPolicy())
    -
    -    def tearDown(self):
    -        asyncio.get_event_loop().close()
    -        asyncio.set_event_loop_policy(self.orig_policy)
    -
    -    def test_ioloop_close_leak(self):
    -        orig_count = len(IOLoop._ioloop_for_asyncio)
    -        for i in range(10):
    -            # Create and close an AsyncIOLoop using Tornado interfaces.
    -            loop = AsyncIOLoop()
    -            loop.close()
    -        new_count = len(IOLoop._ioloop_for_asyncio) - orig_count
    -        self.assertEqual(new_count, 0)
    -
    -    def test_asyncio_close_leak(self):
    -        orig_count = len(IOLoop._ioloop_for_asyncio)
    -        for i in range(10):
    -            # Create and close an AsyncIOMainLoop using asyncio interfaces.
    -            loop = asyncio.new_event_loop()
    -            loop.call_soon(IOLoop.current)
    -            loop.call_soon(loop.stop)
    -            loop.run_forever()
    -            loop.close()
    -        new_count = len(IOLoop._ioloop_for_asyncio) - orig_count
    -        # Because the cleanup is run on new loop creation, we have one
    -        # dangling entry in the map (but only one).
    -        self.assertEqual(new_count, 1)
    -
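In other words, the invariant under test: closing through the tornado API must drop the entry from the private per-asyncio-loop map immediately, while the asyncio-first path may leave at most one dangling entry until the next loop creation triggers cleanup. A compressed restatement of the tornado-API case:

```python
# Create/close via AsyncIOLoop; the private IOLoop._ioloop_for_asyncio map
# (the same attribute the tests above inspect) must end at its starting size.
from tornado.ioloop import IOLoop
from tornado.platform.asyncio import AsyncIOLoop

before = len(IOLoop._ioloop_for_asyncio)
AsyncIOLoop().close()
assert len(IOLoop._ioloop_for_asyncio) == before
```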
    -
    -@unittest.skipIf(asyncio is None, "asyncio module not present")
    -class AnyThreadEventLoopPolicyTest(unittest.TestCase):
    -    def setUp(self):
    -        self.orig_policy = asyncio.get_event_loop_policy()
    -        self.executor = ThreadPoolExecutor(1)
    -
    -    def tearDown(self):
    -        asyncio.set_event_loop_policy(self.orig_policy)
    -        self.executor.shutdown()
    -
    -    def get_event_loop_on_thread(self):
    -        def get_and_close_event_loop():
    -            """Get the event loop. Close it if one is returned.
    -
    -            Returns the (closed) event loop. This is a silly thing
    -            to do and leaves the thread in a broken state, but it's
    -            enough for this test. Closing the loop avoids resource
    -            leak warnings.
    -            """
    -            loop = asyncio.get_event_loop()
    -            loop.close()
    -            return loop
    -        future = self.executor.submit(get_and_close_event_loop)
    -        return future.result()
    -
    -    def run_policy_test(self, accessor, expected_type):
    -        # With the default policy, non-main threads don't get an event
    -        # loop.
    -        self.assertRaises((RuntimeError, AssertionError),
    -                          self.executor.submit(accessor).result)
    -        # Set the policy and we can get a loop.
    -        asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
    -        self.assertIsInstance(
    -            self.executor.submit(accessor).result(),
    -            expected_type)
    -        # Clean up to silence leak warnings. Always use asyncio since
    -        # IOLoop doesn't (currently) close the underlying loop.
    -        self.executor.submit(lambda: asyncio.get_event_loop().close()).result()
    -
    -    def test_asyncio_accessor(self):
    -        self.run_policy_test(asyncio.get_event_loop, asyncio.AbstractEventLoop)
    -
    -    def test_tornado_accessor(self):
    -        self.run_policy_test(IOLoop.current, IOLoop)
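Outside the test harness, the behavior `run_policy_test` checks reduces to this sketch: the default policy raises on non-main threads, while the tornado policy hands out a loop.

```python
# With AnyThreadEventLoopPolicy installed, asyncio.get_event_loop() creates
# a loop on worker threads instead of raising RuntimeError.
import asyncio
from concurrent.futures import ThreadPoolExecutor
from tornado.platform.asyncio import AnyThreadEventLoopPolicy

asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
with ThreadPoolExecutor(1) as pool:
    loop = pool.submit(asyncio.get_event_loop).result()
    pool.submit(loop.close).result()  # close to silence leak warnings
```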
    diff --git a/lib/tornado/test/auth_test.py b/lib/tornado/test/auth_test.py
    deleted file mode 100755
    index 14bc3353..00000000
    --- a/lib/tornado/test/auth_test.py
    +++ /dev/null
    @@ -1,735 +0,0 @@
-# These tests do not currently do much to verify the correct implementation
-# of the openid/oauth protocols; they just exercise the major code paths
-# and ensure that nothing blows up (e.g. with unicode/bytes issues in
-# Python 3).
    -
    -
    -from __future__ import absolute_import, division, print_function
    -
    -import unittest
    -import warnings
    -
    -from tornado.auth import (
    -    AuthError, OpenIdMixin, OAuthMixin, OAuth2Mixin,
    -    GoogleOAuth2Mixin, FacebookGraphMixin, TwitterMixin,
    -)
    -from tornado.concurrent import Future
    -from tornado.escape import json_decode
    -from tornado import gen
    -from tornado.httputil import url_concat
    -from tornado.log import gen_log, app_log
    -from tornado.testing import AsyncHTTPTestCase, ExpectLog
    -from tornado.test.util import ignore_deprecation
    -from tornado.web import RequestHandler, Application, asynchronous, HTTPError
    -
    -try:
    -    from unittest import mock
    -except ImportError:
    -    mock = None
    -
    -
    -class OpenIdClientLoginHandlerLegacy(RequestHandler, OpenIdMixin):
    -    def initialize(self, test):
    -        self._OPENID_ENDPOINT = test.get_url('/openid/server/authenticate')
    -
    -    with ignore_deprecation():
    -        @asynchronous
    -        def get(self):
    -            if self.get_argument('openid.mode', None):
    -                with warnings.catch_warnings():
    -                    warnings.simplefilter('ignore', DeprecationWarning)
    -                    self.get_authenticated_user(
    -                        self.on_user, http_client=self.settings['http_client'])
    -                    return
    -            res = self.authenticate_redirect()
    -            assert isinstance(res, Future)
    -            assert res.done()
    -
    -    def on_user(self, user):
    -        if user is None:
    -            raise Exception("user is None")
    -        self.finish(user)
    -
    -
    -class OpenIdClientLoginHandler(RequestHandler, OpenIdMixin):
    -    def initialize(self, test):
    -        self._OPENID_ENDPOINT = test.get_url('/openid/server/authenticate')
    -
    -    @gen.coroutine
    -    def get(self):
    -        if self.get_argument('openid.mode', None):
    -            user = yield self.get_authenticated_user(http_client=self.settings['http_client'])
    -            if user is None:
    -                raise Exception("user is None")
    -            self.finish(user)
    -            return
    -        res = self.authenticate_redirect()
    -        assert isinstance(res, Future)
    -        assert res.done()
    -
    -
    -class OpenIdServerAuthenticateHandler(RequestHandler):
    -    def post(self):
    -        if self.get_argument('openid.mode') != 'check_authentication':
    -            raise Exception("incorrect openid.mode %r")
    -        self.write('is_valid:true')
    -
    -
    -class OAuth1ClientLoginHandlerLegacy(RequestHandler, OAuthMixin):
    -    def initialize(self, test, version):
    -        self._OAUTH_VERSION = version
    -        self._OAUTH_REQUEST_TOKEN_URL = test.get_url('/oauth1/server/request_token')
    -        self._OAUTH_AUTHORIZE_URL = test.get_url('/oauth1/server/authorize')
    -        self._OAUTH_ACCESS_TOKEN_URL = test.get_url('/oauth1/server/access_token')
    -
    -    def _oauth_consumer_token(self):
    -        return dict(key='asdf', secret='qwer')
    -
    -    with ignore_deprecation():
    -        @asynchronous
    -        def get(self):
    -            if self.get_argument('oauth_token', None):
    -                with warnings.catch_warnings():
    -                    warnings.simplefilter('ignore', DeprecationWarning)
    -                    self.get_authenticated_user(
    -                        self.on_user, http_client=self.settings['http_client'])
    -                return
    -            res = self.authorize_redirect(http_client=self.settings['http_client'])
    -            assert isinstance(res, Future)
    -
    -    def on_user(self, user):
    -        if user is None:
    -            raise Exception("user is None")
    -        self.finish(user)
    -
    -    def _oauth_get_user(self, access_token, callback):
    -        if self.get_argument('fail_in_get_user', None):
    -            raise Exception("failing in get_user")
    -        if access_token != dict(key='uiop', secret='5678'):
    -            raise Exception("incorrect access token %r" % access_token)
    -        callback(dict(email='foo@example.com'))
    -
    -
    -class OAuth1ClientLoginHandler(RequestHandler, OAuthMixin):
    -    def initialize(self, test, version):
    -        self._OAUTH_VERSION = version
    -        self._OAUTH_REQUEST_TOKEN_URL = test.get_url('/oauth1/server/request_token')
    -        self._OAUTH_AUTHORIZE_URL = test.get_url('/oauth1/server/authorize')
    -        self._OAUTH_ACCESS_TOKEN_URL = test.get_url('/oauth1/server/access_token')
    -
    -    def _oauth_consumer_token(self):
    -        return dict(key='asdf', secret='qwer')
    -
    -    @gen.coroutine
    -    def get(self):
    -        if self.get_argument('oauth_token', None):
    -            user = yield self.get_authenticated_user(http_client=self.settings['http_client'])
    -            if user is None:
    -                raise Exception("user is None")
    -            self.finish(user)
    -            return
    -        yield self.authorize_redirect(http_client=self.settings['http_client'])
    -
    -    @gen.coroutine
    -    def _oauth_get_user_future(self, access_token):
    -        if self.get_argument('fail_in_get_user', None):
    -            raise Exception("failing in get_user")
    -        if access_token != dict(key='uiop', secret='5678'):
    -            raise Exception("incorrect access token %r" % access_token)
    -        return dict(email='foo@example.com')
    -
    -
    -class OAuth1ClientLoginCoroutineHandler(OAuth1ClientLoginHandler):
    -    """Replaces OAuth1ClientLoginCoroutineHandler's get() with a coroutine."""
    -    @gen.coroutine
    -    def get(self):
    -        if self.get_argument('oauth_token', None):
    -            # Ensure that any exceptions are set on the returned Future,
    -            # not simply thrown into the surrounding StackContext.
    -            try:
    -                yield self.get_authenticated_user()
    -            except Exception as e:
    -                self.set_status(503)
    -                self.write("got exception: %s" % e)
    -        else:
    -            yield self.authorize_redirect()
    -
    -
    -class OAuth1ClientRequestParametersHandler(RequestHandler, OAuthMixin):
    -    def initialize(self, version):
    -        self._OAUTH_VERSION = version
    -
    -    def _oauth_consumer_token(self):
    -        return dict(key='asdf', secret='qwer')
    -
    -    def get(self):
    -        params = self._oauth_request_parameters(
    -            'http://www.example.com/api/asdf',
    -            dict(key='uiop', secret='5678'),
    -            parameters=dict(foo='bar'))
    -        self.write(params)
    -
    -
    -class OAuth1ServerRequestTokenHandler(RequestHandler):
    -    def get(self):
    -        self.write('oauth_token=zxcv&oauth_token_secret=1234')
    -
    -
    -class OAuth1ServerAccessTokenHandler(RequestHandler):
    -    def get(self):
    -        self.write('oauth_token=uiop&oauth_token_secret=5678')
    -
    -
    -class OAuth2ClientLoginHandler(RequestHandler, OAuth2Mixin):
    -    def initialize(self, test):
    -        self._OAUTH_AUTHORIZE_URL = test.get_url('/oauth2/server/authorize')
    -
    -    def get(self):
    -        res = self.authorize_redirect()
    -        assert isinstance(res, Future)
    -        assert res.done()
    -
    -
    -class FacebookClientLoginHandler(RequestHandler, FacebookGraphMixin):
    -    def initialize(self, test):
    -        self._OAUTH_AUTHORIZE_URL = test.get_url('/facebook/server/authorize')
    -        self._OAUTH_ACCESS_TOKEN_URL = test.get_url('/facebook/server/access_token')
    -        self._FACEBOOK_BASE_URL = test.get_url('/facebook/server')
    -
    -    @gen.coroutine
    -    def get(self):
    -        if self.get_argument("code", None):
    -            user = yield self.get_authenticated_user(
    -                redirect_uri=self.request.full_url(),
    -                client_id=self.settings["facebook_api_key"],
    -                client_secret=self.settings["facebook_secret"],
    -                code=self.get_argument("code"))
    -            self.write(user)
    -        else:
    -            yield self.authorize_redirect(
    -                redirect_uri=self.request.full_url(),
    -                client_id=self.settings["facebook_api_key"],
    -                extra_params={"scope": "read_stream,offline_access"})
    -
    -
    -class FacebookServerAccessTokenHandler(RequestHandler):
    -    def get(self):
    -        self.write(dict(access_token="asdf", expires_in=3600))
    -
    -
    -class FacebookServerMeHandler(RequestHandler):
    -    def get(self):
    -        self.write('{}')
    -
    -
    -class TwitterClientHandler(RequestHandler, TwitterMixin):
    -    def initialize(self, test):
    -        self._OAUTH_REQUEST_TOKEN_URL = test.get_url('/oauth1/server/request_token')
    -        self._OAUTH_ACCESS_TOKEN_URL = test.get_url('/twitter/server/access_token')
    -        self._OAUTH_AUTHORIZE_URL = test.get_url('/oauth1/server/authorize')
    -        self._OAUTH_AUTHENTICATE_URL = test.get_url('/twitter/server/authenticate')
    -        self._TWITTER_BASE_URL = test.get_url('/twitter/api')
    -
    -    def get_auth_http_client(self):
    -        return self.settings['http_client']
    -
    -
    -class TwitterClientLoginHandlerLegacy(TwitterClientHandler):
    -    with ignore_deprecation():
    -        @asynchronous
    -        def get(self):
    -            if self.get_argument("oauth_token", None):
    -                self.get_authenticated_user(self.on_user)
    -                return
    -            self.authorize_redirect()
    -
    -    def on_user(self, user):
    -        if user is None:
    -            raise Exception("user is None")
    -        self.finish(user)
    -
    -
    -class TwitterClientLoginHandler(TwitterClientHandler):
    -    @gen.coroutine
    -    def get(self):
    -        if self.get_argument("oauth_token", None):
    -            user = yield self.get_authenticated_user()
    -            if user is None:
    -                raise Exception("user is None")
    -            self.finish(user)
    -            return
    -        yield self.authorize_redirect()
    -
    -
    -class TwitterClientAuthenticateHandler(TwitterClientHandler):
    -    # Like TwitterClientLoginHandler, but uses authenticate_redirect
    -    # instead of authorize_redirect.
    -    @gen.coroutine
    -    def get(self):
    -        if self.get_argument("oauth_token", None):
    -            user = yield self.get_authenticated_user()
    -            if user is None:
    -                raise Exception("user is None")
    -            self.finish(user)
    -            return
    -        yield self.authenticate_redirect()
    -
    -
    -class TwitterClientLoginGenEngineHandler(TwitterClientHandler):
    -    with ignore_deprecation():
    -        @asynchronous
    -        @gen.engine
    -        def get(self):
    -            if self.get_argument("oauth_token", None):
    -                user = yield self.get_authenticated_user()
    -                self.finish(user)
    -            else:
    -                # Old style: with @gen.engine we can ignore the Future from
    -                # authorize_redirect.
    -                self.authorize_redirect()
    -
    -
    -class TwitterClientLoginGenCoroutineHandler(TwitterClientHandler):
    -    @gen.coroutine
    -    def get(self):
    -        if self.get_argument("oauth_token", None):
    -            user = yield self.get_authenticated_user()
    -            self.finish(user)
    -        else:
    -            # New style: with @gen.coroutine the result must be yielded
    -            # or else the request will be auto-finished too soon.
    -            yield self.authorize_redirect()
    -
    -
    -class TwitterClientShowUserHandlerLegacy(TwitterClientHandler):
    -    with ignore_deprecation():
    -        @asynchronous
    -        @gen.engine
    -        def get(self):
    -            # TODO: would be nice to go through the login flow instead of
    -            # cheating with a hard-coded access token.
    -            with warnings.catch_warnings():
    -                warnings.simplefilter('ignore', DeprecationWarning)
    -                response = yield gen.Task(self.twitter_request,
    -                                          '/users/show/%s' % self.get_argument('name'),
    -                                          access_token=dict(key='hjkl', secret='vbnm'))
    -            if response is None:
    -                self.set_status(500)
    -                self.finish('error from twitter request')
    -            else:
    -                self.finish(response)
    -
    -
    -class TwitterClientShowUserHandler(TwitterClientHandler):
    -    @gen.coroutine
    -    def get(self):
    -        # TODO: would be nice to go through the login flow instead of
    -        # cheating with a hard-coded access token.
    -        try:
    -            response = yield self.twitter_request(
    -                '/users/show/%s' % self.get_argument('name'),
    -                access_token=dict(key='hjkl', secret='vbnm'))
    -        except AuthError:
    -            self.set_status(500)
    -            self.finish('error from twitter request')
    -        else:
    -            self.finish(response)
    -
    -
    -class TwitterServerAccessTokenHandler(RequestHandler):
    -    def get(self):
    -        self.write('oauth_token=hjkl&oauth_token_secret=vbnm&screen_name=foo')
    -
    -
    -class TwitterServerShowUserHandler(RequestHandler):
    -    def get(self, screen_name):
    -        if screen_name == 'error':
    -            raise HTTPError(500)
    -        assert 'oauth_nonce' in self.request.arguments
    -        assert 'oauth_timestamp' in self.request.arguments
    -        assert 'oauth_signature' in self.request.arguments
    -        assert self.get_argument('oauth_consumer_key') == 'test_twitter_consumer_key'
    -        assert self.get_argument('oauth_signature_method') == 'HMAC-SHA1'
    -        assert self.get_argument('oauth_version') == '1.0'
    -        assert self.get_argument('oauth_token') == 'hjkl'
    -        self.write(dict(screen_name=screen_name, name=screen_name.capitalize()))
    -
    -
    -class TwitterServerVerifyCredentialsHandler(RequestHandler):
    -    def get(self):
    -        assert 'oauth_nonce' in self.request.arguments
    -        assert 'oauth_timestamp' in self.request.arguments
    -        assert 'oauth_signature' in self.request.arguments
    -        assert self.get_argument('oauth_consumer_key') == 'test_twitter_consumer_key'
    -        assert self.get_argument('oauth_signature_method') == 'HMAC-SHA1'
    -        assert self.get_argument('oauth_version') == '1.0'
    -        assert self.get_argument('oauth_token') == 'hjkl'
    -        self.write(dict(screen_name='foo', name='Foo'))
    -
    -
    -class AuthTest(AsyncHTTPTestCase):
    -    def get_app(self):
    -        return Application(
    -            [
    -                # test endpoints
    -                ('/legacy/openid/client/login', OpenIdClientLoginHandlerLegacy, dict(test=self)),
    -                ('/openid/client/login', OpenIdClientLoginHandler, dict(test=self)),
    -                ('/legacy/oauth10/client/login', OAuth1ClientLoginHandlerLegacy,
    -                 dict(test=self, version='1.0')),
    -                ('/oauth10/client/login', OAuth1ClientLoginHandler,
    -                 dict(test=self, version='1.0')),
    -                ('/oauth10/client/request_params',
    -                 OAuth1ClientRequestParametersHandler,
    -                 dict(version='1.0')),
    -                ('/legacy/oauth10a/client/login', OAuth1ClientLoginHandlerLegacy,
    -                 dict(test=self, version='1.0a')),
    -                ('/oauth10a/client/login', OAuth1ClientLoginHandler,
    -                 dict(test=self, version='1.0a')),
    -                ('/oauth10a/client/login_coroutine',
    -                 OAuth1ClientLoginCoroutineHandler,
    -                 dict(test=self, version='1.0a')),
    -                ('/oauth10a/client/request_params',
    -                 OAuth1ClientRequestParametersHandler,
    -                 dict(version='1.0a')),
    -                ('/oauth2/client/login', OAuth2ClientLoginHandler, dict(test=self)),
    -
    -                ('/facebook/client/login', FacebookClientLoginHandler, dict(test=self)),
    -
    -                ('/legacy/twitter/client/login', TwitterClientLoginHandlerLegacy, dict(test=self)),
    -                ('/twitter/client/login', TwitterClientLoginHandler, dict(test=self)),
    -                ('/twitter/client/authenticate', TwitterClientAuthenticateHandler, dict(test=self)),
    -                ('/twitter/client/login_gen_engine',
    -                 TwitterClientLoginGenEngineHandler, dict(test=self)),
    -                ('/twitter/client/login_gen_coroutine',
    -                 TwitterClientLoginGenCoroutineHandler, dict(test=self)),
    -                ('/legacy/twitter/client/show_user',
    -                 TwitterClientShowUserHandlerLegacy, dict(test=self)),
    -                ('/twitter/client/show_user',
    -                 TwitterClientShowUserHandler, dict(test=self)),
    -
    -                # simulated servers
    -                ('/openid/server/authenticate', OpenIdServerAuthenticateHandler),
    -                ('/oauth1/server/request_token', OAuth1ServerRequestTokenHandler),
    -                ('/oauth1/server/access_token', OAuth1ServerAccessTokenHandler),
    -
    -                ('/facebook/server/access_token', FacebookServerAccessTokenHandler),
    -                ('/facebook/server/me', FacebookServerMeHandler),
    -                ('/twitter/server/access_token', TwitterServerAccessTokenHandler),
    -                (r'/twitter/api/users/show/(.*)\.json', TwitterServerShowUserHandler),
    -                (r'/twitter/api/account/verify_credentials\.json',
    -                 TwitterServerVerifyCredentialsHandler),
    -            ],
    -            http_client=self.http_client,
    -            twitter_consumer_key='test_twitter_consumer_key',
    -            twitter_consumer_secret='test_twitter_consumer_secret',
    -            facebook_api_key='test_facebook_api_key',
    -            facebook_secret='test_facebook_secret')
    -
    -    def test_openid_redirect_legacy(self):
    -        response = self.fetch('/legacy/openid/client/login', follow_redirects=False)
    -        self.assertEqual(response.code, 302)
    -        self.assertTrue(
    -            '/openid/server/authenticate?' in response.headers['Location'])
    -
    -    def test_openid_get_user_legacy(self):
    -        response = self.fetch('/legacy/openid/client/login?openid.mode=blah'
    -                              '&openid.ns.ax=http://openid.net/srv/ax/1.0'
    -                              '&openid.ax.type.email=http://axschema.org/contact/email'
    -                              '&openid.ax.value.email=foo@example.com')
    -        response.rethrow()
    -        parsed = json_decode(response.body)
    -        self.assertEqual(parsed["email"], "foo@example.com")
    -
    -    def test_openid_redirect(self):
    -        response = self.fetch('/openid/client/login', follow_redirects=False)
    -        self.assertEqual(response.code, 302)
    -        self.assertTrue(
    -            '/openid/server/authenticate?' in response.headers['Location'])
    -
    -    def test_openid_get_user(self):
    -        response = self.fetch('/openid/client/login?openid.mode=blah'
    -                              '&openid.ns.ax=http://openid.net/srv/ax/1.0'
    -                              '&openid.ax.type.email=http://axschema.org/contact/email'
    -                              '&openid.ax.value.email=foo@example.com')
    -        response.rethrow()
    -        parsed = json_decode(response.body)
    -        self.assertEqual(parsed["email"], "foo@example.com")
    -
    -    def test_oauth10_redirect_legacy(self):
    -        response = self.fetch('/legacy/oauth10/client/login', follow_redirects=False)
    -        self.assertEqual(response.code, 302)
    -        self.assertTrue(response.headers['Location'].endswith(
    -            '/oauth1/server/authorize?oauth_token=zxcv'))
    -        # the cookie is base64('zxcv')|base64('1234')
    -        self.assertTrue(
    -            '_oauth_request_token="enhjdg==|MTIzNA=="' in response.headers['Set-Cookie'],
    -            response.headers['Set-Cookie'])
    -
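The `enhjdg==|MTIzNA==` literal that recurs in these assertions is just the request token and secret, base64-encoded and pipe-joined, as the inline comments say; this can be confirmed directly:

```python
# The _oauth_request_token cookie format asserted throughout these tests:
# base64(oauth_token) + '|' + base64(oauth_token_secret).
import base64

token, secret = b'zxcv', b'1234'
assert base64.b64encode(token) + b'|' + base64.b64encode(secret) == \
    b'enhjdg==|MTIzNA=='
```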
    -    def test_oauth10_redirect(self):
    -        response = self.fetch('/oauth10/client/login', follow_redirects=False)
    -        self.assertEqual(response.code, 302)
    -        self.assertTrue(response.headers['Location'].endswith(
    -            '/oauth1/server/authorize?oauth_token=zxcv'))
    -        # the cookie is base64('zxcv')|base64('1234')
    -        self.assertTrue(
    -            '_oauth_request_token="enhjdg==|MTIzNA=="' in response.headers['Set-Cookie'],
    -            response.headers['Set-Cookie'])
    -
    -    def test_oauth10_get_user_legacy(self):
    -        with ignore_deprecation():
    -            response = self.fetch(
    -                '/legacy/oauth10/client/login?oauth_token=zxcv',
    -                headers={'Cookie': '_oauth_request_token=enhjdg==|MTIzNA=='})
    -        response.rethrow()
    -        parsed = json_decode(response.body)
    -        self.assertEqual(parsed['email'], 'foo@example.com')
    -        self.assertEqual(parsed['access_token'], dict(key='uiop', secret='5678'))
    -
    -    def test_oauth10_get_user(self):
    -        response = self.fetch(
    -            '/oauth10/client/login?oauth_token=zxcv',
    -            headers={'Cookie': '_oauth_request_token=enhjdg==|MTIzNA=='})
    -        response.rethrow()
    -        parsed = json_decode(response.body)
    -        self.assertEqual(parsed['email'], 'foo@example.com')
    -        self.assertEqual(parsed['access_token'], dict(key='uiop', secret='5678'))
    -
    -    def test_oauth10_request_parameters(self):
    -        response = self.fetch('/oauth10/client/request_params')
    -        response.rethrow()
    -        parsed = json_decode(response.body)
    -        self.assertEqual(parsed['oauth_consumer_key'], 'asdf')
    -        self.assertEqual(parsed['oauth_token'], 'uiop')
    -        self.assertTrue('oauth_nonce' in parsed)
    -        self.assertTrue('oauth_signature' in parsed)
    -
    -    def test_oauth10a_redirect_legacy(self):
    -        response = self.fetch('/legacy/oauth10a/client/login', follow_redirects=False)
    -        self.assertEqual(response.code, 302)
    -        self.assertTrue(response.headers['Location'].endswith(
    -            '/oauth1/server/authorize?oauth_token=zxcv'))
    -        # the cookie is base64('zxcv')|base64('1234')
    -        self.assertTrue(
    -            '_oauth_request_token="enhjdg==|MTIzNA=="' in response.headers['Set-Cookie'],
    -            response.headers['Set-Cookie'])
    -
    -    def test_oauth10a_get_user_legacy(self):
    -        with ignore_deprecation():
    -            response = self.fetch(
    -                '/legacy/oauth10a/client/login?oauth_token=zxcv',
    -                headers={'Cookie': '_oauth_request_token=enhjdg==|MTIzNA=='})
    -        response.rethrow()
    -        parsed = json_decode(response.body)
    -        self.assertEqual(parsed['email'], 'foo@example.com')
    -        self.assertEqual(parsed['access_token'], dict(key='uiop', secret='5678'))
    -
    -    def test_oauth10a_redirect(self):
    -        response = self.fetch('/oauth10a/client/login', follow_redirects=False)
    -        self.assertEqual(response.code, 302)
    -        self.assertTrue(response.headers['Location'].endswith(
    -            '/oauth1/server/authorize?oauth_token=zxcv'))
    -        # the cookie is base64('zxcv')|base64('1234')
    -        self.assertTrue(
    -            '_oauth_request_token="enhjdg==|MTIzNA=="' in response.headers['Set-Cookie'],
    -            response.headers['Set-Cookie'])
    -
    -    @unittest.skipIf(mock is None, 'mock package not present')
    -    def test_oauth10a_redirect_error(self):
    -        with mock.patch.object(OAuth1ServerRequestTokenHandler, 'get') as get:
    -            get.side_effect = Exception("boom")
    -            with ExpectLog(app_log, "Uncaught exception"):
    -                response = self.fetch('/oauth10a/client/login', follow_redirects=False)
    -            self.assertEqual(response.code, 500)
    -
    -    def test_oauth10a_get_user(self):
    -        response = self.fetch(
    -            '/oauth10a/client/login?oauth_token=zxcv',
    -            headers={'Cookie': '_oauth_request_token=enhjdg==|MTIzNA=='})
    -        response.rethrow()
    -        parsed = json_decode(response.body)
    -        self.assertEqual(parsed['email'], 'foo@example.com')
    -        self.assertEqual(parsed['access_token'], dict(key='uiop', secret='5678'))
    -
    -    def test_oauth10a_request_parameters(self):
    -        response = self.fetch('/oauth10a/client/request_params')
    -        response.rethrow()
    -        parsed = json_decode(response.body)
    -        self.assertEqual(parsed['oauth_consumer_key'], 'asdf')
    -        self.assertEqual(parsed['oauth_token'], 'uiop')
    -        self.assertTrue('oauth_nonce' in parsed)
    -        self.assertTrue('oauth_signature' in parsed)
    -
    -    def test_oauth10a_get_user_coroutine_exception(self):
    -        response = self.fetch(
    -            '/oauth10a/client/login_coroutine?oauth_token=zxcv&fail_in_get_user=true',
    -            headers={'Cookie': '_oauth_request_token=enhjdg==|MTIzNA=='})
    -        self.assertEqual(response.code, 503)
    -
    -    def test_oauth2_redirect(self):
    -        response = self.fetch('/oauth2/client/login', follow_redirects=False)
    -        self.assertEqual(response.code, 302)
    -        self.assertTrue('/oauth2/server/authorize?' in response.headers['Location'])
    -
    -    def test_facebook_login(self):
    -        response = self.fetch('/facebook/client/login', follow_redirects=False)
    -        self.assertEqual(response.code, 302)
    -        self.assertTrue('/facebook/server/authorize?' in response.headers['Location'])
    -        response = self.fetch('/facebook/client/login?code=1234', follow_redirects=False)
    -        self.assertEqual(response.code, 200)
    -        user = json_decode(response.body)
    -        self.assertEqual(user['access_token'], 'asdf')
    -        self.assertEqual(user['session_expires'], '3600')
    -
    -    def base_twitter_redirect(self, url):
    -        # Same as test_oauth10a_redirect
    -        response = self.fetch(url, follow_redirects=False)
    -        self.assertEqual(response.code, 302)
    -        self.assertTrue(response.headers['Location'].endswith(
    -            '/oauth1/server/authorize?oauth_token=zxcv'))
    -        # the cookie is base64('zxcv')|base64('1234')
    -        self.assertTrue(
    -            '_oauth_request_token="enhjdg==|MTIzNA=="' in response.headers['Set-Cookie'],
    -            response.headers['Set-Cookie'])
    -
    -    def test_twitter_redirect_legacy(self):
    -        self.base_twitter_redirect('/legacy/twitter/client/login')
    -
    -    def test_twitter_redirect(self):
    -        self.base_twitter_redirect('/twitter/client/login')
    -
    -    def test_twitter_redirect_gen_engine(self):
    -        self.base_twitter_redirect('/twitter/client/login_gen_engine')
    -
    -    def test_twitter_redirect_gen_coroutine(self):
    -        self.base_twitter_redirect('/twitter/client/login_gen_coroutine')
    -
    -    def test_twitter_authenticate_redirect(self):
    -        response = self.fetch('/twitter/client/authenticate', follow_redirects=False)
    -        self.assertEqual(response.code, 302)
    -        self.assertTrue(response.headers['Location'].endswith(
    -            '/twitter/server/authenticate?oauth_token=zxcv'), response.headers['Location'])
    -        # the cookie is base64('zxcv')|base64('1234')
    -        self.assertTrue(
    -            '_oauth_request_token="enhjdg==|MTIzNA=="' in response.headers['Set-Cookie'],
    -            response.headers['Set-Cookie'])
    -
    -    def test_twitter_get_user(self):
    -        response = self.fetch(
    -            '/twitter/client/login?oauth_token=zxcv',
    -            headers={'Cookie': '_oauth_request_token=enhjdg==|MTIzNA=='})
    -        response.rethrow()
    -        parsed = json_decode(response.body)
    -        self.assertEqual(parsed,
    -                         {u'access_token': {u'key': u'hjkl',
    -                                            u'screen_name': u'foo',
    -                                            u'secret': u'vbnm'},
    -                          u'name': u'Foo',
    -                          u'screen_name': u'foo',
    -                          u'username': u'foo'})
    -
    -    def test_twitter_show_user_legacy(self):
    -        response = self.fetch('/legacy/twitter/client/show_user?name=somebody')
    -        response.rethrow()
    -        self.assertEqual(json_decode(response.body),
    -                         {'name': 'Somebody', 'screen_name': 'somebody'})
    -
    -    def test_twitter_show_user_error_legacy(self):
    -        with ExpectLog(gen_log, 'Error response HTTP 500'):
    -            response = self.fetch('/legacy/twitter/client/show_user?name=error')
    -        self.assertEqual(response.code, 500)
    -        self.assertEqual(response.body, b'error from twitter request')
    -
    -    def test_twitter_show_user(self):
    -        response = self.fetch('/twitter/client/show_user?name=somebody')
    -        response.rethrow()
    -        self.assertEqual(json_decode(response.body),
    -                         {'name': 'Somebody', 'screen_name': 'somebody'})
    -
    -    def test_twitter_show_user_error(self):
    -        response = self.fetch('/twitter/client/show_user?name=error')
    -        self.assertEqual(response.code, 500)
    -        self.assertEqual(response.body, b'error from twitter request')
    -
    -
    -class GoogleLoginHandler(RequestHandler, GoogleOAuth2Mixin):
    -    def initialize(self, test):
    -        self.test = test
    -        self._OAUTH_REDIRECT_URI = test.get_url('/client/login')
    -        self._OAUTH_AUTHORIZE_URL = test.get_url('/google/oauth2/authorize')
    -        self._OAUTH_ACCESS_TOKEN_URL = test.get_url('/google/oauth2/token')
    -
    -    @gen.coroutine
    -    def get(self):
    -        code = self.get_argument('code', None)
    -        if code is not None:
-            # retrieve the authenticated Google user
    -            access = yield self.get_authenticated_user(self._OAUTH_REDIRECT_URI,
    -                                                       code)
    -            user = yield self.oauth2_request(
    -                self.test.get_url("/google/oauth2/userinfo"),
    -                access_token=access["access_token"])
    -            # return the user and access token as json
    -            user["access_token"] = access["access_token"]
    -            self.write(user)
    -        else:
    -            yield self.authorize_redirect(
    -                redirect_uri=self._OAUTH_REDIRECT_URI,
    -                client_id=self.settings['google_oauth']['key'],
    -                client_secret=self.settings['google_oauth']['secret'],
    -                scope=['profile', 'email'],
    -                response_type='code',
    -                extra_params={'prompt': 'select_account'})
    -
    -
    -class GoogleOAuth2AuthorizeHandler(RequestHandler):
    -    def get(self):
    -        # issue a fake auth code and redirect to redirect_uri
    -        code = 'fake-authorization-code'
    -        self.redirect(url_concat(self.get_argument('redirect_uri'),
    -                                 dict(code=code)))
    -
    -
    -class GoogleOAuth2TokenHandler(RequestHandler):
    -    def post(self):
    -        assert self.get_argument('code') == 'fake-authorization-code'
    -        # issue a fake token
    -        self.finish({
    -            'access_token': 'fake-access-token',
    -            'expires_in': 'never-expires'
    -        })
    -
    -
    -class GoogleOAuth2UserinfoHandler(RequestHandler):
    -    def get(self):
    -        assert self.get_argument('access_token') == 'fake-access-token'
    -        # return a fake user
    -        self.finish({
    -            'name': 'Foo',
    -            'email': 'foo@example.com'
    -        })
    -
    -
    -class GoogleOAuth2Test(AsyncHTTPTestCase):
    -    def get_app(self):
    -        return Application(
    -            [
    -                # test endpoints
    -                ('/client/login', GoogleLoginHandler, dict(test=self)),
    -
    -                # simulated google authorization server endpoints
    -                ('/google/oauth2/authorize', GoogleOAuth2AuthorizeHandler),
    -                ('/google/oauth2/token', GoogleOAuth2TokenHandler),
    -                ('/google/oauth2/userinfo', GoogleOAuth2UserinfoHandler),
    -            ],
    -            google_oauth={
    -                "key": 'fake_google_client_id',
    -                "secret": 'fake_google_client_secret'
    -            })
    -
    -    def test_google_login(self):
    -        response = self.fetch('/client/login')
    -        self.assertDictEqual({
    -            u'name': u'Foo',
    -            u'email': u'foo@example.com',
    -            u'access_token': u'fake-access-token',
    -        }, json_decode(response.body))
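The round trip these handlers simulate is the standard OAuth2 authorization-code exchange (authorize, redirect back with a code, swap the code for a token, query userinfo). The one piece of real URL manipulation is `url_concat`; a sketch of the redirect step, with a hypothetical redirect URI:

```python
# The step GoogleOAuth2AuthorizeHandler performs: append the issued code to
# the client's redirect_uri as a query parameter.
from tornado.httputil import url_concat

redirect_uri = 'http://127.0.0.1:8888/client/login'  # hypothetical test URL
print(url_concat(redirect_uri, dict(code='fake-authorization-code')))
# -> http://127.0.0.1:8888/client/login?code=fake-authorization-code
```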
    diff --git a/lib/tornado/test/autoreload_test.py b/lib/tornado/test/autoreload_test.py
    deleted file mode 100755
    index 5cbdc2ee..00000000
    --- a/lib/tornado/test/autoreload_test.py
    +++ /dev/null
    @@ -1,114 +0,0 @@
    -from __future__ import absolute_import, division, print_function
    -import os
    -import shutil
    -import subprocess
    -from subprocess import Popen
    -import sys
    -from tempfile import mkdtemp
    -import time
    -
    -from tornado.test.util import unittest
    -
    -
    -class AutoreloadTest(unittest.TestCase):
    -
    -    def test_reload_module(self):
    -        main = """\
    -import os
    -import sys
    -
    -from tornado import autoreload
    -
    -# This import will fail if path is not set up correctly
    -import testapp
    -
    -print('Starting')
    -if 'TESTAPP_STARTED' not in os.environ:
    -    os.environ['TESTAPP_STARTED'] = '1'
    -    sys.stdout.flush()
    -    autoreload._reload()
    -"""
    -
    -        # Create temporary test application
    -        path = mkdtemp()
    -        self.addCleanup(shutil.rmtree, path)
    -        os.mkdir(os.path.join(path, 'testapp'))
    -        open(os.path.join(path, 'testapp/__init__.py'), 'w').close()
    -        with open(os.path.join(path, 'testapp/__main__.py'), 'w') as f:
    -            f.write(main)
    -
    -        # Make sure the tornado module under test is available to the test
    -        # application
    -        pythonpath = os.getcwd()
    -        if 'PYTHONPATH' in os.environ:
    -            pythonpath += os.pathsep + os.environ['PYTHONPATH']
    -
    -        p = Popen(
    -            [sys.executable, '-m', 'testapp'], stdout=subprocess.PIPE,
    -            cwd=path, env=dict(os.environ, PYTHONPATH=pythonpath),
    -            universal_newlines=True)
    -        out = p.communicate()[0]
    -        self.assertEqual(out, 'Starting\nStarting\n')
    -
    -    def test_reload_wrapper_preservation(self):
    -        # This test verifies that when `python -m tornado.autoreload`
    -        # is used on an application that also has an internal
    -        # autoreload, the reload wrapper is preserved on restart.
    -        main = """\
    -import os
    -import sys
    -
    -# This import will fail if path is not set up correctly
    -import testapp
    -
    -if 'tornado.autoreload' not in sys.modules:
    -    raise Exception('started without autoreload wrapper')
    -
    -import tornado.autoreload
    -
    -print('Starting')
    -sys.stdout.flush()
    -if 'TESTAPP_STARTED' not in os.environ:
    -    os.environ['TESTAPP_STARTED'] = '1'
    -    # Simulate an internal autoreload (one not caused
    -    # by the wrapper).
    -    tornado.autoreload._reload()
    -else:
    -    # Exit directly so autoreload doesn't catch it.
    -    os._exit(0)
    -"""
    -
    -        # Create temporary test application
    -        path = mkdtemp()
    -        os.mkdir(os.path.join(path, 'testapp'))
    -        self.addCleanup(shutil.rmtree, path)
    -        init_file = os.path.join(path, 'testapp', '__init__.py')
    -        open(init_file, 'w').close()
    -        main_file = os.path.join(path, 'testapp', '__main__.py')
    -        with open(main_file, 'w') as f:
    -            f.write(main)
    -
    -        # Make sure the tornado module under test is available to the test
    -        # application
    -        pythonpath = os.getcwd()
    -        if 'PYTHONPATH' in os.environ:
    -            pythonpath += os.pathsep + os.environ['PYTHONPATH']
    -
    -        autoreload_proc = Popen(
    -            [sys.executable, '-m', 'tornado.autoreload', '-m', 'testapp'],
    -            stdout=subprocess.PIPE, cwd=path,
    -            env=dict(os.environ, PYTHONPATH=pythonpath),
    -            universal_newlines=True)
    -
    -        # This timeout needs to be fairly generous for pypy due to jit
    -        # warmup costs.
    -        for i in range(40):
    -            if autoreload_proc.poll() is not None:
    -                break
    -            time.sleep(0.1)
    -        else:
    -            autoreload_proc.kill()
    -            raise Exception("subprocess failed to terminate")
    -
    -        out = autoreload_proc.communicate()[0]
    -        self.assertEqual(out, 'Starting\n' * 2)
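Both tests drive `tornado.autoreload` indirectly through subprocesses; for reference, the in-process API they ultimately exercise is small. A sketch (the watched file name is hypothetical):

```python
# In-process autoreload: start() polls imported modules for changes and
# re-execs the process when one is modified; watch() adds extra paths.
from tornado import autoreload, ioloop

autoreload.start()                 # hook into the current IOLoop
autoreload.watch('settings.conf')  # hypothetical non-module file to watch
ioloop.IOLoop.current().start()
```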
    diff --git a/lib/tornado/test/concurrent_test.py b/lib/tornado/test/concurrent_test.py
    deleted file mode 100755
    index 737c13e1..00000000
    --- a/lib/tornado/test/concurrent_test.py
    +++ /dev/null
    @@ -1,496 +0,0 @@
    -#
    -# Copyright 2012 Facebook
    -#
    -# Licensed under the Apache License, Version 2.0 (the "License"); you may
    -# not use this file except in compliance with the License. You may obtain
    -# a copy of the License at
    -#
    -#     http://www.apache.org/licenses/LICENSE-2.0
    -#
    -# Unless required by applicable law or agreed to in writing, software
    -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    -# License for the specific language governing permissions and limitations
    -# under the License.
    -from __future__ import absolute_import, division, print_function
    -
    -import gc
    -import logging
    -import re
    -import socket
    -import sys
    -import traceback
    -import warnings
    -
    -from tornado.concurrent import (Future, return_future, ReturnValueIgnoredError,
    -                                run_on_executor, future_set_result_unless_cancelled)
    -from tornado.escape import utf8, to_unicode
    -from tornado import gen
    -from tornado.ioloop import IOLoop
    -from tornado.iostream import IOStream
    -from tornado.log import app_log
    -from tornado import stack_context
    -from tornado.tcpserver import TCPServer
    -from tornado.testing import AsyncTestCase, ExpectLog, bind_unused_port, gen_test
    -from tornado.test.util import unittest, skipBefore35, exec_test, ignore_deprecation
    -
    -
    -try:
    -    from concurrent import futures
    -except ImportError:
    -    futures = None
    -
    -
    -class MiscFutureTest(AsyncTestCase):
    -
    -    def test_future_set_result_unless_cancelled(self):
    -        fut = Future()
    -        future_set_result_unless_cancelled(fut, 42)
    -        self.assertEqual(fut.result(), 42)
    -        self.assertFalse(fut.cancelled())
    -
    -        fut = Future()
    -        fut.cancel()
    -        is_cancelled = fut.cancelled()
    -        future_set_result_unless_cancelled(fut, 42)
    -        self.assertEqual(fut.cancelled(), is_cancelled)
    -        if not is_cancelled:
    -            self.assertEqual(fut.result(), 42)
    -
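The helper under test exists to make the cancelled case safe: a plain `set_result()` on a cancelled future raises `InvalidStateError`. A compressed restatement, assuming Python 3 where cancellation is supported:

```python
# future_set_result_unless_cancelled: set the result only when the future
# has not been cancelled, instead of raising InvalidStateError.
from tornado.concurrent import Future, future_set_result_unless_cancelled

fut = Future()
fut.cancel()
future_set_result_unless_cancelled(fut, 42)  # silently skipped: cancelled
assert fut.cancelled()
```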
    -
    -class ReturnFutureTest(AsyncTestCase):
    -    with ignore_deprecation():
    -        @return_future
    -        def sync_future(self, callback):
    -            callback(42)
    -
    -        @return_future
    -        def async_future(self, callback):
    -            self.io_loop.add_callback(callback, 42)
    -
    -        @return_future
    -        def immediate_failure(self, callback):
    -            1 / 0
    -
    -        @return_future
    -        def delayed_failure(self, callback):
    -            self.io_loop.add_callback(lambda: 1 / 0)
    -
    -        @return_future
    -        def return_value(self, callback):
    -            # Note that the result of both running the callback and returning
    -            # a value (or raising an exception) is unspecified; with current
    -            # implementations the last event prior to callback resolution wins.
    -            return 42
    -
    -        @return_future
    -        def no_result_future(self, callback):
    -            callback()
    -
    -    def test_immediate_failure(self):
    -        with self.assertRaises(ZeroDivisionError):
    -            # The caller sees the error just like a normal function.
    -            self.immediate_failure(callback=self.stop)
    -        # The callback is not run because the function failed synchronously.
    -        self.io_loop.add_timeout(self.io_loop.time() + 0.05, self.stop)
    -        result = self.wait()
    -        self.assertIs(result, None)
    -
    -    def test_return_value(self):
    -        with self.assertRaises(ReturnValueIgnoredError):
    -            self.return_value(callback=self.stop)
    -
    -    def test_callback_kw(self):
    -        with ignore_deprecation():
    -            future = self.sync_future(callback=self.stop)
    -        result = self.wait()
    -        self.assertEqual(result, 42)
    -        self.assertEqual(future.result(), 42)
    -
    -    def test_callback_positional(self):
    -        # When the callback is passed in positionally, future_wrap shouldn't
    -        # add another callback in the kwargs.
    -        with ignore_deprecation():
    -            future = self.sync_future(self.stop)
    -        result = self.wait()
    -        self.assertEqual(result, 42)
    -        self.assertEqual(future.result(), 42)
    -
    -    def test_no_callback(self):
    -        future = self.sync_future()
    -        self.assertEqual(future.result(), 42)
    -
    -    def test_none_callback_kw(self):
    -        # explicitly pass None as callback
    -        future = self.sync_future(callback=None)
    -        self.assertEqual(future.result(), 42)
    -
    -    def test_none_callback_pos(self):
    -        future = self.sync_future(None)
    -        self.assertEqual(future.result(), 42)
    -
    -    def test_async_future(self):
    -        future = self.async_future()
    -        self.assertFalse(future.done())
    -        self.io_loop.add_future(future, self.stop)
    -        future2 = self.wait()
    -        self.assertIs(future, future2)
    -        self.assertEqual(future.result(), 42)
    -
    -    @gen_test
    -    def test_async_future_gen(self):
    -        result = yield self.async_future()
    -        self.assertEqual(result, 42)
    -
    -    def test_delayed_failure(self):
    -        future = self.delayed_failure()
    -        with ignore_deprecation():
    -            self.io_loop.add_future(future, self.stop)
    -            future2 = self.wait()
    -        self.assertIs(future, future2)
    -        with self.assertRaises(ZeroDivisionError):
    -            future.result()
    -
    -    def test_kw_only_callback(self):
    -        with ignore_deprecation():
    -            @return_future
    -            def f(**kwargs):
    -                kwargs['callback'](42)
    -        future = f()
    -        self.assertEqual(future.result(), 42)
    -
    -    def test_error_in_callback(self):
    -        with ignore_deprecation():
    -            self.sync_future(callback=lambda future: 1 / 0)
    -        # The exception gets caught by our StackContext and will be re-raised
    -        # when we wait.
    -        self.assertRaises(ZeroDivisionError, self.wait)
    -
    -    def test_no_result_future(self):
    -        with ignore_deprecation():
    -            future = self.no_result_future(self.stop)
    -        result = self.wait()
    -        self.assertIs(result, None)
    -        # result of this future is undefined, but not an error
    -        future.result()
    -
    -    def test_no_result_future_callback(self):
    -        with ignore_deprecation():
    -            future = self.no_result_future(callback=lambda: self.stop())
    -        result = self.wait()
    -        self.assertIs(result, None)
    -        future.result()
    -
    -    @gen_test
    -    def test_future_traceback_legacy(self):
    -        with ignore_deprecation():
    -            @return_future
    -            @gen.engine
    -            def f(callback):
    -                yield gen.Task(self.io_loop.add_callback)
    -                try:
    -                    1 / 0
    -                except ZeroDivisionError:
    -                    self.expected_frame = traceback.extract_tb(
    -                        sys.exc_info()[2], limit=1)[0]
    -                    raise
    -            try:
    -                yield f()
    -                self.fail("didn't get expected exception")
    -            except ZeroDivisionError:
    -                tb = traceback.extract_tb(sys.exc_info()[2])
    -                self.assertIn(self.expected_frame, tb)
    -
    -    @gen_test
    -    def test_future_traceback(self):
    -        @gen.coroutine
    -        def f():
    -            yield gen.moment
    -            try:
    -                1 / 0
    -            except ZeroDivisionError:
    -                self.expected_frame = traceback.extract_tb(
    -                    sys.exc_info()[2], limit=1)[0]
    -                raise
    -        try:
    -            yield f()
    -            self.fail("didn't get expected exception")
    -        except ZeroDivisionError:
    -            tb = traceback.extract_tb(sys.exc_info()[2])
    -            self.assertIn(self.expected_frame, tb)
    -
    -    @gen_test
    -    def test_uncaught_exception_log(self):
    -        if IOLoop.configured_class().__name__.endswith('AsyncIOLoop'):
    -            # Install an exception handler that mirrors our
    -            # non-asyncio logging behavior.
    -            def exc_handler(loop, context):
    -                app_log.error('%s: %s', context['message'],
    -                              type(context.get('exception')))
    -            self.io_loop.asyncio_loop.set_exception_handler(exc_handler)
    -
    -        @gen.coroutine
    -        def f():
    -            yield gen.moment
    -            1 / 0
    -
    -        g = f()
    -
    -        with ExpectLog(app_log,
    -                       "(?s)Future.* exception was never retrieved:"
    -                       ".*ZeroDivisionError"):
    -            yield gen.moment
    -            yield gen.moment
    -            # For some reason, TwistedIOLoop and pypy3 need a third iteration
    -            # in order to drain references to the future
    -            yield gen.moment
    -            del g
    -            gc.collect()  # for PyPy
    -
    -
    -# The following series of classes demonstrate and test various styles
    -# of use, with and without generators and futures.
    -
    -
    -class CapServer(TCPServer):
    -    @gen.coroutine
    -    def handle_stream(self, stream, address):
    -        data = yield stream.read_until(b"\n")
    -        data = to_unicode(data)
    -        if data == data.upper():
    -            stream.write(b"error\talready capitalized\n")
    -        else:
    -            # data already has \n
    -            stream.write(utf8("ok\t%s" % data.upper()))
    -        stream.close()
    -
    -
    -class CapError(Exception):
    -    pass
    -
    -
    -class BaseCapClient(object):
    -    def __init__(self, port):
    -        self.port = port
    -
    -    def process_response(self, data):
    -        status, message = re.match('(.*)\t(.*)\n', to_unicode(data)).groups()
    -        if status == 'ok':
    -            return message
    -        else:
    -            raise CapError(message)
    -
    -
    -class ManualCapClient(BaseCapClient):
    -    def capitalize(self, request_data, callback=None):
    -        logging.debug("capitalize")
    -        self.request_data = request_data
    -        self.stream = IOStream(socket.socket())
    -        self.stream.connect(('127.0.0.1', self.port),
    -                            callback=self.handle_connect)
    -        self.future = Future()
    -        if callback is not None:
    -            self.future.add_done_callback(
    -                stack_context.wrap(lambda future: callback(future.result())))
    -        return self.future
    -
    -    def handle_connect(self):
    -        logging.debug("handle_connect")
    -        self.stream.write(utf8(self.request_data + "\n"))
    -        self.stream.read_until(b'\n', callback=self.handle_read)
    -
    -    def handle_read(self, data):
    -        logging.debug("handle_read")
    -        self.stream.close()
    -        try:
    -            self.future.set_result(self.process_response(data))
    -        except CapError as e:
    -            self.future.set_exception(e)
    -
    -
    -class DecoratorCapClient(BaseCapClient):
    -    with ignore_deprecation():
    -        @return_future
    -        def capitalize(self, request_data, callback):
    -            logging.debug("capitalize")
    -            self.request_data = request_data
    -            self.stream = IOStream(socket.socket())
    -            self.stream.connect(('127.0.0.1', self.port),
    -                                callback=self.handle_connect)
    -            self.callback = callback
    -
    -    def handle_connect(self):
    -        logging.debug("handle_connect")
    -        self.stream.write(utf8(self.request_data + "\n"))
    -        self.stream.read_until(b'\n', callback=self.handle_read)
    -
    -    def handle_read(self, data):
    -        logging.debug("handle_read")
    -        self.stream.close()
    -        self.callback(self.process_response(data))
    -
    -
    -class GeneratorCapClient(BaseCapClient):
    -    @gen.coroutine
    -    def capitalize(self, request_data):
    -        logging.debug('capitalize')
    -        stream = IOStream(socket.socket())
    -        logging.debug('connecting')
    -        yield stream.connect(('127.0.0.1', self.port))
    -        stream.write(utf8(request_data + '\n'))
    -        logging.debug('reading')
    -        data = yield stream.read_until(b'\n')
    -        logging.debug('returning')
    -        stream.close()
    -        raise gen.Return(self.process_response(data))
    -
    -
    -class ClientTestMixin(object):
    -    def setUp(self):
    -        super(ClientTestMixin, self).setUp()  # type: ignore
    -        self.server = CapServer()
    -        sock, port = bind_unused_port()
    -        self.server.add_sockets([sock])
    -        self.client = self.client_class(port=port)
    -
    -    def tearDown(self):
    -        self.server.stop()
    -        super(ClientTestMixin, self).tearDown()  # type: ignore
    -
    -    def test_callback(self):
    -        with ignore_deprecation():
    -            self.client.capitalize("hello", callback=self.stop)
    -        result = self.wait()
    -        self.assertEqual(result, "HELLO")
    -
    -    def test_callback_error(self):
    -        with ignore_deprecation():
    -            self.client.capitalize("HELLO", callback=self.stop)
    -            self.assertRaisesRegexp(CapError, "already capitalized", self.wait)
    -
    -    def test_future(self):
    -        future = self.client.capitalize("hello")
    -        self.io_loop.add_future(future, self.stop)
    -        self.wait()
    -        self.assertEqual(future.result(), "HELLO")
    -
    -    def test_future_error(self):
    -        future = self.client.capitalize("HELLO")
    -        self.io_loop.add_future(future, self.stop)
    -        self.wait()
    -        self.assertRaisesRegexp(CapError, "already capitalized", future.result)
    -
    -    def test_generator(self):
    -        @gen.coroutine
    -        def f():
    -            result = yield self.client.capitalize("hello")
    -            self.assertEqual(result, "HELLO")
    -        self.io_loop.run_sync(f)
    -
    -    def test_generator_error(self):
    -        @gen.coroutine
    -        def f():
    -            with self.assertRaisesRegexp(CapError, "already capitalized"):
    -                yield self.client.capitalize("HELLO")
    -        self.io_loop.run_sync(f)
    -
    -
    -class ManualClientTest(ClientTestMixin, AsyncTestCase):
    -    client_class = ManualCapClient
    -
    -    def setUp(self):
    -        self.warning_catcher = warnings.catch_warnings()
    -        self.warning_catcher.__enter__()
    -        warnings.simplefilter('ignore', DeprecationWarning)
    -        super(ManualClientTest, self).setUp()
    -
    -    def tearDown(self):
    -        super(ManualClientTest, self).tearDown()
    -        self.warning_catcher.__exit__(None, None, None)
    -
    -
    -class DecoratorClientTest(ClientTestMixin, AsyncTestCase):
    -    client_class = DecoratorCapClient
    -
    -    def setUp(self):
    -        self.warning_catcher = warnings.catch_warnings()
    -        self.warning_catcher.__enter__()
    -        warnings.simplefilter('ignore', DeprecationWarning)
    -        super(DecoratorClientTest, self).setUp()
    -
    -    def tearDown(self):
    -        super(DecoratorClientTest, self).tearDown()
    -        self.warning_catcher.__exit__(None, None, None)
    -
    -
    -class GeneratorClientTest(ClientTestMixin, AsyncTestCase):
    -    client_class = GeneratorCapClient
    -
    -
    -@unittest.skipIf(futures is None, "concurrent.futures module not present")
    -class RunOnExecutorTest(AsyncTestCase):
    -    @gen_test
    -    def test_no_calling(self):
    -        class Object(object):
    -            def __init__(self):
    -                self.executor = futures.thread.ThreadPoolExecutor(1)
    -
    -            @run_on_executor
    -            def f(self):
    -                return 42
    -
    -        o = Object()
    -        answer = yield o.f()
    -        self.assertEqual(answer, 42)
    -
    -    @gen_test
    -    def test_call_with_no_args(self):
    -        class Object(object):
    -            def __init__(self):
    -                self.executor = futures.thread.ThreadPoolExecutor(1)
    -
    -            @run_on_executor()
    -            def f(self):
    -                return 42
    -
    -        o = Object()
    -        answer = yield o.f()
    -        self.assertEqual(answer, 42)
    -
    -    @gen_test
    -    def test_call_with_executor(self):
    -        class Object(object):
    -            def __init__(self):
    -                self.__executor = futures.thread.ThreadPoolExecutor(1)
    -
    -            @run_on_executor(executor='_Object__executor')
    -            def f(self):
    -                return 42
    -
    -        o = Object()
    -        answer = yield o.f()
    -        self.assertEqual(answer, 42)
    -
    -    @skipBefore35
    -    @gen_test
    -    def test_async_await(self):
    -        class Object(object):
    -            def __init__(self):
    -                self.executor = futures.thread.ThreadPoolExecutor(1)
    -
    -            @run_on_executor()
    -            def f(self):
    -                return 42
    -
    -        o = Object()
    -        namespace = exec_test(globals(), locals(), """
    -        async def f():
    -            answer = await o.f()
    -            return answer
    -        """)
    -        result = yield namespace['f']()
    -        self.assertEqual(result, 42)
    -
    -
    -if __name__ == '__main__':
    -    unittest.main()
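For reference, the concurrent_test.py removed above exercised both Tornado's deprecated callback helpers (return_future and friends) and run_on_executor, which remains current API. Below is a minimal runnable sketch of the run_on_executor pattern those tests covered, assuming Tornado 5.x is importable; the Worker name is illustrative, not from this repo:

    from concurrent.futures import ThreadPoolExecutor
    from tornado import gen
    from tornado.concurrent import run_on_executor
    from tornado.ioloop import IOLoop

    class Worker(object):
        def __init__(self):
            # run_on_executor looks up self.executor by default; an alternate
            # attribute can be named via @run_on_executor(executor='name')
            self.executor = ThreadPoolExecutor(1)

        @run_on_executor
        def f(self):
            return 42  # runs on the thread pool; the caller receives a Future

    @gen.coroutine
    def main():
        answer = yield Worker().f()
        assert answer == 42

    IOLoop.current().run_sync(main)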
    diff --git a/lib/tornado/test/csv_translations/fr_FR.csv b/lib/tornado/test/csv_translations/fr_FR.csv
    deleted file mode 100755
    index 6321b6e7..00000000
    --- a/lib/tornado/test/csv_translations/fr_FR.csv
    +++ /dev/null
    @@ -1 +0,0 @@
    -"school","école"
    diff --git a/lib/tornado/test/curl_httpclient_test.py b/lib/tornado/test/curl_httpclient_test.py
    deleted file mode 100755
    index 4230d4cd..00000000
    --- a/lib/tornado/test/curl_httpclient_test.py
    +++ /dev/null
    @@ -1,153 +0,0 @@
    -# coding: utf-8
    -from __future__ import absolute_import, division, print_function
    -
    -from hashlib import md5
    -
    -from tornado.escape import utf8
    -from tornado.httpclient import HTTPRequest, HTTPClientError
    -from tornado.locks import Event
    -from tornado.stack_context import ExceptionStackContext
    -from tornado.testing import AsyncHTTPTestCase, gen_test
    -from tornado.test import httpclient_test
    -from tornado.test.util import unittest, ignore_deprecation
    -from tornado.web import Application, RequestHandler
    -
    -
    -try:
    -    import pycurl  # type: ignore
    -except ImportError:
    -    pycurl = None
    -
    -if pycurl is not None:
    -    from tornado.curl_httpclient import CurlAsyncHTTPClient
    -
    -
    -@unittest.skipIf(pycurl is None, "pycurl module not present")
    -class CurlHTTPClientCommonTestCase(httpclient_test.HTTPClientCommonTestCase):
    -    def get_http_client(self):
    -        client = CurlAsyncHTTPClient(defaults=dict(allow_ipv6=False))
    -        # make sure AsyncHTTPClient magic doesn't give us the wrong class
    -        self.assertTrue(isinstance(client, CurlAsyncHTTPClient))
    -        return client
    -
    -
    -class DigestAuthHandler(RequestHandler):
    -    def initialize(self, username, password):
    -        self.username = username
    -        self.password = password
    -
    -    def get(self):
    -        realm = 'test'
    -        opaque = 'asdf'
    -        # Real implementations would use a random nonce.
    -        nonce = "1234"
    -
    -        auth_header = self.request.headers.get('Authorization', None)
    -        if auth_header is not None:
    -            auth_mode, params = auth_header.split(' ', 1)
    -            assert auth_mode == 'Digest'
    -            param_dict = {}
    -            for pair in params.split(','):
    -                k, v = pair.strip().split('=', 1)
    -                if v[0] == '"' and v[-1] == '"':
    -                    v = v[1:-1]
    -                param_dict[k] = v
    -            assert param_dict['realm'] == realm
    -            assert param_dict['opaque'] == opaque
    -            assert param_dict['nonce'] == nonce
    -            assert param_dict['username'] == self.username
    -            assert param_dict['uri'] == self.request.path
    -            h1 = md5(utf8('%s:%s:%s' % (self.username, realm, self.password))).hexdigest()
    -            h2 = md5(utf8('%s:%s' % (self.request.method,
    -                                     self.request.path))).hexdigest()
    -            digest = md5(utf8('%s:%s:%s' % (h1, nonce, h2))).hexdigest()
    -            if digest == param_dict['response']:
    -                self.write('ok')
    -            else:
    -                self.write('fail')
    -        else:
    -            self.set_status(401)
    -            self.set_header('WWW-Authenticate',
    -                            'Digest realm="%s", nonce="%s", opaque="%s"' %
    -                            (realm, nonce, opaque))
    -
    -
    -class CustomReasonHandler(RequestHandler):
    -    def get(self):
    -        self.set_status(200, "Custom reason")
    -
    -
    -class CustomFailReasonHandler(RequestHandler):
    -    def get(self):
    -        self.set_status(400, "Custom reason")
    -
    -
    -@unittest.skipIf(pycurl is None, "pycurl module not present")
    -class CurlHTTPClientTestCase(AsyncHTTPTestCase):
    -    def setUp(self):
    -        super(CurlHTTPClientTestCase, self).setUp()
    -        self.http_client = self.create_client()
    -
    -    def get_app(self):
    -        return Application([
    -            ('/digest', DigestAuthHandler, {'username': 'foo', 'password': 'bar'}),
    -            ('/digest_non_ascii', DigestAuthHandler, {'username': 'foo', 'password': 'barユ£'}),
    -            ('/custom_reason', CustomReasonHandler),
    -            ('/custom_fail_reason', CustomFailReasonHandler),
    -        ])
    -
    -    def create_client(self, **kwargs):
    -        return CurlAsyncHTTPClient(force_instance=True,
    -                                   defaults=dict(allow_ipv6=False),
    -                                   **kwargs)
    -
    -    @gen_test
    -    def test_prepare_curl_callback_stack_context(self):
    -        exc_info = []
    -        error_event = Event()
    -
    -        def error_handler(typ, value, tb):
    -            exc_info.append((typ, value, tb))
    -            error_event.set()
    -            return True
    -
    -        with ignore_deprecation():
    -            with ExceptionStackContext(error_handler):
    -                request = HTTPRequest(self.get_url('/custom_reason'),
    -                                      prepare_curl_callback=lambda curl: 1 / 0)
    -        yield [error_event.wait(), self.http_client.fetch(request)]
    -        self.assertEqual(1, len(exc_info))
    -        self.assertIs(exc_info[0][0], ZeroDivisionError)
    -
    -    def test_digest_auth(self):
    -        response = self.fetch('/digest', auth_mode='digest',
    -                              auth_username='foo', auth_password='bar')
    -        self.assertEqual(response.body, b'ok')
    -
    -    def test_custom_reason(self):
    -        response = self.fetch('/custom_reason')
    -        self.assertEqual(response.reason, "Custom reason")
    -
    -    def test_fail_custom_reason(self):
    -        response = self.fetch('/custom_fail_reason')
    -        self.assertEqual(str(response.error), "HTTP 400: Custom reason")
    -
    -    def test_failed_setup(self):
    -        self.http_client = self.create_client(max_clients=1)
    -        for i in range(5):
    -            with ignore_deprecation():
    -                response = self.fetch(u'/ユニコード')
    -            self.assertIsNot(response.error, None)
    -
    -            with self.assertRaises((UnicodeEncodeError, HTTPClientError)):
-                # This raises UnicodeEncodeError on py3 and
    -                # HTTPClientError(404) on py2. The main motivation of
    -                # this test is to ensure that the UnicodeEncodeError
    -                # during the setup phase doesn't lead the request to
    -                # be dropped on the floor.
    -                response = self.fetch(u'/ユニコード', raise_error=True)
    -
    -    def test_digest_auth_non_ascii(self):
    -        response = self.fetch('/digest_non_ascii', auth_mode='digest',
    -                              auth_username='foo', auth_password='barユ£')
    -        self.assertEqual(response.body, b'ok')
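The DigestAuthHandler deleted above documents the digest computation the curl client must reproduce. A standalone sketch of the same hash chain (RFC 2069 style, no qop), using the handler's hard-coded realm, nonce, and credentials:

    from hashlib import md5

    username, password = 'foo', 'bar'   # credentials from the removed test
    realm, nonce, method, uri = 'test', '1234', 'GET', '/digest'

    ha1 = md5(('%s:%s:%s' % (username, realm, password)).encode('utf-8')).hexdigest()
    ha2 = md5(('%s:%s' % (method, uri)).encode('utf-8')).hexdigest()
    response = md5(('%s:%s:%s' % (ha1, nonce, ha2)).encode('utf-8')).hexdigest()
    print(response)  # the value the handler compares against param_dict['response']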
    diff --git a/lib/tornado/test/escape_test.py b/lib/tornado/test/escape_test.py
    deleted file mode 100755
    index f2f2902a..00000000
    --- a/lib/tornado/test/escape_test.py
    +++ /dev/null
    @@ -1,250 +0,0 @@
    -from __future__ import absolute_import, division, print_function
    -
    -import tornado.escape
    -from tornado.escape import (
    -    utf8, xhtml_escape, xhtml_unescape, url_escape, url_unescape,
    -    to_unicode, json_decode, json_encode, squeeze, recursive_unicode,
    -)
    -from tornado.util import unicode_type
    -from tornado.test.util import unittest
    -
    -linkify_tests = [
    -    # (input, linkify_kwargs, expected_output)
    -
    -    ("hello http://world.com/!", {},
    -     u'hello http://world.com/!'),
    -
    -    ("hello http://world.com/with?param=true&stuff=yes", {},
    -     u'hello http://world.com/with?param=true&stuff=yes'),  # noqa: E501
    -
    -    # an opened paren followed by many chars killed Gruber's regex
    -    ("http://url.com/w(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", {},
    -     u'http://url.com/w(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'),  # noqa: E501
    -
    -    # as did too many dots at the end
    -    ("http://url.com/withmany.......................................", {},
    -     u'http://url.com/withmany.......................................'),  # noqa: E501
    -
    -    ("http://url.com/withmany((((((((((((((((((((((((((((((((((a)", {},
    -     u'http://url.com/withmany((((((((((((((((((((((((((((((((((a)'),  # noqa: E501
    -
    -    # some examples from http://daringfireball.net/2009/11/liberal_regex_for_matching_urls
    -    # plus a fex extras (such as multiple parentheses).
    -    ("http://foo.com/blah_blah", {},
    -     u'http://foo.com/blah_blah'),
    -
    -    ("http://foo.com/blah_blah/", {},
    -     u'http://foo.com/blah_blah/'),
    -
    -    ("(Something like http://foo.com/blah_blah)", {},
    -     u'(Something like http://foo.com/blah_blah)'),
    -
    -    ("http://foo.com/blah_blah_(wikipedia)", {},
    -     u'http://foo.com/blah_blah_(wikipedia)'),
    -
    -    ("http://foo.com/blah_(blah)_(wikipedia)_blah", {},
    -     u'http://foo.com/blah_(blah)_(wikipedia)_blah'),  # noqa: E501
    -
    -    ("(Something like http://foo.com/blah_blah_(wikipedia))", {},
    -     u'(Something like http://foo.com/blah_blah_(wikipedia))'),  # noqa: E501
    -
    -    ("http://foo.com/blah_blah.", {},
    -     u'http://foo.com/blah_blah.'),
    -
    -    ("http://foo.com/blah_blah/.", {},
    -     u'http://foo.com/blah_blah/.'),
    -
    -    ("", {},
    -     u'<http://foo.com/blah_blah>'),
    -
    -    ("", {},
    -     u'<http://foo.com/blah_blah/>'),
    -
    -    ("http://foo.com/blah_blah,", {},
    -     u'http://foo.com/blah_blah,'),
    -
    -    ("http://www.example.com/wpstyle/?p=364.", {},
    -     u'http://www.example.com/wpstyle/?p=364.'),
    -
    -    ("rdar://1234",
    -     {"permitted_protocols": ["http", "rdar"]},
    -     u'rdar://1234'),
    -
    -    ("rdar:/1234",
    -     {"permitted_protocols": ["rdar"]},
    -     u'rdar:/1234'),
    -
    -    ("http://userid:password@example.com:8080", {},
    -     u'http://userid:password@example.com:8080'),  # noqa: E501
    -
    -    ("http://userid@example.com", {},
    -     u'http://userid@example.com'),
    -
    -    ("http://userid@example.com:8080", {},
    -     u'http://userid@example.com:8080'),
    -
    -    ("http://userid:password@example.com", {},
    -     u'http://userid:password@example.com'),
    -
    -    ("message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e",
    -     {"permitted_protocols": ["http", "message"]},
    -     u''
    -     u'message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e'),
    -
    -    (u"http://\u27a1.ws/\u4a39", {},
    -     u'http://\u27a1.ws/\u4a39'),
    -
    -    ("http://example.com", {},
    -     u'<tag>http://example.com</tag>'),
    -
    -    ("Just a www.example.com link.", {},
    -     u'Just a www.example.com link.'),
    -
    -    ("Just a www.example.com link.",
    -     {"require_protocol": True},
    -     u'Just a www.example.com link.'),
    -
    -    ("A http://reallylong.com/link/that/exceedsthelenglimit.html",
    -     {"require_protocol": True, "shorten": True},
    -     u'A http://reallylong.com/link...'),  # noqa: E501
    -
    -    ("A http://reallylongdomainnamethatwillbetoolong.com/hi!",
    -     {"shorten": True},
    -     u'A http://reallylongdomainnametha...!'),  # noqa: E501
    -
    -    ("A file:///passwords.txt and http://web.com link", {},
    -     u'A file:///passwords.txt and http://web.com link'),
    -
    -    ("A file:///passwords.txt and http://web.com link",
    -     {"permitted_protocols": ["file"]},
    -     u'A file:///passwords.txt and http://web.com link'),
    -
    -    ("www.external-link.com",
    -     {"extra_params": 'rel="nofollow" class="external"'},
    -     u'www.external-link.com'),  # noqa: E501
    -
    -    ("www.external-link.com and www.internal-link.com/blogs extra",
    -     {"extra_params": lambda href: 'class="internal"' if href.startswith("http://www.internal-link.com") else 'rel="nofollow" class="external"'},  # noqa: E501
    -     u'www.external-link.com'            # noqa: E501
    -     u' and www.internal-link.com/blogs extra'),  # noqa: E501
    -
    -    ("www.external-link.com",
    -     {"extra_params": lambda href: '    rel="nofollow" class="external"  '},
    -     u'www.external-link.com'),  # noqa: E501
    -]
    -
    -
    -class EscapeTestCase(unittest.TestCase):
    -    def test_linkify(self):
    -        for text, kwargs, html in linkify_tests:
    -            linked = tornado.escape.linkify(text, **kwargs)
    -            self.assertEqual(linked, html)
    -
    -    def test_xhtml_escape(self):
    -        tests = [
    -            ("", "<foo>"),
    -            (u"", u"<foo>"),
    -            (b"", b"<foo>"),
    -
    -            ("<>&\"'", "<>&"'"),
    -            ("&", "&amp;"),
    -
    -            (u"<\u00e9>", u"<\u00e9>"),
    -            (b"<\xc3\xa9>", b"<\xc3\xa9>"),
    -        ]
    -        for unescaped, escaped in tests:
    -            self.assertEqual(utf8(xhtml_escape(unescaped)), utf8(escaped))
    -            self.assertEqual(utf8(unescaped), utf8(xhtml_unescape(escaped)))
    -
    -    def test_xhtml_unescape_numeric(self):
    -        tests = [
-            ('foo&#32;bar', 'foo bar'),
-            ('foo&#x20;bar', 'foo bar'),
-            ('foo&#X20;bar', 'foo bar'),
-            ('foo&#xabc;bar', u'foo\u0abcbar'),
    -            ('foo&#xyz;bar', 'foo&#xyz;bar'),  # invalid encoding
    -            ('foo&#;bar', 'foo&#;bar'),        # invalid encoding
    -            ('foo&#x;bar', 'foo&#x;bar'),      # invalid encoding
    -        ]
    -        for escaped, unescaped in tests:
    -            self.assertEqual(unescaped, xhtml_unescape(escaped))
    -
    -    def test_url_escape_unicode(self):
    -        tests = [
    -            # byte strings are passed through as-is
    -            (u'\u00e9'.encode('utf8'), '%C3%A9'),
    -            (u'\u00e9'.encode('latin1'), '%E9'),
    -
    -            # unicode strings become utf8
    -            (u'\u00e9', '%C3%A9'),
    -        ]
    -        for unescaped, escaped in tests:
    -            self.assertEqual(url_escape(unescaped), escaped)
    -
    -    def test_url_unescape_unicode(self):
    -        tests = [
    -            ('%C3%A9', u'\u00e9', 'utf8'),
    -            ('%C3%A9', u'\u00c3\u00a9', 'latin1'),
    -            ('%C3%A9', utf8(u'\u00e9'), None),
    -        ]
    -        for escaped, unescaped, encoding in tests:
    -            # input strings to url_unescape should only contain ascii
    -            # characters, but make sure the function accepts both byte
    -            # and unicode strings.
    -            self.assertEqual(url_unescape(to_unicode(escaped), encoding), unescaped)
    -            self.assertEqual(url_unescape(utf8(escaped), encoding), unescaped)
    -
    -    def test_url_escape_quote_plus(self):
    -        unescaped = '+ #%'
    -        plus_escaped = '%2B+%23%25'
    -        escaped = '%2B%20%23%25'
    -        self.assertEqual(url_escape(unescaped), plus_escaped)
    -        self.assertEqual(url_escape(unescaped, plus=False), escaped)
    -        self.assertEqual(url_unescape(plus_escaped), unescaped)
    -        self.assertEqual(url_unescape(escaped, plus=False), unescaped)
    -        self.assertEqual(url_unescape(plus_escaped, encoding=None),
    -                         utf8(unescaped))
    -        self.assertEqual(url_unescape(escaped, encoding=None, plus=False),
    -                         utf8(unescaped))
    -
    -    def test_escape_return_types(self):
    -        # On python2 the escape methods should generally return the same
    -        # type as their argument
    -        self.assertEqual(type(xhtml_escape("foo")), str)
    -        self.assertEqual(type(xhtml_escape(u"foo")), unicode_type)
    -
    -    def test_json_decode(self):
    -        # json_decode accepts both bytes and unicode, but strings it returns
    -        # are always unicode.
    -        self.assertEqual(json_decode(b'"foo"'), u"foo")
    -        self.assertEqual(json_decode(u'"foo"'), u"foo")
    -
    -        # Non-ascii bytes are interpreted as utf8
    -        self.assertEqual(json_decode(utf8(u'"\u00e9"')), u"\u00e9")
    -
    -    def test_json_encode(self):
    -        # json deals with strings, not bytes.  On python 2 byte strings will
    -        # convert automatically if they are utf8; on python 3 byte strings
    -        # are not allowed.
    -        self.assertEqual(json_decode(json_encode(u"\u00e9")), u"\u00e9")
    -        if bytes is str:
    -            self.assertEqual(json_decode(json_encode(utf8(u"\u00e9"))), u"\u00e9")
    -            self.assertRaises(UnicodeDecodeError, json_encode, b"\xe9")
    -
    -    def test_squeeze(self):
    -        self.assertEqual(squeeze(u'sequences     of    whitespace   chars'),
    -                         u'sequences of whitespace chars')
    -
    -    def test_recursive_unicode(self):
    -        tests = {
    -            'dict': {b"foo": b"bar"},
    -            'list': [b"foo", b"bar"],
    -            'tuple': (b"foo", b"bar"),
    -            'bytes': b"foo"
    -        }
    -        self.assertEqual(recursive_unicode(tests['dict']), {u"foo": u"bar"})
    -        self.assertEqual(recursive_unicode(tests['list']), [u"foo", u"bar"])
    -        self.assertEqual(recursive_unicode(tests['tuple']), (u"foo", u"bar"))
    -        self.assertEqual(recursive_unicode(tests['bytes']), u"foo")
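The escape_test.py removed above covered the tornado.escape helpers end to end. A few representative calls, with expected values taken straight from the deleted assertions (runnable against the Tornado copy this addon bundles):

    from tornado import escape

    assert escape.xhtml_escape("<foo>") == "&lt;foo&gt;"
    assert escape.url_escape("+ #%") == "%2B+%23%25"            # quote_plus by default
    assert escape.url_escape("+ #%", plus=False) == "%2B%20%23%25"
    assert escape.json_decode(b'"foo"') == u"foo"               # always returns unicode
    assert escape.squeeze(u'sequences     of    whitespace   chars') == u'sequences of whitespace chars'
    print(escape.linkify("hello http://world.com/!"))
    # hello <a href="http://world.com/">http://world.com/</a>!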
    diff --git a/lib/tornado/test/gen_test.py b/lib/tornado/test/gen_test.py
    deleted file mode 100755
    index 346968cf..00000000
    --- a/lib/tornado/test/gen_test.py
    +++ /dev/null
    @@ -1,1862 +0,0 @@
    -from __future__ import absolute_import, division, print_function
    -
    -import gc
    -import contextlib
    -import datetime
    -import functools
    -import platform
    -import sys
    -import textwrap
    -import time
    -import weakref
    -import warnings
    -
    -from tornado.concurrent import return_future, Future
    -from tornado.escape import url_escape
    -from tornado.httpclient import AsyncHTTPClient
    -from tornado.ioloop import IOLoop
    -from tornado.log import app_log
    -from tornado import stack_context
    -from tornado.testing import AsyncHTTPTestCase, AsyncTestCase, ExpectLog, gen_test
    -from tornado.test.util import unittest, skipOnTravis, skipBefore33, skipBefore35, skipNotCPython, exec_test, ignore_deprecation  # noqa: E501
    -from tornado.web import Application, RequestHandler, asynchronous, HTTPError
    -
    -from tornado import gen
    -
    -try:
    -    from concurrent import futures
    -except ImportError:
    -    futures = None
    -
    -try:
    -    import asyncio
    -except ImportError:
    -    asyncio = None
    -
    -
    -class GenEngineTest(AsyncTestCase):
    -    def setUp(self):
    -        self.warning_catcher = warnings.catch_warnings()
    -        self.warning_catcher.__enter__()
    -        warnings.simplefilter('ignore', DeprecationWarning)
    -        super(GenEngineTest, self).setUp()
    -        self.named_contexts = []
    -
    -    def tearDown(self):
    -        super(GenEngineTest, self).tearDown()
    -        self.warning_catcher.__exit__(None, None, None)
    -
    -    def named_context(self, name):
    -        @contextlib.contextmanager
    -        def context():
    -            self.named_contexts.append(name)
    -            try:
    -                yield
    -            finally:
    -                self.assertEqual(self.named_contexts.pop(), name)
    -        return context
    -
    -    def run_gen(self, f):
    -        f()
    -        return self.wait()
    -
    -    def delay_callback(self, iterations, callback, arg):
    -        """Runs callback(arg) after a number of IOLoop iterations."""
    -        if iterations == 0:
    -            callback(arg)
    -        else:
    -            self.io_loop.add_callback(functools.partial(
    -                self.delay_callback, iterations - 1, callback, arg))
    -
    -    with ignore_deprecation():
    -        @return_future
    -        def async_future(self, result, callback):
    -            self.io_loop.add_callback(callback, result)
    -
    -    @gen.coroutine
    -    def async_exception(self, e):
    -        yield gen.moment
    -        raise e
    -
    -    def test_no_yield(self):
    -        @gen.engine
    -        def f():
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_inline_cb(self):
    -        @gen.engine
    -        def f():
    -            (yield gen.Callback("k1"))()
    -            res = yield gen.Wait("k1")
    -            self.assertTrue(res is None)
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_ioloop_cb(self):
    -        @gen.engine
    -        def f():
    -            self.io_loop.add_callback((yield gen.Callback("k1")))
    -            yield gen.Wait("k1")
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_exception_phase1(self):
    -        @gen.engine
    -        def f():
    -            1 / 0
    -        self.assertRaises(ZeroDivisionError, self.run_gen, f)
    -
    -    def test_exception_phase2(self):
    -        @gen.engine
    -        def f():
    -            self.io_loop.add_callback((yield gen.Callback("k1")))
    -            yield gen.Wait("k1")
    -            1 / 0
    -        self.assertRaises(ZeroDivisionError, self.run_gen, f)
    -
    -    def test_exception_in_task_phase1(self):
    -        def fail_task(callback):
    -            1 / 0
    -
    -        @gen.engine
    -        def f():
    -            try:
    -                yield gen.Task(fail_task)
    -                raise Exception("did not get expected exception")
    -            except ZeroDivisionError:
    -                self.stop()
    -        self.run_gen(f)
    -
    -    def test_exception_in_task_phase2(self):
    -        # This is the case that requires the use of stack_context in gen.engine
    -        def fail_task(callback):
    -            self.io_loop.add_callback(lambda: 1 / 0)
    -
    -        @gen.engine
    -        def f():
    -            try:
    -                yield gen.Task(fail_task)
    -                raise Exception("did not get expected exception")
    -            except ZeroDivisionError:
    -                self.stop()
    -        self.run_gen(f)
    -
    -    def test_with_arg(self):
    -        @gen.engine
    -        def f():
    -            (yield gen.Callback("k1"))(42)
    -            res = yield gen.Wait("k1")
    -            self.assertEqual(42, res)
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_with_arg_tuple(self):
    -        @gen.engine
    -        def f():
    -            (yield gen.Callback((1, 2)))((3, 4))
    -            res = yield gen.Wait((1, 2))
    -            self.assertEqual((3, 4), res)
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_key_reuse(self):
    -        @gen.engine
    -        def f():
    -            yield gen.Callback("k1")
    -            yield gen.Callback("k1")
    -            self.stop()
    -        self.assertRaises(gen.KeyReuseError, self.run_gen, f)
    -
    -    def test_key_reuse_tuple(self):
    -        @gen.engine
    -        def f():
    -            yield gen.Callback((1, 2))
    -            yield gen.Callback((1, 2))
    -            self.stop()
    -        self.assertRaises(gen.KeyReuseError, self.run_gen, f)
    -
    -    def test_key_mismatch(self):
    -        @gen.engine
    -        def f():
    -            yield gen.Callback("k1")
    -            yield gen.Wait("k2")
    -            self.stop()
    -        self.assertRaises(gen.UnknownKeyError, self.run_gen, f)
    -
    -    def test_key_mismatch_tuple(self):
    -        @gen.engine
    -        def f():
    -            yield gen.Callback((1, 2))
    -            yield gen.Wait((2, 3))
    -            self.stop()
    -        self.assertRaises(gen.UnknownKeyError, self.run_gen, f)
    -
    -    def test_leaked_callback(self):
    -        @gen.engine
    -        def f():
    -            yield gen.Callback("k1")
    -            self.stop()
    -        self.assertRaises(gen.LeakedCallbackError, self.run_gen, f)
    -
    -    def test_leaked_callback_tuple(self):
    -        @gen.engine
    -        def f():
    -            yield gen.Callback((1, 2))
    -            self.stop()
    -        self.assertRaises(gen.LeakedCallbackError, self.run_gen, f)
    -
    -    def test_parallel_callback(self):
    -        @gen.engine
    -        def f():
    -            for k in range(3):
    -                self.io_loop.add_callback((yield gen.Callback(k)))
    -            yield gen.Wait(1)
    -            self.io_loop.add_callback((yield gen.Callback(3)))
    -            yield gen.Wait(0)
    -            yield gen.Wait(3)
    -            yield gen.Wait(2)
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_bogus_yield(self):
    -        @gen.engine
    -        def f():
    -            yield 42
    -        self.assertRaises(gen.BadYieldError, self.run_gen, f)
    -
    -    def test_bogus_yield_tuple(self):
    -        @gen.engine
    -        def f():
    -            yield (1, 2)
    -        self.assertRaises(gen.BadYieldError, self.run_gen, f)
    -
    -    def test_reuse(self):
    -        @gen.engine
    -        def f():
    -            self.io_loop.add_callback((yield gen.Callback(0)))
    -            yield gen.Wait(0)
    -            self.stop()
    -        self.run_gen(f)
    -        self.run_gen(f)
    -
    -    def test_task(self):
    -        @gen.engine
    -        def f():
    -            yield gen.Task(self.io_loop.add_callback)
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_wait_all(self):
    -        @gen.engine
    -        def f():
    -            (yield gen.Callback("k1"))("v1")
    -            (yield gen.Callback("k2"))("v2")
    -            results = yield gen.WaitAll(["k1", "k2"])
    -            self.assertEqual(results, ["v1", "v2"])
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_exception_in_yield(self):
    -        @gen.engine
    -        def f():
    -            try:
    -                yield gen.Wait("k1")
    -                raise Exception("did not get expected exception")
    -            except gen.UnknownKeyError:
    -                pass
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_resume_after_exception_in_yield(self):
    -        @gen.engine
    -        def f():
    -            try:
    -                yield gen.Wait("k1")
    -                raise Exception("did not get expected exception")
    -            except gen.UnknownKeyError:
    -                pass
    -            (yield gen.Callback("k2"))("v2")
    -            self.assertEqual((yield gen.Wait("k2")), "v2")
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_orphaned_callback(self):
    -        @gen.engine
    -        def f():
    -            self.orphaned_callback = yield gen.Callback(1)
    -        try:
    -            self.run_gen(f)
    -            raise Exception("did not get expected exception")
    -        except gen.LeakedCallbackError:
    -            pass
    -        self.orphaned_callback()
    -
    -    def test_none(self):
    -        @gen.engine
    -        def f():
    -            yield None
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_multi(self):
    -        @gen.engine
    -        def f():
    -            (yield gen.Callback("k1"))("v1")
    -            (yield gen.Callback("k2"))("v2")
    -            results = yield [gen.Wait("k1"), gen.Wait("k2")]
    -            self.assertEqual(results, ["v1", "v2"])
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_multi_dict(self):
    -        @gen.engine
    -        def f():
    -            (yield gen.Callback("k1"))("v1")
    -            (yield gen.Callback("k2"))("v2")
    -            results = yield dict(foo=gen.Wait("k1"), bar=gen.Wait("k2"))
    -            self.assertEqual(results, dict(foo="v1", bar="v2"))
    -            self.stop()
    -        self.run_gen(f)
    -
    -    # The following tests explicitly run with both gen.Multi
    -    # and gen.multi_future (Task returns a Future, so it can be used
    -    # with either).
    -    def test_multi_yieldpoint_delayed(self):
    -        @gen.engine
    -        def f():
    -            # callbacks run at different times
    -            responses = yield gen.Multi([
    -                gen.Task(self.delay_callback, 3, arg="v1"),
    -                gen.Task(self.delay_callback, 1, arg="v2"),
    -            ])
    -            self.assertEqual(responses, ["v1", "v2"])
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_multi_yieldpoint_dict_delayed(self):
    -        @gen.engine
    -        def f():
    -            # callbacks run at different times
    -            responses = yield gen.Multi(dict(
    -                foo=gen.Task(self.delay_callback, 3, arg="v1"),
    -                bar=gen.Task(self.delay_callback, 1, arg="v2"),
    -            ))
    -            self.assertEqual(responses, dict(foo="v1", bar="v2"))
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_multi_future_delayed(self):
    -        @gen.engine
    -        def f():
    -            # callbacks run at different times
    -            responses = yield gen.multi_future([
    -                gen.Task(self.delay_callback, 3, arg="v1"),
    -                gen.Task(self.delay_callback, 1, arg="v2"),
    -            ])
    -            self.assertEqual(responses, ["v1", "v2"])
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_multi_future_dict_delayed(self):
    -        @gen.engine
    -        def f():
    -            # callbacks run at different times
    -            responses = yield gen.multi_future(dict(
    -                foo=gen.Task(self.delay_callback, 3, arg="v1"),
    -                bar=gen.Task(self.delay_callback, 1, arg="v2"),
    -            ))
    -            self.assertEqual(responses, dict(foo="v1", bar="v2"))
    -            self.stop()
    -        self.run_gen(f)
    -
    -    @skipOnTravis
    -    @gen_test
    -    def test_multi_performance(self):
    -        # Yielding a list used to have quadratic performance; make
    -        # sure a large list stays reasonable.  On my laptop a list of
    -        # 2000 used to take 1.8s, now it takes 0.12.
    -        start = time.time()
    -        yield [gen.Task(self.io_loop.add_callback) for i in range(2000)]
    -        end = time.time()
    -        self.assertLess(end - start, 1.0)
    -
    -    @gen_test
    -    def test_multi_empty(self):
    -        # Empty lists or dicts should return the same type.
    -        x = yield []
    -        self.assertTrue(isinstance(x, list))
    -        y = yield {}
    -        self.assertTrue(isinstance(y, dict))
    -
    -    @gen_test
    -    def test_multi_mixed_types(self):
    -        # A YieldPoint (Wait) and Future (Task) can be combined
    -        # (and use the YieldPoint codepath)
    -        (yield gen.Callback("k1"))("v1")
    -        responses = yield [gen.Wait("k1"),
    -                           gen.Task(self.delay_callback, 3, arg="v2")]
    -        self.assertEqual(responses, ["v1", "v2"])
    -
    -    @gen_test
    -    def test_future(self):
    -        result = yield self.async_future(1)
    -        self.assertEqual(result, 1)
    -
    -    @gen_test
    -    def test_multi_future(self):
    -        results = yield [self.async_future(1), self.async_future(2)]
    -        self.assertEqual(results, [1, 2])
    -
    -    @gen_test
    -    def test_multi_future_duplicate(self):
    -        f = self.async_future(2)
    -        results = yield [self.async_future(1), f, self.async_future(3), f]
    -        self.assertEqual(results, [1, 2, 3, 2])
    -
    -    @gen_test
    -    def test_multi_dict_future(self):
    -        results = yield dict(foo=self.async_future(1), bar=self.async_future(2))
    -        self.assertEqual(results, dict(foo=1, bar=2))
    -
    -    @gen_test
    -    def test_multi_exceptions(self):
    -        with ExpectLog(app_log, "Multiple exceptions in yield list"):
    -            with self.assertRaises(RuntimeError) as cm:
    -                yield gen.Multi([self.async_exception(RuntimeError("error 1")),
    -                                 self.async_exception(RuntimeError("error 2"))])
    -        self.assertEqual(str(cm.exception), "error 1")
    -
    -        # With only one exception, no error is logged.
    -        with self.assertRaises(RuntimeError):
    -            yield gen.Multi([self.async_exception(RuntimeError("error 1")),
    -                             self.async_future(2)])
    -
    -        # Exception logging may be explicitly quieted.
    -        with self.assertRaises(RuntimeError):
    -            yield gen.Multi([self.async_exception(RuntimeError("error 1")),
    -                             self.async_exception(RuntimeError("error 2"))],
    -                            quiet_exceptions=RuntimeError)
    -
    -    @gen_test
    -    def test_multi_future_exceptions(self):
    -        with ExpectLog(app_log, "Multiple exceptions in yield list"):
    -            with self.assertRaises(RuntimeError) as cm:
    -                yield [self.async_exception(RuntimeError("error 1")),
    -                       self.async_exception(RuntimeError("error 2"))]
    -        self.assertEqual(str(cm.exception), "error 1")
    -
    -        # With only one exception, no error is logged.
    -        with self.assertRaises(RuntimeError):
    -            yield [self.async_exception(RuntimeError("error 1")),
    -                   self.async_future(2)]
    -
    -        # Exception logging may be explicitly quieted.
    -        with self.assertRaises(RuntimeError):
    -            yield gen.multi_future(
    -                [self.async_exception(RuntimeError("error 1")),
    -                 self.async_exception(RuntimeError("error 2"))],
    -                quiet_exceptions=RuntimeError)
    -
    -    def test_arguments(self):
    -        @gen.engine
    -        def f():
    -            (yield gen.Callback("noargs"))()
    -            self.assertEqual((yield gen.Wait("noargs")), None)
    -            (yield gen.Callback("1arg"))(42)
    -            self.assertEqual((yield gen.Wait("1arg")), 42)
    -
    -            (yield gen.Callback("kwargs"))(value=42)
    -            result = yield gen.Wait("kwargs")
    -            self.assertTrue(isinstance(result, gen.Arguments))
    -            self.assertEqual(((), dict(value=42)), result)
    -            self.assertEqual(dict(value=42), result.kwargs)
    -
    -            (yield gen.Callback("2args"))(42, 43)
    -            result = yield gen.Wait("2args")
    -            self.assertTrue(isinstance(result, gen.Arguments))
    -            self.assertEqual(((42, 43), {}), result)
    -            self.assertEqual((42, 43), result.args)
    -
    -            def task_func(callback):
    -                callback(None, error="foo")
    -            result = yield gen.Task(task_func)
    -            self.assertTrue(isinstance(result, gen.Arguments))
    -            self.assertEqual(((None,), dict(error="foo")), result)
    -
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_stack_context_leak(self):
    -        # regression test: repeated invocations of a gen-based
    -        # function should not result in accumulated stack_contexts
    -        def _stack_depth():
    -            head = stack_context._state.contexts[1]
    -            length = 0
    -
    -            while head is not None:
    -                length += 1
    -                head = head.old_contexts[1]
    -
    -            return length
    -
    -        @gen.engine
    -        def inner(callback):
    -            yield gen.Task(self.io_loop.add_callback)
    -            callback()
    -
    -        @gen.engine
    -        def outer():
    -            for i in range(10):
    -                yield gen.Task(inner)
    -
    -            stack_increase = _stack_depth() - initial_stack_depth
    -            self.assertTrue(stack_increase <= 2)
    -            self.stop()
    -        initial_stack_depth = _stack_depth()
    -        self.run_gen(outer)
    -
    -    def test_stack_context_leak_exception(self):
    -        # same as previous, but with a function that exits with an exception
    -        @gen.engine
    -        def inner(callback):
    -            yield gen.Task(self.io_loop.add_callback)
    -            1 / 0
    -
    -        @gen.engine
    -        def outer():
    -            for i in range(10):
    -                try:
    -                    yield gen.Task(inner)
    -                except ZeroDivisionError:
    -                    pass
    -            stack_increase = len(stack_context._state.contexts) - initial_stack_depth
    -            self.assertTrue(stack_increase <= 2)
    -            self.stop()
    -        initial_stack_depth = len(stack_context._state.contexts)
    -        self.run_gen(outer)
    -
    -    def function_with_stack_context(self, callback):
    -        # Technically this function should stack_context.wrap its callback
    -        # upon entry.  However, it is very common for this step to be
    -        # omitted.
    -        def step2():
    -            self.assertEqual(self.named_contexts, ['a'])
    -            self.io_loop.add_callback(callback)
    -
    -        with stack_context.StackContext(self.named_context('a')):
    -            self.io_loop.add_callback(step2)
    -
    -    @gen_test
    -    def test_wait_transfer_stack_context(self):
    -        # Wait should not pick up contexts from where callback was invoked,
    -        # even if that function improperly fails to wrap its callback.
    -        cb = yield gen.Callback('k1')
    -        self.function_with_stack_context(cb)
    -        self.assertEqual(self.named_contexts, [])
    -        yield gen.Wait('k1')
    -        self.assertEqual(self.named_contexts, [])
    -
    -    @gen_test
    -    def test_task_transfer_stack_context(self):
    -        yield gen.Task(self.function_with_stack_context)
    -        self.assertEqual(self.named_contexts, [])
    -
    -    def test_raise_after_stop(self):
    -        # This pattern will be used in the following tests so make sure
    -        # the exception propagates as expected.
    -        @gen.engine
    -        def f():
    -            self.stop()
    -            1 / 0
    -
    -        with self.assertRaises(ZeroDivisionError):
    -            self.run_gen(f)
    -
    -    def test_sync_raise_return(self):
    -        # gen.Return is allowed in @gen.engine, but it may not be used
    -        # to return a value.
    -        @gen.engine
    -        def f():
    -            self.stop(42)
    -            raise gen.Return()
    -
    -        result = self.run_gen(f)
    -        self.assertEqual(result, 42)
    -
    -    def test_async_raise_return(self):
    -        @gen.engine
    -        def f():
    -            yield gen.Task(self.io_loop.add_callback)
    -            self.stop(42)
    -            raise gen.Return()
    -
    -        result = self.run_gen(f)
    -        self.assertEqual(result, 42)
    -
    -    def test_sync_raise_return_value(self):
    -        @gen.engine
    -        def f():
    -            raise gen.Return(42)
    -
    -        with self.assertRaises(gen.ReturnValueIgnoredError):
    -            self.run_gen(f)
    -
    -    def test_sync_raise_return_value_tuple(self):
    -        @gen.engine
    -        def f():
    -            raise gen.Return((1, 2))
    -
    -        with self.assertRaises(gen.ReturnValueIgnoredError):
    -            self.run_gen(f)
    -
    -    def test_async_raise_return_value(self):
    -        @gen.engine
    -        def f():
    -            yield gen.Task(self.io_loop.add_callback)
    -            raise gen.Return(42)
    -
    -        with self.assertRaises(gen.ReturnValueIgnoredError):
    -            self.run_gen(f)
    -
    -    def test_async_raise_return_value_tuple(self):
    -        @gen.engine
    -        def f():
    -            yield gen.Task(self.io_loop.add_callback)
    -            raise gen.Return((1, 2))
    -
    -        with self.assertRaises(gen.ReturnValueIgnoredError):
    -            self.run_gen(f)
    -
    -    def test_return_value(self):
    -        # It is an error to apply @gen.engine to a function that returns
    -        # a value.
    -        @gen.engine
    -        def f():
    -            return 42
    -
    -        with self.assertRaises(gen.ReturnValueIgnoredError):
    -            self.run_gen(f)
    -
    -    def test_return_value_tuple(self):
    -        # It is an error to apply @gen.engine to a function that returns
    -        # a value.
    -        @gen.engine
    -        def f():
    -            return (1, 2)
    -
    -        with self.assertRaises(gen.ReturnValueIgnoredError):
    -            self.run_gen(f)
    -
    -    @skipNotCPython
    -    def test_task_refcounting(self):
    -        # On CPython, tasks and their arguments should be released immediately
    -        # without waiting for garbage collection.
    -        @gen.engine
    -        def f():
    -            class Foo(object):
    -                pass
    -            arg = Foo()
    -            self.arg_ref = weakref.ref(arg)
    -            task = gen.Task(self.io_loop.add_callback, arg=arg)
    -            self.task_ref = weakref.ref(task)
    -            yield task
    -            self.stop()
    -
    -        self.run_gen(f)
    -        self.assertIs(self.arg_ref(), None)
    -        self.assertIs(self.task_ref(), None)
    -
    -
    -# GenBasicTest duplicates the non-deprecated portions of GenEngineTest
    -# with gen.coroutine to ensure we don't lose coverage when gen.engine
    -# goes away.
    -class GenBasicTest(AsyncTestCase):
    -    @gen.coroutine
    -    def delay(self, iterations, arg):
    -        """Returns arg after a number of IOLoop iterations."""
    -        for i in range(iterations):
    -            yield gen.moment
    -        raise gen.Return(arg)
    -
    -    with ignore_deprecation():
    -        @return_future
    -        def async_future(self, result, callback):
    -            self.io_loop.add_callback(callback, result)
    -
    -    @gen.coroutine
    -    def async_exception(self, e):
    -        yield gen.moment
    -        raise e
    -
    -    @gen.coroutine
    -    def add_one_async(self, x):
    -        yield gen.moment
    -        raise gen.Return(x + 1)
    -
    -    def test_no_yield(self):
    -        @gen.coroutine
    -        def f():
    -            pass
    -        self.io_loop.run_sync(f)
    -
    -    def test_exception_phase1(self):
    -        @gen.coroutine
    -        def f():
    -            1 / 0
    -        self.assertRaises(ZeroDivisionError, self.io_loop.run_sync, f)
    -
    -    def test_exception_phase2(self):
    -        @gen.coroutine
    -        def f():
    -            yield gen.moment
    -            1 / 0
    -        self.assertRaises(ZeroDivisionError, self.io_loop.run_sync, f)
    -
    -    def test_bogus_yield(self):
    -        @gen.coroutine
    -        def f():
    -            yield 42
    -        self.assertRaises(gen.BadYieldError, self.io_loop.run_sync, f)
    -
    -    def test_bogus_yield_tuple(self):
    -        @gen.coroutine
    -        def f():
    -            yield (1, 2)
    -        self.assertRaises(gen.BadYieldError, self.io_loop.run_sync, f)
    -
    -    def test_reuse(self):
    -        @gen.coroutine
    -        def f():
    -            yield gen.moment
    -        self.io_loop.run_sync(f)
    -        self.io_loop.run_sync(f)
    -
    -    def test_none(self):
    -        @gen.coroutine
    -        def f():
    -            yield None
    -        self.io_loop.run_sync(f)
    -
    -    def test_multi(self):
    -        @gen.coroutine
    -        def f():
    -            results = yield [self.add_one_async(1), self.add_one_async(2)]
    -            self.assertEqual(results, [2, 3])
    -        self.io_loop.run_sync(f)
    -
    -    def test_multi_dict(self):
    -        @gen.coroutine
    -        def f():
    -            results = yield dict(foo=self.add_one_async(1), bar=self.add_one_async(2))
    -            self.assertEqual(results, dict(foo=2, bar=3))
    -        self.io_loop.run_sync(f)
    -
    -    def test_multi_delayed(self):
    -        @gen.coroutine
    -        def f():
    -            # callbacks run at different times
    -            responses = yield gen.multi_future([
    -                self.delay(3, "v1"),
    -                self.delay(1, "v2"),
    -            ])
    -            self.assertEqual(responses, ["v1", "v2"])
    -        self.io_loop.run_sync(f)
    -
    -    def test_multi_dict_delayed(self):
    -        @gen.coroutine
    -        def f():
    -            # callbacks run at different times
    -            responses = yield gen.multi_future(dict(
    -                foo=self.delay(3, "v1"),
    -                bar=self.delay(1, "v2"),
    -            ))
    -            self.assertEqual(responses, dict(foo="v1", bar="v2"))
    -        self.io_loop.run_sync(f)
    -
    -    @skipOnTravis
    -    @gen_test
    -    def test_multi_performance(self):
    -        # Yielding a list used to have quadratic performance; make
    -        # sure a large list stays reasonable.  On my laptop a list of
    -        # 2000 used to take 1.8s, now it takes 0.12.
    -        start = time.time()
    -        yield [gen.moment for i in range(2000)]
    -        end = time.time()
    -        self.assertLess(end - start, 1.0)
    -
    -    @gen_test
    -    def test_multi_empty(self):
    -        # Empty lists or dicts should return the same type.
    -        x = yield []
    -        self.assertTrue(isinstance(x, list))
    -        y = yield {}
    -        self.assertTrue(isinstance(y, dict))
    -
    -    @gen_test
    -    def test_future(self):
    -        result = yield self.async_future(1)
    -        self.assertEqual(result, 1)
    -
    -    @gen_test
    -    def test_multi_future(self):
    -        results = yield [self.async_future(1), self.async_future(2)]
    -        self.assertEqual(results, [1, 2])
    -
    -    @gen_test
    -    def test_multi_future_duplicate(self):
    -        f = self.async_future(2)
    -        results = yield [self.async_future(1), f, self.async_future(3), f]
    -        self.assertEqual(results, [1, 2, 3, 2])
    -
    -    @gen_test
    -    def test_multi_dict_future(self):
    -        results = yield dict(foo=self.async_future(1), bar=self.async_future(2))
    -        self.assertEqual(results, dict(foo=1, bar=2))
    -
    -    @gen_test
    -    def test_multi_exceptions(self):
    -        with ExpectLog(app_log, "Multiple exceptions in yield list"):
    -            with self.assertRaises(RuntimeError) as cm:
    -                yield gen.Multi([self.async_exception(RuntimeError("error 1")),
    -                                 self.async_exception(RuntimeError("error 2"))])
    -        self.assertEqual(str(cm.exception), "error 1")
    -
    -        # With only one exception, no error is logged.
    -        with self.assertRaises(RuntimeError):
    -            yield gen.Multi([self.async_exception(RuntimeError("error 1")),
    -                             self.async_future(2)])
    -
    -        # Exception logging may be explicitly quieted.
    -        with self.assertRaises(RuntimeError):
    -            yield gen.Multi([self.async_exception(RuntimeError("error 1")),
    -                             self.async_exception(RuntimeError("error 2"))],
    -                            quiet_exceptions=RuntimeError)
    -
    -    @gen_test
    -    def test_multi_future_exceptions(self):
    -        with ExpectLog(app_log, "Multiple exceptions in yield list"):
    -            with self.assertRaises(RuntimeError) as cm:
    -                yield [self.async_exception(RuntimeError("error 1")),
    -                       self.async_exception(RuntimeError("error 2"))]
    -        self.assertEqual(str(cm.exception), "error 1")
    -
    -        # With only one exception, no error is logged.
    -        with self.assertRaises(RuntimeError):
    -            yield [self.async_exception(RuntimeError("error 1")),
    -                   self.async_future(2)]
    -
    -        # Exception logging may be explicitly quieted.
    -        with self.assertRaises(RuntimeError):
    -            yield gen.multi_future(
    -                [self.async_exception(RuntimeError("error 1")),
    -                 self.async_exception(RuntimeError("error 2"))],
    -                quiet_exceptions=RuntimeError)
    -
    -    def test_sync_raise_return(self):
    -        @gen.coroutine
    -        def f():
    -            raise gen.Return()
    -
    -        self.io_loop.run_sync(f)
    -
    -    def test_async_raise_return(self):
    -        @gen.coroutine
    -        def f():
    -            yield gen.moment
    -            raise gen.Return()
    -
    -        self.io_loop.run_sync(f)
    -
    -    def test_sync_raise_return_value(self):
    -        @gen.coroutine
    -        def f():
    -            raise gen.Return(42)
    -
    -        self.assertEqual(42, self.io_loop.run_sync(f))
    -
    -    def test_sync_raise_return_value_tuple(self):
    -        @gen.coroutine
    -        def f():
    -            raise gen.Return((1, 2))
    -
    -        self.assertEqual((1, 2), self.io_loop.run_sync(f))
    -
    -    def test_async_raise_return_value(self):
    -        @gen.coroutine
    -        def f():
    -            yield gen.moment
    -            raise gen.Return(42)
    -
    -        self.assertEqual(42, self.io_loop.run_sync(f))
    -
    -    def test_async_raise_return_value_tuple(self):
    -        @gen.coroutine
    -        def f():
    -            yield gen.moment
    -            raise gen.Return((1, 2))
    -
    -        self.assertEqual((1, 2), self.io_loop.run_sync(f))
    -
    -
    -class GenCoroutineTest(AsyncTestCase):
    -    def setUp(self):
    -        # Stray StopIteration exceptions can lead to tests exiting prematurely,
    -        # so we need explicit checks here to make sure the tests run all
    -        # the way through.
    -        self.finished = False
    -        super(GenCoroutineTest, self).setUp()
    -
    -    def tearDown(self):
    -        super(GenCoroutineTest, self).tearDown()
    -        assert self.finished
    -
    -    def test_attributes(self):
    -        self.finished = True
    -
    -        def f():
    -            yield gen.moment
    -
    -        coro = gen.coroutine(f)
    -        self.assertEqual(coro.__name__, f.__name__)
    -        self.assertEqual(coro.__module__, f.__module__)
    -        self.assertIs(coro.__wrapped__, f)
    -
    -    def test_is_coroutine_function(self):
    -        self.finished = True
    -
    -        def f():
    -            yield gen.moment
    -
    -        coro = gen.coroutine(f)
    -        self.assertFalse(gen.is_coroutine_function(f))
    -        self.assertTrue(gen.is_coroutine_function(coro))
    -        self.assertFalse(gen.is_coroutine_function(coro()))
    -
    -    @gen_test
    -    def test_sync_gen_return(self):
    -        @gen.coroutine
    -        def f():
    -            raise gen.Return(42)
    -        result = yield f()
    -        self.assertEqual(result, 42)
    -        self.finished = True
    -
    -    @gen_test
    -    def test_async_gen_return(self):
    -        @gen.coroutine
    -        def f():
    -            yield gen.moment
    -            raise gen.Return(42)
    -        result = yield f()
    -        self.assertEqual(result, 42)
    -        self.finished = True
    -
    -    @gen_test
    -    def test_sync_return(self):
    -        @gen.coroutine
    -        def f():
    -            return 42
    -        result = yield f()
    -        self.assertEqual(result, 42)
    -        self.finished = True
    -
    -    @skipBefore33
    -    @gen_test
    -    def test_async_return(self):
    -        namespace = exec_test(globals(), locals(), """
    -        @gen.coroutine
    -        def f():
    -            yield gen.moment
    -            return 42
    -        """)
    -        result = yield namespace['f']()
    -        self.assertEqual(result, 42)
    -        self.finished = True
    -
    -    @skipBefore33
    -    @gen_test
    -    def test_async_early_return(self):
    -        # A yield statement exists but is not executed, which means
    -        # this function "returns" via an exception.  This exception
    -        # doesn't happen before the exception handling is set up.
    -        namespace = exec_test(globals(), locals(), """
    -        @gen.coroutine
    -        def f():
    -            if True:
    -                return 42
    -            yield gen.Task(self.io_loop.add_callback)
    -        """)
    -        result = yield namespace['f']()
    -        self.assertEqual(result, 42)
    -        self.finished = True
    -
    -    @skipBefore35
    -    @gen_test
    -    def test_async_await(self):
    -        @gen.coroutine
    -        def f1():
    -            yield gen.moment
    -            raise gen.Return(42)
    -
    -        # This test verifies that an async function can await a
    -        # yield-based gen.coroutine, and that a gen.coroutine
    -        # (the test method itself) can yield an async function.
    -        namespace = exec_test(globals(), locals(), """
    -        async def f2():
    -            result = await f1()
    -            return result
    -        """)
    -        result = yield namespace['f2']()
    -        self.assertEqual(result, 42)
    -        self.finished = True
    -
    -    @skipBefore35
    -    @gen_test
    -    def test_asyncio_sleep_zero(self):
    -        # asyncio.sleep(0) turns into a special case (equivalent to
    -        # `yield None`)
    -        namespace = exec_test(globals(), locals(), """
    -        async def f():
    -            import asyncio
    -            await asyncio.sleep(0)
    -            return 42
    -        """)
    -        result = yield namespace['f']()
    -        self.assertEqual(result, 42)
    -        self.finished = True
    -
    -    @skipBefore35
    -    @gen_test
    -    def test_async_await_mixed_multi_native_future(self):
    -        @gen.coroutine
    -        def f1():
    -            yield gen.moment
    -
    -        namespace = exec_test(globals(), locals(), """
    -        async def f2():
    -            await f1()
    -            return 42
    -        """)
    -
    -        @gen.coroutine
    -        def f3():
    -            yield gen.moment
    -            raise gen.Return(43)
    -
    -        results = yield [namespace['f2'](), f3()]
    -        self.assertEqual(results, [42, 43])
    -        self.finished = True
    -
    -    @skipBefore35
    -    @gen_test
    -    def test_async_await_mixed_multi_native_yieldpoint(self):
    -        namespace = exec_test(globals(), locals(), """
    -        async def f1():
    -            await gen.Task(self.io_loop.add_callback)
    -            return 42
    -        """)
    -
    -        @gen.coroutine
    -        def f2():
    -            yield gen.Task(self.io_loop.add_callback)
    -            raise gen.Return(43)
    -
    -        with ignore_deprecation():
    -            f2(callback=(yield gen.Callback('cb')))
    -            results = yield [namespace['f1'](), gen.Wait('cb')]
    -        self.assertEqual(results, [42, 43])
    -        self.finished = True
    -
    -    @skipBefore35
    -    @gen_test
    -    def test_async_with_timeout(self):
    -        namespace = exec_test(globals(), locals(), """
    -        async def f1():
    -            return 42
    -        """)
    -
    -        result = yield gen.with_timeout(datetime.timedelta(hours=1),
    -                                        namespace['f1']())
    -        self.assertEqual(result, 42)
    -        self.finished = True
    -
    -    @gen_test
    -    def test_sync_return_no_value(self):
    -        @gen.coroutine
    -        def f():
    -            return
    -        result = yield f()
    -        self.assertEqual(result, None)
    -        self.finished = True
    -
    -    @gen_test
    -    def test_async_return_no_value(self):
    -        # Without a return value we don't need python 3.3.
    -        @gen.coroutine
    -        def f():
    -            yield gen.moment
    -            return
    -        result = yield f()
    -        self.assertEqual(result, None)
    -        self.finished = True
    -
    -    @gen_test
    -    def test_sync_raise(self):
    -        @gen.coroutine
    -        def f():
    -            1 / 0
    -        # The exception is raised when the future is yielded
    -        # (or equivalently when its result method is called),
    -        # not when the function itself is called.
    -        future = f()
    -        with self.assertRaises(ZeroDivisionError):
    -            yield future
    -        self.finished = True
    -
    -    @gen_test
    -    def test_async_raise(self):
    -        @gen.coroutine
    -        def f():
    -            yield gen.moment
    -            1 / 0
    -        future = f()
    -        with self.assertRaises(ZeroDivisionError):
    -            yield future
    -        self.finished = True
    -
    -    @gen_test
    -    def test_pass_callback(self):
    -        with ignore_deprecation():
    -            @gen.coroutine
    -            def f():
    -                raise gen.Return(42)
    -            result = yield gen.Task(f)
    -        self.assertEqual(result, 42)
    -        self.finished = True
    -
    -    @gen_test
    -    def test_replace_yieldpoint_exception(self):
    -        # Test exception handling: a coroutine can catch one exception
    -        # raised by a yield point and raise a different one.
    -        @gen.coroutine
    -        def f1():
    -            1 / 0
    -
    -        @gen.coroutine
    -        def f2():
    -            try:
    -                yield f1()
    -            except ZeroDivisionError:
    -                raise KeyError()
    -
    -        future = f2()
    -        with self.assertRaises(KeyError):
    -            yield future
    -        self.finished = True
    -
    -    @gen_test
    -    def test_swallow_yieldpoint_exception(self):
    -        # Test exception handling: a coroutine can catch an exception
    -        # raised by a yield point and not raise a different one.
    -        @gen.coroutine
    -        def f1():
    -            1 / 0
    -
    -        @gen.coroutine
    -        def f2():
    -            try:
    -                yield f1()
    -            except ZeroDivisionError:
    -                raise gen.Return(42)
    -
    -        result = yield f2()
    -        self.assertEqual(result, 42)
    -        self.finished = True
    -
    -    @gen_test
    -    def test_replace_context_exception(self):
    -        with ignore_deprecation():
    -            # Test exception handling: exceptions thrown into the stack context
    -            # can be caught and replaced.
    -            # Note that this test and the following are for behavior that is
    -            # not really supported any more:  coroutines no longer create a
    -            # stack context automatically; but one is created after the first
    -            # YieldPoint (i.e. not a Future).
    -            @gen.coroutine
    -            def f2():
    -                (yield gen.Callback(1))()
    -                yield gen.Wait(1)
    -                self.io_loop.add_callback(lambda: 1 / 0)
    -                try:
    -                    yield gen.Task(self.io_loop.add_timeout,
    -                                   self.io_loop.time() + 10)
    -                except ZeroDivisionError:
    -                    raise KeyError()
    -
    -            future = f2()
    -            with self.assertRaises(KeyError):
    -                yield future
    -            self.finished = True
    -
    -    @gen_test
    -    def test_swallow_context_exception(self):
    -        with ignore_deprecation():
    -            # Test exception handling: exceptions thrown into the stack context
    -            # can be caught and ignored.
    -            @gen.coroutine
    -            def f2():
    -                (yield gen.Callback(1))()
    -                yield gen.Wait(1)
    -                self.io_loop.add_callback(lambda: 1 / 0)
    -                try:
    -                    yield gen.Task(self.io_loop.add_timeout,
    -                                   self.io_loop.time() + 10)
    -                except ZeroDivisionError:
    -                    raise gen.Return(42)
    -
    -            result = yield f2()
    -            self.assertEqual(result, 42)
    -            self.finished = True
    -
    -    @gen_test
    -    def test_moment(self):
    -        calls = []
    -
    -        @gen.coroutine
    -        def f(name, yieldable):
    -            for i in range(5):
    -                calls.append(name)
    -                yield yieldable
    -        # First, confirm the behavior without moment: each coroutine
    -        # monopolizes the event loop until it finishes.
    -        immediate = Future()
    -        immediate.set_result(None)
    -        yield [f('a', immediate), f('b', immediate)]
    -        self.assertEqual(''.join(calls), 'aaaaabbbbb')
    -
    -        # With moment, they take turns.
    -        calls = []
    -        yield [f('a', gen.moment), f('b', gen.moment)]
    -        self.assertEqual(''.join(calls), 'ababababab')
    -        self.finished = True
    -
    -        calls = []
    -        yield [f('a', gen.moment), f('b', immediate)]
    -        self.assertEqual(''.join(calls), 'abbbbbaaaa')
    -
    -    @gen_test
    -    def test_sleep(self):
    -        yield gen.sleep(0.01)
    -        self.finished = True
    -
    -    @skipBefore33
    -    @gen_test
    -    def test_py3_leak_exception_context(self):
    -        class LeakedException(Exception):
    -            pass
    -
    -        @gen.coroutine
    -        def inner(iteration):
    -            raise LeakedException(iteration)
    -
    -        try:
    -            yield inner(1)
    -        except LeakedException as e:
    -            self.assertEqual(str(e), "1")
    -            self.assertIsNone(e.__context__)
    -
    -        try:
    -            yield inner(2)
    -        except LeakedException as e:
    -            self.assertEqual(str(e), "2")
    -            self.assertIsNone(e.__context__)
    -
    -        self.finished = True
    -
    -    @skipNotCPython
    -    @unittest.skipIf((3,) < sys.version_info < (3, 6),
    -                     "asyncio.Future has reference cycles")
    -    def test_coroutine_refcounting(self):
    -        # On CPython, tasks and their arguments should be released immediately
    -        # without waiting for garbage collection.
    -        @gen.coroutine
    -        def inner():
    -            class Foo(object):
    -                pass
    -            local_var = Foo()
    -            self.local_ref = weakref.ref(local_var)
    -            yield gen.coroutine(lambda: None)()
    -            raise ValueError('Some error')
    -
    -        @gen.coroutine
    -        def inner2():
    -            try:
    -                yield inner()
    -            except ValueError:
    -                pass
    -
    -        self.io_loop.run_sync(inner2, timeout=3)
    -
    -        self.assertIs(self.local_ref(), None)
    -        self.finished = True
    -
    -    @unittest.skipIf(sys.version_info < (3,),
    -                     "test only relevant with asyncio Futures")
    -    def test_asyncio_future_debug_info(self):
    -        self.finished = True
    -        # Enable debug mode
    -        asyncio_loop = asyncio.get_event_loop()
    -        self.addCleanup(asyncio_loop.set_debug, asyncio_loop.get_debug())
    -        asyncio_loop.set_debug(True)
    -
    -        def f():
    -            yield gen.moment
    -
    -        coro = gen.coroutine(f)()
    -        self.assertIsInstance(coro, asyncio.Future)
    -        # We expect the coroutine repr() to show the place where
    -        # it was instantiated
    -        expected = ("created at %s:%d"
    -                    % (__file__, f.__code__.co_firstlineno + 3))
    -        actual = repr(coro)
    -        self.assertIn(expected, actual)
    -
    -    @unittest.skipIf(asyncio is None, "asyncio module not present")
    -    @gen_test
    -    def test_asyncio_gather(self):
    -        # This demonstrates that tornado coroutines can be understood
    -        # by asyncio (this failed prior to Tornado 5.0).
    -        @gen.coroutine
    -        def f():
    -            yield gen.moment
    -            raise gen.Return(1)
    -
    -        ret = yield asyncio.gather(f(), f())
    -        self.assertEqual(ret, [1, 1])
    -        self.finished = True
    -
    -
    -class GenSequenceHandler(RequestHandler):
    -    with ignore_deprecation():
    -        @asynchronous
    -        @gen.engine
    -        def get(self):
    -            # The outer ignore_deprecation applies at definition time.
    -            # We need another for serving time.
    -            with ignore_deprecation():
    -                self.io_loop = self.request.connection.stream.io_loop
    -                self.io_loop.add_callback((yield gen.Callback("k1")))
    -                yield gen.Wait("k1")
    -                self.write("1")
    -                self.io_loop.add_callback((yield gen.Callback("k2")))
    -                yield gen.Wait("k2")
    -                self.write("2")
    -                # reuse an old key
    -                self.io_loop.add_callback((yield gen.Callback("k1")))
    -                yield gen.Wait("k1")
    -                self.finish("3")
    -
    -
    -class GenCoroutineSequenceHandler(RequestHandler):
    -    @gen.coroutine
    -    def get(self):
    -        yield gen.moment
    -        self.write("1")
    -        yield gen.moment
    -        self.write("2")
    -        yield gen.moment
    -        self.finish("3")
    -
    -
    -class GenCoroutineUnfinishedSequenceHandler(RequestHandler):
    -    @gen.coroutine
    -    def get(self):
    -        yield gen.moment
    -        self.write("1")
    -        yield gen.moment
    -        self.write("2")
    -        yield gen.moment
    -        # just write, don't finish
    -        self.write("3")
    -
    -
    -class GenTaskHandler(RequestHandler):
    -    @gen.coroutine
    -    def get(self):
    -        client = AsyncHTTPClient()
    -        with ignore_deprecation():
    -            response = yield gen.Task(client.fetch, self.get_argument('url'))
    -        response.rethrow()
    -        self.finish(b"got response: " + response.body)
    -
    -
    -class GenExceptionHandler(RequestHandler):
    -    with ignore_deprecation():
    -        @asynchronous
    -        @gen.engine
    -        def get(self):
    -            # This test depends on the order of the two decorators.
    -            io_loop = self.request.connection.stream.io_loop
    -            yield gen.Task(io_loop.add_callback)
    -            raise Exception("oops")
    -
    -
    -class GenCoroutineExceptionHandler(RequestHandler):
    -    @gen.coroutine
    -    def get(self):
    -        # This test depends on the order of the two decorators.
    -        io_loop = self.request.connection.stream.io_loop
    -        yield gen.Task(io_loop.add_callback)
    -        raise Exception("oops")
    -
    -
    -class GenYieldExceptionHandler(RequestHandler):
    -    @gen.coroutine
    -    def get(self):
    -        io_loop = self.request.connection.stream.io_loop
    -        # Test the interaction of the two stack_contexts.
    -        with ignore_deprecation():
    -            def fail_task(callback):
    -                io_loop.add_callback(lambda: 1 / 0)
    -            try:
    -                yield gen.Task(fail_task)
    -                raise Exception("did not get expected exception")
    -            except ZeroDivisionError:
    -                self.finish('ok')
    -
    -
    -# "Undecorated" here refers to the absence of @asynchronous.
    -class UndecoratedCoroutinesHandler(RequestHandler):
    -    @gen.coroutine
    -    def prepare(self):
    -        self.chunks = []
    -        yield gen.moment
    -        self.chunks.append('1')
    -
    -    @gen.coroutine
    -    def get(self):
    -        self.chunks.append('2')
    -        yield gen.moment
    -        self.chunks.append('3')
    -        yield gen.moment
    -        self.write(''.join(self.chunks))
    -
    -
    -class AsyncPrepareErrorHandler(RequestHandler):
    -    @gen.coroutine
    -    def prepare(self):
    -        yield gen.moment
    -        raise HTTPError(403)
    -
    -    def get(self):
    -        self.finish('ok')
    -
    -
    -class NativeCoroutineHandler(RequestHandler):
    -    if sys.version_info > (3, 5):
    -        exec(textwrap.dedent("""
    -        async def get(self):
    -            import asyncio
    -            await asyncio.sleep(0)
    -            self.write("ok")
    -        """))
    -
    -
    -class GenWebTest(AsyncHTTPTestCase):
    -    def get_app(self):
    -        return Application([
    -            ('/sequence', GenSequenceHandler),
    -            ('/coroutine_sequence', GenCoroutineSequenceHandler),
    -            ('/coroutine_unfinished_sequence',
    -             GenCoroutineUnfinishedSequenceHandler),
    -            ('/task', GenTaskHandler),
    -            ('/exception', GenExceptionHandler),
    -            ('/coroutine_exception', GenCoroutineExceptionHandler),
    -            ('/yield_exception', GenYieldExceptionHandler),
    -            ('/undecorated_coroutine', UndecoratedCoroutinesHandler),
    -            ('/async_prepare_error', AsyncPrepareErrorHandler),
    -            ('/native_coroutine', NativeCoroutineHandler),
    -        ])
    -
    -    def test_sequence_handler(self):
    -        response = self.fetch('/sequence')
    -        self.assertEqual(response.body, b"123")
    -
    -    def test_coroutine_sequence_handler(self):
    -        response = self.fetch('/coroutine_sequence')
    -        self.assertEqual(response.body, b"123")
    -
    -    def test_coroutine_unfinished_sequence_handler(self):
    -        response = self.fetch('/coroutine_unfinished_sequence')
    -        self.assertEqual(response.body, b"123")
    -
    -    def test_task_handler(self):
    -        response = self.fetch('/task?url=%s' % url_escape(self.get_url('/sequence')))
    -        self.assertEqual(response.body, b"got response: 123")
    -
    -    def test_exception_handler(self):
    -        # Make sure we get an error and not a timeout
    -        with ExpectLog(app_log, "Uncaught exception GET /exception"):
    -            response = self.fetch('/exception')
    -        self.assertEqual(500, response.code)
    -
    -    def test_coroutine_exception_handler(self):
    -        # Make sure we get an error and not a timeout
    -        with ExpectLog(app_log, "Uncaught exception GET /coroutine_exception"):
    -            response = self.fetch('/coroutine_exception')
    -        self.assertEqual(500, response.code)
    -
    -    def test_yield_exception_handler(self):
    -        response = self.fetch('/yield_exception')
    -        self.assertEqual(response.body, b'ok')
    -
    -    def test_undecorated_coroutines(self):
    -        response = self.fetch('/undecorated_coroutine')
    -        self.assertEqual(response.body, b'123')
    -
    -    def test_async_prepare_error_handler(self):
    -        response = self.fetch('/async_prepare_error')
    -        self.assertEqual(response.code, 403)
    -
    -    @skipBefore35
    -    def test_native_coroutine_handler(self):
    -        response = self.fetch('/native_coroutine')
    -        self.assertEqual(response.code, 200)
    -        self.assertEqual(response.body, b'ok')
    -
    -
    -class WithTimeoutTest(AsyncTestCase):
    -    @gen_test
    -    def test_timeout(self):
    -        with self.assertRaises(gen.TimeoutError):
    -            yield gen.with_timeout(datetime.timedelta(seconds=0.1),
    -                                   Future())
    -
    -    @gen_test
    -    def test_completes_before_timeout(self):
    -        future = Future()
    -        self.io_loop.add_timeout(datetime.timedelta(seconds=0.1),
    -                                 lambda: future.set_result('asdf'))
    -        result = yield gen.with_timeout(datetime.timedelta(seconds=3600),
    -                                        future)
    -        self.assertEqual(result, 'asdf')
    -
    -    @gen_test
    -    def test_fails_before_timeout(self):
    -        future = Future()
    -        self.io_loop.add_timeout(
    -            datetime.timedelta(seconds=0.1),
    -            lambda: future.set_exception(ZeroDivisionError()))
    -        with self.assertRaises(ZeroDivisionError):
    -            yield gen.with_timeout(datetime.timedelta(seconds=3600),
    -                                   future)
    -
    -    @gen_test
    -    def test_already_resolved(self):
    -        future = Future()
    -        future.set_result('asdf')
    -        result = yield gen.with_timeout(datetime.timedelta(seconds=3600),
    -                                        future)
    -        self.assertEqual(result, 'asdf')
    -
    -    @unittest.skipIf(futures is None, 'futures module not present')
    -    @gen_test
    -    def test_timeout_concurrent_future(self):
    -        # A concurrent future that does not resolve before the timeout.
    -        with futures.ThreadPoolExecutor(1) as executor:
    -            with self.assertRaises(gen.TimeoutError):
    -                yield gen.with_timeout(self.io_loop.time(),
    -                                       executor.submit(time.sleep, 0.1))
    -
    -    @unittest.skipIf(futures is None, 'futures module not present')
    -    @gen_test
    -    def test_completed_concurrent_future(self):
    -        # A concurrent future that is resolved before we even submit it
    -        # to with_timeout.
    -        with futures.ThreadPoolExecutor(1) as executor:
    -            f = executor.submit(lambda: None)
    -            f.result()  # wait for completion
    -            yield gen.with_timeout(datetime.timedelta(seconds=3600), f)
    -
    -    @unittest.skipIf(futures is None, 'futures module not present')
    -    @gen_test
    -    def test_normal_concurrent_future(self):
    -        # A concurrent future that resolves while waiting for the timeout.
    -        with futures.ThreadPoolExecutor(1) as executor:
    -            yield gen.with_timeout(datetime.timedelta(seconds=3600),
    -                                   executor.submit(lambda: time.sleep(0.01)))
    -
    -
    -class WaitIteratorTest(AsyncTestCase):
    -    @gen_test
    -    def test_empty_iterator(self):
    -        g = gen.WaitIterator()
    -        self.assertTrue(g.done(), 'empty generator iterated')
    -
    -        with self.assertRaises(ValueError):
    -            g = gen.WaitIterator(False, bar=False)
    -
    -        self.assertEqual(g.current_index, None, "bad nil current index")
    -        self.assertEqual(g.current_future, None, "bad nil current future")
    -
    -    @gen_test
    -    def test_already_done(self):
    -        f1 = Future()
    -        f2 = Future()
    -        f3 = Future()
    -        f1.set_result(24)
    -        f2.set_result(42)
    -        f3.set_result(84)
    -
    -        g = gen.WaitIterator(f1, f2, f3)
    -        i = 0
    -        while not g.done():
    -            r = yield g.next()
    -            # Order is not guaranteed, but the current implementation
    -            # preserves ordering of already-done Futures.
    -            if i == 0:
    -                self.assertEqual(g.current_index, 0)
    -                self.assertIs(g.current_future, f1)
    -                self.assertEqual(r, 24)
    -            elif i == 1:
    -                self.assertEqual(g.current_index, 1)
    -                self.assertIs(g.current_future, f2)
    -                self.assertEqual(r, 42)
    -            elif i == 2:
    -                self.assertEqual(g.current_index, 2)
    -                self.assertIs(g.current_future, f3)
    -                self.assertEqual(r, 84)
    -            i += 1
    -
    -        self.assertEqual(g.current_index, None, "bad nil current index")
    -        self.assertEqual(g.current_future, None, "bad nil current future")
    -
    -        dg = gen.WaitIterator(f1=f1, f2=f2)
    -
    -        while not dg.done():
    -            dr = yield dg.next()
    -            if dg.current_index == "f1":
    -                self.assertTrue(dg.current_future == f1 and dr == 24,
    -                                "WaitIterator dict status incorrect")
    -            elif dg.current_index == "f2":
    -                self.assertTrue(dg.current_future == f2 and dr == 42,
    -                                "WaitIterator dict status incorrect")
    -            else:
    -                self.fail("got bad WaitIterator index {}".format(
    -                    dg.current_index))
    -
    -            i += 1
    -
    -        self.assertEqual(dg.current_index, None, "bad nil current index")
    -        self.assertEqual(dg.current_future, None, "bad nil current future")
    -
    -    def finish_coroutines(self, iteration, futures):
    -        if iteration == 3:
    -            futures[2].set_result(24)
    -        elif iteration == 5:
    -            futures[0].set_exception(ZeroDivisionError())
    -        elif iteration == 8:
    -            futures[1].set_result(42)
    -            futures[3].set_result(84)
    -
    -        if iteration < 8:
    -            self.io_loop.add_callback(self.finish_coroutines, iteration + 1, futures)
    -
    -    @gen_test
    -    def test_iterator(self):
    -        futures = [Future(), Future(), Future(), Future()]
    -
    -        self.finish_coroutines(0, futures)
    -
    -        g = gen.WaitIterator(*futures)
    -
    -        i = 0
    -        while not g.done():
    -            try:
    -                r = yield g.next()
    -            except ZeroDivisionError:
    -                self.assertIs(g.current_future, futures[0],
    -                              'exception future invalid')
    -            else:
    -                if i == 0:
    -                    self.assertEqual(r, 24, 'iterator value incorrect')
    -                    self.assertEqual(g.current_index, 2, 'wrong index')
    -                elif i == 2:
    -                    self.assertEqual(r, 42, 'iterator value incorrect')
    -                    self.assertEqual(g.current_index, 1, 'wrong index')
    -                elif i == 3:
    -                    self.assertEqual(r, 84, 'iterator value incorrect')
    -                    self.assertEqual(g.current_index, 3, 'wrong index')
    -            i += 1
    -
    -    @skipBefore35
    -    @gen_test
    -    def test_iterator_async_await(self):
    -        # Recreate the previous test with py35 syntax. It's a little clunky
    -        # because of the way the previous test handles an exception on
    -        # a single iteration.
    -        futures = [Future(), Future(), Future(), Future()]
    -        self.finish_coroutines(0, futures)
    -        self.finished = False
    -
    -        namespace = exec_test(globals(), locals(), """
    -        async def f():
    -            i = 0
    -            g = gen.WaitIterator(*futures)
    -            try:
    -                async for r in g:
    -                    if i == 0:
    -                        self.assertEqual(r, 24, 'iterator value incorrect')
    -                        self.assertEqual(g.current_index, 2, 'wrong index')
    -                    else:
    -                        raise Exception("expected exception on iteration 1")
    -                    i += 1
    -            except ZeroDivisionError:
    -                i += 1
    -            async for r in g:
    -                if i == 2:
    -                    self.assertEqual(r, 42, 'iterator value incorrect')
    -                    self.assertEqual(g.current_index, 1, 'wrong index')
    -                elif i == 3:
    -                    self.assertEqual(r, 84, 'iterator value incorrect')
    -                    self.assertEqual(g.current_index, 3, 'wrong index')
    -                else:
    -                    raise Exception("didn't expect iteration %d" % i)
    -                i += 1
    -            self.finished = True
    -        """)
    -        yield namespace['f']()
    -        self.assertTrue(self.finished)
    -
    -    @gen_test
    -    def test_no_ref(self):
    -        # In this usage, there is no direct hard reference to the
    -        # WaitIterator itself, only the Future it returns. Since
    -        # WaitIterator uses weak references internally to improve GC
    -        # performance, this used to cause problems.
    -        yield gen.with_timeout(datetime.timedelta(seconds=0.1),
    -                               gen.WaitIterator(gen.sleep(0)).next())
    -
    -
    -class RunnerGCTest(AsyncTestCase):
    -    def is_pypy3(self):
    -        return (platform.python_implementation() == 'PyPy' and
    -                sys.version_info > (3,))
    -
    -    @gen_test
    -    def test_gc(self):
    -        # Github issue 1769: Runner objects can get GCed unexpectedly
    -        # while their future is alive.
    -        weakref_scope = [None]
    -
    -        def callback():
    -            gc.collect(2)
    -            weakref_scope[0]().set_result(123)
    -
    -        @gen.coroutine
    -        def tester():
    -            fut = Future()
    -            weakref_scope[0] = weakref.ref(fut)
    -            self.io_loop.add_callback(callback)
    -            yield fut
    -
    -        yield gen.with_timeout(
    -            datetime.timedelta(seconds=0.2),
    -            tester()
    -        )
    -
    -    def test_gc_infinite_coro(self):
    -        # Github issue 2229: suspended coroutines should be GCed when
    -        # their loop is closed, even if they're involved in a reference
    -        # cycle.
    -        if IOLoop.configured_class().__name__.endswith('TwistedIOLoop'):
    -            raise unittest.SkipTest("Test may fail on TwistedIOLoop")
    -
    -        loop = self.get_new_ioloop()
    -        result = []
    -        wfut = []
    -
    -        @gen.coroutine
    -        def infinite_coro():
    -            try:
    -                while True:
    -                    yield gen.sleep(1e-3)
    -                    result.append(True)
    -            finally:
    -                # coroutine finalizer
    -                result.append(None)
    -
    -        @gen.coroutine
    -        def do_something():
    -            fut = infinite_coro()
    -            fut._refcycle = fut
    -            wfut.append(weakref.ref(fut))
    -            yield gen.sleep(0.2)
    -
    -        loop.run_sync(do_something)
    -        loop.close()
    -        gc.collect()
    -        # Future was collected
    -        self.assertIs(wfut[0](), None)
    -        # At least one wakeup
    -        self.assertGreaterEqual(len(result), 2)
    -        if not self.is_pypy3():
    -            # coroutine finalizer was called (not on PyPy3 apparently)
    -            self.assertIs(result[-1], None)
    -
    -    @skipBefore35
    -    def test_gc_infinite_async_await(self):
    -        # Same as test_gc_infinite_coro, but with an `async def` function
    -        import asyncio
    -
    -        namespace = exec_test(globals(), locals(), """
    -        async def infinite_coro(result):
    -            try:
    -                while True:
    -                    await gen.sleep(1e-3)
    -                    result.append(True)
    -            finally:
    -                # coroutine finalizer
    -                result.append(None)
    -        """)
    -
    -        infinite_coro = namespace['infinite_coro']
    -        loop = self.get_new_ioloop()
    -        result = []
    -        wfut = []
    -
    -        @gen.coroutine
    -        def do_something():
    -            fut = asyncio.get_event_loop().create_task(infinite_coro(result))
    -            fut._refcycle = fut
    -            wfut.append(weakref.ref(fut))
    -            yield gen.sleep(0.2)
    -
    -        loop.run_sync(do_something)
    -        with ExpectLog('asyncio', "Task was destroyed but it is pending"):
    -            loop.close()
    -            gc.collect()
    -        # Future was collected
    -        self.assertIs(wfut[0](), None)
    -        # At least one wakeup and one finally
    -        self.assertGreaterEqual(len(result), 2)
    -        if not self.is_pypy3():
    -            # coroutine finalizer was called (not on PyPy3 apparently)
    -            self.assertIs(result[-1], None)
    -
    -    def test_multi_moment(self):
    -        # Test gen.multi with moment
    -        # now that it's not a real Future
    -        @gen.coroutine
    -        def wait_a_moment():
    -            result = yield gen.multi([gen.moment, gen.moment])
    -            raise gen.Return(result)
    -
    -        loop = self.get_new_ioloop()
    -        result = loop.run_sync(wait_a_moment)
    -        self.assertEqual(result, [None, None])
    -
    -
    -if __name__ == '__main__':
    -    unittest.main()
    diff --git a/lib/tornado/test/gettext_translations/extract_me.py b/lib/tornado/test/gettext_translations/extract_me.py
    deleted file mode 100755
    index 283c13f4..00000000
    --- a/lib/tornado/test/gettext_translations/extract_me.py
    +++ /dev/null
    @@ -1,16 +0,0 @@
    -# flake8: noqa
    -# Dummy source file to allow creation of the initial .po file in the
    -# same way as a real project.  I'm not entirely sure about the real
    -# workflow here, but this seems to work.
    -#
    -# 1) xgettext --language=Python --keyword=_:1,2 --keyword=pgettext:1c,2 --keyword=pgettext:1c,2,3 extract_me.py -o tornado_test.po
    -# 2) Edit tornado_test.po, setting CHARSET, Plural-Forms and setting msgstr
    -# 3) msgfmt tornado_test.po -o tornado_test.mo
    -# 4) Put the file in the proper location: $LANG/LC_MESSAGES
    -
    -from __future__ import absolute_import, division, print_function
    -_("school")
    -pgettext("law", "right")
    -pgettext("good", "right")
    -pgettext("organization", "club", "clubs", 1)
    -pgettext("stick", "club", "clubs", 1)
    diff --git a/lib/tornado/test/gettext_translations/fr_FR/LC_MESSAGES/tornado_test.mo b/lib/tornado/test/gettext_translations/fr_FR/LC_MESSAGES/tornado_test.mo
    deleted file mode 100755
    index a97bf9c5..00000000
    Binary files a/lib/tornado/test/gettext_translations/fr_FR/LC_MESSAGES/tornado_test.mo and /dev/null differ
    diff --git a/lib/tornado/test/gettext_translations/fr_FR/LC_MESSAGES/tornado_test.po b/lib/tornado/test/gettext_translations/fr_FR/LC_MESSAGES/tornado_test.po
    deleted file mode 100755
    index 88d72c86..00000000
    --- a/lib/tornado/test/gettext_translations/fr_FR/LC_MESSAGES/tornado_test.po
    +++ /dev/null
    @@ -1,47 +0,0 @@
    -# SOME DESCRIPTIVE TITLE.
    -# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
    -# This file is distributed under the same license as the PACKAGE package.
    -# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
    -#
    -#, fuzzy
    -msgid ""
    -msgstr ""
    -"Project-Id-Version: PACKAGE VERSION\n"
    -"Report-Msgid-Bugs-To: \n"
    -"POT-Creation-Date: 2015-01-27 11:05+0300\n"
    -"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
    -"Last-Translator: FULL NAME \n"
    -"Language-Team: LANGUAGE \n"
    -"Language: \n"
    -"MIME-Version: 1.0\n"
    -"Content-Type: text/plain; charset=utf-8\n"
    -"Content-Transfer-Encoding: 8bit\n"
    -"Plural-Forms: nplurals=2; plural=(n > 1);\n"
    -
    -#: extract_me.py:11
    -msgid "school"
    -msgstr "école"
    -
    -#: extract_me.py:12
    -msgctxt "law"
    -msgid "right"
    -msgstr "le droit"
    -
    -#: extract_me.py:13
    -msgctxt "good"
    -msgid "right"
    -msgstr "le bien"
    -
    -#: extract_me.py:14
    -msgctxt "organization"
    -msgid "club"
    -msgid_plural "clubs"
    -msgstr[0] "le club"
    -msgstr[1] "les clubs"
    -
    -#: extract_me.py:15
    -msgctxt "stick"
    -msgid "club"
    -msgid_plural "clubs"
    -msgstr[0] "le bâton"
    -msgstr[1] "les bâtons"
    diff --git a/lib/tornado/test/http1connection_test.py b/lib/tornado/test/http1connection_test.py
    deleted file mode 100755
    index 8aaaaf35..00000000
    --- a/lib/tornado/test/http1connection_test.py
    +++ /dev/null
    @@ -1,61 +0,0 @@
    -from __future__ import absolute_import, division, print_function
    -
    -import socket
    -
    -from tornado.http1connection import HTTP1Connection
    -from tornado.httputil import HTTPMessageDelegate
    -from tornado.iostream import IOStream
    -from tornado.locks import Event
    -from tornado.netutil import add_accept_handler
    -from tornado.testing import AsyncTestCase, bind_unused_port, gen_test
    -
    -
    -class HTTP1ConnectionTest(AsyncTestCase):
    -    def setUp(self):
    -        super(HTTP1ConnectionTest, self).setUp()
    -        self.asyncSetUp()
    -
    -    @gen_test
    -    def asyncSetUp(self):
    -        listener, port = bind_unused_port()
    -        event = Event()
    -
    -        def accept_callback(conn, addr):
    -            self.server_stream = IOStream(conn)
    -            self.addCleanup(self.server_stream.close)
    -            event.set()
    -
    -        add_accept_handler(listener, accept_callback)
    -        self.client_stream = IOStream(socket.socket())
    -        self.addCleanup(self.client_stream.close)
    -        yield [self.client_stream.connect(('127.0.0.1', port)),
    -               event.wait()]
    -        self.io_loop.remove_handler(listener)
    -        listener.close()
    -
    -    @gen_test
    -    def test_http10_no_content_length(self):
    -        # Regression test for a bug in which can_keep_alive would crash
    -        # for an HTTP/1.0 (not 1.1) response with no content-length.
    -        conn = HTTP1Connection(self.client_stream, True)
    -        self.server_stream.write(b"HTTP/1.0 200 Not Modified\r\n\r\nhello")
    -        self.server_stream.close()
    -
    -        event = Event()
    -        test = self
    -        body = []
    -
    -        class Delegate(HTTPMessageDelegate):
    -            def headers_received(self, start_line, headers):
    -                test.code = start_line.code
    -
    -            def data_received(self, data):
    -                body.append(data)
    -
    -            def finish(self):
    -                event.set()
    -
    -        yield conn.read_response(Delegate())
    -        yield event.wait()
    -        self.assertEqual(self.code, 200)
    -        self.assertEqual(b''.join(body), b'hello')
    diff --git a/lib/tornado/test/httpclient_test.py b/lib/tornado/test/httpclient_test.py
    deleted file mode 100755
    index 35426a7d..00000000
    --- a/lib/tornado/test/httpclient_test.py
    +++ /dev/null
    @@ -1,718 +0,0 @@
    -# -*- coding: utf-8 -*-
    -from __future__ import absolute_import, division, print_function
    -
    -import base64
    -import binascii
    -from contextlib import closing
    -import copy
    -import sys
    -import threading
    -import datetime
    -from io import BytesIO
    -import time
    -import unicodedata
    -
    -from tornado.escape import utf8, native_str
    -from tornado import gen
    -from tornado.httpclient import HTTPRequest, HTTPResponse, _RequestProxy, HTTPError, HTTPClient
    -from tornado.httpserver import HTTPServer
    -from tornado.ioloop import IOLoop
    -from tornado.iostream import IOStream
    -from tornado.log import gen_log
    -from tornado import netutil
    -from tornado.stack_context import ExceptionStackContext, NullContext
    -from tornado.testing import AsyncHTTPTestCase, bind_unused_port, gen_test, ExpectLog
    -from tornado.test.util import unittest, skipOnTravis, ignore_deprecation
    -from tornado.web import Application, RequestHandler, url
    -from tornado.httputil import format_timestamp, HTTPHeaders
    -
    -
    -class HelloWorldHandler(RequestHandler):
    -    def get(self):
    -        name = self.get_argument("name", "world")
    -        self.set_header("Content-Type", "text/plain")
    -        self.finish("Hello %s!" % name)
    -
    -
    -class PostHandler(RequestHandler):
    -    def post(self):
    -        self.finish("Post arg1: %s, arg2: %s" % (
    -            self.get_argument("arg1"), self.get_argument("arg2")))
    -
    -
    -class PutHandler(RequestHandler):
    -    def put(self):
    -        self.write("Put body: ")
    -        self.write(self.request.body)
    -
    -
    -class RedirectHandler(RequestHandler):
    -    def prepare(self):
    -        self.write('redirects can have bodies too')
    -        self.redirect(self.get_argument("url"),
    -                      status=int(self.get_argument("status", "302")))
    -
    -
    -class ChunkHandler(RequestHandler):
    -    @gen.coroutine
    -    def get(self):
    -        self.write("asdf")
    -        self.flush()
    -        # Wait a bit to ensure the chunks are sent and received separately.
    -        yield gen.sleep(0.01)
    -        self.write("qwer")
    -
    -
    -class AuthHandler(RequestHandler):
    -    def get(self):
    -        self.finish(self.request.headers["Authorization"])
    -
    -
    -class CountdownHandler(RequestHandler):
    -    def get(self, count):
    -        count = int(count)
    -        if count > 0:
    -            self.redirect(self.reverse_url("countdown", count - 1))
    -        else:
    -            self.write("Zero")
    -
    -
    -class EchoPostHandler(RequestHandler):
    -    def post(self):
    -        self.write(self.request.body)
    -
    -
    -class UserAgentHandler(RequestHandler):
    -    def get(self):
    -        self.write(self.request.headers.get('User-Agent', 'User agent not set'))
    -
    -
    -class ContentLength304Handler(RequestHandler):
    -    def get(self):
    -        self.set_status(304)
    -        self.set_header('Content-Length', 42)
    -
    -    def _clear_headers_for_304(self):
    -        # Tornado strips content-length from 304 responses, but here we
    -        # want to simulate servers that include the headers anyway.
    -        pass
    -
    -
    -class PatchHandler(RequestHandler):
    -
    -    def patch(self):
    -        "Return the request payload - so we can check it is being kept"
    -        self.write(self.request.body)
    -
    -
    -class AllMethodsHandler(RequestHandler):
    -    SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS + ('OTHER',)
    -
    -    def method(self):
    -        self.write(self.request.method)
    -
    -    get = post = put = delete = options = patch = other = method
    -
    -
    -class SetHeaderHandler(RequestHandler):
    -    def get(self):
    -        # Use get_arguments for keys to get strings, but
    -        # request.arguments for values to get bytes.
    -        for k, v in zip(self.get_arguments('k'),
    -                        self.request.arguments['v']):
    -            self.set_header(k, v)
    -
    -# These tests end up getting run redundantly: once here with the default
    -# HTTPClient implementation, and then again in each implementation's own
    -# test suite.
    -
    -
    -class HTTPClientCommonTestCase(AsyncHTTPTestCase):
    -    def get_app(self):
    -        return Application([
    -            url("/hello", HelloWorldHandler),
    -            url("/post", PostHandler),
    -            url("/put", PutHandler),
    -            url("/redirect", RedirectHandler),
    -            url("/chunk", ChunkHandler),
    -            url("/auth", AuthHandler),
    -            url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
    -            url("/echopost", EchoPostHandler),
    -            url("/user_agent", UserAgentHandler),
    -            url("/304_with_content_length", ContentLength304Handler),
    -            url("/all_methods", AllMethodsHandler),
    -            url('/patch', PatchHandler),
    -            url('/set_header', SetHeaderHandler),
    -        ], gzip=True)
    -
    -    def test_patch_receives_payload(self):
    -        body = b"some patch data"
    -        response = self.fetch("/patch", method='PATCH', body=body)
    -        self.assertEqual(response.code, 200)
    -        self.assertEqual(response.body, body)
    -
    -    @skipOnTravis
    -    def test_hello_world(self):
    -        response = self.fetch("/hello")
    -        self.assertEqual(response.code, 200)
    -        self.assertEqual(response.headers["Content-Type"], "text/plain")
    -        self.assertEqual(response.body, b"Hello world!")
    -        self.assertEqual(int(response.request_time), 0)
    -
    -        response = self.fetch("/hello?name=Ben")
    -        self.assertEqual(response.body, b"Hello Ben!")
    -
    -    def test_streaming_callback(self):
    -        # streaming_callback is also tested in test_chunked
    -        chunks = []
    -        response = self.fetch("/hello",
    -                              streaming_callback=chunks.append)
    -        # with streaming_callback, data goes to the callback and not response.body
    -        self.assertEqual(chunks, [b"Hello world!"])
    -        self.assertFalse(response.body)
    -
    -    def test_post(self):
    -        response = self.fetch("/post", method="POST",
    -                              body="arg1=foo&arg2=bar")
    -        self.assertEqual(response.code, 200)
    -        self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
    -
    -    def test_chunked(self):
    -        response = self.fetch("/chunk")
    -        self.assertEqual(response.body, b"asdfqwer")
    -
    -        chunks = []
    -        response = self.fetch("/chunk",
    -                              streaming_callback=chunks.append)
    -        self.assertEqual(chunks, [b"asdf", b"qwer"])
    -        self.assertFalse(response.body)
    -
    -    def test_chunked_close(self):
    -        # test case in which chunks spread read-callback processing
    -        # over several ioloop iterations, but the connection is already closed.
    -        sock, port = bind_unused_port()
    -        with closing(sock):
    -            @gen.coroutine
    -            def accept_callback(conn, address):
    -                # fake an HTTP server using chunked encoding where the final chunks
    -                # and connection close all happen at once
    -                stream = IOStream(conn)
    -                request_data = yield stream.read_until(b"\r\n\r\n")
    -                if b"HTTP/1." not in request_data:
    -                    self.skipTest("requires HTTP/1.x")
    -                yield stream.write(b"""\
    -HTTP/1.1 200 OK
    -Transfer-Encoding: chunked
    -
    -1
    -1
    -1
    -2
    -0
    -
    -""".replace(b"\n", b"\r\n"))
    -                stream.close()
    -            netutil.add_accept_handler(sock, accept_callback)
    -            resp = self.fetch("http://127.0.0.1:%d/" % port)
    -            resp.rethrow()
    -            self.assertEqual(resp.body, b"12")
    -            self.io_loop.remove_handler(sock.fileno())
    -
    -    def test_streaming_stack_context(self):
    -        chunks = []
    -        exc_info = []
    -
    -        def error_handler(typ, value, tb):
    -            exc_info.append((typ, value, tb))
    -            return True
    -
    -        def streaming_cb(chunk):
    -            chunks.append(chunk)
    -            if chunk == b'qwer':
    -                1 / 0
    -
    -        with ignore_deprecation():
    -            with ExceptionStackContext(error_handler):
    -                self.fetch('/chunk', streaming_callback=streaming_cb)
    -
    -        self.assertEqual(chunks, [b'asdf', b'qwer'])
    -        self.assertEqual(1, len(exc_info))
    -        self.assertIs(exc_info[0][0], ZeroDivisionError)
    -
    -    def test_basic_auth(self):
    -        # This test data appears in section 2 of RFC 7617.
    -        self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
    -                                    auth_password="open sesame").body,
    -                         b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
    -
    -    def test_basic_auth_explicit_mode(self):
    -        self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
    -                                    auth_password="open sesame",
    -                                    auth_mode="basic").body,
    -                         b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
    -
    -    def test_basic_auth_unicode(self):
    -        # This test data appears in section 2.1 of RFC 7617.
    -        self.assertEqual(self.fetch("/auth", auth_username="test",
    -                                    auth_password="123£").body,
    -                         b"Basic dGVzdDoxMjPCow==")
    -
    -        # The standard mandates NFC. Give it a decomposed username
    -        # and ensure it is normalized to composed form.
    -        username = unicodedata.normalize("NFD", u"josé")
    -        self.assertEqual(self.fetch("/auth",
    -                                    auth_username=username,
    -                                    auth_password="səcrət").body,
    -                         b"Basic am9zw6k6c8mZY3LJmXQ=")
    -
    -    def test_unsupported_auth_mode(self):
    -        # curl and simple clients handle errors a bit differently; the
    -        # important thing is that they don't fall back to basic auth
    -        # on an unknown mode.
    -        with ExpectLog(gen_log, "uncaught exception", required=False):
    -            with self.assertRaises((ValueError, HTTPError)):
    -                self.fetch("/auth", auth_username="Aladdin",
    -                           auth_password="open sesame",
    -                           auth_mode="asdf",
    -                           raise_error=True)
    -
    -    def test_follow_redirect(self):
    -        response = self.fetch("/countdown/2", follow_redirects=False)
    -        self.assertEqual(302, response.code)
    -        self.assertTrue(response.headers["Location"].endswith("/countdown/1"))
    -
    -        response = self.fetch("/countdown/2")
    -        self.assertEqual(200, response.code)
    -        self.assertTrue(response.effective_url.endswith("/countdown/0"))
    -        self.assertEqual(b"Zero", response.body)
    -
    -    def test_credentials_in_url(self):
    -        url = self.get_url("/auth").replace("http://", "http://me:secret@")
    -        response = self.fetch(url)
    -        self.assertEqual(b"Basic " + base64.b64encode(b"me:secret"),
    -                         response.body)
    -
    -    def test_body_encoding(self):
    -        unicode_body = u"\xe9"
    -        byte_body = binascii.a2b_hex(b"e9")
    -
    -        # unicode string in body gets converted to utf8
    -        response = self.fetch("/echopost", method="POST", body=unicode_body,
    -                              headers={"Content-Type": "application/blah"})
    -        self.assertEqual(response.headers["Content-Length"], "2")
    -        self.assertEqual(response.body, utf8(unicode_body))
    -
    -        # byte strings pass through directly
    -        response = self.fetch("/echopost", method="POST",
    -                              body=byte_body,
    -                              headers={"Content-Type": "application/blah"})
    -        self.assertEqual(response.headers["Content-Length"], "1")
    -        self.assertEqual(response.body, byte_body)
    -
    -        # Mixing unicode in headers and byte string bodies shouldn't
    -        # break anything
    -        response = self.fetch("/echopost", method="POST", body=byte_body,
    -                              headers={"Content-Type": "application/blah"},
    -                              user_agent=u"foo")
    -        self.assertEqual(response.headers["Content-Length"], "1")
    -        self.assertEqual(response.body, byte_body)
    -
    -    def test_types(self):
    -        response = self.fetch("/hello")
    -        self.assertEqual(type(response.body), bytes)
    -        self.assertEqual(type(response.headers["Content-Type"]), str)
    -        self.assertEqual(type(response.code), int)
    -        self.assertEqual(type(response.effective_url), str)
    -
    -    def test_header_callback(self):
    -        first_line = []
    -        headers = {}
    -        chunks = []
    -
    -        def header_callback(header_line):
    -            if header_line.startswith('HTTP/1.1 101'):
    -                # Upgrading to HTTP/2
    -                pass
    -            elif header_line.startswith('HTTP/'):
    -                first_line.append(header_line)
    -            elif header_line != '\r\n':
    -                k, v = header_line.split(':', 1)
    -                headers[k.lower()] = v.strip()
    -
    -        def streaming_callback(chunk):
    -            # All header callbacks are run before any streaming callbacks,
    -            # so the header data is available to process the data as it
    -            # comes in.
    -            self.assertEqual(headers['content-type'], 'text/html; charset=UTF-8')
    -            chunks.append(chunk)
    -
    -        self.fetch('/chunk', header_callback=header_callback,
    -                   streaming_callback=streaming_callback)
    -        self.assertEqual(len(first_line), 1, first_line)
    -        self.assertRegexpMatches(first_line[0], 'HTTP/[0-9]\\.[0-9] 200.*\r\n')
    -        self.assertEqual(chunks, [b'asdf', b'qwer'])
    -
    -    def test_header_callback_stack_context(self):
    -        exc_info = []
    -
    -        def error_handler(typ, value, tb):
    -            exc_info.append((typ, value, tb))
    -            return True
    -
    -        def header_callback(header_line):
    -            if header_line.lower().startswith('content-type:'):
    -                1 / 0
    -
    -        with ignore_deprecation():
    -            with ExceptionStackContext(error_handler):
    -                self.fetch('/chunk', header_callback=header_callback)
    -        self.assertEqual(len(exc_info), 1)
    -        self.assertIs(exc_info[0][0], ZeroDivisionError)
    -
    -    @gen_test
    -    def test_configure_defaults(self):
    -        defaults = dict(user_agent='TestDefaultUserAgent', allow_ipv6=False)
    -        # Construct a new instance of the configured client class
    -        client = self.http_client.__class__(force_instance=True,
    -                                            defaults=defaults)
    -        try:
    -            response = yield client.fetch(self.get_url('/user_agent'))
    -            self.assertEqual(response.body, b'TestDefaultUserAgent')
    -        finally:
    -            client.close()
    -
    -    def test_header_types(self):
    -        # Header values may be passed as character or utf8 byte strings,
    -        # in a plain dictionary or an HTTPHeaders object.
    -        # Keys must always be the native str type.
    -        # All combinations should have the same results on the wire.
    -        for value in [u"MyUserAgent", b"MyUserAgent"]:
    -            for container in [dict, HTTPHeaders]:
    -                headers = container()
    -                headers['User-Agent'] = value
    -                resp = self.fetch('/user_agent', headers=headers)
    -                self.assertEqual(
    -                    resp.body, b"MyUserAgent",
    -                    "response=%r, value=%r, container=%r" %
    -                    (resp.body, value, container))
    -
    -    def test_multi_line_headers(self):
    -        # Multi-line http headers are rare but rfc-allowed
    -        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
    -        sock, port = bind_unused_port()
    -        with closing(sock):
    -            @gen.coroutine
    -            def accept_callback(conn, address):
    -                stream = IOStream(conn)
    -                request_data = yield stream.read_until(b"\r\n\r\n")
    -                if b"HTTP/1." not in request_data:
    -                    self.skipTest("requires HTTP/1.x")
    -                yield stream.write(b"""\
    -HTTP/1.1 200 OK
    -X-XSS-Protection: 1;
    -\tmode=block
    -
    -""".replace(b"\n", b"\r\n"))
    -                stream.close()
    -
    -            netutil.add_accept_handler(sock, accept_callback)
    -            resp = self.fetch("http://127.0.0.1:%d/" % port)
    -            resp.rethrow()
    -            self.assertEqual(resp.headers['X-XSS-Protection'], "1; mode=block")
    -            self.io_loop.remove_handler(sock.fileno())
    -
    -    def test_304_with_content_length(self):
    -        # According to the spec 304 responses SHOULD NOT include
    -        # Content-Length or other entity headers, but some servers do it
    -        # anyway.
    -        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
    -        response = self.fetch('/304_with_content_length')
    -        self.assertEqual(response.code, 304)
    -        self.assertEqual(response.headers['Content-Length'], '42')
    -
    -    def test_final_callback_stack_context(self):
    -        # The final callback should be run outside of the httpclient's
-        # stack_context.  We want to ensure that there is no stack_context
    -        # between the user's callback and the IOLoop, so monkey-patch
    -        # IOLoop.handle_callback_exception and disable the test harness's
    -        # context with a NullContext.
    -        # Note that this does not apply to secondary callbacks (header
    -        # and streaming_callback), as errors there must be seen as errors
    -        # by the http client so it can clean up the connection.
    -        exc_info = []
    -
    -        def handle_callback_exception(callback):
    -            exc_info.append(sys.exc_info())
    -            self.stop()
    -        self.io_loop.handle_callback_exception = handle_callback_exception
    -        with NullContext():
    -            with ignore_deprecation():
    -                self.http_client.fetch(self.get_url('/hello'),
    -                                       lambda response: 1 / 0)
    -        self.wait()
    -        self.assertEqual(exc_info[0][0], ZeroDivisionError)
    -
    -    @gen_test
    -    def test_future_interface(self):
    -        response = yield self.http_client.fetch(self.get_url('/hello'))
    -        self.assertEqual(response.body, b'Hello world!')
    -
    -    @gen_test
    -    def test_future_http_error(self):
    -        with self.assertRaises(HTTPError) as context:
    -            yield self.http_client.fetch(self.get_url('/notfound'))
    -        self.assertEqual(context.exception.code, 404)
    -        self.assertEqual(context.exception.response.code, 404)
    -
    -    @gen_test
    -    def test_future_http_error_no_raise(self):
    -        response = yield self.http_client.fetch(self.get_url('/notfound'), raise_error=False)
    -        self.assertEqual(response.code, 404)
    -
    -    @gen_test
    -    def test_reuse_request_from_response(self):
    -        # The response.request attribute should be an HTTPRequest, not
    -        # a _RequestProxy.
    -        # This test uses self.http_client.fetch because self.fetch calls
    -        # self.get_url on the input unconditionally.
    -        url = self.get_url('/hello')
    -        response = yield self.http_client.fetch(url)
    -        self.assertEqual(response.request.url, url)
    -        self.assertTrue(isinstance(response.request, HTTPRequest))
    -        response2 = yield self.http_client.fetch(response.request)
    -        self.assertEqual(response2.body, b'Hello world!')
    -
    -    def test_all_methods(self):
    -        for method in ['GET', 'DELETE', 'OPTIONS']:
    -            response = self.fetch('/all_methods', method=method)
    -            self.assertEqual(response.body, utf8(method))
    -        for method in ['POST', 'PUT', 'PATCH']:
    -            response = self.fetch('/all_methods', method=method, body=b'')
    -            self.assertEqual(response.body, utf8(method))
    -        response = self.fetch('/all_methods', method='HEAD')
    -        self.assertEqual(response.body, b'')
    -        response = self.fetch('/all_methods', method='OTHER',
    -                              allow_nonstandard_methods=True)
    -        self.assertEqual(response.body, b'OTHER')
    -
    -    def test_body_sanity_checks(self):
    -        # These methods require a body.
    -        for method in ('POST', 'PUT', 'PATCH'):
    -            with self.assertRaises(ValueError) as context:
    -                self.fetch('/all_methods', method=method, raise_error=True)
    -            self.assertIn('must not be None', str(context.exception))
    -
    -            resp = self.fetch('/all_methods', method=method,
    -                              allow_nonstandard_methods=True)
    -            self.assertEqual(resp.code, 200)
    -
    -        # These methods don't allow a body.
    -        for method in ('GET', 'DELETE', 'OPTIONS'):
    -            with self.assertRaises(ValueError) as context:
    -                self.fetch('/all_methods', method=method, body=b'asdf', raise_error=True)
    -            self.assertIn('must be None', str(context.exception))
    -
    -            # In most cases this can be overridden, but curl_httpclient
    -            # does not allow body with a GET at all.
    -            if method != 'GET':
    -                self.fetch('/all_methods', method=method, body=b'asdf',
    -                           allow_nonstandard_methods=True, raise_error=True)
    -                self.assertEqual(resp.code, 200)
    -
    -    # This test causes odd failures with the combination of
    -    # curl_httpclient (at least with the version of libcurl available
    -    # on ubuntu 12.04), TwistedIOLoop, and epoll.  For POST (but not PUT),
    -    # curl decides the response came back too soon and closes the connection
    -    # to start again.  It does this *before* telling the socket callback to
    -    # unregister the FD.  Some IOLoop implementations have special kernel
    -    # integration to discover this immediately.  Tornado's IOLoops
    -    # ignore errors on remove_handler to accommodate this behavior, but
    -    # Twisted's reactor does not.  The removeReader call fails and so
    -    # do all future removeAll calls (which our tests do at cleanup).
    -    #
    -    # def test_post_307(self):
    -    #    response = self.fetch("/redirect?status=307&url=/post",
    -    #                          method="POST", body=b"arg1=foo&arg2=bar")
    -    #    self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
    -
    -    def test_put_307(self):
    -        response = self.fetch("/redirect?status=307&url=/put",
    -                              method="PUT", body=b"hello")
    -        response.rethrow()
    -        self.assertEqual(response.body, b"Put body: hello")
    -
    -    def test_non_ascii_header(self):
    -        # Non-ascii headers are sent as latin1.
    -        response = self.fetch("/set_header?k=foo&v=%E9")
    -        response.rethrow()
    -        self.assertEqual(response.headers["Foo"], native_str(u"\u00e9"))
    -
    -    def test_response_times(self):
    -        # A few simple sanity checks of the response time fields to
    -        # make sure they're using the right basis (between the
    -        # wall-time and monotonic clocks).
    -        start_time = time.time()
    -        response = self.fetch("/hello")
    -        response.rethrow()
    -        self.assertGreaterEqual(response.request_time, 0)
    -        self.assertLess(response.request_time, 1.0)
    -        # A very crude check to make sure that start_time is based on
    -        # wall time and not the monotonic clock.
    -        self.assertLess(abs(response.start_time - start_time), 1.0)
    -
    -        for k, v in response.time_info.items():
    -            self.assertTrue(0 <= v < 1.0, "time_info[%s] out of bounds: %s" % (k, v))
    -
    -
    -class RequestProxyTest(unittest.TestCase):
    -    def test_request_set(self):
    -        proxy = _RequestProxy(HTTPRequest('http://example.com/',
    -                                          user_agent='foo'),
    -                              dict())
    -        self.assertEqual(proxy.user_agent, 'foo')
    -
    -    def test_default_set(self):
    -        proxy = _RequestProxy(HTTPRequest('http://example.com/'),
    -                              dict(network_interface='foo'))
    -        self.assertEqual(proxy.network_interface, 'foo')
    -
    -    def test_both_set(self):
    -        proxy = _RequestProxy(HTTPRequest('http://example.com/',
    -                                          proxy_host='foo'),
    -                              dict(proxy_host='bar'))
    -        self.assertEqual(proxy.proxy_host, 'foo')
    -
    -    def test_neither_set(self):
    -        proxy = _RequestProxy(HTTPRequest('http://example.com/'),
    -                              dict())
    -        self.assertIs(proxy.auth_username, None)
    -
    -    def test_bad_attribute(self):
    -        proxy = _RequestProxy(HTTPRequest('http://example.com/'),
    -                              dict())
    -        with self.assertRaises(AttributeError):
    -            proxy.foo
    -
    -    def test_defaults_none(self):
    -        proxy = _RequestProxy(HTTPRequest('http://example.com/'), None)
    -        self.assertIs(proxy.auth_username, None)
    -
    -
    -class HTTPResponseTestCase(unittest.TestCase):
    -    def test_str(self):
    -        response = HTTPResponse(HTTPRequest('http://example.com'),
    -                                200, headers={}, buffer=BytesIO())
    -        s = str(response)
    -        self.assertTrue(s.startswith('HTTPResponse('))
    -        self.assertIn('code=200', s)
    -
    -
    -class SyncHTTPClientTest(unittest.TestCase):
    -    def setUp(self):
    -        if IOLoop.configured_class().__name__ == 'TwistedIOLoop':
    -            # TwistedIOLoop only supports the global reactor, so we can't have
    -            # separate IOLoops for client and server threads.
    -            raise unittest.SkipTest(
    -                'Sync HTTPClient not compatible with TwistedIOLoop')
    -        self.server_ioloop = IOLoop()
    -
    -        @gen.coroutine
    -        def init_server():
    -            sock, self.port = bind_unused_port()
    -            app = Application([('/', HelloWorldHandler)])
    -            self.server = HTTPServer(app)
    -            self.server.add_socket(sock)
    -        self.server_ioloop.run_sync(init_server)
    -
    -        self.server_thread = threading.Thread(target=self.server_ioloop.start)
    -        self.server_thread.start()
    -
    -        self.http_client = HTTPClient()
    -
    -    def tearDown(self):
    -        def stop_server():
    -            self.server.stop()
    -            # Delay the shutdown of the IOLoop by several iterations because
    -            # the server may still have some cleanup work left when
    -            # the client finishes with the response (this is noticeable
    -            # with http/2, which leaves a Future with an unexamined
    -            # StreamClosedError on the loop).
    -
    -            @gen.coroutine
    -            def slow_stop():
    -                # The number of iterations is difficult to predict. Typically,
    -                # one is sufficient, although sometimes it needs more.
    -                for i in range(5):
    -                    yield
    -                self.server_ioloop.stop()
    -            self.server_ioloop.add_callback(slow_stop)
    -        self.server_ioloop.add_callback(stop_server)
    -        self.server_thread.join()
    -        self.http_client.close()
    -        self.server_ioloop.close(all_fds=True)
    -
    -    def get_url(self, path):
    -        return 'http://127.0.0.1:%d%s' % (self.port, path)
    -
    -    def test_sync_client(self):
    -        response = self.http_client.fetch(self.get_url('/'))
    -        self.assertEqual(b'Hello world!', response.body)
    -
    -    def test_sync_client_error(self):
    -        # Synchronous HTTPClient raises errors directly; no need for
    -        # response.rethrow()
    -        with self.assertRaises(HTTPError) as assertion:
    -            self.http_client.fetch(self.get_url('/notfound'))
    -        self.assertEqual(assertion.exception.code, 404)
    -
    -
    -class HTTPRequestTestCase(unittest.TestCase):
    -    def test_headers(self):
    -        request = HTTPRequest('http://example.com', headers={'foo': 'bar'})
    -        self.assertEqual(request.headers, {'foo': 'bar'})
    -
    -    def test_headers_setter(self):
    -        request = HTTPRequest('http://example.com')
    -        request.headers = {'bar': 'baz'}
    -        self.assertEqual(request.headers, {'bar': 'baz'})
    -
    -    def test_null_headers_setter(self):
    -        request = HTTPRequest('http://example.com')
    -        request.headers = None
    -        self.assertEqual(request.headers, {})
    -
    -    def test_body(self):
    -        request = HTTPRequest('http://example.com', body='foo')
    -        self.assertEqual(request.body, utf8('foo'))
    -
    -    def test_body_setter(self):
    -        request = HTTPRequest('http://example.com')
    -        request.body = 'foo'
    -        self.assertEqual(request.body, utf8('foo'))
    -
    -    def test_if_modified_since(self):
    -        http_date = datetime.datetime.utcnow()
    -        request = HTTPRequest('http://example.com', if_modified_since=http_date)
    -        self.assertEqual(request.headers,
    -                         {'If-Modified-Since': format_timestamp(http_date)})
    -
    -
    -class HTTPErrorTestCase(unittest.TestCase):
    -    def test_copy(self):
    -        e = HTTPError(403)
    -        e2 = copy.copy(e)
    -        self.assertIsNot(e, e2)
    -        self.assertEqual(e.code, e2.code)
    -
    -    def test_plain_error(self):
    -        e = HTTPError(403)
    -        self.assertEqual(str(e), "HTTP 403: Forbidden")
    -        self.assertEqual(repr(e), "HTTP 403: Forbidden")
    -
    -    def test_error_with_response(self):
    -        resp = HTTPResponse(HTTPRequest('http://example.com/'), 403)
    -        with self.assertRaises(HTTPError) as cm:
    -            resp.rethrow()
    -        e = cm.exception
    -        self.assertEqual(str(e), "HTTP 403: Forbidden")
    -        self.assertEqual(repr(e), "HTTP 403: Forbidden")
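
Aside: the Basic-auth vectors asserted in test_basic_auth and
test_basic_auth_unicode above come straight from RFC 7617 and can be
reproduced with the standard library alone. A minimal sketch (the helper
name basic_auth_header is ours, not part of Tornado):

    import base64
    import unicodedata

    def basic_auth_header(username, password):
        # RFC 7617: join as "user:password", normalize to NFC,
        # encode as UTF-8, then base64 the result.
        creds = unicodedata.normalize("NFC", u"%s:%s" % (username, password))
        return b"Basic " + base64.b64encode(creds.encode("utf-8"))

    # The same vectors the deleted tests assert:
    assert basic_auth_header(u"Aladdin", u"open sesame") == \
        b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=="
    assert basic_auth_header(u"test", u"123\u00a3") == b"Basic dGVzdDoxMjPCow=="
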
    diff --git a/lib/tornado/test/httpserver_test.py b/lib/tornado/test/httpserver_test.py
    deleted file mode 100755
    index acaf2a00..00000000
    --- a/lib/tornado/test/httpserver_test.py
    +++ /dev/null
    @@ -1,1167 +0,0 @@
    -from __future__ import absolute_import, division, print_function
    -
    -from tornado import gen, netutil
    -from tornado.concurrent import Future
    -from tornado.escape import json_decode, json_encode, utf8, _unicode, recursive_unicode, native_str
    -from tornado.http1connection import HTTP1Connection
    -from tornado.httpclient import HTTPError
    -from tornado.httpserver import HTTPServer
    -from tornado.httputil import HTTPHeaders, HTTPMessageDelegate, HTTPServerConnectionDelegate, ResponseStartLine  # noqa: E501
    -from tornado.iostream import IOStream
    -from tornado.locks import Event
    -from tornado.log import gen_log
    -from tornado.netutil import ssl_options_to_context
    -from tornado.simple_httpclient import SimpleAsyncHTTPClient
    -from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, ExpectLog, gen_test  # noqa: E501
    -from tornado.test.util import unittest, skipOnTravis
    -from tornado.web import Application, RequestHandler, stream_request_body
    -
    -from contextlib import closing
    -import datetime
    -import gzip
    -import os
    -import shutil
    -import socket
    -import ssl
    -import sys
    -import tempfile
    -from io import BytesIO
    -
    -
    -def read_stream_body(stream, callback):
    -    """Reads an HTTP response from `stream` and runs callback with its
    -    start_line, headers and body."""
    -    chunks = []
    -
    -    class Delegate(HTTPMessageDelegate):
    -        def headers_received(self, start_line, headers):
    -            self.headers = headers
    -            self.start_line = start_line
    -
    -        def data_received(self, chunk):
    -            chunks.append(chunk)
    -
    -        def finish(self):
    -            conn.detach()
    -            callback((self.start_line, self.headers, b''.join(chunks)))
    -    conn = HTTP1Connection(stream, True)
    -    conn.read_response(Delegate())
    -
    -
    -class HandlerBaseTestCase(AsyncHTTPTestCase):
    -    def get_app(self):
    -        return Application([('/', self.__class__.Handler)])
    -
    -    def fetch_json(self, *args, **kwargs):
    -        response = self.fetch(*args, **kwargs)
    -        response.rethrow()
    -        return json_decode(response.body)
    -
    -
    -class HelloWorldRequestHandler(RequestHandler):
    -    def initialize(self, protocol="http"):
    -        self.expected_protocol = protocol
    -
    -    def get(self):
    -        if self.request.protocol != self.expected_protocol:
    -            raise Exception("unexpected protocol")
    -        self.finish("Hello world")
    -
    -    def post(self):
    -        self.finish("Got %d bytes in POST" % len(self.request.body))
    -
    -
    -# In pre-1.0 versions of openssl, SSLv23 clients always send SSLv2
    -# ClientHello messages, which are rejected by SSLv3 and TLSv1
    -# servers.  Note that while the OPENSSL_VERSION_INFO was formally
    -# introduced in python3.2, it was present but undocumented in
    -# python 2.7
    -skipIfOldSSL = unittest.skipIf(
    -    getattr(ssl, 'OPENSSL_VERSION_INFO', (0, 0)) < (1, 0),
    -    "old version of ssl module and/or openssl")
    -
    -
    -class BaseSSLTest(AsyncHTTPSTestCase):
    -    def get_app(self):
    -        return Application([('/', HelloWorldRequestHandler,
    -                             dict(protocol="https"))])
    -
    -
    -class SSLTestMixin(object):
    -    def get_ssl_options(self):
    -        return dict(ssl_version=self.get_ssl_version(),  # type: ignore
    -                    **AsyncHTTPSTestCase.get_ssl_options())
    -
    -    def get_ssl_version(self):
    -        raise NotImplementedError()
    -
    -    def test_ssl(self):
    -        response = self.fetch('/')
    -        self.assertEqual(response.body, b"Hello world")
    -
    -    def test_large_post(self):
    -        response = self.fetch('/',
    -                              method='POST',
    -                              body='A' * 5000)
    -        self.assertEqual(response.body, b"Got 5000 bytes in POST")
    -
    -    def test_non_ssl_request(self):
    -        # Make sure the server closes the connection when it gets a non-ssl
    -        # connection, rather than waiting for a timeout or otherwise
    -        # misbehaving.
    -        with ExpectLog(gen_log, '(SSL Error|uncaught exception)'):
    -            with ExpectLog(gen_log, 'Uncaught exception', required=False):
    -                with self.assertRaises((IOError, HTTPError)):
    -                    self.fetch(
    -                        self.get_url("/").replace('https:', 'http:'),
    -                        request_timeout=3600,
    -                        connect_timeout=3600,
    -                        raise_error=True)
    -
    -    def test_error_logging(self):
    -        # No stack traces are logged for SSL errors.
    -        with ExpectLog(gen_log, 'SSL Error') as expect_log:
    -            with self.assertRaises((IOError, HTTPError)):
    -                self.fetch(self.get_url("/").replace("https:", "http:"),
    -                           raise_error=True)
    -        self.assertFalse(expect_log.logged_stack)
    -
    -# Python's SSL implementation differs significantly between versions.
    -# For example, SSLv3 and TLSv1 throw an exception if you try to read
    -# from the socket before the handshake is complete, but the default
    -# of SSLv23 allows it.
    -
    -
    -class SSLv23Test(BaseSSLTest, SSLTestMixin):
    -    def get_ssl_version(self):
    -        return ssl.PROTOCOL_SSLv23
    -
    -
    -@skipIfOldSSL
    -class SSLv3Test(BaseSSLTest, SSLTestMixin):
    -    def get_ssl_version(self):
    -        return ssl.PROTOCOL_SSLv3
    -
    -
    -@skipIfOldSSL
    -class TLSv1Test(BaseSSLTest, SSLTestMixin):
    -    def get_ssl_version(self):
    -        return ssl.PROTOCOL_TLSv1
    -
    -
    -class SSLContextTest(BaseSSLTest, SSLTestMixin):
    -    def get_ssl_options(self):
    -        context = ssl_options_to_context(
    -            AsyncHTTPSTestCase.get_ssl_options(self))
    -        assert isinstance(context, ssl.SSLContext)
    -        return context
    -
    -
    -class BadSSLOptionsTest(unittest.TestCase):
    -    def test_missing_arguments(self):
    -        application = Application()
    -        self.assertRaises(KeyError, HTTPServer, application, ssl_options={
    -            "keyfile": "/__missing__.crt",
    -        })
    -
    -    def test_missing_key(self):
    -        """A missing SSL key should cause an immediate exception."""
    -
    -        application = Application()
    -        module_dir = os.path.dirname(__file__)
    -        existing_certificate = os.path.join(module_dir, 'test.crt')
    -        existing_key = os.path.join(module_dir, 'test.key')
    -
    -        self.assertRaises((ValueError, IOError),
    -                          HTTPServer, application, ssl_options={
    -                              "certfile": "/__mising__.crt",
    -        })
    -        self.assertRaises((ValueError, IOError),
    -                          HTTPServer, application, ssl_options={
    -                              "certfile": existing_certificate,
    -                              "keyfile": "/__missing__.key"
    -        })
    -
    -        # This actually works because both files exist
    -        HTTPServer(application, ssl_options={
    -                   "certfile": existing_certificate,
    -                   "keyfile": existing_key,
    -                   })
    -
    -
    -class MultipartTestHandler(RequestHandler):
    -    def post(self):
    -        self.finish({"header": self.request.headers["X-Header-Encoding-Test"],
    -                     "argument": self.get_argument("argument"),
    -                     "filename": self.request.files["files"][0].filename,
    -                     "filebody": _unicode(self.request.files["files"][0]["body"]),
    -                     })
    -
    -
    -# This test is also called from wsgi_test
    -class HTTPConnectionTest(AsyncHTTPTestCase):
    -    def get_handlers(self):
    -        return [("/multipart", MultipartTestHandler),
    -                ("/hello", HelloWorldRequestHandler)]
    -
    -    def get_app(self):
    -        return Application(self.get_handlers())
    -
    -    def raw_fetch(self, headers, body, newline=b"\r\n"):
    -        with closing(IOStream(socket.socket())) as stream:
    -            self.io_loop.run_sync(lambda: stream.connect(('127.0.0.1', self.get_http_port())))
    -            stream.write(
    -                newline.join(headers +
    -                             [utf8("Content-Length: %d" % len(body))]) +
    -                newline + newline + body)
    -            read_stream_body(stream, self.stop)
    -            start_line, headers, body = self.wait()
    -            return body
    -
    -    def test_multipart_form(self):
    -        # Encodings here are tricky:  Headers are latin1, bodies can be
    -        # anything (we use utf8 by default).
    -        response = self.raw_fetch([
    -            b"POST /multipart HTTP/1.0",
    -            b"Content-Type: multipart/form-data; boundary=1234567890",
    -            b"X-Header-encoding-test: \xe9",
    -        ],
    -            b"\r\n".join([
    -                b"Content-Disposition: form-data; name=argument",
    -                b"",
    -                u"\u00e1".encode("utf-8"),
    -                b"--1234567890",
    -                u'Content-Disposition: form-data; name="files"; filename="\u00f3"'.encode("utf8"),
    -                b"",
    -                u"\u00fa".encode("utf-8"),
    -                b"--1234567890--",
    -                b"",
    -            ]))
    -        data = json_decode(response)
    -        self.assertEqual(u"\u00e9", data["header"])
    -        self.assertEqual(u"\u00e1", data["argument"])
    -        self.assertEqual(u"\u00f3", data["filename"])
    -        self.assertEqual(u"\u00fa", data["filebody"])
    -
    -    def test_newlines(self):
    -        # We support both CRLF and bare LF as line separators.
    -        for newline in (b"\r\n", b"\n"):
    -            response = self.raw_fetch([b"GET /hello HTTP/1.0"], b"",
    -                                      newline=newline)
    -            self.assertEqual(response, b'Hello world')
    -
    -    @gen_test
    -    def test_100_continue(self):
    -        # Run through a 100-continue interaction by hand:
    -        # When given Expect: 100-continue, we get a 100 response after the
    -        # headers, and then the real response after the body.
    -        stream = IOStream(socket.socket())
    -        yield stream.connect(("127.0.0.1", self.get_http_port()))
    -        yield stream.write(b"\r\n".join([
    -            b"POST /hello HTTP/1.1",
    -            b"Content-Length: 1024",
    -            b"Expect: 100-continue",
    -            b"Connection: close",
    -            b"\r\n"]))
    -        data = yield stream.read_until(b"\r\n\r\n")
    -        self.assertTrue(data.startswith(b"HTTP/1.1 100 "), data)
    -        stream.write(b"a" * 1024)
    -        first_line = yield stream.read_until(b"\r\n")
    -        self.assertTrue(first_line.startswith(b"HTTP/1.1 200"), first_line)
    -        header_data = yield stream.read_until(b"\r\n\r\n")
    -        headers = HTTPHeaders.parse(native_str(header_data.decode('latin1')))
    -        body = yield stream.read_bytes(int(headers["Content-Length"]))
    -        self.assertEqual(body, b"Got 1024 bytes in POST")
    -        stream.close()
    -
    -
    -class EchoHandler(RequestHandler):
    -    def get(self):
    -        self.write(recursive_unicode(self.request.arguments))
    -
    -    def post(self):
    -        self.write(recursive_unicode(self.request.arguments))
    -
    -
    -class TypeCheckHandler(RequestHandler):
    -    def prepare(self):
    -        self.errors = {}
    -        fields = [
    -            ('method', str),
    -            ('uri', str),
    -            ('version', str),
    -            ('remote_ip', str),
    -            ('protocol', str),
    -            ('host', str),
    -            ('path', str),
    -            ('query', str),
    -        ]
    -        for field, expected_type in fields:
    -            self.check_type(field, getattr(self.request, field), expected_type)
    -
    -        self.check_type('header_key', list(self.request.headers.keys())[0], str)
    -        self.check_type('header_value', list(self.request.headers.values())[0], str)
    -
    -        self.check_type('cookie_key', list(self.request.cookies.keys())[0], str)
    -        self.check_type('cookie_value', list(self.request.cookies.values())[0].value, str)
    -        # secure cookies
    -
    -        self.check_type('arg_key', list(self.request.arguments.keys())[0], str)
    -        self.check_type('arg_value', list(self.request.arguments.values())[0][0], bytes)
    -
    -    def post(self):
    -        self.check_type('body', self.request.body, bytes)
    -        self.write(self.errors)
    -
    -    def get(self):
    -        self.write(self.errors)
    -
    -    def check_type(self, name, obj, expected_type):
    -        actual_type = type(obj)
    -        if expected_type != actual_type:
    -            self.errors[name] = "expected %s, got %s" % (expected_type,
    -                                                         actual_type)
    -
    -
    -class HTTPServerTest(AsyncHTTPTestCase):
    -    def get_app(self):
    -        return Application([("/echo", EchoHandler),
    -                            ("/typecheck", TypeCheckHandler),
    -                            ("//doubleslash", EchoHandler),
    -                            ])
    -
    -    def test_query_string_encoding(self):
    -        response = self.fetch("/echo?foo=%C3%A9")
    -        data = json_decode(response.body)
    -        self.assertEqual(data, {u"foo": [u"\u00e9"]})
    -
    -    def test_empty_query_string(self):
    -        response = self.fetch("/echo?foo=&foo=")
    -        data = json_decode(response.body)
    -        self.assertEqual(data, {u"foo": [u"", u""]})
    -
    -    def test_empty_post_parameters(self):
    -        response = self.fetch("/echo", method="POST", body="foo=&bar=")
    -        data = json_decode(response.body)
    -        self.assertEqual(data, {u"foo": [u""], u"bar": [u""]})
    -
    -    def test_types(self):
    -        headers = {"Cookie": "foo=bar"}
    -        response = self.fetch("/typecheck?foo=bar", headers=headers)
    -        data = json_decode(response.body)
    -        self.assertEqual(data, {})
    -
    -        response = self.fetch("/typecheck", method="POST", body="foo=bar", headers=headers)
    -        data = json_decode(response.body)
    -        self.assertEqual(data, {})
    -
    -    def test_double_slash(self):
    -        # urlparse.urlsplit (which tornado.httpserver used to use
    -        # incorrectly) would parse paths beginning with "//" as
    -        # protocol-relative urls.
    -        response = self.fetch("//doubleslash")
    -        self.assertEqual(200, response.code)
    -        self.assertEqual(json_decode(response.body), {})
    -
    -    def test_malformed_body(self):
    -        # parse_qs is pretty forgiving, but it will fail on python 3
    -        # if the data is not utf8.  On python 2 parse_qs will work,
    -        # but then the recursive_unicode call in EchoHandler will
    -        # fail.
    -        if str is bytes:
    -            return
    -        with ExpectLog(gen_log, 'Invalid x-www-form-urlencoded body'):
    -            response = self.fetch(
    -                '/echo', method="POST",
    -                headers={'Content-Type': 'application/x-www-form-urlencoded'},
    -                body=b'\xe9')
    -        self.assertEqual(200, response.code)
    -        self.assertEqual(b'{}', response.body)
    -
    -
    -class HTTPServerRawTest(AsyncHTTPTestCase):
    -    def get_app(self):
    -        return Application([
    -            ('/echo', EchoHandler),
    -        ])
    -
    -    def setUp(self):
    -        super(HTTPServerRawTest, self).setUp()
    -        self.stream = IOStream(socket.socket())
    -        self.io_loop.run_sync(lambda: self.stream.connect(('127.0.0.1', self.get_http_port())))
    -
    -    def tearDown(self):
    -        self.stream.close()
    -        super(HTTPServerRawTest, self).tearDown()
    -
    -    def test_empty_request(self):
    -        self.stream.close()
    -        self.io_loop.add_timeout(datetime.timedelta(seconds=0.001), self.stop)
    -        self.wait()
    -
    -    def test_malformed_first_line_response(self):
    -        with ExpectLog(gen_log, '.*Malformed HTTP request line'):
    -            self.stream.write(b'asdf\r\n\r\n')
    -            read_stream_body(self.stream, self.stop)
    -            start_line, headers, response = self.wait()
    -            self.assertEqual('HTTP/1.1', start_line.version)
    -            self.assertEqual(400, start_line.code)
    -            self.assertEqual('Bad Request', start_line.reason)
    -
    -    def test_malformed_first_line_log(self):
    -        with ExpectLog(gen_log, '.*Malformed HTTP request line'):
    -            self.stream.write(b'asdf\r\n\r\n')
    -            # TODO: need an async version of ExpectLog so we don't need
    -            # hard-coded timeouts here.
    -            self.io_loop.add_timeout(datetime.timedelta(seconds=0.05),
    -                                     self.stop)
    -            self.wait()
    -
    -    def test_malformed_headers(self):
    -        with ExpectLog(gen_log, '.*Malformed HTTP message.*no colon in header line'):
    -            self.stream.write(b'GET / HTTP/1.0\r\nasdf\r\n\r\n')
    -            self.io_loop.add_timeout(datetime.timedelta(seconds=0.05),
    -                                     self.stop)
    -            self.wait()
    -
    -    def test_chunked_request_body(self):
    -        # Chunked requests are not widely supported and we don't have a way
    -        # to generate them in AsyncHTTPClient, but HTTPServer will read them.
    -        self.stream.write(b"""\
    -POST /echo HTTP/1.1
    -Transfer-Encoding: chunked
    -Content-Type: application/x-www-form-urlencoded
    -
    -4
    -foo=
    -3
    -bar
    -0
    -
    -""".replace(b"\n", b"\r\n"))
    -        read_stream_body(self.stream, self.stop)
    -        start_line, headers, response = self.wait()
    -        self.assertEqual(json_decode(response), {u'foo': [u'bar']})
    -
    -    def test_chunked_request_uppercase(self):
    -        # As per RFC 2616 section 3.6, "Transfer-Encoding" header's value is
    -        # case-insensitive.
    -        self.stream.write(b"""\
    -POST /echo HTTP/1.1
    -Transfer-Encoding: Chunked
    -Content-Type: application/x-www-form-urlencoded
    -
    -4
    -foo=
    -3
    -bar
    -0
    -
    -""".replace(b"\n", b"\r\n"))
    -        read_stream_body(self.stream, self.stop)
    -        start_line, headers, response = self.wait()
    -        self.assertEqual(json_decode(response), {u'foo': [u'bar']})
    -
    -    @gen_test
    -    def test_invalid_content_length(self):
    -        with ExpectLog(gen_log, '.*Only integer Content-Length is allowed'):
    -            self.stream.write(b"""\
    -POST /echo HTTP/1.1
    -Content-Length: foo
    -
    -bar
    -
    -""".replace(b"\n", b"\r\n"))
    -            yield self.stream.read_until_close()
    -
    -
    -class XHeaderTest(HandlerBaseTestCase):
    -    class Handler(RequestHandler):
    -        def get(self):
    -            self.set_header('request-version', self.request.version)
    -            self.write(dict(remote_ip=self.request.remote_ip,
    -                            remote_protocol=self.request.protocol))
    -
    -    def get_httpserver_options(self):
    -        return dict(xheaders=True, trusted_downstream=['5.5.5.5'])
    -
    -    def test_ip_headers(self):
    -        self.assertEqual(self.fetch_json("/")["remote_ip"], "127.0.0.1")
    -
    -        valid_ipv4 = {"X-Real-IP": "4.4.4.4"}
    -        self.assertEqual(
    -            self.fetch_json("/", headers=valid_ipv4)["remote_ip"],
    -            "4.4.4.4")
    -
    -        valid_ipv4_list = {"X-Forwarded-For": "127.0.0.1, 4.4.4.4"}
    -        self.assertEqual(
    -            self.fetch_json("/", headers=valid_ipv4_list)["remote_ip"],
    -            "4.4.4.4")
    -
    -        valid_ipv6 = {"X-Real-IP": "2620:0:1cfe:face:b00c::3"}
    -        self.assertEqual(
    -            self.fetch_json("/", headers=valid_ipv6)["remote_ip"],
    -            "2620:0:1cfe:face:b00c::3")
    -
    -        valid_ipv6_list = {"X-Forwarded-For": "::1, 2620:0:1cfe:face:b00c::3"}
    -        self.assertEqual(
    -            self.fetch_json("/", headers=valid_ipv6_list)["remote_ip"],
    -            "2620:0:1cfe:face:b00c::3")
    -
    -        invalid_chars = {"X-Real-IP": "4.4.4.4
    -
    -'
    -                       for p in paths)
    -
    -    def render_embed_js(self, js_embed):
    -        """Default method used to render the final embedded js for the
    -        rendered webpage.
    -
    -        Override this method in a sub-classed controller to change the output.
    -        """
-        return b'<script type="text/javascript">\n//<![CDATA[\n' + \
-            b'\n'.join(js_embed) + b'\n//]]>\n</script>'
    -
    -    def render_linked_css(self, css_files):
    -        """Default method used to render the final css links for the
    -        rendered webpage.
    -
    -        Override this method in a sub-classed controller to change the output.
    -        """
    -        paths = []
    -        unique_paths = set()
    -
    -        for path in css_files:
    -            if not is_absolute(path):
    -                path = self.static_url(path)
    -            if path not in unique_paths:
    -                paths.append(path)
    -                unique_paths.add(path)
    -
-        return ''.join('<link href="' + escape.xhtml_escape(p) + '" '
-                       'type="text/css" rel="stylesheet"/>'
-                       for p in paths)
    -
    -    def render_embed_css(self, css_embed):
    -        """Default method used to render the final embedded css for the
    -        rendered webpage.
    -
    -        Override this method in a sub-classed controller to change the output.
    -        """
-        return b'<style type="text/css">\n' + b'\n'.join(css_embed) + \
-            b'\n</style>'
    -
    -    def render_string(self, template_name, **kwargs):
    -        """Generate the given template with the given arguments.
    -
    -        We return the generated byte string (in utf8). To generate and
    -        write a template as a response, use render() above.
    -        """
    -        # If no template_path is specified, use the path of the calling file
    -        template_path = self.get_template_path()
    -        if not template_path:
    -            frame = sys._getframe(0)
    -            web_file = frame.f_code.co_filename
    -            while frame.f_code.co_filename == web_file:
    -                frame = frame.f_back
    -            template_path = os.path.dirname(frame.f_code.co_filename)
    -        with RequestHandler._template_loader_lock:
    -            if template_path not in RequestHandler._template_loaders:
    -                loader = self.create_template_loader(template_path)
    -                RequestHandler._template_loaders[template_path] = loader
    -            else:
    -                loader = RequestHandler._template_loaders[template_path]
    -        t = loader.load(template_name)
    -        namespace = self.get_template_namespace()
    -        namespace.update(kwargs)
    -        return t.generate(**namespace)
    -
    -    def get_template_namespace(self):
    -        """Returns a dictionary to be used as the default template namespace.
    -
    -        May be overridden by subclasses to add or modify values.
    -
    -        The results of this method will be combined with additional
    -        defaults in the `tornado.template` module and keyword arguments
    -        to `render` or `render_string`.
    -        """
    -        namespace = dict(
    -            handler=self,
    -            request=self.request,
    -            current_user=self.current_user,
    -            locale=self.locale,
    -            _=self.locale.translate,
    -            pgettext=self.locale.pgettext,
    -            static_url=self.static_url,
    -            xsrf_form_html=self.xsrf_form_html,
    -            reverse_url=self.reverse_url
    -        )
    -        namespace.update(self.ui)
    -        return namespace
    -
    -    def create_template_loader(self, template_path):
    -        """Returns a new template loader for the given path.
    -
    -        May be overridden by subclasses.  By default returns a
    -        directory-based loader on the given path, using the
    -        ``autoescape`` and ``template_whitespace`` application
    -        settings.  If a ``template_loader`` application setting is
    -        supplied, uses that instead.
    -        """
    -        settings = self.application.settings
    -        if "template_loader" in settings:
    -            return settings["template_loader"]
    -        kwargs = {}
    -        if "autoescape" in settings:
    -            # autoescape=None means "no escaping", so we have to be sure
    -            # to only pass this kwarg if the user asked for it.
    -            kwargs["autoescape"] = settings["autoescape"]
    -        if "template_whitespace" in settings:
    -            kwargs["whitespace"] = settings["template_whitespace"]
    -        return template.Loader(template_path, **kwargs)
    -
    -    def flush(self, include_footers=False, callback=None):
    -        """Flushes the current output buffer to the network.
    -
    -        The ``callback`` argument, if given, can be used for flow control:
    -        it will be run when all flushed data has been written to the socket.
    -        Note that only one flush callback can be outstanding at a time;
    -        if another flush occurs before the previous flush's callback
    -        has been run, the previous callback will be discarded.
    -
    -        .. versionchanged:: 4.0
    -           Now returns a `.Future` if no callback is given.
    -
    -        .. deprecated:: 5.1
    -
    -           The ``callback`` argument is deprecated and will be removed in
    -           Tornado 6.0.
    -        """
    -        chunk = b"".join(self._write_buffer)
    -        self._write_buffer = []
    -        if not self._headers_written:
    -            self._headers_written = True
    -            for transform in self._transforms:
    -                self._status_code, self._headers, chunk = \
    -                    transform.transform_first_chunk(
    -                        self._status_code, self._headers,
    -                        chunk, include_footers)
    -            # Ignore the chunk and only write the headers for HEAD requests
    -            if self.request.method == "HEAD":
    -                chunk = None
    -
    -            # Finalize the cookie headers (which have been stored in a side
    -            # object so an outgoing cookie could be overwritten before it
    -            # is sent).
    -            if hasattr(self, "_new_cookie"):
    -                for cookie in self._new_cookie.values():
    -                    self.add_header("Set-Cookie", cookie.OutputString(None))
    -
    -            start_line = httputil.ResponseStartLine('',
    -                                                    self._status_code,
    -                                                    self._reason)
    -            return self.request.connection.write_headers(
    -                start_line, self._headers, chunk, callback=callback)
    -        else:
    -            for transform in self._transforms:
    -                chunk = transform.transform_chunk(chunk, include_footers)
    -            # Ignore the chunk and only write the headers for HEAD requests
    -            if self.request.method != "HEAD":
    -                return self.request.connection.write(chunk, callback=callback)
    -            else:
    -                future = Future()
    -                future.set_result(None)
    -                return future
    -
    -    def finish(self, chunk=None):
    -        """Finishes this response, ending the HTTP request.
    -
    -        Passing a ``chunk`` to ``finish()`` is equivalent to passing that
    -        chunk to ``write()`` and then calling ``finish()`` with no arguments.
    -
    -        Returns a `.Future` which may optionally be awaited to track the sending
    -        of the response to the client. This `.Future` resolves when all the response
    -        data has been sent, and raises an error if the connection is closed before all
    -        data can be sent.
    -
    -        .. versionchanged:: 5.1
    -
    -           Now returns a `.Future` instead of ``None``.
    -        """
    -        if self._finished:
    -            raise RuntimeError("finish() called twice")
    -
    -        if chunk is not None:
    -            self.write(chunk)
    -
    -        # Automatically support ETags and add the Content-Length header if
    -        # we have not flushed any content yet.
    -        if not self._headers_written:
    -            if (self._status_code == 200 and
    -                self.request.method in ("GET", "HEAD") and
    -                    "Etag" not in self._headers):
    -                self.set_etag_header()
    -                if self.check_etag_header():
    -                    self._write_buffer = []
    -                    self.set_status(304)
    -            if (self._status_code in (204, 304) or
    -                    (self._status_code >= 100 and self._status_code < 200)):
    -                assert not self._write_buffer, "Cannot send body with %s" % self._status_code
    -                self._clear_headers_for_304()
    -            elif "Content-Length" not in self._headers:
    -                content_length = sum(len(part) for part in self._write_buffer)
    -                self.set_header("Content-Length", content_length)
    -
    -        if hasattr(self.request, "connection"):
    -            # Now that the request is finished, clear the callback we
    -            # set on the HTTPConnection (which would otherwise prevent the
    -            # garbage collection of the RequestHandler when there
    -            # are keepalive connections)
    -            self.request.connection.set_close_callback(None)
    -
    -        future = self.flush(include_footers=True)
    -        self.request.connection.finish()
    -        self._log()
    -        self._finished = True
    -        self.on_finish()
    -        self._break_cycles()
    -        return future
    -
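# A minimal sketch of the write()/finish() flow documented above; the
# handler name and chunks are illustrative, not part of the removed file.
import tornado.web

class ChunkedHandler(tornado.web.RequestHandler):
    def get(self):
        self.write("first ")   # buffered; no headers sent yet
        self.write("second")   # still buffered
        self.finish()          # sets Etag/Content-Length, sends the response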
    -    def detach(self):
    -        """Take control of the underlying stream.
    -
    -        Returns the underlying `.IOStream` object and stops all
    -        further HTTP processing. Intended for implementing protocols
    -        like websockets that tunnel over an HTTP handshake.
    -
    -        This method is only supported when HTTP/1.1 is used.
    -
    -        .. versionadded:: 5.1
    -        """
    -        self._finished = True
    -        return self.request.connection.detach()
    -
    -    def _break_cycles(self):
    -        # Break up a reference cycle between this handler and the
    -        # _ui_module closures to allow for faster GC on CPython.
    -        self.ui = None
    -
    -    def send_error(self, status_code=500, **kwargs):
    -        """Sends the given HTTP error code to the browser.
    -
    -        If `flush()` has already been called, it is not possible to send
    -        an error, so this method will simply terminate the response.
    -        If output has been written but not yet flushed, it will be discarded
    -        and replaced with the error page.
    -
    -        Override `write_error()` to customize the error page that is returned.
    -        Additional keyword arguments are passed through to `write_error`.
    -        """
    -        if self._headers_written:
    -            gen_log.error("Cannot send error response after headers written")
    -            if not self._finished:
    -                # If we get an error between writing headers and finishing,
    -                # we are unlikely to be able to finish due to a
    -                # Content-Length mismatch. Try anyway to release the
    -                # socket.
    -                try:
    -                    self.finish()
    -                except Exception:
    -                    gen_log.error("Failed to flush partial response",
    -                                  exc_info=True)
    -            return
    -        self.clear()
    -
    -        reason = kwargs.get('reason')
    -        if 'exc_info' in kwargs:
    -            exception = kwargs['exc_info'][1]
    -            if isinstance(exception, HTTPError) and exception.reason:
    -                reason = exception.reason
    -        self.set_status(status_code, reason=reason)
    -        try:
    -            self.write_error(status_code, **kwargs)
    -        except Exception:
    -            app_log.error("Uncaught exception in write_error", exc_info=True)
    -        if not self._finished:
    -            self.finish()
    -
    -    def write_error(self, status_code, **kwargs):
    -        """Override to implement custom error pages.
    -
    -        ``write_error`` may call `write`, `render`, `set_header`, etc
    -        to produce output as usual.
    -
    -        If this error was caused by an uncaught exception (including
    -        HTTPError), an ``exc_info`` triple will be available as
    -        ``kwargs["exc_info"]``.  Note that this exception may not be
    -        the "current" exception for purposes of methods like
    -        ``sys.exc_info()`` or ``traceback.format_exc``.
    -        """
    -        if self.settings.get("serve_traceback") and "exc_info" in kwargs:
    -            # in debug mode, try to send a traceback
    -            self.set_header('Content-Type', 'text/plain')
    -            for line in traceback.format_exception(*kwargs["exc_info"]):
    -                self.write(line)
    -            self.finish()
    -        else:
    -            self.finish("<html><title>%(code)d: %(message)s</title>"
    -                        "<body>%(code)d: %(message)s</body></html>" % {
    -                            "code": status_code,
    -                            "message": self._reason,
    -                        })
    -
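# A hedged sketch of a custom error page via write_error(), as the
# docstring above suggests; the JSON shape is an assumption.
import tornado.web

class ApiHandler(tornado.web.RequestHandler):
    def write_error(self, status_code, **kwargs):
        # finish() with a dict emits JSON and ends the request
        self.finish({"error": status_code, "reason": self._reason})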
    -    @property
    -    def locale(self):
    -        """The locale for the current session.
    -
    -        Determined by either `get_user_locale`, which you can override to
    -        set the locale based on, e.g., a user preference stored in a
    -        database, or `get_browser_locale`, which uses the ``Accept-Language``
    -        header.
    -
    -        .. versionchanged:: 4.1
    -           Added a property setter.
    -        """
    -        if not hasattr(self, "_locale"):
    -            self._locale = self.get_user_locale()
    -            if not self._locale:
    -                self._locale = self.get_browser_locale()
    -                assert self._locale
    -        return self._locale
    -
    -    @locale.setter
    -    def locale(self, value):
    -        self._locale = value
    -
    -    def get_user_locale(self):
    -        """Override to determine the locale from the authenticated user.
    -
    -        If None is returned, we fall back to `get_browser_locale()`.
    -
    -        This method should return a `tornado.locale.Locale` object,
    -        most likely obtained via a call like ``tornado.locale.get("en")``
    -        """
    -        return None
    -
    -    def get_browser_locale(self, default="en_US"):
    -        """Determines the user's locale from ``Accept-Language`` header.
    -
    -        See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
    -        """
    -        if "Accept-Language" in self.request.headers:
    -            languages = self.request.headers["Accept-Language"].split(",")
    -            locales = []
    -            for language in languages:
    -                parts = language.strip().split(";")
    -                if len(parts) > 1 and parts[1].startswith("q="):
    -                    try:
    -                        score = float(parts[1][2:])
    -                    except (ValueError, TypeError):
    -                        score = 0.0
    -                else:
    -                    score = 1.0
    -                locales.append((parts[0], score))
    -            if locales:
    -                locales.sort(key=lambda pair: pair[1], reverse=True)
    -                codes = [l[0] for l in locales]
    -                return locale.get(*codes)
    -        return locale.get(default)
    -
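# Sketch of a per-user locale via get_user_locale(), falling back to the
# Accept-Language parsing above; the user "locale" field is an assumption.
import tornado.locale
import tornado.web

class LocalizedHandler(tornado.web.RequestHandler):
    def get_user_locale(self):
        user = self.current_user
        if user and user.get("locale"):
            return tornado.locale.get(user["locale"])
        return None  # fall back to get_browser_locale()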
    -    @property
    -    def current_user(self):
    -        """The authenticated user for this request.
    -
    -        This is set in one of two ways:
    -
    -        * A subclass may override `get_current_user()`, which will be called
    -          automatically the first time ``self.current_user`` is accessed.
    -          `get_current_user()` will only be called once per request,
    -          and is cached for future access::
    -
    -              def get_current_user(self):
    -                  user_cookie = self.get_secure_cookie("user")
    -                  if user_cookie:
    -                      return json.loads(user_cookie)
    -                  return None
    -
    -        * It may be set as a normal variable, typically from an overridden
    -          `prepare()`::
    -
    -              @gen.coroutine
    -              def prepare(self):
    -                  user_id_cookie = self.get_secure_cookie("user_id")
    -                  if user_id_cookie:
    -                      self.current_user = yield load_user(user_id_cookie)
    -
    -        Note that `prepare()` may be a coroutine while `get_current_user()`
    -        may not, so the latter form is necessary if loading the user requires
    -        asynchronous operations.
    -
    -        The user object may be any type of the application's choosing.
    -        """
    -        if not hasattr(self, "_current_user"):
    -            self._current_user = self.get_current_user()
    -        return self._current_user
    -
    -    @current_user.setter
    -    def current_user(self, value):
    -        self._current_user = value
    -
    -    def get_current_user(self):
    -        """Override to determine the current user from, e.g., a cookie.
    -
    -        This method may not be a coroutine.
    -        """
    -        return None
    -
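# Minimal cookie-based authentication sketch using the hooks above; the
# cookie name and JSON payload are assumptions.
import json
import tornado.web

class BaseHandler(tornado.web.RequestHandler):
    def get_current_user(self):
        user_cookie = self.get_secure_cookie("user")  # needs cookie_secret
        return json.loads(user_cookie) if user_cookie else None

class ProfileHandler(BaseHandler):
    @tornado.web.authenticated  # redirects to login_url when logged out
    def get(self):
        self.write("Hello, %s" % self.current_user["name"])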
    -    def get_login_url(self):
    -        """Override to customize the login URL based on the request.
    -
    -        By default, we use the ``login_url`` application setting.
    -        """
    -        self.require_setting("login_url", "@tornado.web.authenticated")
    -        return self.application.settings["login_url"]
    -
    -    def get_template_path(self):
    -        """Override to customize template path for each handler.
    -
    -        By default, we use the ``template_path`` application setting.
    -        Return None to load templates relative to the calling file.
    -        """
    -        return self.application.settings.get("template_path")
    -
    -    @property
    -    def xsrf_token(self):
    -        """The XSRF-prevention token for the current user/session.
    -
    -        To prevent cross-site request forgery, we set an '_xsrf' cookie
    -        and include the same '_xsrf' value as an argument with all POST
    -        requests. If the two do not match, we reject the form submission
    -        as a potential forgery.
    -
    -        See http://en.wikipedia.org/wiki/Cross-site_request_forgery
    -
    -        This property is of type `bytes`, but it contains only ASCII
    -        characters. If a character string is required, there is no
    -        need to base64-encode it; just decode the byte string as
    -        UTF-8.
    -
    -        .. versionchanged:: 3.2.2
    -           The xsrf token will now have a random mask applied in every
    -           request, which makes it safe to include the token in pages
    -           that are compressed.  See http://breachattack.com for more
    -           information on the issue fixed by this change.  Old (version 1)
    -           cookies will be converted to version 2 when this method is called
    -           unless the ``xsrf_cookie_version`` `Application` setting is
    -           set to 1.
    -
    -        .. versionchanged:: 4.3
    -           The ``xsrf_cookie_kwargs`` `Application` setting may be
    -           used to supply additional cookie options (which will be
    -           passed directly to `set_cookie`). For example,
    -           ``xsrf_cookie_kwargs=dict(httponly=True, secure=True)``
    -           will set the ``secure`` and ``httponly`` flags on the
    -           ``_xsrf`` cookie.
    -        """
    -        if not hasattr(self, "_xsrf_token"):
    -            version, token, timestamp = self._get_raw_xsrf_token()
    -            output_version = self.settings.get("xsrf_cookie_version", 2)
    -            cookie_kwargs = self.settings.get("xsrf_cookie_kwargs", {})
    -            if output_version == 1:
    -                self._xsrf_token = binascii.b2a_hex(token)
    -            elif output_version == 2:
    -                mask = os.urandom(4)
    -                self._xsrf_token = b"|".join([
    -                    b"2",
    -                    binascii.b2a_hex(mask),
    -                    binascii.b2a_hex(_websocket_mask(mask, token)),
    -                    utf8(str(int(timestamp)))])
    -            else:
    -                raise ValueError("unknown xsrf cookie version %d" %
    -                                 output_version)
    -            if version is None:
    -                expires_days = 30 if self.current_user else None
    -                self.set_cookie("_xsrf", self._xsrf_token,
    -                                expires_days=expires_days,
    -                                **cookie_kwargs)
    -        return self._xsrf_token
    -
    -    def _get_raw_xsrf_token(self):
    -        """Read or generate the xsrf token in its raw form.
    -
    -        The raw_xsrf_token is a tuple containing:
    -
    -        * version: the version of the cookie from which this token was read,
    -          or None if we generated a new token in this request.
    -        * token: the raw token data; random (non-ascii) bytes.
    -        * timestamp: the time this token was generated (will not be accurate
    -          for version 1 cookies)
    -        """
    -        if not hasattr(self, '_raw_xsrf_token'):
    -            cookie = self.get_cookie("_xsrf")
    -            if cookie:
    -                version, token, timestamp = self._decode_xsrf_token(cookie)
    -            else:
    -                version, token, timestamp = None, None, None
    -            if token is None:
    -                version = None
    -                token = os.urandom(16)
    -                timestamp = time.time()
    -            self._raw_xsrf_token = (version, token, timestamp)
    -        return self._raw_xsrf_token
    -
    -    def _decode_xsrf_token(self, cookie):
    -        """Convert a cookie string into a the tuple form returned by
    -        _get_raw_xsrf_token.
    -        """
    -
    -        try:
    -            m = _signed_value_version_re.match(utf8(cookie))
    -
    -            if m:
    -                version = int(m.group(1))
    -                if version == 2:
    -                    _, mask, masked_token, timestamp = cookie.split("|")
    -
    -                    mask = binascii.a2b_hex(utf8(mask))
    -                    token = _websocket_mask(
    -                        mask, binascii.a2b_hex(utf8(masked_token)))
    -                    timestamp = int(timestamp)
    -                    return version, token, timestamp
    -                else:
    -                    # Treat unknown versions as not present instead of failing.
    -                    raise Exception("Unknown xsrf cookie version")
    -            else:
    -                version = 1
    -                try:
    -                    token = binascii.a2b_hex(utf8(cookie))
    -                except (binascii.Error, TypeError):
    -                    token = utf8(cookie)
    -                # We don't have a usable timestamp in older versions.
    -                timestamp = int(time.time())
    -                return (version, token, timestamp)
    -        except Exception:
    -            # Catch exceptions and return nothing instead of failing.
    -            gen_log.debug("Uncaught exception in _decode_xsrf_token",
    -                          exc_info=True)
    -            return None, None, None
    -
    -    def check_xsrf_cookie(self):
    -        """Verifies that the ``_xsrf`` cookie matches the ``_xsrf`` argument.
    -
    -        To prevent cross-site request forgery, we set an ``_xsrf``
    -        cookie and include the same value as a non-cookie
    -        field with all ``POST`` requests. If the two do not match, we
    -        reject the form submission as a potential forgery.
    -
    -        The ``_xsrf`` value may be set as either a form field named ``_xsrf``
    -        or in a custom HTTP header named ``X-XSRFToken`` or ``X-CSRFToken``
    -        (the latter is accepted for compatibility with Django).
    -
    -        See http://en.wikipedia.org/wiki/Cross-site_request_forgery
    -
    -        Prior to release 1.1.1, this check was ignored if the HTTP header
    -        ``X-Requested-With: XMLHTTPRequest`` was present.  This exception
    -        has been shown to be insecure and has been removed.  For more
    -        information please see
    -        http://www.djangoproject.com/weblog/2011/feb/08/security/
    -        http://weblog.rubyonrails.org/2011/2/8/csrf-protection-bypass-in-ruby-on-rails
    -
    -        .. versionchanged:: 3.2.2
    -           Added support for cookie version 2.  Both versions 1 and 2 are
    -           supported.
    -        """
    -        token = (self.get_argument("_xsrf", None) or
    -                 self.request.headers.get("X-Xsrftoken") or
    -                 self.request.headers.get("X-Csrftoken"))
    -        if not token:
    -            raise HTTPError(403, "'_xsrf' argument missing from POST")
    -        _, token, _ = self._decode_xsrf_token(token)
    -        _, expected_token, _ = self._get_raw_xsrf_token()
    -        if not token:
    -            raise HTTPError(403, "'_xsrf' argument has invalid format")
    -        if not _time_independent_equals(utf8(token), utf8(expected_token)):
    -            raise HTTPError(403, "XSRF cookie does not match POST argument")
    -
    -    def xsrf_form_html(self):
    -        """An HTML ```` element to be included with all POST forms.
    -
    -        It defines the ``_xsrf`` input value, which we check on all POST
    -        requests to prevent cross-site request forgery. If you have set
    -        the ``xsrf_cookies`` application setting, you must include this
    -        HTML within all of your HTML forms.
    -
    -        In a template, this method should be called with ``{% module
    -        xsrf_form_html() %}``
    -
    -        See `check_xsrf_cookie()` above for more information.
    -        """
    -        return '<input type="hidden" name="_xsrf" value="' + \
    -            escape.xhtml_escape(self.xsrf_token) + '"/>'
    -
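# Sketch of enabling XSRF protection as described above; the route, secret
# and template snippet are assumptions.
import tornado.web

class FormHandler(tornado.web.RequestHandler):
    def post(self):
        self.write("ok")  # reached only if check_xsrf_cookie() passed

application = tornado.web.Application(
    [(r"/submit", FormHandler)],
    cookie_secret="change-me",
    xsrf_cookies=True,  # POSTs must now carry the _xsrf token
)
# In templates, include {% module xsrf_form_html() %} inside each POST form.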
    -    def static_url(self, path, include_host=None, **kwargs):
    -        """Returns a static URL for the given relative static file path.
    -
    -        This method requires you set the ``static_path`` setting in your
    -        application (which specifies the root directory of your static
    -        files).
    -
    -        This method returns a versioned url (by default appending
    -        ``?v=<signature>``), which allows the static files to be
    -        cached indefinitely.  This can be disabled by passing
    -        ``include_version=False`` (in the default implementation;
    -        other static file implementations are not required to support
    -        this, but they may support other options).
    -
    -        By default this method returns URLs relative to the current
    -        host, but if ``include_host`` is true the URL returned will be
    -        absolute.  If this handler has an ``include_host`` attribute,
    -        that value will be used as the default for all `static_url`
    -        calls that do not pass ``include_host`` as a keyword argument.
    -
    -        """
    -        self.require_setting("static_path", "static_url")
    -        get_url = self.settings.get("static_handler_class",
    -                                    StaticFileHandler).make_static_url
    -
    -        if include_host is None:
    -            include_host = getattr(self, "include_host", False)
    -
    -        if include_host:
    -            base = self.request.protocol + "://" + self.request.host
    -        else:
    -            base = ""
    -
    -        return base + get_url(self.settings, path, **kwargs)
    -
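# Sketch of static_url() with the static_path setting; file names and the
# directory are illustrative.
import tornado.web

class PageHandler(tornado.web.RequestHandler):
    def get(self):
        # Renders something like /static/style.css?v=<hash>
        self.write('<link rel="stylesheet" href="%s"/>'
                   % self.static_url("style.css"))

application = tornado.web.Application(
    [(r"/", PageHandler)],
    static_path="static",  # serves ./static under /static/
)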
    -    def require_setting(self, name, feature="this feature"):
    -        """Raises an exception if the given app setting is not defined."""
    -        if not self.application.settings.get(name):
    -            raise Exception("You must define the '%s' setting in your "
    -                            "application to use %s" % (name, feature))
    -
    -    def reverse_url(self, name, *args):
    -        """Alias for `Application.reverse_url`."""
    -        return self.application.reverse_url(name, *args)
    -
    -    def compute_etag(self):
    -        """Computes the etag header to be used for this request.
    -
    -        By default uses a hash of the content written so far.
    -
    -        May be overridden to provide custom etag implementations,
    -        or may return None to disable tornado's default etag support.
    -        """
    -        hasher = hashlib.sha1()
    -        for part in self._write_buffer:
    -            hasher.update(part)
    -        return '"%s"' % hasher.hexdigest()
    -
    -    def set_etag_header(self):
    -        """Sets the response's Etag header using ``self.compute_etag()``.
    -
    -        Note: no header will be set if ``compute_etag()`` returns ``None``.
    -
    -        This method is called automatically when the request is finished.
    -        """
    -        etag = self.compute_etag()
    -        if etag is not None:
    -            self.set_header("Etag", etag)
    -
    -    def check_etag_header(self):
    -        """Checks the ``Etag`` header against requests's ``If-None-Match``.
    -
    -        Returns ``True`` if the request's Etag matches and a 304 should be
    -        returned. For example::
    -
    -            self.set_etag_header()
    -            if self.check_etag_header():
    -                self.set_status(304)
    -                return
    -
    -        This method is called automatically when the request is finished,
    -        but may be called earlier for applications that override
    -        `compute_etag` and want to do an early check for ``If-None-Match``
    -        before completing the request.  The ``Etag`` header should be set
    -        (perhaps with `set_etag_header`) before calling this method.
    -        """
    -        computed_etag = utf8(self._headers.get("Etag", ""))
    -        # Find all weak and strong etag values from If-None-Match header
    -        # because RFC 7232 allows multiple etag values in a single header.
    -        etags = re.findall(
    -            br'\*|(?:W/)?"[^"]*"',
    -            utf8(self.request.headers.get("If-None-Match", ""))
    -        )
    -        if not computed_etag or not etags:
    -            return False
    -
    -        match = False
    -        if etags[0] == b'*':
    -            match = True
    -        else:
    -            # Use a weak comparison when comparing entity-tags.
    -            def val(x):
    -                return x[2:] if x.startswith(b'W/') else x
    -
    -            for etag in etags:
    -                if val(etag) == val(computed_etag):
    -                    match = True
    -                    break
    -        return match
    -
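# Sketch of a custom compute_etag() plus the early If-None-Match check
# shown in the docstring above; the revision value is an assumption.
import tornado.web

class DocumentHandler(tornado.web.RequestHandler):
    def initialize(self, revision="r1"):
        self.revision = revision

    def compute_etag(self):
        return '"%s"' % self.revision  # quoted, per RFC 7232

    def get(self):
        self.set_etag_header()
        if self.check_etag_header():
            self.set_status(304)
            return
        self.write("document body")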
    -    def _stack_context_handle_exception(self, type, value, traceback):
    -        try:
    -            # For historical reasons _handle_request_exception only takes
    -            # the exception value instead of the full triple,
    -            # so re-raise the exception to ensure that it's in
    -            # sys.exc_info()
    -            raise_exc_info((type, value, traceback))
    -        except Exception:
    -            self._handle_request_exception(value)
    -        return True
    -
    -    @gen.coroutine
    -    def _execute(self, transforms, *args, **kwargs):
    -        """Executes this request with the given output transforms."""
    -        self._transforms = transforms
    -        try:
    -            if self.request.method not in self.SUPPORTED_METHODS:
    -                raise HTTPError(405)
    -            self.path_args = [self.decode_argument(arg) for arg in args]
    -            self.path_kwargs = dict((k, self.decode_argument(v, name=k))
    -                                    for (k, v) in kwargs.items())
    -            # If XSRF cookies are turned on, reject form submissions without
    -            # the proper cookie
    -            if self.request.method not in ("GET", "HEAD", "OPTIONS") and \
    -                    self.application.settings.get("xsrf_cookies"):
    -                self.check_xsrf_cookie()
    -
    -            result = self.prepare()
    -            if result is not None:
    -                result = yield result
    -            if self._prepared_future is not None:
    -                # Tell the Application we've finished with prepare()
    -                # and are ready for the body to arrive.
    -                future_set_result_unless_cancelled(self._prepared_future, None)
    -            if self._finished:
    -                return
    -
    -            if _has_stream_request_body(self.__class__):
    -                # In streaming mode request.body is a Future that signals
    -                # the body has been completely received.  The Future has no
    -                # result; the data has been passed to self.data_received
    -                # instead.
    -                try:
    -                    yield self.request.body
    -                except iostream.StreamClosedError:
    -                    return
    -
    -            method = getattr(self, self.request.method.lower())
    -            result = method(*self.path_args, **self.path_kwargs)
    -            if result is not None:
    -                result = yield result
    -            if self._auto_finish and not self._finished:
    -                self.finish()
    -        except Exception as e:
    -            try:
    -                self._handle_request_exception(e)
    -            except Exception:
    -                app_log.error("Exception in exception handler", exc_info=True)
    -            finally:
    -                # Unset result to avoid circular references
    -                result = None
    -            if (self._prepared_future is not None and
    -                    not self._prepared_future.done()):
    -                # In case we failed before setting _prepared_future, do it
    -                # now (to unblock the HTTP server).  Note that this is not
    -                # in a finally block to avoid GC issues prior to Python 3.4.
    -                self._prepared_future.set_result(None)
    -
    -    def data_received(self, chunk):
    -        """Implement this method to handle streamed request data.
    -
    -        Requires the `.stream_request_body` decorator.
    -        """
    -        raise NotImplementedError()
    -
    -    def _log(self):
    -        """Logs the current request.
    -
    -        Sort of deprecated since this functionality was moved to the
    -        Application, but left in place for the benefit of existing apps
    -        that have overridden this method.
    -        """
    -        self.application.log_request(self)
    -
    -    def _request_summary(self):
    -        return "%s %s (%s)" % (self.request.method, self.request.uri,
    -                               self.request.remote_ip)
    -
    -    def _handle_request_exception(self, e):
    -        if isinstance(e, Finish):
    -            # Not an error; just finish the request without logging.
    -            if not self._finished:
    -                self.finish(*e.args)
    -            return
    -        try:
    -            self.log_exception(*sys.exc_info())
    -        except Exception:
    -            # An error here should still get a best-effort send_error()
    -            # to avoid leaking the connection.
    -            app_log.error("Error in exception logger", exc_info=True)
    -        if self._finished:
    -            # Extra errors after the request has been finished should
    -            # be logged, but there is no reason to continue to try and
    -            # send a response.
    -            return
    -        if isinstance(e, HTTPError):
    -            self.send_error(e.status_code, exc_info=sys.exc_info())
    -        else:
    -            self.send_error(500, exc_info=sys.exc_info())
    -
    -    def log_exception(self, typ, value, tb):
    -        """Override to customize logging of uncaught exceptions.
    -
    -        By default logs instances of `HTTPError` as warnings without
    -        stack traces (on the ``tornado.general`` logger), and all
    -        other exceptions as errors with stack traces (on the
    -        ``tornado.application`` logger).
    -
    -        .. versionadded:: 3.1
    -        """
    -        if isinstance(value, HTTPError):
    -            if value.log_message:
    -                format = "%d %s: " + value.log_message
    -                args = ([value.status_code, self._request_summary()] +
    -                        list(value.args))
    -                gen_log.warning(format, *args)
    -        else:
    -            app_log.error("Uncaught exception %s\n%r", self._request_summary(),
    -                          self.request, exc_info=(typ, value, tb))
    -
    -    def _ui_module(self, name, module):
    -        def render(*args, **kwargs):
    -            if not hasattr(self, "_active_modules"):
    -                self._active_modules = {}
    -            if name not in self._active_modules:
    -                self._active_modules[name] = module(self)
    -            rendered = self._active_modules[name].render(*args, **kwargs)
    -            return rendered
    -        return render
    -
    -    def _ui_method(self, method):
    -        return lambda *args, **kwargs: method(self, *args, **kwargs)
    -
    -    def _clear_headers_for_304(self):
    -        # 304 responses should not contain entity headers (defined in
    -        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.1)
    -        # not explicitly allowed by
    -        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
    -        headers = ["Allow", "Content-Encoding", "Content-Language",
    -                   "Content-Length", "Content-MD5", "Content-Range",
    -                   "Content-Type", "Last-Modified"]
    -        for h in headers:
    -            self.clear_header(h)
    -
    -
    -def asynchronous(method):
    -    """Wrap request handler methods with this if they are asynchronous.
    -
    -    This decorator is for callback-style asynchronous methods; for
    -    coroutines, use the ``@gen.coroutine`` decorator without
    -    ``@asynchronous``. (It is legal for legacy reasons to use the two
    -    decorators together provided ``@asynchronous`` is first, but
    -    ``@asynchronous`` will be ignored in this case)
    -
    -    This decorator should only be applied to the :ref:`HTTP verb
    -    methods <verbs>`; its behavior is undefined for any other method.
    -    This decorator does not *make* a method asynchronous; it tells
    -    the framework that the method *is* asynchronous.  For this decorator
    -    to be useful the method must (at least sometimes) do something
    -    asynchronous.
    -
    -    If this decorator is given, the response is not finished when the
    -    method returns. It is up to the request handler to call
    -    `self.finish() <RequestHandler.finish>` to finish the HTTP
    -    request. Without this decorator, the request is automatically
    -    finished when the ``get()`` or ``post()`` method returns. Example:
    -
    -    .. testcode::
    -
    -       class MyRequestHandler(RequestHandler):
    -           @asynchronous
    -           def get(self):
    -              http = httpclient.AsyncHTTPClient()
    -              http.fetch("http://friendfeed.com/", self._on_download)
    -
    -           def _on_download(self, response):
    -              self.write("Downloaded!")
    -              self.finish()
    -
    -    .. testoutput::
    -       :hide:
    -
    -    .. versionchanged:: 3.1
    -       The ability to use ``@gen.coroutine`` without ``@asynchronous``.
    -
    -    .. versionchanged:: 4.3 Returning anything but ``None`` or a
    -       yieldable object from a method decorated with ``@asynchronous``
    -       is an error. Such return values were previously ignored silently.
    -
    -    .. deprecated:: 5.1
    -
    -       This decorator is deprecated and will be removed in Tornado 6.0.
    -       Use coroutines instead.
    -    """
    -    warnings.warn("@asynchronous is deprecated, use coroutines instead",
    -                  DeprecationWarning)
    -    # Delay the IOLoop import because it's not available on app engine.
    -    from tornado.ioloop import IOLoop
    -
    -    @functools.wraps(method)
    -    def wrapper(self, *args, **kwargs):
    -        self._auto_finish = False
    -        with stack_context.ExceptionStackContext(
    -                self._stack_context_handle_exception, delay_warning=True):
    -            result = method(self, *args, **kwargs)
    -            if result is not None:
    -                result = gen.convert_yielded(result)
    -
    -                # If @asynchronous is used with @gen.coroutine, (but
    -                # not @gen.engine), we can automatically finish the
    -                # request when the future resolves.  Additionally,
    -                # the Future will swallow any exceptions so we need
    -                # to throw them back out to the stack context to finish
    -                # the request.
    -                def future_complete(f):
    -                    f.result()
    -                    if not self._finished:
    -                        self.finish()
    -                IOLoop.current().add_future(result, future_complete)
    -                # Once we have done this, hide the Future from our
    -                # caller (i.e. RequestHandler._when_complete), which
    -                # would otherwise set up its own callback and
    -                # exception handler (resulting in exceptions being
    -                # logged twice).
    -                return None
    -            return result
    -    return wrapper
    -
    -
    -def stream_request_body(cls):
    -    """Apply to `RequestHandler` subclasses to enable streaming body support.
    -
    -    This decorator implies the following changes:
    -
    -    * `.HTTPServerRequest.body` is undefined, and body arguments will not
    -      be included in `RequestHandler.get_argument`.
    -    * `RequestHandler.prepare` is called when the request headers have been
    -      read instead of after the entire body has been read.
    -    * The subclass must define a method ``data_received(self, data):``, which
    -      will be called zero or more times as data is available.  Note that
    -      if the request has an empty body, ``data_received`` may not be called.
    -    * ``prepare`` and ``data_received`` may return Futures (such as via
    -      ``@gen.coroutine``), in which case the next method will not be called
    -      until those futures have completed.
    -    * The regular HTTP method (``post``, ``put``, etc) will be called after
    -      the entire body has been read.
    -
    -    See the `file receiver demo <https://github.com/tornadoweb/tornado/tree/master/demos/file_upload/>`_
    -    for example usage.
    -    """  # noqa: E501
    -    if not issubclass(cls, RequestHandler):
    -        raise TypeError("expected subclass of RequestHandler, got %r" % cls)
    -    cls._stream_request_body = True
    -    return cls
    -
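# Minimal streaming-upload sketch per the decorator's contract above; the
# destination path is an assumption.
import tornado.web

@tornado.web.stream_request_body
class UploadHandler(tornado.web.RequestHandler):
    def prepare(self):
        self.sink = open("/tmp/upload.bin", "wb")

    def data_received(self, chunk):
        self.sink.write(chunk)  # called zero or more times

    def post(self):             # runs after the full body has streamed in
        self.sink.close()
        self.write("received")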
    -
    -def _has_stream_request_body(cls):
    -    if not issubclass(cls, RequestHandler):
    -        raise TypeError("expected subclass of RequestHandler, got %r" % cls)
    -    return getattr(cls, '_stream_request_body', False)
    -
    -
    -def removeslash(method):
    -    """Use this decorator to remove trailing slashes from the request path.
    -
    -    For example, a request to ``/foo/`` would redirect to ``/foo`` with this
    -    decorator. Your request handler mapping should use a regular expression
    -    like ``r'/foo/*'`` in conjunction with using the decorator.
    -    """
    -    @functools.wraps(method)
    -    def wrapper(self, *args, **kwargs):
    -        if self.request.path.endswith("/"):
    -            if self.request.method in ("GET", "HEAD"):
    -                uri = self.request.path.rstrip("/")
    -                if uri:  # don't try to redirect '/' to ''
    -                    if self.request.query:
    -                        uri += "?" + self.request.query
    -                    self.redirect(uri, permanent=True)
    -                    return
    -            else:
    -                raise HTTPError(404)
    -        return method(self, *args, **kwargs)
    -    return wrapper
    -
    -
    -def addslash(method):
    -    """Use this decorator to add a missing trailing slash to the request path.
    -
    -    For example, a request to ``/foo`` would redirect to ``/foo/`` with this
    -    decorator. Your request handler mapping should use a regular expression
    -    like ``r'/foo/?'`` in conjunction with using the decorator.
    -    """
    -    @functools.wraps(method)
    -    def wrapper(self, *args, **kwargs):
    -        if not self.request.path.endswith("/"):
    -            if self.request.method in ("GET", "HEAD"):
    -                uri = self.request.path + "/"
    -                if self.request.query:
    -                    uri += "?" + self.request.query
    -                self.redirect(uri, permanent=True)
    -                return
    -            raise HTTPError(404)
    -        return method(self, *args, **kwargs)
    -    return wrapper
    -
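# Sketch pairing removeslash/addslash with the route regexps their
# docstrings call for; handler names are illustrative.
import tornado.web

class NoSlashHandler(tornado.web.RequestHandler):
    @tornado.web.removeslash
    def get(self):  # /foo/ is permanently redirected to /foo
        self.write("foo")

class SlashHandler(tornado.web.RequestHandler):
    @tornado.web.addslash
    def get(self):  # /bar is permanently redirected to /bar/
        self.write("bar")

application = tornado.web.Application([
    (r"/foo/*", NoSlashHandler),
    (r"/bar/?", SlashHandler),
])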
    -
    -class _ApplicationRouter(ReversibleRuleRouter):
    -    """Routing implementation used internally by `Application`.
    -
    -    Provides a binding between `Application` and `RequestHandler`.
    -    This implementation extends `~.routing.ReversibleRuleRouter` in a couple of ways:
    -        * it allows `RequestHandler` subclasses to be used as `~.routing.Rule` targets, and
    -        * it allows a list/tuple of rules to be used as a `~.routing.Rule` target;
    -          the ``process_rule`` implementation substitutes such a list with an
    -          appropriate `_ApplicationRouter` instance.
    -    """
    -
    -    def __init__(self, application, rules=None):
    -        assert isinstance(application, Application)
    -        self.application = application
    -        super(_ApplicationRouter, self).__init__(rules)
    -
    -    def process_rule(self, rule):
    -        rule = super(_ApplicationRouter, self).process_rule(rule)
    -
    -        if isinstance(rule.target, (list, tuple)):
    -            rule.target = _ApplicationRouter(self.application, rule.target)
    -
    -        return rule
    -
    -    def get_target_delegate(self, target, request, **target_params):
    -        if isclass(target) and issubclass(target, RequestHandler):
    -            return self.application.get_handler_delegate(request, target, **target_params)
    -
    -        return super(_ApplicationRouter, self).get_target_delegate(target, request, **target_params)
    -
    -
    -class Application(ReversibleRouter):
    -    r"""A collection of request handlers that make up a web application.
    -
    -    Instances of this class are callable and can be passed directly to
    -    HTTPServer to serve the application::
    -
    -        application = web.Application([
    -            (r"/", MainPageHandler),
    -        ])
    -        http_server = httpserver.HTTPServer(application)
    -        http_server.listen(8080)
    -        ioloop.IOLoop.current().start()
    -
    -    The constructor for this class takes in a list of `~.routing.Rule`
    -    objects or tuples of values corresponding to the arguments of
    -    `~.routing.Rule` constructor: ``(matcher, target, [target_kwargs], [name])``,
    -    the values in square brackets being optional. The default matcher is
    -    `~.routing.PathMatches`, so ``(regexp, target)`` tuples can also be used
    -    instead of ``(PathMatches(regexp), target)``.
    -
    -    A common routing target is a `RequestHandler` subclass, but you can also
    -    use lists of rules as a target, which create a nested routing configuration::
    -
    -        application = web.Application([
    -            (HostMatches("example.com"), [
    -                (r"/", MainPageHandler),
    -                (r"/feed", FeedHandler),
    -            ]),
    -        ])
    -
    -    In addition to this you can use nested `~.routing.Router` instances,
    -    `~.httputil.HTTPMessageDelegate` subclasses and callables as routing targets
    -    (see `~.routing` module docs for more information).
    -
    -    When we receive requests, we iterate over the list in order and
    -    instantiate an instance of the first request class whose regexp
    -    matches the request path. The request class can be specified as
    -    either a class object or a (fully-qualified) name.
    -
    -    A dictionary may be passed as the third element (``target_kwargs``)
    -    of the tuple, which will be used as keyword arguments to the handler's
    -    constructor and `~RequestHandler.initialize` method. This pattern
    -    is used for the `StaticFileHandler` in this example (note that a
    -    `StaticFileHandler` can be installed automatically with the
    -    static_path setting described below)::
    -
    -        application = web.Application([
    -            (r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
    -        ])
    -
    -    We support virtual hosts with the `add_handlers` method, which takes in
    -    a host regular expression as the first argument::
    -
    -        application.add_handlers(r"www\.myhost\.com", [
    -            (r"/article/([0-9]+)", ArticleHandler),
    -        ])
    -
    -    If there's no match for the current request's host, then ``default_host``
    -    parameter value is matched against host regular expressions.
    -
    -
    -    .. warning::
    -
    -       Applications that do not use TLS may be vulnerable to :ref:`DNS
    -       rebinding <dnsrebinding>` attacks. This attack is especially
    -       relevant to applications that only listen on ``127.0.0.1`` or
    -       other private networks. Appropriate host patterns must be used
    -       (instead of the default of ``r'.*'``) to prevent this risk. The
    -       ``default_host`` argument must not be used in applications that
    -       may be vulnerable to DNS rebinding.
    -
    -    You can serve static files by sending the ``static_path`` setting
    -    as a keyword argument. We will serve those files from the
    -    ``/static/`` URI (this is configurable with the
    -    ``static_url_prefix`` setting), and we will serve ``/favicon.ico``
    -    and ``/robots.txt`` from the same directory.  A custom subclass of
    -    `StaticFileHandler` can be specified with the
    -    ``static_handler_class`` setting.
    -
    -    .. versionchanged:: 4.5
    -       Integration with the new `tornado.routing` module.
    -
    -    """
    -    def __init__(self, handlers=None, default_host=None, transforms=None,
    -                 **settings):
    -        if transforms is None:
    -            self.transforms = []
    -            if settings.get("compress_response") or settings.get("gzip"):
    -                self.transforms.append(GZipContentEncoding)
    -        else:
    -            self.transforms = transforms
    -        self.default_host = default_host
    -        self.settings = settings
    -        self.ui_modules = {'linkify': _linkify,
    -                           'xsrf_form_html': _xsrf_form_html,
    -                           'Template': TemplateModule,
    -                           }
    -        self.ui_methods = {}
    -        self._load_ui_modules(settings.get("ui_modules", {}))
    -        self._load_ui_methods(settings.get("ui_methods", {}))
    -        if self.settings.get("static_path"):
    -            path = self.settings["static_path"]
    -            handlers = list(handlers or [])
    -            static_url_prefix = settings.get("static_url_prefix",
    -                                             "/static/")
    -            static_handler_class = settings.get("static_handler_class",
    -                                                StaticFileHandler)
    -            static_handler_args = settings.get("static_handler_args", {})
    -            static_handler_args['path'] = path
    -            for pattern in [re.escape(static_url_prefix) + r"(.*)",
    -                            r"/(favicon\.ico)", r"/(robots\.txt)"]:
    -                handlers.insert(0, (pattern, static_handler_class,
    -                                    static_handler_args))
    -
    -        if self.settings.get('debug'):
    -            self.settings.setdefault('autoreload', True)
    -            self.settings.setdefault('compiled_template_cache', False)
    -            self.settings.setdefault('static_hash_cache', False)
    -            self.settings.setdefault('serve_traceback', True)
    -
    -        self.wildcard_router = _ApplicationRouter(self, handlers)
    -        self.default_router = _ApplicationRouter(self, [
    -            Rule(AnyMatches(), self.wildcard_router)
    -        ])
    -
    -        # Automatically reload modified modules
    -        if self.settings.get('autoreload'):
    -            from tornado import autoreload
    -            autoreload.start()
    -
    -    def listen(self, port, address="", **kwargs):
    -        """Starts an HTTP server for this application on the given port.
    -
    -        This is a convenience alias for creating an `.HTTPServer`
    -        object and calling its listen method.  Keyword arguments not
    -        supported by `HTTPServer.listen <.TCPServer.listen>` are passed to the
    -        `.HTTPServer` constructor.  For advanced uses
    -        (e.g. multi-process mode), do not use this method; create an
    -        `.HTTPServer` and call its
    -        `.TCPServer.bind`/`.TCPServer.start` methods directly.
    -
    -        Note that after calling this method you still need to call
    -        ``IOLoop.current().start()`` to start the server.
    -
    -        Returns the `.HTTPServer` object.
    -
    -        .. versionchanged:: 4.3
    -           Now returns the `.HTTPServer` object.
    -        """
    -        # import is here rather than top level because HTTPServer
    -        # is not importable on appengine
    -        from tornado.httpserver import HTTPServer
    -        server = HTTPServer(self, **kwargs)
    -        server.listen(port, address)
    -        return server
    -
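# Minimal startup sketch per the listen() docstring above: nothing is
# served until the IOLoop starts.
import tornado.ioloop
import tornado.web

class MainHandler(tornado.web.RequestHandler):
    def get(self):
        self.write("Hello")

application = tornado.web.Application([(r"/", MainHandler)])
application.listen(8888)
tornado.ioloop.IOLoop.current().start()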
    -    def add_handlers(self, host_pattern, host_handlers):
    -        """Appends the given handlers to our handler list.
    -
    -        Host patterns are processed sequentially in the order they were
    -        added. All matching patterns will be considered.
    -        """
    -        host_matcher = HostMatches(host_pattern)
    -        rule = Rule(host_matcher, _ApplicationRouter(self, host_handlers))
    -
    -        self.default_router.rules.insert(-1, rule)
    -
    -        if self.default_host is not None:
    -            self.wildcard_router.add_rules([(
    -                DefaultHostMatches(self, host_matcher.host_pattern),
    -                host_handlers
    -            )])
    -
    -    def add_transform(self, transform_class):
    -        self.transforms.append(transform_class)
    -
    -    def _load_ui_methods(self, methods):
    -        if isinstance(methods, types.ModuleType):
    -            self._load_ui_methods(dict((n, getattr(methods, n))
    -                                       for n in dir(methods)))
    -        elif isinstance(methods, list):
    -            for m in methods:
    -                self._load_ui_methods(m)
    -        else:
    -            for name, fn in methods.items():
    -                if not name.startswith("_") and hasattr(fn, "__call__") \
    -                        and name[0].lower() == name[0]:
    -                    self.ui_methods[name] = fn
    -
    -    def _load_ui_modules(self, modules):
    -        if isinstance(modules, types.ModuleType):
    -            self._load_ui_modules(dict((n, getattr(modules, n))
    -                                       for n in dir(modules)))
    -        elif isinstance(modules, list):
    -            for m in modules:
    -                self._load_ui_modules(m)
    -        else:
    -            assert isinstance(modules, dict)
    -            for name, cls in modules.items():
    -                try:
    -                    if issubclass(cls, UIModule):
    -                        self.ui_modules[name] = cls
    -                except TypeError:
    -                    pass
    -
    -    def __call__(self, request):
    -        # Legacy HTTPServer interface
    -        dispatcher = self.find_handler(request)
    -        return dispatcher.execute()
    -
    -    def find_handler(self, request, **kwargs):
    -        route = self.default_router.find_handler(request)
    -        if route is not None:
    -            return route
    -
    -        if self.settings.get('default_handler_class'):
    -            return self.get_handler_delegate(
    -                request,
    -                self.settings['default_handler_class'],
    -                self.settings.get('default_handler_args', {}))
    -
    -        return self.get_handler_delegate(
    -            request, ErrorHandler, {'status_code': 404})
    -
    -    def get_handler_delegate(self, request, target_class, target_kwargs=None,
    -                             path_args=None, path_kwargs=None):
    -        """Returns `~.httputil.HTTPMessageDelegate` that can serve a request
    -        for application and `RequestHandler` subclass.
    -
    -        :arg httputil.HTTPServerRequest request: current HTTP request.
    -        :arg RequestHandler target_class: a `RequestHandler` class.
    -        :arg dict target_kwargs: keyword arguments for ``target_class`` constructor.
    -        :arg list path_args: positional arguments for ``target_class`` HTTP method that
    -            will be executed while handling a request (``get``, ``post`` or any other).
    -        :arg dict path_kwargs: keyword arguments for ``target_class`` HTTP method.
    -        """
    -        return _HandlerDelegate(
    -            self, request, target_class, target_kwargs, path_args, path_kwargs)
    -
    -    def reverse_url(self, name, *args):
    -        """Returns a URL path for handler named ``name``
    -
    -        The handler must be added to the application as a named `URLSpec`.
    -
    -        Args will be substituted for capturing groups in the `URLSpec` regex.
    -        They will be converted to strings if necessary, encoded as utf8,
    -        and url-escaped.
    -        """
    -        reversed_url = self.default_router.reverse_url(name, *args)
    -        if reversed_url is not None:
    -            return reversed_url
    -
    -        raise KeyError("%s not found in named urls" % name)
    -
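# Sketch of reverse_url() against a named URLSpec; the route and name are
# illustrative.
import tornado.web

class ArticleHandler(tornado.web.RequestHandler):
    def get(self, article_id):
        self.write("article %s" % article_id)

application = tornado.web.Application([
    tornado.web.URLSpec(r"/article/([0-9]+)", ArticleHandler, name="article"),
])
assert application.reverse_url("article", 42) == "/article/42"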
    -    def log_request(self, handler):
    -        """Writes a completed HTTP request to the logs.
    -
    -        By default writes to the python root logger.  To change
    -        this behavior either subclass Application and override this method,
    -        or pass a function in the application settings dictionary as
    -        ``log_function``.
    -        """
    -        if "log_function" in self.settings:
    -            self.settings["log_function"](handler)
    -            return
    -        if handler.get_status() < 400:
    -            log_method = access_log.info
    -        elif handler.get_status() < 500:
    -            log_method = access_log.warning
    -        else:
    -            log_method = access_log.error
    -        request_time = 1000.0 * handler.request.request_time()
    -        log_method("%d %s %.2fms", handler.get_status(),
    -                   handler._request_summary(), request_time)
    -
    -
    -class _HandlerDelegate(httputil.HTTPMessageDelegate):
    -    def __init__(self, application, request, handler_class, handler_kwargs,
    -                 path_args, path_kwargs):
    -        self.application = application
    -        self.connection = request.connection
    -        self.request = request
    -        self.handler_class = handler_class
    -        self.handler_kwargs = handler_kwargs or {}
    -        self.path_args = path_args or []
    -        self.path_kwargs = path_kwargs or {}
    -        self.chunks = []
    -        self.stream_request_body = _has_stream_request_body(self.handler_class)
    -
    -    def headers_received(self, start_line, headers):
    -        if self.stream_request_body:
    -            self.request.body = Future()
    -            return self.execute()
    -
    -    def data_received(self, data):
    -        if self.stream_request_body:
    -            return self.handler.data_received(data)
    -        else:
    -            self.chunks.append(data)
    -
    -    def finish(self):
    -        if self.stream_request_body:
    -            future_set_result_unless_cancelled(self.request.body, None)
    -        else:
    -            self.request.body = b''.join(self.chunks)
    -            self.request._parse_body()
    -            self.execute()
    -
    -    def on_connection_close(self):
    -        if self.stream_request_body:
    -            self.handler.on_connection_close()
    -        else:
    -            self.chunks = None
    -
    -    def execute(self):
    -        # If template cache is disabled (usually in the debug mode),
    -        # re-compile templates and reload static files on every
    -        # request so you don't need to restart to see changes
    -        if not self.application.settings.get("compiled_template_cache", True):
    -            with RequestHandler._template_loader_lock:
    -                for loader in RequestHandler._template_loaders.values():
    -                    loader.reset()
    -        if not self.application.settings.get('static_hash_cache', True):
    -            StaticFileHandler.reset()
    -
    -        self.handler = self.handler_class(self.application, self.request,
    -                                          **self.handler_kwargs)
    -        transforms = [t(self.request) for t in self.application.transforms]
    -
    -        if self.stream_request_body:
    -            self.handler._prepared_future = Future()
    -        # Note that if an exception escapes handler._execute it will be
    -        # trapped in the Future it returns (which we are ignoring here,
    -        # leaving it to be logged when the Future is GC'd).
    -        # However, that shouldn't happen because _execute has a blanket
    -        # except handler, and we cannot easily access the IOLoop here to
    -        # call add_future (because of the requirement to remain compatible
    -        # with WSGI)
    -        self.handler._execute(transforms, *self.path_args,
    -                              **self.path_kwargs)
    -        # If we are streaming the request body, then execute() is finished
    -        # when the handler has prepared to receive the body.  If not,
    -        # it doesn't matter when execute() finishes (so we return None)
    -        return self.handler._prepared_future
    -
    -
    -class HTTPError(Exception):
    -    """An exception that will turn into an HTTP error response.
    -
    -    Raising an `HTTPError` is a convenient alternative to calling
    -    `RequestHandler.send_error` since it automatically ends the
    -    current function.
    -
    -    To customize the response sent with an `HTTPError`, override
    -    `RequestHandler.write_error`.
    -
    -    :arg int status_code: HTTP status code.  Must be listed in
    -        `httplib.responses <http.client.responses>` unless the ``reason``
    -        keyword argument is given.
    -    :arg str log_message: Message to be written to the log for this error
    -        (will not be shown to the user unless the `Application` is in debug
    -        mode).  May contain ``%s``-style placeholders, which will be filled
    -        in with remaining positional parameters.
    -    :arg str reason: Keyword-only argument.  The HTTP "reason" phrase
    -        to pass in the status line along with ``status_code``.  Normally
    -        determined automatically from ``status_code``, but must be supplied
    -        when using a non-standard numeric code.
    -    """
    -    def __init__(self, status_code=500, log_message=None, *args, **kwargs):
    -        self.status_code = status_code
    -        self.log_message = log_message
    -        self.args = args
    -        self.reason = kwargs.get('reason', None)
    -        if log_message and not args:
    -            self.log_message = log_message.replace('%', '%%')
    -
    -    def __str__(self):
    -        message = "HTTP %d: %s" % (
    -            self.status_code,
    -            self.reason or httputil.responses.get(self.status_code, 'Unknown'))
    -        if self.log_message:
    -            return message + " (" + (self.log_message % self.args) + ")"
    -        else:
    -            return message
    -
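    -# Illustrative sketch (not part of the original module): a handler
    -# raising HTTPError; the 'items' setting and its contents are
    -# hypothetical.
    -class _ExampleItemHandler(RequestHandler):
    -    def get(self, item_id):
    -        item = self.settings.get('items', {}).get(item_id)
    -        if item is None:
    -            # %s-style placeholders are filled from the extra args
    -            raise HTTPError(404, "no item with id %s", item_id)
    -        self.write(item)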
    -
    -class Finish(Exception):
    -    """An exception that ends the request without producing an error response.
    -
    -    When `Finish` is raised in a `RequestHandler`, the request will
    -    end (calling `RequestHandler.finish` if it hasn't already been
    -    called), but the error-handling methods (including
    -    `RequestHandler.write_error`) will not be called.
    -
    -    If `Finish()` was created with no arguments, the pending response
    -    will be sent as-is. If `Finish()` was given an argument, that
    -    argument will be passed to `RequestHandler.finish()`.
    -
    -    This can be a more convenient way to implement custom error pages
    -    than overriding ``write_error`` (especially in library code)::
    -
    -        if self.current_user is None:
    -            self.set_status(401)
    -            self.set_header('WWW-Authenticate', 'Basic realm="something"')
    -            raise Finish()
    -
    -    .. versionchanged:: 4.3
    -       Arguments passed to ``Finish()`` will be passed on to
    -       `RequestHandler.finish`.
    -    """
    -    pass
    -
    -
    -class MissingArgumentError(HTTPError):
    -    """Exception raised by `RequestHandler.get_argument`.
    -
    -    This is a subclass of `HTTPError`, so if it is uncaught a 400 response
    -    code will be used instead of 500 (and a stack trace will not be logged).
    -
    -    .. versionadded:: 3.1
    -    """
    -    def __init__(self, arg_name):
    -        super(MissingArgumentError, self).__init__(
    -            400, 'Missing argument %s' % arg_name)
    -        self.arg_name = arg_name
    -
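    -# Illustrative sketch (not part of the original module):
    -# get_argument() with no default raises MissingArgumentError, which,
    -# left uncaught, becomes a 400 response rather than a 500.
    -class _ExampleSearchHandler(RequestHandler):
    -    def get(self):
    -        q = self.get_argument("q")  # raises MissingArgumentError if absent
    -        self.write({"query": q})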
    -
    -class ErrorHandler(RequestHandler):
    -    """Generates an error response with ``status_code`` for all requests."""
    -    def initialize(self, status_code):
    -        self.set_status(status_code)
    -
    -    def prepare(self):
    -        raise HTTPError(self._status_code)
    -
    -    def check_xsrf_cookie(self):
    -        # POSTs to an ErrorHandler don't actually have side effects,
    -        # so we don't need to check the xsrf token.  This allows POSTs
    -        # to the wrong url to return a 404 instead of 403.
    -        pass
    -
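    -# Illustrative note (not from the original module): ErrorHandler is
    -# typically wired up as a catch-all through the Application settings,
    -# e.g.:
    -#
    -#     app = Application(handlers,
    -#                       default_handler_class=ErrorHandler,
    -#                       default_handler_args=dict(status_code=404))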
    -
    -class RedirectHandler(RequestHandler):
    -    """Redirects the client to the given URL for all GET requests.
    -
    -    You should provide the keyword argument ``url`` to the handler, e.g.::
    -
    -        application = web.Application([
    -            (r"/oldpath", web.RedirectHandler, {"url": "/newpath"}),
    -        ])
    -
    -    `RedirectHandler` supports regular expression substitutions. E.g., to
    -    swap the first and second parts of a path while preserving the remainder::
    -
    -        application = web.Application([
    -            (r"/(.*?)/(.*?)/(.*)", web.RedirectHandler, {"url": "/{1}/{0}/{2}"}),
    -        ])
    -
    -    The final URL is formatted with `str.format` and the substrings that match
    -    the capturing groups. In the above example, a request to "/a/b/c" would be
    -    formatted like::
    -
    -        str.format("/{1}/{0}/{2}", "a", "b", "c")  # -> "/b/a/c"
    -
    -    Use Python's :ref:`format string syntax <python:formatstrings>` to customize how
    -    values are substituted.
    -
    -    .. versionchanged:: 4.5
    -       Added support for substitutions into the destination URL.
    -
    -    .. versionchanged:: 5.0
    -       If any query arguments are present, they will be copied to the
    -       destination URL.
    -    """
    -    def initialize(self, url, permanent=True):
    -        self._url = url
    -        self._permanent = permanent
    -
    -    def get(self, *args):
    -        to_url = self._url.format(*args)
    -        if self.request.query_arguments:
    -            to_url = httputil.url_concat(
    -                to_url, list(httputil.qs_to_qsl(self.request.query_arguments)))
    -        self.redirect(to_url, permanent=self._permanent)
    -
    -
    -class StaticFileHandler(RequestHandler):
    -    """A simple handler that can serve static content from a directory.
    -
    -    A `StaticFileHandler` is configured automatically if you pass the
    -    ``static_path`` keyword argument to `Application`.  This handler
    -    can be customized with the ``static_url_prefix``, ``static_handler_class``,
    -    and ``static_handler_args`` settings.
    -
    -    To map an additional path to this handler for a static data directory
    -    you would add a line to your application like::
    -
    -        application = web.Application([
    -            (r"/content/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
    -        ])
    -
    -    The handler constructor requires a ``path`` argument, which specifies the
    -    local root directory of the content to be served.
    -
    -    Note that a capture group in the regex is required to parse the value for
    -    the ``path`` argument to the get() method (different than the constructor
    -    argument above); see `URLSpec` for details.
    -
    -    To serve a file like ``index.html`` automatically when a directory is
    -    requested, set ``static_handler_args=dict(default_filename="index.html")``
    -    in your application settings, or add ``default_filename`` as an initializer
    -    argument for your ``StaticFileHandler``.
    -
    -    To maximize the effectiveness of browser caching, this class supports
    -    versioned urls (by default using the argument ``?v=``).  If a version
    -    is given, we instruct the browser to cache this file indefinitely.
    -    `make_static_url` (also available as `RequestHandler.static_url`) can
    -    be used to construct a versioned url.
    -
    -    This handler is intended primarily for use in development and light-duty
    -    file serving; for heavy traffic it will be more efficient to use
    -    a dedicated static file server (such as nginx or Apache).  We support
    -    the HTTP ``Accept-Ranges`` mechanism to return partial content (because
    -    some browsers require this functionality to be present to seek in
    -    HTML5 audio or video).
    -
    -    **Subclassing notes**
    -
    -    This class is designed to be extensible by subclassing, but because
    -    of the way static urls are generated with class methods rather than
    -    instance methods, the inheritance patterns are somewhat unusual.
    -    Be sure to use the ``@classmethod`` decorator when overriding a
    -    class method.  Instance methods may use the attributes ``self.path``
    -    ``self.absolute_path``, and ``self.modified``.
    -
    -    Subclasses should only override methods discussed in this section;
    -    overriding other methods is error-prone.  Overriding
    -    ``StaticFileHandler.get`` is particularly problematic due to the
    -    tight coupling with ``compute_etag`` and other methods.
    -
    -    To change the way static urls are generated (e.g. to match the behavior
    -    of another server or CDN), override `make_static_url`, `parse_url_path`,
    -    `get_cache_time`, and/or `get_version`.
    -
    -    To replace all interaction with the filesystem (e.g. to serve
    -    static content from a database), override `get_content`,
    -    `get_content_size`, `get_modified_time`, `get_absolute_path`, and
    -    `validate_absolute_path`.
    -
    -    .. versionchanged:: 3.1
    -       Many of the methods for subclasses were added in Tornado 3.1.
    -    """
    -    CACHE_MAX_AGE = 86400 * 365 * 10  # 10 years
    -
    -    _static_hashes = {}  # type: typing.Dict
    -    _lock = threading.Lock()  # protects _static_hashes
    -
    -    def initialize(self, path, default_filename=None):
    -        self.root = path
    -        self.default_filename = default_filename
    -
    -    @classmethod
    -    def reset(cls):
    -        with cls._lock:
    -            cls._static_hashes = {}
    -
    -    def head(self, path):
    -        return self.get(path, include_body=False)
    -
    -    @gen.coroutine
    -    def get(self, path, include_body=True):
    -        # Set up our path instance variables.
    -        self.path = self.parse_url_path(path)
    -        del path  # make sure we don't refer to path instead of self.path again
    -        absolute_path = self.get_absolute_path(self.root, self.path)
    -        self.absolute_path = self.validate_absolute_path(
    -            self.root, absolute_path)
    -        if self.absolute_path is None:
    -            return
    -
    -        self.modified = self.get_modified_time()
    -        self.set_headers()
    -
    -        if self.should_return_304():
    -            self.set_status(304)
    -            return
    -
    -        request_range = None
    -        range_header = self.request.headers.get("Range")
    -        if range_header:
    -            # As per RFC 2616 14.16, if an invalid Range header is specified,
    -            # the request will be treated as if the header didn't exist.
    -            request_range = httputil._parse_request_range(range_header)
    -
    -        size = self.get_content_size()
    -        if request_range:
    -            start, end = request_range
    -            if (start is not None and start >= size) or end == 0:
    -                # As per RFC 2616 14.35.1, a range is unsatisfiable only
    -                # if the first requested byte is at or beyond the end of
    -                # the content, or when a zero-length suffix is specified.
    -                self.set_status(416)  # Range Not Satisfiable
    -                self.set_header("Content-Type", "text/plain")
    -                self.set_header("Content-Range", "bytes */%s" % (size, ))
    -                return
    -            if start is not None and start < 0:
    -                start += size
    -            if end is not None and end > size:
    -                # Clients sometimes blindly use a large range to limit their
    -                # download size; cap the endpoint at the actual file size.
    -                end = size
    -            # Note: only return HTTP 206 if less than the entire range has been
    -            # requested. Not only is this semantically correct, but Chrome
    -            # refuses to play audio if it gets an HTTP 206 in response to
    -            # ``Range: bytes=0-``.
    -            if size != (end or size) - (start or 0):
    -                self.set_status(206)  # Partial Content
    -                self.set_header("Content-Range",
    -                                httputil._get_content_range(start, end, size))
    -        else:
    -            start = end = None
    -
    -        if start is not None and end is not None:
    -            content_length = end - start
    -        elif end is not None:
    -            content_length = end
    -        elif start is not None:
    -            content_length = size - start
    -        else:
    -            content_length = size
    -        self.set_header("Content-Length", content_length)
    -
    -        if include_body:
    -            content = self.get_content(self.absolute_path, start, end)
    -            if isinstance(content, bytes):
    -                content = [content]
    -            for chunk in content:
    -                try:
    -                    self.write(chunk)
    -                    yield self.flush()
    -                except iostream.StreamClosedError:
    -                    return
    -        else:
    -            assert self.request.method == "HEAD"
    -
    -    def compute_etag(self):
    -        """Sets the ``Etag`` header based on static url version.
    -
    -        This allows efficient ``If-None-Match`` checks against cached
    -        versions, and sends the correct ``Etag`` for a partial response
    -        (i.e. the same ``Etag`` as the full file).
    -
    -        .. versionadded:: 3.1
    -        """
    -        version_hash = self._get_cached_version(self.absolute_path)
    -        if not version_hash:
    -            return None
    -        return '"%s"' % (version_hash, )
    -
    -    def set_headers(self):
    -        """Sets the content and caching headers on the response.
    -
    -        .. versionadded:: 3.1
    -        """
    -        self.set_header("Accept-Ranges", "bytes")
    -        self.set_etag_header()
    -
    -        if self.modified is not None:
    -            self.set_header("Last-Modified", self.modified)
    -
    -        content_type = self.get_content_type()
    -        if content_type:
    -            self.set_header("Content-Type", content_type)
    -
    -        cache_time = self.get_cache_time(self.path, self.modified,
    -                                         content_type)
    -        if cache_time > 0:
    -            self.set_header("Expires", datetime.datetime.utcnow() +
    -                            datetime.timedelta(seconds=cache_time))
    -            self.set_header("Cache-Control", "max-age=" + str(cache_time))
    -
    -        self.set_extra_headers(self.path)
    -
    -    def should_return_304(self):
    -        """Returns True if the headers indicate that we should return 304.
    -
    -        .. versionadded:: 3.1
    -        """
    -        # If client sent If-None-Match, use it, ignore If-Modified-Since
    -        if self.request.headers.get('If-None-Match'):
    -            return self.check_etag_header()
    -
    -        # Check the If-Modified-Since, and don't send the result if the
    -        # content has not been modified
    -        ims_value = self.request.headers.get("If-Modified-Since")
    -        if ims_value is not None:
    -            date_tuple = email.utils.parsedate(ims_value)
    -            if date_tuple is not None:
    -                if_since = datetime.datetime(*date_tuple[:6])
    -                if if_since >= self.modified:
    -                    return True
    -
    -        return False
    -
    -    @classmethod
    -    def get_absolute_path(cls, root, path):
    -        """Returns the absolute location of ``path`` relative to ``root``.
    -
    -        ``root`` is the path configured for this `StaticFileHandler`
    -        (in most cases the ``static_path`` `Application` setting).
    -
    -        This class method may be overridden in subclasses.  By default
    -        it returns a filesystem path, but other strings may be used
    -        as long as they are unique and understood by the subclass's
    -        overridden `get_content`.
    -
    -        .. versionadded:: 3.1
    -        """
    -        abspath = os.path.abspath(os.path.join(root, path))
    -        return abspath
    -
    -    def validate_absolute_path(self, root, absolute_path):
    -        """Validate and return the absolute path.
    -
    -        ``root`` is the configured path for the `StaticFileHandler`,
    -        and ``path`` is the result of `get_absolute_path`
    -
    -        This is an instance method called during request processing,
    -        so it may raise `HTTPError` or use methods like
    -        `RequestHandler.redirect` (return None after redirecting to
    -        halt further processing).  This is where 404 errors for missing files
    -        are generated.
    -
    -        This method may modify the path before returning it, but note that
    -        any such modifications will not be understood by `make_static_url`.
    -
    -        In instance methods, this method's result is available as
    -        ``self.absolute_path``.
    -
    -        .. versionadded:: 3.1
    -        """
    -        # os.path.abspath strips a trailing /.
    -        # We must add it back to `root` so that we only match files
    -        # in a directory named `root` instead of files starting with
    -        # that prefix.
    -        root = os.path.abspath(root)
    -        if not root.endswith(os.path.sep):
    -            # abspath always removes a trailing slash, except when
    -            # root is '/'. This is an unusual case, but several projects
    -            # have independently discovered this technique to disable
    -            # Tornado's path validation and (hopefully) do their own,
    -            # so we need to support it.
    -            root += os.path.sep
    -        # The trailing slash also needs to be temporarily added back
    -        # to the requested path so a request to root/ will match.
    -        if not (absolute_path + os.path.sep).startswith(root):
    -            raise HTTPError(403, "%s is not in root static directory",
    -                            self.path)
    -        if (os.path.isdir(absolute_path) and
    -                self.default_filename is not None):
    -            # need to look at the request.path here for when path is empty
    -            # but there is some prefix to the path that was already
    -            # trimmed by the routing
    -            if not self.request.path.endswith("/"):
    -                self.redirect(self.request.path + "/", permanent=True)
    -                return
    -            absolute_path = os.path.join(absolute_path, self.default_filename)
    -        if not os.path.exists(absolute_path):
    -            raise HTTPError(404)
    -        if not os.path.isfile(absolute_path):
    -            raise HTTPError(403, "%s is not a file", self.path)
    -        return absolute_path
    -
    -    @classmethod
    -    def get_content(cls, abspath, start=None, end=None):
    -        """Retrieve the content of the requested resource which is located
    -        at the given absolute path.
    -
    -        This class method may be overridden by subclasses.  Note that its
    -        signature is different from other overridable class methods
    -        (no ``settings`` argument); this is deliberate to ensure that
    -        ``abspath`` is able to stand on its own as a cache key.
    -
    -        This method should either return a byte string or an iterator
    -        of byte strings.  The latter is preferred for large files
    -        as it helps reduce memory fragmentation.
    -
    -        .. versionadded:: 3.1
    -        """
    -        with open(abspath, "rb") as file:
    -            if start is not None:
    -                file.seek(start)
    -            if end is not None:
    -                remaining = end - (start or 0)
    -            else:
    -                remaining = None
    -            while True:
    -                chunk_size = 64 * 1024
    -                if remaining is not None and remaining < chunk_size:
    -                    chunk_size = remaining
    -                chunk = file.read(chunk_size)
    -                if chunk:
    -                    if remaining is not None:
    -                        remaining -= len(chunk)
    -                    yield chunk
    -                else:
    -                    if remaining is not None:
    -                        assert remaining == 0
    -                    return
    -
    -    @classmethod
    -    def get_content_version(cls, abspath):
    -        """Returns a version string for the resource at the given path.
    -
    -        This class method may be overridden by subclasses.  The
    -        default implementation is a hash of the file's contents.
    -
    -        .. versionadded:: 3.1
    -        """
    -        data = cls.get_content(abspath)
    -        hasher = hashlib.md5()
    -        if isinstance(data, bytes):
    -            hasher.update(data)
    -        else:
    -            for chunk in data:
    -                hasher.update(chunk)
    -        return hasher.hexdigest()
    -
    -    def _stat(self):
    -        if not hasattr(self, '_stat_result'):
    -            self._stat_result = os.stat(self.absolute_path)
    -        return self._stat_result
    -
    -    def get_content_size(self):
    -        """Retrieve the total size of the resource at the given path.
    -
    -        This method may be overridden by subclasses.
    -
    -        .. versionadded:: 3.1
    -
    -        .. versionchanged:: 4.0
    -           This method is now always called, instead of only when
    -           partial results are requested.
    -        """
    -        stat_result = self._stat()
    -        return stat_result[stat.ST_SIZE]
    -
    -    def get_modified_time(self):
    -        """Returns the time that ``self.absolute_path`` was last modified.
    -
    -        May be overridden in subclasses.  Should return a `~datetime.datetime`
    -        object or None.
    -
    -        .. versionadded:: 3.1
    -        """
    -        stat_result = self._stat()
    -        modified = datetime.datetime.utcfromtimestamp(
    -            stat_result[stat.ST_MTIME])
    -        return modified
    -
    -    def get_content_type(self):
    -        """Returns the ``Content-Type`` header to be used for this request.
    -
    -        .. versionadded:: 3.1
    -        """
    -        mime_type, encoding = mimetypes.guess_type(self.absolute_path)
    -        # per RFC 6713, use the appropriate type for a gzip compressed file
    -        if encoding == "gzip":
    -            return "application/gzip"
    -        # As of 2015-07-21 there is no bzip2 encoding defined at
    -        # http://www.iana.org/assignments/media-types/media-types.xhtml
    -        # So for that (and any other encoding), use octet-stream.
    -        elif encoding is not None:
    -            return "application/octet-stream"
    -        elif mime_type is not None:
    -            return mime_type
    -        # if mime_type not detected, use application/octet-stream
    -        else:
    -            return "application/octet-stream"
    -
    -    def set_extra_headers(self, path):
    -        """For subclass to add extra headers to the response"""
    -        pass
    -
    -    def get_cache_time(self, path, modified, mime_type):
    -        """Override to customize cache control behavior.
    -
    -        Return a positive number of seconds to make the result
    -        cacheable for that amount of time or 0 to mark resource as
    -        cacheable for an unspecified amount of time (subject to
    -        browser heuristics).
    -
    -        By default returns cache expiry of 10 years for resources requested
    -        with ``v`` argument.
    -        """
    -        return self.CACHE_MAX_AGE if "v" in self.request.arguments else 0
    -
    -    @classmethod
    -    def make_static_url(cls, settings, path, include_version=True):
    -        """Constructs a versioned url for the given path.
    -
    -        This method may be overridden in subclasses (but note that it
    -        is a class method rather than an instance method).  Subclasses
    -        are only required to implement the signature
    -        ``make_static_url(cls, settings, path)``; other keyword
    -        arguments may be passed through `~RequestHandler.static_url`
    -        but are not standard.
    -
    -        ``settings`` is the `Application.settings` dictionary.  ``path``
    -        is the static path being requested.  The url returned should be
    -        relative to the current host.
    -
    -        ``include_version`` determines whether the generated URL should
    -        include the query string containing the version hash of the
    -        file corresponding to the given ``path``.
    -
    -        """
    -        url = settings.get('static_url_prefix', '/static/') + path
    -        if not include_version:
    -            return url
    -
    -        version_hash = cls.get_version(settings, path)
    -        if not version_hash:
    -            return url
    -
    -        return '%s?v=%s' % (url, version_hash)
    -
    -    def parse_url_path(self, url_path):
    -        """Converts a static URL path into a filesystem path.
    -
    -        ``url_path`` is the path component of the URL with
    -        ``static_url_prefix`` removed.  The return value should be
    -        filesystem path relative to ``static_path``.
    -
    -        This is the inverse of `make_static_url`.
    -        """
    -        if os.path.sep != "/":
    -            url_path = url_path.replace("/", os.path.sep)
    -        return url_path
    -
    -    @classmethod
    -    def get_version(cls, settings, path):
    -        """Generate the version string to be used in static URLs.
    -
    -        ``settings`` is the `Application.settings` dictionary and ``path``
    -        is the relative location of the requested asset on the filesystem.
    -        The returned value should be a string, or ``None`` if no version
    -        could be determined.
    -
    -        .. versionchanged:: 3.1
    -           This method was previously recommended for subclasses to override;
    -           `get_content_version` is now preferred as it allows the base
    -           class to handle caching of the result.
    -        """
    -        abs_path = cls.get_absolute_path(settings['static_path'], path)
    -        return cls._get_cached_version(abs_path)
    -
    -    @classmethod
    -    def _get_cached_version(cls, abs_path):
    -        with cls._lock:
    -            hashes = cls._static_hashes
    -            if abs_path not in hashes:
    -                try:
    -                    hashes[abs_path] = cls.get_content_version(abs_path)
    -                except Exception:
    -                    gen_log.error("Could not open static file %r", abs_path)
    -                    hashes[abs_path] = None
    -            hsh = hashes.get(abs_path)
    -            if hsh:
    -                return hsh
    -        return None
    -
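    -# Illustrative sketch (not part of the original module): a subclass
    -# using the documented override points to opt out of caching.
    -class _ExampleNoCacheStaticHandler(StaticFileHandler):
    -    def set_extra_headers(self, path):
    -        # Applied after the default cache headers, so this wins.
    -        self.set_header("Cache-Control", "no-store, max-age=0")
    -
    -    def get_cache_time(self, path, modified, mime_type):
    -        return 0  # never emit a long-lived Expires header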
    -
    -class FallbackHandler(RequestHandler):
    -    """A `RequestHandler` that wraps another HTTP server callback.
    -
    -    The fallback is a callable object that accepts an
    -    `~.httputil.HTTPServerRequest`, such as an `Application` or
    -    `tornado.wsgi.WSGIContainer`.  This is most useful to use both
    -    Tornado ``RequestHandlers`` and WSGI in the same server.  Typical
    -    usage::
    -
    -        wsgi_app = tornado.wsgi.WSGIContainer(
    -            django.core.handlers.wsgi.WSGIHandler())
    -        application = tornado.web.Application([
    -            (r"/foo", FooHandler),
    -            (r".*", FallbackHandler, dict(fallback=wsgi_app)),
    -        ])
    -    """
    -    def initialize(self, fallback):
    -        self.fallback = fallback
    -
    -    def prepare(self):
    -        self.fallback(self.request)
    -        self._finished = True
    -        self.on_finish()
    -
    -
    -class OutputTransform(object):
    -    """A transform modifies the result of an HTTP request (e.g., GZip encoding)
    -
    -    Applications are not expected to create their own OutputTransforms
    -    or interact with them directly; the framework chooses which transforms
    -    (if any) to apply.
    -    """
    -    def __init__(self, request):
    -        pass
    -
    -    def transform_first_chunk(self, status_code, headers, chunk, finishing):
    -        # type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes] # noqa: E501
    -        return status_code, headers, chunk
    -
    -    def transform_chunk(self, chunk, finishing):
    -        return chunk
    -
    -
    -class GZipContentEncoding(OutputTransform):
    -    """Applies the gzip content encoding to the response.
    -
    -    See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
    -
    -    .. versionchanged:: 4.0
    -        Now compresses all mime types beginning with ``text/``, instead
    -        of just a whitelist. (the whitelist is still used for certain
    -        non-text mime types).
    -    """
    -    # Whitelist of compressible mime types (in addition to any types
    -    # beginning with "text/").
    -    CONTENT_TYPES = set(["application/javascript", "application/x-javascript",
    -                         "application/xml", "application/atom+xml",
    -                         "application/json", "application/xhtml+xml",
    -                         "image/svg+xml"])
    -    # Python's GzipFile defaults to level 9, while most other gzip
    -    # tools (including gzip itself) default to 6, which is probably a
    -    # better CPU/size tradeoff.
    -    GZIP_LEVEL = 6
    -    # Responses that are too short are unlikely to benefit from gzipping
    -    # after considering the "Content-Encoding: gzip" header and the header
    -    # inside the gzip encoding.
    -    # Note that responses written in multiple chunks will be compressed
    -    # regardless of size.
    -    MIN_LENGTH = 1024
    -
    -    def __init__(self, request):
    -        self._gzipping = "gzip" in request.headers.get("Accept-Encoding", "")
    -
    -    def _compressible_type(self, ctype):
    -        return ctype.startswith('text/') or ctype in self.CONTENT_TYPES
    -
    -    def transform_first_chunk(self, status_code, headers, chunk, finishing):
    -        # type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes] # noqa: E501
    -        # TODO: can/should this type be inherited from the superclass?
    -        if 'Vary' in headers:
    -            headers['Vary'] += ', Accept-Encoding'
    -        else:
    -            headers['Vary'] = 'Accept-Encoding'
    -        if self._gzipping:
    -            ctype = _unicode(headers.get("Content-Type", "")).split(";")[0]
    -            self._gzipping = self._compressible_type(ctype) and \
    -                (not finishing or len(chunk) >= self.MIN_LENGTH) and \
    -                ("Content-Encoding" not in headers)
    -        if self._gzipping:
    -            headers["Content-Encoding"] = "gzip"
    -            self._gzip_value = BytesIO()
    -            self._gzip_file = gzip.GzipFile(mode="w", fileobj=self._gzip_value,
    -                                            compresslevel=self.GZIP_LEVEL)
    -            chunk = self.transform_chunk(chunk, finishing)
    -            if "Content-Length" in headers:
    -                # The original content length is no longer correct.
    -                # If this is the last (and only) chunk, we can set the new
    -                # content-length; otherwise we remove it and fall back to
    -                # chunked encoding.
    -                if finishing:
    -                    headers["Content-Length"] = str(len(chunk))
    -                else:
    -                    del headers["Content-Length"]
    -        return status_code, headers, chunk
    -
    -    def transform_chunk(self, chunk, finishing):
    -        if self._gzipping:
    -            self._gzip_file.write(chunk)
    -            if finishing:
    -                self._gzip_file.close()
    -            else:
    -                self._gzip_file.flush()
    -            chunk = self._gzip_value.getvalue()
    -            self._gzip_value.truncate(0)
    -            self._gzip_value.seek(0)
    -        return chunk
    -
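    -# Illustrative note (not from the original module): this transform is
    -# not instantiated directly; it is enabled through the
    -# ``compress_response`` application setting, e.g.:
    -#
    -#     app = Application(handlers, compress_response=True)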
    -
    -def authenticated(method):
    -    """Decorate methods with this to require that the user be logged in.
    -
    -    If the user is not logged in, they will be redirected to the configured
    -    `login url <RequestHandler.get_login_url>`.
    -
    -    If you configure a login url with a query parameter, Tornado will
    -    assume you know what you're doing and use it as-is.  If not, it
    -    will add a `next` parameter so the login page knows where to send
    -    you once you're logged in.
    -    """
    -    @functools.wraps(method)
    -    def wrapper(self, *args, **kwargs):
    -        if not self.current_user:
    -            if self.request.method in ("GET", "HEAD"):
    -                url = self.get_login_url()
    -                if "?" not in url:
    -                    if urlparse.urlsplit(url).scheme:
    -                        # if login url is absolute, make next absolute too
    -                        next_url = self.request.full_url()
    -                    else:
    -                        next_url = self.request.uri
    -                    url += "?" + urlencode(dict(next=next_url))
    -                self.redirect(url)
    -                return
    -            raise HTTPError(403)
    -        return method(self, *args, **kwargs)
    -    return wrapper
    -
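    -# Illustrative sketch (not part of the original module): guarding a
    -# method; assumes ``login_url`` is set in the Application settings and
    -# that get_current_user() is overridden elsewhere.
    -class _ExampleProfileHandler(RequestHandler):
    -    @authenticated
    -    def get(self):
    -        self.write("hello, authenticated user")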
    -
    -class UIModule(object):
    -    """A re-usable, modular UI unit on a page.
    -
    -    UI modules often execute additional queries, and they can include
    -    additional CSS and JavaScript that will be included in the output
    -    page, which is automatically inserted on page render.
    -
    -    Subclasses of UIModule must override the `render` method.
    -    """
    -    def __init__(self, handler):
    -        self.handler = handler
    -        self.request = handler.request
    -        self.ui = handler.ui
    -        self.locale = handler.locale
    -
    -    @property
    -    def current_user(self):
    -        return self.handler.current_user
    -
    -    def render(self, *args, **kwargs):
    -        """Override in subclasses to return this module's output."""
    -        raise NotImplementedError()
    -
    -    def embedded_javascript(self):
    -        """Override to return a JavaScript string
    -        to be embedded in the page."""
    -        return None
    -
    -    def javascript_files(self):
    -        """Override to return a list of JavaScript files needed by this module.
    -
    -        If the return values are relative paths, they will be passed to
    -        `RequestHandler.static_url`; otherwise they will be used as-is.
    -        """
    -        return None
    -
    -    def embedded_css(self):
    -        """Override to return a CSS string
    -        that will be embedded in the page."""
    -        return None
    -
    -    def css_files(self):
    -        """Override to return a list of CSS files required by this module.
    -
    -        If the return values are relative paths, they will be passed to
    -        `RequestHandler.static_url`; otherwise they will be used as-is.
    -        """
    -        return None
    -
    -    def html_head(self):
    -        """Override to return an HTML string that will be put in the <head/>
    -        element.
    -        """
    -        return None
    -
    -    def html_body(self):
    -        """Override to return an HTML string that will be put at the end of
    -        the <body/> element.
    -        """
    -        return None
    -
    -    def render_string(self, path, **kwargs):
    -        """Renders a template and returns it as a string."""
    -        return self.handler.render_string(path, **kwargs)
    -
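    -# Illustrative sketch (not part of the original module): a minimal
    -# module, registered via ``ui_modules={"Entry": _ExampleEntry}`` in the
    -# Application settings and invoked from a template as
    -# ``{% module Entry(entry) %}``; the template name is hypothetical.
    -class _ExampleEntry(UIModule):
    -    def render(self, entry):
    -        return self.render_string("module-entry.html", entry=entry)
    -
    -    def embedded_css(self):
    -        return ".entry { margin: 1em 0; }"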
    -
    -class _linkify(UIModule):
    -    def render(self, text, **kwargs):
    -        return escape.linkify(text, **kwargs)
    -
    -
    -class _xsrf_form_html(UIModule):
    -    def render(self):
    -        return self.handler.xsrf_form_html()
    -
    -
    -class TemplateModule(UIModule):
    -    """UIModule that simply renders the given template.
    -
    -    {% module Template("foo.html") %} is similar to {% include "foo.html" %},
    -    but the module version gets its own namespace (with kwargs passed to
    -    Template()) instead of inheriting the outer template's namespace.
    -
    -    Templates rendered through this module also get access to UIModule's
    -    automatic javascript/css features.  Simply call set_resources
    -    inside the template and give it keyword arguments corresponding to
    -    the methods on UIModule: {{ set_resources(js_files=static_url("my.js")) }}
    -    Note that these resources are output once per template file, not once
    -    per instantiation of the template, so they must not depend on
    -    any arguments to the template.
    -    """
    -    def __init__(self, handler):
    -        super(TemplateModule, self).__init__(handler)
    -        # keep resources in both a list and a dict to preserve order
    -        self._resource_list = []
    -        self._resource_dict = {}
    -
    -    def render(self, path, **kwargs):
    -        def set_resources(**kwargs):
    -            if path not in self._resource_dict:
    -                self._resource_list.append(kwargs)
    -                self._resource_dict[path] = kwargs
    -            else:
    -                if self._resource_dict[path] != kwargs:
    -                    raise ValueError("set_resources called with different "
    -                                     "resources for the same template")
    -            return ""
    -        return self.render_string(path, set_resources=set_resources,
    -                                  **kwargs)
    -
    -    def _get_resources(self, key):
    -        return (r[key] for r in self._resource_list if key in r)
    -
    -    def embedded_javascript(self):
    -        return "\n".join(self._get_resources("embedded_javascript"))
    -
    -    def javascript_files(self):
    -        result = []
    -        for f in self._get_resources("javascript_files"):
    -            if isinstance(f, (unicode_type, bytes)):
    -                result.append(f)
    -            else:
    -                result.extend(f)
    -        return result
    -
    -    def embedded_css(self):
    -        return "\n".join(self._get_resources("embedded_css"))
    -
    -    def css_files(self):
    -        result = []
    -        for f in self._get_resources("css_files"):
    -            if isinstance(f, (unicode_type, bytes)):
    -                result.append(f)
    -            else:
    -                result.extend(f)
    -        return result
    -
    -    def html_head(self):
    -        return "".join(self._get_resources("html_head"))
    -
    -    def html_body(self):
    -        return "".join(self._get_resources("html_body"))
    -
    -
    -class _UIModuleNamespace(object):
    -    """Lazy namespace which creates UIModule proxies bound to a handler."""
    -    def __init__(self, handler, ui_modules):
    -        self.handler = handler
    -        self.ui_modules = ui_modules
    -
    -    def __getitem__(self, key):
    -        return self.handler._ui_module(key, self.ui_modules[key])
    -
    -    def __getattr__(self, key):
    -        try:
    -            return self[key]
    -        except KeyError as e:
    -            raise AttributeError(str(e))
    -
    -
    -if hasattr(hmac, 'compare_digest'):  # python 3.3
    -    _time_independent_equals = hmac.compare_digest
    -else:
    -    def _time_independent_equals(a, b):
    -        if len(a) != len(b):
    -            return False
    -        result = 0
    -        if isinstance(a[0], int):  # python3 byte strings
    -            for x, y in zip(a, b):
    -                result |= x ^ y
    -        else:  # python2
    -            for x, y in zip(a, b):
    -                result |= ord(x) ^ ord(y)
    -        return result == 0
    -
    -
    -def create_signed_value(secret, name, value, version=None, clock=None,
    -                        key_version=None):
    -    if version is None:
    -        version = DEFAULT_SIGNED_VALUE_VERSION
    -    if clock is None:
    -        clock = time.time
    -
    -    timestamp = utf8(str(int(clock())))
    -    value = base64.b64encode(utf8(value))
    -    if version == 1:
    -        signature = _create_signature_v1(secret, name, value, timestamp)
    -        value = b"|".join([value, timestamp, signature])
    -        return value
    -    elif version == 2:
    -        # The v2 format consists of a version number and a series of
    -        # length-prefixed fields "%d:%s", the last of which is a
    -        # signature, all separated by pipes.  All numbers are in
    -        # decimal format with no leading zeros.  The signature is an
    -        # HMAC-SHA256 of the whole string up to that point, including
    -        # the final pipe.
    -        #
    -        # The fields are:
    -        # - format version (i.e. 2; no length prefix)
    -        # - key version (integer, default is 0)
    -        # - timestamp (integer seconds since epoch)
    -        # - name (not encoded; assumed to be ~alphanumeric)
    -        # - value (base64-encoded)
    -        # - signature (hex-encoded; no length prefix)
    -        def format_field(s):
    -            return utf8("%d:" % len(s)) + utf8(s)
    -        to_sign = b"|".join([
    -            b"2",
    -            format_field(str(key_version or 0)),
    -            format_field(timestamp),
    -            format_field(name),
    -            format_field(value),
    -            b''])
    -
    -        if isinstance(secret, dict):
    -            assert key_version is not None, 'Key version must be set when sign key dict is used'
    -            assert version >= 2, 'Version must be at least 2 for key version support'
    -            secret = secret[key_version]
    -
    -        signature = _create_signature_v2(secret, to_sign)
    -        return to_sign + signature
    -    else:
    -        raise ValueError("Unsupported version %d" % version)
    -
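    -# Illustrative sketch (not from the original module): a v2 sign/verify
    -# round trip with a fixed secret and cookie name.
    -#
    -#     signed = create_signed_value("my-secret", "session", "42")
    -#     assert decode_signed_value("my-secret", "session", signed) == b"42"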
    -
    -# A leading version number in decimal
    -# with no leading zeros, followed by a pipe.
    -_signed_value_version_re = re.compile(br"^([1-9][0-9]*)\|(.*)$")
    -
    -
    -def _get_version(value):
    -    # Figures out what version value is.  Version 1 did not include an
    -    # explicit version field and started with arbitrary base64 data,
    -    # which makes this tricky.
    -    m = _signed_value_version_re.match(value)
    -    if m is None:
    -        version = 1
    -    else:
    -        try:
    -            version = int(m.group(1))
    -            if version > 999:
    -                # Certain payloads from the version-less v1 format may
    -                # be parsed as valid integers.  Due to base64 padding
    -                # restrictions, this can only happen for numbers whose
    -                # length is a multiple of 4, so we can treat all
    -                # numbers up to 999 as versions, and for the rest we
    -                # fall back to v1 format.
    -                version = 1
    -        except ValueError:
    -            version = 1
    -    return version
    -
    -
    -def decode_signed_value(secret, name, value, max_age_days=31,
    -                        clock=None, min_version=None):
    -    if clock is None:
    -        clock = time.time
    -    if min_version is None:
    -        min_version = DEFAULT_SIGNED_VALUE_MIN_VERSION
    -    if min_version > 2:
    -        raise ValueError("Unsupported min_version %d" % min_version)
    -    if not value:
    -        return None
    -
    -    value = utf8(value)
    -    version = _get_version(value)
    -
    -    if version < min_version:
    -        return None
    -    if version == 1:
    -        return _decode_signed_value_v1(secret, name, value,
    -                                       max_age_days, clock)
    -    elif version == 2:
    -        return _decode_signed_value_v2(secret, name, value,
    -                                       max_age_days, clock)
    -    else:
    -        return None
    -
    -
    -def _decode_signed_value_v1(secret, name, value, max_age_days, clock):
    -    parts = utf8(value).split(b"|")
    -    if len(parts) != 3:
    -        return None
    -    signature = _create_signature_v1(secret, name, parts[0], parts[1])
    -    if not _time_independent_equals(parts[2], signature):
    -        gen_log.warning("Invalid cookie signature %r", value)
    -        return None
    -    timestamp = int(parts[1])
    -    if timestamp < clock() - max_age_days * 86400:
    -        gen_log.warning("Expired cookie %r", value)
    -        return None
    -    if timestamp > clock() + 31 * 86400:
    -        # _cookie_signature does not hash a delimiter between the
    -        # parts of the cookie, so an attacker could transfer trailing
    -        # digits from the payload to the timestamp without altering the
    -        # signature.  For backwards compatibility, sanity-check timestamp
    -        # here instead of modifying _cookie_signature.
    -        gen_log.warning("Cookie timestamp in future; possible tampering %r",
    -                        value)
    -        return None
    -    if parts[1].startswith(b"0"):
    -        gen_log.warning("Tampered cookie %r", value)
    -        return None
    -    try:
    -        return base64.b64decode(parts[0])
    -    except Exception:
    -        return None
    -
    -
    -def _decode_fields_v2(value):
    -    def _consume_field(s):
    -        length, _, rest = s.partition(b':')
    -        n = int(length)
    -        field_value = rest[:n]
    -        # In python 3, indexing bytes returns small integers; we must
    -        # use a slice to get a byte string as in python 2.
    -        if rest[n:n + 1] != b'|':
    -            raise ValueError("malformed v2 signed value field")
    -        rest = rest[n + 1:]
    -        return field_value, rest
    -
    -    rest = value[2:]  # remove version number
    -    key_version, rest = _consume_field(rest)
    -    timestamp, rest = _consume_field(rest)
    -    name_field, rest = _consume_field(rest)
    -    value_field, passed_sig = _consume_field(rest)
    -    return int(key_version), timestamp, name_field, value_field, passed_sig
    -
    -
    -def _decode_signed_value_v2(secret, name, value, max_age_days, clock):
    -    try:
    -        key_version, timestamp, name_field, value_field, passed_sig = _decode_fields_v2(value)
    -    except ValueError:
    -        return None
    -    signed_string = value[:-len(passed_sig)]
    -
    -    if isinstance(secret, dict):
    -        try:
    -            secret = secret[key_version]
    -        except KeyError:
    -            return None
    -
    -    expected_sig = _create_signature_v2(secret, signed_string)
    -    if not _time_independent_equals(passed_sig, expected_sig):
    -        return None
    -    if name_field != utf8(name):
    -        return None
    -    timestamp = int(timestamp)
    -    if timestamp < clock() - max_age_days * 86400:
    -        # The signature has expired.
    -        return None
    -    try:
    -        return base64.b64decode(value_field)
    -    except Exception:
    -        return None
    -
    -
    -def get_signature_key_version(value):
    -    value = utf8(value)
    -    version = _get_version(value)
    -    if version < 2:
    -        return None
    -    try:
    -        key_version, _, _, _, _ = _decode_fields_v2(value)
    -    except ValueError:
    -        return None
    -
    -    return key_version
    -
    -
    -def _create_signature_v1(secret, *parts):
    -    hash = hmac.new(utf8(secret), digestmod=hashlib.sha1)
    -    for part in parts:
    -        hash.update(utf8(part))
    -    return utf8(hash.hexdigest())
    -
    -
    -def _create_signature_v2(secret, s):
    -    hash = hmac.new(utf8(secret), digestmod=hashlib.sha256)
    -    hash.update(utf8(s))
    -    return utf8(hash.hexdigest())
    -
    -
    -def is_absolute(path):
    -    return any(path.startswith(x) for x in ["/", "http:", "https:"])
    diff --git a/lib/tornado/websocket.py b/lib/tornado/websocket.py
    deleted file mode 100755
    index 7b77850a..00000000
    --- a/lib/tornado/websocket.py
    +++ /dev/null
    @@ -1,1346 +0,0 @@
    -"""Implementation of the WebSocket protocol.
    -
    -`WebSockets <http://dev.w3.org/html5/websockets/>`_ allow for bidirectional
    -communication between the browser and server.
    -
    -WebSockets are supported in the current versions of all major browsers,
    -although older versions that do not support WebSockets are still in use
    -(refer to http://caniuse.com/websockets for details).
    -
    -This module implements the final version of the WebSocket protocol as
    -defined in `RFC 6455 <http://tools.ietf.org/html/rfc6455>`_.  Certain
    -browser versions (notably Safari 5.x) implemented an earlier draft of
    -the protocol (known as "draft 76") and are not compatible with this module.
    -
    -.. versionchanged:: 4.0
    -   Removed support for the draft 76 protocol version.
    -"""
    -
    -from __future__ import absolute_import, division, print_function
    -
    -import base64
    -import hashlib
    -import os
    -import sys
    -import struct
    -import tornado.escape
    -import tornado.web
    -import zlib
    -
    -from tornado.concurrent import Future, future_set_result_unless_cancelled
    -from tornado.escape import utf8, native_str, to_unicode
    -from tornado import gen, httpclient, httputil
    -from tornado.ioloop import IOLoop, PeriodicCallback
    -from tornado.iostream import StreamClosedError
    -from tornado.log import gen_log
    -from tornado import simple_httpclient
    -from tornado.queues import Queue
    -from tornado.tcpclient import TCPClient
    -from tornado.util import _websocket_mask, PY3
    -
    -if PY3:
    -    from urllib.parse import urlparse  # py3
    -    xrange = range
    -else:
    -    from urlparse import urlparse  # py2
    -
    -_default_max_message_size = 10 * 1024 * 1024
    -
    -
    -class WebSocketError(Exception):
    -    pass
    -
    -
    -class WebSocketClosedError(WebSocketError):
    -    """Raised by operations on a closed connection.
    -
    -    .. versionadded:: 3.2
    -    """
    -    pass
    -
    -
    -class _DecompressTooLargeError(Exception):
    -    pass
    -
    -
    -class WebSocketHandler(tornado.web.RequestHandler):
    -    """Subclass this class to create a basic WebSocket handler.
    -
    -    Override `on_message` to handle incoming messages, and use
    -    `write_message` to send messages to the client. You can also
    -    override `open` and `on_close` to handle opened and closed
    -    connections.
    -
    -    Custom upgrade response headers can be sent by overriding
    -    `~tornado.web.RequestHandler.set_default_headers` or
    -    `~tornado.web.RequestHandler.prepare`.
    -
    -    See http://dev.w3.org/html5/websockets/ for details on the
    -    JavaScript interface.  The protocol is specified at
    -    http://tools.ietf.org/html/rfc6455.
    -
    -    Here is an example WebSocket handler that echoes all received
    -    messages back to the client:
    -
    -    .. testcode::
    -
    -      class EchoWebSocket(tornado.websocket.WebSocketHandler):
    -          def open(self):
    -              print("WebSocket opened")
    -
    -          def on_message(self, message):
    -              self.write_message(u"You said: " + message)
    -
    -          def on_close(self):
    -              print("WebSocket closed")
    -
    -    .. testoutput::
    -       :hide:
    -
    -    WebSockets are not standard HTTP connections. The "handshake" is
    -    HTTP, but after the handshake, the protocol is
    -    message-based. Consequently, most of the Tornado HTTP facilities
    -    are not available in handlers of this type. The only communication
    -    methods available to you are `write_message()`, `ping()`, and
    -    `close()`. Likewise, your request handler class should implement
    -    the `open()` method rather than ``get()`` or ``post()``.
    -
    -    If you map the handler above to ``/websocket`` in your application, you can
    -    invoke it in JavaScript with::
    -
    -      var ws = new WebSocket("ws://localhost:8888/websocket");
    -      ws.onopen = function() {
    -         ws.send("Hello, world");
    -      };
    -      ws.onmessage = function (evt) {
    -         alert(evt.data);
    -      };
    -
    -    This script pops up an alert box that says "You said: Hello, world".
    -
    -    Web browsers allow any site to open a websocket connection to any other,
    -    instead of using the same-origin policy that governs other network
    -    access from javascript.  This can be surprising and is a potential
    -    security hole, so since Tornado 4.0 `WebSocketHandler` requires
    -    applications that wish to receive cross-origin websockets to opt in
    -    by overriding the `~WebSocketHandler.check_origin` method (see that
    -    method's docs for details).  Failure to do so is the most likely
    -    cause of 403 errors when making a websocket connection.
    -
    -    When using a secure websocket connection (``wss://``) with a self-signed
    -    certificate, the connection from a browser may fail because it wants
    -    to show the "accept this certificate" dialog but has nowhere to show it.
    -    You must first visit a regular HTML page using the same certificate
    -    to accept it before the websocket connection will succeed.
    -
    -    If the application setting ``websocket_ping_interval`` has a non-zero
    -    value, a ping will be sent periodically, and the connection will be
    -    closed if a response is not received before the ``websocket_ping_timeout``.
    -
    -    Messages larger than the ``websocket_max_message_size`` application setting
    -    (default 10MiB) will not be accepted.
    -
    -    .. versionchanged:: 4.5
    -       Added ``websocket_ping_interval``, ``websocket_ping_timeout``, and
    -       ``websocket_max_message_size``.
    -    """
    -    def __init__(self, application, request, **kwargs):
    -        super(WebSocketHandler, self).__init__(application, request, **kwargs)
    -        self.ws_connection = None
    -        self.close_code = None
    -        self.close_reason = None
    -        self.stream = None
    -        self._on_close_called = False
    -
    -    def get(self, *args, **kwargs):
    -        self.open_args = args
    -        self.open_kwargs = kwargs
    -
    -        # Upgrade header should be present and should be equal to WebSocket
    -        if self.request.headers.get("Upgrade", "").lower() != 'websocket':
    -            self.set_status(400)
    -            log_msg = "Can \"Upgrade\" only to \"WebSocket\"."
    -            self.finish(log_msg)
    -            gen_log.debug(log_msg)
    -            return
    -
    -        # Connection header should be upgrade.
    -        # Some proxy servers/load balancers
    -        # might mess with it.
    -        headers = self.request.headers
    -        connection = map(lambda s: s.strip().lower(),
    -                         headers.get("Connection", "").split(","))
    -        if 'upgrade' not in connection:
    -            self.set_status(400)
    -            log_msg = "\"Connection\" must be \"Upgrade\"."
    -            self.finish(log_msg)
    -            gen_log.debug(log_msg)
    -            return
    -
    -        # Handle WebSocket Origin naming convention differences
    -        # The difference between version 8 and 13 is that in 8 the
    -        # client sends a "Sec-Websocket-Origin" header and in 13 it's
    -        # simply "Origin".
    -        if "Origin" in self.request.headers:
    -            origin = self.request.headers.get("Origin")
    -        else:
    -            origin = self.request.headers.get("Sec-Websocket-Origin", None)
    -
    -        # If there was an origin header, check to make sure it matches
    -        # according to check_origin. When the origin is None, we assume it
    -        # did not come from a browser and that it can be passed on.
    -        if origin is not None and not self.check_origin(origin):
    -            self.set_status(403)
    -            log_msg = "Cross origin websockets not allowed"
    -            self.finish(log_msg)
    -            gen_log.debug(log_msg)
    -            return
    -
    -        self.ws_connection = self.get_websocket_protocol()
    -        if self.ws_connection:
    -            self.ws_connection.accept_connection()
    -        else:
    -            self.set_status(426, "Upgrade Required")
    -            self.set_header("Sec-WebSocket-Version", "7, 8, 13")
    -            self.finish()
    -
    -    stream = None
    -
    -    @property
    -    def ping_interval(self):
    -        """The interval for websocket keep-alive pings.
    -
    -        Set websocket_ping_interval = 0 to disable pings.
    -        """
    -        return self.settings.get('websocket_ping_interval', None)
    -
    -    @property
    -    def ping_timeout(self):
    -        """If no ping is received in this many seconds,
    -        close the websocket connection (VPNs, etc. can fail to cleanly close ws connections).
    -        Default is max of 3 pings or 30 seconds.
    -        """
    -        return self.settings.get('websocket_ping_timeout', None)
    -
    -    @property
    -    def max_message_size(self):
    -        """Maximum allowed message size.
    -
    -        If the remote peer sends a message larger than this, the connection
    -        will be closed.
    -
    -        Default is 10MiB.
    -        """
    -        return self.settings.get('websocket_max_message_size', _default_max_message_size)
    -
    -    def write_message(self, message, binary=False):
    -        """Sends the given message to the client of this Web Socket.
    -
    -        The message may be either a string or a dict (which will be
    -        encoded as json).  If the ``binary`` argument is false, the
    -        message will be sent as utf8; in binary mode any byte string
    -        is allowed.
    -
    -        If the connection is already closed, raises `WebSocketClosedError`.
    -        Returns a `.Future` which can be used for flow control.
    -
    -        .. versionchanged:: 3.2
    -           `WebSocketClosedError` was added (previously a closed connection
    -           would raise an `AttributeError`)
    -
    -        .. versionchanged:: 4.3
    -           Returns a `.Future` which can be used for flow control.
    -
    -        .. versionchanged:: 5.0
    -           Consistently raises `WebSocketClosedError`. Previously could
    -           sometimes raise `.StreamClosedError`.
    -        """
    -        if self.ws_connection is None:
    -            raise WebSocketClosedError()
    -        if isinstance(message, dict):
    -            message = tornado.escape.json_encode(message)
    -        return self.ws_connection.write_message(message, binary=binary)
    -
    -    def select_subprotocol(self, subprotocols):
    -        """Override to implement subprotocol negotiation.
    -
    -        ``subprotocols`` is a list of strings identifying the
    -        subprotocols proposed by the client.  This method may be
    -        overridden to return one of those strings to select it, or
    -        ``None`` to not select a subprotocol.
    -
    -        Failure to select a subprotocol does not automatically abort
    -        the connection, although clients may close the connection if
    -        none of their proposed subprotocols was selected.
    -
    -        The list may be empty, in which case this method must return
    -        None. This method is always called exactly once even if no
    -        subprotocols were proposed so that the handler can be advised
    -        of this fact.
    -
    -        .. versionchanged:: 5.1
    -
    -           Previously, this method was called with a list containing
    -           an empty string instead of an empty list if no subprotocols
    -           were proposed by the client.
    -        """
    -        return None
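A hedged sketch of a negotiation override; the subprotocol names are hypothetical:

```python
import tornado.websocket

class ChatHandler(tornado.websocket.WebSocketHandler):
    def select_subprotocol(self, subprotocols):
        # Prefer the newest revision the client proposed; "chat.v2" and
        # "chat.v1" are hypothetical names. Returning None (also required
        # when the list is empty) selects no subprotocol.
        for proto in ("chat.v2", "chat.v1"):
            if proto in subprotocols:
                return proto
        return None
```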
    -
    -    @property
    -    def selected_subprotocol(self):
    -        """The subprotocol returned by `select_subprotocol`.
    -
    -        .. versionadded:: 5.1
    -        """
    -        return self.ws_connection.selected_subprotocol
    -
    -    def get_compression_options(self):
    -        """Override to return compression options for the connection.
    -
    -        If this method returns None (the default), compression will
    -        be disabled.  If it returns a dict (even an empty one), it
    -        will be enabled.  The contents of the dict may be used to
    -        control the following compression options:
    -
    -        ``compression_level`` specifies the compression level.
    -
    -        ``mem_level`` specifies the amount of memory used for the internal compression state.
    -
    -        These parameters are documented in detail here:
    -        https://docs.python.org/3.6/library/zlib.html#zlib.compressobj
    -
    -        .. versionadded:: 4.1
    -
    -        .. versionchanged:: 4.5
    -
    -           Added ``compression_level`` and ``mem_level``.
    -        """
    -        # TODO: Add wbits option.
    -        return None
    -
    -    def open(self, *args, **kwargs):
    -        """Invoked when a new WebSocket is opened.
    -
    -        The arguments to `open` are extracted from the `tornado.web.URLSpec`
    -        regular expression, just like the arguments to
    -        `tornado.web.RequestHandler.get`.
    -
    -        `open` may be a coroutine. `on_message` will not be called until
    -        `open` has returned.
    -
    -        .. versionchanged:: 5.1
    -
    -           ``open`` may be a coroutine.
    -        """
    -        pass
    -
    -    def on_message(self, message):
    -        """Handle incoming messages on the WebSocket
    -
    -        This method must be overridden.
    -
    -        .. versionchanged:: 4.5
    -
    -           ``on_message`` can be a coroutine.
    -        """
    -        raise NotImplementedError
    -
    -    def ping(self, data=b''):
    -        """Send ping frame to the remote end.
    -
    -        The data argument allows a small amount of data (up to 125
    -        bytes) to be sent as a part of the ping message. Note that not
    -        all websocket implementations expose this data to
    -        applications.
    -
    -        Consider using the ``websocket_ping_interval`` application
    -        setting instead of sending pings manually.
    -
    -        .. versionchanged:: 5.1
    -
    -           The data argument is now optional.
    -
    -        """
    -        data = utf8(data)
    -        if self.ws_connection is None:
    -            raise WebSocketClosedError()
    -        self.ws_connection.write_ping(data)
    -
    -    def on_pong(self, data):
    -        """Invoked when the response to a ping frame is received."""
    -        pass
    -
    -    def on_ping(self, data):
    -        """Invoked when the a ping frame is received."""
    -        pass
    -
    -    def on_close(self):
    -        """Invoked when the WebSocket is closed.
    -
    -        If the connection was closed cleanly and a status code or reason
    -        phrase was supplied, these values will be available as the attributes
    -        ``self.close_code`` and ``self.close_reason``.
    -
    -        .. versionchanged:: 4.0
    -
    -           Added ``close_code`` and ``close_reason`` attributes.
    -        """
    -        pass
    -
    -    def close(self, code=None, reason=None):
    -        """Closes this Web Socket.
    -
    -        Once the close handshake is successful the socket will be closed.
    -
    -        ``code`` may be a numeric status code, taken from the values
    -        defined in `RFC 6455 section 7.4.1
    -        <https://tools.ietf.org/html/rfc6455#section-7.4.1>`_.
    -        ``reason`` may be a textual message about why the connection is
    -        closing.  These values are made available to the client, but are
    -        not otherwise interpreted by the websocket protocol.
    -
    -        .. versionchanged:: 4.0
    -
    -           Added the ``code`` and ``reason`` arguments.
    -        """
    -        if self.ws_connection:
    -            self.ws_connection.close(code, reason)
    -            self.ws_connection = None
    -
    -    def check_origin(self, origin):
    -        """Override to enable support for allowing alternate origins.
    -
    -        The ``origin`` argument is the value of the ``Origin`` HTTP
    -        header, the url responsible for initiating this request.  This
    -        method is not called for clients that do not send this header;
    -        such requests are always allowed (because all browsers that
    -        implement WebSockets support this header, and non-browser
    -        clients do not have the same cross-site security concerns).
    -
    -        Should return True to accept the request or False to reject it.
    -        By default, rejects all requests with an origin on a host other
    -        than this one.
    -
    -        This is a security protection against cross site scripting attacks on
    -        browsers, since WebSockets are allowed to bypass the usual same-origin
    -        policies and don't use CORS headers.
    -
    -        .. warning::
    -
    -           This is an important security measure; don't disable it
    -           without understanding the security implications. In
    -           particular, if your authentication is cookie-based, you
    -           must either restrict the origins allowed by
    -           ``check_origin()`` or implement your own XSRF-like
    -           protection for websocket connections. See `these
    -           <https://www.christian-schneider.net/CrossSiteWebSocketHijacking.html>`_
    -           `articles
    -           <https://devcenter.heroku.com/articles/websocket-security>`_
    -           for more.
    -
    -        To accept all cross-origin traffic (which was the default prior to
    -        Tornado 4.0), simply override this method to always return true::
    -
    -            def check_origin(self, origin):
    -                return True
    -
    -        To allow connections from any subdomain of your site, you might
    -        do something like::
    -
    -            def check_origin(self, origin):
    -                parsed_origin = urllib.parse.urlparse(origin)
    -                return parsed_origin.netloc.endswith(".mydomain.com")
    -
    -        .. versionadded:: 4.0
    -
    -        """
    -        parsed_origin = urlparse(origin)
    -        origin = parsed_origin.netloc
    -        origin = origin.lower()
    -
    -        host = self.request.headers.get("Host")
    -
    -        # Check to see that origin matches host directly, including ports
    -        return origin == host
    -
    -    def set_nodelay(self, value):
    -        """Set the no-delay flag for this stream.
    -
    -        By default, small messages may be delayed and/or combined to minimize
    -        the number of packets sent.  This can sometimes cause 200-500ms delays
    -        due to the interaction between Nagle's algorithm and TCP delayed
    -        ACKs.  To reduce this delay (at the expense of possibly increasing
    -        bandwidth usage), call ``self.set_nodelay(True)`` once the websocket
    -        connection is established.
    -
    -        See `.BaseIOStream.set_nodelay` for additional details.
    -
    -        .. versionadded:: 3.1
    -        """
    -        self.stream.set_nodelay(value)
    -
    -    def on_connection_close(self):
    -        if self.ws_connection:
    -            self.ws_connection.on_connection_close()
    -            self.ws_connection = None
    -        if not self._on_close_called:
    -            self._on_close_called = True
    -            self.on_close()
    -            self._break_cycles()
    -
    -    def _break_cycles(self):
    -        # WebSocketHandlers call finish() early, but we don't want to
    -        # break up reference cycles (which makes it impossible to call
    -        # self.render_string) until after we've really closed the
    -        # connection (if it was established in the first place,
    -        # indicated by status code 101).
    -        if self.get_status() != 101 or self._on_close_called:
    -            super(WebSocketHandler, self)._break_cycles()
    -
    -    def send_error(self, *args, **kwargs):
    -        if self.stream is None:
    -            super(WebSocketHandler, self).send_error(*args, **kwargs)
    -        else:
    -            # If we get an uncaught exception during the handshake,
    -            # we have no choice but to abruptly close the connection.
    -            # TODO: for uncaught exceptions after the handshake,
    -            # we can close the connection more gracefully.
    -            self.stream.close()
    -
    -    def get_websocket_protocol(self):
    -        websocket_version = self.request.headers.get("Sec-WebSocket-Version")
    -        if websocket_version in ("7", "8", "13"):
    -            return WebSocketProtocol13(
    -                self, compression_options=self.get_compression_options())
    -
    -    def _attach_stream(self):
    -        self.stream = self.detach()
    -        self.stream.set_close_callback(self.on_connection_close)
    -        # disable non-WS methods
    -        for method in ["write", "redirect", "set_header", "set_cookie",
    -                       "set_status", "flush", "finish"]:
    -            setattr(self, method, _raise_not_supported_for_websockets)
    -
    -
    -def _raise_not_supported_for_websockets(*args, **kwargs):
    -    raise RuntimeError("Method not supported for Web Sockets")
    -
    -
    -class WebSocketProtocol(object):
    -    """Base class for WebSocket protocol versions.
    -    """
    -    def __init__(self, handler):
    -        self.handler = handler
    -        self.request = handler.request
    -        self.stream = handler.stream
    -        self.client_terminated = False
    -        self.server_terminated = False
    -
    -    def _run_callback(self, callback, *args, **kwargs):
    -        """Runs the given callback with exception handling.
    -
    -        If the callback is a coroutine, returns its Future. On error, aborts the
    -        websocket connection and returns None.
    -        """
    -        try:
    -            result = callback(*args, **kwargs)
    -        except Exception:
    -            self.handler.log_exception(*sys.exc_info())
    -            self._abort()
    -        else:
    -            if result is not None:
    -                result = gen.convert_yielded(result)
    -                self.stream.io_loop.add_future(result, lambda f: f.result())
    -            return result
    -
    -    def on_connection_close(self):
    -        self._abort()
    -
    -    def _abort(self):
    -        """Instantly aborts the WebSocket connection by closing the socket"""
    -        self.client_terminated = True
    -        self.server_terminated = True
    -        self.stream.close()  # forcibly tear down the connection
    -        self.close()  # let the subclass cleanup
    -
    -
    -class _PerMessageDeflateCompressor(object):
    -    def __init__(self, persistent, max_wbits, compression_options=None):
    -        if max_wbits is None:
    -            max_wbits = zlib.MAX_WBITS
    -        # There is no symbolic constant for the minimum wbits value.
    -        if not (8 <= max_wbits <= zlib.MAX_WBITS):
    -            raise ValueError("Invalid max_wbits value %r; allowed range 8-%d",
    -                             max_wbits, zlib.MAX_WBITS)
    -        self._max_wbits = max_wbits
    -
    -        if compression_options is None or 'compression_level' not in compression_options:
    -            self._compression_level = tornado.web.GZipContentEncoding.GZIP_LEVEL
    -        else:
    -            self._compression_level = compression_options['compression_level']
    -
    -        if compression_options is None or 'mem_level' not in compression_options:
    -            self._mem_level = 8
    -        else:
    -            self._mem_level = compression_options['mem_level']
    -
    -        if persistent:
    -            self._compressor = self._create_compressor()
    -        else:
    -            self._compressor = None
    -
    -    def _create_compressor(self):
    -        return zlib.compressobj(self._compression_level,
    -                                zlib.DEFLATED, -self._max_wbits, self._mem_level)
    -
    -    def compress(self, data):
    -        compressor = self._compressor or self._create_compressor()
    -        data = (compressor.compress(data) +
    -                compressor.flush(zlib.Z_SYNC_FLUSH))
    -        assert data.endswith(b'\x00\x00\xff\xff')
    -        return data[:-4]
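The trailing-bytes assertion above relies on how a zlib sync flush terminates a raw-deflate stream. A standalone check (assuming negative wbits, i.e. headerless deflate) of the round trip that this compressor and the decompressor below perform:

```python
import zlib

payload = b"hello websocket " * 16

# Compress with a sync flush: the output always ends with the 4-byte
# empty stored block 00 00 ff ff, which permessage-deflate strips.
comp = zlib.compressobj(6, zlib.DEFLATED, -zlib.MAX_WBITS)
wire = comp.compress(payload) + comp.flush(zlib.Z_SYNC_FLUSH)
assert wire.endswith(b"\x00\x00\xff\xff")
wire = wire[:-4]  # the bytes actually carried in the frame

# The receiver re-appends the tail before inflating.
decomp = zlib.decompressobj(-zlib.MAX_WBITS)
assert decomp.decompress(wire + b"\x00\x00\xff\xff") == payload
```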
    -
    -
    -class _PerMessageDeflateDecompressor(object):
    -    def __init__(self, persistent, max_wbits, max_message_size, compression_options=None):
    -        self._max_message_size = max_message_size
    -        if max_wbits is None:
    -            max_wbits = zlib.MAX_WBITS
    -        if not (8 <= max_wbits <= zlib.MAX_WBITS):
    -            raise ValueError("Invalid max_wbits value %r; allowed range 8-%d",
    -                             max_wbits, zlib.MAX_WBITS)
    -        self._max_wbits = max_wbits
    -        if persistent:
    -            self._decompressor = self._create_decompressor()
    -        else:
    -            self._decompressor = None
    -
    -    def _create_decompressor(self):
    -        return zlib.decompressobj(-self._max_wbits)
    -
    -    def decompress(self, data):
    -        decompressor = self._decompressor or self._create_decompressor()
    -        result = decompressor.decompress(data + b'\x00\x00\xff\xff', self._max_message_size)
    -        if decompressor.unconsumed_tail:
    -            raise _DecompressTooLargeError()
    -        return result
    -
    -
    -class WebSocketProtocol13(WebSocketProtocol):
    -    """Implementation of the WebSocket protocol from RFC 6455.
    -
    -    This class supports versions 7 and 8 of the protocol in addition to the
    -    final version 13.
    -    """
    -    # Bit masks for the first byte of a frame.
    -    FIN = 0x80
    -    RSV1 = 0x40
    -    RSV2 = 0x20
    -    RSV3 = 0x10
    -    RSV_MASK = RSV1 | RSV2 | RSV3
    -    OPCODE_MASK = 0x0f
    -
    -    def __init__(self, handler, mask_outgoing=False,
    -                 compression_options=None):
    -        WebSocketProtocol.__init__(self, handler)
    -        self.mask_outgoing = mask_outgoing
    -        self._final_frame = False
    -        self._frame_opcode = None
    -        self._masked_frame = None
    -        self._frame_mask = None
    -        self._frame_length = None
    -        self._fragmented_message_buffer = None
    -        self._fragmented_message_opcode = None
    -        self._waiting = None
    -        self._compression_options = compression_options
    -        self._decompressor = None
    -        self._compressor = None
    -        self._frame_compressed = None
    -        # The total uncompressed size of all messages received or sent.
    -        # Unicode messages are encoded to utf8.
    -        # Only for testing; subject to change.
    -        self._message_bytes_in = 0
    -        self._message_bytes_out = 0
    -        # The total size of all packets received or sent.  Includes
    -        # the effect of compression, frame overhead, and control frames.
    -        self._wire_bytes_in = 0
    -        self._wire_bytes_out = 0
    -        self.ping_callback = None
    -        self.last_ping = 0
    -        self.last_pong = 0
    -
    -    def accept_connection(self):
    -        try:
    -            self._handle_websocket_headers()
    -        except ValueError:
    -            self.handler.set_status(400)
    -            log_msg = "Missing/Invalid WebSocket headers"
    -            self.handler.finish(log_msg)
    -            gen_log.debug(log_msg)
    -            return
    -
    -        try:
    -            self._accept_connection()
    -        except ValueError:
    -            gen_log.debug("Malformed WebSocket request received",
    -                          exc_info=True)
    -            self._abort()
    -            return
    -
    -    def _handle_websocket_headers(self):
    -        """Verifies all invariant- and required headers
    -
    -        If a header is missing or have an incorrect value ValueError will be
    -        raised
    -        """
    -        fields = ("Host", "Sec-Websocket-Key", "Sec-Websocket-Version")
    -        if not all(map(lambda f: self.request.headers.get(f), fields)):
    -            raise ValueError("Missing/Invalid WebSocket headers")
    -
    -    @staticmethod
    -    def compute_accept_value(key):
    -        """Computes the value for the Sec-WebSocket-Accept header,
    -        given the value for Sec-WebSocket-Key.
    -        """
    -        sha1 = hashlib.sha1()
    -        sha1.update(utf8(key))
    -        sha1.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11")  # Magic value
    -        return native_str(base64.b64encode(sha1.digest()))
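Because the accept value is a pure function of the client key, it can be checked against the worked example in RFC 6455 section 1.3:

```python
import base64
import hashlib

key = "dGhlIHNhbXBsZSBub25jZQ=="  # sample Sec-WebSocket-Key from RFC 6455
sha1 = hashlib.sha1()
sha1.update(key.encode("utf-8"))
sha1.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11")  # protocol magic GUID
accept = base64.b64encode(sha1.digest()).decode("ascii")
assert accept == "s3pPLMBiTxaQ9kYGzzhZRbK+xOo="  # expected value per the RFC
```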
    -
    -    def _challenge_response(self):
    -        return WebSocketProtocol13.compute_accept_value(
    -            self.request.headers.get("Sec-Websocket-Key"))
    -
    -    @gen.coroutine
    -    def _accept_connection(self):
    -        subprotocol_header = self.request.headers.get("Sec-WebSocket-Protocol")
    -        if subprotocol_header:
    -            subprotocols = [s.strip() for s in subprotocol_header.split(',')]
    -        else:
    -            subprotocols = []
    -        self.selected_subprotocol = self.handler.select_subprotocol(subprotocols)
    -        if self.selected_subprotocol:
    -            assert self.selected_subprotocol in subprotocols
    -            self.handler.set_header("Sec-WebSocket-Protocol", self.selected_subprotocol)
    -
    -        extensions = self._parse_extensions_header(self.request.headers)
    -        for ext in extensions:
    -            if (ext[0] == 'permessage-deflate' and
    -                    self._compression_options is not None):
    -                # TODO: negotiate parameters if compression_options
    -                # specifies limits.
    -                self._create_compressors('server', ext[1], self._compression_options)
    -                if ('client_max_window_bits' in ext[1] and
    -                        ext[1]['client_max_window_bits'] is None):
    -                    # Don't echo an offered client_max_window_bits
    -                    # parameter with no value.
    -                    del ext[1]['client_max_window_bits']
    -                self.handler.set_header("Sec-WebSocket-Extensions",
    -                                        httputil._encode_header(
    -                                            'permessage-deflate', ext[1]))
    -                break
    -
    -        self.handler.clear_header("Content-Type")
    -        self.handler.set_status(101)
    -        self.handler.set_header("Upgrade", "websocket")
    -        self.handler.set_header("Connection", "Upgrade")
    -        self.handler.set_header("Sec-WebSocket-Accept", self._challenge_response())
    -        self.handler.finish()
    -
    -        self.handler._attach_stream()
    -        self.stream = self.handler.stream
    -
    -        self.start_pinging()
    -        try:
    -            open_result = self.handler.open(*self.handler.open_args, **self.handler.open_kwargs)
    -            if open_result is not None:
    -                yield open_result
    -        except Exception:
    -            self.handler.log_exception(*sys.exc_info())
    -            self._abort()
    -
    -        yield self._receive_frame_loop()
    -
    -    def _parse_extensions_header(self, headers):
    -        extensions = headers.get("Sec-WebSocket-Extensions", '')
    -        if extensions:
    -            return [httputil._parse_header(e.strip())
    -                    for e in extensions.split(',')]
    -        return []
    -
    -    def _process_server_headers(self, key, headers):
    -        """Process the headers sent by the server to this client connection.
    -
    -        'key' is the websocket handshake challenge/response key.
    -        """
    -        assert headers['Upgrade'].lower() == 'websocket'
    -        assert headers['Connection'].lower() == 'upgrade'
    -        accept = self.compute_accept_value(key)
    -        assert headers['Sec-Websocket-Accept'] == accept
    -
    -        extensions = self._parse_extensions_header(headers)
    -        for ext in extensions:
    -            if (ext[0] == 'permessage-deflate' and
    -                    self._compression_options is not None):
    -                self._create_compressors('client', ext[1])
    -            else:
    -                raise ValueError("unsupported extension %r", ext)
    -
    -        self.selected_subprotocol = headers.get('Sec-WebSocket-Protocol', None)
    -
    -    def _get_compressor_options(self, side, agreed_parameters, compression_options=None):
    -        """Converts a websocket agreed_parameters set to keyword arguments
    -        for our compressor objects.
    -        """
    -        options = dict(
    -            persistent=(side + '_no_context_takeover') not in agreed_parameters)
    -        wbits_header = agreed_parameters.get(side + '_max_window_bits', None)
    -        if wbits_header is None:
    -            options['max_wbits'] = zlib.MAX_WBITS
    -        else:
    -            options['max_wbits'] = int(wbits_header)
    -        options['compression_options'] = compression_options
    -        return options
    -
    -    def _create_compressors(self, side, agreed_parameters, compression_options=None):
    -        # TODO: handle invalid parameters gracefully
    -        allowed_keys = set(['server_no_context_takeover',
    -                            'client_no_context_takeover',
    -                            'server_max_window_bits',
    -                            'client_max_window_bits'])
    -        for key in agreed_parameters:
    -            if key not in allowed_keys:
    -                raise ValueError("unsupported compression parameter %r" % key)
    -        other_side = 'client' if (side == 'server') else 'server'
    -        self._compressor = _PerMessageDeflateCompressor(
    -            **self._get_compressor_options(side, agreed_parameters, compression_options))
    -        self._decompressor = _PerMessageDeflateDecompressor(
    -            max_message_size=self.handler.max_message_size,
    -            **self._get_compressor_options(other_side, agreed_parameters, compression_options))
    -
    -    def _write_frame(self, fin, opcode, data, flags=0):
    -        data_len = len(data)
    -        if opcode & 0x8:
    -            # All control frames MUST have a payload length of 125
    -            # bytes or less and MUST NOT be fragmented.
    -            if not fin:
    -                raise ValueError("control frames may not be fragmented")
    -            if data_len > 125:
    -                raise ValueError("control frame payloads may not exceed 125 bytes")
    -        if fin:
    -            finbit = self.FIN
    -        else:
    -            finbit = 0
    -        frame = struct.pack("B", finbit | opcode | flags)
    -        if self.mask_outgoing:
    -            mask_bit = 0x80
    -        else:
    -            mask_bit = 0
    -        if data_len < 126:
    -            frame += struct.pack("B", data_len | mask_bit)
    -        elif data_len <= 0xFFFF:
    -            frame += struct.pack("!BH", 126 | mask_bit, data_len)
    -        else:
    -            frame += struct.pack("!BQ", 127 | mask_bit, data_len)
    -        if self.mask_outgoing:
    -            mask = os.urandom(4)
    -            data = mask + _websocket_mask(mask, data)
    -        frame += data
    -        self._wire_bytes_out += len(frame)
    -        return self.stream.write(frame)
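The three-way payload-length encoding used above can be illustrated in isolation; this is a simplified extract that ignores the opcode byte and masking:

```python
import struct

def encode_length(data_len, mask_bit=0):
    # Mirrors the branches above: a 7-bit length, then 16-bit and 64-bit
    # extended forms signalled by the sentinel values 126 and 127.
    if data_len < 126:
        return struct.pack("B", data_len | mask_bit)
    elif data_len <= 0xFFFF:
        return struct.pack("!BH", 126 | mask_bit, data_len)
    else:
        return struct.pack("!BQ", 127 | mask_bit, data_len)

assert encode_length(5) == b"\x05"
assert encode_length(300) == b"\x7e\x01\x2c"
assert encode_length(70000) == b"\x7f" + (70000).to_bytes(8, "big")
```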
    -
    -    def write_message(self, message, binary=False):
    -        """Sends the given message to the client of this Web Socket."""
    -        if binary:
    -            opcode = 0x2
    -        else:
    -            opcode = 0x1
    -        message = tornado.escape.utf8(message)
    -        assert isinstance(message, bytes)
    -        self._message_bytes_out += len(message)
    -        flags = 0
    -        if self._compressor:
    -            message = self._compressor.compress(message)
    -            flags |= self.RSV1
    -        # For historical reasons, write methods in Tornado operate in a semi-synchronous
    -        # mode in which awaiting the Future they return is optional (But errors can
    -        # still be raised). This requires us to go through an awkward dance here
    -        # to transform the errors that may be returned while presenting the same
    -        # semi-synchronous interface.
    -        try:
    -            fut = self._write_frame(True, opcode, message, flags=flags)
    -        except StreamClosedError:
    -            raise WebSocketClosedError()
    -
    -        @gen.coroutine
    -        def wrapper():
    -            try:
    -                yield fut
    -            except StreamClosedError:
    -                raise WebSocketClosedError()
    -        return wrapper()
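Awaiting the returned Future is how a caller applies backpressure. A hedged usage sketch, where ``conn`` is assumed to be an established handler or client connection:

```python
from tornado import gen
from tornado.websocket import WebSocketClosedError

@gen.coroutine
def send_all(conn, chunks):
    for chunk in chunks:
        try:
            # Yielding waits for the frame to be flushed instead of
            # buffering an unbounded backlog in memory.
            yield conn.write_message(chunk)
        except WebSocketClosedError:
            break  # peer went away mid-transfer
```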
    -
    -    def write_ping(self, data):
    -        """Send ping frame."""
    -        assert isinstance(data, bytes)
    -        self._write_frame(True, 0x9, data)
    -
    -    @gen.coroutine
    -    def _receive_frame_loop(self):
    -        try:
    -            while not self.client_terminated:
    -                yield self._receive_frame()
    -        except StreamClosedError:
    -            self._abort()
    -
    -    def _read_bytes(self, n):
    -        self._wire_bytes_in += n
    -        return self.stream.read_bytes(n)
    -
    -    @gen.coroutine
    -    def _receive_frame(self):
    -        # Read the frame header.
    -        data = yield self._read_bytes(2)
    -        header, mask_payloadlen = struct.unpack("BB", data)
    -        is_final_frame = header & self.FIN
    -        reserved_bits = header & self.RSV_MASK
    -        opcode = header & self.OPCODE_MASK
    -        opcode_is_control = opcode & 0x8
    -        if self._decompressor is not None and opcode != 0:
    -            # Compression flag is present in the first frame's header,
    -            # but we can't decompress until we have all the frames of
    -            # the message.
    -            self._frame_compressed = bool(reserved_bits & self.RSV1)
    -            reserved_bits &= ~self.RSV1
    -        if reserved_bits:
    -            # client is using as-yet-undefined extensions; abort
    -            self._abort()
    -            return
    -        is_masked = bool(mask_payloadlen & 0x80)
    -        payloadlen = mask_payloadlen & 0x7f
    -
    -        # Parse and validate the length.
    -        if opcode_is_control and payloadlen >= 126:
    -            # control frames must have payload < 126
    -            self._abort()
    -            return
    -        if payloadlen < 126:
    -            self._frame_length = payloadlen
    -        elif payloadlen == 126:
    -            data = yield self._read_bytes(2)
    -            payloadlen = struct.unpack("!H", data)[0]
    -        elif payloadlen == 127:
    -            data = yield self._read_bytes(8)
    -            payloadlen = struct.unpack("!Q", data)[0]
    -        new_len = payloadlen
    -        if self._fragmented_message_buffer is not None:
    -            new_len += len(self._fragmented_message_buffer)
    -        if new_len > self.handler.max_message_size:
    -            self.close(1009, "message too big")
    -            self._abort()
    -            return
    -
    -        # Read the payload, unmasking if necessary.
    -        if is_masked:
    -            self._frame_mask = yield self._read_bytes(4)
    -        data = yield self._read_bytes(payloadlen)
    -        if is_masked:
    -            data = _websocket_mask(self._frame_mask, data)
    -
    -        # Decide what to do with this frame.
    -        if opcode_is_control:
    -            # control frames may be interleaved with a series of fragmented
    -            # data frames, so control frames must not interact with
    -            # self._fragmented_*
    -            if not is_final_frame:
    -                # control frames must not be fragmented
    -                self._abort()
    -                return
    -        elif opcode == 0:  # continuation frame
    -            if self._fragmented_message_buffer is None:
    -                # nothing to continue
    -                self._abort()
    -                return
    -            self._fragmented_message_buffer += data
    -            if is_final_frame:
    -                opcode = self._fragmented_message_opcode
    -                data = self._fragmented_message_buffer
    -                self._fragmented_message_buffer = None
    -        else:  # start of new data message
    -            if self._fragmented_message_buffer is not None:
    -                # can't start new message until the old one is finished
    -                self._abort()
    -                return
    -            if not is_final_frame:
    -                self._fragmented_message_opcode = opcode
    -                self._fragmented_message_buffer = data
    -
    -        if is_final_frame:
    -            handled_future = self._handle_message(opcode, data)
    -            if handled_future is not None:
    -                yield handled_future
    -
    -    def _handle_message(self, opcode, data):
    -        """Execute on_message, returning its Future if it is a coroutine."""
    -        if self.client_terminated:
    -            return
    -
    -        if self._frame_compressed:
    -            try:
    -                data = self._decompressor.decompress(data)
    -            except _DecompressTooLargeError:
    -                self.close(1009, "message too big after decompression")
    -                self._abort()
    -                return
    -
    -        if opcode == 0x1:
    -            # UTF-8 data
    -            self._message_bytes_in += len(data)
    -            try:
    -                decoded = data.decode("utf-8")
    -            except UnicodeDecodeError:
    -                self._abort()
    -                return
    -            return self._run_callback(self.handler.on_message, decoded)
    -        elif opcode == 0x2:
    -            # Binary data
    -            self._message_bytes_in += len(data)
    -            return self._run_callback(self.handler.on_message, data)
    -        elif opcode == 0x8:
    -            # Close
    -            self.client_terminated = True
    -            if len(data) >= 2:
    -                self.handler.close_code = struct.unpack('>H', data[:2])[0]
    -            if len(data) > 2:
    -                self.handler.close_reason = to_unicode(data[2:])
    -            # Echo the received close code, if any (RFC 6455 section 5.5.1).
    -            self.close(self.handler.close_code)
    -        elif opcode == 0x9:
    -            # Ping
    -            try:
    -                self._write_frame(True, 0xA, data)
    -            except StreamClosedError:
    -                self._abort()
    -            self._run_callback(self.handler.on_ping, data)
    -        elif opcode == 0xA:
    -            # Pong
    -            self.last_pong = IOLoop.current().time()
    -            return self._run_callback(self.handler.on_pong, data)
    -        else:
    -            self._abort()
    -
    -    def close(self, code=None, reason=None):
    -        """Closes the WebSocket connection."""
    -        if not self.server_terminated:
    -            if not self.stream.closed():
    -                if code is None and reason is not None:
    -                    code = 1000  # "normal closure" status code
    -                if code is None:
    -                    close_data = b''
    -                else:
    -                    close_data = struct.pack('>H', code)
    -                if reason is not None:
    -                    close_data += utf8(reason)
    -                try:
    -                    self._write_frame(True, 0x8, close_data)
    -                except StreamClosedError:
    -                    self._abort()
    -            self.server_terminated = True
    -        if self.client_terminated:
    -            if self._waiting is not None:
    -                self.stream.io_loop.remove_timeout(self._waiting)
    -                self._waiting = None
    -            self.stream.close()
    -        elif self._waiting is None:
    -            # Give the client a few seconds to complete a clean shutdown,
    -            # otherwise just close the connection.
    -            self._waiting = self.stream.io_loop.add_timeout(
    -                self.stream.io_loop.time() + 5, self._abort)
    -
    -    @property
    -    def ping_interval(self):
    -        interval = self.handler.ping_interval
    -        if interval is not None:
    -            return interval
    -        return 0
    -
    -    @property
    -    def ping_timeout(self):
    -        timeout = self.handler.ping_timeout
    -        if timeout is not None:
    -            return timeout
    -        return max(3 * self.ping_interval, 30)
    -
    -    def start_pinging(self):
    -        """Start sending periodic pings to keep the connection alive"""
    -        if self.ping_interval > 0:
    -            self.last_ping = self.last_pong = IOLoop.current().time()
    -            self.ping_callback = PeriodicCallback(
    -                self.periodic_ping, self.ping_interval * 1000)
    -            self.ping_callback.start()
    -
    -    def periodic_ping(self):
    -        """Send a ping to keep the websocket alive
    -
    -        Called periodically if the websocket_ping_interval is set and non-zero.
    -        """
    -        if self.stream.closed() and self.ping_callback is not None:
    -            self.ping_callback.stop()
    -            return
    -
    -        # Check for timeout on pong. Make sure that we really have
    -        # sent a recent ping in case the machine with both server and
    -        # client has been suspended since the last ping.
    -        now = IOLoop.current().time()
    -        since_last_pong = now - self.last_pong
    -        since_last_ping = now - self.last_ping
    -        if (since_last_ping < 2 * self.ping_interval and
    -                since_last_pong > self.ping_timeout):
    -            self.close()
    -            return
    -
    -        self.write_ping(b'')
    -        self.last_ping = now
    -
    -
    -class WebSocketClientConnection(simple_httpclient._HTTPConnection):
    -    """WebSocket client connection.
    -
    -    This class should not be instantiated directly; use the
    -    `websocket_connect` function instead.
    -    """
    -    def __init__(self, request, on_message_callback=None,
    -                 compression_options=None, ping_interval=None, ping_timeout=None,
    -                 max_message_size=None, subprotocols=[]):
    -        self.compression_options = compression_options
    -        self.connect_future = Future()
    -        self.protocol = None
    -        self.read_queue = Queue(1)
    -        self.key = base64.b64encode(os.urandom(16))
    -        self._on_message_callback = on_message_callback
    -        self.close_code = self.close_reason = None
    -        self.ping_interval = ping_interval
    -        self.ping_timeout = ping_timeout
    -        self.max_message_size = max_message_size
    -
    -        scheme, sep, rest = request.url.partition(':')
    -        scheme = {'ws': 'http', 'wss': 'https'}[scheme]
    -        request.url = scheme + sep + rest
    -        request.headers.update({
    -            'Upgrade': 'websocket',
    -            'Connection': 'Upgrade',
    -            'Sec-WebSocket-Key': self.key,
    -            'Sec-WebSocket-Version': '13',
    -        })
    -        if subprotocols is not None:
    -            request.headers['Sec-WebSocket-Protocol'] = ','.join(subprotocols)
    -        if self.compression_options is not None:
    -            # Always offer to let the server set our max_wbits (and even though
    -            # we don't offer it, we will accept a client_no_context_takeover
    -            # from the server).
    -            # TODO: set server parameters for deflate extension
    -            # if requested in self.compression_options.
    -            request.headers['Sec-WebSocket-Extensions'] = (
    -                'permessage-deflate; client_max_window_bits')
    -
    -        self.tcp_client = TCPClient()
    -        super(WebSocketClientConnection, self).__init__(
    -            None, request, lambda: None, self._on_http_response,
    -            104857600, self.tcp_client, 65536, 104857600)
    -
    -    def close(self, code=None, reason=None):
    -        """Closes the websocket connection.
    -
    -        ``code`` and ``reason`` are documented under
    -        `WebSocketHandler.close`.
    -
    -        .. versionadded:: 3.2
    -
    -        .. versionchanged:: 4.0
    -
    -           Added the ``code`` and ``reason`` arguments.
    -        """
    -        if self.protocol is not None:
    -            self.protocol.close(code, reason)
    -            self.protocol = None
    -
    -    def on_connection_close(self):
    -        if not self.connect_future.done():
    -            self.connect_future.set_exception(StreamClosedError())
    -        self.on_message(None)
    -        self.tcp_client.close()
    -        super(WebSocketClientConnection, self).on_connection_close()
    -
    -    def _on_http_response(self, response):
    -        if not self.connect_future.done():
    -            if response.error:
    -                self.connect_future.set_exception(response.error)
    -            else:
    -                self.connect_future.set_exception(WebSocketError(
    -                    "Non-websocket response"))
    -
    -    def headers_received(self, start_line, headers):
    -        if start_line.code != 101:
    -            return super(WebSocketClientConnection, self).headers_received(
    -                start_line, headers)
    -
    -        self.headers = headers
    -        self.protocol = self.get_websocket_protocol()
    -        self.protocol._process_server_headers(self.key, self.headers)
    -        self.protocol.start_pinging()
    -        IOLoop.current().add_callback(self.protocol._receive_frame_loop)
    -
    -        if self._timeout is not None:
    -            self.io_loop.remove_timeout(self._timeout)
    -            self._timeout = None
    -
    -        self.stream = self.connection.detach()
    -        self.stream.set_close_callback(self.on_connection_close)
    -        # Once we've taken over the connection, clear the final callback
    -        # we set on the http request.  This deactivates the error handling
    -        # in simple_httpclient that would otherwise interfere with our
    -        # ability to see exceptions.
    -        self.final_callback = None
    -
    -        future_set_result_unless_cancelled(self.connect_future, self)
    -
    -    def write_message(self, message, binary=False):
    -        """Sends a message to the WebSocket server.
    -
    -        If the stream is closed, raises `WebSocketClosedError`.
    -        Returns a `.Future` which can be used for flow control.
    -
    -        .. versionchanged:: 5.0
    -           Exception raised on a closed stream changed from `.StreamClosedError`
    -           to `WebSocketClosedError`.
    -        """
    -        return self.protocol.write_message(message, binary=binary)
    -
    -    def read_message(self, callback=None):
    -        """Reads a message from the WebSocket server.
    -
    -        If ``on_message_callback`` was specified at WebSocket
    -        initialization, this function will never return messages.
    -
    -        Returns a future whose result is the message, or None
    -        if the connection is closed.  If a callback argument
    -        is given it will be called with the future when it is
    -        ready.
    -        """
    -
    -        future = self.read_queue.get()
    -        if callback is not None:
    -            self.io_loop.add_future(future, callback)
    -        return future
    -
    -    def on_message(self, message):
    -        if self._on_message_callback:
    -            self._on_message_callback(message)
    -        else:
    -            return self.read_queue.put(message)
    -
    -    def ping(self, data=b''):
    -        """Send ping frame to the remote end.
    -
    -        The data argument allows a small amount of data (up to 125
    -        bytes) to be sent as a part of the ping message. Note that not
    -        all websocket implementations expose this data to
    -        applications.
    -
    -        Consider using the ``ping_interval`` argument to
    -        `websocket_connect` instead of sending pings manually.
    -
    -        .. versionadded:: 5.1
    -
    -        """
    -        data = utf8(data)
    -        if self.protocol is None:
    -            raise WebSocketClosedError()
    -        self.protocol.write_ping(data)
    -
    -    def on_pong(self, data):
    -        pass
    -
    -    def on_ping(self, data):
    -        pass
    -
    -    def get_websocket_protocol(self):
    -        return WebSocketProtocol13(self, mask_outgoing=True,
    -                                   compression_options=self.compression_options)
    -
    -    @property
    -    def selected_subprotocol(self):
    -        """The subprotocol selected by the server.
    -
    -        .. versionadded:: 5.1
    -        """
    -        return self.protocol.selected_subprotocol
    -
    -
    -def websocket_connect(url, callback=None, connect_timeout=None,
    -                      on_message_callback=None, compression_options=None,
    -                      ping_interval=None, ping_timeout=None,
    -                      max_message_size=_default_max_message_size, subprotocols=None):
    -    """Client-side websocket support.
    -
    -    Takes a url and returns a Future whose result is a
    -    `WebSocketClientConnection`.
    -
    -    ``compression_options`` is interpreted in the same way as the
    -    return value of `.WebSocketHandler.get_compression_options`.
    -
    -    The connection supports two styles of operation. In the coroutine
    -    style, the application typically calls
    -    `~.WebSocketClientConnection.read_message` in a loop::
    -
    -        conn = yield websocket_connect(url)
    -        while True:
    -            msg = yield conn.read_message()
    -            if msg is None: break
    -            # Do something with msg
    -
    -    In the callback style, pass an ``on_message_callback`` to
    -    ``websocket_connect``. In both styles, a message of ``None``
    -    indicates that the connection has been closed.
    -
    -    ``subprotocols`` may be a list of strings specifying proposed
    -    subprotocols. The selected protocol may be found on the
    -    ``selected_subprotocol`` attribute of the connection object
    -    when the connection is complete.
    -
    -    .. versionchanged:: 3.2
    -       Also accepts ``HTTPRequest`` objects in place of urls.
    -
    -    .. versionchanged:: 4.1
    -       Added ``compression_options`` and ``on_message_callback``.
    -
    -    .. versionchanged:: 4.5
    -       Added the ``ping_interval``, ``ping_timeout``, and ``max_message_size``
    -       arguments, which have the same meaning as in `WebSocketHandler`.
    -
    -    .. versionchanged:: 5.0
    -       The ``io_loop`` argument (deprecated since version 4.1) has been removed.
    -
    -    .. versionchanged:: 5.1
    -       Added the ``subprotocols`` argument.
    -    """
    -    if isinstance(url, httpclient.HTTPRequest):
    -        assert connect_timeout is None
    -        request = url
    -        # Copy and convert the headers dict/object (see comments in
    -        # AsyncHTTPClient.fetch)
    -        request.headers = httputil.HTTPHeaders(request.headers)
    -    else:
    -        request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout)
    -    request = httpclient._RequestProxy(
    -        request, httpclient.HTTPRequest._DEFAULTS)
    -    conn = WebSocketClientConnection(request,
    -                                     on_message_callback=on_message_callback,
    -                                     compression_options=compression_options,
    -                                     ping_interval=ping_interval,
    -                                     ping_timeout=ping_timeout,
    -                                     max_message_size=max_message_size,
    -                                     subprotocols=subprotocols)
    -    if callback is not None:
    -        IOLoop.current().add_future(conn.connect_future, callback)
    -    return conn.connect_future
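Putting the client pieces together, a runnable coroutine-style sketch; the endpoint URL and timings are illustrative assumptions:

```python
from tornado import gen, ioloop
from tornado.websocket import websocket_connect

@gen.coroutine
def run():
    conn = yield websocket_connect(
        "ws://localhost:8888/ws",  # illustrative endpoint
        ping_interval=10,          # keep-alive ping every 10 seconds
        ping_timeout=30,           # give up if no pong within 30 seconds
    )
    yield conn.write_message("hello")
    while True:
        msg = yield conn.read_message()
        if msg is None:  # None signals the connection closed
            break
        print(msg)

if __name__ == "__main__":
    ioloop.IOLoop.current().run_sync(run)
```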
    diff --git a/lib/tornado/wsgi.py b/lib/tornado/wsgi.py
    deleted file mode 100755
    index e1230da0..00000000
    --- a/lib/tornado/wsgi.py
    +++ /dev/null
    @@ -1,377 +0,0 @@
    -#
    -# Copyright 2009 Facebook
    -#
    -# Licensed under the Apache License, Version 2.0 (the "License"); you may
    -# not use this file except in compliance with the License. You may obtain
    -# a copy of the License at
    -#
    -#     http://www.apache.org/licenses/LICENSE-2.0
    -#
    -# Unless required by applicable law or agreed to in writing, software
    -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    -# License for the specific language governing permissions and limitations
    -# under the License.
    -
    -"""WSGI support for the Tornado web framework.
    -
    -WSGI is the Python standard for web servers, and allows for interoperability
    -between Tornado and other Python web frameworks and servers.  This module
    -provides WSGI support in two ways:
    -
    -* `WSGIAdapter` converts a `tornado.web.Application` to the WSGI application
    -  interface.  This is useful for running a Tornado app on another
    -  HTTP server, such as Google App Engine.  See the `WSGIAdapter` class
    -  documentation for limitations that apply.
    -* `WSGIContainer` lets you run other WSGI applications and frameworks on the
    -  Tornado HTTP server.  For example, with this class you can mix Django
    -  and Tornado handlers in a single server.
    -"""
    -
    -from __future__ import absolute_import, division, print_function
    -
    -import sys
    -from io import BytesIO
    -import tornado
    -import warnings
    -
    -from tornado.concurrent import Future
    -from tornado import escape
    -from tornado import httputil
    -from tornado.log import access_log
    -from tornado import web
    -from tornado.escape import native_str
    -from tornado.util import unicode_type, PY3
    -
    -
    -if PY3:
    -    import urllib.parse as urllib_parse  # py3
    -else:
    -    import urllib as urllib_parse
    -
    -# PEP 3333 specifies that WSGI on python 3 generally deals with byte strings
    -# that are smuggled inside objects of type unicode (via the latin1 encoding).
    -# These functions are like those in the tornado.escape module, but defined
    -# here to minimize the temptation to use them in non-wsgi contexts.
    -if str is unicode_type:
    -    def to_wsgi_str(s):
    -        assert isinstance(s, bytes)
    -        return s.decode('latin1')
    -
    -    def from_wsgi_str(s):
    -        assert isinstance(s, str)
    -        return s.encode('latin1')
    -else:
    -    def to_wsgi_str(s):
    -        assert isinstance(s, bytes)
    -        return s
    -
    -    def from_wsgi_str(s):
    -        assert isinstance(s, str)
    -        return s
    -
    -
    -class WSGIApplication(web.Application):
    -    """A WSGI equivalent of `tornado.web.Application`.
    -
    -    .. deprecated:: 4.0
    -
    -       Use a regular `.Application` and wrap it in `WSGIAdapter` instead.
    -       This class will be removed in Tornado 6.0.
    -    """
    -    def __call__(self, environ, start_response):
    -        return WSGIAdapter(self)(environ, start_response)
    -
    -
    -# WSGI has no facilities for flow control, so just return an already-done
    -# Future when the interface requires it.
    -def _dummy_future():
    -    f = Future()
    -    f.set_result(None)
    -    return f
    -
    -
    -class _WSGIConnection(httputil.HTTPConnection):
    -    def __init__(self, method, start_response, context):
    -        self.method = method
    -        self.start_response = start_response
    -        self.context = context
    -        self._write_buffer = []
    -        self._finished = False
    -        self._expected_content_remaining = None
    -        self._error = None
    -
    -    def set_close_callback(self, callback):
    -        # WSGI has no facility for detecting a closed connection mid-request,
    -        # so we can simply ignore the callback.
    -        pass
    -
    -    def write_headers(self, start_line, headers, chunk=None, callback=None):
    -        if self.method == 'HEAD':
    -            self._expected_content_remaining = 0
    -        elif 'Content-Length' in headers:
    -            self._expected_content_remaining = int(headers['Content-Length'])
    -        else:
    -            self._expected_content_remaining = None
    -        self.start_response(
    -            '%s %s' % (start_line.code, start_line.reason),
    -            [(native_str(k), native_str(v)) for (k, v) in headers.get_all()])
    -        if chunk is not None:
    -            self.write(chunk, callback)
    -        elif callback is not None:
    -            callback()
    -        return _dummy_future()
    -
    -    def write(self, chunk, callback=None):
    -        if self._expected_content_remaining is not None:
    -            self._expected_content_remaining -= len(chunk)
    -            if self._expected_content_remaining < 0:
    -                self._error = httputil.HTTPOutputError(
    -                    "Tried to write more data than Content-Length")
    -                raise self._error
    -        self._write_buffer.append(chunk)
    -        if callback is not None:
    -            callback()
    -        return _dummy_future()
    -
    -    def finish(self):
    -        if (self._expected_content_remaining is not None and
    -                self._expected_content_remaining != 0):
    -            self._error = httputil.HTTPOutputError(
    -                "Tried to write %d bytes less than Content-Length" %
    -                self._expected_content_remaining)
    -            raise self._error
    -        self._finished = True
    -
    -
    -class _WSGIRequestContext(object):
    -    def __init__(self, remote_ip, protocol):
    -        self.remote_ip = remote_ip
    -        self.protocol = protocol
    -
    -    def __str__(self):
    -        return self.remote_ip
    -
    -
    -class WSGIAdapter(object):
    -    """Converts a `tornado.web.Application` instance into a WSGI application.
    -
    -    Example usage::
    -
    -        import tornado.web
    -        import tornado.wsgi
    -        import wsgiref.simple_server
    -
    -        class MainHandler(tornado.web.RequestHandler):
    -            def get(self):
    -                self.write("Hello, world")
    -
    -        if __name__ == "__main__":
    -            application = tornado.web.Application([
    -                (r"/", MainHandler),
    -            ])
    -            wsgi_app = tornado.wsgi.WSGIAdapter(application)
    -            server = wsgiref.simple_server.make_server('', 8888, wsgi_app)
    -            server.serve_forever()
    -
    -    See the `appengine demo
    -    <https://github.com/tornadoweb/tornado/tree/stable/demos/appengine>`_
    -    for an example of using this module to run a Tornado app on Google
    -    App Engine.
    -
    -    In WSGI mode asynchronous methods are not supported.  This means
    -    that it is not possible to use `.AsyncHTTPClient`, or the
    -    `tornado.auth` or `tornado.websocket` modules.
    -
    -    In multithreaded WSGI servers on Python 3, it may be necessary to
    -    permit `asyncio` to create event loops on any thread. Run the
    -    following at startup (typically import time for WSGI
    -    applications)::
    -
    -        import asyncio
    -        from tornado.platform.asyncio import AnyThreadEventLoopPolicy
    -        asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
    -
    -    .. versionadded:: 4.0
    -
    -    .. deprecated:: 5.1
    -
    -       This class is deprecated and will be removed in Tornado 6.0.
    -       Use Tornado's `.HTTPServer` instead of a WSGI container.
    -    """
    -    def __init__(self, application):
    -        warnings.warn("WSGIAdapter is deprecated, use Tornado's HTTPServer instead",
    -                      DeprecationWarning)
    -        if isinstance(application, WSGIApplication):
    -            self.application = lambda request: web.Application.__call__(
    -                application, request)
    -        else:
    -            self.application = application
    -
    -    def __call__(self, environ, start_response):
    -        method = environ["REQUEST_METHOD"]
    -        uri = urllib_parse.quote(from_wsgi_str(environ.get("SCRIPT_NAME", "")))
    -        uri += urllib_parse.quote(from_wsgi_str(environ.get("PATH_INFO", "")))
    -        if environ.get("QUERY_STRING"):
    -            uri += "?" + environ["QUERY_STRING"]
    -        headers = httputil.HTTPHeaders()
    -        if environ.get("CONTENT_TYPE"):
    -            headers["Content-Type"] = environ["CONTENT_TYPE"]
    -        if environ.get("CONTENT_LENGTH"):
    -            headers["Content-Length"] = environ["CONTENT_LENGTH"]
    -        for key in environ:
    -            if key.startswith("HTTP_"):
    -                headers[key[5:].replace("_", "-")] = environ[key]
    -        if headers.get("Content-Length"):
    -            body = environ["wsgi.input"].read(
    -                int(headers["Content-Length"]))
    -        else:
    -            body = b""
    -        protocol = environ["wsgi.url_scheme"]
    -        remote_ip = environ.get("REMOTE_ADDR", "")
    -        if environ.get("HTTP_HOST"):
    -            host = environ["HTTP_HOST"]
    -        else:
    -            host = environ["SERVER_NAME"]
    -        connection = _WSGIConnection(method, start_response,
    -                                     _WSGIRequestContext(remote_ip, protocol))
    -        request = httputil.HTTPServerRequest(
    -            method, uri, "HTTP/1.1", headers=headers, body=body,
    -            host=host, connection=connection)
    -        request._parse_body()
    -        self.application(request)
    -        if connection._error:
    -            raise connection._error
    -        if not connection._finished:
    -            raise Exception("request did not finish synchronously")
    -        return connection._write_buffer
    -
    -
    -class WSGIContainer(object):
    -    r"""Makes a WSGI-compatible function runnable on Tornado's HTTP server.
    -
    -    .. warning::
    -
    -       WSGI is a *synchronous* interface, while Tornado's concurrency model
    -       is based on single-threaded asynchronous execution.  This means that
    -       running a WSGI app with Tornado's `WSGIContainer` is *less scalable*
    -       than running the same app in a multi-threaded WSGI server like
    -       ``gunicorn`` or ``uwsgi``.  Use `WSGIContainer` only when there are
    -       benefits to combining Tornado and WSGI in the same process that
    -       outweigh the reduced scalability.
    -
    -    Wrap a WSGI function in a `WSGIContainer` and pass it to `.HTTPServer` to
    -    run it. For example::
    -
    -        def simple_app(environ, start_response):
    -            status = "200 OK"
    -            response_headers = [("Content-type", "text/plain")]
    -            start_response(status, response_headers)
    -            return ["Hello world!\n"]
    -
    -        container = tornado.wsgi.WSGIContainer(simple_app)
    -        http_server = tornado.httpserver.HTTPServer(container)
    -        http_server.listen(8888)
    -        tornado.ioloop.IOLoop.current().start()
    -
    -    This class is intended to let other frameworks (Django, web.py, etc)
    -    run on the Tornado HTTP server and I/O loop.
    -
    -    The `tornado.web.FallbackHandler` class is often useful for mixing
    -    Tornado and WSGI apps in the same server.  See
    -    https://github.com/bdarnell/django-tornado-demo for a complete example.
    -    """
    -    def __init__(self, wsgi_application):
    -        self.wsgi_application = wsgi_application
    -
    -    def __call__(self, request):
    -        data = {}
    -        response = []
    -
    -        def start_response(status, response_headers, exc_info=None):
    -            data["status"] = status
    -            data["headers"] = response_headers
    -            return response.append
    -        app_response = self.wsgi_application(
    -            WSGIContainer.environ(request), start_response)
    -        try:
    -            response.extend(app_response)
    -            body = b"".join(response)
    -        finally:
    -            if hasattr(app_response, "close"):
    -                app_response.close()
    -        if not data:
    -            raise Exception("WSGI app did not call start_response")
    -
    -        status_code, reason = data["status"].split(' ', 1)
    -        status_code = int(status_code)
    -        headers = data["headers"]
    -        header_set = set(k.lower() for (k, v) in headers)
    -        body = escape.utf8(body)
    -        if status_code != 304:
    -            if "content-length" not in header_set:
    -                headers.append(("Content-Length", str(len(body))))
    -            if "content-type" not in header_set:
    -                headers.append(("Content-Type", "text/html; charset=UTF-8"))
    -        if "server" not in header_set:
    -            headers.append(("Server", "TornadoServer/%s" % tornado.version))
    -
    -        start_line = httputil.ResponseStartLine("HTTP/1.1", status_code, reason)
    -        header_obj = httputil.HTTPHeaders()
    -        for key, value in headers:
    -            header_obj.add(key, value)
    -        request.connection.write_headers(start_line, header_obj, chunk=body)
    -        request.connection.finish()
    -        self._log(status_code, request)
    -
    -    @staticmethod
    -    def environ(request):
    -        """Converts a `tornado.httputil.HTTPServerRequest` to a WSGI environment.
    -        """
    -        hostport = request.host.split(":")
    -        if len(hostport) == 2:
    -            host = hostport[0]
    -            port = int(hostport[1])
    -        else:
    -            host = request.host
    -            port = 443 if request.protocol == "https" else 80
    -        environ = {
    -            "REQUEST_METHOD": request.method,
    -            "SCRIPT_NAME": "",
    -            "PATH_INFO": to_wsgi_str(escape.url_unescape(
    -                request.path, encoding=None, plus=False)),
    -            "QUERY_STRING": request.query,
    -            "REMOTE_ADDR": request.remote_ip,
    -            "SERVER_NAME": host,
    -            "SERVER_PORT": str(port),
    -            "SERVER_PROTOCOL": request.version,
    -            "wsgi.version": (1, 0),
    -            "wsgi.url_scheme": request.protocol,
    -            "wsgi.input": BytesIO(escape.utf8(request.body)),
    -            "wsgi.errors": sys.stderr,
    -            "wsgi.multithread": False,
    -            "wsgi.multiprocess": True,
    -            "wsgi.run_once": False,
    -        }
    -        if "Content-Type" in request.headers:
    -            environ["CONTENT_TYPE"] = request.headers.pop("Content-Type")
    -        if "Content-Length" in request.headers:
    -            environ["CONTENT_LENGTH"] = request.headers.pop("Content-Length")
    -        for key, value in request.headers.items():
    -            environ["HTTP_" + key.replace("-", "_").upper()] = value
    -        return environ
    -
    -    def _log(self, status_code, request):
    -        if status_code < 400:
    -            log_method = access_log.info
    -        elif status_code < 500:
    -            log_method = access_log.warning
    -        else:
    -            log_method = access_log.error
    -        request_time = 1000.0 * request.request_time()
    -        summary = request.method + " " + request.uri + " (" + \
    -            request.remote_ip + ")"
    -        log_method("%d %s %.2fms", status_code, summary, request_time)
    -
    -
    -HTTPRequest = httputil.HTTPServerRequest
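
The deleted WSGIAdapter's own deprecation note points migrators at Tornado's native HTTPServer. A minimal sketch of that replacement path, assuming a plain tornado.web.Application (handler and port are illustrative):

# Minimal sketch: serve the application with Tornado's own HTTPServer
# instead of a WSGI container, as the deprecation note above suggests.
import tornado.ioloop
import tornado.web


class MainHandler(tornado.web.RequestHandler):
    def get(self):
        self.write("Hello, world")


if __name__ == "__main__":
    app = tornado.web.Application([(r"/", MainHandler)])
    app.listen(8888)  # creates and starts an HTTPServer internally
    tornado.ioloop.IOLoop.current().start()
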
    diff --git a/lib/urllib3/__init__.py b/lib/urllib3/__init__.py
    index aaa6b1c6..8f5a21f3 100644
    --- a/lib/urllib3/__init__.py
    +++ b/lib/urllib3/__init__.py
    @@ -1,15 +1,10 @@
     """
     urllib3 - Thread-safe connection pooling and re-using.
     """
    -
     from __future__ import absolute_import
     import warnings
     
    -from .connectionpool import (
    -    HTTPConnectionPool,
    -    HTTPSConnectionPool,
    -    connection_from_url
    -)
    +from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url
     
     from . import exceptions
     from .filepost import encode_multipart_formdata
    @@ -23,32 +18,27 @@ from .util.retry import Retry
     
     # Set default logging handler to avoid "No handler found" warnings.
     import logging
    -try:  # Python 2.7+
    -    from logging import NullHandler
    -except ImportError:
    -    class NullHandler(logging.Handler):
    -        def emit(self, record):
    -            pass
    +from logging import NullHandler
     
    -__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
    -__license__ = 'MIT'
    -__version__ = '1.22'
    +__author__ = "Andrey Petrov (andrey.petrov@shazow.net)"
    +__license__ = "MIT"
    +__version__ = "1.25.6"
     
     __all__ = (
    -    'HTTPConnectionPool',
    -    'HTTPSConnectionPool',
    -    'PoolManager',
    -    'ProxyManager',
    -    'HTTPResponse',
    -    'Retry',
    -    'Timeout',
    -    'add_stderr_logger',
    -    'connection_from_url',
    -    'disable_warnings',
    -    'encode_multipart_formdata',
    -    'get_host',
    -    'make_headers',
    -    'proxy_from_url',
    +    "HTTPConnectionPool",
    +    "HTTPSConnectionPool",
    +    "PoolManager",
    +    "ProxyManager",
    +    "HTTPResponse",
    +    "Retry",
    +    "Timeout",
    +    "add_stderr_logger",
    +    "connection_from_url",
    +    "disable_warnings",
    +    "encode_multipart_formdata",
    +    "get_host",
    +    "make_headers",
    +    "proxy_from_url",
     )
     
     logging.getLogger(__name__).addHandler(NullHandler())
    @@ -65,10 +55,10 @@ def add_stderr_logger(level=logging.DEBUG):
         # even if urllib3 is vendored within another package.
         logger = logging.getLogger(__name__)
         handler = logging.StreamHandler()
    -    handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    +    handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
         logger.addHandler(handler)
         logger.setLevel(level)
    -    logger.debug('Added a stderr logging handler to logger: %s', __name__)
    +    logger.debug("Added a stderr logging handler to logger: %s", __name__)
         return handler
     
     
    @@ -80,18 +70,17 @@ del NullHandler
     # shouldn't be: otherwise, it's very hard for users to use most Python
     # mechanisms to silence them.
     # SecurityWarning's always go off by default.
    -warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
    +warnings.simplefilter("always", exceptions.SecurityWarning, append=True)
     # SubjectAltNameWarning's should go off once per host
    -warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True)
    +warnings.simplefilter("default", exceptions.SubjectAltNameWarning, append=True)
     # InsecurePlatformWarning's don't vary between requests, so we keep it default.
    -warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
    -                      append=True)
    +warnings.simplefilter("default", exceptions.InsecurePlatformWarning, append=True)
     # SNIMissingWarnings should go off only once.
    -warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True)
    +warnings.simplefilter("default", exceptions.SNIMissingWarning, append=True)
     
     
     def disable_warnings(category=exceptions.HTTPWarning):
         """
         Helper for quickly disabling all urllib3 warnings.
         """
    -    warnings.simplefilter('ignore', category)
    +    warnings.simplefilter("ignore", category)
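
The helpers above are the supported knobs for urllib3's logging and warning noise. A short usage sketch (imports assume the top-level package; in this vendored tree the path would be lib.urllib3):

# Sketch: wiring up the module-level helpers defined in __init__.py.
import urllib3

urllib3.add_stderr_logger()  # DEBUG-level StreamHandler on the "urllib3" logger
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

http = urllib3.PoolManager()
resp = http.request("GET", "http://example.com/")
print(resp.status)
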
    diff --git a/lib/urllib3/_collections.py b/lib/urllib3/_collections.py
    index 5df2372c..019d1511 100644
    --- a/lib/urllib3/_collections.py
    +++ b/lib/urllib3/_collections.py
    @@ -1,8 +1,13 @@
     from __future__ import absolute_import
    -from collections import Mapping, MutableMapping
    +
    +try:
    +    from collections.abc import Mapping, MutableMapping
    +except ImportError:
    +    from collections import Mapping, MutableMapping
     try:
         from threading import RLock
     except ImportError:  # Platform-specific: No threads available
    +
         class RLock:
             def __enter__(self):
                 pass
    @@ -11,14 +16,12 @@ except ImportError:  # Platform-specific: No threads available
                 pass
     
     
    -try:  # Python 2.7+
    -    from collections import OrderedDict
    -except ImportError:
    -    from .packages.ordered_dict import OrderedDict
    +from collections import OrderedDict
    +from .exceptions import InvalidHeader
     from .packages.six import iterkeys, itervalues, PY3
     
     
    -__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict']
    +__all__ = ["RecentlyUsedContainer", "HTTPHeaderDict"]
     
     
     _Null = object()
    @@ -81,7 +84,9 @@ class RecentlyUsedContainer(MutableMapping):
                 return len(self._container)
     
         def __iter__(self):
    -        raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')
    +        raise NotImplementedError(
    +            "Iteration over this class is unlikely to be threadsafe."
    +        )
     
         def clear(self):
             with self.lock:
    @@ -149,7 +154,7 @@ class HTTPHeaderDict(MutableMapping):
     
         def __getitem__(self, key):
             val = self._container[key.lower()]
    -        return ', '.join(val[1:])
    +        return ", ".join(val[1:])
     
         def __delitem__(self, key):
             del self._container[key.lower()]
    @@ -158,12 +163,13 @@ class HTTPHeaderDict(MutableMapping):
             return key.lower() in self._container
     
         def __eq__(self, other):
    -        if not isinstance(other, Mapping) and not hasattr(other, 'keys'):
    +        if not isinstance(other, Mapping) and not hasattr(other, "keys"):
                 return False
             if not isinstance(other, type(self)):
                 other = type(self)(other)
    -        return (dict((k.lower(), v) for k, v in self.itermerged()) ==
    -                dict((k.lower(), v) for k, v in other.itermerged()))
    +        return dict((k.lower(), v) for k, v in self.itermerged()) == dict(
    +            (k.lower(), v) for k, v in other.itermerged()
    +        )
     
         def __ne__(self, other):
             return not self.__eq__(other)
    @@ -183,9 +189,9 @@ class HTTPHeaderDict(MutableMapping):
                 yield vals[0]
     
         def pop(self, key, default=__marker):
    -        '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
    +        """D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
               If key is not found, d is returned if given, otherwise KeyError is raised.
    -        '''
    +        """
             # Using the MutableMapping function directly fails due to the private marker.
             # Using ordinary dict.pop would expose the internal structures.
             # So let's reinvent the wheel.
    @@ -227,8 +233,10 @@ class HTTPHeaderDict(MutableMapping):
             with self.add instead of self.__setitem__
             """
             if len(args) > 1:
    -            raise TypeError("extend() takes at most 1 positional "
    -                            "arguments ({0} given)".format(len(args)))
    +            raise TypeError(
    +                "extend() takes at most 1 positional "
    +                "arguments ({0} given)".format(len(args))
    +            )
             other = args[0] if len(args) >= 1 else ()
     
             if isinstance(other, HTTPHeaderDict):
    @@ -294,7 +302,7 @@ class HTTPHeaderDict(MutableMapping):
             """Iterate over all headers, merging duplicate ones together."""
             for key in self:
                 val = self._container[key.lower()]
    -            yield val[0], ', '.join(val[1:])
    +            yield val[0], ", ".join(val[1:])
     
         def items(self):
             return list(self.iteritems())
    @@ -305,15 +313,24 @@ class HTTPHeaderDict(MutableMapping):
             # python2.7 does not expose a proper API for exporting multiheaders
             # efficiently. This function re-reads raw lines from the message
             # object and extracts the multiheaders properly.
    +        obs_fold_continued_leaders = (" ", "\t")
             headers = []
     
             for line in message.headers:
    -            if line.startswith((' ', '\t')):
    -                key, value = headers[-1]
    -                headers[-1] = (key, value + '\r\n' + line.rstrip())
    -                continue
    +            if line.startswith(obs_fold_continued_leaders):
    +                if not headers:
    +                    # We received a header line that starts with OWS as described
    +                    # in RFC-7230 S3.2.4. This indicates a multiline header, but
    +                    # there exists no previous header to which we can attach it.
    +                    raise InvalidHeader(
    +                        "Header continuation with no previous header: %s" % line
    +                    )
    +                else:
    +                    key, value = headers[-1]
    +                    headers[-1] = (key, value + " " + line.strip())
    +                    continue
     
    -            key, value = line.split(':', 1)
    +            key, value = line.split(":", 1)
                 headers.append((key, value.strip()))
     
             return cls(headers)
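
HTTPHeaderDict behaves as a case-insensitive multimap; a small illustration of the semantics implemented above:

# Keys compare case-insensitively; duplicate values are kept and joined
# with ", " on lookup, matching __getitem__/itermerged above.
from urllib3._collections import HTTPHeaderDict

h = HTTPHeaderDict()
h.add("Set-Cookie", "a=1")
h.add("set-cookie", "b=2")
print(h["SET-COOKIE"])       # "a=1, b=2"
print(list(h.itermerged()))  # [("Set-Cookie", "a=1, b=2")]
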
    diff --git a/lib/urllib3/connection.py b/lib/urllib3/connection.py
    index c0d83299..3eeb1af5 100644
    --- a/lib/urllib3/connection.py
    +++ b/lib/urllib3/connection.py
    @@ -2,7 +2,6 @@ from __future__ import absolute_import
     import datetime
     import logging
     import os
    -import sys
     import socket
     from socket import error as SocketError, timeout as SocketTimeout
     import warnings
    @@ -12,6 +11,7 @@ from .packages.six.moves.http_client import HTTPException  # noqa: F401
     
     try:  # Compiled with SSL?
         import ssl
    +
         BaseSSLError = ssl.SSLError
     except (ImportError, AttributeError):  # Platform-specific: No SSL.
         ssl = None
    @@ -20,10 +20,11 @@ except (ImportError, AttributeError):  # Platform-specific: No SSL.
             pass
     
     
    -try:  # Python 3:
    -    # Not a no-op, we're adding this to the namespace so it can be imported.
    +try:
    +    # Python 3: not a no-op, we're adding this to the namespace so it can be imported.
         ConnectionError = ConnectionError
    -except NameError:  # Python 2:
    +except NameError:
    +    # Python 2
         class ConnectionError(Exception):
             pass
     
    @@ -41,7 +42,7 @@ from .util.ssl_ import (
         resolve_ssl_version,
         assert_fingerprint,
         create_urllib3_context,
    -    ssl_wrap_socket
    +    ssl_wrap_socket,
     )
     
     
    @@ -51,19 +52,16 @@ from ._collections import HTTPHeaderDict
     
     log = logging.getLogger(__name__)
     
    -port_by_scheme = {
    -    'http': 80,
    -    'https': 443,
    -}
    +port_by_scheme = {"http": 80, "https": 443}
     
    -# When updating RECENT_DATE, move it to
    -# within two years of the current date, and no
    -# earlier than 6 months ago.
    -RECENT_DATE = datetime.date(2016, 1, 1)
    +# When it comes time to update this value as a part of regular maintenance
    +# (ie test_recent_date is failing) update it to ~6 months before the current date.
    +RECENT_DATE = datetime.date(2019, 1, 1)
     
     
     class DummyConnection(object):
         """Used to detect a failed ConnectionCls import."""
    +
         pass
     
     
    @@ -77,9 +75,6 @@ class HTTPConnection(_HTTPConnection, object):
     
           - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
           - ``source_address``: Set the source address for the current connection.
    -
    -        .. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x
    -
           - ``socket_options``: Set specific options on the underlying socket. If not specified, then
             defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
             Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
    @@ -94,7 +89,7 @@ class HTTPConnection(_HTTPConnection, object):
             Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
         """
     
    -    default_port = port_by_scheme['http']
    +    default_port = port_by_scheme["http"]
     
         #: Disable Nagle's algorithm by default.
         #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
    @@ -104,26 +99,47 @@ class HTTPConnection(_HTTPConnection, object):
         is_verified = False
     
         def __init__(self, *args, **kw):
    -        if six.PY3:  # Python 3
    -            kw.pop('strict', None)
    +        if not six.PY2:
    +            kw.pop("strict", None)
     
    -        # Pre-set source_address in case we have an older Python like 2.6.
    -        self.source_address = kw.get('source_address')
    -
    -        if sys.version_info < (2, 7):  # Python 2.6
    -            # _HTTPConnection on Python 2.6 will balk at this keyword arg, but
    -            # not newer versions. We can still use it when creating a
    -            # connection though, so we pop it *after* we have saved it as
    -            # self.source_address.
    -            kw.pop('source_address', None)
    +        # Pre-set source_address.
    +        self.source_address = kw.get("source_address")
     
             #: The socket options provided by the user. If no options are
             #: provided, we use the default options.
    -        self.socket_options = kw.pop('socket_options', self.default_socket_options)
    +        self.socket_options = kw.pop("socket_options", self.default_socket_options)
     
    -        # Superclass also sets self.source_address in Python 2.7+.
             _HTTPConnection.__init__(self, *args, **kw)
     
    +    @property
    +    def host(self):
    +        """
    +        Getter method to remove any trailing dots that indicate the hostname is an FQDN.
    +
    +        In general, SSL certificates don't include the trailing dot indicating a
    +        fully-qualified domain name, and thus, they don't validate properly when
    +        checked against a domain name that includes the dot. In addition, some
    +        servers may not expect to receive the trailing dot when provided.
    +
    +        However, the hostname with trailing dot is critical to DNS resolution; doing a
    +        lookup with the trailing dot will properly only resolve the appropriate FQDN,
    +        whereas a lookup without a trailing dot will search the system's search domain
    +        list. Thus, it's important to keep the original host around for use only in
    +        those cases where it's appropriate (i.e., when doing DNS lookup to establish the
    +        actual TCP connection across which we're going to send HTTP requests).
    +        """
    +        return self._dns_host.rstrip(".")
    +
    +    @host.setter
    +    def host(self, value):
    +        """
    +        Setter for the `host` property.
    +
    +        We assume that only urllib3 uses the _dns_host attribute; httplib itself
    +        only uses `host`, and it seems reasonable that other libraries follow suit.
    +        """
    +        self._dns_host = value
    +
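
The new host property splits the trailing-dot concern in two: the dotted form stays available for DNS, the stripped form for everything else. A quick sketch of the observable behaviour:

# Trailing dot is kept for DNS resolution (_dns_host) but stripped for
# Host headers and TLS matching (host), per the property pair above.
from urllib3.connection import HTTPConnection

conn = HTTPConnection("example.com.", port=80)
print(conn.host)       # "example.com"
print(conn._dns_host)  # "example.com."
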
         def _new_conn(self):
             """ Establish a socket connection and set nodelay settings on it.
     
    @@ -131,32 +147,34 @@ class HTTPConnection(_HTTPConnection, object):
             """
             extra_kw = {}
             if self.source_address:
    -            extra_kw['source_address'] = self.source_address
    +            extra_kw["source_address"] = self.source_address
     
             if self.socket_options:
    -            extra_kw['socket_options'] = self.socket_options
    +            extra_kw["socket_options"] = self.socket_options
     
             try:
                 conn = connection.create_connection(
    -                (self.host, self.port), self.timeout, **extra_kw)
    +                (self._dns_host, self.port), self.timeout, **extra_kw
    +            )
     
    -        except SocketTimeout as e:
    +        except SocketTimeout:
                 raise ConnectTimeoutError(
    -                self, "Connection to %s timed out. (connect timeout=%s)" %
    -                (self.host, self.timeout))
    +                self,
    +                "Connection to %s timed out. (connect timeout=%s)"
    +                % (self.host, self.timeout),
    +            )
     
             except SocketError as e:
                 raise NewConnectionError(
    -                self, "Failed to establish a new connection: %s" % e)
    +                self, "Failed to establish a new connection: %s" % e
    +            )
     
             return conn
     
         def _prepare_conn(self, conn):
             self.sock = conn
    -        # the _tunnel_host attribute was added in python 2.6.3 (via
    -        # http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do
    -        # not have them.
    -        if getattr(self, '_tunnel_host', None):
    +        # Google App Engine's httplib does not define _tunnel_host
    +        if getattr(self, "_tunnel_host", None):
                 # TODO: Fix tunnel so it doesn't depend on self.sock state.
                 self._tunnel()
                 # Mark this connection as not reusable
    @@ -172,74 +190,99 @@ class HTTPConnection(_HTTPConnection, object):
             body with chunked encoding and not as one block
             """
             headers = HTTPHeaderDict(headers if headers is not None else {})
    -        skip_accept_encoding = 'accept-encoding' in headers
    -        skip_host = 'host' in headers
    +        skip_accept_encoding = "accept-encoding" in headers
    +        skip_host = "host" in headers
             self.putrequest(
    -            method,
    -            url,
    -            skip_accept_encoding=skip_accept_encoding,
    -            skip_host=skip_host
    +            method, url, skip_accept_encoding=skip_accept_encoding, skip_host=skip_host
             )
             for header, value in headers.items():
                 self.putheader(header, value)
    -        if 'transfer-encoding' not in headers:
    -            self.putheader('Transfer-Encoding', 'chunked')
    +        if "transfer-encoding" not in headers:
    +            self.putheader("Transfer-Encoding", "chunked")
             self.endheaders()
     
             if body is not None:
    -            stringish_types = six.string_types + (six.binary_type,)
    +            stringish_types = six.string_types + (bytes,)
                 if isinstance(body, stringish_types):
                     body = (body,)
                 for chunk in body:
                     if not chunk:
                         continue
    -                if not isinstance(chunk, six.binary_type):
    -                    chunk = chunk.encode('utf8')
    +                if not isinstance(chunk, bytes):
    +                    chunk = chunk.encode("utf8")
                     len_str = hex(len(chunk))[2:]
    -                self.send(len_str.encode('utf-8'))
    -                self.send(b'\r\n')
    +                self.send(len_str.encode("utf-8"))
    +                self.send(b"\r\n")
                     self.send(chunk)
    -                self.send(b'\r\n')
    +                self.send(b"\r\n")
     
             # After the if clause, to always have a closed body
    -        self.send(b'0\r\n\r\n')
    +        self.send(b"0\r\n\r\n")
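
The framing request_chunked emits is easy to check by hand; a self-contained sketch (frame_chunks is an illustrative helper, not part of urllib3):

# Each chunk goes out as "<hex length>\r\n<payload>\r\n" and a zero-length
# chunk terminates the body, mirroring the send() calls above.
def frame_chunks(chunks):
    out = b""
    for chunk in chunks:
        if not chunk:
            continue
        if not isinstance(chunk, bytes):
            chunk = chunk.encode("utf8")
        out += hex(len(chunk))[2:].encode("utf-8") + b"\r\n" + chunk + b"\r\n"
    return out + b"0\r\n\r\n"


assert frame_chunks(["hello", b"world!"]) == b"5\r\nhello\r\n6\r\nworld!\r\n0\r\n\r\n"
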
     
     
     class HTTPSConnection(HTTPConnection):
    -    default_port = port_by_scheme['https']
    +    default_port = port_by_scheme["https"]
     
         ssl_version = None
     
    -    def __init__(self, host, port=None, key_file=None, cert_file=None,
    -                 strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
    -                 ssl_context=None, **kw):
    +    def __init__(
    +        self,
    +        host,
    +        port=None,
    +        key_file=None,
    +        cert_file=None,
    +        key_password=None,
    +        strict=None,
    +        timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
    +        ssl_context=None,
    +        server_hostname=None,
    +        **kw
    +    ):
     
    -        HTTPConnection.__init__(self, host, port, strict=strict,
    -                                timeout=timeout, **kw)
    +        HTTPConnection.__init__(self, host, port, strict=strict, timeout=timeout, **kw)
     
             self.key_file = key_file
             self.cert_file = cert_file
    +        self.key_password = key_password
             self.ssl_context = ssl_context
    +        self.server_hostname = server_hostname
     
             # Required property for Google AppEngine 1.9.0 which otherwise causes
             # HTTPS requests to go out as HTTP. (See Issue #356)
    -        self._protocol = 'https'
    +        self._protocol = "https"
     
         def connect(self):
             conn = self._new_conn()
             self._prepare_conn(conn)
     
    +        # Wrap socket using verification with the root certs in
    +        # trusted_root_certs
    +        default_ssl_context = False
             if self.ssl_context is None:
    +            default_ssl_context = True
                 self.ssl_context = create_urllib3_context(
    -                ssl_version=resolve_ssl_version(None),
    -                cert_reqs=resolve_cert_reqs(None),
    +                ssl_version=resolve_ssl_version(self.ssl_version),
    +                cert_reqs=resolve_cert_reqs(self.cert_reqs),
                 )
     
    +        # Try to load OS default certs if none are given.
    +        # Works well on Windows (requires Python3.4+)
    +        context = self.ssl_context
    +        if (
    +            not self.ca_certs
    +            and not self.ca_cert_dir
    +            and default_ssl_context
    +            and hasattr(context, "load_default_certs")
    +        ):
    +            context.load_default_certs()
    +
             self.sock = ssl_wrap_socket(
                 sock=conn,
                 keyfile=self.key_file,
                 certfile=self.cert_file,
    +            key_password=self.key_password,
                 ssl_context=self.ssl_context,
    +            server_hostname=self.server_hostname,
             )
     
     
    @@ -248,32 +291,39 @@ class VerifiedHTTPSConnection(HTTPSConnection):
         Based on httplib.HTTPSConnection but wraps the socket with
         SSL certification.
         """
    +
         cert_reqs = None
         ca_certs = None
         ca_cert_dir = None
         ssl_version = None
         assert_fingerprint = None
     
    -    def set_cert(self, key_file=None, cert_file=None,
    -                 cert_reqs=None, ca_certs=None,
    -                 assert_hostname=None, assert_fingerprint=None,
    -                 ca_cert_dir=None):
    +    def set_cert(
    +        self,
    +        key_file=None,
    +        cert_file=None,
    +        cert_reqs=None,
    +        key_password=None,
    +        ca_certs=None,
    +        assert_hostname=None,
    +        assert_fingerprint=None,
    +        ca_cert_dir=None,
    +    ):
             """
             This method should only be called once, before the connection is used.
             """
    -        # If cert_reqs is not provided, we can try to guess. If the user gave
    -        # us a cert database, we assume they want to use it: otherwise, if
    -        # they gave us an SSL Context object we should use whatever is set for
    -        # it.
    +        # If cert_reqs is not provided we'll assume CERT_REQUIRED unless we also
    +        # have an SSLContext object in which case we'll use its verify_mode.
             if cert_reqs is None:
    -            if ca_certs or ca_cert_dir:
    -                cert_reqs = 'CERT_REQUIRED'
    -            elif self.ssl_context is not None:
    +            if self.ssl_context is not None:
                     cert_reqs = self.ssl_context.verify_mode
    +            else:
    +                cert_reqs = resolve_cert_reqs(None)
     
             self.key_file = key_file
             self.cert_file = cert_file
             self.cert_reqs = cert_reqs
    +        self.key_password = key_password
             self.assert_hostname = assert_hostname
             self.assert_fingerprint = assert_fingerprint
             self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
    @@ -282,12 +332,10 @@ class VerifiedHTTPSConnection(HTTPSConnection):
         def connect(self):
             # Add certificate verification
             conn = self._new_conn()
    -
             hostname = self.host
    -        if getattr(self, '_tunnel_host', None):
    -            # _tunnel_host was added in Python 2.6.3
    -            # (See: http://hg.python.org/cpython/rev/0f57b30a152f)
     
    +        # Google App Engine's httplib does not define _tunnel_host
    +        if getattr(self, "_tunnel_host", None):
                 self.sock = conn
                 # Calls self._set_hostport(), so self.host is
                 # self._tunnel_host below.
    @@ -298,17 +346,25 @@ class VerifiedHTTPSConnection(HTTPSConnection):
                 # Override the host with the one we're requesting data from.
                 hostname = self._tunnel_host
     
    +        server_hostname = hostname
    +        if self.server_hostname is not None:
    +            server_hostname = self.server_hostname
    +
             is_time_off = datetime.date.today() < RECENT_DATE
             if is_time_off:
    -            warnings.warn((
    -                'System time is way off (before {0}). This will probably '
    -                'lead to SSL verification errors').format(RECENT_DATE),
    -                SystemTimeWarning
    +            warnings.warn(
    +                (
    +                    "System time is way off (before {0}). This will probably "
    +                    "lead to SSL verification errors"
    +                ).format(RECENT_DATE),
    +                SystemTimeWarning,
                 )
     
             # Wrap socket using verification with the root certs in
             # trusted_root_certs
    +        default_ssl_context = False
             if self.ssl_context is None:
    +            default_ssl_context = True
                 self.ssl_context = create_urllib3_context(
                     ssl_version=resolve_ssl_version(self.ssl_version),
                     cert_reqs=resolve_cert_reqs(self.cert_reqs),
    @@ -316,38 +372,56 @@ class VerifiedHTTPSConnection(HTTPSConnection):
     
             context = self.ssl_context
             context.verify_mode = resolve_cert_reqs(self.cert_reqs)
    +
    +        # Try to load OS default certs if none are given.
    +        # Works well on Windows (requires Python3.4+)
    +        if (
    +            not self.ca_certs
    +            and not self.ca_cert_dir
    +            and default_ssl_context
    +            and hasattr(context, "load_default_certs")
    +        ):
    +            context.load_default_certs()
    +
             self.sock = ssl_wrap_socket(
                 sock=conn,
                 keyfile=self.key_file,
                 certfile=self.cert_file,
    +            key_password=self.key_password,
                 ca_certs=self.ca_certs,
                 ca_cert_dir=self.ca_cert_dir,
    -            server_hostname=hostname,
    -            ssl_context=context)
    +            server_hostname=server_hostname,
    +            ssl_context=context,
    +        )
     
             if self.assert_fingerprint:
    -            assert_fingerprint(self.sock.getpeercert(binary_form=True),
    -                               self.assert_fingerprint)
    -        elif context.verify_mode != ssl.CERT_NONE \
    -                and not getattr(context, 'check_hostname', False) \
    -                and self.assert_hostname is not False:
    +            assert_fingerprint(
    +                self.sock.getpeercert(binary_form=True), self.assert_fingerprint
    +            )
    +        elif (
    +            context.verify_mode != ssl.CERT_NONE
    +            and not getattr(context, "check_hostname", False)
    +            and self.assert_hostname is not False
    +        ):
                 # While urllib3 attempts to always turn off hostname matching from
                 # the TLS library, this cannot always be done. So we check whether
                 # the TLS Library still thinks it's matching hostnames.
                 cert = self.sock.getpeercert()
    -            if not cert.get('subjectAltName', ()):
    -                warnings.warn((
    -                    'Certificate for {0} has no `subjectAltName`, falling back to check for a '
    -                    '`commonName` for now. This feature is being removed by major browsers and '
    -                    'deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 '
    -                    'for details.)'.format(hostname)),
    -                    SubjectAltNameWarning
    +            if not cert.get("subjectAltName", ()):
    +                warnings.warn(
    +                    (
    +                        "Certificate for {0} has no `subjectAltName`, falling back to check for a "
    +                        "`commonName` for now. This feature is being removed by major browsers and "
    +                        "deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 "
    +                        "for details.)".format(hostname)
    +                    ),
    +                    SubjectAltNameWarning,
                     )
    -            _match_hostname(cert, self.assert_hostname or hostname)
    +            _match_hostname(cert, self.assert_hostname or server_hostname)
     
             self.is_verified = (
    -            context.verify_mode == ssl.CERT_REQUIRED or
    -            self.assert_fingerprint is not None
    +            context.verify_mode == ssl.CERT_REQUIRED
    +            or self.assert_fingerprint is not None
             )
     
     
    @@ -355,9 +429,10 @@ def _match_hostname(cert, asserted_hostname):
         try:
             match_hostname(cert, asserted_hostname)
         except CertificateError as e:
    -        log.error(
    -            'Certificate did not match expected hostname: %s. '
    -            'Certificate: %s', asserted_hostname, cert
    +        log.warning(
    +            "Certificate did not match expected hostname: %s. " "Certificate: %s",
    +            asserted_hostname,
    +            cert,
             )
             # Add cert to exception and reraise so client code can inspect
             # the cert when catching the exception, if they want to
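
The server_hostname parameter threaded through connect() above decouples the TCP endpoint from the TLS identity (SNI and certificate checks). A hedged sketch driving it through the pool layer, which forwards extra keyword arguments to the connection class; the address is a placeholder:

# Dial one address while validating the certificate for a different name.
from urllib3 import HTTPSConnectionPool

pool = HTTPSConnectionPool(
    "198.51.100.7",                 # placeholder address actually dialled
    port=443,
    server_hostname="example.com",  # name used for SNI + certificate checks
)
resp = pool.request("GET", "/", headers={"Host": "example.com"})
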
    diff --git a/lib/urllib3/connectionpool.py b/lib/urllib3/connectionpool.py
    index ec9600f8..e73fa57a 100644
    --- a/lib/urllib3/connectionpool.py
    +++ b/lib/urllib3/connectionpool.py
    @@ -29,8 +29,11 @@ from .packages.six.moves import queue
     from .connection import (
         port_by_scheme,
         DummyConnection,
    -    HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,
    -    HTTPException, BaseSSLError,
    +    HTTPConnection,
    +    HTTPSConnection,
    +    VerifiedHTTPSConnection,
    +    HTTPException,
    +    BaseSSLError,
     )
     from .request import RequestMethods
     from .response import HTTPResponse
    @@ -40,13 +43,16 @@ from .util.request import set_file_position
     from .util.response import assert_header_parsing
     from .util.retry import Retry
     from .util.timeout import Timeout
    -from .util.url import get_host, Url
    +from .util.url import (
    +    get_host,
    +    parse_url,
    +    Url,
    +    _normalize_host as normalize_host,
    +    _encode_target,
    +)
    +from .util.queue import LifoQueue
     
     
    -if six.PY2:
    -    # Queue is imported for side effects on MS Windows
    -    import Queue as _unused_module_Queue  # noqa: F401
    -
     xrange = six.moves.xrange
     
     log = logging.getLogger(__name__)
    @@ -62,19 +68,18 @@ class ConnectionPool(object):
         """
     
         scheme = None
    -    QueueCls = queue.LifoQueue
    +    QueueCls = LifoQueue
     
         def __init__(self, host, port=None):
             if not host:
                 raise LocationValueError("No host specified.")
     
    -        self.host = _ipv6_host(host).lower()
    +        self.host = _normalize_host(host, scheme=self.scheme)
             self._proxy_host = host.lower()
             self.port = port
     
         def __str__(self):
    -        return '%s(host=%r, port=%r)' % (type(self).__name__,
    -                                         self.host, self.port)
    +        return "%s(host=%r, port=%r)" % (type(self).__name__, self.host, self.port)
     
         def __enter__(self):
             return self
    @@ -92,7 +97,7 @@ class ConnectionPool(object):
     
     
     # This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
    -_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
    +_blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK}
     
     
     class HTTPConnectionPool(ConnectionPool, RequestMethods):
    @@ -155,15 +160,24 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
             :class:`urllib3.connection.HTTPSConnection` instances.
         """
     
    -    scheme = 'http'
    +    scheme = "http"
         ConnectionCls = HTTPConnection
         ResponseCls = HTTPResponse
     
    -    def __init__(self, host, port=None, strict=False,
    -                 timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
    -                 headers=None, retries=None,
    -                 _proxy=None, _proxy_headers=None,
    -                 **conn_kw):
    +    def __init__(
    +        self,
    +        host,
    +        port=None,
    +        strict=False,
    +        timeout=Timeout.DEFAULT_TIMEOUT,
    +        maxsize=1,
    +        block=False,
    +        headers=None,
    +        retries=None,
    +        _proxy=None,
    +        _proxy_headers=None,
    +        **conn_kw
    +    ):
             ConnectionPool.__init__(self, host, port)
             RequestMethods.__init__(self, headers)
     
    @@ -197,19 +211,27 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
                 # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
                 # We cannot know if the user has added default socket options, so we cannot replace the
                 # list.
    -            self.conn_kw.setdefault('socket_options', [])
    +            self.conn_kw.setdefault("socket_options", [])
     
         def _new_conn(self):
             """
             Return a fresh :class:`HTTPConnection`.
             """
             self.num_connections += 1
    -        log.debug("Starting new HTTP connection (%d): %s",
    -                  self.num_connections, self.host)
    +        log.debug(
    +            "Starting new HTTP connection (%d): %s:%s",
    +            self.num_connections,
    +            self.host,
    +            self.port or "80",
    +        )
     
    -        conn = self.ConnectionCls(host=self.host, port=self.port,
    -                                  timeout=self.timeout.connect_timeout,
    -                                  strict=self.strict, **self.conn_kw)
    +        conn = self.ConnectionCls(
    +            host=self.host,
    +            port=self.port,
    +            timeout=self.timeout.connect_timeout,
    +            strict=self.strict,
    +            **self.conn_kw
    +        )
             return conn
     
         def _get_conn(self, timeout=None):
    @@ -233,16 +255,17 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
     
             except queue.Empty:
                 if self.block:
    -                raise EmptyPoolError(self,
    -                                     "Pool reached maximum size and no more "
    -                                     "connections are allowed.")
    +                raise EmptyPoolError(
    +                    self,
    +                    "Pool reached maximum size and no more " "connections are allowed.",
    +                )
                 pass  # Oh well, we'll create a new connection then
     
             # If this is a persistent connection, check if it got disconnected
             if conn and is_connection_dropped(conn):
                 log.debug("Resetting dropped connection: %s", self.host)
                 conn.close()
    -            if getattr(conn, 'auto_open', 1) == 0:
    +            if getattr(conn, "auto_open", 1) == 0:
                     # This is a proxied connection that has been mutated by
                     # httplib._tunnel() and cannot be reused (since it would
                     # attempt to bypass the proxy)
    @@ -272,9 +295,7 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
                 pass
             except queue.Full:
                 # This should never happen if self.block == True
    -            log.warning(
    -                "Connection pool is full, discarding connection: %s",
    -                self.host)
    +            log.warning("Connection pool is full, discarding connection: %s", self.host)
     
             # Connection never got put back into the pool, close it.
             if conn:
    @@ -306,21 +327,30 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
             """Is the error actually a timeout? Will raise a ReadTimeout or pass"""
     
             if isinstance(err, SocketTimeout):
    -            raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
    +            raise ReadTimeoutError(
    +                self, url, "Read timed out. (read timeout=%s)" % timeout_value
    +            )
     
             # See the above comment about EAGAIN in Python 3. In Python 2 we have
             # to specifically catch it and throw the timeout error
    -        if hasattr(err, 'errno') and err.errno in _blocking_errnos:
    -            raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
    +        if hasattr(err, "errno") and err.errno in _blocking_errnos:
    +            raise ReadTimeoutError(
    +                self, url, "Read timed out. (read timeout=%s)" % timeout_value
    +            )
     
             # Catch possible read timeouts thrown as SSL errors. If not the
             # case, rethrow the original. We need to do this because of:
             # http://bugs.python.org/issue10272
    -        if 'timed out' in str(err) or 'did not complete (read)' in str(err):  # Python 2.6
    -            raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
    +        if "timed out" in str(err) or "did not complete (read)" in str(
    +            err
    +        ):  # Python < 2.7.4
    +            raise ReadTimeoutError(
    +                self, url, "Read timed out. (read timeout=%s)" % timeout_value
    +            )
     
    -    def _make_request(self, conn, method, url, timeout=_Default, chunked=False,
    -                      **httplib_request_kw):
    +    def _make_request(
    +        self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw
    +    ):
             """
             Perform a request on a given urllib connection object taken from our
             pool.
    @@ -360,7 +390,7 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
             read_timeout = timeout_obj.read_timeout
     
             # App Engine doesn't have a sock attr
    -        if getattr(conn, 'sock', None):
    +        if getattr(conn, "sock", None):
                 # In Python 3 socket.py will catch EAGAIN and return None when you
                 # try and read into the file pointer created by http.client, which
                 # instead raises a BadStatusLine exception. Instead of catching
    @@ -368,7 +398,8 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
                 # timeouts, check for a zero timeout before making the request.
                 if read_timeout == 0:
                     raise ReadTimeoutError(
    -                    self, url, "Read timed out. (read timeout=%s)" % read_timeout)
    +                    self, url, "Read timed out. (read timeout=%s)" % read_timeout
    +                )
                 if read_timeout is Timeout.DEFAULT_TIMEOUT:
                     conn.sock.settimeout(socket.getdefaulttimeout())
                 else:  # None or a value
    @@ -376,31 +407,45 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
     
             # Receive the response from the server
             try:
    -            try:  # Python 2.7, use buffering of HTTP responses
    +            try:
    +                # Python 2.7, use buffering of HTTP responses
                     httplib_response = conn.getresponse(buffering=True)
    -            except TypeError:  # Python 2.6 and older, Python 3
    +            except TypeError:
    +                # Python 3
                     try:
                         httplib_response = conn.getresponse()
    -                except Exception as e:
    -                    # Remove the TypeError from the exception chain in Python 3;
    -                    # otherwise it looks like a programming error was the cause.
    +                except BaseException as e:
    +                    # Remove the TypeError from the exception chain in
    +                    # Python 3 (including for exceptions like SystemExit).
    +                    # Otherwise it looks like a bug in the code.
                         six.raise_from(e, None)
             except (SocketTimeout, BaseSSLError, SocketError) as e:
                 self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
                 raise
     
             # AppEngine doesn't have a version attr.
    -        http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
    -        log.debug("%s://%s:%s \"%s %s %s\" %s %s", self.scheme, self.host, self.port,
    -                  method, url, http_version, httplib_response.status,
    -                  httplib_response.length)
    +        http_version = getattr(conn, "_http_vsn_str", "HTTP/?")
    +        log.debug(
    +            '%s://%s:%s "%s %s %s" %s %s',
    +            self.scheme,
    +            self.host,
    +            self.port,
    +            method,
    +            url,
    +            http_version,
    +            httplib_response.status,
    +            httplib_response.length,
    +        )
     
             try:
                 assert_header_parsing(httplib_response.msg)
             except (HeaderParsingError, TypeError) as hpe:  # Platform-specific: Python 3
                 log.warning(
    -                'Failed to parse headers (url=%s): %s',
    -                self._absolute_url(url), hpe, exc_info=True)
    +                "Failed to parse headers (url=%s): %s",
    +                self._absolute_url(url),
    +                hpe,
    +                exc_info=True,
    +            )
     
             return httplib_response
     
    @@ -411,6 +456,8 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
             """
             Close all pooled connections and disable the pool.
             """
    +        if self.pool is None:
    +            return
             # Disable access to the pool
             old_pool, self.pool = self.pool, None
     
    @@ -428,13 +475,13 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
             Check if the given ``url`` is a member of the same host as this
             connection pool.
             """
    -        if url.startswith('/'):
    +        if url.startswith("/"):
                 return True
     
             # TODO: Add optional support for socket.gethostbyname checking.
             scheme, host, port = get_host(url)
    -
    -        host = _ipv6_host(host).lower()
    +        if host is not None:
    +            host = _normalize_host(host, scheme=scheme)
     
             # Use explicit default port for comparison when none is given
             if self.port and not port:
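
A few concrete cases for the is_same_host logic above (hypothetical pool; relative URLs short-circuit, absolute ones are normalized and compared component-wise):

from urllib3 import HTTPConnectionPool

pool = HTTPConnectionPool("example.com", port=80)
pool.is_same_host("/index.html")           # True  -- starts with "/"
pool.is_same_host("http://example.com/")   # True  -- implicit port filled to 80
pool.is_same_host("https://example.com/")  # False -- scheme differs
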
    @@ -444,10 +491,22 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
     
             return (scheme, host, port) == (self.scheme, self.host, self.port)
     
    -    def urlopen(self, method, url, body=None, headers=None, retries=None,
    -                redirect=True, assert_same_host=True, timeout=_Default,
    -                pool_timeout=None, release_conn=None, chunked=False,
    -                body_pos=None, **response_kw):
    +    def urlopen(
    +        self,
    +        method,
    +        url,
    +        body=None,
    +        headers=None,
    +        retries=None,
    +        redirect=True,
    +        assert_same_host=True,
    +        timeout=_Default,
    +        pool_timeout=None,
    +        release_conn=None,
    +        chunked=False,
    +        body_pos=None,
    +        **response_kw
    +    ):
             """
             Get a connection from the pool and perform an HTTP request. This is the
             lowest level call for making a request, so you'll need to specify all
    @@ -545,12 +604,18 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
                 retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
     
             if release_conn is None:
    -            release_conn = response_kw.get('preload_content', True)
    +            release_conn = response_kw.get("preload_content", True)
     
             # Check host
             if assert_same_host and not self.is_same_host(url):
                 raise HostChangedError(self, url, retries)
     
    +        # Ensure that the URL we're connecting to is properly encoded
    +        if url.startswith("/"):
    +            url = six.ensure_str(_encode_target(url))
    +        else:
    +            url = six.ensure_str(parse_url(url).url)
    +
             conn = None
     
             # Track whether `conn` needs to be released before
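
The new normalization step funnels every absolute request target through parse_url and re-serializes it, so the wire form is consistently encoded. Its round-trip behaviour, roughly:

# parse_url splits a URL into components; .url re-serializes the
# normalized form that urlopen now sends.
from urllib3.util.url import parse_url

u = parse_url("https://example.com:8042/over/there?name=ferret")
print(u.scheme, u.host, u.port, u.path)  # https example.com 8042 /over/there
print(u.url)                             # normalized round-trip
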
    @@ -567,7 +632,7 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
             # Merge the proxy headers. Only do this in HTTP. We have to copy the
             # headers dict so we can safely change it without those changes being
             # reflected in anyone else's copy.
    -        if self.scheme == 'http':
    +        if self.scheme == "http":
                 headers = headers.copy()
                 headers.update(self.proxy_headers)
     
    @@ -590,15 +655,22 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
     
                 conn.timeout = timeout_obj.connect_timeout
     
    -            is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
    +            is_new_proxy_conn = self.proxy is not None and not getattr(
    +                conn, "sock", None
    +            )
                 if is_new_proxy_conn:
                     self._prepare_proxy(conn)
     
                 # Make the request on the httplib connection object.
    -            httplib_response = self._make_request(conn, method, url,
    -                                                  timeout=timeout_obj,
    -                                                  body=body, headers=headers,
    -                                                  chunked=chunked)
    +            httplib_response = self._make_request(
    +                conn,
    +                method,
    +                url,
    +                timeout=timeout_obj,
    +                body=body,
    +                headers=headers,
    +                chunked=chunked,
    +            )
     
                 # If we're going to release the connection in ``finally:``, then
                 # the response doesn't need to know about the connection. Otherwise
    @@ -607,14 +679,16 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
                 response_conn = conn if not release_conn else None
     
                 # Pass method to Response for length checking
    -            response_kw['request_method'] = method
    +            response_kw["request_method"] = method
     
                 # Import httplib's response into our own wrapper object
    -            response = self.ResponseCls.from_httplib(httplib_response,
    -                                                     pool=self,
    -                                                     connection=response_conn,
    -                                                     retries=retries,
    -                                                     **response_kw)
    +            response = self.ResponseCls.from_httplib(
    +                httplib_response,
    +                pool=self,
    +                connection=response_conn,
    +                retries=retries,
    +                **response_kw
    +            )
     
                 # Everything went great!
                 clean_exit = True
    @@ -623,20 +697,28 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
                 # Timed out by queue.
                 raise EmptyPoolError(self, "No pool connections are available.")
     
    -        except (TimeoutError, HTTPException, SocketError, ProtocolError,
    -                BaseSSLError, SSLError, CertificateError) as e:
    +        except (
    +            TimeoutError,
    +            HTTPException,
    +            SocketError,
    +            ProtocolError,
    +            BaseSSLError,
    +            SSLError,
    +            CertificateError,
    +        ) as e:
                 # Discard the connection for these exceptions. It will be
                 # replaced during the next _get_conn() call.
                 clean_exit = False
                 if isinstance(e, (BaseSSLError, CertificateError)):
                     e = SSLError(e)
                 elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
    -                e = ProxyError('Cannot connect to proxy.', e)
    +                e = ProxyError("Cannot connect to proxy.", e)
                 elif isinstance(e, (SocketError, HTTPException)):
    -                e = ProtocolError('Connection aborted.', e)
    +                e = ProtocolError("Connection aborted.", e)
     
    -            retries = retries.increment(method, url, error=e, _pool=self,
    -                                        _stacktrace=sys.exc_info()[2])
    +            retries = retries.increment(
    +                method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2]
    +            )
                 retries.sleep()
     
                 # Keep track of the error for the retry warning.
    @@ -659,28 +741,47 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
     
             if not conn:
                 # Try again
    -            log.warning("Retrying (%r) after connection "
    -                        "broken by '%r': %s", retries, err, url)
    -            return self.urlopen(method, url, body, headers, retries,
    -                                redirect, assert_same_host,
    -                                timeout=timeout, pool_timeout=pool_timeout,
    -                                release_conn=release_conn, body_pos=body_pos,
    -                                **response_kw)
    +            log.warning(
    +                "Retrying (%r) after connection " "broken by '%r': %s",
    +                retries,
    +                err,
    +                url,
    +            )
    +            return self.urlopen(
    +                method,
    +                url,
    +                body,
    +                headers,
    +                retries,
    +                redirect,
    +                assert_same_host,
    +                timeout=timeout,
    +                pool_timeout=pool_timeout,
    +                release_conn=release_conn,
    +                body_pos=body_pos,
    +                **response_kw
    +            )
     
             def drain_and_release_conn(response):
                 try:
                     # discard any remaining response body, the connection will be
                     # released back to the pool once the entire response is read
                     response.read()
    -            except (TimeoutError, HTTPException, SocketError, ProtocolError,
    -                    BaseSSLError, SSLError) as e:
    +            except (
    +                TimeoutError,
    +                HTTPException,
    +                SocketError,
    +                ProtocolError,
    +                BaseSSLError,
    +                SSLError,
    +            ):
                     pass
     
             # Handle redirect?
             redirect_location = redirect and response.get_redirect_location()
             if redirect_location:
                 if response.status == 303:
    -                method = 'GET'
    +                method = "GET"
     
                 try:
                     retries = retries.increment(method, url, response=response, _pool=self)
    @@ -698,15 +799,22 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
                 retries.sleep_for_retry(response)
                 log.debug("Redirecting %s -> %s", url, redirect_location)
                 return self.urlopen(
    -                method, redirect_location, body, headers,
    -                retries=retries, redirect=redirect,
    +                method,
    +                redirect_location,
    +                body,
    +                headers,
    +                retries=retries,
    +                redirect=redirect,
                     assert_same_host=assert_same_host,
    -                timeout=timeout, pool_timeout=pool_timeout,
    -                release_conn=release_conn, body_pos=body_pos,
    -                **response_kw)
    +                timeout=timeout,
    +                pool_timeout=pool_timeout,
    +                release_conn=release_conn,
    +                body_pos=body_pos,
    +                **response_kw
    +            )
     
             # Check if we should retry the HTTP response.
    -        has_retry_after = bool(response.getheader('Retry-After'))
    +        has_retry_after = bool(response.getheader("Retry-After"))
             if retries.is_retry(method, response.status, has_retry_after):
                 try:
                     retries = retries.increment(method, url, response=response, _pool=self)
    @@ -724,12 +832,19 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
                 retries.sleep(response)
                 log.debug("Retry: %s", url)
                 return self.urlopen(
    -                method, url, body, headers,
    -                retries=retries, redirect=redirect,
    +                method,
    +                url,
    +                body,
    +                headers,
    +                retries=retries,
    +                redirect=redirect,
                     assert_same_host=assert_same_host,
    -                timeout=timeout, pool_timeout=pool_timeout,
    +                timeout=timeout,
    +                pool_timeout=pool_timeout,
                     release_conn=release_conn,
    -                body_pos=body_pos, **response_kw)
    +                body_pos=body_pos,
    +                **response_kw
    +            )
     
             return response
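
The retry and redirect hunks above only reflow code; behavior is unchanged. For orientation, a minimal sketch of the ``Retry`` API that ``urlopen`` drives here (standard urllib3 usage, not code from this patch):

    from urllib3.util.retry import Retry

    retries = Retry(total=3, redirect=2, status_forcelist=[503])

    # increment() returns a *new* Retry with one attempt consumed; urlopen()
    # calls it on every error or retryable response before recursing.
    retries = retries.increment(method="GET", url="/")
    print(retries.total)  # 2

    # is_retry() is what the "Check if we should retry" block consults,
    # passing the method, status code, and presence of a Retry-After header.
    print(retries.is_retry("GET", status_code=503, has_retry_after=False))  # True
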
     
    @@ -747,33 +862,57 @@ class HTTPSConnectionPool(HTTPConnectionPool):
         If ``assert_hostname`` is False, no verification is done.
     
         The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
    -    ``ca_cert_dir``, and ``ssl_version`` are only used if :mod:`ssl` is
    -    available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
     +    ``ca_cert_dir``, ``ssl_version``, and ``key_password`` are only used if :mod:`ssl`
    +    is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
         the connection socket into an SSL socket.
         """
     
    -    scheme = 'https'
    +    scheme = "https"
         ConnectionCls = HTTPSConnection
     
    -    def __init__(self, host, port=None,
    -                 strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1,
    -                 block=False, headers=None, retries=None,
    -                 _proxy=None, _proxy_headers=None,
    -                 key_file=None, cert_file=None, cert_reqs=None,
    -                 ca_certs=None, ssl_version=None,
    -                 assert_hostname=None, assert_fingerprint=None,
    -                 ca_cert_dir=None, **conn_kw):
    +    def __init__(
    +        self,
    +        host,
    +        port=None,
    +        strict=False,
    +        timeout=Timeout.DEFAULT_TIMEOUT,
    +        maxsize=1,
    +        block=False,
    +        headers=None,
    +        retries=None,
    +        _proxy=None,
    +        _proxy_headers=None,
    +        key_file=None,
    +        cert_file=None,
    +        cert_reqs=None,
    +        key_password=None,
    +        ca_certs=None,
    +        ssl_version=None,
    +        assert_hostname=None,
    +        assert_fingerprint=None,
    +        ca_cert_dir=None,
    +        **conn_kw
    +    ):
     
    -        HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
    -                                    block, headers, retries, _proxy, _proxy_headers,
    -                                    **conn_kw)
    -
    -        if ca_certs and cert_reqs is None:
    -            cert_reqs = 'CERT_REQUIRED'
    +        HTTPConnectionPool.__init__(
    +            self,
    +            host,
    +            port,
    +            strict,
    +            timeout,
    +            maxsize,
    +            block,
    +            headers,
    +            retries,
    +            _proxy,
    +            _proxy_headers,
    +            **conn_kw
    +        )
     
             self.key_file = key_file
             self.cert_file = cert_file
             self.cert_reqs = cert_reqs
    +        self.key_password = key_password
             self.ca_certs = ca_certs
             self.ca_cert_dir = ca_cert_dir
             self.ssl_version = ssl_version
    @@ -787,13 +926,16 @@ class HTTPSConnectionPool(HTTPConnectionPool):
             """
     
             if isinstance(conn, VerifiedHTTPSConnection):
    -            conn.set_cert(key_file=self.key_file,
    -                          cert_file=self.cert_file,
    -                          cert_reqs=self.cert_reqs,
    -                          ca_certs=self.ca_certs,
    -                          ca_cert_dir=self.ca_cert_dir,
    -                          assert_hostname=self.assert_hostname,
    -                          assert_fingerprint=self.assert_fingerprint)
    +            conn.set_cert(
    +                key_file=self.key_file,
    +                key_password=self.key_password,
    +                cert_file=self.cert_file,
    +                cert_reqs=self.cert_reqs,
    +                ca_certs=self.ca_certs,
    +                ca_cert_dir=self.ca_cert_dir,
    +                assert_hostname=self.assert_hostname,
    +                assert_fingerprint=self.assert_fingerprint,
    +            )
                 conn.ssl_version = self.ssl_version
             return conn
     
    @@ -802,17 +944,7 @@ class HTTPSConnectionPool(HTTPConnectionPool):
             Establish tunnel connection early, because otherwise httplib
             would improperly set Host: header to proxy's IP:port.
             """
    -        # Python 2.7+
    -        try:
    -            set_tunnel = conn.set_tunnel
    -        except AttributeError:  # Platform-specific: Python 2.6
    -            set_tunnel = conn._set_tunnel
    -
    -        if sys.version_info <= (2, 6, 4) and not self.proxy_headers:  # Python 2.6.4 and older
    -            set_tunnel(self._proxy_host, self.port)
    -        else:
    -            set_tunnel(self._proxy_host, self.port, self.proxy_headers)
    -
    +        conn.set_tunnel(self._proxy_host, self.port, self.proxy_headers)
             conn.connect()
     
         def _new_conn(self):
    @@ -820,12 +952,17 @@ class HTTPSConnectionPool(HTTPConnectionPool):
             Return a fresh :class:`httplib.HTTPSConnection`.
             """
             self.num_connections += 1
    -        log.debug("Starting new HTTPS connection (%d): %s",
    -                  self.num_connections, self.host)
    +        log.debug(
    +            "Starting new HTTPS connection (%d): %s:%s",
    +            self.num_connections,
    +            self.host,
    +            self.port or "443",
    +        )
     
             if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
    -            raise SSLError("Can't connect to HTTPS URL because the SSL "
    -                           "module is not available.")
    +            raise SSLError(
    +                "Can't connect to HTTPS URL because the SSL " "module is not available."
    +            )
     
             actual_host = self.host
             actual_port = self.port
    @@ -833,9 +970,16 @@ class HTTPSConnectionPool(HTTPConnectionPool):
                 actual_host = self.proxy.host
                 actual_port = self.proxy.port
     
    -        conn = self.ConnectionCls(host=actual_host, port=actual_port,
    -                                  timeout=self.timeout.connect_timeout,
    -                                  strict=self.strict, **self.conn_kw)
    +        conn = self.ConnectionCls(
    +            host=actual_host,
    +            port=actual_port,
    +            timeout=self.timeout.connect_timeout,
    +            strict=self.strict,
    +            cert_file=self.cert_file,
    +            key_file=self.key_file,
    +            key_password=self.key_password,
    +            **self.conn_kw
    +        )
     
             return self._prepare_conn(conn)
     
    @@ -846,16 +990,19 @@ class HTTPSConnectionPool(HTTPConnectionPool):
             super(HTTPSConnectionPool, self)._validate_conn(conn)
     
             # Force connect early to allow us to validate the connection.
    -        if not getattr(conn, 'sock', None):  # AppEngine might not have  `.sock`
    +        if not getattr(conn, "sock", None):  # AppEngine might not have  `.sock`
                 conn.connect()
     
             if not conn.is_verified:
    -            warnings.warn((
    -                'Unverified HTTPS request is being made. '
    -                'Adding certificate verification is strongly advised. See: '
    -                'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
    -                '#ssl-warnings'),
    -                InsecureRequestWarning)
    +            warnings.warn(
    +                (
    +                    "Unverified HTTPS request is being made. "
    +                    "Adding certificate verification is strongly advised. See: "
    +                    "https://urllib3.readthedocs.io/en/latest/advanced-usage.html"
    +                    "#ssl-warnings"
    +                ),
    +                InsecureRequestWarning,
    +            )
     
     
     def connection_from_url(url, **kw):
    @@ -880,26 +1027,25 @@ def connection_from_url(url, **kw):
         """
         scheme, host, port = get_host(url)
         port = port or port_by_scheme.get(scheme, 80)
    -    if scheme == 'https':
    +    if scheme == "https":
             return HTTPSConnectionPool(host, port=port, **kw)
         else:
             return HTTPConnectionPool(host, port=port, **kw)
     
     
    -def _ipv6_host(host):
    +def _normalize_host(host, scheme):
         """
    -    Process IPv6 address literals
    +    Normalize hosts for comparisons and use with sockets.
         """
     
    +    host = normalize_host(host, scheme)
    +
         # httplib doesn't like it when we include brackets in IPv6 addresses
         # Specifically, if we include brackets but also pass the port then
         # httplib crazily doubles up the square brackets on the Host header.
         # Instead, we need to make sure we never pass ``None`` as the port.
         # However, for backward compatibility reasons we can't actually
         # *assert* that.  See http://bugs.python.org/issue28539
    -    #
    -    # Also if an IPv6 address literal has a zone identifier, the
    -    # percent sign might be URIencoded, convert it back into ASCII
    -    if host.startswith('[') and host.endswith(']'):
    -        host = host.replace('%25', '%').strip('[]')
    +    if host.startswith("[") and host.endswith("]"):
    +        host = host[1:-1]
         return host
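
The ``_ipv6_host`` to ``_normalize_host`` rename above also delegates lowercasing to ``util``'s ``normalize_host``. A minimal sketch of just the bracket-stripping rule the comment describes (illustrative only, not the vendored function):

    def strip_ipv6_brackets(host):
        # httplib doubles the square brackets in the Host header when a
        # bracketed IPv6 literal is combined with an explicit port, so the
        # pool stores the bare address instead.
        if host.startswith("[") and host.endswith("]"):
            return host[1:-1]
        return host

    assert strip_ipv6_brackets("[2001:db8::1]") == "2001:db8::1"
    assert strip_ipv6_brackets("example.com") == "example.com"
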
    diff --git a/lib/tornado/platform/__init__.py b/lib/urllib3/contrib/__init__.py
    old mode 100755
    new mode 100644
    similarity index 100%
    rename from lib/tornado/platform/__init__.py
    rename to lib/urllib3/contrib/__init__.py
    diff --git a/lib/urllib3/contrib/_appengine_environ.py b/lib/urllib3/contrib/_appengine_environ.py
    new file mode 100644
    index 00000000..c909010b
    --- /dev/null
    +++ b/lib/urllib3/contrib/_appengine_environ.py
    @@ -0,0 +1,32 @@
    +"""
    +This module provides means to detect the App Engine environment.
    +"""
    +
    +import os
    +
    +
    +def is_appengine():
    +    return is_local_appengine() or is_prod_appengine() or is_prod_appengine_mvms()
    +
    +
    +def is_appengine_sandbox():
    +    return is_appengine() and not is_prod_appengine_mvms()
    +
    +
    +def is_local_appengine():
    +    return (
    +        "APPENGINE_RUNTIME" in os.environ
    +        and "Development/" in os.environ["SERVER_SOFTWARE"]
    +    )
    +
    +
    +def is_prod_appengine():
    +    return (
    +        "APPENGINE_RUNTIME" in os.environ
    +        and "Google App Engine/" in os.environ["SERVER_SOFTWARE"]
    +        and not is_prod_appengine_mvms()
    +    )
    +
    +
    +def is_prod_appengine_mvms():
    +    return os.environ.get("GAE_VM", False) == "true"
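
A short usage sketch for the new module (hypothetical caller; the helpers only read environment variables, so they are safe to call on any platform). The public names in ``urllib3.contrib.appengine`` re-export these, as shown later in this patch:

    from urllib3.contrib import _appengine_environ

    if _appengine_environ.is_appengine_sandbox():
        print("App Engine sandbox: sockets unavailable, use the URLFetch manager")
    else:
        print("not sandboxed: the normal socket-based pools work")
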
    diff --git a/lib/tornado/test/__init__.py b/lib/urllib3/contrib/_securetransport/__init__.py
    old mode 100755
    new mode 100644
    similarity index 100%
    rename from lib/tornado/test/__init__.py
    rename to lib/urllib3/contrib/_securetransport/__init__.py
    diff --git a/lib/urllib3/contrib/_securetransport/bindings.py b/lib/urllib3/contrib/_securetransport/bindings.py
    index bcf41c02..b46e1e3b 100644
    --- a/lib/urllib3/contrib/_securetransport/bindings.py
    +++ b/lib/urllib3/contrib/_securetransport/bindings.py
    @@ -34,29 +34,35 @@ from __future__ import absolute_import
     import platform
     from ctypes.util import find_library
     from ctypes import (
    -    c_void_p, c_int32, c_char_p, c_size_t, c_byte, c_uint32, c_ulong, c_long,
    -    c_bool
    +    c_void_p,
    +    c_int32,
    +    c_char_p,
    +    c_size_t,
    +    c_byte,
    +    c_uint32,
    +    c_ulong,
    +    c_long,
    +    c_bool,
     )
     from ctypes import CDLL, POINTER, CFUNCTYPE
     
     
    -security_path = find_library('Security')
    +security_path = find_library("Security")
     if not security_path:
    -    raise ImportError('The library Security could not be found')
    +    raise ImportError("The library Security could not be found")
     
     
    -core_foundation_path = find_library('CoreFoundation')
    +core_foundation_path = find_library("CoreFoundation")
     if not core_foundation_path:
    -    raise ImportError('The library CoreFoundation could not be found')
    +    raise ImportError("The library CoreFoundation could not be found")
     
     
     version = platform.mac_ver()[0]
    -version_info = tuple(map(int, version.split('.')))
    +version_info = tuple(map(int, version.split(".")))
     if version_info < (10, 8):
         raise OSError(
    -        'Only OS X 10.8 and newer are supported, not %s.%s' % (
    -            version_info[0], version_info[1]
    -        )
    +        "Only OS X 10.8 and newer are supported, not %s.%s"
    +        % (version_info[0], version_info[1])
         )
     
     Security = CDLL(security_path, use_errno=True)
    @@ -129,27 +135,19 @@ try:
         Security.SecKeyGetTypeID.argtypes = []
         Security.SecKeyGetTypeID.restype = CFTypeID
     
    -    Security.SecCertificateCreateWithData.argtypes = [
    -        CFAllocatorRef,
    -        CFDataRef
    -    ]
    +    Security.SecCertificateCreateWithData.argtypes = [CFAllocatorRef, CFDataRef]
         Security.SecCertificateCreateWithData.restype = SecCertificateRef
     
    -    Security.SecCertificateCopyData.argtypes = [
    -        SecCertificateRef
    -    ]
    +    Security.SecCertificateCopyData.argtypes = [SecCertificateRef]
         Security.SecCertificateCopyData.restype = CFDataRef
     
    -    Security.SecCopyErrorMessageString.argtypes = [
    -        OSStatus,
    -        c_void_p
    -    ]
    +    Security.SecCopyErrorMessageString.argtypes = [OSStatus, c_void_p]
         Security.SecCopyErrorMessageString.restype = CFStringRef
     
         Security.SecIdentityCreateWithCertificate.argtypes = [
             CFTypeRef,
             SecCertificateRef,
    -        POINTER(SecIdentityRef)
    +        POINTER(SecIdentityRef),
         ]
         Security.SecIdentityCreateWithCertificate.restype = OSStatus
     
    @@ -159,201 +157,126 @@ try:
             c_void_p,
             Boolean,
             c_void_p,
    -        POINTER(SecKeychainRef)
    +        POINTER(SecKeychainRef),
         ]
         Security.SecKeychainCreate.restype = OSStatus
     
    -    Security.SecKeychainDelete.argtypes = [
    -        SecKeychainRef
    -    ]
    +    Security.SecKeychainDelete.argtypes = [SecKeychainRef]
         Security.SecKeychainDelete.restype = OSStatus
     
         Security.SecPKCS12Import.argtypes = [
             CFDataRef,
             CFDictionaryRef,
    -        POINTER(CFArrayRef)
    +        POINTER(CFArrayRef),
         ]
         Security.SecPKCS12Import.restype = OSStatus
     
         SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, c_void_p, POINTER(c_size_t))
    -    SSLWriteFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t))
    +    SSLWriteFunc = CFUNCTYPE(
    +        OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t)
    +    )
     
    -    Security.SSLSetIOFuncs.argtypes = [
    -        SSLContextRef,
    -        SSLReadFunc,
    -        SSLWriteFunc
    -    ]
    +    Security.SSLSetIOFuncs.argtypes = [SSLContextRef, SSLReadFunc, SSLWriteFunc]
         Security.SSLSetIOFuncs.restype = OSStatus
     
    -    Security.SSLSetPeerID.argtypes = [
    -        SSLContextRef,
    -        c_char_p,
    -        c_size_t
    -    ]
    +    Security.SSLSetPeerID.argtypes = [SSLContextRef, c_char_p, c_size_t]
         Security.SSLSetPeerID.restype = OSStatus
     
    -    Security.SSLSetCertificate.argtypes = [
    -        SSLContextRef,
    -        CFArrayRef
    -    ]
    +    Security.SSLSetCertificate.argtypes = [SSLContextRef, CFArrayRef]
         Security.SSLSetCertificate.restype = OSStatus
     
    -    Security.SSLSetCertificateAuthorities.argtypes = [
    -        SSLContextRef,
    -        CFTypeRef,
    -        Boolean
    -    ]
    +    Security.SSLSetCertificateAuthorities.argtypes = [SSLContextRef, CFTypeRef, Boolean]
         Security.SSLSetCertificateAuthorities.restype = OSStatus
     
    -    Security.SSLSetConnection.argtypes = [
    -        SSLContextRef,
    -        SSLConnectionRef
    -    ]
    +    Security.SSLSetConnection.argtypes = [SSLContextRef, SSLConnectionRef]
         Security.SSLSetConnection.restype = OSStatus
     
    -    Security.SSLSetPeerDomainName.argtypes = [
    -        SSLContextRef,
    -        c_char_p,
    -        c_size_t
    -    ]
    +    Security.SSLSetPeerDomainName.argtypes = [SSLContextRef, c_char_p, c_size_t]
         Security.SSLSetPeerDomainName.restype = OSStatus
     
    -    Security.SSLHandshake.argtypes = [
    -        SSLContextRef
    -    ]
    +    Security.SSLHandshake.argtypes = [SSLContextRef]
         Security.SSLHandshake.restype = OSStatus
     
    -    Security.SSLRead.argtypes = [
    -        SSLContextRef,
    -        c_char_p,
    -        c_size_t,
    -        POINTER(c_size_t)
    -    ]
    +    Security.SSLRead.argtypes = [SSLContextRef, c_char_p, c_size_t, POINTER(c_size_t)]
         Security.SSLRead.restype = OSStatus
     
    -    Security.SSLWrite.argtypes = [
    -        SSLContextRef,
    -        c_char_p,
    -        c_size_t,
    -        POINTER(c_size_t)
    -    ]
    +    Security.SSLWrite.argtypes = [SSLContextRef, c_char_p, c_size_t, POINTER(c_size_t)]
         Security.SSLWrite.restype = OSStatus
     
    -    Security.SSLClose.argtypes = [
    -        SSLContextRef
    -    ]
    +    Security.SSLClose.argtypes = [SSLContextRef]
         Security.SSLClose.restype = OSStatus
     
    -    Security.SSLGetNumberSupportedCiphers.argtypes = [
    -        SSLContextRef,
    -        POINTER(c_size_t)
    -    ]
    +    Security.SSLGetNumberSupportedCiphers.argtypes = [SSLContextRef, POINTER(c_size_t)]
         Security.SSLGetNumberSupportedCiphers.restype = OSStatus
     
         Security.SSLGetSupportedCiphers.argtypes = [
             SSLContextRef,
             POINTER(SSLCipherSuite),
    -        POINTER(c_size_t)
    +        POINTER(c_size_t),
         ]
         Security.SSLGetSupportedCiphers.restype = OSStatus
     
         Security.SSLSetEnabledCiphers.argtypes = [
             SSLContextRef,
             POINTER(SSLCipherSuite),
    -        c_size_t
    +        c_size_t,
         ]
         Security.SSLSetEnabledCiphers.restype = OSStatus
     
    -    Security.SSLGetNumberEnabledCiphers.argtype = [
    -        SSLContextRef,
    -        POINTER(c_size_t)
    -    ]
     +    Security.SSLGetNumberEnabledCiphers.argtypes = [SSLContextRef, POINTER(c_size_t)]
         Security.SSLGetNumberEnabledCiphers.restype = OSStatus
     
         Security.SSLGetEnabledCiphers.argtypes = [
             SSLContextRef,
             POINTER(SSLCipherSuite),
    -        POINTER(c_size_t)
    +        POINTER(c_size_t),
         ]
         Security.SSLGetEnabledCiphers.restype = OSStatus
     
    -    Security.SSLGetNegotiatedCipher.argtypes = [
    -        SSLContextRef,
    -        POINTER(SSLCipherSuite)
    -    ]
    +    Security.SSLGetNegotiatedCipher.argtypes = [SSLContextRef, POINTER(SSLCipherSuite)]
         Security.SSLGetNegotiatedCipher.restype = OSStatus
     
         Security.SSLGetNegotiatedProtocolVersion.argtypes = [
             SSLContextRef,
    -        POINTER(SSLProtocol)
    +        POINTER(SSLProtocol),
         ]
         Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus
     
    -    Security.SSLCopyPeerTrust.argtypes = [
    -        SSLContextRef,
    -        POINTER(SecTrustRef)
    -    ]
    +    Security.SSLCopyPeerTrust.argtypes = [SSLContextRef, POINTER(SecTrustRef)]
         Security.SSLCopyPeerTrust.restype = OSStatus
     
    -    Security.SecTrustSetAnchorCertificates.argtypes = [
    -        SecTrustRef,
    -        CFArrayRef
    -    ]
    +    Security.SecTrustSetAnchorCertificates.argtypes = [SecTrustRef, CFArrayRef]
         Security.SecTrustSetAnchorCertificates.restype = OSStatus
     
    -    Security.SecTrustSetAnchorCertificatesOnly.argstypes = [
    -        SecTrustRef,
    -        Boolean
    -    ]
     +    Security.SecTrustSetAnchorCertificatesOnly.argtypes = [SecTrustRef, Boolean]
         Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus
     
    -    Security.SecTrustEvaluate.argtypes = [
    -        SecTrustRef,
    -        POINTER(SecTrustResultType)
    -    ]
    +    Security.SecTrustEvaluate.argtypes = [SecTrustRef, POINTER(SecTrustResultType)]
         Security.SecTrustEvaluate.restype = OSStatus
     
    -    Security.SecTrustGetCertificateCount.argtypes = [
    -        SecTrustRef
    -    ]
    +    Security.SecTrustGetCertificateCount.argtypes = [SecTrustRef]
         Security.SecTrustGetCertificateCount.restype = CFIndex
     
    -    Security.SecTrustGetCertificateAtIndex.argtypes = [
    -        SecTrustRef,
    -        CFIndex
    -    ]
    +    Security.SecTrustGetCertificateAtIndex.argtypes = [SecTrustRef, CFIndex]
         Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef
     
         Security.SSLCreateContext.argtypes = [
             CFAllocatorRef,
             SSLProtocolSide,
    -        SSLConnectionType
    +        SSLConnectionType,
         ]
         Security.SSLCreateContext.restype = SSLContextRef
     
    -    Security.SSLSetSessionOption.argtypes = [
    -        SSLContextRef,
    -        SSLSessionOption,
    -        Boolean
    -    ]
    +    Security.SSLSetSessionOption.argtypes = [SSLContextRef, SSLSessionOption, Boolean]
         Security.SSLSetSessionOption.restype = OSStatus
     
    -    Security.SSLSetProtocolVersionMin.argtypes = [
    -        SSLContextRef,
    -        SSLProtocol
    -    ]
    +    Security.SSLSetProtocolVersionMin.argtypes = [SSLContextRef, SSLProtocol]
         Security.SSLSetProtocolVersionMin.restype = OSStatus
     
    -    Security.SSLSetProtocolVersionMax.argtypes = [
    -        SSLContextRef,
    -        SSLProtocol
    -    ]
    +    Security.SSLSetProtocolVersionMax.argtypes = [SSLContextRef, SSLProtocol]
         Security.SSLSetProtocolVersionMax.restype = OSStatus
     
    -    Security.SecCopyErrorMessageString.argtypes = [
    -        OSStatus,
    -        c_void_p
    -    ]
    +    Security.SecCopyErrorMessageString.argtypes = [OSStatus, c_void_p]
         Security.SecCopyErrorMessageString.restype = CFStringRef
     
         Security.SSLReadFunc = SSLReadFunc
    @@ -369,64 +292,47 @@ try:
         Security.OSStatus = OSStatus
     
         Security.kSecImportExportPassphrase = CFStringRef.in_dll(
    -        Security, 'kSecImportExportPassphrase'
    +        Security, "kSecImportExportPassphrase"
         )
         Security.kSecImportItemIdentity = CFStringRef.in_dll(
    -        Security, 'kSecImportItemIdentity'
    +        Security, "kSecImportItemIdentity"
         )
     
         # CoreFoundation time!
    -    CoreFoundation.CFRetain.argtypes = [
    -        CFTypeRef
    -    ]
    +    CoreFoundation.CFRetain.argtypes = [CFTypeRef]
         CoreFoundation.CFRetain.restype = CFTypeRef
     
    -    CoreFoundation.CFRelease.argtypes = [
    -        CFTypeRef
    -    ]
    +    CoreFoundation.CFRelease.argtypes = [CFTypeRef]
         CoreFoundation.CFRelease.restype = None
     
    -    CoreFoundation.CFGetTypeID.argtypes = [
    -        CFTypeRef
    -    ]
    +    CoreFoundation.CFGetTypeID.argtypes = [CFTypeRef]
         CoreFoundation.CFGetTypeID.restype = CFTypeID
     
         CoreFoundation.CFStringCreateWithCString.argtypes = [
             CFAllocatorRef,
             c_char_p,
    -        CFStringEncoding
    +        CFStringEncoding,
         ]
         CoreFoundation.CFStringCreateWithCString.restype = CFStringRef
     
    -    CoreFoundation.CFStringGetCStringPtr.argtypes = [
    -        CFStringRef,
    -        CFStringEncoding
    -    ]
    +    CoreFoundation.CFStringGetCStringPtr.argtypes = [CFStringRef, CFStringEncoding]
         CoreFoundation.CFStringGetCStringPtr.restype = c_char_p
     
         CoreFoundation.CFStringGetCString.argtypes = [
             CFStringRef,
             c_char_p,
             CFIndex,
    -        CFStringEncoding
    +        CFStringEncoding,
         ]
         CoreFoundation.CFStringGetCString.restype = c_bool
     
    -    CoreFoundation.CFDataCreate.argtypes = [
    -        CFAllocatorRef,
    -        c_char_p,
    -        CFIndex
    -    ]
    +    CoreFoundation.CFDataCreate.argtypes = [CFAllocatorRef, c_char_p, CFIndex]
         CoreFoundation.CFDataCreate.restype = CFDataRef
     
    -    CoreFoundation.CFDataGetLength.argtypes = [
    -        CFDataRef
    -    ]
    +    CoreFoundation.CFDataGetLength.argtypes = [CFDataRef]
         CoreFoundation.CFDataGetLength.restype = CFIndex
     
    -    CoreFoundation.CFDataGetBytePtr.argtypes = [
    -        CFDataRef
    -    ]
    +    CoreFoundation.CFDataGetBytePtr.argtypes = [CFDataRef]
         CoreFoundation.CFDataGetBytePtr.restype = c_void_p
     
         CoreFoundation.CFDictionaryCreate.argtypes = [
    @@ -435,14 +341,11 @@ try:
             POINTER(CFTypeRef),
             CFIndex,
             CFDictionaryKeyCallBacks,
    -        CFDictionaryValueCallBacks
    +        CFDictionaryValueCallBacks,
         ]
         CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef
     
    -    CoreFoundation.CFDictionaryGetValue.argtypes = [
    -        CFDictionaryRef,
    -        CFTypeRef
    -    ]
    +    CoreFoundation.CFDictionaryGetValue.argtypes = [CFDictionaryRef, CFTypeRef]
         CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef
     
         CoreFoundation.CFArrayCreate.argtypes = [
    @@ -456,36 +359,30 @@ try:
         CoreFoundation.CFArrayCreateMutable.argtypes = [
             CFAllocatorRef,
             CFIndex,
    -        CFArrayCallBacks
    +        CFArrayCallBacks,
         ]
         CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef
     
    -    CoreFoundation.CFArrayAppendValue.argtypes = [
    -        CFMutableArrayRef,
    -        c_void_p
    -    ]
    +    CoreFoundation.CFArrayAppendValue.argtypes = [CFMutableArrayRef, c_void_p]
         CoreFoundation.CFArrayAppendValue.restype = None
     
    -    CoreFoundation.CFArrayGetCount.argtypes = [
    -        CFArrayRef
    -    ]
    +    CoreFoundation.CFArrayGetCount.argtypes = [CFArrayRef]
         CoreFoundation.CFArrayGetCount.restype = CFIndex
     
    -    CoreFoundation.CFArrayGetValueAtIndex.argtypes = [
    -        CFArrayRef,
    -        CFIndex
    -    ]
    +    CoreFoundation.CFArrayGetValueAtIndex.argtypes = [CFArrayRef, CFIndex]
         CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p
     
         CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll(
    -        CoreFoundation, 'kCFAllocatorDefault'
    +        CoreFoundation, "kCFAllocatorDefault"
    +    )
    +    CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll(
    +        CoreFoundation, "kCFTypeArrayCallBacks"
         )
    -    CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll(CoreFoundation, 'kCFTypeArrayCallBacks')
         CoreFoundation.kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll(
    -        CoreFoundation, 'kCFTypeDictionaryKeyCallBacks'
    +        CoreFoundation, "kCFTypeDictionaryKeyCallBacks"
         )
         CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll(
    -        CoreFoundation, 'kCFTypeDictionaryValueCallBacks'
    +        CoreFoundation, "kCFTypeDictionaryValueCallBacks"
         )
     
         CoreFoundation.CFTypeRef = CFTypeRef
    @@ -494,7 +391,7 @@ try:
         CoreFoundation.CFDictionaryRef = CFDictionaryRef
     
     except (AttributeError):
    -    raise ImportError('Error initializing ctypes')
    +    raise ImportError("Error initializing ctypes")
     
     
     class CFConst(object):
    @@ -502,6 +399,7 @@ class CFConst(object):
         A class object that acts as essentially a namespace for CoreFoundation
         constants.
         """
    +
         kCFStringEncodingUTF8 = CFStringEncoding(0x08000100)
     
     
    @@ -509,6 +407,7 @@ class SecurityConst(object):
         """
         A class object that acts as essentially a namespace for Security constants.
         """
    +
         kSSLSessionOptionBreakOnServerAuth = 0
     
         kSSLProtocol2 = 1
    @@ -516,6 +415,8 @@ class SecurityConst(object):
         kTLSProtocol1 = 4
         kTLSProtocol11 = 7
         kTLSProtocol12 = 8
    +    kTLSProtocol13 = 10
    +    kTLSProtocolMaxSupported = 999
     
         kSSLClientSide = 1
         kSSLStreamType = 0
    @@ -558,30 +459,27 @@ class SecurityConst(object):
         errSecInvalidTrustSettings = -25262
     
         # Cipher suites. We only pick the ones our default cipher string allows.
    +    # Source: https://developer.apple.com/documentation/security/1550981-ssl_cipher_suite_values
         TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C
         TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030
         TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B
         TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F
    -    TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = 0x00A3
    +    TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA9
    +    TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA8
         TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F
    -    TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = 0x00A2
         TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E
         TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024
         TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028
         TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A
         TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014
         TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B
    -    TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = 0x006A
         TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039
    -    TLS_DHE_DSS_WITH_AES_256_CBC_SHA = 0x0038
         TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023
         TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027
         TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009
         TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013
         TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067
    -    TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = 0x0040
         TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033
    -    TLS_DHE_DSS_WITH_AES_128_CBC_SHA = 0x0032
         TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D
         TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C
         TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D
    @@ -590,4 +488,5 @@ class SecurityConst(object):
         TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F
         TLS_AES_128_GCM_SHA256 = 0x1301
         TLS_AES_256_GCM_SHA384 = 0x1302
    -    TLS_CHACHA20_POLY1305_SHA256 = 0x1303
    +    TLS_AES_128_CCM_8_SHA256 = 0x1305
    +    TLS_AES_128_CCM_SHA256 = 0x1304
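
Every rewrite in this file follows the same ctypes idiom: assign ``argtypes`` and ``restype`` on a foreign function before calling it, so ctypes can check arguments and convert results. A self-contained sketch against libc rather than the macOS-only Security framework (assumes a Unix-like platform). Note that ctypes silently accepts a misspelled attribute such as ``argtype``, so these names must be spelled exactly:

    import ctypes
    import ctypes.util

    libc = ctypes.CDLL(ctypes.util.find_library("c"), use_errno=True)

    # With these declarations, passing a str instead of bytes raises
    # ctypes.ArgumentError instead of corrupting the call.
    libc.strlen.argtypes = [ctypes.c_char_p]
    libc.strlen.restype = ctypes.c_size_t

    assert libc.strlen(b"urllib3") == 7
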
    diff --git a/lib/urllib3/contrib/_securetransport/low_level.py b/lib/urllib3/contrib/_securetransport/low_level.py
    index 5e3494bc..e60168ca 100644
    --- a/lib/urllib3/contrib/_securetransport/low_level.py
    +++ b/lib/urllib3/contrib/_securetransport/low_level.py
    @@ -66,22 +66,18 @@ def _cf_string_to_unicode(value):
         value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p))
     
         string = CoreFoundation.CFStringGetCStringPtr(
    -        value_as_void_p,
    -        CFConst.kCFStringEncodingUTF8
    +        value_as_void_p, CFConst.kCFStringEncodingUTF8
         )
         if string is None:
             buffer = ctypes.create_string_buffer(1024)
             result = CoreFoundation.CFStringGetCString(
    -            value_as_void_p,
    -            buffer,
    -            1024,
    -            CFConst.kCFStringEncodingUTF8
    +            value_as_void_p, buffer, 1024, CFConst.kCFStringEncodingUTF8
             )
             if not result:
    -            raise OSError('Error copying C string from CFStringRef')
    +            raise OSError("Error copying C string from CFStringRef")
             string = buffer.value
         if string is not None:
    -        string = string.decode('utf-8')
    +        string = string.decode("utf-8")
         return string
     
     
    @@ -97,8 +93,8 @@ def _assert_no_error(error, exception_class=None):
         output = _cf_string_to_unicode(cf_error_string)
         CoreFoundation.CFRelease(cf_error_string)
     
    -    if output is None or output == u'':
    -        output = u'OSStatus %s' % error
    +    if output is None or output == u"":
    +        output = u"OSStatus %s" % error
     
         if exception_class is None:
             exception_class = ssl.SSLError
    @@ -111,9 +107,11 @@ def _cert_array_from_pem(pem_bundle):
         Given a bundle of certs in PEM format, turns them into a CFArray of certs
         that can be used to validate a cert chain.
         """
    +    # Normalize the PEM bundle's line endings.
    +    pem_bundle = pem_bundle.replace(b"\r\n", b"\n")
    +
         der_certs = [
    -        base64.b64decode(match.group(1))
    -        for match in _PEM_CERTS_RE.finditer(pem_bundle)
    +        base64.b64decode(match.group(1)) for match in _PEM_CERTS_RE.finditer(pem_bundle)
         ]
         if not der_certs:
             raise ssl.SSLError("No root certificates specified")
    @@ -121,7 +119,7 @@ def _cert_array_from_pem(pem_bundle):
         cert_array = CoreFoundation.CFArrayCreateMutable(
             CoreFoundation.kCFAllocatorDefault,
             0,
    -        ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks)
    +        ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
         )
         if not cert_array:
             raise ssl.SSLError("Unable to allocate memory!")
    @@ -183,21 +181,16 @@ def _temporary_keychain():
         # some random bytes to password-protect the keychain we're creating, so we
         # ask for 40 random bytes.
         random_bytes = os.urandom(40)
    -    filename = base64.b64encode(random_bytes[:8]).decode('utf-8')
    -    password = base64.b64encode(random_bytes[8:])  # Must be valid UTF-8
    +    filename = base64.b16encode(random_bytes[:8]).decode("utf-8")
    +    password = base64.b16encode(random_bytes[8:])  # Must be valid UTF-8
         tempdirectory = tempfile.mkdtemp()
     
    -    keychain_path = os.path.join(tempdirectory, filename).encode('utf-8')
    +    keychain_path = os.path.join(tempdirectory, filename).encode("utf-8")
     
         # We now want to create the keychain itself.
         keychain = Security.SecKeychainRef()
         status = Security.SecKeychainCreate(
    -        keychain_path,
    -        len(password),
    -        password,
    -        False,
    -        None,
    -        ctypes.byref(keychain)
    +        keychain_path, len(password), password, False, None, ctypes.byref(keychain)
         )
         _assert_no_error(status)
     
    @@ -216,14 +209,12 @@ def _load_items_from_file(keychain, path):
         identities = []
         result_array = None
     
    -    with open(path, 'rb') as f:
    +    with open(path, "rb") as f:
             raw_filedata = f.read()
     
         try:
             filedata = CoreFoundation.CFDataCreate(
    -            CoreFoundation.kCFAllocatorDefault,
    -            raw_filedata,
    -            len(raw_filedata)
    +            CoreFoundation.kCFAllocatorDefault, raw_filedata, len(raw_filedata)
             )
             result_array = CoreFoundation.CFArrayRef()
             result = Security.SecItemImport(
    @@ -234,7 +225,7 @@ def _load_items_from_file(keychain, path):
                 0,  # import flags
                 None,  # key params, can include passphrase in the future
                 keychain,  # The keychain to insert into
    -            ctypes.byref(result_array)  # Results
    +            ctypes.byref(result_array),  # Results
             )
             _assert_no_error(result)
     
    @@ -244,9 +235,7 @@ def _load_items_from_file(keychain, path):
             # keychain already has them!
             result_count = CoreFoundation.CFArrayGetCount(result_array)
             for index in range(result_count):
    -            item = CoreFoundation.CFArrayGetValueAtIndex(
    -                result_array, index
    -            )
    +            item = CoreFoundation.CFArrayGetValueAtIndex(result_array, index)
                 item = ctypes.cast(item, CoreFoundation.CFTypeRef)
     
                 if _is_cert(item):
    @@ -304,9 +293,7 @@ def _load_client_cert_chain(keychain, *paths):
     
         try:
             for file_path in paths:
    -            new_identities, new_certs = _load_items_from_file(
    -                keychain, file_path
    -            )
    +            new_identities, new_certs = _load_items_from_file(keychain, file_path)
                 identities.extend(new_identities)
                 certificates.extend(new_certs)
     
    @@ -315,9 +302,7 @@ def _load_client_cert_chain(keychain, *paths):
             if not identities:
                 new_identity = Security.SecIdentityRef()
                 status = Security.SecIdentityCreateWithCertificate(
    -                keychain,
    -                certificates[0],
    -                ctypes.byref(new_identity)
    +                keychain, certificates[0], ctypes.byref(new_identity)
                 )
                 _assert_no_error(status)
                 identities.append(new_identity)
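
Two behavioral notes in this file: ``_cert_array_from_pem`` now normalizes line endings before the PEM regex runs, since the pattern expects Unix newlines and a CRLF bundle would otherwise yield no matches; and ``_temporary_keychain`` switched from base64 to base16 names, since base64 output can contain ``/``, which is invalid in a filename. A tiny illustration of the first point (dummy certificate body):

    pem = b"-----BEGIN CERTIFICATE-----\r\nAAAA\r\n-----END CERTIFICATE-----\r\n"
    pem = pem.replace(b"\r\n", b"\n")
    assert b"\r" not in pem  # safe for the newline-anchored PEM regex
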
    diff --git a/lib/urllib3/contrib/appengine.py b/lib/urllib3/contrib/appengine.py
    index 814b0222..01c91409 100644
    --- a/lib/urllib3/contrib/appengine.py
    +++ b/lib/urllib3/contrib/appengine.py
    @@ -39,8 +39,8 @@ urllib3 on Google App Engine:
     """
     
     from __future__ import absolute_import
    +import io
     import logging
    -import os
     import warnings
     from ..packages.six.moves.urllib.parse import urljoin
     
    @@ -50,14 +50,14 @@ from ..exceptions import (
         MaxRetryError,
         ProtocolError,
         TimeoutError,
    -    SSLError
    +    SSLError,
     )
     
    -from ..packages.six import BytesIO
     from ..request import RequestMethods
     from ..response import HTTPResponse
     from ..util.timeout import Timeout
     from ..util.retry import Retry
    +from . import _appengine_environ
     
     try:
         from google.appengine.api import urlfetch
    @@ -96,23 +96,31 @@ class AppEngineManager(RequestMethods):
         Beyond those cases, it will raise normal urllib3 errors.
         """
     
    -    def __init__(self, headers=None, retries=None, validate_certificate=True,
    -                 urlfetch_retries=True):
    +    def __init__(
    +        self,
    +        headers=None,
    +        retries=None,
    +        validate_certificate=True,
    +        urlfetch_retries=True,
    +    ):
             if not urlfetch:
                 raise AppEnginePlatformError(
    -                "URLFetch is not available in this environment.")
    +                "URLFetch is not available in this environment."
    +            )
     
             if is_prod_appengine_mvms():
                 raise AppEnginePlatformError(
                     "Use normal urllib3.PoolManager instead of AppEngineManager"
                     "on Managed VMs, as using URLFetch is not necessary in "
    -                "this environment.")
    +                "this environment."
    +            )
     
             warnings.warn(
                 "urllib3 is using URLFetch on Google App Engine sandbox instead "
                 "of sockets. To use sockets directly instead of URLFetch see "
                 "https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html.",
    -            AppEnginePlatformWarning)
    +            AppEnginePlatformWarning,
    +        )
     
             RequestMethods.__init__(self, headers)
             self.validate_certificate = validate_certificate
    @@ -127,17 +135,22 @@ class AppEngineManager(RequestMethods):
             # Return False to re-raise any potential exceptions
             return False
     
    -    def urlopen(self, method, url, body=None, headers=None,
    -                retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT,
    -                **response_kw):
    +    def urlopen(
    +        self,
    +        method,
    +        url,
    +        body=None,
    +        headers=None,
    +        retries=None,
    +        redirect=True,
    +        timeout=Timeout.DEFAULT_TIMEOUT,
    +        **response_kw
    +    ):
     
             retries = self._get_retries(retries, redirect)
     
             try:
    -            follow_redirects = (
    -                    redirect and
    -                    retries.redirect != 0 and
    -                    retries.total)
    +            follow_redirects = redirect and retries.redirect != 0 and retries.total
                 response = urlfetch.fetch(
                     url,
                     payload=body,
    @@ -152,44 +165,52 @@ class AppEngineManager(RequestMethods):
                 raise TimeoutError(self, e)
     
             except urlfetch.InvalidURLError as e:
    -            if 'too large' in str(e):
    +            if "too large" in str(e):
                     raise AppEnginePlatformError(
                         "URLFetch request too large, URLFetch only "
    -                    "supports requests up to 10mb in size.", e)
    +                    "supports requests up to 10mb in size.",
    +                    e,
    +                )
                 raise ProtocolError(e)
     
             except urlfetch.DownloadError as e:
    -            if 'Too many redirects' in str(e):
    +            if "Too many redirects" in str(e):
                     raise MaxRetryError(self, url, reason=e)
                 raise ProtocolError(e)
     
             except urlfetch.ResponseTooLargeError as e:
                 raise AppEnginePlatformError(
                     "URLFetch response too large, URLFetch only supports"
    -                "responses up to 32mb in size.", e)
    +                "responses up to 32mb in size.",
    +                e,
    +            )
     
             except urlfetch.SSLCertificateError as e:
                 raise SSLError(e)
     
             except urlfetch.InvalidMethodError as e:
                 raise AppEnginePlatformError(
    -                "URLFetch does not support method: %s" % method, e)
    +                "URLFetch does not support method: %s" % method, e
    +            )
     
             http_response = self._urlfetch_response_to_http_response(
    -            response, retries=retries, **response_kw)
    +            response, retries=retries, **response_kw
    +        )
     
             # Handle redirect?
             redirect_location = redirect and http_response.get_redirect_location()
             if redirect_location:
                 # Check for redirect response
    -            if (self.urlfetch_retries and retries.raise_on_redirect):
    +            if self.urlfetch_retries and retries.raise_on_redirect:
                     raise MaxRetryError(self, url, "too many redirects")
                 else:
                     if http_response.status == 303:
    -                    method = 'GET'
    +                    method = "GET"
     
                     try:
    -                    retries = retries.increment(method, url, response=http_response, _pool=self)
    +                    retries = retries.increment(
    +                        method, url, response=http_response, _pool=self
    +                    )
                     except MaxRetryError:
                         if retries.raise_on_redirect:
                             raise MaxRetryError(self, url, "too many redirects")
    @@ -199,22 +220,32 @@ class AppEngineManager(RequestMethods):
                     log.debug("Redirecting %s -> %s", url, redirect_location)
                     redirect_url = urljoin(url, redirect_location)
                     return self.urlopen(
    -                    method, redirect_url, body, headers,
    -                    retries=retries, redirect=redirect,
    -                    timeout=timeout, **response_kw)
    +                    method,
    +                    redirect_url,
    +                    body,
    +                    headers,
    +                    retries=retries,
    +                    redirect=redirect,
    +                    timeout=timeout,
    +                    **response_kw
    +                )
     
             # Check if we should retry the HTTP response.
    -        has_retry_after = bool(http_response.getheader('Retry-After'))
    +        has_retry_after = bool(http_response.getheader("Retry-After"))
             if retries.is_retry(method, http_response.status, has_retry_after):
    -            retries = retries.increment(
    -                method, url, response=http_response, _pool=self)
    +            retries = retries.increment(method, url, response=http_response, _pool=self)
                 log.debug("Retry: %s", url)
                 retries.sleep(http_response)
                 return self.urlopen(
    -                method, url,
    -                body=body, headers=headers,
    -                retries=retries, redirect=redirect,
    -                timeout=timeout, **response_kw)
    +                method,
    +                url,
    +                body=body,
    +                headers=headers,
    +                retries=retries,
    +                redirect=redirect,
    +                timeout=timeout,
    +                **response_kw
    +            )
     
             return http_response
     
    @@ -223,28 +254,37 @@ class AppEngineManager(RequestMethods):
             if is_prod_appengine():
                 # Production GAE handles deflate encoding automatically, but does
                 # not remove the encoding header.
    -            content_encoding = urlfetch_resp.headers.get('content-encoding')
    +            content_encoding = urlfetch_resp.headers.get("content-encoding")
     
    -            if content_encoding == 'deflate':
    -                del urlfetch_resp.headers['content-encoding']
    +            if content_encoding == "deflate":
    +                del urlfetch_resp.headers["content-encoding"]
     
    -        transfer_encoding = urlfetch_resp.headers.get('transfer-encoding')
    +        transfer_encoding = urlfetch_resp.headers.get("transfer-encoding")
             # We have a full response's content,
             # so let's make sure we don't report ourselves as chunked data.
    -        if transfer_encoding == 'chunked':
    +        if transfer_encoding == "chunked":
                 encodings = transfer_encoding.split(",")
    -            encodings.remove('chunked')
    -            urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings)
    +            encodings.remove("chunked")
    +            urlfetch_resp.headers["transfer-encoding"] = ",".join(encodings)
     
    -        return HTTPResponse(
    +        original_response = HTTPResponse(
                 # In order for decoding to work, we must present the content as
                 # a file-like object.
    -            body=BytesIO(urlfetch_resp.content),
    +            body=io.BytesIO(urlfetch_resp.content),
    +            msg=urlfetch_resp.header_msg,
                 headers=urlfetch_resp.headers,
                 status=urlfetch_resp.status_code,
                 **response_kw
             )
     
    +        return HTTPResponse(
    +            body=io.BytesIO(urlfetch_resp.content),
    +            headers=urlfetch_resp.headers,
    +            status=urlfetch_resp.status_code,
    +            original_response=original_response,
    +            **response_kw
    +        )
    +
         def _get_absolute_timeout(self, timeout):
             if timeout is Timeout.DEFAULT_TIMEOUT:
                 return None  # Defer to URLFetch's default.
    @@ -253,44 +293,29 @@ class AppEngineManager(RequestMethods):
                     warnings.warn(
                         "URLFetch does not support granular timeout settings, "
                         "reverting to total or default URLFetch timeout.",
    -                    AppEnginePlatformWarning)
    +                    AppEnginePlatformWarning,
    +                )
                 return timeout.total
             return timeout
     
         def _get_retries(self, retries, redirect):
             if not isinstance(retries, Retry):
    -            retries = Retry.from_int(
    -                retries, redirect=redirect, default=self.retries)
    +            retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
     
             if retries.connect or retries.read or retries.redirect:
                 warnings.warn(
                     "URLFetch only supports total retries and does not "
                     "recognize connect, read, or redirect retry parameters.",
    -                AppEnginePlatformWarning)
    +                AppEnginePlatformWarning,
    +            )
     
             return retries
     
     
    -def is_appengine():
    -    return (is_local_appengine() or
    -            is_prod_appengine() or
    -            is_prod_appengine_mvms())
    +# Alias methods from _appengine_environ to maintain public API interface.
     
    -
    -def is_appengine_sandbox():
    -    return is_appengine() and not is_prod_appengine_mvms()
    -
    -
    -def is_local_appengine():
    -    return ('APPENGINE_RUNTIME' in os.environ and
    -            'Development/' in os.environ['SERVER_SOFTWARE'])
    -
    -
    -def is_prod_appengine():
    -    return ('APPENGINE_RUNTIME' in os.environ and
    -            'Google App Engine/' in os.environ['SERVER_SOFTWARE'] and
    -            not is_prod_appengine_mvms())
    -
    -
    -def is_prod_appengine_mvms():
    -    return os.environ.get('GAE_VM', False) == 'true'
    +is_appengine = _appengine_environ.is_appengine
    +is_appengine_sandbox = _appengine_environ.is_appengine_sandbox
    +is_local_appengine = _appengine_environ.is_local_appengine
    +is_prod_appengine = _appengine_environ.is_prod_appengine
    +is_prod_appengine_mvms = _appengine_environ.is_prod_appengine_mvms
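
For reference, a hedged usage sketch mirroring the urllib3 documentation: pick the URLFetch-backed manager only when the aliased environment checks above say we are inside the first-generation App Engine sandbox (the URL is a placeholder)::

    from urllib3 import PoolManager
    from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox

    if is_appengine_sandbox():
        http = AppEngineManager()  # URLFetch-backed manager defined above
    else:
        http = PoolManager()       # regular sockets everywhere else

    r = http.request("GET", "https://example.com/")
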
    diff --git a/lib/urllib3/contrib/ntlmpool.py b/lib/urllib3/contrib/ntlmpool.py
    index 642e99ed..9c96be29 100644
    --- a/lib/urllib3/contrib/ntlmpool.py
    +++ b/lib/urllib3/contrib/ntlmpool.py
    @@ -20,7 +20,7 @@ class NTLMConnectionPool(HTTPSConnectionPool):
         Implements an NTLM authentication version of an urllib3 connection pool
         """
     
    -    scheme = 'https'
    +    scheme = "https"
     
         def __init__(self, user, pw, authurl, *args, **kwargs):
             """
    @@ -31,7 +31,7 @@ class NTLMConnectionPool(HTTPSConnectionPool):
             super(NTLMConnectionPool, self).__init__(*args, **kwargs)
             self.authurl = authurl
             self.rawuser = user
    -        user_parts = user.split('\\', 1)
    +        user_parts = user.split("\\", 1)
             self.domain = user_parts[0].upper()
             self.user = user_parts[1]
             self.pw = pw
    @@ -40,73 +40,84 @@ class NTLMConnectionPool(HTTPSConnectionPool):
             # Performs the NTLM handshake that secures the connection. The socket
             # must be kept open while requests are performed.
             self.num_connections += 1
    -        log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s',
    -                  self.num_connections, self.host, self.authurl)
    +        log.debug(
    +            "Starting NTLM HTTPS connection no. %d: https://%s%s",
    +            self.num_connections,
    +            self.host,
    +            self.authurl,
    +        )
     
    -        headers = {}
    -        headers['Connection'] = 'Keep-Alive'
    -        req_header = 'Authorization'
    -        resp_header = 'www-authenticate'
    +        headers = {"Connection": "Keep-Alive"}
    +        req_header = "Authorization"
    +        resp_header = "www-authenticate"
     
             conn = HTTPSConnection(host=self.host, port=self.port)
     
             # Send negotiation message
    -        headers[req_header] = (
    -            'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
    -        log.debug('Request headers: %s', headers)
    -        conn.request('GET', self.authurl, None, headers)
    +        headers[req_header] = "NTLM %s" % ntlm.create_NTLM_NEGOTIATE_MESSAGE(
    +            self.rawuser
    +        )
    +        log.debug("Request headers: %s", headers)
    +        conn.request("GET", self.authurl, None, headers)
             res = conn.getresponse()
             reshdr = dict(res.getheaders())
    -        log.debug('Response status: %s %s', res.status, res.reason)
    -        log.debug('Response headers: %s', reshdr)
    -        log.debug('Response data: %s [...]', res.read(100))
    +        log.debug("Response status: %s %s", res.status, res.reason)
    +        log.debug("Response headers: %s", reshdr)
    +        log.debug("Response data: %s [...]", res.read(100))
     
             # Remove the reference to the socket, so that it can not be closed by
             # the response object (we want to keep the socket open)
             res.fp = None
     
             # Server should respond with a challenge message
    -        auth_header_values = reshdr[resp_header].split(', ')
    +        auth_header_values = reshdr[resp_header].split(", ")
             auth_header_value = None
             for s in auth_header_values:
    -            if s[:5] == 'NTLM ':
    +            if s[:5] == "NTLM ":
                     auth_header_value = s[5:]
             if auth_header_value is None:
    -            raise Exception('Unexpected %s response header: %s' %
    -                            (resp_header, reshdr[resp_header]))
    +            raise Exception(
    +                "Unexpected %s response header: %s" % (resp_header, reshdr[resp_header])
    +            )
     
             # Send authentication message
    -        ServerChallenge, NegotiateFlags = \
    -            ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
    -        auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
    -                                                         self.user,
    -                                                         self.domain,
    -                                                         self.pw,
    -                                                         NegotiateFlags)
    -        headers[req_header] = 'NTLM %s' % auth_msg
    -        log.debug('Request headers: %s', headers)
    -        conn.request('GET', self.authurl, None, headers)
    +        ServerChallenge, NegotiateFlags = ntlm.parse_NTLM_CHALLENGE_MESSAGE(
    +            auth_header_value
    +        )
    +        auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(
    +            ServerChallenge, self.user, self.domain, self.pw, NegotiateFlags
    +        )
    +        headers[req_header] = "NTLM %s" % auth_msg
    +        log.debug("Request headers: %s", headers)
    +        conn.request("GET", self.authurl, None, headers)
             res = conn.getresponse()
    -        log.debug('Response status: %s %s', res.status, res.reason)
    -        log.debug('Response headers: %s', dict(res.getheaders()))
    -        log.debug('Response data: %s [...]', res.read()[:100])
    +        log.debug("Response status: %s %s", res.status, res.reason)
    +        log.debug("Response headers: %s", dict(res.getheaders()))
    +        log.debug("Response data: %s [...]", res.read()[:100])
             if res.status != 200:
                 if res.status == 401:
    -                raise Exception('Server rejected request: wrong '
    -                                'username or password')
    -            raise Exception('Wrong server response: %s %s' %
    -                            (res.status, res.reason))
    +                raise Exception(
    +                    "Server rejected request: wrong " "username or password"
    +                )
    +            raise Exception("Wrong server response: %s %s" % (res.status, res.reason))
     
             res.fp = None
    -        log.debug('Connection established')
    +        log.debug("Connection established")
             return conn
     
    -    def urlopen(self, method, url, body=None, headers=None, retries=3,
    -                redirect=True, assert_same_host=True):
    +    def urlopen(
    +        self,
    +        method,
    +        url,
    +        body=None,
    +        headers=None,
    +        retries=3,
    +        redirect=True,
    +        assert_same_host=True,
    +    ):
             if headers is None:
                 headers = {}
    -        headers['Connection'] = 'Keep-Alive'
    -        return super(NTLMConnectionPool, self).urlopen(method, url, body,
    -                                                       headers, retries,
    -                                                       redirect,
    -                                                       assert_same_host)
    +        headers["Connection"] = "Keep-Alive"
    +        return super(NTLMConnectionPool, self).urlopen(
    +            method, url, body, headers, retries, redirect, assert_same_host
    +        )
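
A hypothetical usage sketch of the pool above (it requires the third-party ``ntlm`` package; host and credentials are placeholders)::

    from urllib3.contrib.ntlmpool import NTLMConnectionPool

    pool = NTLMConnectionPool(
        user="EXAMPLE\\alice",    # "DOMAIN\\user", split on the first backslash
        pw="secret",
        authurl="/",              # resource used for the NTLM handshake
        host="intranet.example.com",
    )
    r = pool.urlopen("GET", "/")  # urlopen() above forces Connection: Keep-Alive
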
    diff --git a/lib/urllib3/contrib/pyopenssl.py b/lib/urllib3/contrib/pyopenssl.py
    new file mode 100644
    index 00000000..3051ef3a
    --- /dev/null
    +++ b/lib/urllib3/contrib/pyopenssl.py
    @@ -0,0 +1,498 @@
    +"""
    +SSL with SNI_-support for Python 2. Follow these instructions if you would
    +like to verify SSL certificates in Python 2. Note, the default libraries do
    +*not* do certificate checking; you need to do additional work to validate
    +certificates yourself.
    +
    +This needs the following packages installed:
    +
    +* pyOpenSSL (tested with 16.0.0)
    +* cryptography (minimum 1.3.4, from pyopenssl)
    +* idna (minimum 2.0, from cryptography)
    +
    +However, pyopenssl depends on cryptography, which depends on idna, so while we
    +use all three directly here we end up having relatively few packages required.
    +
    +You can install them with the following command:
    +
    +    pip install pyopenssl cryptography idna
    +
    +To activate certificate checking, call
    +:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
    +before you begin making HTTP requests. This can be done in a ``sitecustomize``
    +module, or at any other time before your application begins using ``urllib3``,
    +like this::
    +
    +    try:
    +        import urllib3.contrib.pyopenssl
    +        urllib3.contrib.pyopenssl.inject_into_urllib3()
    +    except ImportError:
    +        pass
    +
    +Now you can use :mod:`urllib3` as you normally would, and it will support SNI
    +when the required modules are installed.
    +
    +Activating this module also has the positive side effect of disabling SSL/TLS
    +compression in Python 2 (see `CRIME attack`_).
    +
    +If you want to configure the default list of supported cipher suites, you can
    +set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
    +
    +.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
    +.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
    +"""
    +from __future__ import absolute_import
    +
    +import OpenSSL.SSL
    +from cryptography import x509
    +from cryptography.hazmat.backends.openssl import backend as openssl_backend
    +from cryptography.hazmat.backends.openssl.x509 import _Certificate
    +
    +try:
    +    from cryptography.x509 import UnsupportedExtension
    +except ImportError:
    +    # UnsupportedExtension is gone in cryptography >= 2.1.0
    +    class UnsupportedExtension(Exception):
    +        pass
    +
    +
    +from socket import timeout, error as SocketError
    +from io import BytesIO
    +
    +try:  # Platform-specific: Python 2
    +    from socket import _fileobject
    +except ImportError:  # Platform-specific: Python 3
    +    _fileobject = None
    +    from ..packages.backports.makefile import backport_makefile
    +
    +import logging
    +import ssl
    +from ..packages import six
    +import sys
    +
    +from .. import util
    +
    +
    +__all__ = ["inject_into_urllib3", "extract_from_urllib3"]
    +
    +# SNI always works.
    +HAS_SNI = True
    +
    +# Map from urllib3 to PyOpenSSL compatible parameter-values.
    +_openssl_versions = {
    +    util.PROTOCOL_TLS: OpenSSL.SSL.SSLv23_METHOD,
    +    ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
    +}
    +
    +if hasattr(ssl, "PROTOCOL_SSLv3") and hasattr(OpenSSL.SSL, "SSLv3_METHOD"):
    +    _openssl_versions[ssl.PROTOCOL_SSLv3] = OpenSSL.SSL.SSLv3_METHOD
    +
    +if hasattr(ssl, "PROTOCOL_TLSv1_1") and hasattr(OpenSSL.SSL, "TLSv1_1_METHOD"):
    +    _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD
    +
    +if hasattr(ssl, "PROTOCOL_TLSv1_2") and hasattr(OpenSSL.SSL, "TLSv1_2_METHOD"):
    +    _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD
    +
    +
    +_stdlib_to_openssl_verify = {
    +    ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
    +    ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
    +    ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER
    +    + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
    +}
    +_openssl_to_stdlib_verify = dict((v, k) for k, v in _stdlib_to_openssl_verify.items())
    +
    +# OpenSSL will only write 16K at a time
    +SSL_WRITE_BLOCKSIZE = 16384
    +
    +orig_util_HAS_SNI = util.HAS_SNI
    +orig_util_SSLContext = util.ssl_.SSLContext
    +
    +
    +log = logging.getLogger(__name__)
    +
    +
    +def inject_into_urllib3():
    +    "Monkey-patch urllib3 with PyOpenSSL-backed SSL-support."
    +
    +    _validate_dependencies_met()
    +
    +    util.SSLContext = PyOpenSSLContext
    +    util.ssl_.SSLContext = PyOpenSSLContext
    +    util.HAS_SNI = HAS_SNI
    +    util.ssl_.HAS_SNI = HAS_SNI
    +    util.IS_PYOPENSSL = True
    +    util.ssl_.IS_PYOPENSSL = True
    +
    +
    +def extract_from_urllib3():
    +    "Undo monkey-patching by :func:`inject_into_urllib3`."
    +
    +    util.SSLContext = orig_util_SSLContext
    +    util.ssl_.SSLContext = orig_util_SSLContext
    +    util.HAS_SNI = orig_util_HAS_SNI
    +    util.ssl_.HAS_SNI = orig_util_HAS_SNI
    +    util.IS_PYOPENSSL = False
    +    util.ssl_.IS_PYOPENSSL = False
    +
    +
    +def _validate_dependencies_met():
    +    """
    +    Verifies that PyOpenSSL's package-level dependencies have been met.
    +    Throws `ImportError` if they are not met.
    +    """
    +    # Method added in `cryptography==1.1`; not available in older versions
    +    from cryptography.x509.extensions import Extensions
    +
    +    if getattr(Extensions, "get_extension_for_class", None) is None:
    +        raise ImportError(
    +            "'cryptography' module missing required functionality.  "
    +            "Try upgrading to v1.3.4 or newer."
    +        )
    +
    +    # pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509
    +    # attribute is only present on those versions.
    +    from OpenSSL.crypto import X509
    +
    +    x509 = X509()
    +    if getattr(x509, "_x509", None) is None:
    +        raise ImportError(
    +            "'pyOpenSSL' module missing required functionality. "
    +            "Try upgrading to v0.14 or newer."
    +        )
    +
    +
    +def _dnsname_to_stdlib(name):
    +    """
    +    Converts a dNSName SubjectAlternativeName field to the form used by the
    +    standard library on the given Python version.
    +
    +    Cryptography produces a dNSName as a unicode string that was idna-decoded
    +    from ASCII bytes. We need to idna-encode that string to get it back, and
    +    then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib
    +    uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8).
    +
    +    If the name cannot be idna-encoded then we return None signalling that
    +    the name given should be skipped.
    +    """
    +
    +    def idna_encode(name):
    +        """
    +        Borrowed wholesale from the Python Cryptography Project. It turns out
    +        that we can't just safely call `idna.encode`: it can explode for
    +        wildcard names. This avoids that problem.
    +        """
    +        import idna
    +
    +        try:
    +            for prefix in [u"*.", u"."]:
    +                if name.startswith(prefix):
    +                    name = name[len(prefix) :]
    +                    return prefix.encode("ascii") + idna.encode(name)
    +            return idna.encode(name)
    +        except idna.core.IDNAError:
    +            return None
    +
    +    # Don't send IPv6 addresses through the IDNA encoder.
    +    if ":" in name:
    +        return name
    +
    +    name = idna_encode(name)
    +    if name is None:
    +        return None
    +    elif sys.version_info >= (3, 0):
    +        name = name.decode("utf-8")
    +    return name
    +
    +
    +def get_subj_alt_name(peer_cert):
    +    """
    +    Given an PyOpenSSL certificate, provides all the subject alternative names.
    +    """
    +    # Pass the cert to cryptography, which has much better APIs for this.
    +    if hasattr(peer_cert, "to_cryptography"):
    +        cert = peer_cert.to_cryptography()
    +    else:
    +        # This is technically using private APIs, but should work across all
    +        # relevant versions before PyOpenSSL got a proper API for this.
    +        cert = _Certificate(openssl_backend, peer_cert._x509)
    +
    +    # We want to find the SAN extension. Ask Cryptography to locate it (it's
    +    # faster than looping in Python)
    +    try:
    +        ext = cert.extensions.get_extension_for_class(x509.SubjectAlternativeName).value
    +    except x509.ExtensionNotFound:
    +        # No such extension, return the empty list.
    +        return []
    +    except (
    +        x509.DuplicateExtension,
    +        UnsupportedExtension,
    +        x509.UnsupportedGeneralNameType,
    +        UnicodeError,
    +    ) as e:
    +        # A problem has been found with the quality of the certificate. Assume
    +        # no SAN field is present.
    +        log.warning(
    +            "A problem was encountered with the certificate that prevented "
    +            "urllib3 from finding the SubjectAlternativeName field. This can "
    +            "affect certificate validation. The error was %s",
    +            e,
    +        )
    +        return []
    +
    +    # We want to return dNSName and iPAddress fields. We need to cast the IPs
    +    # back to strings because the match_hostname function wants them as
    +    # strings.
    +    # Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8
    +    # decoded. This is pretty frustrating, but that's what the standard library
    +    # does with certificates, and so we need to attempt to do the same.
    +    # We also want to skip over names which cannot be idna encoded.
    +    names = [
    +        ("DNS", name)
    +        for name in map(_dnsname_to_stdlib, ext.get_values_for_type(x509.DNSName))
    +        if name is not None
    +    ]
    +    names.extend(
    +        ("IP Address", str(name)) for name in ext.get_values_for_type(x509.IPAddress)
    +    )
    +
    +    return names
    +
    +
    +class WrappedSocket(object):
    +    """API-compatibility wrapper for Python OpenSSL's Connection-class.
    +
    +    Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
    +    collector of pypy.
    +    """
    +
    +    def __init__(self, connection, socket, suppress_ragged_eofs=True):
    +        self.connection = connection
    +        self.socket = socket
    +        self.suppress_ragged_eofs = suppress_ragged_eofs
    +        self._makefile_refs = 0
    +        self._closed = False
    +
    +    def fileno(self):
    +        return self.socket.fileno()
    +
    +    # Copy-pasted from Python 3.5 source code
    +    def _decref_socketios(self):
    +        if self._makefile_refs > 0:
    +            self._makefile_refs -= 1
    +        if self._closed:
    +            self.close()
    +
    +    def recv(self, *args, **kwargs):
    +        try:
    +            data = self.connection.recv(*args, **kwargs)
    +        except OpenSSL.SSL.SysCallError as e:
    +            if self.suppress_ragged_eofs and e.args == (-1, "Unexpected EOF"):
    +                return b""
    +            else:
    +                raise SocketError(str(e))
    +        except OpenSSL.SSL.ZeroReturnError:
    +            if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
    +                return b""
    +            else:
    +                raise
    +        except OpenSSL.SSL.WantReadError:
    +            if not util.wait_for_read(self.socket, self.socket.gettimeout()):
    +                raise timeout("The read operation timed out")
    +            else:
    +                return self.recv(*args, **kwargs)
    +
    +        # TLS 1.3 post-handshake authentication
    +        except OpenSSL.SSL.Error as e:
    +            raise ssl.SSLError("read error: %r" % e)
    +        else:
    +            return data
    +
    +    def recv_into(self, *args, **kwargs):
    +        try:
    +            return self.connection.recv_into(*args, **kwargs)
    +        except OpenSSL.SSL.SysCallError as e:
    +            if self.suppress_ragged_eofs and e.args == (-1, "Unexpected EOF"):
    +                return 0
    +            else:
    +                raise SocketError(str(e))
    +        except OpenSSL.SSL.ZeroReturnError:
    +            if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
    +                return 0
    +            else:
    +                raise
    +        except OpenSSL.SSL.WantReadError:
    +            if not util.wait_for_read(self.socket, self.socket.gettimeout()):
    +                raise timeout("The read operation timed out")
    +            else:
    +                return self.recv_into(*args, **kwargs)
    +
    +        # TLS 1.3 post-handshake authentication
    +        except OpenSSL.SSL.Error as e:
    +            raise ssl.SSLError("read error: %r" % e)
    +
    +    def settimeout(self, timeout):
    +        return self.socket.settimeout(timeout)
    +
    +    def _send_until_done(self, data):
    +        while True:
    +            try:
    +                return self.connection.send(data)
    +            except OpenSSL.SSL.WantWriteError:
    +                if not util.wait_for_write(self.socket, self.socket.gettimeout()):
    +                    raise timeout()
    +                continue
    +            except OpenSSL.SSL.SysCallError as e:
    +                raise SocketError(str(e))
    +
    +    def sendall(self, data):
    +        total_sent = 0
    +        while total_sent < len(data):
    +            sent = self._send_until_done(
    +                data[total_sent : total_sent + SSL_WRITE_BLOCKSIZE]
    +            )
    +            total_sent += sent
    +
    +    def shutdown(self):
    +        # FIXME rethrow compatible exceptions should we ever use this
    +        self.connection.shutdown()
    +
    +    def close(self):
    +        if self._makefile_refs < 1:
    +            try:
    +                self._closed = True
    +                return self.connection.close()
    +            except OpenSSL.SSL.Error:
    +                return
    +        else:
    +            self._makefile_refs -= 1
    +
    +    def getpeercert(self, binary_form=False):
    +        x509 = self.connection.get_peer_certificate()
    +
    +        if not x509:
    +            return x509
    +
    +        if binary_form:
    +            return OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_ASN1, x509)
    +
    +        return {
    +            "subject": ((("commonName", x509.get_subject().CN),),),
    +            "subjectAltName": get_subj_alt_name(x509),
    +        }
    +
    +    def version(self):
    +        return self.connection.get_protocol_version_name()
    +
    +    def _reuse(self):
    +        self._makefile_refs += 1
    +
    +    def _drop(self):
    +        if self._makefile_refs < 1:
    +            self.close()
    +        else:
    +            self._makefile_refs -= 1
    +
    +
    +if _fileobject:  # Platform-specific: Python 2
    +
    +    def makefile(self, mode, bufsize=-1):
    +        self._makefile_refs += 1
    +        return _fileobject(self, mode, bufsize, close=True)
    +
    +
    +else:  # Platform-specific: Python 3
    +    makefile = backport_makefile
    +
    +WrappedSocket.makefile = makefile
    +
    +
    +class PyOpenSSLContext(object):
    +    """
    +    I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible
    +    for translating the interface of the standard library ``SSLContext`` object
    +    to calls into PyOpenSSL.
    +    """
    +
    +    def __init__(self, protocol):
    +        self.protocol = _openssl_versions[protocol]
    +        self._ctx = OpenSSL.SSL.Context(self.protocol)
    +        self._options = 0
    +        self.check_hostname = False
    +
    +    @property
    +    def options(self):
    +        return self._options
    +
    +    @options.setter
    +    def options(self, value):
    +        self._options = value
    +        self._ctx.set_options(value)
    +
    +    @property
    +    def verify_mode(self):
    +        return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()]
    +
    +    @verify_mode.setter
    +    def verify_mode(self, value):
    +        self._ctx.set_verify(_stdlib_to_openssl_verify[value], _verify_callback)
    +
    +    def set_default_verify_paths(self):
    +        self._ctx.set_default_verify_paths()
    +
    +    def set_ciphers(self, ciphers):
    +        if isinstance(ciphers, six.text_type):
    +            ciphers = ciphers.encode("utf-8")
    +        self._ctx.set_cipher_list(ciphers)
    +
    +    def load_verify_locations(self, cafile=None, capath=None, cadata=None):
    +        if cafile is not None:
    +            cafile = cafile.encode("utf-8")
    +        if capath is not None:
    +            capath = capath.encode("utf-8")
    +        self._ctx.load_verify_locations(cafile, capath)
    +        if cadata is not None:
    +            self._ctx.load_verify_locations(BytesIO(cadata))
    +
    +    def load_cert_chain(self, certfile, keyfile=None, password=None):
    +        self._ctx.use_certificate_chain_file(certfile)
    +        if password is not None:
    +            if not isinstance(password, six.binary_type):
    +                password = password.encode("utf-8")
    +            self._ctx.set_passwd_cb(lambda *_: password)
    +        self._ctx.use_privatekey_file(keyfile or certfile)
    +
    +    def wrap_socket(
    +        self,
    +        sock,
    +        server_side=False,
    +        do_handshake_on_connect=True,
    +        suppress_ragged_eofs=True,
    +        server_hostname=None,
    +    ):
    +        cnx = OpenSSL.SSL.Connection(self._ctx, sock)
    +
    +        if isinstance(server_hostname, six.text_type):  # Platform-specific: Python 3
    +            server_hostname = server_hostname.encode("utf-8")
    +
    +        if server_hostname is not None:
    +            cnx.set_tlsext_host_name(server_hostname)
    +
    +        cnx.set_connect_state()
    +
    +        while True:
    +            try:
    +                cnx.do_handshake()
    +            except OpenSSL.SSL.WantReadError:
    +                if not util.wait_for_read(sock, sock.gettimeout()):
    +                    raise timeout("select timed out")
    +                continue
    +            except OpenSSL.SSL.Error as e:
    +                raise ssl.SSLError("bad handshake: %r" % e)
    +            break
    +
    +        return WrappedSocket(cnx, sock)
    +
    +
    +def _verify_callback(cnx, x509, err_no, err_depth, return_code):
    +    return err_no == 0
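
The module docstring above already shows injection at import time; for completeness, a hedged round-trip sketch that also restores the stdlib backend afterwards (the URL is a placeholder, and without an explicit ``ca_certs`` verification falls back to the platform's default CA store)::

    import urllib3
    import urllib3.contrib.pyopenssl

    urllib3.contrib.pyopenssl.inject_into_urllib3()
    try:
        http = urllib3.PoolManager(cert_reqs="CERT_REQUIRED")
        r = http.request("GET", "https://example.com/")  # SNI + verification via PyOpenSSL
        print(r.status)
    finally:
        urllib3.contrib.pyopenssl.extract_from_urllib3()  # restore the stdlib ssl backend
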
    diff --git a/lib/urllib3/contrib/securetransport.py b/lib/urllib3/contrib/securetransport.py
    index 2cac70f7..24e6b5c4 100644
    --- a/lib/urllib3/contrib/securetransport.py
    +++ b/lib/urllib3/contrib/securetransport.py
    @@ -23,6 +23,31 @@ To use this module, simply import and inject it::
         urllib3.contrib.securetransport.inject_into_urllib3()
     
     Happy TLSing!
    +
    +This code is a bastardised version of the code found in Will Bond's oscrypto
    +library. An enormous debt is owed to him for blazing this trail for us. For
    +that reason, this code should be considered to be covered both by urllib3's
    +license and by oscrypto's:
    +
+    Copyright (c) 2015-2016 Will Bond <will@wbond.net>
    +
    +    Permission is hereby granted, free of charge, to any person obtaining a
    +    copy of this software and associated documentation files (the "Software"),
    +    to deal in the Software without restriction, including without limitation
    +    the rights to use, copy, modify, merge, publish, distribute, sublicense,
    +    and/or sell copies of the Software, and to permit persons to whom the
    +    Software is furnished to do so, subject to the following conditions:
    +
    +    The above copyright notice and this permission notice shall be included in
    +    all copies or substantial portions of the Software.
    +
    +    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    +    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    +    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    +    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    +    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    +    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    +    DEALINGS IN THE SOFTWARE.
     """
     from __future__ import absolute_import
     
    @@ -37,12 +62,12 @@ import threading
     import weakref
     
     from .. import util
    -from ._securetransport.bindings import (
    -    Security, SecurityConst, CoreFoundation
    -)
    +from ._securetransport.bindings import Security, SecurityConst, CoreFoundation
     from ._securetransport.low_level import (
    -    _assert_no_error, _cert_array_from_pem, _temporary_keychain,
    -    _load_client_cert_chain
    +    _assert_no_error,
    +    _cert_array_from_pem,
    +    _temporary_keychain,
    +    _load_client_cert_chain,
     )
     
     try:  # Platform-specific: Python 2
    @@ -51,12 +76,7 @@ except ImportError:  # Platform-specific: Python 3
         _fileobject = None
         from ..packages.backports.makefile import backport_makefile
     
    -try:
    -    memoryview(b'')
    -except NameError:
    -    raise ImportError("SecureTransport only works on Pythons with memoryview")
    -
    -__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
    +__all__ = ["inject_into_urllib3", "extract_from_urllib3"]
     
     # SNI always works
     HAS_SNI = True
    @@ -88,38 +108,35 @@ _connection_ref_lock = threading.Lock()
     SSL_WRITE_BLOCKSIZE = 16384
     
     # This is our equivalent of util.ssl_.DEFAULT_CIPHERS, but expanded out to
    -# individual cipher suites. We need to do this becuase this is how
    +# individual cipher suites. We need to do this because this is how
     # SecureTransport wants them.
     CIPHER_SUITES = [
    -    SecurityConst.TLS_AES_256_GCM_SHA384,
    -    SecurityConst.TLS_CHACHA20_POLY1305_SHA256,
    -    SecurityConst.TLS_AES_128_GCM_SHA256,
         SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
    -    SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
         SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
    +    SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
         SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
    -    SecurityConst.TLS_DHE_DSS_WITH_AES_256_GCM_SHA384,
    +    SecurityConst.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
    +    SecurityConst.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
         SecurityConst.TLS_DHE_RSA_WITH_AES_256_GCM_SHA384,
    -    SecurityConst.TLS_DHE_DSS_WITH_AES_128_GCM_SHA256,
         SecurityConst.TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,
         SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
    -    SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
         SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
    -    SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
    -    SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
    -    SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
    -    SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
    -    SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
         SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
    -    SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
         SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
    +    SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
    +    SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
    +    SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
         SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
    +    SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
    +    SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
         SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
    -    SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
         SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
    -    SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
    +    SecurityConst.TLS_AES_256_GCM_SHA384,
    +    SecurityConst.TLS_AES_128_GCM_SHA256,
         SecurityConst.TLS_RSA_WITH_AES_256_GCM_SHA384,
         SecurityConst.TLS_RSA_WITH_AES_128_GCM_SHA256,
    +    SecurityConst.TLS_AES_128_CCM_8_SHA256,
    +    SecurityConst.TLS_AES_128_CCM_SHA256,
         SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA256,
         SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA256,
         SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA,
    @@ -127,39 +144,47 @@ CIPHER_SUITES = [
     ]
     
     # Basically this is simple: for PROTOCOL_SSLv23 we turn it into a low of
    -# TLSv1 and a high of TLSv1.2. For everything else, we pin to that version.
    +# TLSv1 and a high of TLSv1.3. For everything else, we pin to that version.
    +# TLSv1 to 1.2 are supported on macOS 10.8+ and TLSv1.3 is macOS 10.13+
     _protocol_to_min_max = {
    -    ssl.PROTOCOL_SSLv23: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12),
    +    util.PROTOCOL_TLS: (
    +        SecurityConst.kTLSProtocol1,
    +        SecurityConst.kTLSProtocolMaxSupported,
    +    )
     }
     
     if hasattr(ssl, "PROTOCOL_SSLv2"):
         _protocol_to_min_max[ssl.PROTOCOL_SSLv2] = (
    -        SecurityConst.kSSLProtocol2, SecurityConst.kSSLProtocol2
    +        SecurityConst.kSSLProtocol2,
    +        SecurityConst.kSSLProtocol2,
         )
     if hasattr(ssl, "PROTOCOL_SSLv3"):
         _protocol_to_min_max[ssl.PROTOCOL_SSLv3] = (
    -        SecurityConst.kSSLProtocol3, SecurityConst.kSSLProtocol3
    +        SecurityConst.kSSLProtocol3,
    +        SecurityConst.kSSLProtocol3,
         )
     if hasattr(ssl, "PROTOCOL_TLSv1"):
         _protocol_to_min_max[ssl.PROTOCOL_TLSv1] = (
    -        SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol1
    +        SecurityConst.kTLSProtocol1,
    +        SecurityConst.kTLSProtocol1,
         )
     if hasattr(ssl, "PROTOCOL_TLSv1_1"):
         _protocol_to_min_max[ssl.PROTOCOL_TLSv1_1] = (
    -        SecurityConst.kTLSProtocol11, SecurityConst.kTLSProtocol11
    +        SecurityConst.kTLSProtocol11,
    +        SecurityConst.kTLSProtocol11,
         )
     if hasattr(ssl, "PROTOCOL_TLSv1_2"):
         _protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = (
    -        SecurityConst.kTLSProtocol12, SecurityConst.kTLSProtocol12
    +        SecurityConst.kTLSProtocol12,
    +        SecurityConst.kTLSProtocol12,
         )
    -if hasattr(ssl, "PROTOCOL_TLS"):
    -    _protocol_to_min_max[ssl.PROTOCOL_TLS] = _protocol_to_min_max[ssl.PROTOCOL_SSLv23]
     
     
     def inject_into_urllib3():
         """
         Monkey-patch urllib3 with SecureTransport-backed SSL-support.
         """
    +    util.SSLContext = SecureTransportContext
         util.ssl_.SSLContext = SecureTransportContext
         util.HAS_SNI = HAS_SNI
         util.ssl_.HAS_SNI = HAS_SNI
    @@ -171,6 +196,7 @@ def extract_from_urllib3():
         """
         Undo monkey-patching by :func:`inject_into_urllib3`.
         """
    +    util.SSLContext = orig_util_SSLContext
         util.ssl_.SSLContext = orig_util_SSLContext
         util.HAS_SNI = orig_util_HAS_SNI
         util.ssl_.HAS_SNI = orig_util_HAS_SNI
    @@ -195,21 +221,18 @@ def _read_callback(connection_id, data_buffer, data_length_pointer):
             timeout = wrapped_socket.gettimeout()
             error = None
             read_count = 0
    -        buffer = (ctypes.c_char * requested_length).from_address(data_buffer)
    -        buffer_view = memoryview(buffer)
     
             try:
                 while read_count < requested_length:
                     if timeout is None or timeout >= 0:
    -                    readables = util.wait_for_read([base_socket], timeout)
    -                    if not readables:
    -                        raise socket.error(errno.EAGAIN, 'timed out')
    +                    if not util.wait_for_read(base_socket, timeout):
    +                        raise socket.error(errno.EAGAIN, "timed out")
     
    -                # We need to tell ctypes that we have a buffer that can be
    -                # written to. Upsettingly, we do that like this:
    -                chunk_size = base_socket.recv_into(
    -                    buffer_view[read_count:requested_length]
    +                remaining = requested_length - read_count
    +                buffer = (ctypes.c_char * remaining).from_address(
    +                    data_buffer + read_count
                     )
    +                chunk_size = base_socket.recv_into(buffer, remaining)
                     read_count += chunk_size
                     if not chunk_size:
                         if not read_count:
    @@ -219,7 +242,8 @@ def _read_callback(connection_id, data_buffer, data_length_pointer):
                 error = e.errno
     
                 if error is not None and error != errno.EAGAIN:
    -                if error == errno.ECONNRESET:
    +                data_length_pointer[0] = read_count
    +                if error == errno.ECONNRESET or error == errno.EPIPE:
                         return SecurityConst.errSSLClosedAbort
                     raise
     
    @@ -257,9 +281,8 @@ def _write_callback(connection_id, data_buffer, data_length_pointer):
             try:
                 while sent < bytes_to_write:
                     if timeout is None or timeout >= 0:
    -                    writables = util.wait_for_write([base_socket], timeout)
    -                    if not writables:
    -                        raise socket.error(errno.EAGAIN, 'timed out')
    +                    if not util.wait_for_write(base_socket, timeout):
    +                        raise socket.error(errno.EAGAIN, "timed out")
                     chunk_sent = base_socket.send(data)
                     sent += chunk_sent
     
    @@ -270,11 +293,13 @@ def _write_callback(connection_id, data_buffer, data_length_pointer):
                 error = e.errno
     
                 if error is not None and error != errno.EAGAIN:
    -                if error == errno.ECONNRESET:
    +                data_length_pointer[0] = sent
    +                if error == errno.ECONNRESET or error == errno.EPIPE:
                         return SecurityConst.errSSLClosedAbort
                     raise
     
             data_length_pointer[0] = sent
    +
             if sent != bytes_to_write:
                 return SecurityConst.errSSLWouldBlock
     
    @@ -299,6 +324,7 @@ class WrappedSocket(object):
         Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage
         collector of PyPy.
         """
    +
         def __init__(self, socket):
             self.socket = socket
             self.context = None
    @@ -363,7 +389,7 @@ class WrappedSocket(object):
     
             # We want data in memory, so load it up.
             if os.path.isfile(trust_bundle):
    -            with open(trust_bundle, 'rb') as f:
    +            with open(trust_bundle, "rb") as f:
                     trust_bundle = f.read()
     
             cert_array = None
    @@ -377,9 +403,7 @@ class WrappedSocket(object):
                 # created for this connection, shove our CAs into it, tell ST to
                 # ignore everything else it knows, and then ask if it can build a
                 # chain. This is a buuuunch of code.
    -            result = Security.SSLCopyPeerTrust(
    -                self.context, ctypes.byref(trust)
    -            )
    +            result = Security.SSLCopyPeerTrust(self.context, ctypes.byref(trust))
                 _assert_no_error(result)
                 if not trust:
                     raise ssl.SSLError("Failed to copy trust reference")
    @@ -391,37 +415,36 @@ class WrappedSocket(object):
                 _assert_no_error(result)
     
                 trust_result = Security.SecTrustResultType()
    -            result = Security.SecTrustEvaluate(
    -                trust, ctypes.byref(trust_result)
    -            )
    +            result = Security.SecTrustEvaluate(trust, ctypes.byref(trust_result))
                 _assert_no_error(result)
             finally:
                 if trust:
                     CoreFoundation.CFRelease(trust)
     
    -            if cert_array is None:
    +            if cert_array is not None:
                     CoreFoundation.CFRelease(cert_array)
     
             # Ok, now we can look at what the result was.
             successes = (
                 SecurityConst.kSecTrustResultUnspecified,
    -            SecurityConst.kSecTrustResultProceed
    +            SecurityConst.kSecTrustResultProceed,
             )
             if trust_result.value not in successes:
                 raise ssl.SSLError(
    -                "certificate verify failed, error code: %d" %
    -                trust_result.value
    +                "certificate verify failed, error code: %d" % trust_result.value
                 )
     
    -    def handshake(self,
    -                  server_hostname,
    -                  verify,
    -                  trust_bundle,
    -                  min_version,
    -                  max_version,
    -                  client_cert,
    -                  client_key,
    -                  client_key_passphrase):
    +    def handshake(
    +        self,
    +        server_hostname,
    +        verify,
    +        trust_bundle,
    +        min_version,
    +        max_version,
    +        client_cert,
    +        client_key,
    +        client_key_passphrase,
    +    ):
             """
             Actually performs the TLS handshake. This is run automatically by
             wrapped socket, and shouldn't be needed in user code.
    @@ -451,7 +474,7 @@ class WrappedSocket(object):
             # If we have a server hostname, we should set that too.
             if server_hostname:
                 if not isinstance(server_hostname, bytes):
    -                server_hostname = server_hostname.encode('utf-8')
    +                server_hostname = server_hostname.encode("utf-8")
     
                 result = Security.SSLSetPeerDomainName(
                     self.context, server_hostname, len(server_hostname)
    @@ -464,7 +487,16 @@ class WrappedSocket(object):
             # Set the minimum and maximum TLS versions.
             result = Security.SSLSetProtocolVersionMin(self.context, min_version)
             _assert_no_error(result)
    +
    +        # TLS 1.3 isn't necessarily enabled by the OS
    +        # so we have to detect when we error out and try
    +        # setting TLS 1.3 if it's allowed. kTLSProtocolMaxSupported
    +        # was added in macOS 10.13 along with kTLSProtocol13.
             result = Security.SSLSetProtocolVersionMax(self.context, max_version)
    +        if result != 0 and max_version == SecurityConst.kTLSProtocolMaxSupported:
    +            result = Security.SSLSetProtocolVersionMax(
    +                self.context, SecurityConst.kTLSProtocol12
    +            )
             _assert_no_error(result)
     
             # If there's a trust DB, we need to use it. We do that by telling
    @@ -473,9 +505,7 @@ class WrappedSocket(object):
             # authing in that case.
             if not verify or trust_bundle is not None:
                 result = Security.SSLSetSessionOption(
    -                self.context,
    -                SecurityConst.kSSLSessionOptionBreakOnServerAuth,
    -                True
    +                self.context, SecurityConst.kSSLSessionOptionBreakOnServerAuth, True
                 )
                 _assert_no_error(result)
     
    @@ -485,9 +515,7 @@ class WrappedSocket(object):
                 self._client_cert_chain = _load_client_cert_chain(
                     self._keychain, client_cert, client_key
                 )
    -            result = Security.SSLSetCertificate(
    -                self.context, self._client_cert_chain
    -            )
    +            result = Security.SSLSetCertificate(self.context, self._client_cert_chain)
                 _assert_no_error(result)
     
             while True:
    @@ -538,7 +566,7 @@ class WrappedSocket(object):
             # There are some result codes that we want to treat as "not always
             # errors". Specifically, those are errSSLWouldBlock,
             # errSSLClosedGraceful, and errSSLClosedNoNotify.
    -        if (result == SecurityConst.errSSLWouldBlock):
    +        if result == SecurityConst.errSSLWouldBlock:
                 # If we didn't process any bytes, then this was just a time out.
                 # However, we can get errSSLWouldBlock in situations when we *did*
                 # read some data, and in those cases we should just read "short"
    @@ -546,7 +574,10 @@ class WrappedSocket(object):
                 if processed_bytes.value == 0:
                     # Timed out, no data read.
                     raise socket.timeout("recv timed out")
    -        elif result in (SecurityConst.errSSLClosedGraceful, SecurityConst.errSSLClosedNoNotify):
    +        elif result in (
    +            SecurityConst.errSSLClosedGraceful,
    +            SecurityConst.errSSLClosedNoNotify,
    +        ):
                 # The remote peer has closed this connection. We should do so as
                 # well. Note that we don't actually return here because in
                 # principle this could actually be fired along with return data.
    @@ -585,7 +616,7 @@ class WrappedSocket(object):
         def sendall(self, data):
             total_sent = 0
             while total_sent < len(data):
    -            sent = self.send(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE])
    +            sent = self.send(data[total_sent : total_sent + SSL_WRITE_BLOCKSIZE])
                 total_sent += sent
     
         def shutdown(self):
    @@ -632,18 +663,14 @@ class WrappedSocket(object):
             # instead to just flag to urllib3 that it shouldn't do its own hostname
             # validation when using SecureTransport.
             if not binary_form:
    -            raise ValueError(
    -                "SecureTransport only supports dumping binary certs"
    -            )
    +            raise ValueError("SecureTransport only supports dumping binary certs")
             trust = Security.SecTrustRef()
             certdata = None
             der_bytes = None
     
             try:
                 # Grab the trust store.
    -            result = Security.SSLCopyPeerTrust(
    -                self.context, ctypes.byref(trust)
    -            )
    +            result = Security.SSLCopyPeerTrust(self.context, ctypes.byref(trust))
                 _assert_no_error(result)
                 if not trust:
                     # Probably we haven't done the handshake yet. No biggie.
    @@ -673,6 +700,27 @@ class WrappedSocket(object):
     
             return der_bytes
     
    +    def version(self):
    +        protocol = Security.SSLProtocol()
    +        result = Security.SSLGetNegotiatedProtocolVersion(
    +            self.context, ctypes.byref(protocol)
    +        )
    +        _assert_no_error(result)
    +        if protocol.value == SecurityConst.kTLSProtocol13:
    +            return "TLSv1.3"
    +        elif protocol.value == SecurityConst.kTLSProtocol12:
    +            return "TLSv1.2"
    +        elif protocol.value == SecurityConst.kTLSProtocol11:
    +            return "TLSv1.1"
    +        elif protocol.value == SecurityConst.kTLSProtocol1:
    +            return "TLSv1"
    +        elif protocol.value == SecurityConst.kSSLProtocol3:
    +            return "SSLv3"
    +        elif protocol.value == SecurityConst.kSSLProtocol2:
    +            return "SSLv2"
    +        else:
    +            raise ssl.SSLError("Unknown TLS version: %r" % protocol)
    +
         def _reuse(self):
             self._makefile_refs += 1
     
    @@ -684,16 +732,21 @@ class WrappedSocket(object):
     
     
     if _fileobject:  # Platform-specific: Python 2
    +
         def makefile(self, mode, bufsize=-1):
             self._makefile_refs += 1
             return _fileobject(self, mode, bufsize, close=True)
    +
    +
     else:  # Platform-specific: Python 3
    +
         def makefile(self, mode="r", buffering=None, *args, **kwargs):
             # We disable buffering with SecureTransport because it conflicts with
             # the buffering that ST does internally (see issue #1153 for more).
             buffering = 0
             return backport_makefile(self, mode, buffering, *args, **kwargs)
     
    +
     WrappedSocket.makefile = makefile
     
     
    @@ -703,6 +756,7 @@ class SecureTransportContext(object):
         interface of the standard library ``SSLContext`` object to calls into
         SecureTransport.
         """
    +
         def __init__(self, protocol):
             self._min_version, self._max_version = _protocol_to_min_max[protocol]
             self._options = 0
    @@ -769,16 +823,12 @@ class SecureTransportContext(object):
         def set_ciphers(self, ciphers):
             # For now, we just require the default cipher string.
             if ciphers != util.ssl_.DEFAULT_CIPHERS:
    -            raise ValueError(
    -                "SecureTransport doesn't support custom cipher strings"
    -            )
    +            raise ValueError("SecureTransport doesn't support custom cipher strings")
     
         def load_verify_locations(self, cafile=None, capath=None, cadata=None):
             # OK, we only really support cadata and cafile.
             if capath is not None:
    -            raise ValueError(
    -                "SecureTransport does not support cert directories"
    -            )
    +            raise ValueError("SecureTransport does not support cert directories")
     
             self._trust_bundle = cafile or cadata
     
    @@ -787,9 +837,14 @@ class SecureTransportContext(object):
             self._client_key = keyfile
             self._client_cert_passphrase = password
     
    -    def wrap_socket(self, sock, server_side=False,
    -                    do_handshake_on_connect=True, suppress_ragged_eofs=True,
    -                    server_hostname=None):
    +    def wrap_socket(
    +        self,
    +        sock,
    +        server_side=False,
    +        do_handshake_on_connect=True,
    +        suppress_ragged_eofs=True,
    +        server_hostname=None,
    +    ):
             # So, what do we do here? Firstly, we assert some properties. This is a
             # stripped down shim, so there is some functionality we don't support.
             # See PEP 543 for the real deal.
    @@ -803,8 +858,13 @@ class SecureTransportContext(object):
     
             # Now we can handshake
             wrapped_socket.handshake(
    -            server_hostname, self._verify, self._trust_bundle,
    -            self._min_version, self._max_version, self._client_cert,
    -            self._client_key, self._client_key_passphrase
    +            server_hostname,
    +            self._verify,
    +            self._trust_bundle,
    +            self._min_version,
    +            self._max_version,
    +            self._client_cert,
    +            self._client_key,
    +            self._client_key_passphrase,
             )
             return wrapped_socket
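
The same inject/extract pattern applies here as in the pyopenssl module; a minimal sketch, assuming macOS (SecureTransport is not available elsewhere) and a placeholder URL::

    import urllib3
    import urllib3.contrib.securetransport

    urllib3.contrib.securetransport.inject_into_urllib3()
    http = urllib3.PoolManager()
    r = http.request("GET", "https://example.com/")
    print(r.status)  # TLS handled by the system's SecureTransport
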
    diff --git a/lib/urllib3/contrib/socks.py b/lib/urllib3/contrib/socks.py
    index 39e92fde..9e97f7aa 100644
    --- a/lib/urllib3/contrib/socks.py
    +++ b/lib/urllib3/contrib/socks.py
    @@ -1,25 +1,38 @@
     # -*- coding: utf-8 -*-
     """
     This module contains provisional support for SOCKS proxies from within
    -urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and
    +urllib3. This module supports SOCKS4, SOCKS4A (an extension of SOCKS4), and
     SOCKS5. To enable its functionality, either install PySocks or install this
     module with the ``socks`` extra.
     
     The SOCKS implementation supports the full range of urllib3 features. It also
     supports the following SOCKS features:
     
    -- SOCKS4
    -- SOCKS4a
    -- SOCKS5
    +- SOCKS4A (``proxy_url='socks4a://...``)
    +- SOCKS4 (``proxy_url='socks4://...``)
    +- SOCKS5 with remote DNS (``proxy_url='socks5h://...``)
    +- SOCKS5 with local DNS (``proxy_url='socks5://...``)
     - Usernames and passwords for the SOCKS proxy
     
    -Known Limitations:
    + .. note::
    +    It is recommended to use ``socks5h://`` or ``socks4a://`` schemes in
    +    your ``proxy_url`` to ensure that DNS resolution is done from the remote
    +    server instead of client-side when connecting to a domain name.
    +
    +SOCKS4 supports IPv4 and domain names with the SOCKS4A extension. SOCKS5
    +supports IPv4, IPv6, and domain names.
    +
    +When connecting to a SOCKS4 proxy the ``username`` portion of the ``proxy_url``
    +will be sent as the ``userid`` section of the SOCKS request::
    +
    +    proxy_url="socks4a://@proxy-host"
    +
    +When connecting to a SOCKS5 proxy the ``username`` and ``password`` portion
    +of the ``proxy_url`` will be sent as the username/password to authenticate
    +with the proxy::
    +
    +    proxy_url="socks5h://:@proxy-host"
     
    -- Currently PySocks does not support contacting remote websites via literal
    -  IPv6 addresses. Any such connection attempt will fail. You must use a domain
    -  name.
    -- Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any
    -  such connection attempt will fail.
     """
     from __future__ import absolute_import
     
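
A hedged usage sketch for the schemes documented above, via this module's ``SOCKSProxyManager`` (the proxy address and URL are placeholders)::

    from urllib3.contrib.socks import SOCKSProxyManager

    # socks5h:// -> DNS is resolved by the proxy; socks5:// resolves locally.
    proxy = SOCKSProxyManager("socks5h://localhost:1080/")
    r = proxy.request("GET", "https://example.com/")
    print(r.status)
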
    @@ -29,23 +42,20 @@ except ImportError:
         import warnings
         from ..exceptions import DependencyWarning
     
    -    warnings.warn((
    -        'SOCKS support in urllib3 requires the installation of optional '
    -        'dependencies: specifically, PySocks.  For more information, see '
    -        'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies'
    +    warnings.warn(
    +        (
    +            "SOCKS support in urllib3 requires the installation of optional "
    +            "dependencies: specifically, PySocks.  For more information, see "
    +            "https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies"
             ),
    -        DependencyWarning
    +        DependencyWarning,
         )
         raise
     
     from socket import error as SocketError, timeout as SocketTimeout
     
    -from ..connection import (
    -    HTTPConnection, HTTPSConnection
    -)
    -from ..connectionpool import (
    -    HTTPConnectionPool, HTTPSConnectionPool
    -)
    +from ..connection import HTTPConnection, HTTPSConnection
    +from ..connectionpool import HTTPConnectionPool, HTTPSConnectionPool
     from ..exceptions import ConnectTimeoutError, NewConnectionError
     from ..poolmanager import PoolManager
     from ..util.url import parse_url
    @@ -60,8 +70,9 @@ class SOCKSConnection(HTTPConnection):
         """
         A plain-text HTTP connection that connects via a SOCKS proxy.
         """
    +
         def __init__(self, *args, **kwargs):
    -        self._socks_options = kwargs.pop('_socks_options')
    +        self._socks_options = kwargs.pop("_socks_options")
             super(SOCKSConnection, self).__init__(*args, **kwargs)
     
         def _new_conn(self):
    @@ -70,28 +81,30 @@ class SOCKSConnection(HTTPConnection):
             """
             extra_kw = {}
             if self.source_address:
    -            extra_kw['source_address'] = self.source_address
    +            extra_kw["source_address"] = self.source_address
     
             if self.socket_options:
    -            extra_kw['socket_options'] = self.socket_options
    +            extra_kw["socket_options"] = self.socket_options
     
             try:
                 conn = socks.create_connection(
                     (self.host, self.port),
    -                proxy_type=self._socks_options['socks_version'],
    -                proxy_addr=self._socks_options['proxy_host'],
    -                proxy_port=self._socks_options['proxy_port'],
    -                proxy_username=self._socks_options['username'],
    -                proxy_password=self._socks_options['password'],
    -                proxy_rdns=self._socks_options['rdns'],
    +                proxy_type=self._socks_options["socks_version"],
    +                proxy_addr=self._socks_options["proxy_host"],
    +                proxy_port=self._socks_options["proxy_port"],
    +                proxy_username=self._socks_options["username"],
    +                proxy_password=self._socks_options["password"],
    +                proxy_rdns=self._socks_options["rdns"],
                     timeout=self.timeout,
                     **extra_kw
                 )
     
    -        except SocketTimeout as e:
    +        except SocketTimeout:
                 raise ConnectTimeoutError(
    -                self, "Connection to %s timed out. (connect timeout=%s)" %
    -                (self.host, self.timeout))
    +                self,
    +                "Connection to %s timed out. (connect timeout=%s)"
    +                % (self.host, self.timeout),
    +            )
     
             except socks.ProxyError as e:
                 # This is fragile as hell, but it seems to be the only way to raise
    @@ -101,23 +114,22 @@ class SOCKSConnection(HTTPConnection):
                     if isinstance(error, SocketTimeout):
                         raise ConnectTimeoutError(
                             self,
    -                        "Connection to %s timed out. (connect timeout=%s)" %
    -                        (self.host, self.timeout)
    +                        "Connection to %s timed out. (connect timeout=%s)"
    +                        % (self.host, self.timeout),
                         )
                     else:
                         raise NewConnectionError(
    -                        self,
    -                        "Failed to establish a new connection: %s" % error
    +                        self, "Failed to establish a new connection: %s" % error
                         )
                 else:
                     raise NewConnectionError(
    -                    self,
    -                    "Failed to establish a new connection: %s" % e
    +                    self, "Failed to establish a new connection: %s" % e
                     )
     
             except SocketError as e:  # Defensive: PySocks should catch all these.
                 raise NewConnectionError(
    -                self, "Failed to establish a new connection: %s" % e)
    +                self, "Failed to establish a new connection: %s" % e
    +            )
     
             return conn
     
    @@ -143,43 +155,53 @@ class SOCKSProxyManager(PoolManager):
         A version of the urllib3 ProxyManager that routes connections via the
         defined SOCKS proxy.
         """
    +
         pool_classes_by_scheme = {
    -        'http': SOCKSHTTPConnectionPool,
    -        'https': SOCKSHTTPSConnectionPool,
    +        "http": SOCKSHTTPConnectionPool,
    +        "https": SOCKSHTTPSConnectionPool,
         }
     
    -    def __init__(self, proxy_url, username=None, password=None,
    -                 num_pools=10, headers=None, **connection_pool_kw):
    +    def __init__(
    +        self,
    +        proxy_url,
    +        username=None,
    +        password=None,
    +        num_pools=10,
    +        headers=None,
    +        **connection_pool_kw
    +    ):
             parsed = parse_url(proxy_url)
     
    -        if parsed.scheme == 'socks5':
    +        if username is None and password is None and parsed.auth is not None:
    +            split = parsed.auth.split(":")
    +            if len(split) == 2:
    +                username, password = split
    +        if parsed.scheme == "socks5":
                 socks_version = socks.PROXY_TYPE_SOCKS5
                 rdns = False
    -        elif parsed.scheme == 'socks5h':
    +        elif parsed.scheme == "socks5h":
                 socks_version = socks.PROXY_TYPE_SOCKS5
                 rdns = True
    -        elif parsed.scheme == 'socks4':
    +        elif parsed.scheme == "socks4":
                 socks_version = socks.PROXY_TYPE_SOCKS4
                 rdns = False
    -        elif parsed.scheme == 'socks4a':
    +        elif parsed.scheme == "socks4a":
                 socks_version = socks.PROXY_TYPE_SOCKS4
                 rdns = True
             else:
    -            raise ValueError(
    -                "Unable to determine SOCKS version from %s" % proxy_url
    -            )
    +            raise ValueError("Unable to determine SOCKS version from %s" % proxy_url)
     
             self.proxy_url = proxy_url
     
             socks_options = {
    -            'socks_version': socks_version,
    -            'proxy_host': parsed.host,
    -            'proxy_port': parsed.port,
    -            'username': username,
    -            'password': password,
    -            'rdns': rdns
    +            "socks_version": socks_version,
    +            "proxy_host": parsed.host,
    +            "proxy_port": parsed.port,
    +            "username": username,
    +            "password": password,
    +            "rdns": rdns,
             }
    -        connection_pool_kw['_socks_options'] = socks_options
    +        connection_pool_kw["_socks_options"] = socks_options
     
             super(SOCKSProxyManager, self).__init__(
                 num_pools, headers, **connection_pool_kw
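
(A minimal usage sketch, not part of this diff: with PySocks installed, the
SOCKSProxyManager above behaves like a regular PoolManager. The proxy
address, credentials, and target URL below are placeholders, and the import
path assumes the vendored package is importable as ``urllib3``.)

    from urllib3.contrib.socks import SOCKSProxyManager

    # socks5h:// performs DNS resolution on the proxy itself; credentials
    # embedded in the URL are split out of parsed.auth by the new code above.
    proxy = SOCKSProxyManager("socks5h://user:secret@127.0.0.1:1080")
    resp = proxy.request("GET", "https://example.com/")
    print(resp.status)
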
    diff --git a/lib/urllib3/exceptions.py b/lib/urllib3/exceptions.py
    index 6c4be581..93d93fba 100644
    --- a/lib/urllib3/exceptions.py
    +++ b/lib/urllib3/exceptions.py
    @@ -1,7 +1,6 @@
     from __future__ import absolute_import
    -from .packages.six.moves.http_client import (
    -    IncompleteRead as httplib_IncompleteRead
    -)
    +from .packages.six.moves.http_client import IncompleteRead as httplib_IncompleteRead
    +
     # Base Exceptions
     
     
    @@ -17,6 +16,7 @@ class HTTPWarning(Warning):
     
     class PoolError(HTTPError):
         "Base exception for errors caused within a pool."
    +
         def __init__(self, pool, message):
             self.pool = pool
             HTTPError.__init__(self, "%s: %s" % (pool, message))
    @@ -28,6 +28,7 @@ class PoolError(HTTPError):
     
     class RequestError(PoolError):
         "Base exception for PoolErrors that have associated URLs."
    +
         def __init__(self, pool, url, message):
             self.url = url
             PoolError.__init__(self, pool, message)
    @@ -63,6 +64,7 @@ ConnectionError = ProtocolError
     
     # Leaf Exceptions
     
    +
     class MaxRetryError(RequestError):
         """Raised when the maximum number of retries is exceeded.
     
    @@ -76,8 +78,7 @@ class MaxRetryError(RequestError):
         def __init__(self, pool, url, reason=None):
             self.reason = reason
     
    -        message = "Max retries exceeded with url: %s (Caused by %r)" % (
    -            url, reason)
    +        message = "Max retries exceeded with url: %s (Caused by %r)" % (url, reason)
     
             RequestError.__init__(self, pool, url, message)
     
    @@ -93,6 +94,7 @@ class HostChangedError(RequestError):
     
     class TimeoutStateError(HTTPError):
         """ Raised when passing an invalid state to a timeout """
    +
         pass
     
     
    @@ -102,6 +104,7 @@ class TimeoutError(HTTPError):
         Catching this error will catch both :exc:`ReadTimeoutErrors
         <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
         """
    +
         pass
     
     
    @@ -149,12 +152,12 @@ class LocationParseError(LocationValueError):
     
     class ResponseError(HTTPError):
         "Used as a container for an error reason supplied in a MaxRetryError."
    -    GENERIC_ERROR = 'too many error responses'
    -    SPECIFIC_ERROR = 'too many {status_code} error responses'
    +    GENERIC_ERROR = "too many error responses"
    +    SPECIFIC_ERROR = "too many {status_code} error responses"
     
     
     class SecurityWarning(HTTPWarning):
    -    "Warned when perfoming security reducing actions"
    +    "Warned when performing security reducing actions"
         pass
     
     
    @@ -188,6 +191,7 @@ class DependencyWarning(HTTPWarning):
         Warned when an attempt is made to import a module with missing optional
         dependencies.
         """
    +
         pass
     
     
    @@ -201,6 +205,7 @@ class BodyNotHttplibCompatible(HTTPError):
         Body should be httplib.HTTPResponse like (have an fp attribute which
         returns raw chunks) for read_chunked().
         """
    +
         pass
     
     
    @@ -212,12 +217,15 @@ class IncompleteRead(HTTPError, httplib_IncompleteRead):
         for `partial` to avoid creating large objects on streamed
         reads.
         """
    +
         def __init__(self, partial, expected):
             super(IncompleteRead, self).__init__(partial, expected)
     
         def __repr__(self):
    -        return ('IncompleteRead(%i bytes read, '
    -                '%i more expected)' % (self.partial, self.expected))
    +        return "IncompleteRead(%i bytes read, " "%i more expected)" % (
    +            self.partial,
    +            self.expected,
    +        )
     
     
     class InvalidHeader(HTTPError):
    @@ -236,8 +244,9 @@ class ProxySchemeUnknown(AssertionError, ValueError):
     
     class HeaderParsingError(HTTPError):
         "Raised by assert_header_parsing, but we convert it to a log.warning statement."
    +
         def __init__(self, defects, unparsed_data):
    -        message = '%s, unparsed data: %r' % (defects or 'Unknown', unparsed_data)
    +        message = "%s, unparsed data: %r" % (defects or "Unknown", unparsed_data)
             super(HeaderParsingError, self).__init__(message)
     
     
    diff --git a/lib/urllib3/fields.py b/lib/urllib3/fields.py
    index 19b0ae0c..8715b220 100644
    --- a/lib/urllib3/fields.py
    +++ b/lib/urllib3/fields.py
    @@ -1,11 +1,12 @@
     from __future__ import absolute_import
     import email.utils
     import mimetypes
    +import re
     
     from .packages import six
     
     
    -def guess_content_type(filename, default='application/octet-stream'):
    +def guess_content_type(filename, default="application/octet-stream"):
         """
         Guess the "Content-Type" of a file.
     
    @@ -19,57 +20,143 @@ def guess_content_type(filename, default='application/octet-stream'):
         return default
     
     
    -def format_header_param(name, value):
    +def format_header_param_rfc2231(name, value):
         """
    -    Helper function to format and quote a single header parameter.
    +    Helper function to format and quote a single header parameter using the
    +    strategy defined in RFC 2231.
     
         Particularly useful for header parameters which might contain
    -    non-ASCII values, like file names. This follows RFC 2231, as
    -    suggested by RFC 2388 Section 4.4.
    +    non-ASCII values, like file names. This follows RFC 2388 Section 4.4.
     
         :param name:
             The name of the parameter, a string expected to be ASCII only.
         :param value:
    -        The value of the parameter, provided as a unicode string.
    +        The value of the parameter, provided as ``bytes`` or ``str``.
    +    :ret:
    +        An RFC-2231-formatted unicode string.
         """
    +    if isinstance(value, six.binary_type):
    +        value = value.decode("utf-8")
    +
         if not any(ch in value for ch in '"\\\r\n'):
    -        result = '%s="%s"' % (name, value)
    +        result = u'%s="%s"' % (name, value)
             try:
    -            result.encode('ascii')
    +            result.encode("ascii")
             except (UnicodeEncodeError, UnicodeDecodeError):
                 pass
             else:
                 return result
    -    if not six.PY3 and isinstance(value, six.text_type):  # Python 2:
    -        value = value.encode('utf-8')
    -    value = email.utils.encode_rfc2231(value, 'utf-8')
    -    value = '%s*=%s' % (name, value)
    +
    +    if six.PY2:  # Python 2:
    +        value = value.encode("utf-8")
    +
    +    # encode_rfc2231 accepts an encoded string and returns an ascii-encoded
    +    # string in Python 2 but accepts and returns unicode strings in Python 3
    +    value = email.utils.encode_rfc2231(value, "utf-8")
    +    value = "%s*=%s" % (name, value)
    +
    +    if six.PY2:  # Python 2:
    +        value = value.decode("utf-8")
    +
         return value
     
     
    +_HTML5_REPLACEMENTS = {
    +    # Replace '"' with "%22".
    +    u"\u0022": u"%22",
    +    # Replace "\" with "\\".
    +    u"\u005C": u"\u005C\u005C",
    +}
    +
    +# All control characters from 0x00 to 0x1F *except* 0x1B.
    +_HTML5_REPLACEMENTS.update(
    +    {
    +        six.unichr(cc): u"%{:02X}".format(cc)
    +        for cc in range(0x00, 0x1F + 1)
    +        if cc not in (0x1B,)
    +    }
    +)
    +
    +
    +def _replace_multiple(value, needles_and_replacements):
    +    def replacer(match):
    +        return needles_and_replacements[match.group(0)]
    +
    +    pattern = re.compile(
    +        r"|".join([re.escape(needle) for needle in needles_and_replacements.keys()])
    +    )
    +
    +    result = pattern.sub(replacer, value)
    +
    +    return result
    +
    +
    +def format_header_param_html5(name, value):
    +    """
    +    Helper function to format and quote a single header parameter using the
    +    HTML5 strategy.
    +
    +    Particularly useful for header parameters which might contain
    +    non-ASCII values, like file names. This follows the `HTML5 Working Draft
    +    Section 4.10.22.7`_ and matches the behavior of curl and modern browsers.
    +
    +    .. _HTML5 Working Draft Section 4.10.22.7:
    +        https://w3c.github.io/html/sec-forms.html#multipart-form-data
    +
    +    :param name:
    +        The name of the parameter, a string expected to be ASCII only.
    +    :param value:
    +        The value of the parameter, provided as ``bytes`` or ``str``.
    +    :ret:
    +        A unicode string, stripped of troublesome characters.
    +    """
    +    if isinstance(value, six.binary_type):
    +        value = value.decode("utf-8")
    +
    +    value = _replace_multiple(value, _HTML5_REPLACEMENTS)
    +
    +    return u'%s="%s"' % (name, value)
    +
    +
    +# For backwards-compatibility.
    +format_header_param = format_header_param_html5
    +
    +
     class RequestField(object):
         """
         A data container for request body parameters.
     
         :param name:
    -        The name of this request field.
    +        The name of this request field. Must be unicode.
         :param data:
             The data/value body.
         :param filename:
    -        An optional filename of the request field.
    +        An optional filename of the request field. Must be unicode.
         :param headers:
             An optional dict-like object of headers to initially use for the field.
    +    :param header_formatter:
    +        An optional callable that is used to encode and format the headers. By
    +        default, this is :func:`format_header_param_html5`.
         """
    -    def __init__(self, name, data, filename=None, headers=None):
    +
    +    def __init__(
    +        self,
    +        name,
    +        data,
    +        filename=None,
    +        headers=None,
    +        header_formatter=format_header_param_html5,
    +    ):
             self._name = name
             self._filename = filename
             self.data = data
             self.headers = {}
             if headers:
                 self.headers = dict(headers)
    +        self.header_formatter = header_formatter
     
         @classmethod
    -    def from_tuples(cls, fieldname, value):
    +    def from_tuples(cls, fieldname, value, header_formatter=format_header_param_html5):
             """
             A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
     
    @@ -97,21 +184,25 @@ class RequestField(object):
                 content_type = None
                 data = value
     
    -        request_param = cls(fieldname, data, filename=filename)
    +        request_param = cls(
    +            fieldname, data, filename=filename, header_formatter=header_formatter
    +        )
             request_param.make_multipart(content_type=content_type)
     
             return request_param
     
         def _render_part(self, name, value):
             """
    -        Overridable helper function to format a single header parameter.
    +        Overridable helper function to format a single header parameter. By
    +        default, this calls ``self.header_formatter``.
     
             :param name:
                 The name of the parameter, a string expected to be ASCII only.
             :param value:
                 The value of the parameter, provided as a unicode string.
             """
    -        return format_header_param(name, value)
    +
    +        return self.header_formatter(name, value)
     
         def _render_parts(self, header_parts):
             """
    @@ -121,7 +212,7 @@ class RequestField(object):
             'Content-Disposition' fields.
     
             :param header_parts:
    -            A sequence of (k, v) typles or a :class:`dict` of (k, v) to format
    +            A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
                 as `k1="v1"; k2="v2"; ...`.
             """
             parts = []
    @@ -133,7 +224,7 @@ class RequestField(object):
                 if value is not None:
                     parts.append(self._render_part(name, value))
     
    -        return '; '.join(parts)
    +        return u"; ".join(parts)
     
         def render_headers(self):
             """
    @@ -141,21 +232,22 @@ class RequestField(object):
             """
             lines = []
     
    -        sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
    +        sort_keys = ["Content-Disposition", "Content-Type", "Content-Location"]
             for sort_key in sort_keys:
                 if self.headers.get(sort_key, False):
    -                lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
    +                lines.append(u"%s: %s" % (sort_key, self.headers[sort_key]))
     
             for header_name, header_value in self.headers.items():
                 if header_name not in sort_keys:
                     if header_value:
    -                    lines.append('%s: %s' % (header_name, header_value))
    +                    lines.append(u"%s: %s" % (header_name, header_value))
     
    -        lines.append('\r\n')
    -        return '\r\n'.join(lines)
    +        lines.append(u"\r\n")
    +        return u"\r\n".join(lines)
     
    -    def make_multipart(self, content_disposition=None, content_type=None,
    -                       content_location=None):
    +    def make_multipart(
    +        self, content_disposition=None, content_type=None, content_location=None
    +    ):
             """
             Makes this request field into a multipart request field.
     
    @@ -168,11 +260,14 @@ class RequestField(object):
                 The 'Content-Location' of the request body.
     
             """
    -        self.headers['Content-Disposition'] = content_disposition or 'form-data'
    -        self.headers['Content-Disposition'] += '; '.join([
    -            '', self._render_parts(
    -                (('name', self._name), ('filename', self._filename))
    -            )
    -        ])
    -        self.headers['Content-Type'] = content_type
    -        self.headers['Content-Location'] = content_location
    +        self.headers["Content-Disposition"] = content_disposition or u"form-data"
    +        self.headers["Content-Disposition"] += u"; ".join(
    +            [
    +                u"",
    +                self._render_parts(
    +                    ((u"name", self._name), (u"filename", self._filename))
    +                ),
    +            ]
    +        )
    +        self.headers["Content-Type"] = content_type
    +        self.headers["Content-Location"] = content_location
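
(A minimal sketch of the two formatting strategies introduced above; the
file name is illustrative. RFC 2231 percent-encodes non-ASCII values into a
``name*=`` form, while the HTML5 strategy, now the default, passes the text
through and only escapes quotes, backslashes, and control characters,
matching curl and modern browsers.)

    from urllib3.fields import (
        format_header_param_html5,
        format_header_param_rfc2231,
    )

    print(format_header_param_rfc2231("filename", u"caf\u00e9.txt"))
    # filename*=utf-8''caf%C3%A9.txt
    print(format_header_param_html5("filename", u"caf\u00e9.txt"))
    # filename="café.txt"
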
    diff --git a/lib/urllib3/filepost.py b/lib/urllib3/filepost.py
    index cd11cee4..b7b00992 100644
    --- a/lib/urllib3/filepost.py
    +++ b/lib/urllib3/filepost.py
    @@ -1,21 +1,25 @@
     from __future__ import absolute_import
    +import binascii
     import codecs
    +import os
     
    -from uuid import uuid4
     from io import BytesIO
     
     from .packages import six
     from .packages.six import b
     from .fields import RequestField
     
    -writer = codecs.lookup('utf-8')[3]
    +writer = codecs.lookup("utf-8")[3]
     
     
     def choose_boundary():
         """
         Our embarrassingly-simple replacement for mimetools.choose_boundary.
         """
    -    return uuid4().hex
    +    boundary = binascii.hexlify(os.urandom(16))
    +    if not six.PY2:
    +        boundary = boundary.decode("ascii")
    +    return boundary
     
     
     def iter_field_objects(fields):
    @@ -65,14 +69,14 @@ def encode_multipart_formdata(fields, boundary=None):
     
         :param boundary:
             If not specified, then a random boundary will be generated using
    -        :func:`mimetools.choose_boundary`.
    +        :func:`urllib3.filepost.choose_boundary`.
         """
         body = BytesIO()
         if boundary is None:
             boundary = choose_boundary()
     
         for field in iter_field_objects(fields):
    -        body.write(b('--%s\r\n' % (boundary)))
    +        body.write(b("--%s\r\n" % (boundary)))
     
             writer(body).write(field.render_headers())
             data = field.data
    @@ -85,10 +89,10 @@ def encode_multipart_formdata(fields, boundary=None):
             else:
                 body.write(data)
     
    -        body.write(b'\r\n')
    +        body.write(b"\r\n")
     
    -    body.write(b('--%s--\r\n' % (boundary)))
    +    body.write(b("--%s--\r\n" % (boundary)))
     
    -    content_type = str('multipart/form-data; boundary=%s' % boundary)
    +    content_type = str("multipart/form-data; boundary=%s" % boundary)
     
         return body.getvalue(), content_type
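
(A minimal sketch of the multipart helpers touched above; field names and
contents are illustrative. choose_boundary() now draws the boundary from
os.urandom() instead of uuid4(), but the call site is unchanged.)

    from urllib3.filepost import encode_multipart_formdata

    fields = {
        "token": "abc123",
        # a (filename, data) tuple value becomes a file upload
        "upload": ("report.txt", b"hello world"),
    }
    body, content_type = encode_multipart_formdata(fields)
    print(content_type)  # multipart/form-data; boundary=<32 hex digits>
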
    diff --git a/lib/urllib3/packages/__init__.py b/lib/urllib3/packages/__init__.py
    index 170e974c..fce4caa6 100644
    --- a/lib/urllib3/packages/__init__.py
    +++ b/lib/urllib3/packages/__init__.py
    @@ -2,4 +2,4 @@ from __future__ import absolute_import
     
     from . import ssl_match_hostname
     
    -__all__ = ('ssl_match_hostname', )
    +__all__ = ("ssl_match_hostname",)
    diff --git a/lib/urllib3/packages/backports/__init__.py b/lib/urllib3/packages/backports/__init__.py
    new file mode 100644
    index 00000000..e69de29b
    diff --git a/lib/urllib3/packages/backports/makefile.py b/lib/urllib3/packages/backports/makefile.py
    index 75b80dcf..a3156a69 100644
    --- a/lib/urllib3/packages/backports/makefile.py
    +++ b/lib/urllib3/packages/backports/makefile.py
    @@ -11,15 +11,14 @@ import io
     from socket import SocketIO
     
     
    -def backport_makefile(self, mode="r", buffering=None, encoding=None,
    -                      errors=None, newline=None):
    +def backport_makefile(
    +    self, mode="r", buffering=None, encoding=None, errors=None, newline=None
    +):
         """
         Backport of ``socket.makefile`` from Python 3.5.
         """
    -    if not set(mode) <= set(["r", "w", "b"]):
    -        raise ValueError(
    -            "invalid mode %r (only r, w, b allowed)" % (mode,)
    -        )
    +    if not set(mode) <= {"r", "w", "b"}:
    +        raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,))
         writing = "w" in mode
         reading = "r" in mode or not writing
         assert reading or writing
    diff --git a/lib/urllib3/packages/ordered_dict.py b/lib/urllib3/packages/ordered_dict.py
    deleted file mode 100644
    index 4479363c..00000000
    --- a/lib/urllib3/packages/ordered_dict.py
    +++ /dev/null
    @@ -1,259 +0,0 @@
    -# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
    -# Passes Python2.7's test suite and incorporates all the latest updates.
    -# Copyright 2009 Raymond Hettinger, released under the MIT License.
    -# http://code.activestate.com/recipes/576693/
    -try:
    -    from thread import get_ident as _get_ident
    -except ImportError:
    -    from dummy_thread import get_ident as _get_ident
    -
    -try:
    -    from _abcoll import KeysView, ValuesView, ItemsView
    -except ImportError:
    -    pass
    -
    -
    -class OrderedDict(dict):
    -    'Dictionary that remembers insertion order'
    -    # An inherited dict maps keys to values.
    -    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    -    # The remaining methods are order-aware.
    -    # Big-O running times for all methods are the same as for regular dictionaries.
    -
    -    # The internal self.__map dictionary maps keys to links in a doubly linked list.
    -    # The circular doubly linked list starts and ends with a sentinel element.
    -    # The sentinel element never gets deleted (this simplifies the algorithm).
    -    # Each link is stored as a list of length three:  [PREV, NEXT, KEY].
    -
    -    def __init__(self, *args, **kwds):
    -        '''Initialize an ordered dictionary.  Signature is the same as for
    -        regular dictionaries, but keyword arguments are not recommended
    -        because their insertion order is arbitrary.
    -
    -        '''
    -        if len(args) > 1:
    -            raise TypeError('expected at most 1 arguments, got %d' % len(args))
    -        try:
    -            self.__root
    -        except AttributeError:
    -            self.__root = root = []                     # sentinel node
    -            root[:] = [root, root, None]
    -            self.__map = {}
    -        self.__update(*args, **kwds)
    -
    -    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
    -        'od.__setitem__(i, y) <==> od[i]=y'
    -        # Setting a new item creates a new link which goes at the end of the linked
    -        # list, and the inherited dictionary is updated with the new key/value pair.
    -        if key not in self:
    -            root = self.__root
    -            last = root[0]
    -            last[1] = root[0] = self.__map[key] = [last, root, key]
    -        dict_setitem(self, key, value)
    -
    -    def __delitem__(self, key, dict_delitem=dict.__delitem__):
    -        'od.__delitem__(y) <==> del od[y]'
    -        # Deleting an existing item uses self.__map to find the link which is
    -        # then removed by updating the links in the predecessor and successor nodes.
    -        dict_delitem(self, key)
    -        link_prev, link_next, key = self.__map.pop(key)
    -        link_prev[1] = link_next
    -        link_next[0] = link_prev
    -
    -    def __iter__(self):
    -        'od.__iter__() <==> iter(od)'
    -        root = self.__root
    -        curr = root[1]
    -        while curr is not root:
    -            yield curr[2]
    -            curr = curr[1]
    -
    -    def __reversed__(self):
    -        'od.__reversed__() <==> reversed(od)'
    -        root = self.__root
    -        curr = root[0]
    -        while curr is not root:
    -            yield curr[2]
    -            curr = curr[0]
    -
    -    def clear(self):
    -        'od.clear() -> None.  Remove all items from od.'
    -        try:
    -            for node in self.__map.itervalues():
    -                del node[:]
    -            root = self.__root
    -            root[:] = [root, root, None]
    -            self.__map.clear()
    -        except AttributeError:
    -            pass
    -        dict.clear(self)
    -
    -    def popitem(self, last=True):
    -        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
    -        Pairs are returned in LIFO order if last is true or FIFO order if false.
    -
    -        '''
    -        if not self:
    -            raise KeyError('dictionary is empty')
    -        root = self.__root
    -        if last:
    -            link = root[0]
    -            link_prev = link[0]
    -            link_prev[1] = root
    -            root[0] = link_prev
    -        else:
    -            link = root[1]
    -            link_next = link[1]
    -            root[1] = link_next
    -            link_next[0] = root
    -        key = link[2]
    -        del self.__map[key]
    -        value = dict.pop(self, key)
    -        return key, value
    -
    -    # -- the following methods do not depend on the internal structure --
    -
    -    def keys(self):
    -        'od.keys() -> list of keys in od'
    -        return list(self)
    -
    -    def values(self):
    -        'od.values() -> list of values in od'
    -        return [self[key] for key in self]
    -
    -    def items(self):
    -        'od.items() -> list of (key, value) pairs in od'
    -        return [(key, self[key]) for key in self]
    -
    -    def iterkeys(self):
    -        'od.iterkeys() -> an iterator over the keys in od'
    -        return iter(self)
    -
    -    def itervalues(self):
    -        'od.itervalues -> an iterator over the values in od'
    -        for k in self:
    -            yield self[k]
    -
    -    def iteritems(self):
    -        'od.iteritems -> an iterator over the (key, value) items in od'
    -        for k in self:
    -            yield (k, self[k])
    -
    -    def update(*args, **kwds):
    -        '''od.update(E, **F) -> None.  Update od from dict/iterable E and F.
    -
    -        If E is a dict instance, does:           for k in E: od[k] = E[k]
    -        If E has a .keys() method, does:         for k in E.keys(): od[k] = E[k]
    -        Or if E is an iterable of items, does:   for k, v in E: od[k] = v
    -        In either case, this is followed by:     for k, v in F.items(): od[k] = v
    -
    -        '''
    -        if len(args) > 2:
    -            raise TypeError('update() takes at most 2 positional '
    -                            'arguments (%d given)' % (len(args),))
    -        elif not args:
    -            raise TypeError('update() takes at least 1 argument (0 given)')
    -        self = args[0]
    -        # Make progressively weaker assumptions about "other"
    -        other = ()
    -        if len(args) == 2:
    -            other = args[1]
    -        if isinstance(other, dict):
    -            for key in other:
    -                self[key] = other[key]
    -        elif hasattr(other, 'keys'):
    -            for key in other.keys():
    -                self[key] = other[key]
    -        else:
    -            for key, value in other:
    -                self[key] = value
    -        for key, value in kwds.items():
    -            self[key] = value
    -
    -    __update = update  # let subclasses override update without breaking __init__
    -
    -    __marker = object()
    -
    -    def pop(self, key, default=__marker):
    -        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
    -        If key is not found, d is returned if given, otherwise KeyError is raised.
    -
    -        '''
    -        if key in self:
    -            result = self[key]
    -            del self[key]
    -            return result
    -        if default is self.__marker:
    -            raise KeyError(key)
    -        return default
    -
    -    def setdefault(self, key, default=None):
    -        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
    -        if key in self:
    -            return self[key]
    -        self[key] = default
    -        return default
    -
    -    def __repr__(self, _repr_running={}):
    -        'od.__repr__() <==> repr(od)'
    -        call_key = id(self), _get_ident()
    -        if call_key in _repr_running:
    -            return '...'
    -        _repr_running[call_key] = 1
    -        try:
    -            if not self:
    -                return '%s()' % (self.__class__.__name__,)
    -            return '%s(%r)' % (self.__class__.__name__, self.items())
    -        finally:
    -            del _repr_running[call_key]
    -
    -    def __reduce__(self):
    -        'Return state information for pickling'
    -        items = [[k, self[k]] for k in self]
    -        inst_dict = vars(self).copy()
    -        for k in vars(OrderedDict()):
    -            inst_dict.pop(k, None)
    -        if inst_dict:
    -            return (self.__class__, (items,), inst_dict)
    -        return self.__class__, (items,)
    -
    -    def copy(self):
    -        'od.copy() -> a shallow copy of od'
    -        return self.__class__(self)
    -
    -    @classmethod
    -    def fromkeys(cls, iterable, value=None):
    -        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
    -        and values equal to v (which defaults to None).
    -
    -        '''
    -        d = cls()
    -        for key in iterable:
    -            d[key] = value
    -        return d
    -
    -    def __eq__(self, other):
    -        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
    -        while comparison to a regular mapping is order-insensitive.
    -
    -        '''
    -        if isinstance(other, OrderedDict):
    -            return len(self)==len(other) and self.items() == other.items()
    -        return dict.__eq__(self, other)
    -
    -    def __ne__(self, other):
    -        return not self == other
    -
    -    # -- the following methods are only used in Python 2.7 --
    -
    -    def viewkeys(self):
    -        "od.viewkeys() -> a set-like object providing a view on od's keys"
    -        return KeysView(self)
    -
    -    def viewvalues(self):
    -        "od.viewvalues() -> an object providing a view on od's values"
    -        return ValuesView(self)
    -
    -    def viewitems(self):
    -        "od.viewitems() -> a set-like object providing a view on od's items"
    -        return ItemsView(self)
    diff --git a/lib/urllib3/packages/six.py b/lib/urllib3/packages/six.py
    index 190c0239..31442409 100644
    --- a/lib/urllib3/packages/six.py
    +++ b/lib/urllib3/packages/six.py
    @@ -1,6 +1,4 @@
    -"""Utilities for writing code that runs on Python 2 and 3"""
    -
    -# Copyright (c) 2010-2015 Benjamin Peterson
    +# Copyright (c) 2010-2019 Benjamin Peterson
     #
     # Permission is hereby granted, free of charge, to any person obtaining a copy
     # of this software and associated documentation files (the "Software"), to deal
    @@ -20,6 +18,8 @@
     # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
     # SOFTWARE.
     
    +"""Utilities for writing code that runs on Python 2 and 3"""
    +
     from __future__ import absolute_import
     
     import functools
    @@ -29,7 +29,7 @@ import sys
     import types
     
     __author__ = "Benjamin Peterson <benjamin@python.org>"
    -__version__ = "1.10.0"
    +__version__ = "1.12.0"
     
     
     # Useful for very coarse version differentiation.
    @@ -38,15 +38,15 @@ PY3 = sys.version_info[0] == 3
     PY34 = sys.version_info[0:2] >= (3, 4)
     
     if PY3:
    -    string_types = str,
    -    integer_types = int,
    -    class_types = type,
    +    string_types = (str,)
    +    integer_types = (int,)
    +    class_types = (type,)
         text_type = str
         binary_type = bytes
     
         MAXSIZE = sys.maxsize
     else:
    -    string_types = basestring,
    +    string_types = (basestring,)
         integer_types = (int, long)
         class_types = (type, types.ClassType)
         text_type = unicode
    @@ -58,9 +58,9 @@ else:
         else:
             # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
             class X(object):
    -
                 def __len__(self):
                     return 1 << 31
    +
             try:
                 len(X())
             except OverflowError:
    @@ -84,7 +84,6 @@ def _import_module(name):
     
     
     class _LazyDescr(object):
    -
         def __init__(self, name):
             self.name = name
     
    @@ -101,7 +100,6 @@ class _LazyDescr(object):
     
     
     class MovedModule(_LazyDescr):
    -
         def __init__(self, name, old, new=None):
             super(MovedModule, self).__init__(name)
             if PY3:
    @@ -122,7 +120,6 @@ class MovedModule(_LazyDescr):
     
     
     class _LazyModule(types.ModuleType):
    -
         def __init__(self, name):
             super(_LazyModule, self).__init__(name)
             self.__doc__ = self.__class__.__doc__
    @@ -137,7 +134,6 @@ class _LazyModule(types.ModuleType):
     
     
     class MovedAttribute(_LazyDescr):
    -
         def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
             super(MovedAttribute, self).__init__(name)
             if PY3:
    @@ -221,28 +217,36 @@ class _SixMetaPathImporter(object):
             Required, if is_package is implemented"""
             self.__get_module(fullname)  # eventually raises ImportError
             return None
    +
         get_source = get_code  # same as get_code
     
    +
     _importer = _SixMetaPathImporter(__name__)
     
     
     class _MovedItems(_LazyModule):
     
         """Lazy loading of moved objects"""
    +
         __path__ = []  # mark as package
     
     
     _moved_attributes = [
         MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
         MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
    -    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
    +    MovedAttribute(
    +        "filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"
    +    ),
         MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
         MovedAttribute("intern", "__builtin__", "sys"),
         MovedAttribute("map", "itertools", "builtins", "imap", "map"),
         MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
         MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
    +    MovedAttribute("getoutput", "commands", "subprocess"),
         MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
    -    MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
    +    MovedAttribute(
    +        "reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"
    +    ),
         MovedAttribute("reduce", "__builtin__", "functools"),
         MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
         MovedAttribute("StringIO", "StringIO", "io"),
    @@ -251,7 +255,9 @@ _moved_attributes = [
         MovedAttribute("UserString", "UserString", "collections"),
         MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
         MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
    -    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
    +    MovedAttribute(
    +        "zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"
    +    ),
         MovedModule("builtins", "__builtin__"),
         MovedModule("configparser", "ConfigParser"),
         MovedModule("copyreg", "copy_reg"),
    @@ -262,10 +268,13 @@ _moved_attributes = [
         MovedModule("html_entities", "htmlentitydefs", "html.entities"),
         MovedModule("html_parser", "HTMLParser", "html.parser"),
         MovedModule("http_client", "httplib", "http.client"),
    -    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
    -    MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
    -    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
         MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
    +    MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
    +    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
    +    MovedModule(
    +        "email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"
    +    ),
    +    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
         MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
         MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
         MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
    @@ -283,15 +292,12 @@ _moved_attributes = [
         MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
         MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
         MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
    -    MovedModule("tkinter_colorchooser", "tkColorChooser",
    -                "tkinter.colorchooser"),
    -    MovedModule("tkinter_commondialog", "tkCommonDialog",
    -                "tkinter.commondialog"),
    +    MovedModule("tkinter_colorchooser", "tkColorChooser", "tkinter.colorchooser"),
    +    MovedModule("tkinter_commondialog", "tkCommonDialog", "tkinter.commondialog"),
         MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
         MovedModule("tkinter_font", "tkFont", "tkinter.font"),
         MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
    -    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
    -                "tkinter.simpledialog"),
    +    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", "tkinter.simpledialog"),
         MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
         MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
         MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
    @@ -301,9 +307,7 @@ _moved_attributes = [
     ]
     # Add windows specific modules.
     if sys.platform == "win32":
    -    _moved_attributes += [
    -        MovedModule("winreg", "_winreg"),
    -    ]
    +    _moved_attributes += [MovedModule("winreg", "_winreg")]
     
     for attr in _moved_attributes:
         setattr(_MovedItems, attr.name, attr)
    @@ -337,10 +341,14 @@ _urllib_parse_moved_attributes = [
         MovedAttribute("quote_plus", "urllib", "urllib.parse"),
         MovedAttribute("unquote", "urllib", "urllib.parse"),
         MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
    +    MovedAttribute(
    +        "unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"
    +    ),
         MovedAttribute("urlencode", "urllib", "urllib.parse"),
         MovedAttribute("splitquery", "urllib", "urllib.parse"),
         MovedAttribute("splittag", "urllib", "urllib.parse"),
         MovedAttribute("splituser", "urllib", "urllib.parse"),
    +    MovedAttribute("splitvalue", "urllib", "urllib.parse"),
         MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
         MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
         MovedAttribute("uses_params", "urlparse", "urllib.parse"),
    @@ -353,8 +361,11 @@ del attr
     
     Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
     
    -_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
    -                      "moves.urllib_parse", "moves.urllib.parse")
    +_importer._add_module(
    +    Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
    +    "moves.urllib_parse",
    +    "moves.urllib.parse",
    +)
     
     
     class Module_six_moves_urllib_error(_LazyModule):
    @@ -373,8 +384,11 @@ del attr
     
     Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
     
    -_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
    -                      "moves.urllib_error", "moves.urllib.error")
    +_importer._add_module(
    +    Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
    +    "moves.urllib_error",
    +    "moves.urllib.error",
    +)
     
     
     class Module_six_moves_urllib_request(_LazyModule):
    @@ -416,6 +430,8 @@ _urllib_request_moved_attributes = [
         MovedAttribute("URLopener", "urllib", "urllib.request"),
         MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
         MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
    +    MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
    +    MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
     ]
     for attr in _urllib_request_moved_attributes:
         setattr(Module_six_moves_urllib_request, attr.name, attr)
    @@ -423,8 +439,11 @@ del attr
     
     Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
     
    -_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
    -                      "moves.urllib_request", "moves.urllib.request")
    +_importer._add_module(
    +    Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
    +    "moves.urllib_request",
    +    "moves.urllib.request",
    +)
     
     
     class Module_six_moves_urllib_response(_LazyModule):
    @@ -444,8 +463,11 @@ del attr
     
     Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
     
    -_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
    -                      "moves.urllib_response", "moves.urllib.response")
    +_importer._add_module(
    +    Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
    +    "moves.urllib_response",
    +    "moves.urllib.response",
    +)
     
     
     class Module_six_moves_urllib_robotparser(_LazyModule):
    @@ -454,21 +476,27 @@ class Module_six_moves_urllib_robotparser(_LazyModule):
     
     
     _urllib_robotparser_moved_attributes = [
    -    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
    +    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser")
     ]
     for attr in _urllib_robotparser_moved_attributes:
         setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
     del attr
     
    -Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
    +Module_six_moves_urllib_robotparser._moved_attributes = (
    +    _urllib_robotparser_moved_attributes
    +)
     
    -_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
    -                      "moves.urllib_robotparser", "moves.urllib.robotparser")
    +_importer._add_module(
    +    Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
    +    "moves.urllib_robotparser",
    +    "moves.urllib.robotparser",
    +)
     
     
     class Module_six_moves_urllib(types.ModuleType):
     
         """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
    +
         __path__ = []  # mark as package
         parse = _importer._get_module("moves.urllib_parse")
         error = _importer._get_module("moves.urllib_error")
    @@ -477,10 +505,12 @@ class Module_six_moves_urllib(types.ModuleType):
         robotparser = _importer._get_module("moves.urllib_robotparser")
     
         def __dir__(self):
    -        return ['parse', 'error', 'request', 'response', 'robotparser']
    +        return ["parse", "error", "request", "response", "robotparser"]
     
    -_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
    -                      "moves.urllib")
    +
    +_importer._add_module(
    +    Module_six_moves_urllib(__name__ + ".moves.urllib"), "moves.urllib"
    +)
     
     
     def add_move(move):
    @@ -520,19 +550,24 @@ else:
     try:
         advance_iterator = next
     except NameError:
    +
         def advance_iterator(it):
             return it.next()
    +
    +
     next = advance_iterator
     
     
     try:
         callable = callable
     except NameError:
    +
         def callable(obj):
             return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
     
     
     if PY3:
    +
         def get_unbound_function(unbound):
             return unbound
     
    @@ -543,6 +578,7 @@ if PY3:
     
         Iterator = object
     else:
    +
         def get_unbound_function(unbound):
             return unbound.im_func
     
    @@ -553,13 +589,13 @@ else:
             return types.MethodType(func, None, cls)
     
         class Iterator(object):
    -
             def next(self):
                 return type(self).__next__(self)
     
         callable = callable
    -_add_doc(get_unbound_function,
    -         """Get the function out of a possibly unbound function""")
    +_add_doc(
    +    get_unbound_function, """Get the function out of a possibly unbound function"""
    +)
     
     
     get_method_function = operator.attrgetter(_meth_func)
    @@ -571,6 +607,7 @@ get_function_globals = operator.attrgetter(_func_globals)
     
     
     if PY3:
    +
         def iterkeys(d, **kw):
             return iter(d.keys(**kw))
     
    @@ -589,6 +626,7 @@ if PY3:
     
         viewitems = operator.methodcaller("items")
     else:
    +
         def iterkeys(d, **kw):
             return d.iterkeys(**kw)
     
    @@ -609,28 +647,33 @@ else:
     
     _add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
     _add_doc(itervalues, "Return an iterator over the values of a dictionary.")
    -_add_doc(iteritems,
    -         "Return an iterator over the (key, value) pairs of a dictionary.")
    -_add_doc(iterlists,
    -         "Return an iterator over the (key, [values]) pairs of a dictionary.")
    +_add_doc(iteritems, "Return an iterator over the (key, value) pairs of a dictionary.")
    +_add_doc(
    +    iterlists, "Return an iterator over the (key, [values]) pairs of a dictionary."
    +)
     
     
     if PY3:
    +
         def b(s):
             return s.encode("latin-1")
     
         def u(s):
             return s
    +
         unichr = chr
         import struct
    +
         int2byte = struct.Struct(">B").pack
         del struct
         byte2int = operator.itemgetter(0)
         indexbytes = operator.getitem
         iterbytes = iter
         import io
    +
         StringIO = io.StringIO
         BytesIO = io.BytesIO
    +    del io
         _assertCountEqual = "assertCountEqual"
         if sys.version_info[1] <= 1:
             _assertRaisesRegex = "assertRaisesRegexp"
    @@ -639,12 +682,15 @@ if PY3:
             _assertRaisesRegex = "assertRaisesRegex"
             _assertRegex = "assertRegex"
     else:
    +
         def b(s):
             return s
    +
         # Workaround for standalone backslash
     
         def u(s):
    -        return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
    +        return unicode(s.replace(r"\\", r"\\\\"), "unicode_escape")
    +
         unichr = unichr
         int2byte = chr
     
    @@ -653,8 +699,10 @@ else:
     
         def indexbytes(buf, i):
             return ord(buf[i])
    +
         iterbytes = functools.partial(itertools.imap, ord)
         import StringIO
    +
         StringIO = BytesIO = StringIO.StringIO
         _assertCountEqual = "assertItemsEqual"
         _assertRaisesRegex = "assertRaisesRegexp"
    @@ -679,13 +727,19 @@ if PY3:
         exec_ = getattr(moves.builtins, "exec")
     
         def reraise(tp, value, tb=None):
    -        if value is None:
    -            value = tp()
    -        if value.__traceback__ is not tb:
    -            raise value.with_traceback(tb)
    -        raise value
    +        try:
    +            if value is None:
    +                value = tp()
    +            if value.__traceback__ is not tb:
    +                raise value.with_traceback(tb)
    +            raise value
    +        finally:
    +            value = None
    +            tb = None
    +
     
     else:
    +
         def exec_(_code_, _globs_=None, _locs_=None):
             """Execute code in a namespace."""
             if _globs_ is None:
    @@ -698,28 +752,45 @@ else:
                 _locs_ = _globs_
             exec("""exec _code_ in _globs_, _locs_""")
     
    -    exec_("""def reraise(tp, value, tb=None):
    -    raise tp, value, tb
    -""")
    +    exec_(
    +        """def reraise(tp, value, tb=None):
    +    try:
    +        raise tp, value, tb
    +    finally:
    +        tb = None
    +"""
    +    )
     
     
     if sys.version_info[:2] == (3, 2):
    -    exec_("""def raise_from(value, from_value):
    -    if from_value is None:
    -        raise value
    -    raise value from from_value
    -""")
    +    exec_(
    +        """def raise_from(value, from_value):
    +    try:
    +        if from_value is None:
    +            raise value
    +        raise value from from_value
    +    finally:
    +        value = None
    +"""
    +    )
     elif sys.version_info[:2] > (3, 2):
    -    exec_("""def raise_from(value, from_value):
    -    raise value from from_value
    -""")
    +    exec_(
    +        """def raise_from(value, from_value):
    +    try:
    +        raise value from from_value
    +    finally:
    +        value = None
    +"""
    +    )
     else:
    +
         def raise_from(value, from_value):
             raise value
     
     
     print_ = getattr(moves.builtins, "print", None)
     if print_ is None:
    +
         def print_(*args, **kwargs):
             """The new-style print function for Python 2.4 and 2.5."""
             fp = kwargs.pop("file", sys.stdout)
    @@ -730,14 +801,17 @@ if print_ is None:
                 if not isinstance(data, basestring):
                     data = str(data)
                 # If the file has an encoding, encode unicode with it.
    -            if (isinstance(fp, file) and
    -                    isinstance(data, unicode) and
    -                    fp.encoding is not None):
    +            if (
    +                isinstance(fp, file)
    +                and isinstance(data, unicode)
    +                and fp.encoding is not None
    +            ):
                     errors = getattr(fp, "errors", None)
                     if errors is None:
                         errors = "strict"
                     data = data.encode(fp.encoding, errors)
                 fp.write(data)
    +
             want_unicode = False
             sep = kwargs.pop("sep", None)
             if sep is not None:
    @@ -773,6 +847,8 @@ if print_ is None:
                     write(sep)
                 write(arg)
             write(end)
    +
    +
     if sys.version_info[:2] < (3, 3):
         _print = print_
     
    @@ -783,16 +859,24 @@ if sys.version_info[:2] < (3, 3):
             if flush and fp is not None:
                 fp.flush()
     
    +
     _add_doc(reraise, """Reraise an exception.""")
     
     if sys.version_info[0:2] < (3, 4):
    -    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
    -              updated=functools.WRAPPER_UPDATES):
    +
    +    def wraps(
    +        wrapped,
    +        assigned=functools.WRAPPER_ASSIGNMENTS,
    +        updated=functools.WRAPPER_UPDATES,
    +    ):
             def wrapper(f):
                 f = functools.wraps(wrapped, assigned, updated)(f)
                 f.__wrapped__ = wrapped
                 return f
    +
             return wrapper
    +
    +
     else:
         wraps = functools.wraps
     
    @@ -802,29 +886,95 @@ def with_metaclass(meta, *bases):
         # This requires a bit of explanation: the basic idea is to make a dummy
         # metaclass for one level of class instantiation that replaces itself with
         # the actual metaclass.
    -    class metaclass(meta):
    -
    +    class metaclass(type):
             def __new__(cls, name, this_bases, d):
                 return meta(name, bases, d)
    -    return type.__new__(metaclass, 'temporary_class', (), {})
    +
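    +        # Editor note: forwarding __prepare__ keeps metaclasses that build a
    +        # custom class namespace working through this temporary metaclass.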
    +        @classmethod
    +        def __prepare__(cls, name, this_bases):
    +            return meta.__prepare__(name, bases)
    +
    +    return type.__new__(metaclass, "temporary_class", (), {})
     
     
     def add_metaclass(metaclass):
         """Class decorator for creating a class with a metaclass."""
    +
         def wrapper(cls):
             orig_vars = cls.__dict__.copy()
    -        slots = orig_vars.get('__slots__')
    +        slots = orig_vars.get("__slots__")
             if slots is not None:
                 if isinstance(slots, str):
                     slots = [slots]
                 for slots_var in slots:
                     orig_vars.pop(slots_var)
    -        orig_vars.pop('__dict__', None)
    -        orig_vars.pop('__weakref__', None)
    +        orig_vars.pop("__dict__", None)
    +        orig_vars.pop("__weakref__", None)
    +        if hasattr(cls, "__qualname__"):
    +            orig_vars["__qualname__"] = cls.__qualname__
             return metaclass(cls.__name__, cls.__bases__, orig_vars)
    +
         return wrapper
     
     
    +def ensure_binary(s, encoding="utf-8", errors="strict"):
    +    """Coerce **s** to six.binary_type.
    +
    +    For Python 2:
    +      - `unicode` -> encoded to `str`
    +      - `str` -> `str`
    +
    +    For Python 3:
    +      - `str` -> encoded to `bytes`
    +      - `bytes` -> `bytes`
    +    """
    +    if isinstance(s, text_type):
    +        return s.encode(encoding, errors)
    +    elif isinstance(s, binary_type):
    +        return s
    +    else:
    +        raise TypeError("not expecting type '%s'" % type(s))
    +
    +
    +def ensure_str(s, encoding="utf-8", errors="strict"):
    +    """Coerce *s* to `str`.
    +
    +    For Python 2:
    +      - `unicode` -> encoded to `str`
    +      - `str` -> `str`
    +
    +    For Python 3:
    +      - `str` -> `str`
    +      - `bytes` -> decoded to `str`
    +    """
    +    if not isinstance(s, (text_type, binary_type)):
    +        raise TypeError("not expecting type '%s'" % type(s))
    +    if PY2 and isinstance(s, text_type):
    +        s = s.encode(encoding, errors)
    +    elif PY3 and isinstance(s, binary_type):
    +        s = s.decode(encoding, errors)
    +    return s
    +
    +
    +def ensure_text(s, encoding="utf-8", errors="strict"):
    +    """Coerce *s* to six.text_type.
    +
    +    For Python 2:
    +      - `unicode` -> `unicode`
    +      - `str` -> `unicode`
    +
    +    For Python 3:
    +      - `str` -> `str`
    +      - `bytes` -> decoded to `str`
    +    """
    +    if isinstance(s, binary_type):
    +        return s.decode(encoding, errors)
    +    elif isinstance(s, text_type):
    +        return s
    +    else:
    +        raise TypeError("not expecting type '%s'" % type(s))
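    +
    +
    +# Editor sketch (hedged), assuming the UTF-8 defaults above:
    +#   ensure_binary(u"caf\xe9")   -> b"caf\xc3\xa9"   on Python 2 and 3
    +#   ensure_text(b"caf\xc3\xa9") -> u"caf\xe9"       on Python 2 and 3
    +#   ensure_str(x) always returns the running interpreter's native str type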
    +
    +
     def python_2_unicode_compatible(klass):
         """
         A decorator that defines __unicode__ and __str__ methods under Python 2.
    @@ -834,12 +984,13 @@ def python_2_unicode_compatible(klass):
         returning text and apply this decorator to the class.
         """
         if PY2:
    -        if '__str__' not in klass.__dict__:
    -            raise ValueError("@python_2_unicode_compatible cannot be applied "
    -                             "to %s because it doesn't define __str__()." %
    -                             klass.__name__)
    +        if "__str__" not in klass.__dict__:
    +            raise ValueError(
    +                "@python_2_unicode_compatible cannot be applied "
    +                "to %s because it doesn't define __str__()." % klass.__name__
    +            )
             klass.__unicode__ = klass.__str__
    -        klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    +        klass.__str__ = lambda self: self.__unicode__().encode("utf-8")
         return klass
     
     
    @@ -859,8 +1010,10 @@ if sys.meta_path:
             # be floating around. Therefore, we can't use isinstance() to check for
             # the six meta path importer, since the other six instance will have
             # inserted an importer with different class.
    -        if (type(importer).__name__ == "_SixMetaPathImporter" and
    -                importer.name == __name__):
    +        if (
    +            type(importer).__name__ == "_SixMetaPathImporter"
    +            and importer.name == __name__
    +        ):
                 del sys.meta_path[i]
                 break
         del i, importer
    diff --git a/lib/urllib3/packages/ssl_match_hostname/__init__.py b/lib/urllib3/packages/ssl_match_hostname/__init__.py
    index d6594eb2..75b6bb1c 100644
    --- a/lib/urllib3/packages/ssl_match_hostname/__init__.py
    +++ b/lib/urllib3/packages/ssl_match_hostname/__init__.py
    @@ -16,4 +16,4 @@ except ImportError:
             from ._implementation import CertificateError, match_hostname
     
     # Not needed, but documenting what we provide.
    -__all__ = ('CertificateError', 'match_hostname')
    +__all__ = ("CertificateError", "match_hostname")
    diff --git a/lib/urllib3/packages/ssl_match_hostname/_implementation.py b/lib/urllib3/packages/ssl_match_hostname/_implementation.py
    index 1fd42f38..2d8e7a17 100644
    --- a/lib/urllib3/packages/ssl_match_hostname/_implementation.py
    +++ b/lib/urllib3/packages/ssl_match_hostname/_implementation.py
    @@ -9,14 +9,13 @@ import sys
     # ipaddress has been backported to 2.6+ in pypi.  If it is installed on the
     # system, use it to handle IPAddress ServerAltnames (this was added in
     # python-3.5) otherwise only do DNS matching.  This allows
    -# backports.ssl_match_hostname to continue to be used all the way back to
    -# python-2.4.
    +# backports.ssl_match_hostname to continue to be used in Python 2.7.
     try:
         import ipaddress
     except ImportError:
         ipaddress = None
     
    -__version__ = '3.5.0.1'
    +__version__ = "3.5.0.1"
     
     
     class CertificateError(ValueError):
    @@ -34,18 +33,19 @@ def _dnsname_match(dn, hostname, max_wildcards=1):
     
         # Ported from python3-syntax:
         # leftmost, *remainder = dn.split(r'.')
    -    parts = dn.split(r'.')
    +    parts = dn.split(r".")
         leftmost = parts[0]
         remainder = parts[1:]
     
    -    wildcards = leftmost.count('*')
    +    wildcards = leftmost.count("*")
         if wildcards > max_wildcards:
             # Issue #17980: avoid denials of service by refusing more
             # than one wildcard per fragment.  A survey of established
             # policy among SSL implementations showed it to be a
             # reasonable choice.
             raise CertificateError(
    -            "too many wildcards in certificate DNS name: " + repr(dn))
    +            "too many wildcards in certificate DNS name: " + repr(dn)
    +        )
     
         # speed up common case w/o wildcards
         if not wildcards:
    @@ -54,11 +54,11 @@ def _dnsname_match(dn, hostname, max_wildcards=1):
         # RFC 6125, section 6.4.3, subitem 1.
         # The client SHOULD NOT attempt to match a presented identifier in which
         # the wildcard character comprises a label other than the left-most label.
    -    if leftmost == '*':
    +    if leftmost == "*":
             # When '*' is a fragment by itself, it matches a non-empty dotless
             # fragment.
    -        pats.append('[^.]+')
    -    elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
    +        pats.append("[^.]+")
    +    elif leftmost.startswith("xn--") or hostname.startswith("xn--"):
             # RFC 6125, section 6.4.3, subitem 3.
             # The client SHOULD NOT attempt to match a presented identifier
             # where the wildcard character is embedded within an A-label or
    @@ -66,21 +66,22 @@ def _dnsname_match(dn, hostname, max_wildcards=1):
             pats.append(re.escape(leftmost))
         else:
             # Otherwise, '*' matches any dotless string, e.g. www*
    -        pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
    +        pats.append(re.escape(leftmost).replace(r"\*", "[^.]*"))
     
         # add the remaining fragments, ignore any wildcards
         for frag in remainder:
             pats.append(re.escape(frag))
     
    -    pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
    +    pat = re.compile(r"\A" + r"\.".join(pats) + r"\Z", re.IGNORECASE)
         return pat.match(hostname)
     
     
     def _to_unicode(obj):
         if isinstance(obj, str) and sys.version_info < (3,):
    -        obj = unicode(obj, encoding='ascii', errors='strict')
    +        obj = unicode(obj, encoding="ascii", errors="strict")
         return obj
     
    +
     def _ipaddress_match(ipname, host_ip):
         """Exact matching of IP addresses.
     
    @@ -102,9 +103,11 @@ def match_hostname(cert, hostname):
         returns nothing.
         """
         if not cert:
    -        raise ValueError("empty or no certificate, match_hostname needs a "
    -                         "SSL socket or SSL context with either "
    -                         "CERT_OPTIONAL or CERT_REQUIRED")
    +        raise ValueError(
    +            "empty or no certificate, match_hostname needs a "
    +            "SSL socket or SSL context with either "
    +            "CERT_OPTIONAL or CERT_REQUIRED"
    +        )
         try:
             # Divergence from upstream: ipaddress can't handle byte str
             host_ip = ipaddress.ip_address(_to_unicode(hostname))
    @@ -123,35 +126,37 @@ def match_hostname(cert, hostname):
             else:
                 raise
         dnsnames = []
    -    san = cert.get('subjectAltName', ())
    +    san = cert.get("subjectAltName", ())
         for key, value in san:
    -        if key == 'DNS':
    +        if key == "DNS":
                 if host_ip is None and _dnsname_match(value, hostname):
                     return
                 dnsnames.append(value)
    -        elif key == 'IP Address':
    +        elif key == "IP Address":
                 if host_ip is not None and _ipaddress_match(value, host_ip):
                     return
                 dnsnames.append(value)
         if not dnsnames:
             # The subject is only checked when there is no dNSName entry
             # in subjectAltName
    -        for sub in cert.get('subject', ()):
    +        for sub in cert.get("subject", ()):
                 for key, value in sub:
                     # XXX according to RFC 2818, the most specific Common Name
                     # must be used.
    -                if key == 'commonName':
    +                if key == "commonName":
                         if _dnsname_match(value, hostname):
                             return
                         dnsnames.append(value)
         if len(dnsnames) > 1:
    -        raise CertificateError("hostname %r "
    -            "doesn't match either of %s"
    -            % (hostname, ', '.join(map(repr, dnsnames))))
    +        raise CertificateError(
    +            "hostname %r "
    +            "doesn't match either of %s" % (hostname, ", ".join(map(repr, dnsnames)))
    +        )
         elif len(dnsnames) == 1:
    -        raise CertificateError("hostname %r "
    -            "doesn't match %r"
    -            % (hostname, dnsnames[0]))
    +        raise CertificateError(
    +            "hostname %r " "doesn't match %r" % (hostname, dnsnames[0])
    +        )
         else:
    -        raise CertificateError("no appropriate commonName or "
    -            "subjectAltName fields were found")
    +        raise CertificateError(
    +            "no appropriate commonName or " "subjectAltName fields were found"
    +        )
    diff --git a/lib/urllib3/poolmanager.py b/lib/urllib3/poolmanager.py
    index 4ae91744..242a2f82 100644
    --- a/lib/urllib3/poolmanager.py
    +++ b/lib/urllib3/poolmanager.py
    @@ -7,51 +7,62 @@ from ._collections import RecentlyUsedContainer
     from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
     from .connectionpool import port_by_scheme
     from .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown
    +from .packages import six
     from .packages.six.moves.urllib.parse import urljoin
     from .request import RequestMethods
     from .util.url import parse_url
     from .util.retry import Retry
     
     
    -__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
    +__all__ = ["PoolManager", "ProxyManager", "proxy_from_url"]
     
     
     log = logging.getLogger(__name__)
     
    -SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
    -                'ssl_version', 'ca_cert_dir', 'ssl_context')
    +SSL_KEYWORDS = (
    +    "key_file",
    +    "cert_file",
    +    "cert_reqs",
    +    "ca_certs",
    +    "ssl_version",
    +    "ca_cert_dir",
    +    "ssl_context",
    +    "key_password",
    +)
     
     # All known keyword arguments that could be provided to the pool manager, its
     # pools, or the underlying connections. This is used to construct a pool key.
     _key_fields = (
    -    'key_scheme',  # str
    -    'key_host',  # str
    -    'key_port',  # int
    -    'key_timeout',  # int or float or Timeout
    -    'key_retries',  # int or Retry
    -    'key_strict',  # bool
    -    'key_block',  # bool
    -    'key_source_address',  # str
    -    'key_key_file',  # str
    -    'key_cert_file',  # str
    -    'key_cert_reqs',  # str
    -    'key_ca_certs',  # str
    -    'key_ssl_version',  # str
    -    'key_ca_cert_dir',  # str
    -    'key_ssl_context',  # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext
    -    'key_maxsize',  # int
    -    'key_headers',  # dict
    -    'key__proxy',  # parsed proxy url
    -    'key__proxy_headers',  # dict
    -    'key_socket_options',  # list of (level (int), optname (int), value (int or str)) tuples
    -    'key__socks_options',  # dict
    -    'key_assert_hostname',  # bool or string
    -    'key_assert_fingerprint',  # str
    +    "key_scheme",  # str
    +    "key_host",  # str
    +    "key_port",  # int
    +    "key_timeout",  # int or float or Timeout
    +    "key_retries",  # int or Retry
    +    "key_strict",  # bool
    +    "key_block",  # bool
    +    "key_source_address",  # str
    +    "key_key_file",  # str
    +    "key_key_password",  # str
    +    "key_cert_file",  # str
    +    "key_cert_reqs",  # str
    +    "key_ca_certs",  # str
    +    "key_ssl_version",  # str
    +    "key_ca_cert_dir",  # str
    +    "key_ssl_context",  # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext
    +    "key_maxsize",  # int
    +    "key_headers",  # dict
    +    "key__proxy",  # parsed proxy url
    +    "key__proxy_headers",  # dict
    +    "key_socket_options",  # list of (level (int), optname (int), value (int or str)) tuples
    +    "key__socks_options",  # dict
    +    "key_assert_hostname",  # bool or string
    +    "key_assert_fingerprint",  # str
    +    "key_server_hostname",  # str
     )
     
     #: The namedtuple class used to construct keys for the connection pool.
     #: All custom key schemes should include the fields in this key at a minimum.
    -PoolKey = collections.namedtuple('PoolKey', _key_fields)
    +PoolKey = collections.namedtuple("PoolKey", _key_fields)
     
     
     def _default_key_normalizer(key_class, request_context):
    @@ -76,24 +87,24 @@ def _default_key_normalizer(key_class, request_context):
         """
         # Since we mutate the dictionary, make a copy first
         context = request_context.copy()
    -    context['scheme'] = context['scheme'].lower()
    -    context['host'] = context['host'].lower()
    +    context["scheme"] = context["scheme"].lower()
    +    context["host"] = context["host"].lower()
     
         # These are both dictionaries and need to be transformed into frozensets
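    +    # (editor note: frozensets because these values must hash as part of the
    +    # PoolKey namedtuple, and plain dicts are unhashable)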
    -    for key in ('headers', '_proxy_headers', '_socks_options'):
    +    for key in ("headers", "_proxy_headers", "_socks_options"):
             if key in context and context[key] is not None:
                 context[key] = frozenset(context[key].items())
     
         # The socket_options key may be a list and needs to be transformed into a
         # tuple.
    -    socket_opts = context.get('socket_options')
    +    socket_opts = context.get("socket_options")
         if socket_opts is not None:
    -        context['socket_options'] = tuple(socket_opts)
    +        context["socket_options"] = tuple(socket_opts)
     
         # Map the kwargs to the names in the namedtuple - this is necessary since
         # namedtuples can't have fields starting with '_'.
         for key in list(context.keys()):
    -        context['key_' + key] = context.pop(key)
    +        context["key_" + key] = context.pop(key)
     
         # Default to ``None`` for keys missing from the context
         for field in key_class._fields:
    @@ -108,14 +119,11 @@ def _default_key_normalizer(key_class, request_context):
     #: Each PoolManager makes a copy of this dictionary so they can be configured
     #: globally here, or individually on the instance.
     key_fn_by_scheme = {
    -    'http': functools.partial(_default_key_normalizer, PoolKey),
    -    'https': functools.partial(_default_key_normalizer, PoolKey),
    +    "http": functools.partial(_default_key_normalizer, PoolKey),
    +    "https": functools.partial(_default_key_normalizer, PoolKey),
     }
     
    -pool_classes_by_scheme = {
    -    'http': HTTPConnectionPool,
    -    'https': HTTPSConnectionPool,
    -}
    +pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool}
     
     
     class PoolManager(RequestMethods):
    @@ -151,8 +159,7 @@ class PoolManager(RequestMethods):
         def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
             RequestMethods.__init__(self, headers)
             self.connection_pool_kw = connection_pool_kw
    -        self.pools = RecentlyUsedContainer(num_pools,
    -                                           dispose_func=lambda p: p.close())
    +        self.pools = RecentlyUsedContainer(num_pools, dispose_func=lambda p: p.close())
     
             # Locally set the pool classes and keys so other PoolManagers can
             # override them.
    @@ -185,10 +192,10 @@ class PoolManager(RequestMethods):
             # this function has historically only used the scheme, host, and port
             # in the positional args. When an API change is acceptable these can
             # be removed.
    -        for key in ('scheme', 'host', 'port'):
    +        for key in ("scheme", "host", "port"):
                 request_context.pop(key, None)
     
    -        if scheme == 'http':
    +        if scheme == "http":
                 for kw in SSL_KEYWORDS:
                     request_context.pop(kw, None)
     
    @@ -203,7 +210,7 @@ class PoolManager(RequestMethods):
             """
             self.pools.clear()
     
    -    def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None):
    +    def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None):
             """
             Get a :class:`ConnectionPool` based on the host, port, and scheme.
     
    @@ -218,11 +225,11 @@ class PoolManager(RequestMethods):
                 raise LocationValueError("No host specified.")
     
             request_context = self._merge_pool_kwargs(pool_kwargs)
    -        request_context['scheme'] = scheme or 'http'
    +        request_context["scheme"] = scheme or "http"
             if not port:
    -            port = port_by_scheme.get(request_context['scheme'].lower(), 80)
    -        request_context['port'] = port
    -        request_context['host'] = host
    +            port = port_by_scheme.get(request_context["scheme"].lower(), 80)
    +        request_context["port"] = port
    +        request_context["host"] = host
     
             return self.connection_from_context(request_context)
     
    @@ -233,7 +240,7 @@ class PoolManager(RequestMethods):
             ``request_context`` must at least contain the ``scheme`` key and its
             value must be a key in ``key_fn_by_scheme`` instance variable.
             """
    -        scheme = request_context['scheme'].lower()
    +        scheme = request_context["scheme"].lower()
             pool_key_constructor = self.key_fn_by_scheme[scheme]
             pool_key = pool_key_constructor(request_context)
     
    @@ -255,9 +262,9 @@ class PoolManager(RequestMethods):
                     return pool
     
                 # Make a fresh ConnectionPool of the desired type
    -            scheme = request_context['scheme']
    -            host = request_context['host']
    -            port = request_context['port']
    +            scheme = request_context["scheme"]
    +            host = request_context["host"]
    +            port = request_context["port"]
                 pool = self._new_pool(scheme, host, port, request_context=request_context)
                 self.pools[pool_key] = pool
     
    @@ -275,8 +282,9 @@ class PoolManager(RequestMethods):
             not used.
             """
             u = parse_url(url)
    -        return self.connection_from_host(u.host, port=u.port, scheme=u.scheme,
    -                                         pool_kwargs=pool_kwargs)
    +        return self.connection_from_host(
    +            u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs
    +        )
     
         def _merge_pool_kwargs(self, override):
             """
    @@ -310,10 +318,11 @@ class PoolManager(RequestMethods):
             u = parse_url(url)
             conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
     
    -        kw['assert_same_host'] = False
    -        kw['redirect'] = False
    -        if 'headers' not in kw:
    -            kw['headers'] = self.headers
    +        kw["assert_same_host"] = False
    +        kw["redirect"] = False
    +
    +        if "headers" not in kw:
    +            kw["headers"] = self.headers.copy()
     
             if self.proxy is not None and u.scheme == "http":
                 response = conn.urlopen(method, url, **kw)
    @@ -329,12 +338,23 @@ class PoolManager(RequestMethods):
     
             # RFC 7231, Section 6.4.4
             if response.status == 303:
    -            method = 'GET'
    +            method = "GET"
     
    -        retries = kw.get('retries')
    +        retries = kw.get("retries")
             if not isinstance(retries, Retry):
                 retries = Retry.from_int(retries, redirect=redirect)
     
    +        # Strip headers marked as unsafe to forward to the redirected location.
    +        # Check remove_headers_on_redirect to avoid a potential network call within
    +        # conn.is_same_host() which may use socket.gethostbyname() in the future.
    +        if retries.remove_headers_on_redirect and not conn.is_same_host(
    +            redirect_location
    +        ):
    +            headers = list(six.iterkeys(kw["headers"]))
    +            for header in headers:
    +                if header.lower() in retries.remove_headers_on_redirect:
    +                    kw["headers"].pop(header, None)
    +
             try:
                 retries = retries.increment(method, url, response=response, _pool=conn)
             except MaxRetryError:
    @@ -342,8 +362,8 @@ class PoolManager(RequestMethods):
                     raise
                 return response
     
    -        kw['retries'] = retries
    -        kw['redirect'] = redirect
    +        kw["retries"] = retries
    +        kw["redirect"] = redirect
     
             log.info("Redirecting %s -> %s", url, redirect_location)
             return self.urlopen(method, redirect_location, **kw)
    @@ -358,7 +378,7 @@ class ProxyManager(PoolManager):
             The URL of the proxy to be used.
     
         :param proxy_headers:
    -        A dictionary contaning headers that will be sent to the proxy. In case
    +        A dictionary containing headers that will be sent to the proxy. In case
             of HTTP they are being sent with each request, while in the
             HTTPS/CONNECT case they are sent only once. Could be used for proxy
             authentication.
    @@ -376,12 +396,21 @@ class ProxyManager(PoolManager):
     
         """
     
    -    def __init__(self, proxy_url, num_pools=10, headers=None,
    -                 proxy_headers=None, **connection_pool_kw):
    +    def __init__(
    +        self,
    +        proxy_url,
    +        num_pools=10,
    +        headers=None,
    +        proxy_headers=None,
    +        **connection_pool_kw
    +    ):
     
             if isinstance(proxy_url, HTTPConnectionPool):
    -            proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
    -                                        proxy_url.port)
    +            proxy_url = "%s://%s:%i" % (
    +                proxy_url.scheme,
    +                proxy_url.host,
    +                proxy_url.port,
    +            )
             proxy = parse_url(proxy_url)
             if not proxy.port:
                 port = port_by_scheme.get(proxy.scheme, 80)
    @@ -393,30 +422,31 @@ class ProxyManager(PoolManager):
             self.proxy = proxy
             self.proxy_headers = proxy_headers or {}
     
    -        connection_pool_kw['_proxy'] = self.proxy
    -        connection_pool_kw['_proxy_headers'] = self.proxy_headers
    +        connection_pool_kw["_proxy"] = self.proxy
    +        connection_pool_kw["_proxy_headers"] = self.proxy_headers
     
    -        super(ProxyManager, self).__init__(
    -            num_pools, headers, **connection_pool_kw)
    +        super(ProxyManager, self).__init__(num_pools, headers, **connection_pool_kw)
     
    -    def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None):
    +    def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None):
             if scheme == "https":
                 return super(ProxyManager, self).connection_from_host(
    -                host, port, scheme, pool_kwargs=pool_kwargs)
    +                host, port, scheme, pool_kwargs=pool_kwargs
    +            )
     
             return super(ProxyManager, self).connection_from_host(
    -            self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs)
    +            self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs
    +        )
     
         def _set_proxy_headers(self, url, headers=None):
             """
             Sets headers needed by proxies: specifically, the Accept and Host
             headers. Only sets headers not provided by the user.
             """
    -        headers_ = {'Accept': '*/*'}
    +        headers_ = {"Accept": "*/*"}
     
             netloc = parse_url(url).netloc
             if netloc:
    -            headers_['Host'] = netloc
    +            headers_["Host"] = netloc
     
             if headers:
                 headers_.update(headers)
    @@ -430,8 +460,8 @@ class ProxyManager(PoolManager):
                 # For proxied HTTPS requests, httplib sets the necessary headers
                 # on the CONNECT to the proxy. For HTTP, we'll definitely
                 # need to set 'Host' at the very least.
    -            headers = kw.get('headers', self.headers)
    -            kw['headers'] = self._set_proxy_headers(url, headers)
    +            headers = kw.get("headers", self.headers)
    +            kw["headers"] = self._set_proxy_headers(url, headers)
     
             return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
     
    diff --git a/lib/urllib3/request.py b/lib/urllib3/request.py
    index c0fddff0..55f160bb 100644
    --- a/lib/urllib3/request.py
    +++ b/lib/urllib3/request.py
    @@ -4,7 +4,7 @@ from .filepost import encode_multipart_formdata
     from .packages.six.moves.urllib.parse import urlencode
     
     
    -__all__ = ['RequestMethods']
    +__all__ = ["RequestMethods"]
     
     
     class RequestMethods(object):
    @@ -36,16 +36,25 @@ class RequestMethods(object):
             explicitly.
         """
     
    -    _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])
    +    _encode_url_methods = {"DELETE", "GET", "HEAD", "OPTIONS"}
     
         def __init__(self, headers=None):
             self.headers = headers or {}
     
    -    def urlopen(self, method, url, body=None, headers=None,
    -                encode_multipart=True, multipart_boundary=None,
    -                **kw):  # Abstract
    -        raise NotImplemented("Classes extending RequestMethods must implement "
    -                             "their own ``urlopen`` method.")
    +    def urlopen(
    +        self,
    +        method,
    +        url,
    +        body=None,
    +        headers=None,
    +        encode_multipart=True,
    +        multipart_boundary=None,
    +        **kw
    +    ):  # Abstract
    +        raise NotImplementedError(
    +            "Classes extending RequestMethods must implement "
    +            "their own ``urlopen`` method."
    +        )
     
         def request(self, method, url, fields=None, headers=None, **urlopen_kw):
             """
    @@ -60,17 +69,18 @@ class RequestMethods(object):
             """
             method = method.upper()
     
    -        if method in self._encode_url_methods:
    -            return self.request_encode_url(method, url, fields=fields,
    -                                           headers=headers,
    -                                           **urlopen_kw)
    -        else:
    -            return self.request_encode_body(method, url, fields=fields,
    -                                            headers=headers,
    -                                            **urlopen_kw)
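    +        # Editor note (assumption): the pre-encoding URL is recorded here so
    +        # lower layers, like the response's request_url field below, can
    +        # report what was originally requested.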
    +        urlopen_kw["request_url"] = url
     
    -    def request_encode_url(self, method, url, fields=None, headers=None,
    -                           **urlopen_kw):
    +        if method in self._encode_url_methods:
    +            return self.request_encode_url(
    +                method, url, fields=fields, headers=headers, **urlopen_kw
    +            )
    +        else:
    +            return self.request_encode_body(
    +                method, url, fields=fields, headers=headers, **urlopen_kw
    +            )
    +
    +    def request_encode_url(self, method, url, fields=None, headers=None, **urlopen_kw):
             """
             Make a request using :meth:`urlopen` with the ``fields`` encoded in
             the url. This is useful for request methods like GET, HEAD, DELETE, etc.
    @@ -78,17 +88,24 @@ class RequestMethods(object):
             if headers is None:
                 headers = self.headers
     
    -        extra_kw = {'headers': headers}
    +        extra_kw = {"headers": headers}
             extra_kw.update(urlopen_kw)
     
             if fields:
    -            url += '?' + urlencode(fields)
    +            url += "?" + urlencode(fields)
     
             return self.urlopen(method, url, **extra_kw)
     
    -    def request_encode_body(self, method, url, fields=None, headers=None,
    -                            encode_multipart=True, multipart_boundary=None,
    -                            **urlopen_kw):
    +    def request_encode_body(
    +        self,
    +        method,
    +        url,
    +        fields=None,
    +        headers=None,
    +        encode_multipart=True,
    +        multipart_boundary=None,
    +        **urlopen_kw
    +    ):
             """
             Make a request using :meth:`urlopen` with the ``fields`` encoded in
             the body. This is useful for request methods like POST, PUT, PATCH, etc.
    @@ -117,7 +134,7 @@ class RequestMethods(object):
                 }
     
             When uploading a file, providing a filename (the first parameter of the
    -        tuple) is optional but recommended to best mimick behavior of browsers.
    +        tuple) is optional but recommended to best mimic behavior of browsers.
     
             Note that if ``headers`` are supplied, the 'Content-Type' header will
             be overwritten because it depends on the dynamic random boundary string
    @@ -127,22 +144,28 @@ class RequestMethods(object):
             if headers is None:
                 headers = self.headers
     
    -        extra_kw = {'headers': {}}
    +        extra_kw = {"headers": {}}
     
             if fields:
    -            if 'body' in urlopen_kw:
    +            if "body" in urlopen_kw:
                     raise TypeError(
    -                    "request got values for both 'fields' and 'body', can only specify one.")
    +                    "request got values for both 'fields' and 'body', can only specify one."
    +                )
     
                 if encode_multipart:
    -                body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)
    +                body, content_type = encode_multipart_formdata(
    +                    fields, boundary=multipart_boundary
    +                )
                 else:
    -                body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'
    +                body, content_type = (
    +                    urlencode(fields),
    +                    "application/x-www-form-urlencoded",
    +                )
     
    -            extra_kw['body'] = body
    -            extra_kw['headers'] = {'Content-Type': content_type}
    +            extra_kw["body"] = body
    +            extra_kw["headers"] = {"Content-Type": content_type}
     
    -        extra_kw['headers'].update(headers)
    +        extra_kw["headers"].update(headers)
             extra_kw.update(urlopen_kw)
     
             return self.urlopen(method, url, **extra_kw)
    diff --git a/lib/urllib3/response.py b/lib/urllib3/response.py
    index d3e5a1e6..adc321e7 100644
    --- a/lib/urllib3/response.py
    +++ b/lib/urllib3/response.py
    @@ -6,12 +6,22 @@ import logging
     from socket import timeout as SocketTimeout
     from socket import error as SocketError
     
    +try:
    +    import brotli
    +except ImportError:
    +    brotli = None
    +
     from ._collections import HTTPHeaderDict
     from .exceptions import (
    -    BodyNotHttplibCompatible, ProtocolError, DecodeError, ReadTimeoutError,
    -    ResponseNotChunked, IncompleteRead, InvalidHeader
    +    BodyNotHttplibCompatible,
    +    ProtocolError,
    +    DecodeError,
    +    ReadTimeoutError,
    +    ResponseNotChunked,
    +    IncompleteRead,
    +    InvalidHeader,
     )
    -from .packages.six import string_types as basestring, binary_type, PY3
    +from .packages.six import string_types as basestring, PY3
     from .packages.six.moves import http_client as httplib
     from .connection import HTTPException, BaseSSLError
     from .util.response import is_fp_closed, is_response_to_head
    @@ -20,10 +30,9 @@ log = logging.getLogger(__name__)
     
     
     class DeflateDecoder(object):
    -
         def __init__(self):
             self._first_try = True
    -        self._data = binary_type()
    +        self._data = b""
             self._obj = zlib.decompressobj()
     
         def __getattr__(self, name):
    @@ -52,24 +61,94 @@ class DeflateDecoder(object):
                     self._data = None
     
     
    -class GzipDecoder(object):
    +class GzipDecoderState(object):
     
    +    FIRST_MEMBER = 0
    +    OTHER_MEMBERS = 1
    +    SWALLOW_DATA = 2
    +
    +
    +class GzipDecoder(object):
         def __init__(self):
             self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
    +        self._state = GzipDecoderState.FIRST_MEMBER
     
         def __getattr__(self, name):
             return getattr(self._obj, name)
     
         def decompress(self, data):
    -        if not data:
    -            return data
    -        return self._obj.decompress(data)
    +        ret = bytearray()
    +        if self._state == GzipDecoderState.SWALLOW_DATA or not data:
    +            return bytes(ret)
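    +        # Editor note: a gzip stream may contain several concatenated members;
    +        # keep inflating with a fresh decompressobj while unused_data remains.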
    +        while True:
    +            try:
    +                ret += self._obj.decompress(data)
    +            except zlib.error:
    +                previous_state = self._state
    +                # Ignore data after the first error
    +                self._state = GzipDecoderState.SWALLOW_DATA
    +                if previous_state == GzipDecoderState.OTHER_MEMBERS:
    +                    # Allow trailing garbage acceptable in other gzip clients
    +                    return bytes(ret)
    +                raise
    +            data = self._obj.unused_data
    +            if not data:
    +                return bytes(ret)
    +            self._state = GzipDecoderState.OTHER_MEMBERS
    +            self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
    +
    +
    +if brotli is not None:
    +
    +    class BrotliDecoder(object):
    +        # Supports both 'brotlipy' and 'Brotli' packages
    +        # since they share an import name. The top branches
    +        # are for 'brotlipy' and bottom branches for 'Brotli'
    +        def __init__(self):
    +            self._obj = brotli.Decompressor()
    +
    +        def decompress(self, data):
    +            if hasattr(self._obj, "decompress"):
    +                return self._obj.decompress(data)
    +            return self._obj.process(data)
    +
    +        def flush(self):
    +            if hasattr(self._obj, "flush"):
    +                return self._obj.flush()
    +            return b""
    +
    +
    +class MultiDecoder(object):
    +    """
    +    From RFC7231:
    +        If one or more encodings have been applied to a representation, the
    +        sender that applied the encodings MUST generate a Content-Encoding
    +        header field that lists the content codings in the order in which
    +        they were applied.
    +    """
    +
    +    def __init__(self, modes):
    +        self._decoders = [_get_decoder(m.strip()) for m in modes.split(",")]
    +
    +    def flush(self):
    +        return self._decoders[0].flush()
    +
    +    def decompress(self, data):
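    +        # Editor note: peel codings in reverse header order, since the
    +        # last-listed coding was applied last and must come off first.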
    +        for d in reversed(self._decoders):
    +            data = d.decompress(data)
    +        return data
     
     
     def _get_decoder(mode):
    -    if mode == 'gzip':
    +    if "," in mode:
    +        return MultiDecoder(mode)
    +
    +    if mode == "gzip":
             return GzipDecoder()
     
    +    if brotli is not None and mode == "br":
    +        return BrotliDecoder()
    +
         return DeflateDecoder()
     
     
    @@ -89,9 +168,8 @@ class HTTPResponse(io.IOBase):
             If True, the response's body will be preloaded during construction.
     
         :param decode_content:
    -        If True, attempts to decode specific content-encoding's based on headers
    -        (like 'gzip' and 'deflate') will be skipped and raw data will be used
    -        instead.
    +        If True, will attempt to decode the body based on the
    +        'content-encoding' header.
     
         :param original_response:
             When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
    @@ -107,13 +185,31 @@ class HTTPResponse(io.IOBase):
             value of Content-Length header, if present. Otherwise, raise error.
         """
     
    -    CONTENT_DECODERS = ['gzip', 'deflate']
    +    CONTENT_DECODERS = ["gzip", "deflate"]
    +    if brotli is not None:
    +        CONTENT_DECODERS += ["br"]
         REDIRECT_STATUSES = [301, 302, 303, 307, 308]
     
    -    def __init__(self, body='', headers=None, status=0, version=0, reason=None,
    -                 strict=0, preload_content=True, decode_content=True,
    -                 original_response=None, pool=None, connection=None,
    -                 retries=None, enforce_content_length=False, request_method=None):
    +    def __init__(
    +        self,
    +        body="",
    +        headers=None,
    +        status=0,
    +        version=0,
    +        reason=None,
    +        strict=0,
    +        preload_content=True,
    +        decode_content=True,
    +        original_response=None,
    +        pool=None,
    +        connection=None,
    +        msg=None,
    +        retries=None,
    +        enforce_content_length=False,
    +        request_method=None,
    +        request_url=None,
    +        auto_close=True,
    +    ):
     
             if isinstance(headers, HTTPHeaderDict):
                 self.headers = headers
    @@ -126,26 +222,29 @@ class HTTPResponse(io.IOBase):
             self.decode_content = decode_content
             self.retries = retries
             self.enforce_content_length = enforce_content_length
    +        self.auto_close = auto_close
     
             self._decoder = None
             self._body = None
             self._fp = None
             self._original_response = original_response
             self._fp_bytes_read = 0
    +        self.msg = msg
    +        self._request_url = request_url
     
    -        if body and isinstance(body, (basestring, binary_type)):
    +        if body and isinstance(body, (basestring, bytes)):
                 self._body = body
     
             self._pool = pool
             self._connection = connection
     
    -        if hasattr(body, 'read'):
    +        if hasattr(body, "read"):
                 self._fp = body
     
             # Are we using the chunked-style of transfer encoding?
             self.chunked = False
             self.chunk_left = None
    -        tr_enc = self.headers.get('transfer-encoding', '').lower()
    +        tr_enc = self.headers.get("transfer-encoding", "").lower()
             # Don't incur the penalty of creating a list and then discarding it
             encodings = (enc.strip() for enc in tr_enc.split(","))
             if "chunked" in encodings:
    @@ -167,7 +266,7 @@ class HTTPResponse(io.IOBase):
                 location. ``False`` if not a redirect status code.
             """
             if self.status in self.REDIRECT_STATUSES:
    -            return self.headers.get('location')
    +            return self.headers.get("location")
     
             return False
     
    @@ -191,6 +290,9 @@ class HTTPResponse(io.IOBase):
         def connection(self):
             return self._connection
     
    +    def isclosed(self):
    +        return is_fp_closed(self._fp)
    +
         def tell(self):
             """
             Obtain the number of bytes pulled over the wire so far. May differ from
    @@ -203,30 +305,34 @@ class HTTPResponse(io.IOBase):
             """
             Set initial length value for Response content if available.
             """
    -        length = self.headers.get('content-length')
    +        length = self.headers.get("content-length")
     
    -        if length is not None and self.chunked:
    -            # This Response will fail with an IncompleteRead if it can't be
    -            # received as chunked. This method falls back to attempt reading
    -            # the response before raising an exception.
    -            log.warning("Received response with both Content-Length and "
    -                        "Transfer-Encoding set. This is expressly forbidden "
    -                        "by RFC 7230 sec 3.3.2. Ignoring Content-Length and "
    -                        "attempting to process response as Transfer-Encoding: "
    -                        "chunked.")
    -            return None
    +        if length is not None:
    +            if self.chunked:
    +                # This Response will fail with an IncompleteRead if it can't be
    +                # received as chunked. This method falls back to attempt reading
    +                # the response before raising an exception.
    +                log.warning(
    +                    "Received response with both Content-Length and "
    +                    "Transfer-Encoding set. This is expressly forbidden "
    +                    "by RFC 7230 sec 3.3.2. Ignoring Content-Length and "
    +                    "attempting to process response as Transfer-Encoding: "
    +                    "chunked."
    +                )
    +                return None
     
    -        elif length is not None:
                 try:
                     # RFC 7230 section 3.3.2 specifies multiple content lengths can
                     # be sent in a single Content-Length header
                     # (e.g. Content-Length: 42, 42). This line ensures the values
                     # are all valid ints and that as long as the `set` length is 1,
                     # all values are the same. Otherwise, the header is invalid.
    -                lengths = set([int(val) for val in length.split(',')])
    +                lengths = set([int(val) for val in length.split(",")])
                     if len(lengths) > 1:
    -                    raise InvalidHeader("Content-Length contained multiple "
    -                                        "unmatching values (%s)" % length)
    +                    raise InvalidHeader(
    +                        "Content-Length contained multiple "
    +                        "unmatching values (%s)" % length
    +                    )
                     length = lengths.pop()
                 except ValueError:
                     length = None
    @@ -242,7 +348,7 @@ class HTTPResponse(io.IOBase):
                 status = 0
     
             # Check for responses that shouldn't include a body
    -        if status in (204, 304) or 100 <= status < 200 or request_method == 'HEAD':
    +        if status in (204, 304) or 100 <= status < 200 or request_method == "HEAD":
                 length = 0
     
             return length
    @@ -253,24 +359,41 @@ class HTTPResponse(io.IOBase):
             """
             # Note: content-encoding value should be case-insensitive, per RFC 7230
             # Section 3.2
    -        content_encoding = self.headers.get('content-encoding', '').lower()
    -        if self._decoder is None and content_encoding in self.CONTENT_DECODERS:
    -            self._decoder = _get_decoder(content_encoding)
    +        content_encoding = self.headers.get("content-encoding", "").lower()
    +        if self._decoder is None:
    +            if content_encoding in self.CONTENT_DECODERS:
    +                self._decoder = _get_decoder(content_encoding)
    +            elif "," in content_encoding:
    +                encodings = [
    +                    e.strip()
    +                    for e in content_encoding.split(",")
    +                    if e.strip() in self.CONTENT_DECODERS
    +                ]
    +                if len(encodings):
    +                    self._decoder = _get_decoder(content_encoding)
    +
    +    DECODER_ERROR_CLASSES = (IOError, zlib.error)
    +    if brotli is not None:
    +        DECODER_ERROR_CLASSES += (brotli.error,)
     
         def _decode(self, data, decode_content, flush_decoder):
             """
             Decode the data passed in and potentially flush the decoder.
             """
    +        if not decode_content:
    +            return data
    +
             try:
    -            if decode_content and self._decoder:
    +            if self._decoder:
                     data = self._decoder.decompress(data)
    -        except (IOError, zlib.error) as e:
    -            content_encoding = self.headers.get('content-encoding', '').lower()
    +        except self.DECODER_ERROR_CLASSES as e:
    +            content_encoding = self.headers.get("content-encoding", "").lower()
                 raise DecodeError(
                     "Received response with content-encoding: %s, but "
    -                "failed to decode it." % content_encoding, e)
    -
    -        if flush_decoder and decode_content:
    +                "failed to decode it." % content_encoding,
    +                e,
    +            )
    +        if flush_decoder:
                 data += self._flush_decoder()
     
             return data
    @@ -281,10 +404,10 @@ class HTTPResponse(io.IOBase):
             being used.
             """
             if self._decoder:
    -            buf = self._decoder.decompress(b'')
    +            buf = self._decoder.decompress(b"")
                 return buf + self._decoder.flush()
     
    -        return b''
    +        return b""
     
         @contextmanager
         def _error_catcher(self):
    @@ -304,20 +427,20 @@ class HTTPResponse(io.IOBase):
                 except SocketTimeout:
                     # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
                     # there is yet no clean way to get at it from this context.
    -                raise ReadTimeoutError(self._pool, None, 'Read timed out.')
    +                raise ReadTimeoutError(self._pool, None, "Read timed out.")
     
                 except BaseSSLError as e:
                     # FIXME: Is there a better way to differentiate between SSLErrors?
    -                if 'read operation timed out' not in str(e):  # Defensive:
    +                if "read operation timed out" not in str(e):  # Defensive:
                         # This shouldn't happen but just in case we're missing an edge
                         # case, let's avoid swallowing SSL errors.
                         raise
     
    -                raise ReadTimeoutError(self._pool, None, 'Read timed out.')
    +                raise ReadTimeoutError(self._pool, None, "Read timed out.")
     
                 except (HTTPException, SocketError) as e:
                     # This includes IncompleteRead.
    -                raise ProtocolError('Connection broken: %r' % e, e)
    +                raise ProtocolError("Connection broken: %r" % e, e)
     
                 # If no exception is thrown, we should avoid cleaning up
                 # unnecessarily.
    @@ -372,17 +495,19 @@ class HTTPResponse(io.IOBase):
                 return
     
             flush_decoder = False
    -        data = None
    +        fp_closed = getattr(self._fp, "closed", False)
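    +        # Editor note: an already-closed fp is treated as an empty read so the
    +        # decoder flush below still runs instead of read() raising.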
     
             with self._error_catcher():
                 if amt is None:
                     # cStringIO doesn't like amt=None
    -                data = self._fp.read()
    +                data = self._fp.read() if not fp_closed else b""
                     flush_decoder = True
                 else:
                     cache_content = False
    -                data = self._fp.read(amt)
    -                if amt != 0 and not data:  # Platform-specific: Buggy versions of Python.
    +                data = self._fp.read(amt) if not fp_closed else b""
    +                if (
    +                    amt != 0 and not data
    +                ):  # Platform-specific: Buggy versions of Python.
                         # Close the connection when no data is returned
                         #
                         # This is redundant to what httplib/http.client _should_
    @@ -392,7 +517,10 @@ class HTTPResponse(io.IOBase):
                         # no harm in redundantly calling close.
                         self._fp.close()
                         flush_decoder = True
    -                    if self.enforce_content_length and self.length_remaining not in (0, None):
    +                    if self.enforce_content_length and self.length_remaining not in (
    +                        0,
    +                        None,
    +                    ):
                             # This is an edge case that httplib failed to cover due
                             # to concerns of backward compatibility. We're
                             # addressing it here to make sure IncompleteRead is
    @@ -412,7 +540,7 @@ class HTTPResponse(io.IOBase):
     
             return data
     
    -    def stream(self, amt=2**16, decode_content=None):
    +    def stream(self, amt=2 ** 16, decode_content=None):
             """
             A generator wrapper for the read() method. A call will block until
             ``amt`` bytes have been read from the connection or until the
    @@ -450,21 +578,24 @@ class HTTPResponse(io.IOBase):
             headers = r.msg
     
             if not isinstance(headers, HTTPHeaderDict):
    -            if PY3:  # Python 3
    +            if PY3:
                     headers = HTTPHeaderDict(headers.items())
    -            else:  # Python 2
    +            else:
    +                # Python 2.7
                     headers = HTTPHeaderDict.from_httplib(headers)
     
             # HTTPResponse objects in Python 3 don't have a .strict attribute
    -        strict = getattr(r, 'strict', 0)
    -        resp = ResponseCls(body=r,
    -                           headers=headers,
    -                           status=r.status,
    -                           version=r.version,
    -                           reason=r.reason,
    -                           strict=strict,
    -                           original_response=r,
    -                           **response_kw)
    +        strict = getattr(r, "strict", 0)
    +        resp = ResponseCls(
    +            body=r,
    +            headers=headers,
    +            status=r.status,
    +            version=r.version,
    +            reason=r.reason,
    +            strict=strict,
    +            original_response=r,
    +            **response_kw
    +        )
             return resp
     
         # Backwards-compatibility methods for httplib.HTTPResponse
    @@ -486,13 +617,18 @@ class HTTPResponse(io.IOBase):
             if self._connection:
                 self._connection.close()
     
    +        if not self.auto_close:
    +            io.IOBase.close(self)
    +
         @property
         def closed(self):
    -        if self._fp is None:
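    +        # Editor note: with auto_close disabled the response is managed like a
    +        # plain io.IOBase object, so defer to its closed bookkeeping.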
    +        if not self.auto_close:
    +            return io.IOBase.closed.__get__(self)
    +        elif self._fp is None:
                 return True
    -        elif hasattr(self._fp, 'isclosed'):
    +        elif hasattr(self._fp, "isclosed"):
                 return self._fp.isclosed()
    -        elif hasattr(self._fp, 'closed'):
    +        elif hasattr(self._fp, "closed"):
                 return self._fp.closed
             else:
                 return True
    @@ -503,11 +639,17 @@ class HTTPResponse(io.IOBase):
             elif hasattr(self._fp, "fileno"):
                 return self._fp.fileno()
             else:
    -            raise IOError("The file-like object this HTTPResponse is wrapped "
    -                          "around has no file descriptor")
    +            raise IOError(
    +                "The file-like object this HTTPResponse is wrapped "
    +                "around has no file descriptor"
    +            )
     
         def flush(self):
    -        if self._fp is not None and hasattr(self._fp, 'flush'):
    +        if (
    +            self._fp is not None
    +            and hasattr(self._fp, "flush")
    +            and not getattr(self._fp, "closed", False)
    +        ):
                 return self._fp.flush()
     
         def readable(self):
    @@ -520,7 +662,7 @@ class HTTPResponse(io.IOBase):
             if len(temp) == 0:
                 return 0
             else:
    -            b[:len(temp)] = temp
    +            b[: len(temp)] = temp
                 return len(temp)
     
         def supports_chunked_reads(self):
    @@ -530,7 +672,7 @@ class HTTPResponse(io.IOBase):
             attribute. If it is present we assume it returns raw chunks as
             processed by read_chunked().
             """
    -        return hasattr(self._fp, 'fp')
    +        return hasattr(self._fp, "fp")
     
         def _update_chunk_length(self):
             # First, we'll figure out length of a chunk and then
    @@ -538,7 +680,7 @@ class HTTPResponse(io.IOBase):
             if self.chunk_left is not None:
                 return
             line = self._fp.fp.readline()
    -        line = line.split(b';', 1)[0]
    +        line = line.split(b";", 1)[0]
             try:
                 self.chunk_left = int(line, 16)
             except ValueError:
    @@ -573,6 +715,11 @@ class HTTPResponse(io.IOBase):
             Similar to :meth:`HTTPResponse.read`, but with an additional
             parameter: ``decode_content``.
     
    +        :param amt:
    +            How much of the content to read. If specified, caching is skipped
    +            because it doesn't make sense to cache partial content as the full
    +            response.
    +
             :param decode_content:
                 If True, will attempt to decode the body based on the
                 'content-encoding' header.
    @@ -582,25 +729,33 @@ class HTTPResponse(io.IOBase):
             if not self.chunked:
                 raise ResponseNotChunked(
                     "Response is not chunked. "
    -                "Header 'transfer-encoding: chunked' is missing.")
    +                "Header 'transfer-encoding: chunked' is missing."
    +            )
             if not self.supports_chunked_reads():
                 raise BodyNotHttplibCompatible(
                     "Body should be httplib.HTTPResponse like. "
    -                "It should have have an fp attribute which returns raw chunks.")
    -
    -        # Don't bother reading the body of a HEAD request.
    -        if self._original_response and is_response_to_head(self._original_response):
    -            self._original_response.close()
    -            return
+                "It should have an fp attribute which returns raw chunks."
    +            )
     
             with self._error_catcher():
    +            # Don't bother reading the body of a HEAD request.
    +            if self._original_response and is_response_to_head(self._original_response):
    +                self._original_response.close()
    +                return
    +
    +            # If a response is already read and closed
    +            # then return immediately.
    +            if self._fp.fp is None:
    +                return
    +
                 while True:
                     self._update_chunk_length()
                     if self.chunk_left == 0:
                         break
                     chunk = self._handle_chunk(amt)
    -                decoded = self._decode(chunk, decode_content=decode_content,
    -                                       flush_decoder=False)
    +                decoded = self._decode(
    +                    chunk, decode_content=decode_content, flush_decoder=False
    +                )
                     if decoded:
                         yield decoded
     
    @@ -618,9 +773,37 @@ class HTTPResponse(io.IOBase):
                     if not line:
                         # Some sites may not end with '\r\n'.
                         break
    -                if line == b'\r\n':
    +                if line == b"\r\n":
                         break
     
                 # We read everything; close the "file".
                 if self._original_response:
                     self._original_response.close()
    +
    +    def geturl(self):
    +        """
    +        Returns the URL that was the source of this response.
    +        If the request that generated this response redirected, this method
    +        will return the final redirect location.
    +        """
    +        if self.retries is not None and len(self.retries.history):
    +            return self.retries.history[-1].redirect_location
    +        else:
    +            return self._request_url
    +
    +    def __iter__(self):
    +        buffer = [b""]
    +        for chunk in self.stream(decode_content=True):
    +            if b"\n" in chunk:
    +                chunk = chunk.split(b"\n")
    +                yield b"".join(buffer) + chunk[0] + b"\n"
    +                for x in chunk[1:-1]:
    +                    yield x + b"\n"
    +                if chunk[-1]:
    +                    buffer = [chunk[-1]]
    +                else:
    +                    buffer = []
    +            else:
    +                buffer.append(chunk)
    +        if buffer:
    +            yield b"".join(buffer)
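`geturl()` and `__iter__` are new conveniences: the former reports the final redirect location recorded in the retry history (falling back to the original request URL), the latter re-chunks the decoded stream on `b"\n"` so a response can be iterated line by line. The buffering logic in isolation, as a standalone sketch (the fake chunks stand in for what `stream()` would yield):

```python
def iter_lines(chunks):
    # Mirrors HTTPResponse.__iter__ above: reassemble arbitrary chunks into
    # b"\n"-terminated lines, holding any trailing partial line in a buffer.
    buffer = [b""]
    for chunk in chunks:
        if b"\n" in chunk:
            parts = chunk.split(b"\n")
            yield b"".join(buffer) + parts[0] + b"\n"
            for part in parts[1:-1]:
                yield part + b"\n"
            buffer = [parts[-1]] if parts[-1] else []
        else:
            buffer.append(chunk)
    if buffer:
        yield b"".join(buffer)

print(list(iter_lines([b"ab", b"c\nde\nf", b"g"])))
# [b'abc\n', b'de\n', b'fg']
```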
    diff --git a/lib/urllib3/util/__init__.py b/lib/urllib3/util/__init__.py
    index 2f2770b6..a96c73a9 100644
    --- a/lib/urllib3/util/__init__.py
    +++ b/lib/urllib3/util/__init__.py
    @@ -1,4 +1,5 @@
     from __future__ import absolute_import
    +
     # For backwards compatibility, provide imports that used to be here.
     from .connection import is_connection_dropped
     from .request import make_headers
    @@ -12,43 +13,34 @@ from .ssl_ import (
         resolve_cert_reqs,
         resolve_ssl_version,
         ssl_wrap_socket,
    +    PROTOCOL_TLS,
     )
    -from .timeout import (
    -    current_time,
    -    Timeout,
    -)
    +from .timeout import current_time, Timeout
     
     from .retry import Retry
    -from .url import (
    -    get_host,
    -    parse_url,
    -    split_first,
    -    Url,
    -)
    -from .wait import (
    -    wait_for_read,
    -    wait_for_write
    -)
    +from .url import get_host, parse_url, split_first, Url
    +from .wait import wait_for_read, wait_for_write
     
     __all__ = (
    -    'HAS_SNI',
    -    'IS_PYOPENSSL',
    -    'IS_SECURETRANSPORT',
    -    'SSLContext',
    -    'Retry',
    -    'Timeout',
    -    'Url',
    -    'assert_fingerprint',
    -    'current_time',
    -    'is_connection_dropped',
    -    'is_fp_closed',
    -    'get_host',
    -    'parse_url',
    -    'make_headers',
    -    'resolve_cert_reqs',
    -    'resolve_ssl_version',
    -    'split_first',
    -    'ssl_wrap_socket',
    -    'wait_for_read',
    -    'wait_for_write'
    +    "HAS_SNI",
    +    "IS_PYOPENSSL",
    +    "IS_SECURETRANSPORT",
    +    "SSLContext",
    +    "PROTOCOL_TLS",
    +    "Retry",
    +    "Timeout",
    +    "Url",
    +    "assert_fingerprint",
    +    "current_time",
    +    "is_connection_dropped",
    +    "is_fp_closed",
    +    "get_host",
    +    "parse_url",
    +    "make_headers",
    +    "resolve_cert_reqs",
    +    "resolve_ssl_version",
    +    "split_first",
    +    "ssl_wrap_socket",
    +    "wait_for_read",
    +    "wait_for_write",
     )
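The flattened imports and the added `PROTOCOL_TLS` export don't change behavior; the module stays a convenience façade over the submodules. For instance (values illustrative):

```python
from urllib3.util import Retry, Timeout, parse_url

timeout = Timeout(connect=2.0, read=7.0)      # separate connect/read budgets
retries = Retry(total=3, backoff_factor=0.5)
url = parse_url("https://example.com:8443/path?q=1")
print(url.host, url.port, url.request_uri)    # example.com 8443 /path?q=1
```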
    diff --git a/lib/urllib3/util/connection.py b/lib/urllib3/util/connection.py
    index bf699cfd..0e111262 100644
    --- a/lib/urllib3/util/connection.py
    +++ b/lib/urllib3/util/connection.py
    @@ -1,7 +1,7 @@
     from __future__ import absolute_import
     import socket
    -from .wait import wait_for_read
    -from .selectors import HAS_SELECT, SelectorError
    +from .wait import NoWayToWaitForSocketError, wait_for_read
    +from ..contrib import _appengine_environ
     
     
     def is_connection_dropped(conn):  # Platform-specific
    @@ -14,27 +14,28 @@ def is_connection_dropped(conn):  # Platform-specific
         Note: For platforms like AppEngine, this will always return ``False`` to
         let the platform handle connection recycling transparently for us.
         """
    -    sock = getattr(conn, 'sock', False)
    +    sock = getattr(conn, "sock", False)
         if sock is False:  # Platform-specific: AppEngine
             return False
         if sock is None:  # Connection already closed (such as by httplib).
             return True
    -
    -    if not HAS_SELECT:
    -        return False
    -
         try:
    -        return bool(wait_for_read(sock, timeout=0.0))
    -    except SelectorError:
    -        return True
    +        # Returns True if readable, which here means it's been dropped
    +        return wait_for_read(sock, timeout=0.0)
    +    except NoWayToWaitForSocketError:  # Platform-specific: AppEngine
    +        return False
     
     
     # This function is copied from socket.py in the Python 2.7 standard
     # library test suite. Added to its signature is only `socket_options`.
     # One additional modification is that we avoid binding to IPv6 servers
     # discovered in DNS if the system doesn't have IPv6 functionality.
    -def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
    -                      source_address=None, socket_options=None):
    +def create_connection(
    +    address,
    +    timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
    +    source_address=None,
    +    socket_options=None,
    +):
         """Connect to *address* and return the socket object.
     
         Convenience function.  Connect to *address* (a 2-tuple ``(host,
    @@ -48,8 +49,8 @@ def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
         """
     
         host, port = address
    -    if host.startswith('['):
    -        host = host.strip('[]')
    +    if host.startswith("["):
    +        host = host.strip("[]")
         err = None
     
         # Using the value from allowed_gai_family() in the context of getaddrinfo lets
    @@ -109,6 +110,13 @@ def _has_ipv6(host):
         sock = None
         has_ipv6 = False
     
    +    # App Engine doesn't support IPV6 sockets and actually has a quota on the
    +    # number of sockets that can be used, so just early out here instead of
    +    # creating a socket needlessly.
    +    # See https://github.com/urllib3/urllib3/issues/1446
    +    if _appengine_environ.is_appengine_sandbox():
    +        return False
    +
         if socket.has_ipv6:
             # has_ipv6 returns true if cPython was compiled with IPv6 support.
             # It does not tell us if the system has IPv6 support enabled. To
    @@ -127,4 +135,4 @@ def _has_ipv6(host):
         return has_ipv6
     
     
    -HAS_IPV6 = _has_ipv6('::1')
    +HAS_IPV6 = _has_ipv6("::1")
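`create_connection()` keeps its stdlib-derived behavior but now skips the IPv6 probe entirely inside the App Engine sandbox. A sketch of calling it directly, including the `socket_options` hook that distinguishes it from `socket.create_connection` (host and options illustrative):

```python
import socket
from urllib3.util.connection import create_connection

# socket_options entries are applied with setsockopt() before connecting;
# here Nagle's algorithm is disabled as an example.
sock = create_connection(
    ("example.com", 80),
    timeout=5.0,
    socket_options=[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)],
)
sock.sendall(b"HEAD / HTTP/1.1\r\nHost: example.com\r\nConnection: close\r\n\r\n")
print(sock.recv(64))
sock.close()
```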
    diff --git a/lib/urllib3/util/queue.py b/lib/urllib3/util/queue.py
    new file mode 100644
    index 00000000..d3d379a1
    --- /dev/null
    +++ b/lib/urllib3/util/queue.py
    @@ -0,0 +1,21 @@
    +import collections
    +from ..packages import six
    +from ..packages.six.moves import queue
    +
    +if six.PY2:
    +    # Queue is imported for side effects on MS Windows. See issue #229.
    +    import Queue as _unused_module_Queue  # noqa: F401
    +
    +
    +class LifoQueue(queue.Queue):
    +    def _init(self, _):
    +        self.queue = collections.deque()
    +
    +    def _qsize(self, len=len):
    +        return len(self.queue)
    +
    +    def _put(self, item):
    +        self.queue.append(item)
    +
    +    def _get(self):
    +        return self.queue.pop()
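The new `LifoQueue` is a deque-backed, last-in-first-out variant of `queue.Queue`; pools that hand out the most recently returned connection first are more likely to reuse one that is still alive. The ordering in isolation:

```python
from urllib3.util.queue import LifoQueue

q = LifoQueue(maxsize=3)
for name in ("conn-1", "conn-2", "conn-3"):
    q.put(name)

# Unlike FIFO queue.Queue, the most recently stored item comes back first.
print(q.get(), q.get(), q.get())  # conn-3 conn-2 conn-1
```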
    diff --git a/lib/urllib3/util/request.py b/lib/urllib3/util/request.py
    index 3ddfcd55..262a6d61 100644
    --- a/lib/urllib3/util/request.py
    +++ b/lib/urllib3/util/request.py
    @@ -4,12 +4,25 @@ from base64 import b64encode
     from ..packages.six import b, integer_types
     from ..exceptions import UnrewindableBodyError
     
    -ACCEPT_ENCODING = 'gzip,deflate'
    +ACCEPT_ENCODING = "gzip,deflate"
    +try:
    +    import brotli as _unused_module_brotli  # noqa: F401
    +except ImportError:
    +    pass
    +else:
    +    ACCEPT_ENCODING += ",br"
    +
     _FAILEDTELL = object()
     
     
    -def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
    -                 basic_auth=None, proxy_basic_auth=None, disable_cache=None):
    +def make_headers(
    +    keep_alive=None,
    +    accept_encoding=None,
    +    user_agent=None,
    +    basic_auth=None,
    +    proxy_basic_auth=None,
    +    disable_cache=None,
    +):
         """
         Shortcuts for generating request headers.
     
    @@ -49,27 +62,27 @@ def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
             if isinstance(accept_encoding, str):
                 pass
             elif isinstance(accept_encoding, list):
    -            accept_encoding = ','.join(accept_encoding)
    +            accept_encoding = ",".join(accept_encoding)
             else:
                 accept_encoding = ACCEPT_ENCODING
    -        headers['accept-encoding'] = accept_encoding
    +        headers["accept-encoding"] = accept_encoding
     
         if user_agent:
    -        headers['user-agent'] = user_agent
    +        headers["user-agent"] = user_agent
     
         if keep_alive:
    -        headers['connection'] = 'keep-alive'
    +        headers["connection"] = "keep-alive"
     
         if basic_auth:
    -        headers['authorization'] = 'Basic ' + \
    -            b64encode(b(basic_auth)).decode('utf-8')
    +        headers["authorization"] = "Basic " + b64encode(b(basic_auth)).decode("utf-8")
     
         if proxy_basic_auth:
    -        headers['proxy-authorization'] = 'Basic ' + \
    -            b64encode(b(proxy_basic_auth)).decode('utf-8')
    +        headers["proxy-authorization"] = "Basic " + b64encode(
    +            b(proxy_basic_auth)
    +        ).decode("utf-8")
     
         if disable_cache:
    -        headers['cache-control'] = 'no-cache'
    +        headers["cache-control"] = "no-cache"
     
         return headers
     
    @@ -81,7 +94,7 @@ def set_file_position(body, pos):
         """
         if pos is not None:
             rewind_body(body, pos)
    -    elif getattr(body, 'tell', None) is not None:
    +    elif getattr(body, "tell", None) is not None:
             try:
                 pos = body.tell()
             except (IOError, OSError):
    @@ -103,16 +116,20 @@ def rewind_body(body, body_pos):
         :param int pos:
             Position to seek to in file.
         """
    -    body_seek = getattr(body, 'seek', None)
    +    body_seek = getattr(body, "seek", None)
         if body_seek is not None and isinstance(body_pos, integer_types):
             try:
                 body_seek(body_pos)
             except (IOError, OSError):
    -            raise UnrewindableBodyError("An error occurred when rewinding request "
    -                                        "body for redirect/retry.")
    +            raise UnrewindableBodyError(
    +                "An error occurred when rewinding request " "body for redirect/retry."
    +            )
         elif body_pos is _FAILEDTELL:
    -        raise UnrewindableBodyError("Unable to record file position for rewinding "
    -                                    "request body during a redirect/retry.")
    +        raise UnrewindableBodyError(
    +            "Unable to record file position for rewinding "
    +            "request body during a redirect/retry."
    +        )
         else:
    -        raise ValueError("body_pos must be of type integer, "
    -                         "instead it was %s." % type(body_pos))
    +        raise ValueError(
    +            "body_pos must be of type integer, " "instead it was %s." % type(body_pos)
    +        )
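Besides the reformat, `request.py` now appends `",br"` to the default Accept-Encoding whenever the `brotli` package is importable. `make_headers()` itself is unchanged; a quick sketch (credentials illustrative):

```python
from urllib3.util.request import make_headers

headers = make_headers(
    keep_alive=True,
    accept_encoding=True,        # expands to ACCEPT_ENCODING (",br" added if brotli is installed)
    user_agent="my-client/1.0",
    basic_auth="user:secret",
)
print(headers)
# {'accept-encoding': 'gzip,deflate', 'user-agent': 'my-client/1.0',
#  'connection': 'keep-alive', 'authorization': 'Basic dXNlcjpzZWNyZXQ='}
```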
    diff --git a/lib/urllib3/util/response.py b/lib/urllib3/util/response.py
    index 67cf730a..715868dd 100644
    --- a/lib/urllib3/util/response.py
    +++ b/lib/urllib3/util/response.py
    @@ -52,15 +52,20 @@ def assert_header_parsing(headers):
         # This will fail silently if we pass in the wrong kind of parameter.
         # To make debugging easier add an explicit check.
         if not isinstance(headers, httplib.HTTPMessage):
    -        raise TypeError('expected httplib.Message, got {0}.'.format(
    -            type(headers)))
    +        raise TypeError("expected httplib.Message, got {0}.".format(type(headers)))
     
    -    defects = getattr(headers, 'defects', None)
    -    get_payload = getattr(headers, 'get_payload', None)
    +    defects = getattr(headers, "defects", None)
    +    get_payload = getattr(headers, "get_payload", None)
     
         unparsed_data = None
    -    if get_payload:  # Platform-specific: Python 3.
    -        unparsed_data = get_payload()
    +    if get_payload:
    +        # get_payload is actually email.message.Message.get_payload;
    +        # we're only interested in the result if it's not a multipart message
    +        if not headers.is_multipart():
    +            payload = get_payload()
    +
    +            if isinstance(payload, (bytes, str)):
    +                unparsed_data = payload
     
         if defects or unparsed_data:
             raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
    @@ -78,4 +83,4 @@ def is_response_to_head(response):
         method = response._method
         if isinstance(method, int):  # Platform-specific: Appengine
             return method == 3
    -    return method.upper() == 'HEAD'
    +    return method.upper() == "HEAD"
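The reworked `assert_header_parsing()` now inspects the payload only for non-multipart messages, since `get_payload()` returns a list of sub-messages for multipart bodies rather than unparsed text. A rough Python 3 sketch of what the check catches (`parse_headers` is the semi-private stdlib helper that produces `httplib.HTTPMessage` objects):

```python
import io
from http.client import parse_headers
from urllib3.exceptions import HeaderParsingError
from urllib3.util.response import assert_header_parsing

raw = b"Content-Type: text/plain\r\nBroken Header without a colon\r\n\r\n"
msg = parse_headers(io.BytesIO(raw))
try:
    assert_header_parsing(msg)   # silent when the headers parsed cleanly
except HeaderParsingError as e:
    print("defective headers:", e)
```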
    diff --git a/lib/urllib3/util/retry.py b/lib/urllib3/util/retry.py
    index c603cb49..5a049fe6 100644
    --- a/lib/urllib3/util/retry.py
    +++ b/lib/urllib3/util/retry.py
    @@ -19,9 +19,11 @@ from ..packages import six
     
     log = logging.getLogger(__name__)
     
    +
     # Data structure for representing the metadata of requests that result in a retry.
    -RequestHistory = namedtuple('RequestHistory', ["method", "url", "error",
    -                                               "status", "redirect_location"])
    +RequestHistory = namedtuple(
    +    "RequestHistory", ["method", "url", "error", "status", "redirect_location"]
    +)
     
     
     class Retry(object):
    @@ -114,7 +116,7 @@ class Retry(object):
             (most errors are resolved immediately by a second try without a
             delay). urllib3 will sleep for::
     
    -            {backoff factor} * (2 ^ ({number of total retries} - 1))
    +            {backoff factor} * (2 ** ({number of total retries} - 1))
     
             seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
             for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
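The docstring fix (`2 ^` to `2 **`) matters because the formula is meant to read as Python. Replicating `get_backoff_time()` as a standalone sketch shows the resulting sleep schedule:

```python
def backoff_time(backoff_factor, consecutive_errors, backoff_max=120):
    # Mirrors Retry.get_backoff_time(): no delay until the second
    # consecutive error, then exponential growth capped at BACKOFF_MAX.
    if consecutive_errors <= 1:
        return 0
    return min(backoff_max, backoff_factor * (2 ** (consecutive_errors - 1)))

print([backoff_time(0.1, n) for n in range(1, 6)])
# [0, 0.2, 0.4, 0.8, 1.6]
```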
    @@ -139,20 +141,39 @@ class Retry(object):
             Whether to respect Retry-After header on status codes defined as
             :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.
     
    +    :param iterable remove_headers_on_redirect:
    +        Sequence of headers to remove from the request when a response
    +        indicating a redirect is returned before firing off the redirected
    +        request.
         """
     
    -    DEFAULT_METHOD_WHITELIST = frozenset([
    -        'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
    +    DEFAULT_METHOD_WHITELIST = frozenset(
    +        ["HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE"]
    +    )
     
         RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
     
    +    DEFAULT_REDIRECT_HEADERS_BLACKLIST = frozenset(["Authorization"])
    +
         #: Maximum backoff time.
         BACKOFF_MAX = 120
     
    -    def __init__(self, total=10, connect=None, read=None, redirect=None, status=None,
    -                 method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
    -                 backoff_factor=0, raise_on_redirect=True, raise_on_status=True,
    -                 history=None, respect_retry_after_header=True):
    +    def __init__(
    +        self,
    +        total=10,
    +        connect=None,
    +        read=None,
    +        redirect=None,
    +        status=None,
    +        method_whitelist=DEFAULT_METHOD_WHITELIST,
    +        status_forcelist=None,
    +        backoff_factor=0,
    +        raise_on_redirect=True,
    +        raise_on_status=True,
    +        history=None,
    +        respect_retry_after_header=True,
    +        remove_headers_on_redirect=DEFAULT_REDIRECT_HEADERS_BLACKLIST,
    +    ):
     
             self.total = total
             self.connect = connect
    @@ -171,17 +192,25 @@ class Retry(object):
             self.raise_on_status = raise_on_status
             self.history = history or tuple()
             self.respect_retry_after_header = respect_retry_after_header
    +        self.remove_headers_on_redirect = frozenset(
    +            [h.lower() for h in remove_headers_on_redirect]
    +        )
     
         def new(self, **kw):
             params = dict(
                 total=self.total,
    -            connect=self.connect, read=self.read, redirect=self.redirect, status=self.status,
    +            connect=self.connect,
    +            read=self.read,
    +            redirect=self.redirect,
    +            status=self.status,
                 method_whitelist=self.method_whitelist,
                 status_forcelist=self.status_forcelist,
                 backoff_factor=self.backoff_factor,
                 raise_on_redirect=self.raise_on_redirect,
                 raise_on_status=self.raise_on_status,
                 history=self.history,
    +            remove_headers_on_redirect=self.remove_headers_on_redirect,
    +            respect_retry_after_header=self.respect_retry_after_header,
             )
             params.update(kw)
             return type(self)(**params)
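The substantive addition here is `remove_headers_on_redirect` (lower-cased into a frozenset above, and now propagated through `new()` along with `respect_retry_after_header`): by default the `Authorization` header is stripped before a redirected request is sent, and callers can widen or empty the blacklist. For example:

```python
from urllib3.util.retry import Retry

# Default: only Authorization is dropped when following a redirect.
print(Retry().remove_headers_on_redirect)    # frozenset({'authorization'})

# Also strip cookies; pass an empty list to forward everything instead.
r = Retry(remove_headers_on_redirect=["Authorization", "Cookie"])
print(sorted(r.remove_headers_on_redirect))  # ['authorization', 'cookie']
```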
    @@ -206,8 +235,11 @@ class Retry(object):
             :rtype: float
             """
             # We want to consider only the last consecutive errors sequence (Ignore redirects).
    -        consecutive_errors_len = len(list(takewhile(lambda x: x.redirect_location is None,
    -                                                    reversed(self.history))))
    +        consecutive_errors_len = len(
    +            list(
    +                takewhile(lambda x: x.redirect_location is None, reversed(self.history))
    +            )
    +        )
             if consecutive_errors_len <= 1:
                 return 0
     
    @@ -263,7 +295,7 @@ class Retry(object):
             this method will return immediately.
             """
     
    -        if response:
    +        if self.respect_retry_after_header and response:
                 slept = self.sleep_for_retry(response)
                 if slept:
                     return
    @@ -304,8 +336,12 @@ class Retry(object):
             if self.status_forcelist and status_code in self.status_forcelist:
                 return True
     
    -        return (self.total and self.respect_retry_after_header and
    -                has_retry_after and (status_code in self.RETRY_AFTER_STATUS_CODES))
    +        return (
    +            self.total
    +            and self.respect_retry_after_header
    +            and has_retry_after
    +            and (status_code in self.RETRY_AFTER_STATUS_CODES)
    +        )
     
         def is_exhausted(self):
             """ Are we out of retries? """
    @@ -316,8 +352,15 @@ class Retry(object):
     
             return min(retry_counts) < 0
     
    -    def increment(self, method=None, url=None, response=None, error=None,
    -                  _pool=None, _stacktrace=None):
    +    def increment(
    +        self,
    +        method=None,
    +        url=None,
    +        response=None,
    +        error=None,
    +        _pool=None,
    +        _stacktrace=None,
    +    ):
             """ Return a new Retry object with incremented retry counters.
     
             :param response: A response object, or None, if the server did not
    @@ -340,7 +383,7 @@ class Retry(object):
             read = self.read
             redirect = self.redirect
             status_count = self.status
    -        cause = 'unknown'
    +        cause = "unknown"
             status = None
             redirect_location = None
     
    @@ -362,7 +405,7 @@ class Retry(object):
                 # Redirect retry?
                 if redirect is not None:
                     redirect -= 1
    -            cause = 'too many redirects'
    +            cause = "too many redirects"
                 redirect_location = response.get_redirect_location()
                 status = response.status
     
    @@ -373,16 +416,21 @@ class Retry(object):
                 if response and response.status:
                     if status_count is not None:
                         status_count -= 1
    -                cause = ResponseError.SPECIFIC_ERROR.format(
    -                    status_code=response.status)
    +                cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)
                     status = response.status
     
    -        history = self.history + (RequestHistory(method, url, error, status, redirect_location),)
    +        history = self.history + (
    +            RequestHistory(method, url, error, status, redirect_location),
    +        )
     
             new_retry = self.new(
                 total=total,
    -            connect=connect, read=read, redirect=redirect, status=status_count,
    -            history=history)
    +            connect=connect,
    +            read=read,
    +            redirect=redirect,
    +            status=status_count,
    +            history=history,
    +        )
     
             if new_retry.is_exhausted():
                 raise MaxRetryError(_pool, url, error or ResponseError(cause))
    @@ -392,9 +440,10 @@ class Retry(object):
             return new_retry
     
         def __repr__(self):
    -        return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
    -                'read={self.read}, redirect={self.redirect}, status={self.status})').format(
    -                    cls=type(self), self=self)
    +        return (
    +            "{cls.__name__}(total={self.total}, connect={self.connect}, "
    +            "read={self.read}, redirect={self.redirect}, status={self.status})"
    +        ).format(cls=type(self), self=self)
     
     
     # For backwards compatibility (equivalent to pre-v1.9):
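Taken together the `Retry` changes are additive, so existing call sites keep working. A typical end-to-end configuration (URL and numbers illustrative):

```python
import urllib3
from urllib3.util.retry import Retry

retries = Retry(
    total=3,
    backoff_factor=0.5,               # sleeps 0s, 1s, 2s between attempts
    status_forcelist=[502, 503, 504],
    respect_retry_after_header=True,  # honour Retry-After on 413/429/503
)
http = urllib3.PoolManager(retries=retries)
resp = http.request("GET", "https://httpbin.org/status/200")
print(resp.status)
```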
    diff --git a/lib/urllib3/util/selectors.py b/lib/urllib3/util/selectors.py
    deleted file mode 100644
    index d75cb266..00000000
    --- a/lib/urllib3/util/selectors.py
    +++ /dev/null
    @@ -1,581 +0,0 @@
    -# Backport of selectors.py from Python 3.5+ to support Python < 3.4
    -# Also has the behavior specified in PEP 475 which is to retry syscalls
    -# in the case of an EINTR error. This module is required because selectors34
    -# does not follow this behavior and instead returns that no dile descriptor
    -# events have occurred rather than retry the syscall. The decision to drop
    -# support for select.devpoll is made to maintain 100% test coverage.
    -
    -import errno
    -import math
    -import select
    -import socket
    -import sys
    -import time
    -from collections import namedtuple, Mapping
    -
    -try:
    -    monotonic = time.monotonic
    -except (AttributeError, ImportError):  # Python 3.3<
    -    monotonic = time.time
    -
    -EVENT_READ = (1 << 0)
    -EVENT_WRITE = (1 << 1)
    -
    -HAS_SELECT = True  # Variable that shows whether the platform has a selector.
    -_SYSCALL_SENTINEL = object()  # Sentinel in case a system call returns None.
    -_DEFAULT_SELECTOR = None
    -
    -
    -class SelectorError(Exception):
    -    def __init__(self, errcode):
    -        super(SelectorError, self).__init__()
    -        self.errno = errcode
    -
    -    def __repr__(self):
    -        return "".format(self.errno)
    -
    -    def __str__(self):
    -        return self.__repr__()
    -
    -
    -def _fileobj_to_fd(fileobj):
    -    """ Return a file descriptor from a file object. If
    -    given an integer will simply return that integer back. """
    -    if isinstance(fileobj, int):
    -        fd = fileobj
    -    else:
    -        try:
    -            fd = int(fileobj.fileno())
    -        except (AttributeError, TypeError, ValueError):
    -            raise ValueError("Invalid file object: {0!r}".format(fileobj))
    -    if fd < 0:
    -        raise ValueError("Invalid file descriptor: {0}".format(fd))
    -    return fd
    -
    -
    -# Determine which function to use to wrap system calls because Python 3.5+
    -# already handles the case when system calls are interrupted.
    -if sys.version_info >= (3, 5):
    -    def _syscall_wrapper(func, _, *args, **kwargs):
    -        """ This is the short-circuit version of the below logic
    -        because in Python 3.5+ all system calls automatically restart
    -        and recalculate their timeouts. """
    -        try:
    -            return func(*args, **kwargs)
    -        except (OSError, IOError, select.error) as e:
    -            errcode = None
    -            if hasattr(e, "errno"):
    -                errcode = e.errno
    -            raise SelectorError(errcode)
    -else:
    -    def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
    -        """ Wrapper function for syscalls that could fail due to EINTR.
    -        All functions should be retried if there is time left in the timeout
    -        in accordance with PEP 475. """
    -        timeout = kwargs.get("timeout", None)
    -        if timeout is None:
    -            expires = None
    -            recalc_timeout = False
    -        else:
    -            timeout = float(timeout)
    -            if timeout < 0.0:  # Timeout less than 0 treated as no timeout.
    -                expires = None
    -            else:
    -                expires = monotonic() + timeout
    -
    -        args = list(args)
    -        if recalc_timeout and "timeout" not in kwargs:
    -            raise ValueError(
    -                "Timeout must be in args or kwargs to be recalculated")
    -
    -        result = _SYSCALL_SENTINEL
    -        while result is _SYSCALL_SENTINEL:
    -            try:
    -                result = func(*args, **kwargs)
    -            # OSError is thrown by select.select
    -            # IOError is thrown by select.epoll.poll
    -            # select.error is thrown by select.poll.poll
    -            # Aren't we thankful for Python 3.x rework for exceptions?
    -            except (OSError, IOError, select.error) as e:
    -                # select.error wasn't a subclass of OSError in the past.
    -                errcode = None
    -                if hasattr(e, "errno"):
    -                    errcode = e.errno
    -                elif hasattr(e, "args"):
    -                    errcode = e.args[0]
    -
    -                # Also test for the Windows equivalent of EINTR.
    -                is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
    -                                                           errcode == errno.WSAEINTR))
    -
    -                if is_interrupt:
    -                    if expires is not None:
    -                        current_time = monotonic()
    -                        if current_time > expires:
    -                            raise OSError(errno=errno.ETIMEDOUT)
    -                        if recalc_timeout:
    -                            if "timeout" in kwargs:
    -                                kwargs["timeout"] = expires - current_time
    -                    continue
    -                if errcode:
    -                    raise SelectorError(errcode)
    -                else:
    -                    raise
    -        return result
    -
    -
    -SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
    -
    -
    -class _SelectorMapping(Mapping):
    -    """ Mapping of file objects to selector keys """
    -
    -    def __init__(self, selector):
    -        self._selector = selector
    -
    -    def __len__(self):
    -        return len(self._selector._fd_to_key)
    -
    -    def __getitem__(self, fileobj):
    -        try:
    -            fd = self._selector._fileobj_lookup(fileobj)
    -            return self._selector._fd_to_key[fd]
    -        except KeyError:
    -            raise KeyError("{0!r} is not registered.".format(fileobj))
    -
    -    def __iter__(self):
    -        return iter(self._selector._fd_to_key)
    -
    -
    -class BaseSelector(object):
    -    """ Abstract Selector class
    -
    -    A selector supports registering file objects to be monitored
    -    for specific I/O events.
    -
    -    A file object is a file descriptor or any object with a
    -    `fileno()` method. An arbitrary object can be attached to the
    -    file object which can be used for example to store context info,
    -    a callback, etc.
    -
    -    A selector can use various implementations (select(), poll(), epoll(),
    -    and kqueue()) depending on the platform. The 'DefaultSelector' class uses
    -    the most efficient implementation for the current platform.
    -    """
    -    def __init__(self):
    -        # Maps file descriptors to keys.
    -        self._fd_to_key = {}
    -
    -        # Read-only mapping returned by get_map()
    -        self._map = _SelectorMapping(self)
    -
    -    def _fileobj_lookup(self, fileobj):
    -        """ Return a file descriptor from a file object.
    -        This wraps _fileobj_to_fd() to do an exhaustive
    -        search in case the object is invalid but we still
    -        have it in our map. Used by unregister() so we can
    -        unregister an object that was previously registered
    -        even if it is closed. It is also used by _SelectorMapping
    -        """
    -        try:
    -            return _fileobj_to_fd(fileobj)
    -        except ValueError:
    -
    -            # Search through all our mapped keys.
    -            for key in self._fd_to_key.values():
    -                if key.fileobj is fileobj:
    -                    return key.fd
    -
    -            # Raise ValueError after all.
    -            raise
    -
    -    def register(self, fileobj, events, data=None):
    -        """ Register a file object for a set of events to monitor. """
    -        if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
    -            raise ValueError("Invalid events: {0!r}".format(events))
    -
    -        key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
    -
    -        if key.fd in self._fd_to_key:
    -            raise KeyError("{0!r} (FD {1}) is already registered"
    -                           .format(fileobj, key.fd))
    -
    -        self._fd_to_key[key.fd] = key
    -        return key
    -
    -    def unregister(self, fileobj):
    -        """ Unregister a file object from being monitored. """
    -        try:
    -            key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
    -        except KeyError:
    -            raise KeyError("{0!r} is not registered".format(fileobj))
    -
    -        # Getting the fileno of a closed socket on Windows errors with EBADF.
    -        except socket.error as e:  # Platform-specific: Windows.
    -            if e.errno != errno.EBADF:
    -                raise
    -            else:
    -                for key in self._fd_to_key.values():
    -                    if key.fileobj is fileobj:
    -                        self._fd_to_key.pop(key.fd)
    -                        break
    -                else:
    -                    raise KeyError("{0!r} is not registered".format(fileobj))
    -        return key
    -
    -    def modify(self, fileobj, events, data=None):
    -        """ Change a registered file object monitored events and data. """
    -        # NOTE: Some subclasses optimize this operation even further.
    -        try:
    -            key = self._fd_to_key[self._fileobj_lookup(fileobj)]
    -        except KeyError:
    -            raise KeyError("{0!r} is not registered".format(fileobj))
    -
    -        if events != key.events:
    -            self.unregister(fileobj)
    -            key = self.register(fileobj, events, data)
    -
    -        elif data != key.data:
    -            # Use a shortcut to update the data.
    -            key = key._replace(data=data)
    -            self._fd_to_key[key.fd] = key
    -
    -        return key
    -
    -    def select(self, timeout=None):
    -        """ Perform the actual selection until some monitored file objects
    -        are ready or the timeout expires. """
    -        raise NotImplementedError()
    -
    -    def close(self):
    -        """ Close the selector. This must be called to ensure that all
    -        underlying resources are freed. """
    -        self._fd_to_key.clear()
    -        self._map = None
    -
    -    def get_key(self, fileobj):
    -        """ Return the key associated with a registered file object. """
    -        mapping = self.get_map()
    -        if mapping is None:
    -            raise RuntimeError("Selector is closed")
    -        try:
    -            return mapping[fileobj]
    -        except KeyError:
    -            raise KeyError("{0!r} is not registered".format(fileobj))
    -
    -    def get_map(self):
    -        """ Return a mapping of file objects to selector keys """
    -        return self._map
    -
    -    def _key_from_fd(self, fd):
    -        """ Return the key associated to a given file descriptor
    -         Return None if it is not found. """
    -        try:
    -            return self._fd_to_key[fd]
    -        except KeyError:
    -            return None
    -
    -    def __enter__(self):
    -        return self
    -
    -    def __exit__(self, *args):
    -        self.close()
    -
    -
    -# Almost all platforms have select.select()
    -if hasattr(select, "select"):
    -    class SelectSelector(BaseSelector):
    -        """ Select-based selector. """
    -        def __init__(self):
    -            super(SelectSelector, self).__init__()
    -            self._readers = set()
    -            self._writers = set()
    -
    -        def register(self, fileobj, events, data=None):
    -            key = super(SelectSelector, self).register(fileobj, events, data)
    -            if events & EVENT_READ:
    -                self._readers.add(key.fd)
    -            if events & EVENT_WRITE:
    -                self._writers.add(key.fd)
    -            return key
    -
    -        def unregister(self, fileobj):
    -            key = super(SelectSelector, self).unregister(fileobj)
    -            self._readers.discard(key.fd)
    -            self._writers.discard(key.fd)
    -            return key
    -
    -        def _select(self, r, w, timeout=None):
    -            """ Wrapper for select.select because timeout is a positional arg """
    -            return select.select(r, w, [], timeout)
    -
    -        def select(self, timeout=None):
    -            # Selecting on empty lists on Windows errors out.
    -            if not len(self._readers) and not len(self._writers):
    -                return []
    -
    -            timeout = None if timeout is None else max(timeout, 0.0)
    -            ready = []
    -            r, w, _ = _syscall_wrapper(self._select, True, self._readers,
    -                                       self._writers, timeout)
    -            r = set(r)
    -            w = set(w)
    -            for fd in r | w:
    -                events = 0
    -                if fd in r:
    -                    events |= EVENT_READ
    -                if fd in w:
    -                    events |= EVENT_WRITE
    -
    -                key = self._key_from_fd(fd)
    -                if key:
    -                    ready.append((key, events & key.events))
    -            return ready
    -
    -
    -if hasattr(select, "poll"):
    -    class PollSelector(BaseSelector):
    -        """ Poll-based selector """
    -        def __init__(self):
    -            super(PollSelector, self).__init__()
    -            self._poll = select.poll()
    -
    -        def register(self, fileobj, events, data=None):
    -            key = super(PollSelector, self).register(fileobj, events, data)
    -            event_mask = 0
    -            if events & EVENT_READ:
    -                event_mask |= select.POLLIN
    -            if events & EVENT_WRITE:
    -                event_mask |= select.POLLOUT
    -            self._poll.register(key.fd, event_mask)
    -            return key
    -
    -        def unregister(self, fileobj):
    -            key = super(PollSelector, self).unregister(fileobj)
    -            self._poll.unregister(key.fd)
    -            return key
    -
    -        def _wrap_poll(self, timeout=None):
    -            """ Wrapper function for select.poll.poll() so that
    -            _syscall_wrapper can work with only seconds. """
    -            if timeout is not None:
    -                if timeout <= 0:
    -                    timeout = 0
    -                else:
    -                    # select.poll.poll() has a resolution of 1 millisecond,
    -                    # round away from zero to wait *at least* timeout seconds.
    -                    timeout = math.ceil(timeout * 1e3)
    -
    -            result = self._poll.poll(timeout)
    -            return result
    -
    -        def select(self, timeout=None):
    -            ready = []
    -            fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
    -            for fd, event_mask in fd_events:
    -                events = 0
    -                if event_mask & ~select.POLLIN:
    -                    events |= EVENT_WRITE
    -                if event_mask & ~select.POLLOUT:
    -                    events |= EVENT_READ
    -
    -                key = self._key_from_fd(fd)
    -                if key:
    -                    ready.append((key, events & key.events))
    -
    -            return ready
    -
    -
    -if hasattr(select, "epoll"):
    -    class EpollSelector(BaseSelector):
    -        """ Epoll-based selector """
    -        def __init__(self):
    -            super(EpollSelector, self).__init__()
    -            self._epoll = select.epoll()
    -
    -        def fileno(self):
    -            return self._epoll.fileno()
    -
    -        def register(self, fileobj, events, data=None):
    -            key = super(EpollSelector, self).register(fileobj, events, data)
    -            events_mask = 0
    -            if events & EVENT_READ:
    -                events_mask |= select.EPOLLIN
    -            if events & EVENT_WRITE:
    -                events_mask |= select.EPOLLOUT
    -            _syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
    -            return key
    -
    -        def unregister(self, fileobj):
    -            key = super(EpollSelector, self).unregister(fileobj)
    -            try:
    -                _syscall_wrapper(self._epoll.unregister, False, key.fd)
    -            except SelectorError:
    -                # This can occur when the fd was closed since registry.
    -                pass
    -            return key
    -
    -        def select(self, timeout=None):
    -            if timeout is not None:
    -                if timeout <= 0:
    -                    timeout = 0.0
    -                else:
    -                    # select.epoll.poll() has a resolution of 1 millisecond
    -                    # but luckily takes seconds so we don't need a wrapper
    -                    # like PollSelector. Just for better rounding.
    -                    timeout = math.ceil(timeout * 1e3) * 1e-3
    -                timeout = float(timeout)
    -            else:
    -                timeout = -1.0  # epoll.poll() must have a float.
    -
    -            # We always want at least 1 to ensure that select can be called
    -            # with no file descriptors registered. Otherwise will fail.
    -            max_events = max(len(self._fd_to_key), 1)
    -
    -            ready = []
    -            fd_events = _syscall_wrapper(self._epoll.poll, True,
    -                                         timeout=timeout,
    -                                         maxevents=max_events)
    -            for fd, event_mask in fd_events:
    -                events = 0
    -                if event_mask & ~select.EPOLLIN:
    -                    events |= EVENT_WRITE
    -                if event_mask & ~select.EPOLLOUT:
    -                    events |= EVENT_READ
    -
    -                key = self._key_from_fd(fd)
    -                if key:
    -                    ready.append((key, events & key.events))
    -            return ready
    -
    -        def close(self):
    -            self._epoll.close()
    -            super(EpollSelector, self).close()
    -
    -
    -if hasattr(select, "kqueue"):
    -    class KqueueSelector(BaseSelector):
    -        """ Kqueue / Kevent-based selector """
    -        def __init__(self):
    -            super(KqueueSelector, self).__init__()
    -            self._kqueue = select.kqueue()
    -
    -        def fileno(self):
    -            return self._kqueue.fileno()
    -
    -        def register(self, fileobj, events, data=None):
    -            key = super(KqueueSelector, self).register(fileobj, events, data)
    -            if events & EVENT_READ:
    -                kevent = select.kevent(key.fd,
    -                                       select.KQ_FILTER_READ,
    -                                       select.KQ_EV_ADD)
    -
    -                _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
    -
    -            if events & EVENT_WRITE:
    -                kevent = select.kevent(key.fd,
    -                                       select.KQ_FILTER_WRITE,
    -                                       select.KQ_EV_ADD)
    -
    -                _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
    -
    -            return key
    -
    -        def unregister(self, fileobj):
    -            key = super(KqueueSelector, self).unregister(fileobj)
    -            if key.events & EVENT_READ:
    -                kevent = select.kevent(key.fd,
    -                                       select.KQ_FILTER_READ,
    -                                       select.KQ_EV_DELETE)
    -                try:
    -                    _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
    -                except SelectorError:
    -                    pass
    -            if key.events & EVENT_WRITE:
    -                kevent = select.kevent(key.fd,
    -                                       select.KQ_FILTER_WRITE,
    -                                       select.KQ_EV_DELETE)
    -                try:
    -                    _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
    -                except SelectorError:
    -                    pass
    -
    -            return key
    -
    -        def select(self, timeout=None):
    -            if timeout is not None:
    -                timeout = max(timeout, 0)
    -
    -            max_events = len(self._fd_to_key) * 2
    -            ready_fds = {}
    -
    -            kevent_list = _syscall_wrapper(self._kqueue.control, True,
    -                                           None, max_events, timeout)
    -
    -            for kevent in kevent_list:
    -                fd = kevent.ident
    -                event_mask = kevent.filter
    -                events = 0
    -                if event_mask == select.KQ_FILTER_READ:
    -                    events |= EVENT_READ
    -                if event_mask == select.KQ_FILTER_WRITE:
    -                    events |= EVENT_WRITE
    -
    -                key = self._key_from_fd(fd)
    -                if key:
    -                    if key.fd not in ready_fds:
    -                        ready_fds[key.fd] = (key, events & key.events)
    -                    else:
    -                        old_events = ready_fds[key.fd][1]
    -                        ready_fds[key.fd] = (key, (events | old_events) & key.events)
    -
    -            return list(ready_fds.values())
    -
    -        def close(self):
    -            self._kqueue.close()
    -            super(KqueueSelector, self).close()
    -
    -
    -if not hasattr(select, 'select'):  # Platform-specific: AppEngine
    -    HAS_SELECT = False
    -
    -
    -def _can_allocate(struct):
    -    """ Checks that select structs can be allocated by the underlying
    -    operating system, not just advertised by the select module. We don't
    -    check select() because we'll be hopeful that most platforms that
    -    don't have it available will not advertise it. (ie: GAE) """
    -    try:
    -        # select.poll() objects won't fail until used.
    -        if struct == 'poll':
    -            p = select.poll()
    -            p.poll(0)
    -
    -        # All others will fail on allocation.
    -        else:
    -            getattr(select, struct)().close()
    -        return True
    -    except (OSError, AttributeError) as e:
    -        return False
    -
    -
    -# Choose the best implementation, roughly:
    -# kqueue == epoll > poll > select. Devpoll not supported. (See above)
    -# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
    -def DefaultSelector():
    -    """ This function serves as a first call for DefaultSelector to
    -    detect if the select module is being monkey-patched incorrectly
    -    by eventlet, greenlet, and preserve proper behavior. """
    -    global _DEFAULT_SELECTOR
    -    if _DEFAULT_SELECTOR is None:
    -        if _can_allocate('kqueue'):
    -            _DEFAULT_SELECTOR = KqueueSelector
    -        elif _can_allocate('epoll'):
    -            _DEFAULT_SELECTOR = EpollSelector
    -        elif _can_allocate('poll'):
    -            _DEFAULT_SELECTOR = PollSelector
    -        elif hasattr(select, 'select'):
    -            _DEFAULT_SELECTOR = SelectSelector
    -        else:  # Platform-specific: AppEngine
    -            raise ValueError('Platform does not have a selector')
    -    return _DEFAULT_SELECTOR()
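This ~580-line selectors backport is replaced by the much smaller `util/wait.py`; its `wait_for_read()` is what `is_connection_dropped()` in `util/connection.py` now calls. The new primitive in isolation, sketched with a local socket pair:

```python
import socket
from urllib3.util.wait import wait_for_read

a, b = socket.socketpair()

# Nothing buffered yet: a zero-timeout poll reports "not readable".
print(wait_for_read(a, timeout=0.0))   # False

b.sendall(b"ping")
print(wait_for_read(a, timeout=0.0))   # True: data (or EOF) is pending

a.close()
b.close()
```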
    diff --git a/lib/urllib3/util/ssl_.py b/lib/urllib3/util/ssl_.py
    index 32fd9eda..8495b775 100644
    --- a/lib/urllib3/util/ssl_.py
    +++ b/lib/urllib3/util/ssl_.py
    @@ -2,11 +2,14 @@ from __future__ import absolute_import
     import errno
     import warnings
     import hmac
    +import sys
     
     from binascii import hexlify, unhexlify
     from hashlib import md5, sha1, sha256
     
    +from .url import IPV4_RE, BRACELESS_IPV6_ADDRZ_RE
     from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning
    +from ..packages import six
     
     
     SSLContext = None
    @@ -15,11 +18,7 @@ IS_PYOPENSSL = False
     IS_SECURETRANSPORT = False
     
     # Maps the length of a digest to a possible hash function producing this digest
    -HASHFUNC_MAP = {
    -    32: md5,
    -    40: sha1,
    -    64: sha256,
    -}
    +HASHFUNC_MAP = {32: md5, 40: sha1, 64: sha256}
     
     
     def _const_compare_digest_backport(a, b):
    @@ -35,17 +34,27 @@ def _const_compare_digest_backport(a, b):
         return result == 0
     
     
    -_const_compare_digest = getattr(hmac, 'compare_digest',
    -                                _const_compare_digest_backport)
    -
    +_const_compare_digest = getattr(hmac, "compare_digest", _const_compare_digest_backport)
     
     try:  # Test for SSL features
         import ssl
    -    from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
    +    from ssl import wrap_socket, CERT_REQUIRED
         from ssl import HAS_SNI  # Has SNI?
     except ImportError:
         pass
     
    +try:  # Platform-specific: Python 3.6
    +    from ssl import PROTOCOL_TLS
    +
    +    PROTOCOL_SSLv23 = PROTOCOL_TLS
    +except ImportError:
    +    try:
    +        from ssl import PROTOCOL_SSLv23 as PROTOCOL_TLS
    +
    +        PROTOCOL_SSLv23 = PROTOCOL_TLS
    +    except ImportError:
    +        PROTOCOL_SSLv23 = PROTOCOL_TLS = 2
    +
     
     try:
         from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
    @@ -53,6 +62,7 @@ except ImportError:
         OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
         OP_NO_COMPRESSION = 0x20000
     
    +
     # A secure default.
     # Sources for more information on TLS ciphers:
     #
    @@ -61,41 +71,39 @@ except ImportError:
     # - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
     #
     # The general intent is:
    -# - Prefer TLS 1.3 cipher suites
     # - prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
     # - prefer ECDHE over DHE for better performance,
     # - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and
     #   security,
     # - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common,
    -# - disable NULL authentication, MD5 MACs and DSS for security reasons.
    -DEFAULT_CIPHERS = ':'.join([
    -    'TLS13-AES-256-GCM-SHA384',
    -    'TLS13-CHACHA20-POLY1305-SHA256',
    -    'TLS13-AES-128-GCM-SHA256',
    -    'ECDH+AESGCM',
    -    'ECDH+CHACHA20',
    -    'DH+AESGCM',
    -    'DH+CHACHA20',
    -    'ECDH+AES256',
    -    'DH+AES256',
    -    'ECDH+AES128',
    -    'DH+AES',
    -    'RSA+AESGCM',
    -    'RSA+AES',
    -    '!aNULL',
    -    '!eNULL',
    -    '!MD5',
    -])
    +# - disable NULL authentication, MD5 MACs, DSS, and other
    +#   insecure ciphers for security reasons.
    +# - NOTE: TLS 1.3 cipher suites are managed through a different interface
    +#   not exposed by CPython (yet!) and are enabled by default if they're available.
    +DEFAULT_CIPHERS = ":".join(
    +    [
    +        "ECDHE+AESGCM",
    +        "ECDHE+CHACHA20",
    +        "DHE+AESGCM",
    +        "DHE+CHACHA20",
    +        "ECDH+AESGCM",
    +        "DH+AESGCM",
    +        "ECDH+AES",
    +        "DH+AES",
    +        "RSA+AESGCM",
    +        "RSA+AES",
    +        "!aNULL",
    +        "!eNULL",
    +        "!MD5",
    +        "!DSS",
    +    ]
    +)
     
     try:
         from ssl import SSLContext  # Modern SSL?
     except ImportError:
    -    import sys
    -
    -    class SSLContext(object):  # Platform-specific: Python 2 & 3.1
    -        supports_set_ciphers = ((2, 7) <= sys.version_info < (3,) or
    -                                (3, 2) <= sys.version_info)
     
    +    class SSLContext(object):  # Platform-specific: Python 2
             def __init__(self, protocol_version):
                 self.protocol = protocol_version
                 # Use default values from a real SSLContext
    @@ -118,36 +126,27 @@ except ImportError:
                     raise SSLError("CA directories not supported in older Pythons")
     
             def set_ciphers(self, cipher_suite):
    -            if not self.supports_set_ciphers:
    -                raise TypeError(
    -                    'Your version of Python does not support setting '
    -                    'a custom cipher suite. Please upgrade to Python '
    -                    '2.7, 3.2, or later if you need this functionality.'
    -                )
                 self.ciphers = cipher_suite
     
             def wrap_socket(self, socket, server_hostname=None, server_side=False):
                 warnings.warn(
    -                'A true SSLContext object is not available. This prevents '
    -                'urllib3 from configuring SSL appropriately and may cause '
    -                'certain SSL connections to fail. You can upgrade to a newer '
    -                'version of Python to solve this. For more information, see '
    -                'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
    -                '#ssl-warnings',
    -                InsecurePlatformWarning
    +                "A true SSLContext object is not available. This prevents "
    +                "urllib3 from configuring SSL appropriately and may cause "
    +                "certain SSL connections to fail. You can upgrade to a newer "
    +                "version of Python to solve this. For more information, see "
    +                "https://urllib3.readthedocs.io/en/latest/advanced-usage.html"
    +                "#ssl-warnings",
    +                InsecurePlatformWarning,
                 )
                 kwargs = {
    -                'keyfile': self.keyfile,
    -                'certfile': self.certfile,
    -                'ca_certs': self.ca_certs,
    -                'cert_reqs': self.verify_mode,
    -                'ssl_version': self.protocol,
    -                'server_side': server_side,
    +                "keyfile": self.keyfile,
    +                "certfile": self.certfile,
    +                "ca_certs": self.ca_certs,
    +                "cert_reqs": self.verify_mode,
    +                "ssl_version": self.protocol,
    +                "server_side": server_side,
                 }
    -            if self.supports_set_ciphers:  # Platform-specific: Python 2.7+
    -                return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
    -            else:  # Platform-specific: Python 2.6
    -                return wrap_socket(socket, **kwargs)
    +            return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
     
     
     def assert_fingerprint(cert, fingerprint):
    @@ -160,12 +159,11 @@ def assert_fingerprint(cert, fingerprint):
             Fingerprint as string of hexdigits, can be interspersed by colons.
         """
     
    -    fingerprint = fingerprint.replace(':', '').lower()
    +    fingerprint = fingerprint.replace(":", "").lower()
         digest_length = len(fingerprint)
         hashfunc = HASHFUNC_MAP.get(digest_length)
         if not hashfunc:
    -        raise SSLError(
    -            'Fingerprint of invalid length: {0}'.format(fingerprint))
    +        raise SSLError("Fingerprint of invalid length: {0}".format(fingerprint))
     
          # We need encode() here for py32; works on py2 and py33.
         fingerprint_bytes = unhexlify(fingerprint.encode())
    @@ -173,8 +171,11 @@ def assert_fingerprint(cert, fingerprint):
         cert_digest = hashfunc(cert).digest()
     
         if not _const_compare_digest(cert_digest, fingerprint_bytes):
    -        raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
    -                       .format(fingerprint, hexlify(cert_digest)))
    +        raise SSLError(
    +            'Fingerprints did not match. Expected "{0}", got "{1}".'.format(
    +                fingerprint, hexlify(cert_digest)
    +            )
    +        )
     
     
     def resolve_cert_reqs(candidate):
    @@ -183,18 +184,18 @@ def resolve_cert_reqs(candidate):
         the wrap_socket function/method from the ssl module.
         Defaults to :data:`ssl.CERT_NONE`.
         If given a string it is assumed to be the name of the constant in the
    -    :mod:`ssl` module or its abbrevation.
    +    :mod:`ssl` module or its abbreviation.
          (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
         If it's neither `None` nor a string we assume it is already the numeric
         constant which can directly be passed to wrap_socket.
         """
         if candidate is None:
    -        return CERT_NONE
    +        return CERT_REQUIRED
     
         if isinstance(candidate, str):
             res = getattr(ssl, candidate, None)
             if res is None:
    -            res = getattr(ssl, 'CERT_' + candidate)
    +            res = getattr(ssl, "CERT_" + candidate)
             return res
     
         return candidate
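
     A quick sketch of the resolution rules described in the docstring,
     including the stricter default (assuming the vendored package under lib/
     is importable as urllib3):

         import ssl
         from urllib3.util.ssl_ import resolve_cert_reqs

         assert resolve_cert_reqs(None) == ssl.CERT_REQUIRED             # new default
         assert resolve_cert_reqs("NONE") == ssl.CERT_NONE               # abbreviation
         assert resolve_cert_reqs("CERT_OPTIONAL") == ssl.CERT_OPTIONAL  # full name
         assert resolve_cert_reqs(ssl.CERT_NONE) == ssl.CERT_NONE        # passthrough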
    @@ -205,19 +206,20 @@ def resolve_ssl_version(candidate):
         like resolve_cert_reqs
         """
         if candidate is None:
    -        return PROTOCOL_SSLv23
    +        return PROTOCOL_TLS
     
         if isinstance(candidate, str):
             res = getattr(ssl, candidate, None)
             if res is None:
    -            res = getattr(ssl, 'PROTOCOL_' + candidate)
    +            res = getattr(ssl, "PROTOCOL_" + candidate)
             return res
     
         return candidate
     
     
    -def create_urllib3_context(ssl_version=None, cert_reqs=None,
    -                           options=None, ciphers=None):
    +def create_urllib3_context(
    +    ssl_version=None, cert_reqs=None, options=None, ciphers=None
    +):
         """All arguments have the same meaning as ``ssl_wrap_socket``.
     
         By default, this function does a lot of the same work that
    @@ -251,7 +253,9 @@ def create_urllib3_context(ssl_version=None, cert_reqs=None,
             Constructed SSLContext object with specified options
         :rtype: SSLContext
         """
    -    context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)
    +    context = SSLContext(ssl_version or PROTOCOL_TLS)
    +
    +    context.set_ciphers(ciphers or DEFAULT_CIPHERS)
     
         # Setting the default here, as we may have no ssl module on import
         cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
    @@ -268,21 +272,40 @@ def create_urllib3_context(ssl_version=None, cert_reqs=None,
     
         context.options |= options
     
    -    if getattr(context, 'supports_set_ciphers', True):  # Platform-specific: Python 2.6
    -        context.set_ciphers(ciphers or DEFAULT_CIPHERS)
    +    # Enable post-handshake authentication for TLS 1.3, see GH #1634. PHA is
    +    # necessary for conditional client cert authentication with TLS 1.3.
    +    # The attribute is None for OpenSSL <= 1.1.0 or does not exist in older
    +    # versions of Python.  We only enable on Python 3.7.4+ or if certificate
    +    # verification is enabled to work around Python issue #37428
    +    # See: https://bugs.python.org/issue37428
    +    if (cert_reqs == ssl.CERT_REQUIRED or sys.version_info >= (3, 7, 4)) and getattr(
    +        context, "post_handshake_auth", None
    +    ) is not None:
    +        context.post_handshake_auth = True
     
         context.verify_mode = cert_reqs
    -    if getattr(context, 'check_hostname', None) is not None:  # Platform-specific: Python 3.2
    +    if (
    +        getattr(context, "check_hostname", None) is not None
    +    ):  # Platform-specific: Python 3.2
             # We do our own verification, including fingerprints and alternative
             # hostnames. So disable it here
             context.check_hostname = False
         return context
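
     A minimal usage sketch for the factory above; the host is illustrative
     and the vendored package is assumed importable as urllib3:

         import socket
         from urllib3.util.ssl_ import create_urllib3_context

         ctx = create_urllib3_context()  # PROTOCOL_TLS, CERT_REQUIRED, DEFAULT_CIPHERS
         ctx.load_default_certs()        # trust the OS store for this sketch
         raw = socket.create_connection(("example.org", 443))
         tls = ctx.wrap_socket(raw, server_hostname="example.org")
         print(tls.version())            # e.g. 'TLSv1.3' with OpenSSL 1.1.1+
         tls.close()

     Note that check_hostname is deliberately left False on the returned
     context: urllib3 performs its own hostname and fingerprint checks.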
     
     
    -def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
    -                    ca_certs=None, server_hostname=None,
    -                    ssl_version=None, ciphers=None, ssl_context=None,
    -                    ca_cert_dir=None):
    +def ssl_wrap_socket(
    +    sock,
    +    keyfile=None,
    +    certfile=None,
    +    cert_reqs=None,
    +    ca_certs=None,
    +    server_hostname=None,
    +    ssl_version=None,
    +    ciphers=None,
    +    ssl_context=None,
    +    ca_cert_dir=None,
    +    key_password=None,
    +):
         """
         All arguments except for server_hostname, ssl_context, and ca_cert_dir have
         the same meaning as they do when using :func:`ssl.wrap_socket`.
    @@ -293,25 +316,25 @@ def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
             A pre-made :class:`SSLContext` object. If none is provided, one will
             be created using :func:`create_urllib3_context`.
         :param ciphers:
    -        A string of ciphers we wish the client to support. This is not
    -        supported on Python 2.6 as the ssl module does not support it.
    +        A string of ciphers we wish the client to support.
         :param ca_cert_dir:
             A directory containing CA certificates in multiple separate files, as
             supported by OpenSSL's -CApath flag or the capath argument to
             SSLContext.load_verify_locations().
    +    :param key_password:
    +        Optional password if the keyfile is encrypted.
         """
         context = ssl_context
         if context is None:
             # Note: This branch of code and all the variables in it are no longer
             # used by urllib3 itself. We should consider deprecating and removing
             # this code.
    -        context = create_urllib3_context(ssl_version, cert_reqs,
    -                                         ciphers=ciphers)
    +        context = create_urllib3_context(ssl_version, cert_reqs, ciphers=ciphers)
     
         if ca_certs or ca_cert_dir:
             try:
                 context.load_verify_locations(ca_certs, ca_cert_dir)
    -        except IOError as e:  # Platform-specific: Python 2.6, 2.7, 3.2
    +        except IOError as e:  # Platform-specific: Python 2.7
                 raise SSLError(e)
             # Py33 raises FileNotFoundError which subclasses OSError
             # These are not equivalent unless we check the errno attribute
    @@ -319,23 +342,66 @@ def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
                 if e.errno == errno.ENOENT:
                     raise SSLError(e)
                 raise
    -    elif getattr(context, 'load_default_certs', None) is not None:
    +
    +    elif ssl_context is None and hasattr(context, "load_default_certs"):
             # try to load OS default certs; works well on Windows (require Python3.4+)
             context.load_default_certs()
     
    -    if certfile:
    -        context.load_cert_chain(certfile, keyfile)
    -    if HAS_SNI:  # Platform-specific: OpenSSL with enabled SNI
    -        return context.wrap_socket(sock, server_hostname=server_hostname)
    +    # Attempt to detect if we get the goofy behavior of the
    +    # keyfile being encrypted and OpenSSL asking for the
    +    # passphrase via the terminal and instead error out.
    +    if keyfile and key_password is None and _is_key_file_encrypted(keyfile):
    +        raise SSLError("Client private key is encrypted, password is required")
    +
    +    if certfile:
    +        if key_password is None:
    +            context.load_cert_chain(certfile, keyfile)
    +        else:
    +            context.load_cert_chain(certfile, keyfile, key_password)
    +
    +    # If we detect server_hostname is an IP address then the SNI
    +    # extension should not be used according to RFC3546 Section 3.1
     +    # We shouldn't warn the user if SNI isn't available, since we would
     +    # not be using SNI anyway when server_hostname is an IP address.
    +    if (
    +        server_hostname is not None and not is_ipaddress(server_hostname)
    +    ) or IS_SECURETRANSPORT:
    +        if HAS_SNI and server_hostname is not None:
    +            return context.wrap_socket(sock, server_hostname=server_hostname)
    +
    +        warnings.warn(
    +            "An HTTPS request has been made, but the SNI (Server Name "
    +            "Indication) extension to TLS is not available on this platform. "
    +            "This may cause the server to present an incorrect TLS "
    +            "certificate, which can cause validation failures. You can upgrade to "
    +            "a newer version of Python to solve this. For more information, see "
    +            "https://urllib3.readthedocs.io/en/latest/advanced-usage.html"
    +            "#ssl-warnings",
    +            SNIMissingWarning,
    +        )
     
    -    warnings.warn(
    -        'An HTTPS request has been made, but the SNI (Subject Name '
    -        'Indication) extension to TLS is not available on this platform. '
    -        'This may cause the server to present an incorrect TLS '
    -        'certificate, which can cause validation failures. You can upgrade to '
    -        'a newer version of Python to solve this. For more information, see '
    -        'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
    -        '#ssl-warnings',
    -        SNIMissingWarning
    -    )
         return context.wrap_socket(sock)
    +
    +
    +def is_ipaddress(hostname):
    +    """Detects whether the hostname given is an IPv4 or IPv6 address.
    +    Also detects IPv6 addresses with Zone IDs.
    +
    +    :param str hostname: Hostname to examine.
    +    :return: True if the hostname is an IP address, False otherwise.
    +    """
    +    if not six.PY2 and isinstance(hostname, bytes):
    +        # IDN A-label bytes are ASCII compatible.
    +        hostname = hostname.decode("ascii")
    +    return bool(IPV4_RE.match(hostname) or BRACELESS_IPV6_ADDRZ_RE.match(hostname))
    +
    +
    +def _is_key_file_encrypted(key_file):
    +    """Detects if a key file is encrypted or not."""
    +    with open(key_file, "r") as f:
    +        for line in f:
    +            # Look for Proc-Type: 4,ENCRYPTED
    +            if "ENCRYPTED" in line:
    +                return True
    +
    +    return False
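
     The SNI rule above can be exercised in isolation with the new helper
     (vendored import path assumed); True means the name is an IP literal,
     so SNI is skipped:

         from urllib3.util.ssl_ import is_ipaddress

         print(is_ipaddress("93.184.216.34"))    # True  -> no SNI sent
         print(is_ipaddress("::1"))              # True  -> bare IPv6 accepted
         print(is_ipaddress("fe80::1%25eth0"))   # True  -> zone IDs are handled
         print(is_ipaddress("www.example.com"))  # False -> SNI sent when available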
    diff --git a/lib/urllib3/util/timeout.py b/lib/urllib3/util/timeout.py
    index cec817e6..c1dc1e97 100644
    --- a/lib/urllib3/util/timeout.py
    +++ b/lib/urllib3/util/timeout.py
    @@ -1,4 +1,5 @@
     from __future__ import absolute_import
    +
     # The default socket timeout, used by httplib to indicate that no timeout was
     # specified by the user
     from socket import _GLOBAL_DEFAULT_TIMEOUT
    @@ -45,19 +46,20 @@ class Timeout(object):
         :type total: integer, float, or None
     
         :param connect:
    -        The maximum amount of time to wait for a connection attempt to a server
    -        to succeed. Omitting the parameter will default the connect timeout to
    -        the system default, probably `the global default timeout in socket.py
    +        The maximum amount of time (in seconds) to wait for a connection
    +        attempt to a server to succeed. Omitting the parameter will default the
    +        connect timeout to the system default, probably `the global default
    +        timeout in socket.py
              <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
             None will set an infinite timeout for connection attempts.
     
         :type connect: integer, float, or None
     
         :param read:
    -        The maximum amount of time to wait between consecutive
    -        read operations for a response from the server. Omitting
    -        the parameter will default the read timeout to the system
    -        default, probably `the global default timeout in socket.py
    +        The maximum amount of time (in seconds) to wait between consecutive
    +        read operations for a response from the server. Omitting the parameter
    +        will default the read timeout to the system default, probably `the
    +        global default timeout in socket.py
              <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
             None will set an infinite timeout.
     
    @@ -91,14 +93,18 @@ class Timeout(object):
         DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
     
         def __init__(self, total=None, connect=_Default, read=_Default):
    -        self._connect = self._validate_timeout(connect, 'connect')
    -        self._read = self._validate_timeout(read, 'read')
    -        self.total = self._validate_timeout(total, 'total')
    +        self._connect = self._validate_timeout(connect, "connect")
    +        self._read = self._validate_timeout(read, "read")
    +        self.total = self._validate_timeout(total, "total")
             self._start_connect = None
     
         def __str__(self):
    -        return '%s(connect=%r, read=%r, total=%r)' % (
    -            type(self).__name__, self._connect, self._read, self.total)
    +        return "%s(connect=%r, read=%r, total=%r)" % (
    +            type(self).__name__,
    +            self._connect,
    +            self._read,
    +            self.total,
    +        )
     
         @classmethod
         def _validate_timeout(cls, value, name):
    @@ -118,22 +124,31 @@ class Timeout(object):
                 return value
     
             if isinstance(value, bool):
    -            raise ValueError("Timeout cannot be a boolean value. It must "
    -                             "be an int, float or None.")
    +            raise ValueError(
    +                "Timeout cannot be a boolean value. It must "
    +                "be an int, float or None."
    +            )
             try:
                 float(value)
             except (TypeError, ValueError):
    -            raise ValueError("Timeout value %s was %s, but it must be an "
    -                             "int, float or None." % (name, value))
    +            raise ValueError(
    +                "Timeout value %s was %s, but it must be an "
    +                "int, float or None." % (name, value)
    +            )
     
             try:
                 if value <= 0:
    -                raise ValueError("Attempted to set %s timeout to %s, but the "
    -                                 "timeout cannot be set to a value less "
    -                                 "than or equal to 0." % (name, value))
    -        except TypeError:  # Python 3
    -            raise ValueError("Timeout value %s was %s, but it must be an "
    -                             "int, float or None." % (name, value))
    +                raise ValueError(
    +                    "Attempted to set %s timeout to %s, but the "
    +                    "timeout cannot be set to a value less "
    +                    "than or equal to 0." % (name, value)
    +                )
    +        except TypeError:
    +            # Python 3
    +            raise ValueError(
    +                "Timeout value %s was %s, but it must be an "
    +                "int, float or None." % (name, value)
    +            )
     
             return value
     
    @@ -165,8 +180,7 @@ class Timeout(object):
             # We can't use copy.deepcopy because that will also create a new object
             # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
             # detect the user default.
    -        return Timeout(connect=self._connect, read=self._read,
    -                       total=self.total)
    +        return Timeout(connect=self._connect, read=self._read, total=self.total)
     
         def start_connect(self):
             """ Start the timeout clock, used during a connect() attempt
    @@ -182,14 +196,15 @@ class Timeout(object):
         def get_connect_duration(self):
             """ Gets the time elapsed since the call to :meth:`start_connect`.
     
    -        :return: Elapsed time.
    +        :return: Elapsed time in seconds.
             :rtype: float
             :raises urllib3.exceptions.TimeoutStateError: if you attempt
                 to get duration for a timer that hasn't been started.
             """
             if self._start_connect is None:
    -            raise TimeoutStateError("Can't get connect duration for timer "
    -                                    "that has not started.")
    +            raise TimeoutStateError(
     +                "Can't get connect duration for timer that has not started."
    +            )
             return current_time() - self._start_connect
     
         @property
    @@ -227,15 +242,16 @@ class Timeout(object):
             :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
                 has not yet been called on this object.
             """
    -        if (self.total is not None and
    -                self.total is not self.DEFAULT_TIMEOUT and
    -                self._read is not None and
    -                self._read is not self.DEFAULT_TIMEOUT):
    +        if (
    +            self.total is not None
    +            and self.total is not self.DEFAULT_TIMEOUT
    +            and self._read is not None
    +            and self._read is not self.DEFAULT_TIMEOUT
    +        ):
                 # In case the connect timeout has not yet been established.
                 if self._start_connect is None:
                     return self._read
    -            return max(0, min(self.total - self.get_connect_duration(),
    -                              self._read))
    +            return max(0, min(self.total - self.get_connect_duration(), self._read))
             elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
                 return max(0, self.total - self.get_connect_duration())
             else:
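
     A sketch of the timeout arithmetic documented above, all values in
     seconds (vendored import path assumed):

         import time
         from urllib3.util.timeout import Timeout

         t = Timeout(total=10.0, connect=3.0, read=7.0)
         t.start_connect()          # start the clock for the connect phase
         time.sleep(0.5)            # pretend connect() took half a second
         print(t.connect_timeout)   # 3.0 == min(connect, total)
         print(t.read_timeout)      # 7.0 == min(total - elapsed, read)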
    diff --git a/lib/urllib3/util/url.py b/lib/urllib3/util/url.py
    index 6b6f9968..9675f742 100644
    --- a/lib/urllib3/util/url.py
    +++ b/lib/urllib3/util/url.py
    @@ -1,34 +1,110 @@
     from __future__ import absolute_import
    +import re
     from collections import namedtuple
     
     from ..exceptions import LocationParseError
    +from ..packages import six
     
     
    -url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']
    +url_attrs = ["scheme", "auth", "host", "port", "path", "query", "fragment"]
     
     # We only want to normalize urls with an HTTP(S) scheme.
     # urllib3 infers URLs without a scheme (None) to be http.
    -NORMALIZABLE_SCHEMES = ('http', 'https', None)
    +NORMALIZABLE_SCHEMES = ("http", "https", None)
    +
    +# Almost all of these patterns were derived from the
    +# 'rfc3986' module: https://github.com/python-hyper/rfc3986
    +PERCENT_RE = re.compile(r"%[a-fA-F0-9]{2}")
    +SCHEME_RE = re.compile(r"^(?:[a-zA-Z][a-zA-Z0-9+-]*:|/)")
    +URI_RE = re.compile(
    +    r"^(?:([a-zA-Z][a-zA-Z0-9+.-]*):)?"
    +    r"(?://([^/?#]*))?"
    +    r"([^?#]*)"
    +    r"(?:\?([^#]*))?"
    +    r"(?:#(.*))?$",
    +    re.UNICODE | re.DOTALL,
    +)
    +
    +IPV4_PAT = r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}"
    +HEX_PAT = "[0-9A-Fa-f]{1,4}"
    +LS32_PAT = "(?:{hex}:{hex}|{ipv4})".format(hex=HEX_PAT, ipv4=IPV4_PAT)
    +_subs = {"hex": HEX_PAT, "ls32": LS32_PAT}
    +_variations = [
    +    #                            6( h16 ":" ) ls32
    +    "(?:%(hex)s:){6}%(ls32)s",
    +    #                       "::" 5( h16 ":" ) ls32
    +    "::(?:%(hex)s:){5}%(ls32)s",
    +    # [               h16 ] "::" 4( h16 ":" ) ls32
    +    "(?:%(hex)s)?::(?:%(hex)s:){4}%(ls32)s",
    +    # [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32
    +    "(?:(?:%(hex)s:)?%(hex)s)?::(?:%(hex)s:){3}%(ls32)s",
    +    # [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32
    +    "(?:(?:%(hex)s:){0,2}%(hex)s)?::(?:%(hex)s:){2}%(ls32)s",
    +    # [ *3( h16 ":" ) h16 ] "::"    h16 ":"   ls32
    +    "(?:(?:%(hex)s:){0,3}%(hex)s)?::%(hex)s:%(ls32)s",
    +    # [ *4( h16 ":" ) h16 ] "::"              ls32
    +    "(?:(?:%(hex)s:){0,4}%(hex)s)?::%(ls32)s",
    +    # [ *5( h16 ":" ) h16 ] "::"              h16
    +    "(?:(?:%(hex)s:){0,5}%(hex)s)?::%(hex)s",
    +    # [ *6( h16 ":" ) h16 ] "::"
    +    "(?:(?:%(hex)s:){0,6}%(hex)s)?::",
    +]
    +
    +UNRESERVED_PAT = r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._!\-~"
    +IPV6_PAT = "(?:" + "|".join([x % _subs for x in _variations]) + ")"
    +ZONE_ID_PAT = "(?:%25|%)(?:[" + UNRESERVED_PAT + "]|%[a-fA-F0-9]{2})+"
    +IPV6_ADDRZ_PAT = r"\[" + IPV6_PAT + r"(?:" + ZONE_ID_PAT + r")?\]"
    +REG_NAME_PAT = r"(?:[^\[\]%:/?#]|%[a-fA-F0-9]{2})*"
    +TARGET_RE = re.compile(r"^(/[^?]*)(?:\?([^#]+))?(?:#(.*))?$")
    +
    +IPV4_RE = re.compile("^" + IPV4_PAT + "$")
    +IPV6_RE = re.compile("^" + IPV6_PAT + "$")
    +IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT + "$")
    +BRACELESS_IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT[2:-2] + "$")
    +ZONE_ID_RE = re.compile("(" + ZONE_ID_PAT + r")\]$")
    +
    +SUBAUTHORITY_PAT = (u"^(?:(.*)@)?(%s|%s|%s)(?::([0-9]{0,5}))?$") % (
    +    REG_NAME_PAT,
    +    IPV4_PAT,
    +    IPV6_ADDRZ_PAT,
    +)
    +SUBAUTHORITY_RE = re.compile(SUBAUTHORITY_PAT, re.UNICODE | re.DOTALL)
    +
    +UNRESERVED_CHARS = set(
    +    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._-~"
    +)
    +SUB_DELIM_CHARS = set("!$&'()*+,;=")
    +USERINFO_CHARS = UNRESERVED_CHARS | SUB_DELIM_CHARS | {":"}
    +PATH_CHARS = USERINFO_CHARS | {"@", "/"}
    +QUERY_CHARS = FRAGMENT_CHARS = PATH_CHARS | {"?"}
     
     
    -class Url(namedtuple('Url', url_attrs)):
    +class Url(namedtuple("Url", url_attrs)):
         """
    -    Datastructure for representing an HTTP URL. Used as a return value for
    +    Data structure for representing an HTTP URL. Used as a return value for
         :func:`parse_url`. Both the scheme and host are normalized as they are
         both case-insensitive according to RFC 3986.
         """
    +
         __slots__ = ()
     
    -    def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
    -                query=None, fragment=None):
    -        if path and not path.startswith('/'):
    -            path = '/' + path
    -        if scheme:
    +    def __new__(
    +        cls,
    +        scheme=None,
    +        auth=None,
    +        host=None,
    +        port=None,
    +        path=None,
    +        query=None,
    +        fragment=None,
    +    ):
    +        if path and not path.startswith("/"):
    +            path = "/" + path
    +        if scheme is not None:
                 scheme = scheme.lower()
    -        if host and scheme in NORMALIZABLE_SCHEMES:
    -            host = host.lower()
    -        return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
    -                                       query, fragment)
    +        return super(Url, cls).__new__(
    +            cls, scheme, auth, host, port, path, query, fragment
    +        )
     
         @property
         def hostname(self):
    @@ -38,10 +114,10 @@ class Url(namedtuple('Url', url_attrs)):
         @property
         def request_uri(self):
             """Absolute path including the query string."""
    -        uri = self.path or '/'
    +        uri = self.path or "/"
     
             if self.query is not None:
    -            uri += '?' + self.query
    +            uri += "?" + self.query
     
             return uri
     
    @@ -49,7 +125,7 @@ class Url(namedtuple('Url', url_attrs)):
         def netloc(self):
             """Network location including host and port"""
             if self.port:
    -            return '%s:%d' % (self.host, self.port)
    +            return "%s:%d" % (self.host, self.port)
             return self.host
     
         @property
    @@ -72,23 +148,23 @@ class Url(namedtuple('Url', url_attrs)):
                 'http://username:password@host.com:80/path?query#fragment'
             """
             scheme, auth, host, port, path, query, fragment = self
    -        url = ''
    +        url = u""
     
             # We use "is not None" we want things to happen with empty strings (or 0 port)
             if scheme is not None:
    -            url += scheme + '://'
    +            url += scheme + u"://"
             if auth is not None:
    -            url += auth + '@'
    +            url += auth + u"@"
             if host is not None:
                 url += host
             if port is not None:
    -            url += ':' + str(port)
    +            url += u":" + str(port)
             if path is not None:
                 url += path
             if query is not None:
    -            url += '?' + query
    +            url += u"?" + query
             if fragment is not None:
    -            url += '#' + fragment
    +            url += u"#" + fragment
     
             return url
     
    @@ -98,6 +174,8 @@ class Url(namedtuple('Url', url_attrs)):
     
     def split_first(s, delims):
         """
    +    .. deprecated:: 1.25
    +
         Given a string and an iterable of delimiters, split on the first found
         delimiter. Return two split parts and the matched delimiter.
     
    @@ -124,15 +202,150 @@ def split_first(s, delims):
                 min_delim = d
     
         if min_idx is None or min_idx < 0:
    -        return s, '', None
    +        return s, "", None
     
    -    return s[:min_idx], s[min_idx + 1:], min_delim
    +    return s[:min_idx], s[min_idx + 1 :], min_delim
    +
    +
    +def _encode_invalid_chars(component, allowed_chars, encoding="utf-8"):
    +    """Percent-encodes a URI component without reapplying
    +    onto an already percent-encoded component.
    +    """
    +    if component is None:
    +        return component
    +
    +    component = six.ensure_text(component)
    +
    +    # Try to see if the component we're encoding is already percent-encoded
    +    # so we can skip all '%' characters but still encode all others.
    +    percent_encodings = PERCENT_RE.findall(component)
    +
    +    # Normalize existing percent-encoded bytes.
    +    for enc in percent_encodings:
    +        if not enc.isupper():
    +            component = component.replace(enc, enc.upper())
    +
    +    uri_bytes = component.encode("utf-8", "surrogatepass")
    +    is_percent_encoded = len(percent_encodings) == uri_bytes.count(b"%")
    +
    +    encoded_component = bytearray()
    +
    +    for i in range(0, len(uri_bytes)):
    +        # Will return a single character bytestring on both Python 2 & 3
    +        byte = uri_bytes[i : i + 1]
    +        byte_ord = ord(byte)
    +        if (is_percent_encoded and byte == b"%") or (
    +            byte_ord < 128 and byte.decode() in allowed_chars
    +        ):
    +            encoded_component.extend(byte)
    +            continue
    +        encoded_component.extend(b"%" + (hex(byte_ord)[2:].encode().zfill(2).upper()))
    +
    +    return encoded_component.decode(encoding)
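
     For example (calling the private helper directly, vendored path assumed):

         from urllib3.util.url import QUERY_CHARS, _encode_invalid_chars

         print(_encode_invalid_chars(u"caff\u00e8 latte", QUERY_CHARS))  # 'caff%C3%A8%20latte'
         print(_encode_invalid_chars("100%25 sure", QUERY_CHARS))        # '100%25%20sure' ('%25' kept)
         print(_encode_invalid_chars("50% off", QUERY_CHARS))            # '50%25%20off' (bare '%' re-encoded)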
    +
    +
    +def _remove_path_dot_segments(path):
    +    # See http://tools.ietf.org/html/rfc3986#section-5.2.4 for pseudo-code
    +    segments = path.split("/")  # Turn the path into a list of segments
    +    output = []  # Initialize the variable to use to store output
    +
    +    for segment in segments:
    +        # '.' is the current directory, so ignore it, it is superfluous
    +        if segment == ".":
    +            continue
    +        # Anything other than '..', should be appended to the output
    +        elif segment != "..":
    +            output.append(segment)
    +        # In this case segment == '..', if we can, we should pop the last
    +        # element
    +        elif output:
    +            output.pop()
    +
    +    # If the path starts with '/' and the output is empty or the first string
    +    # is non-empty
    +    if path.startswith("/") and (not output or output[0]):
    +        output.insert(0, "")
    +
    +    # If the path starts with '/.' or '/..' ensure we add one more empty
    +    # string to add a trailing '/'
    +    if path.endswith(("/.", "/..")):
    +        output.append("")
    +
    +    return "/".join(output)
    +
    +
    +def _normalize_host(host, scheme):
    +    if host:
    +        if isinstance(host, six.binary_type):
    +            host = six.ensure_str(host)
    +
    +        if scheme in NORMALIZABLE_SCHEMES:
    +            is_ipv6 = IPV6_ADDRZ_RE.match(host)
    +            if is_ipv6:
    +                match = ZONE_ID_RE.search(host)
    +                if match:
    +                    start, end = match.span(1)
    +                    zone_id = host[start:end]
    +
    +                    if zone_id.startswith("%25") and zone_id != "%25":
    +                        zone_id = zone_id[3:]
    +                    else:
    +                        zone_id = zone_id[1:]
    +                    zone_id = "%" + _encode_invalid_chars(zone_id, UNRESERVED_CHARS)
    +                    return host[:start].lower() + zone_id + host[end:]
    +                else:
    +                    return host.lower()
    +            elif not IPV4_RE.match(host):
    +                return six.ensure_str(
    +                    b".".join([_idna_encode(label) for label in host.split(".")])
    +                )
    +    return host
    +
    +
    +def _idna_encode(name):
    +    if name and any([ord(x) > 128 for x in name]):
    +        try:
    +            import idna
    +        except ImportError:
    +            six.raise_from(
    +                LocationParseError("Unable to parse URL without the 'idna' module"),
    +                None,
    +            )
    +        try:
    +            return idna.encode(name.lower(), strict=True, std3_rules=True)
    +        except idna.IDNAError:
    +            six.raise_from(
    +                LocationParseError(u"Name '%s' is not a valid IDNA label" % name), None
    +            )
    +    return name.lower().encode("ascii")
    +
    +
    +def _encode_target(target):
    +    """Percent-encodes a request target so that there are no invalid characters"""
    +    if not target.startswith("/"):
    +        return target
    +
    +    path, query, fragment = TARGET_RE.match(target).groups()
    +    target = _encode_invalid_chars(path, PATH_CHARS)
    +    query = _encode_invalid_chars(query, QUERY_CHARS)
    +    fragment = _encode_invalid_chars(fragment, FRAGMENT_CHARS)
    +    if query is not None:
    +        target += "?" + query
    +    if fragment is not None:
    +        target += "#" + target
    +    return target
     
     
     def parse_url(url):
         """
         Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
         performed to parse incomplete urls. Fields not provided will be None.
    +    This parser is RFC 3986 compliant.
    +
    +    The parser logic and helper functions are based heavily on
    +    work done in the ``rfc3986`` module.
    +
    +    :param str url: URL to parse into a :class:`.Url` namedtuple.
     
         Partly backwards-compatible with :mod:`urlparse`.
     
    @@ -145,81 +358,77 @@ def parse_url(url):
             >>> parse_url('/foo?bar')
             Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
         """
    -
    -    # While this code has overlap with stdlib's urlparse, it is much
    -    # simplified for our needs and less annoying.
    -    # Additionally, this implementations does silly things to be optimal
    -    # on CPython.
    -
         if not url:
             # Empty
             return Url()
     
    -    scheme = None
    -    auth = None
    -    host = None
    -    port = None
    -    path = None
    -    fragment = None
    -    query = None
    +    source_url = url
    +    if not SCHEME_RE.search(url):
    +        url = "//" + url
     
    -    # Scheme
    -    if '://' in url:
    -        scheme, url = url.split('://', 1)
    +    try:
    +        scheme, authority, path, query, fragment = URI_RE.match(url).groups()
    +        normalize_uri = scheme is None or scheme.lower() in NORMALIZABLE_SCHEMES
     
    -    # Find the earliest Authority Terminator
    -    # (http://tools.ietf.org/html/rfc3986#section-3.2)
    -    url, path_, delim = split_first(url, ['/', '?', '#'])
    +        if scheme:
    +            scheme = scheme.lower()
     
    -    if delim:
    -        # Reassemble the path
    -        path = delim + path_
    -
    -    # Auth
    -    if '@' in url:
    -        # Last '@' denotes end of auth part
    -        auth, url = url.rsplit('@', 1)
    -
    -    # IPv6
    -    if url and url[0] == '[':
    -        host, url = url.split(']', 1)
    -        host += ']'
    -
    -    # Port
    -    if ':' in url:
    -        _host, port = url.split(':', 1)
    -
    -        if not host:
    -            host = _host
    -
    -        if port:
    -            # If given, ports must be integers. No whitespace, no plus or
    -            # minus prefixes, no non-integer digits such as ^2 (superscript).
    -            if not port.isdigit():
    -                raise LocationParseError(url)
    -            try:
    -                port = int(port)
    -            except ValueError:
    -                raise LocationParseError(url)
    +        if authority:
    +            auth, host, port = SUBAUTHORITY_RE.match(authority).groups()
    +            if auth and normalize_uri:
    +                auth = _encode_invalid_chars(auth, USERINFO_CHARS)
    +            if port == "":
    +                port = None
             else:
    -            # Blank ports are cool, too. (rfc3986#section-3.2.3)
    -            port = None
    +            auth, host, port = None, None, None
     
    -    elif not host and url:
    -        host = url
    +        if port is not None:
    +            port = int(port)
    +            if not (0 <= port <= 65535):
    +                raise LocationParseError(url)
     
    +        host = _normalize_host(host, scheme)
    +
    +        if normalize_uri and path:
    +            path = _remove_path_dot_segments(path)
    +            path = _encode_invalid_chars(path, PATH_CHARS)
    +        if normalize_uri and query:
    +            query = _encode_invalid_chars(query, QUERY_CHARS)
    +        if normalize_uri and fragment:
    +            fragment = _encode_invalid_chars(fragment, FRAGMENT_CHARS)
    +
    +    except (ValueError, AttributeError):
    +        return six.raise_from(LocationParseError(source_url), None)
    +
    +    # For the sake of backwards compatibility we put empty
    +    # string values for path if there are any defined values
    +    # beyond the path in the URL.
    +    # TODO: Remove this when we break backwards compatibility.
         if not path:
    -        return Url(scheme, auth, host, port, path, query, fragment)
    +        if query is not None or fragment is not None:
    +            path = ""
    +        else:
    +            path = None
     
    -    # Fragment
    -    if '#' in path:
    -        path, fragment = path.split('#', 1)
    +    # Ensure that each part of the URL is a `str` for
    +    # backwards compatibility.
    +    if isinstance(url, six.text_type):
    +        ensure_func = six.ensure_text
    +    else:
    +        ensure_func = six.ensure_str
     
    -    # Query
    -    if '?' in path:
    -        path, query = path.split('?', 1)
    +    def ensure_type(x):
    +        return x if x is None else ensure_func(x)
     
    -    return Url(scheme, auth, host, port, path, query, fragment)
    +    return Url(
    +        scheme=ensure_type(scheme),
    +        auth=ensure_type(auth),
    +        host=ensure_type(host),
    +        port=port,
    +        path=ensure_type(path),
    +        query=ensure_type(query),
    +        fragment=ensure_type(fragment),
    +    )
     
     
     def get_host(url):
    @@ -227,4 +436,4 @@ def get_host(url):
         Deprecated. Use :func:`parse_url` instead.
         """
         p = parse_url(url)
    -    return p.scheme or 'http', p.hostname, p.port
    +    return p.scheme or "http", p.hostname, p.port
    diff --git a/lib/urllib3/util/wait.py b/lib/urllib3/util/wait.py
    index cb396e50..d71d2fd7 100644
    --- a/lib/urllib3/util/wait.py
    +++ b/lib/urllib3/util/wait.py
    @@ -1,40 +1,153 @@
    -from .selectors import (
    -    HAS_SELECT,
    -    DefaultSelector,
    -    EVENT_READ,
    -    EVENT_WRITE
    -)
    +import errno
    +from functools import partial
    +import select
    +import sys
    +
    +try:
    +    from time import monotonic
    +except ImportError:
    +    from time import time as monotonic
    +
    +__all__ = ["NoWayToWaitForSocketError", "wait_for_read", "wait_for_write"]
     
     
    -def _wait_for_io_events(socks, events, timeout=None):
    -    """ Waits for IO events to be available from a list of sockets
    -    or optionally a single socket if passed in. Returns a list of
    -    sockets that can be interacted with immediately. """
    -    if not HAS_SELECT:
    -        raise ValueError('Platform does not have a selector')
    -    if not isinstance(socks, list):
    -        # Probably just a single socket.
    -        if hasattr(socks, "fileno"):
    -            socks = [socks]
    -        # Otherwise it might be a non-list iterable.
    +class NoWayToWaitForSocketError(Exception):
    +    pass
    +
    +
    +# How should we wait on sockets?
    +#
    +# There are two types of APIs you can use for waiting on sockets: the fancy
    +# modern stateful APIs like epoll/kqueue, and the older stateless APIs like
     +# select/poll. The stateful APIs are more efficient when you have a lot of
    +# sockets to keep track of, because you can set them up once and then use them
    +# lots of times. But we only ever want to wait on a single socket at a time
    +# and don't want to keep track of state, so the stateless APIs are actually
    +# more efficient. So we want to use select() or poll().
    +#
    +# Now, how do we choose between select() and poll()? On traditional Unixes,
    +# select() has a strange calling convention that makes it slow, or fail
    +# altogether, for high-numbered file descriptors. The point of poll() is to fix
    +# that, so on Unixes, we prefer poll().
    +#
    +# On Windows, there is no poll() (or at least Python doesn't provide a wrapper
    +# for it), but that's OK, because on Windows, select() doesn't have this
    +# strange calling convention; plain select() works fine.
    +#
    +# So: on Windows we use select(), and everywhere else we use poll(). We also
    +# fall back to select() in case poll() is somehow broken or missing.
    +
    +if sys.version_info >= (3, 5):
    +    # Modern Python, that retries syscalls by default
    +    def _retry_on_intr(fn, timeout):
    +        return fn(timeout)
    +
    +
    +else:
    +    # Old and broken Pythons.
    +    def _retry_on_intr(fn, timeout):
    +        if timeout is None:
    +            deadline = float("inf")
             else:
    -            socks = list(socks)
    -    with DefaultSelector() as selector:
    -        for sock in socks:
    -            selector.register(sock, events)
    -        return [key[0].fileobj for key in
    -                selector.select(timeout) if key[1] & events]
    +            deadline = monotonic() + timeout
    +
    +        while True:
    +            try:
    +                return fn(timeout)
    +            # OSError for 3 <= pyver < 3.5, select.error for pyver <= 2.7
    +            except (OSError, select.error) as e:
    +                # 'e.args[0]' incantation works for both OSError and select.error
    +                if e.args[0] != errno.EINTR:
    +                    raise
    +                else:
    +                    timeout = deadline - monotonic()
    +                    if timeout < 0:
    +                        timeout = 0
    +                    if timeout == float("inf"):
    +                        timeout = None
    +                    continue
     
     
    -def wait_for_read(socks, timeout=None):
    -    """ Waits for reading to be available from a list of sockets
    -    or optionally a single socket if passed in. Returns a list of
    -    sockets that can be read from immediately. """
    -    return _wait_for_io_events(socks, EVENT_READ, timeout)
    +def select_wait_for_socket(sock, read=False, write=False, timeout=None):
    +    if not read and not write:
    +        raise RuntimeError("must specify at least one of read=True, write=True")
    +    rcheck = []
    +    wcheck = []
    +    if read:
    +        rcheck.append(sock)
    +    if write:
    +        wcheck.append(sock)
    +    # When doing a non-blocking connect, most systems signal success by
    +    # marking the socket writable. Windows, though, signals success by marked
    +    # it as "exceptional". We paper over the difference by checking the write
    +    # sockets for both conditions. (The stdlib selectors module does the same
    +    # thing.)
    +    fn = partial(select.select, rcheck, wcheck, wcheck)
    +    rready, wready, xready = _retry_on_intr(fn, timeout)
    +    return bool(rready or wready or xready)
     
     
    -def wait_for_write(socks, timeout=None):
    -    """ Waits for writing to be available from a list of sockets
    -    or optionally a single socket if passed in. Returns a list of
    -    sockets that can be written to immediately. """
    -    return _wait_for_io_events(socks, EVENT_WRITE, timeout)
    +def poll_wait_for_socket(sock, read=False, write=False, timeout=None):
    +    if not read and not write:
    +        raise RuntimeError("must specify at least one of read=True, write=True")
    +    mask = 0
    +    if read:
    +        mask |= select.POLLIN
    +    if write:
    +        mask |= select.POLLOUT
    +    poll_obj = select.poll()
    +    poll_obj.register(sock, mask)
    +
    +    # For some reason, poll() takes timeout in milliseconds
    +    def do_poll(t):
    +        if t is not None:
    +            t *= 1000
    +        return poll_obj.poll(t)
    +
    +    return bool(_retry_on_intr(do_poll, timeout))
    +
    +
    +def null_wait_for_socket(*args, **kwargs):
    +    raise NoWayToWaitForSocketError("no select-equivalent available")
    +
    +
    +def _have_working_poll():
    +    # Apparently some systems have a select.poll that fails as soon as you try
    +    # to use it, either due to strange configuration or broken monkeypatching
    +    # from libraries like eventlet/greenlet.
    +    try:
    +        poll_obj = select.poll()
    +        _retry_on_intr(poll_obj.poll, 0)
    +    except (AttributeError, OSError):
    +        return False
    +    else:
    +        return True
    +
    +
    +def wait_for_socket(*args, **kwargs):
    +    # We delay choosing which implementation to use until the first time we're
    +    # called. We could do it at import time, but then we might make the wrong
    +    # decision if someone goes wild with monkeypatching select.poll after
    +    # we're imported.
    +    global wait_for_socket
    +    if _have_working_poll():
    +        wait_for_socket = poll_wait_for_socket
    +    elif hasattr(select, "select"):
    +        wait_for_socket = select_wait_for_socket
    +    else:  # Platform-specific: Appengine.
    +        wait_for_socket = null_wait_for_socket
    +    return wait_for_socket(*args, **kwargs)
    +
    +
    +def wait_for_read(sock, timeout=None):
    +    """ Waits for reading to be available on a given socket.
    +    Returns True if the socket is readable, or False if the timeout expired.
    +    """
    +    return wait_for_socket(sock, read=True, timeout=timeout)
    +
    +
    +def wait_for_write(sock, timeout=None):
    +    """ Waits for writing to be available on a given socket.
    +    Returns True if the socket is readable, or False if the timeout expired.
    +    """
    +    return wait_for_socket(sock, write=True, timeout=timeout)
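
     A sketch of the new single-socket API; socket.socketpair() is available
     everywhere on POSIX and on Windows since Python 3.5, and on Windows the
     select() fallback is chosen instead of poll():

         import socket
         from urllib3.util.wait import wait_for_read  # vendored path assumed

         a, b = socket.socketpair()
         print(wait_for_read(a, timeout=0.1))  # False: nothing buffered yet
         b.sendall(b"ping")
         print(wait_for_read(a, timeout=0.1))  # True: data is ready
         a.close()
         b.close()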
    diff --git a/platformcode/launcher.py b/platformcode/launcher.py
    index 140cb416..ee917c5c 100644
    --- a/platformcode/launcher.py
    +++ b/platformcode/launcher.py
    @@ -6,7 +6,7 @@
     import os
     import sys
     
    -from specials import resolverdns
    +
     import urllib2
     
     from core import channeltools
    @@ -35,6 +35,7 @@ def start():
          # if it has one: it is not allowed into the addon
          # if it has DNS problems it starts and still lets you in
          # if everything is ok: it enters the addon
    +    from specials import resolverdns
         from specials.checkhost import test_conn
         import threading
         threading.Thread(target=test_conn, args=(True, True, True, [], [], True)).start()
    @@ -270,20 +271,11 @@ def run(item=None):
                 elif item.action == "search":
                     logger.info("item.action=%s" % item.action.upper())
     
    -                last_search = ""
    -                last_search_active = config.get_setting("last_search", "search")
    -                if last_search_active:
    -                    try:
    -                        current_saved_searches_list = list(config.get_setting("saved_searches_list", "search"))
    -                        last_search = current_saved_searches_list[0]
    -                    except:
    -                        pass
    +                last_search = channeltools.get_channel_setting('Last_searched', 'search', '')
     
                     tecleado = platformtools.dialog_input(last_search)
                     if tecleado is not None:
    -                    if last_search_active and not tecleado.startswith("http"):
    -                        from specials import search
    -                        search.save_search(tecleado)
    +                    channeltools.set_channel_setting('Last_searched', tecleado, 'search')
     
                         if 'search' in dir(channel):
                             itemlist = channel.search(item, tecleado)
    diff --git a/platformcode/platformtools.py b/platformcode/platformtools.py
    index fdf97ce8..567c9dbc 100644
    --- a/platformcode/platformtools.py
    +++ b/platformcode/platformtools.py
    @@ -558,23 +558,27 @@ def set_context_commands(item, parent_item):
     
              # Search in other channels
             if item.contentType in ['movie', 'tvshow'] and item.channel != 'search':
                  # Search in other channels
                 if item.contentSerieName != '':
                     item.wanted = item.contentSerieName
                 else:
                     item.wanted = item.contentTitle
    -            context_commands.append((config.get_localized_string(60350),
    -                                     "XBMC.Container.Update (%s?%s)" % (sys.argv[0],
    -                                                                        item.clone(channel='search',
    -                                                                                   action="do_search",
    -                                                                                   from_channel=item.channel,
    -                                                                                   contextual=True).tourl())))
    +
                 if item.contentType == 'tvshow':
                     mediatype = 'tv'
                 else:
                     mediatype = item.contentType
    +            context_commands.append((config.get_localized_string(60350),
    +                                     "XBMC.Container.Update (%s?%s)" % (sys.argv[0],
    +                                                                        item.clone(channel='search',
    +                                                                                   action="from_context",
    +                                                                                   from_channel=item.channel,
    +                                                                                   contextual=True,
    +                                                                                   text=item.wanted).tourl())))
                 context_commands.append(("[B]%s[/B]" % config.get_localized_string(70561), "XBMC.Container.Update (%s?%s)" % (
    -            sys.argv[0], item.clone(channel='search', action='discover_list', search_type='list', page='1',
    +            sys.argv[0], item.clone(channel='search', action='from_context', search_type='list', page='1',
                                         list_type='%s/%s/similar' % (mediatype,item.infoLabels['tmdb_id'])).tourl())))
     
              # Set as start page
    diff --git a/platformcode/updater.py b/platformcode/updater.py
    index 1ccedfa3..89bc7ae1 100644
    --- a/platformcode/updater.py
    +++ b/platformcode/updater.py
    @@ -90,7 +90,7 @@ def check_addon_init():
                     localCommitFile.close()
                     c['sha'] = updateFromZip('Aggiornamento in corso...')
                          localCommitFile = open(addonDir + trackingFile, 'w')  # the tracking file has been deleted, recreate it
    -                changelog += commitJson['commit']['message'] + " | "
    +                changelog += commitJson['commit']['message'] + "\n"
                          nCommitApplied += 3  # the message will probably be long, so the display time is increased
                     break
     
    @@ -145,11 +145,10 @@ def check_addon_init():
                                 filetools.move(addonDir + file['previous_filename'], addonDir + file['filename'])
                                 alreadyApplied = False
              if not alreadyApplied:  # don't notify if already applied (e.g. zip downloaded from github)
    -                changelog += commitJson['commit']['message'] + " | "
    +                changelog += commitJson['commit']['message'] + "\n"
                     nCommitApplied += 1
             if addon.getSetting("addon_update_message"):
    -            time = nCommitApplied * 2000 if nCommitApplied < 10 else 20000
    -            platformtools.dialog_notification('Kodi on Demand', 'Aggiornamenti applicati:\n' + changelog[:-3], time)
    +            platformtools.dialog_ok('Kodi on Demand', 'Aggiornamenti applicati:\n' + changelog)
     
             localCommitFile.seek(0)
             localCommitFile.truncate()
    diff --git a/resources/language/English/strings.po b/resources/language/English/strings.po
    index 6cc290a8..ee933baf 100644
    --- a/resources/language/English/strings.po
    +++ b/resources/language/English/strings.po
    @@ -2715,7 +2715,7 @@ msgid "Global Search"
     msgstr ""
     
     msgctxt "#60673"
    -msgid "MultiThread Search"
    +msgid "Number of Search Threads"
     msgstr ""
     
     msgctxt "#60674"
    @@ -3839,7 +3839,7 @@ msgid "Add Torrent channels in search"
     msgstr ""
     
     msgctxt "#70276"
    -msgid "Search by title"
    +msgid "Search by Title"
     msgstr ""
     
     msgctxt "#70277"
    @@ -5681,6 +5681,26 @@ msgctxt "#70740"
     msgid "Your system does not have a web browser, so here's the short link you can open in another device:\n%s"
     msgstr ""
     
    +msgctxt "#70741"
    +msgid "Search %s"
    +msgstr ""
    +
    +msgctxt "#70742"
    +msgid "Movies by Year"
    +msgstr ""
    +
    +msgctxt "#70743"
    +msgid "Series by Year"
    +msgstr ""
    +
    +msgctxt "#70744"
    +msgid "%s channels remaining"
    +msgstr ""
    +
    +msgctxt "#70745"
    +msgid "Enter another year..."
    +msgstr ""
    +
     # DNS start [ settings and declaration ]
     msgctxt "#707401"
     msgid "Enable DNS Check Alert"
    diff --git a/resources/language/Italian/strings.po b/resources/language/Italian/strings.po
    index 45e35cd1..c9a879f2 100644
    --- a/resources/language/Italian/strings.po
    +++ b/resources/language/Italian/strings.po
    @@ -319,11 +319,11 @@ msgstr "Informazioni Persona"
     
     msgctxt "#30980"
     msgid "Search by Title"
    -msgstr "Cerca per Titolo ..."
    +msgstr "Cerca per Titolo"
     
     msgctxt "#30981"
     msgid "Search by Person"
    -msgstr "Cerca per Persona ..."
    +msgstr "Cerca per Persona"
     
     msgctxt "#30982"
     msgid "Search by Company"
    @@ -343,7 +343,7 @@ msgstr "Migliori"
     
     msgctxt "#30986"
     msgid "Search by Collection"
    -msgstr "Ricerca per Collezione ..."
    +msgstr "Ricerca per Collezione"
     
     msgctxt "#30987"
     msgid "Genre"
    @@ -351,15 +351,15 @@ msgstr "Genere"
     
     msgctxt "#30988"
     msgid "Search by Year"
    -msgstr "Ricerca per Anno ..."
    +msgstr "Ricerca per Anno"
     
     msgctxt "#30989"
     msgid "Search Similar Movies"
    -msgstr "Cerca Film Simili ..."
    +msgstr "Cerca Film Simili"
     
     msgctxt "#30990"
     msgid "Search TV show"
    -msgstr "Cerca Serie TV ..."
    +msgstr "Cerca Serie TV"
     
     msgctxt "#30991"
     msgid "Library"
    @@ -371,11 +371,11 @@ msgstr "Successivo"
     
     msgctxt "#30993"
     msgid "Looking for %s..."
    -msgstr "Ricerca di %s ..."
    +msgstr "Ricerca di %s..."
     
     msgctxt "#30994"
     msgid "Searching in %s..."
    -msgstr "Sto cercando in %s ..."
    +msgstr "Sto cercando in %s..."
     
     msgctxt "#30995"
     msgid "%d found so far: %s"
    @@ -2714,8 +2714,8 @@ msgid "Global Search"
     msgstr "Ricerca globale"
     
     msgctxt "#60673"
    -msgid "MultiThread Search"
    -msgstr "Ricerca MultiThread"
    +msgid "Number of Search Threads"
    +msgstr "Numero di Threads di Ricerca"
     
     msgctxt "#60674"
     msgid "Show Results:"
    @@ -3838,8 +3838,8 @@ msgid "Add Torrent channels in search"
     msgstr "Includere i canali Torrent nella ricerca"
     
     msgctxt "#70276"
    -msgid "Search by title"
    -msgstr "Cerca per titolo"
    +msgid "Search by Title"
    +msgstr "Cerca per Titolo"
     
     msgctxt "#70277"
     msgid "MediaServer Language (Restart Required)"
    @@ -3963,7 +3963,7 @@ msgstr "Film più valutati"
     
     msgctxt "#70309"
     msgid "Movies Now in Theatres "
    -msgstr "[Film] Ora in sala"
    +msgstr "Film Ora in sala"
     
     msgctxt "#70310"
     msgid "Series by Genre"
    @@ -3975,7 +3975,7 @@ msgstr "Serie più popolari"
     
     msgctxt "#70312"
     msgid "Series in progress"
    -msgstr "[Serie TV] In corso"
    +msgstr "Serie TV In corso"
     
     msgctxt "#70313"
     msgid "Top rated Series"
    @@ -5685,6 +5685,26 @@ msgctxt "#70740"
     msgid "Your system does not have a web browser, so here's the short link you can open in another device:\n%s"
     msgstr "Il tuo sistema non ha un browser, quindi ecco un link corto che puoi aprire in un altro dispositivo\n%s"
     
    +msgctxt "#70741"
    +msgid "Search %s"
    +msgstr "Cerca %s"
    +
    +msgctxt "#70742"
    +msgid "Movies by Year"
    +msgstr "Film per Anno"
    +
    +msgctxt "#70743"
    +msgid "Series by Year"
    +msgstr "Serie per Anno"
    +
    +msgctxt "#70744"
    +msgid "%s channels remaining"
    +msgstr "%s canali restanti"
    +
    +msgctxt "#70745"
    +msgid "Enter another year..."
    +msgstr "Inserisci un altro anno..."
    +
     # DNS start [ settings and declaration ]
     msgctxt "#707401"
     msgid "Enable DNS Check Alert"
    @@ -5745,4 +5765,5 @@ msgstr "Stai utilizzando i DNS impostati sul dispositivo o modem"
     msgctxt "#707415"
     msgid "You have not selected any choice, the cloudflare DNS will be used."
     msgstr "Non hai selezionato nessuna scelta!\nVerranno utilizzati i DNS di cloudflare."
    +
     # end DNS
    diff --git a/resources/media/themes/default/thumb_search_generic.png b/resources/media/themes/default/thumb_search_generic.png
    new file mode 100644
    index 00000000..6cc4ee98
    Binary files /dev/null and b/resources/media/themes/default/thumb_search_generic.png differ
    diff --git a/resources/media/themes/default/thumb_search_more.png b/resources/media/themes/default/thumb_search_more.png
    new file mode 100644
    index 00000000..6ab8b575
    Binary files /dev/null and b/resources/media/themes/default/thumb_search_more.png differ
    diff --git a/resources/media/themes/default/thumb_search_tvshow.png b/resources/media/themes/default/thumb_search_tvshow.png
    new file mode 100644
    index 00000000..062cb435
    Binary files /dev/null and b/resources/media/themes/default/thumb_search_tvshow.png differ
    diff --git a/resources/media/themes/default/thumb_years.png b/resources/media/themes/default/thumb_years.png
    new file mode 100644
    index 00000000..750a8055
    Binary files /dev/null and b/resources/media/themes/default/thumb_years.png differ
    diff --git a/servers/clipwatching.json b/servers/clipwatching.json
    index c30208df..c72359fa 100644
    --- a/servers/clipwatching.json
    +++ b/servers/clipwatching.json
    @@ -4,7 +4,7 @@
         "ignore_urls": [],
         "patterns": [
           {
    -        "pattern": "clipwatching.com/((?:embed-)?[a-zA-Z0-9./_-]+).html",
    +        "pattern": "clipwatching.com/((?:embed-)?[a-zA-Z0-9./_\\-\\[\\]\\(\\)]+).html",
             "url": "http://clipwatching.com/\\1.html"
           }
         ]
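
For reference, the widened character class now tolerates brackets and parentheses in the file slug, which the old class rejected. A standalone sketch of the match (the sample URL is made up):

    import re

    # Regex form of the JSON pattern above (JSON double-backslashes unescaped)
    pattern = r"clipwatching.com/((?:embed-)?[a-zA-Z0-9./_\-\[\]\(\)]+)\.html"

    # Hypothetical link with brackets/parens in the slug
    url = "http://clipwatching.com/embed-abc123_[ITA](2019).html"
    match = re.search(pattern, url)
    if match:
        # group(1) is re-wrapped by the "url" template into the canonical page URL
        print("http://clipwatching.com/%s.html" % match.group(1))
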
    diff --git a/servers/cloudvideo.json b/servers/cloudvideo.json
    index b1073973..67daa35b 100644
    --- a/servers/cloudvideo.json
    +++ b/servers/cloudvideo.json
    @@ -4,7 +4,7 @@
         "ignore_urls": [],
         "patterns": [
           {
    -        "pattern": "cloudvideo.tv/(?:embed-)?([a-z0-9]+).html",
    +        "pattern": "cloudvideo.tv/(?:embed-)?([a-z0-9]+)(?:.html)?",
             "url": "https://cloudvideo.tv/embed-\\1.html"
           }
         ]
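
The relaxed cloudvideo pattern also matches bare links without the .html suffix; the "url" template then normalizes everything to the embed form. A minimal sketch of how such a pattern/url pair rewrites a link (resolve_url is illustrative, not the addon's actual server loader):

    import re

    PATTERN = r"cloudvideo.tv/(?:embed-)?([a-z0-9]+)(?:\.html)?"
    URL_TEMPLATE = r"https://cloudvideo.tv/embed-\1.html"

    def resolve_url(link):
        # Illustrative: extract the video id and re-expand the template
        match = re.search(PATTERN, link)
        if match:
            return URL_TEMPLATE.replace(r'\1', match.group(1))
        return None

    print(resolve_url("https://cloudvideo.tv/abcd1234"))            # no .html suffix
    print(resolve_url("https://cloudvideo.tv/embed-abcd1234.html")) # already embed form
    # both print: https://cloudvideo.tv/embed-abcd1234.html
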
    diff --git a/servers/gounlimited.py b/servers/gounlimited.py
    index a03c1a96..bc240fd1 100644
    --- a/servers/gounlimited.py
    +++ b/servers/gounlimited.py
    @@ -14,20 +14,21 @@ from platformcode import logger
     def test_video_exists(page_url):
         data = httptools.downloadpage(page_url).data
         if data == "File was deleted":
    -        return False, "[gounlimited] El video ha sido borrado"
    +        return False, config.get_localized_string(70449) % "Go Unlimited"
         return True, ""
     
     
     def get_video_url(page_url, premium=False, user="", password="", video_password=""):
         logger.info("url=" + page_url)
         video_urls = []
    -    data = httptools.downloadpage(page_url).data
    +    data = httptools.downloadpage(page_url, use_requests=True, verify=False).data
     data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+    logger.info('GOUN DATA= ' + data)
 
     packed_data = scrapertools.find_single_match(data, "javascript'>(eval.*?)</script>")
     unpacked = jsunpack.unpack(packed_data)
-    patron = "sources..([^\]]+)"
+    patron = r"sources..([^\]]+)"
     matches = re.compile(patron, re.DOTALL).findall(unpacked)
     for url in matches:
         url += "|Referer=%s" % page_url
-        video_urls.append(['mp4', url])
+        video_urls.append(['mp4 [Go Unlimited]', url])
 
     return video_urls
diff --git a/servers/mixdrop.py b/servers/mixdrop.py
index bdc2e5e9..e679dfbc 100644
--- a/servers/mixdrop.py
+++ b/servers/mixdrop.py
@@ -26,7 +26,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     data = re.sub(r'>\s\s*<', '><', data)
     jsCode = scrapertoolsV2.find_single_match(data, r'<script>(eval.*?)</script>')
     jsUnpacked = jsunpack.unpack(jsCode)
-    url = "https://" + scrapertoolsV2.find_single_match(jsUnpacked, r'MDCore\.vsr(?:c)?="//([^"]+)')
+    url = "https://" + scrapertoolsV2.find_single_match(jsUnpacked, r'vsr[^=]*="(?:/)?(/[^"]+)')
 
     itemlist.append([".mp4 [MixDrop]", url])
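The looser `vsr[^=]*=` pattern is meant to survive MixDrop renaming the variable (vsr, vsrc, ...) and changing the leading slashes. A standalone check against two hypothetical unpacked snippets (get_video_url then prepends "https://" to whatever is captured):

    import re

    NEW_PATTERN = r'vsr[^=]*="(?:/)?(/[^"]+)'

    samples = [
        'MDCore.vsr="//s-delivery.mxcontent.net/v/abc.mp4";',  # hypothetical output of jsunpack
        'MDCore.vsrc="/v/def.mp4";',                           # hypothetical relative variant
    ]
    for js in samples:
        match = re.search(NEW_PATTERN, js)
        print(match.group(1) if match else None)
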
diff --git a/specials/community.py b/specials/community.py
index 48d66525..85aa5b71 100644
--- a/specials/community.py
+++ b/specials/community.py
@@ -97,6 +97,7 @@ def show_channels(item):
 def show_menu(item):
     global list_data
     itemlist = []
+    add_search = True
     support.log()
     # If Second Level Menu
     if item.menu:
@@ -127,11 +128,16 @@ def show_menu(item):
                                  filterkey=key if not url else ''
                                  ))
         if menu.has_key('search'):
+            if type(menu['search']) == dict and menu['search'].has_key('url'):
+                url = relative('url', menu['search'], item.path)
+            else:
+                url = ''
             itemlist.append(Item(channel=item.channel,
                                  title=typo('Cerca ' + item.fulltitle +'...','color kod bold'),
                                  thumbnail=get_thumb('search.png'),
                                  action='search',
                                  url=item.url,
+                                 custom_url=url,
                                  path=item.path))
         return itemlist
 
@@ -146,16 +152,28 @@ def show_menu(item):
             url = relative('link', option, item.path)
             submenu = option['submenu'] if option.has_key('submenu') else []
             level2 = option['level2'] if option.has_key('level2') else []
-            itemlist.append(Item(channel=item.channel,
-                                 title=format_title(option['title']),
-                                 fulltitle=option['title'],
-                                 thumbnail=thumbnail,
-                                 fanart=fanart,
-                                 plot=plot,
-                                 action='show_menu',
-                                 url=url,
-                                 path=item.path,
-                                 menu=level2))
+            if option.has_key('title'):
+                itemlist.append(Item(channel=item.channel,
+                                     title=format_title(option['title']),
+                                     fulltitle=option['title'],
+                                     thumbnail=thumbnail,
+                                     fanart=fanart,
+                                     plot=plot,
+                                     action='show_menu',
+                                     url=url,
+                                     path=item.path,
+                                     menu=level2))
+            if option.has_key('search'):
+                menu = json_data['menu']
+                if type(option['search']) == dict and option['search'].has_key('url'):
+                    url = relative('url', option['search'], item.path)
+                itemlist.append(Item(channel=item.channel,
+                                     title=typo('Cerca nel Canale...','color kod bold'),
+                                     thumbnail=get_thumb('search.png'),
+                                     action='search',
+                                     url=url,
+                                     path=item.path))
+                add_search = False
 
             if submenu:
                 for key in submenu:
@@ -177,6 +195,10 @@ def show_menu(item):
                                          action='submenu',
                                          filterkey=key))
                 if submenu.has_key('search'):
+                    if type(submenu['search']) == dict and submenu['search'].has_key('url'):
+                        url = relative('url', submenu['search'], item.path)
+                    else:
+                        url = ''
                     itemlist.append(Item(channel=item.channel,
                                          title=typo('Cerca ' + option['title'] +'...','color kod bold'),
                                          thumbnail=get_thumb('search.png'),
@@ -205,13 +227,13 @@ def show_menu(item):
         itemlist += list_all(item)
 
     # add Search
-    if 'channel_name' in json_data:
+    if 'channel_name' in json_data and add_search:
         itemlist.append(Item(channel=item.channel,
-                             title=typo('Cerca nel Canale...','color kod bold'),
-                             thumbnail=get_thumb('search.png'),
-                             action='search',
-                             url=item.url,
-                             path=item.path))
+                             title=typo('Cerca nel Canale...','color kod bold'),
+                             thumbnail=get_thumb('search.png'),
+                             action='search',
+                             url=item.url,
+                             path=item.path))
 
     return itemlist
 
@@ -245,6 +267,7 @@ def submenu(item):
     thumbnail = ''
     plot = ''
     if item.filterkey in ['director','actors']:
+        tmdb.set_infoLabels(itemlist, seekTmdb=True)
         load_info = load_json('http://api.themoviedb.org/3/search/person/?api_key=' + tmdb_api + '&language=' + lang + '&query=' + filter)
         id = str(load_info['results'][0]['id']) if load_info.has_key('results') else ''
         if id:
@@ -655,7 +678,9 @@ def add_channel(item):
     community_json = open(path, "r")
     community_json = jsontools.load(community_json.read())
 
-    id = len(community_json['channels']) + 1
+    id = 1
+    while community_json['channels'].has_key(str(id)):
+        id += 1
     community_json['channels'][id]=(channel_to_add)
 
     with open(path, "w") as file:
@@ -732,6 +757,8 @@ def format_title(title):
 
 def search(item, text):
     support.log('Search ', text)
+    if item.custom_url:
+        item.url = item.custom_url + text
     itemlist = []
     json_data = load_json(item)
 
@@ -782,8 +809,9 @@ def load_links(item, itemlist, json_data, text):
 
     if json_data.has_key('menu'):
         for option in json_data['menu']:
-            json_data = load_json(option['link'] if option['link'].startswith('http') else item.path+option['link'])
-            load_links(item, itemlist, json_data, text)
+            if option.has_key('link'):
+                json_data = load_json(option['link'] if option['link'].startswith('http') else item.path+option['link'])
+                load_links(item, itemlist, json_data, text)
     else:
         links(item, itemlist, json_data, text)
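
The add_channel change replaces len()+1 with a scan for the first free key, so IDs are no longer reused (and overwritten) after a deletion in the middle of the dict. The same idea in isolation:

    def first_free_id(channels):
        # channels: dict keyed by stringified integers, e.g. {"1": {...}, "3": {...}}
        id = 1
        while str(id) in channels:
            id += 1
        return id

    print(first_free_id({"1": {}, "2": {}, "4": {}}))  # -> 3, the gap left by a deleted entry
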
diff --git a/specials/resolverdns.py b/specials/resolverdns.py
index 92770ca2..e502a7ee 100644
--- a/specials/resolverdns.py
+++ b/specials/resolverdns.py
@@ -2,29 +2,25 @@
 # -*- OVERRIDE RESOLVE DNS -*-
 
 from platformcode import config
-from core import support
 
-##if config.get_setting('resolver_dns') or config.get_setting('resolver_dns_custom'):
 if config.get_setting('resolver_dns'):
+    import xbmc
     from lib import dns
-    import dns.resolver
+    from dns import resolver, name
    from dns.resolver import override_system_resolver
-    import dns.name
-    import socket
-    import requests
-
-    res = dns.resolver.Resolver(configure=True)
-
-    """
-    legge le impostazioni dalla configurazione
-    e setta i relativi DNS
-    """
+    from core import support
+    support.log("platform Android: {}".format(xbmc.getCondVisibility('System.Platform.Android')))
+    if xbmc.getCondVisibility('System.Platform.Android') == True:
+        res = resolver.Resolver(filename='/system/etc/resolv.conf', configure=True)
+        #res = resolver.Resolver(filename='/system/etc/dhcpcd/dhcpcd-hooks/20-dns.conf', configure=True)
+    else:
+        res = resolver.Resolver(configure=True)
+    # read the DNS settings from the configuration and apply them
     if config.get_setting('resolver_dns_custom') and not config.get_setting('resolver_dns_service_choose'):
         res.nameservers = [config.get_setting('resolver_dns_custom1'),config.get_setting('resolver_dns_custom2')]
     else:
         nameservers_dns = config.get_setting('resolver_dns_service')
-        # config.get_setting('resolver_dns_service_choose') == true
 
         if nameservers_dns == 1:# 'Google'
             res.nameservers = ['8.8.8.8', '2001:4860:4860::8888', '8.8.4.4', '2001:4860:4860::8844']
@@ -39,6 +35,6 @@ if config.get_setting('resolver_dns'):
             res.nameservers = ['1.1.1.1', '2606:4700:4700::1111', '1.0.0.1', '2606:4700:4700::1001']
 
     # log di verifica dei DNS impostati, d'aiuto quando gli utenti smanettano...
-    support.log("NAME SERVER2: {}".format(res.nameservers))
+    support.log("NAME SERVER: {}".format(res.nameservers))
 
     override_system_resolver(res)
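
For context, dnspython's override_system_resolver monkey-patches socket name resolution to route lookups through the configured Resolver, which is why setting res.nameservers here affects every subsequent httptools request. A minimal sketch against a plain dnspython install (outside Kodi, so no xbmc/config; server choice mirrors the addon's Cloudflare fallback):

    import socket
    from dns import resolver
    from dns.resolver import override_system_resolver, restore_system_resolver

    res = resolver.Resolver(configure=True)   # start from the system configuration
    res.nameservers = ['1.1.1.1', '1.0.0.1']  # Cloudflare, as in the addon's default branch

    override_system_resolver(res)             # socket lookups now use these servers
    try:
        print(socket.gethostbyname('example.com'))
    finally:
        restore_system_resolver()             # undo the monkey-patch
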
diff --git a/specials/search.json b/specials/search.json
index fc48e3c8..62d846ec 100644
--- a/specials/search.json
+++ b/specials/search.json
@@ -1,44 +1,20 @@
 {
-  "id": "search",
-  "name": "@60672",
-  "active": false,
-  "adult": false,
-  "language": ["ita"],
-  "categories": ["movie"],
-  "settings": [
-    {
-      "id": "multithread",
-      "type": "bool",
-      "label": "@60673",
-      "default": true,
-      "enabled": true,
-      "visible": true
-    },
-    {
-      "id": "result_mode",
-      "type": "list",
-      "label": "@60674",
-      "default": 0,
-      "enabled": true,
-      "visible": true,
-      "lvalues": ["@60675","@60676"]
-    },
-    {
-      "id": "saved_searches_limit",
-      "type": "list",
-      "label": "@60677",
-      "default": 0,
-      "enabled": true,
-      "visible": true,
-      "lvalues": ["10","20","30","40"]
-    },
-    {
-      "id": "last_search",
-      "type": "bool",
-      "label": "@60678",
-      "default": true,
-      "enabled": true,
-      "visible": true
-    }
-  ]
+  "id": "search",
+  "name": "search",
+  "active": false,
+  "adult": false,
+  "thumbnail": "",
+  "banner": "",
+  "categories": [],
+  "settings": [
+    {
+      "id": "thread_number",
+      "type": "list",
+      "label": "@60673",
+      "default": 0,
+      "enabled": true,
+      "visible": true,
+      "lvalues": ["auto","1","2","4","6","8","16","24","32","64"]
+    }
+  ]
 }
\ No newline at end of file
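
Note that a Kodi list setting stores the index into lvalues, not the value itself, so 0 ("auto") must be mapped to an unbounded pool. The search channel's set_workers (further down in specials/search.py) does exactly this; the mapping in isolation:

    # Index -> worker count, mirroring lvalues ["auto","1","2","4","6","8","16","24","32","64"]
    WORKERS = [None, 1, 2, 4, 6, 8, 16, 24, 32, 64]

    def workers_from_setting(index):
        # None lets ThreadPoolExecutor pick its own default ("auto")
        return WORKERS[index]

    assert workers_from_setting(0) is None
    assert workers_from_setting(3) == 4
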
diff --git a/specials/search.py b/specials/search.py
index b7d14d4a..2aa55a2c 100755
--- a/specials/search.py
+++ b/specials/search.py
@@ -1,160 +1,374 @@
 # -*- coding: utf-8 -*-
+# -*- Channel New Search -*-
+# -*- Created for Alfa-addon -*-
+# -*- By the Alfa Develop Group -*-
 
-import glob
-import os
-import re
-import time
-from threading import Thread
-
-import xbmcaddon
-
-from channelselector import get_thumb, auto_filter
-from core import channeltools
-from core import scrapertools
-from core import tmdb
+import os, json, time, inspect, channelselector
+from lib.concurrent import futures
 from core.item import Item
-from platformcode import config, logger
-from platformcode import platformtools
+from core import tmdb, scrapertools, channeltools, filetools, jsontools
+from channelselector import get_thumb
+from platformcode import logger, config, platformtools, unify
 from core.support import typo
+import gc
+gc.disable()
+
+import xbmcaddon
 
 addon = xbmcaddon.Addon('metadata.themoviedb.org')
 def_lang = addon.getSetting('language')
 
-link_list = []
-max_links = 30
-
-
 def mainlist(item):
     logger.info()
-    item.channel = "search"
-    itemlist = []
-    context = [{"title": config.get_localized_string(60412), "action": "setting_channel", "channel": item.channel}]
-    itemlist.append(Item(channel=item.channel, action="sub_menu", title="[B]" + config.get_localized_string(70305)+ "[/B]", context=context,
+    itemlist = list()
+
+    itemlist.append(Item(channel=item.channel, title=config.get_localized_string(70276), action='new_search', mode='all',
                          thumbnail=get_thumb("search.png")))
-    itemlist.append(Item(channel=item.channel, action='genres_menu', title=config.get_localized_string(70306), type='movie',
-                         thumbnail=get_thumb("genres.png")))
-    itemlist.append (Item(channel=item.channel, action='discover_list', title=config.get_localized_string(70307),
-                          context=context, search_type='list', list_type='movie/popular',
-                          thumbnail=get_thumb("popular.png")))
-    itemlist.append(Item(channel=item.channel, action='discover_list', title=config.get_localized_string(70308),
-                         context=context, search_type='list', list_type='movie/top_rated',
-                         thumbnail=get_thumb("top_rated.png")))
-    #itemlist.append(Item(channel=item.channel, action='discover_list', title=config.get_localized_string(70309), context=context,
-    #                     search_type='list', list_type='movie/now_playing',
-    #                     thumbnail=get_thumb("now_playing.png")))
-    itemlist.append(Item(channel=item.channel, action='genres_menu', title=config.get_localized_string(70310), type='tv',
-                         thumbnail=get_thumb("genres.png")))
-    itemlist.append(
-        Item(channel=item.channel, action='discover_list', title=config.get_localized_string(70311), context=context,
-             search_type='list',list_type='tv/popular', thumbnail=get_thumb("popular.png")))
-    #itemlist.append(Item(channel=item.channel, action='discover_list', title=config.get_localized_string(70312), context=context,
-    #                     search_type='list', list_type='tv/on_the_air', thumbnail=get_thumb("on_the_air.png")))
-    itemlist.append(Item(channel=item.channel, action='discover_list', title=config.get_localized_string(70313), context=context,
-                         search_type='list', list_type='tv/top_rated', thumbnail=get_thumb("top_rated.png")))
+    itemlist.append(Item(channel=item.channel, title=config.get_localized_string(70741) % config.get_localized_string(30122), action='new_search', mode='movie',
+                         thumbnail=get_thumb("search_movie.png")))
+
+    itemlist.append(Item(channel=item.channel, title=config.get_localized_string(70741) % config.get_localized_string(30123), action='new_search', mode='tvshow',
+                         thumbnail=get_thumb("search_tvshow.png")))
+
+    itemlist.append(Item(channel=item.channel, title=config.get_localized_string(70741) % config.get_localized_string(70314), action='new_search',
+                         page=1, mode='person', thumbnail=get_thumb("search_star.png")))
+
+    itemlist.append(Item(channel=item.channel, title=config.get_localized_string(60420), action='sub_menu',
+                         thumbnail=get_thumb('search.png')))
+
+    itemlist.append(Item(channel=item.channel, title=config.get_localized_string(59994), action='opciones',
+                         thumbnail=get_thumb('setting_0.png')))
+
+    itemlist.append(Item(channel=item.channel, title=config.get_localized_string(60415), action='settings',
+                         thumbnail=get_thumb('setting_0.png')))
+
+    itemlist = set_context(itemlist)
 
     return itemlist
 
-def genres_menu(item):
-    from channelselector import thumb
-
-    itemlist = []
-
-    genres = tmdb.get_genres(item.type)
-
-    logger.debug(genres)
-    logger.debug(genres[item.type])
-
-    for key, value in genres[item.type].items():
-        itemlist.append(item.clone(title=value, action='discover_list', search_type='discover',
-                                   list_type=key, page='1'))
-    return thumb(sorted(itemlist, key=lambda it: it.title), genre=True)
-
 
 def sub_menu(item):
     logger.info()
-    item.channel = "search"
     itemlist = list()
 
-    context = [{"title": config.get_localized_string(70273),
-                "action": "setting_channel",
-                "channel": item.channel}]
-    itemlist.append(Item(channel=item.channel, action="search",
-                         title=config.get_localized_string(30980), context=context,
-                         thumbnail=get_thumb("search.png")))
-    thumbnail = get_thumb("search_star.png")
+    itemlist.append(Item(channel=item.channel, action='genres_menu', title=config.get_localized_string(70306),
+                         mode='movie', thumbnail=get_thumb("channels_movie_genre.png")))
 
-    itemlist.append(Item(channel='tvmoviedb', title=config.get_localized_string(70036), action="search_",
-                         search={'url': 'search/person', 'language': def_lang, 'page': 1}, star=True,
-                         thumbnail=thumbnail))
+    itemlist.append(Item(channel=item.channel, action='years_menu', title=config.get_localized_string(70742),
+                         mode='movie', thumbnail=get_thumb("channels_movie_year.png")))
 
-    itemlist.append(Item(channel=item.channel, action="search",
-                         title=config.get_localized_string(59998), extra="categorias",
-                         context=context,
-                         thumbnail=get_thumb("search.png")))
-    itemlist.append(Item(channel=item.channel, action="opciones", title=config.get_localized_string(59997),
-                         thumbnail=get_thumb("search.png")))
+    itemlist.append(Item(channel=item.channel, action='discover_list', title=config.get_localized_string(70307),
+                         search_type='list', list_type='movie/popular', mode='movie',
+                         thumbnail=get_thumb("channels_movie_popular.png")))
+
+    itemlist.append(Item(channel=item.channel, action='discover_list', title=config.get_localized_string(70308),
+                         search_type='list', list_type='movie/top_rated', mode='movie',
+                         thumbnail=get_thumb("channels_movie_top.png")))
+
+    itemlist.append(Item(channel=item.channel, action='discover_list', title=config.get_localized_string(70309),
+                         search_type='list', list_type='movie/now_playing', mode='movie',
+                         thumbnail=get_thumb("channels_movie_now_playing.png")))
+
+    itemlist.append(Item(channel=item.channel, action='genres_menu', title=config.get_localized_string(70310),
+                         mode='tvshow', thumbnail=get_thumb("channels_tvshow_genre.png")))
+
+    itemlist.append(Item(channel=item.channel, action='years_menu', title=config.get_localized_string(70743),
+                         mode='tvshow', thumbnail=get_thumb("channels_tvshow_year.png")))
+
+    itemlist.append(Item(channel=item.channel, action='discover_list', title=config.get_localized_string(70311),
+                         search_type='list', list_type='tv/popular', mode='tvshow',
+                         thumbnail=get_thumb("popular.png")))
+
+    itemlist.append(Item(channel=item.channel, action='discover_list', title=config.get_localized_string(70312),
+                         search_type='list', list_type='tv/on_the_air', mode='tvshow',
+                         thumbnail=get_thumb("channels_tvshow_on_the_air.png")))
+
+    itemlist.append(Item(channel=item.channel, action='discover_list', title=config.get_localized_string(70313),
+                         search_type='list', list_type='tv/top_rated', mode='tvshow',
+                         thumbnail=get_thumb("channels_tvshow_top.png")))
 
     itemlist.append(Item(channel="tvmoviedb", action="mainlist", title=config.get_localized_string(70274),
                          thumbnail=get_thumb("search.png")))
 
-    saved_searches_list = get_saved_searches()
-    context2 = context[:]
-    context2.append({"title": config.get_localized_string(59996),
-                     "action": "clear_saved_searches",
-                     "channel": item.channel})
-    logger.info("saved_searches_list=%s" % saved_searches_list)
-
-    if saved_searches_list:
-        itemlist.append(Item(channel=item.channel, action="",
-                             title=config.get_localized_string(59995), context=context2,
-                             thumbnail=get_thumb("search.png")))
-        for saved_search_text in saved_searches_list:
-            itemlist.append(Item(channel=item.channel, action="do_search",
-                                 title=' "' + saved_search_text + '"',
-                                 extra=saved_search_text, context=context2,
-                                 category=saved_search_text,
-                                 thumbnail=get_thumb("search.png")))
+    itemlist = set_context(itemlist)
 
     return itemlist
 
 
+def new_search(item):
+    logger.info()
+
+    itemlist = []
+
+    last_search = channeltools.get_channel_setting('Last_searched', 'search', '')
+    searched_text = platformtools.dialog_input(default=last_search, heading='')
+
+    if not searched_text:
+        return
+
+    channeltools.set_channel_setting('Last_searched', searched_text, 'search')
+    searched_text = searched_text.replace("+", " ")
+
+    if item.mode == 'person':
+        item.searched_text = searched_text
+        return actor_list(item)
+
+    if item.mode != 'all':
+        tmdb_info = tmdb.Tmdb(texto_buscado=searched_text, tipo=item.mode.replace('show', ''))
+        results = tmdb_info.results
+        for result in results:
+            result = tmdb_info.get_infoLabels(result, origen=result)
+            if item.mode == 'movie':
+                title = result['title']
+            else:
+                title = result['name']
+                item.mode = 'tvshow'
+
+            thumbnail = result.get('thumbnail', '')
+            fanart = result.get('fanart', '')
+
+            new_item = Item(channel=item.channel,
+                            action='channel_search',
+                            title=title,
+                            text=searched_text,
+                            thumbnail=thumbnail,
+                            fanart=fanart,
+                            mode=item.mode,
+                            infoLabels=result)
+
+            if item.mode == 'movie':
+                new_item.contentTitle = result['title']
+            else:
+                new_item.contentSerieName = result['name']
+
+            itemlist.append(new_item)
+
+    if item.mode == 'all' or not itemlist:
+        itemlist = channel_search(Item(channel=item.channel,
+                                       title=searched_text,
+                                       text=searched_text,
+                                       mode='all',
+                                       infoLabels={}))
+
+    return itemlist
+
+
+def channel_search(item):
+    logger.info(item)
+
+    start = time.time()
+    searching = list()
+    searching_titles = list()
+    results = list()
+    valid = list()
+    ch_list = dict()
+    mode = item.mode
+    max_results = 10
+    if item.infoLabels['title']:
+        item.text = item.infoLabels['title']
+
+    searched_id = item.infoLabels['tmdb_id']
+
+    channel_list, channel_titles = get_channels(item)
+
+    from lib import cloudscraper
+    session = cloudscraper.create_scraper()
+
+    searching += channel_list
+    searching_titles += channel_titles
+    cnt = 0
+
+    progress = platformtools.dialog_progress(config.get_localized_string(30993) % item.title, config.get_localized_string(70744) % len(channel_list),
+                                             str(searching_titles))
+    config.set_setting('tmdb_active', False)
+
+    with futures.ThreadPoolExecutor(max_workers=set_workers()) as executor:
+        c_results = [executor.submit(get_channel_results, ch, item, session) for ch in channel_list]
+
+        for res in futures.as_completed(c_results):
+            cnt += 1
+            finished = res.result()[0]
+            if res.result()[1]:
+                ch_list[res.result()[0]] = res.result()[1]
+
+            if progress.iscanceled():
+                break
+            if finished in searching:
+                searching_titles.remove(searching_titles[searching.index(finished)])
+                searching.remove(finished)
+                progress.update((cnt * 100) / len(channel_list), config.get_localized_string(70744) % str(len(channel_list) - cnt),
+                                str(searching_titles))
+
+    progress.close()
+
+    cnt = 0
+    progress = platformtools.dialog_progress(config.get_localized_string(30993) % item.title, config.get_localized_string(60295),
+                                             config.get_localized_string(60293))
+
+    config.set_setting('tmdb_active', True)
+    res_count = 0
+    for key, value in ch_list.items():
+        ch_name = channel_titles[channel_list.index(key)]
+        grouped = list()
+        cnt += 1
+        progress.update((cnt * 100) / len(ch_list), config.get_localized_string(60295), config.get_localized_string(60293))
+        if len(value) <= max_results and item.mode != 'all':
+            if len(value) == 1:
+                if not value[0].action or config.get_localized_string(70006).lower() in value[0].title.lower():
+                    continue
+            tmdb.set_infoLabels_itemlist(value, True, forced=True)
+            for elem in value:
+                if not elem.infoLabels.get('year', ""):
+                    elem.infoLabels['year'] = '-'
+                    tmdb.set_infoLabels_item(elem, True)
+
+                if elem.infoLabels['tmdb_id'] == searched_id:
+                    elem.from_channel = key
+                    if not config.get_setting('unify'):
+                        elem.title += ' [%s]' % key
+                    valid.append(elem)
+
+        for it in value:
+            if it.channel == item.channel:
+                it.channel = key
+            if it in valid:
+                continue
+            if mode == 'all' or (it.contentType and mode == it.contentType):
+                grouped.append(it)
+            elif (mode == 'movie' and it.contentTitle) or (mode == 'tvshow' and (it.contentSerieName or it.show)):
+                grouped.append(it)
+            else:
+                continue
+
+        if not grouped:
+            continue
+        # to_temp[key] = grouped
+
+        if not config.get_setting('unify'):
+            title = typo(ch_name,'bold') + typo(str(len(grouped)), '_ [] color kod bold')
+        else:
+            title = typo('%s %s' % (len(grouped), config.get_localized_string(70695)), 'bold')
+        res_count += len(grouped)
+        plot = ''
+        for it in grouped:
+            plot += it.title + '\n'
+        ch_thumb = channeltools.get_channel_parameters(key)['thumbnail']
+        results.append(Item(channel='search', title=title,
+                            action='get_from_temp', thumbnail=ch_thumb, itemlist=[ris.tourl() for ris in grouped], plot=plot, page=1))
+
+    results = sorted(results, key=lambda it: it.from_channel)
+
+    # send_to_temp(to_temp)
+    config.set_setting('tmdb_active', True)
+    if item.mode == 'all':
+        results_statistic = config.get_localized_string(59972) % (item.title, res_count, time.time() - start)
+        results.insert(0, Item(title=typo(results_statistic,'color kod bold')))
+        # logger.debug(results_statistic)
+
+    return valid + results
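
channel_search above replaces the old hand-rolled Thread bookkeeping with a futures pool: one submit per channel, results consumed in completion order so the progress dialog can tick as each channel finishes. The pattern in isolation, with a stubbed worker instead of get_channel_results (the addon bundles this API as lib.concurrent for Python 2):

    from concurrent import futures

    def fake_channel_search(name):
        # Stand-in for get_channel_results(ch, item, session)
        return name, ['result-from-%s' % name]

    channels = ['channel_a', 'channel_b', 'channel_c']  # hypothetical channel ids
    collected = {}

    with futures.ThreadPoolExecutor(max_workers=4) as executor:
        pending = [executor.submit(fake_channel_search, ch) for ch in channels]
        for done in futures.as_completed(pending):
            name, results = done.result()
            if results:
                collected[name] = results
            # a real caller would update the progress dialog here

    print(sorted(collected))
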
+
+
+def get_channel_results(ch, item, session):
+    max_results = 10
+    results = list()
+
+    ch_params = channeltools.get_channel_parameters(ch)
+
+    exec "from channels import " + ch_params["channel"] + " as module"
+
+    mainlist = module.mainlist(Item(channel=ch_params["channel"]))
+    search_action = [elem for elem in mainlist if elem.action == "search" and (item.mode == 'all' or elem.contentType == item.mode)]
+
+    if search_action:
+        for search_ in search_action:
+            search_.session = session
+            try:
+                results.extend(module.search(search_, item.text))
+            except:
+                pass
+    else:
+        try:
+            results.extend(module.search(item, item.text))
+        except:
+            pass
+
+    if len(results) > 0 and len(results) < max_results and item.mode != 'all':
+
+        if len(results) == 1:
+            if not results[0].action or config.get_localized_string(30992).lower() in results[0].title.lower():
+                return [ch, []]
+
+        results = get_info(results)
+
+    return [ch, results]
+
+
+def get_info(itemlist):
+    logger.info()
+    tmdb.set_infoLabels_itemlist(itemlist, True, forced=True)
+
+    return itemlist
+
+
+def get_channels(item):
+    logger.info()
+
+    channels_list = list()
+    title_list = list()
+    all_channels = channelselector.filterchannels('all')
+
+    for ch in all_channels:
+        channel = ch.channel
+        ch_param = channeltools.get_channel_parameters(channel)
+        if not ch_param.get("active", False):
+            continue
+        list_cat = ch_param.get("categories", [])
+
+        if not ch_param.get("include_in_global_search", False):
+            continue
+
+        if 'anime' in list_cat:
+            n = list_cat.index('anime')
+            list_cat[n] = 'tvshow'
+
+        if item.mode == 'all' or (item.mode in list_cat):
+            if config.get_setting("include_in_global_search", channel):
+                channels_list.append(channel)
+                title_list.append(ch_param.get('title', channel))
+
+    return channels_list, title_list
+
+
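get_channel_results pulls each channel module in with a Python 2 exec statement. The equivalent, easier-to-audit spelling uses importlib (not what the addon ships, just the same lookup expressed differently):

    import importlib

    def load_channel(name):
        # Equivalent of: exec "from channels import <name> as module"
        return importlib.import_module('channels.%s' % name)

    # module = load_channel('cinemalibero')   # hypothetical usage inside KoD
    # itemlist = module.mainlist(Item(channel='cinemalibero'))
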
 def opciones(item):
-    itemlist = list()
-    itemlist.append(Item(channel=item.channel, action="setting_channel",
-                         title=config.get_localized_string(59994), folder=False,
-                         thumbnail=get_thumb("search.png")))
-    itemlist.append(Item(channel=item.channel, action="clear_saved_searches", title=config.get_localized_string(59996),
-                         folder=False, thumbnail=get_thumb("search.png")))
-    itemlist.append(Item(channel=item.channel, action="settings", title=config.get_localized_string(60531), folder=False,
-                         thumbnail=get_thumb("search.png")))
-    return itemlist
-
+    return setting_channel_new(item)
 
 def settings(item):
     return platformtools.show_channel_settings(caption=config.get_localized_string(59993))
 
-
-def setting_channel(item):
-    if config.get_platform(True)['num_version'] >= 17.0:  # A partir de Kodi 16 se puede usar multiselect, y de 17 con preselect
-        return setting_channel_new(item)
-    else:
-        return setting_channel_old(item)
+def set_workers():
+    list_mode = [None, 1, 2, 4, 6, 8, 16, 24, 32, 64]
+    index = config.get_setting('thread_number', 'search')
+    return list_mode[index]
 
 
 def setting_channel_new(item):
-    import channelselector, xbmcgui
-    from core import channeltools
+    import xbmcgui
 
-    # Cargar lista de opciones (canales activos del usuario y que permitan búsqueda global)
-    # ------------------------
-    lista = []; ids = []; lista_lang = []; lista_ctgs = []
+    # Load list of options (active user channels that allow global search)
+    lista = []
+    ids = []
+    lista_lang = []
+    lista_ctgs = []
     channels_list = channelselector.filterchannels('all')
     for channel in channels_list:
+        if channel.action == '':
+            continue
+
         channel_parameters = channeltools.get_channel_parameters(channel.channel)
 
-        # No incluir si en la configuracion del canal no existe "include_in_global_search"
+        # Do not include if "include_in_global_search" does not exist in the channel configuration
         if not channel_parameters['include_in_global_search']:
             continue
@@ -162,47 +376,48 @@ def setting_channel_new(item):
             lbl += ' %s' % ', '.join(config.get_localized_category(categ) for categ in channel_parameters['categories'])
 
         it = xbmcgui.ListItem(channel.title, lbl)
-        it.setArt({ 'thumb': channel.thumbnail, 'fanart': channel.fanart })
+        it.setArt({'thumb': channel.thumbnail, 'fanart': channel.fanart})
         lista.append(it)
         ids.append(channel.channel)
         lista_lang.append(channel_parameters['language'])
         lista_ctgs.append(channel_parameters['categories'])
 
-    # Diálogo para pre-seleccionar
-    # ----------------------------
+    # Pre-select dialog
     preselecciones = [
-        config.get_localized_string(70570),
-        config.get_localized_string(70571),
-        config.get_localized_string(70572),
-        config.get_localized_string(70573),
-        # config.get_localized_string(70574),
-        # config.get_localized_string(70575),
-        config.get_localized_string(70576)
+        config.get_localized_string(70570),
+        config.get_localized_string(70571),
+        # 'Modificar partiendo de Recomendados',
+        # 'Modificar partiendo de Frecuentes',
+        config.get_localized_string(70572),
+        config.get_localized_string(70573),
+        # 'Modificar partiendo de Castellano',
+        # 'Modificar partiendo de Latino'
     ]
-    presel_values = ['skip', 'actual', 'all', 'none', 'cast', 'lat', 'ita']
+    # presel_values = ['skip', 'actual', 'recom', 'freq', 'all', 'none', 'cast', 'lat']
+    presel_values = ['skip', 'actual', 'all', 'none']
 
     categs = ['movie', 'tvshow', 'documentary', 'anime', 'vos', 'direct', 'torrent']
-    if config.get_setting('adult_mode') > 0: categs.append('adult')
+    if config.get_setting('adult_mode') > 0:
+        categs.append('adult')
     for c in categs:
         preselecciones.append(config.get_localized_string(70577) + config.get_localized_category(c))
         presel_values.append(c)
 
     if item.action == 'setting_channel':  # Configuración de los canales incluídos en la búsqueda
         del preselecciones[0]
         del presel_values[0]
-    #else: # Llamada desde "buscar en otros canales" (se puede saltar la selección e ir directo a la búsqueda)
-
+    # else:  # Call from "search on other channels" (you can skip the selection and go directly to the search)
+
     ret = platformtools.dialog_select(config.get_localized_string(59994), preselecciones)
-    if ret == -1: return False  # pedido cancel
-    if presel_values[ret] == 'skip': return True  # continuar sin modificar
-    elif presel_values[ret] == 'none': preselect = []
-    elif presel_values[ret] == 'all': preselect = range(len(ids))
-    elif presel_values[ret] in ['cast', 'lat']:
+    if ret == -1:
+        return False  # order cancel
+    if presel_values[ret] == 'skip':
+        return True  # continue unmodified
+    elif presel_values[ret] == 'none':
         preselect = []
-        for i, lg in enumerate(lista_lang):
-            if presel_values[ret] in lg or '*' in lg:
-                preselect.append(i)
-    elif presel_values[ret] in ['ita']:
+    elif presel_values[ret] == 'all':
+        preselect = range(len(ids))
+    elif presel_values[ret] in ['cast', 'lat']:
         preselect = []
         for i, lg in enumerate(lista_lang):
             if presel_values[ret] in lg or '*' in lg:
@@ -213,23 +428,37 @@ def setting_channel_new(item):
                 channel_status = config.get_setting('include_in_global_search', canal)
                 if channel_status:
                     preselect.append(i)
+
+    elif presel_values[ret] == 'recom':
+        preselect = []
+        for i, canal in enumerate(ids):
+            _not, set_canal_list = channeltools.get_channel_controls_settings(canal)
+            if set_canal_list.get('include_in_global_search', False):
+                preselect.append(i)
+
+    elif presel_values[ret] == 'freq':
+        preselect = []
+        for i, canal in enumerate(ids):
+            frequency = channeltools.get_channel_setting('frequency', canal, 0)
+            if frequency > 0:
+                preselect.append(i)
     else:
         preselect = []
         for i, ctgs in enumerate(lista_ctgs):
             if presel_values[ret] in ctgs:
                 preselect.append(i)
 
-    # Diálogo para seleccionar
-    # ------------------------
+    # Dialog to select
     ret = xbmcgui.Dialog().multiselect(config.get_localized_string(59994), lista, preselect=preselect, useDetails=True)
-    if ret == None: return False  # pedido cancel
+    if not ret:
+        return False  # order cancel
     seleccionados = [ids[i] for i in ret]
 
-    # Guardar cambios en canales para la búsqueda
-    # -------------------------------------------
+    # Save changes to search channels
     for canal in ids:
         channel_status = config.get_setting('include_in_global_search', canal)
-        if channel_status is None: channel_status = True
+        # if not channel_status:
+        #     channel_status = True
 
         if channel_status and canal not in seleccionados:
             config.set_setting('include_in_global_search', False, canal)
@@ -238,544 +467,230 @@ def setting_channel_new(item):
     return True
 
 
-def setting_channel_old(item):
-    channels_path = os.path.join(config.get_runtime_path(), "channels", '*.json')
-    # channel_language = config.get_setting("channel_language", default="all")
-    channel_language = auto_filter()
-    list_controls = []
-    for infile in sorted(glob.glob(channels_path)):
-        channel_name = os.path.basename(infile)[:-5]
-        channel_parameters = channeltools.get_channel_parameters(channel_name)
+def genres_menu(item):
+    itemlist = []
+    mode = item.mode.replace('show', '')
 
-        # No incluir si es un canal inactivo
-        if not channel_parameters["active"]:
-            continue
+    genres = tmdb.get_genres(mode)
+    for key, value in genres[mode].items():
+        discovery = {'url': 'discover/%s' % mode, 'with_genres': key,
+                     'language': def_lang, 'page': '1'}
 
-        # No incluir si es un canal para adultos, y el modo adulto está desactivado
-        if channel_parameters["adult"] and config.get_setting("adult_mode") == 0:
-            continue
-
-        # No incluir si el canal es en un idioma filtrado
-        if channel_language != "all" and channel_language not in str(channel_parameters["language"]) \
-                and "*" not in channel_parameters["language"]:
-            continue
-
-        # No incluir si en la configuracion del canal no existe "include_in_global_search"
-        include_in_global_search = channel_parameters["include_in_global_search"]
-
-        if not include_in_global_search:
-            continue
-        else:
-            # Se busca en la configuración del canal el valor guardado
-            include_in_global_search = config.get_setting("include_in_global_search", channel_name)
-
-        control = {'id': channel_name,
-                   'type': "bool",
-                   'label': channel_parameters["title"],
-                   'default': include_in_global_search,
-                   'enabled': True,
-                   'visible': True}
-
-        list_controls.append(control)
-
-    if config.get_setting("custom_button_value", item.channel):
-        custom_button_label = config.get_localized_string(59992)
-    else:
-        custom_button_label = config.get_localized_string(59991)
-
-    return platformtools.show_channel_settings(list_controls=list_controls,
-                                               caption=config.get_localized_string(59990),
-                                               callback="save_settings", item=item,
-                                               custom_button={'visible': True,
-                                                              'function': "cb_custom_button",
-                                                              'close': False,
-                                                              'label': custom_button_label})
+        itemlist.append(Item(channel=item.channel, title=typo(value, 'bold'), page=1,
+                             action='discover_list', discovery=discovery,
+                             mode=item.mode))
+    channelselector.thumb(itemlist)
+    return sorted(itemlist, key=lambda it: it.title)
 
 
-def save_settings(item, dict_values):
-    progreso = platformtools.dialog_progress(config.get_localized_string(59988), config.get_localized_string(59989))
-    n = len(dict_values)
-    for i, v in enumerate(dict_values):
-        progreso.update((i * 100) / n, config.get_localized_string(59988))
-        config.set_setting("include_in_global_search", dict_values[v], v)
-
-    progreso.close()
-    return True
-
-
-def cb_custom_button(item, dict_values):
-    value = config.get_setting("custom_button_value", item.channel)
-    if value == "":
-        value = False
-
-    for v in dict_values.keys():
-        dict_values[v] = not value
-
-    if config.set_setting("custom_button_value", not value, item.channel) == True:
-        return {"label": config.get_localized_string(59992)}
-    else:
-        return {"label": config.get_localized_string(59991)}
-
-
-def searchbycat(item):
-    # Only in xbmc/kodi
-    # Abre un cuadro de dialogo con las categorías en las que hacer la búsqueda
-
-    categories = [config.get_localized_string(30122), config.get_localized_string(30123), config.get_localized_string(30124), config.get_localized_string(30125), config.get_localized_string(59975), config.get_localized_string(59976)]
-    categories_id = ["movie", "tvshow", "anime", "documentary", "vos", "latino"]
-    list_controls = []
-    for i, category in enumerate(categories):
-        control = {'id': categories_id[i],
-                   'type': "bool",
-                   'label': category,
-                   'default': False,
-                   'enabled': True,
-                   'visible': True}
-
-        list_controls.append(control)
-    control = {'id': "separador",
-               'type': "label",
-               'label': '',
-               'default': "",
-               'enabled': True,
-               'visible': True}
-    list_controls.append(control)
-    control = {'id': "torrent",
-               'type': "bool",
-               'label': config.get_localized_string(70275),
-               'default': True,
-               'enabled': True,
-               'visible': True}
-    list_controls.append(control)
-
-    return platformtools.show_channel_settings(list_controls=list_controls, caption=config.get_localized_string(59974),
-                                               callback="search_cb", item=item)
-
-
-def search_cb(item, values=""):
-    cat = []
-    for c in values:
-        if values[c]:
-            cat.append(c)
-
-    if not len(cat):
-        return None
-    else:
-        logger.info(item.tostring())
-        logger.info(str(cat))
-        return do_search(item, cat)
-
-
-# Al llamar a esta función, el sistema pedirá primero el texto a buscar
-# y lo pasará en el parámetro "tecleado"
-def search(item, tecleado):
-    logger.info()
-    tecleado = tecleado.replace("+", " ")
-    item.category = tecleado
-
-    if tecleado != "":
-        save_search(tecleado)
-
-    if item.extra == "categorias":
-        item.extra = tecleado
-        itemlist = searchbycat(item)
-    else:
-        item.extra = tecleado
-        itemlist = do_search(item, [])
-
-    return itemlist
-
-
-def show_result(item):
-    tecleado = None
-    if item.adult and config.get_setting("adult_request_password"):
-        # Solicitar contraseña
-        tecleado = platformtools.dialog_input("", config.get_localized_string(60334), True)
-        if tecleado is None or tecleado != config.get_setting("adult_password"):
-            return []
-
-    item.channel = item.__dict__.pop('from_channel')
-    item.action = item.__dict__.pop('from_action')
-    if item.__dict__.has_key('tecleado'):
-        tecleado = item.__dict__.pop('tecleado')
-
-    # try:
-    #     channel = __import__('channels.%s' % item.channel, fromlist=["channels.%s" % item.channel])
-    # except:
-    #     import traceback
-    #     logger.error(traceback.format_exc())
-    #     return []
-
-    if tecleado:
-        # Mostrar resultados: agrupados por canales
-        return [Item().fromurl(i) for i in item.itemlist]
-        # return channel.search(item, tecleado)
-    else:
-        # Mostrar resultados: todos juntos
-        if item.infoPlus:  #Si viene de una ventana de InfoPlus, hay que salir de esta forma...
-            del item.infoPlus  #si no, se mete en un bucle mostrando la misma pantalla,
-            item.title = item.title.strip()  #dando error en "handle -1"
-            return getattr(channel, item.action)(item)
-        try:
-            from platformcode import launcher
-            launcher.run(item)
-        except ImportError:
-            return getattr(channel, item.action)(item)
-
-
-def channel_search(search_results, channel_parameters, tecleado, session):
-    try:
-        exec("from channels import " + channel_parameters["channel"] + " as module")
-        mainlist = module.mainlist(Item(channel=channel_parameters["channel"]))
-        search_items = [item for item in mainlist if item.action == "search"]
-        if not search_items:
-            search_items = [Item(channel=channel_parameters["channel"], action="search")]
-
-        for item in search_items:
-            item.session = session
-            result = module.search(item.clone(), tecleado)
-            if result is None:
-                result = []
-            if len(result):
-                if not channel_parameters["title"].capitalize() in search_results:
-                    search_results[channel_parameters["title"].capitalize()] = []
-                search_results[channel_parameters['title'].capitalize()].append({"item": item,
-                                                                                 "itemlist": result,
-                                                                                 "thumbnail": channel_parameters["thumbnail"],
-                                                                                 "adult": channel_parameters["adult"]})
-
-    except:
-        logger.error("No se puede buscar en: %s" % channel_parameters["title"])
-        import traceback
-        logger.error(traceback.format_exc())
-
-
-# Esta es la función que realmente realiza la búsqueda
-def do_search(item, categories=None):
-    logger.info("blaa categorias %s" % categories)
-
-    if item.contextual==True:
-        categories = ["Películas"]
-        setting_item = Item(channel=item.channel, title=config.get_localized_string(59994), folder=False,
-                            thumbnail=get_thumb("search.png"))
-        if not setting_channel(setting_item):
-            return False
-
-    if categories is None:
-        categories = []
-
-    multithread = config.get_setting("multithread", "search")
-    result_mode = config.get_setting("result_mode", "search")
-
-    if item.wanted!='':
-        tecleado=item.wanted
-    else:
-        tecleado = item.extra
-
+def years_menu(item):
+    import datetime
     itemlist = []
-    channels_path = os.path.join(config.get_runtime_path(), "channels", '*.json')
-    logger.info("channels_path=%s" % channels_path)
+    mode = item.mode.replace('show', '')
 
-    # channel_language = config.get_setting("channel_language", default="all")
-    channel_language = auto_filter()
-    logger.info("channel_language=%s" % channel_language)
+    par_year = 'primary_release_year'
+    thumb = channelselector.get_thumb('channels_movie_year.png')
 
-    # Para Kodi es necesario esperar antes de cargar el progreso, de lo contrario
-    # el cuadro de progreso queda "detras" del cuadro "cargando..." y no se le puede dar a cancelar
-    time.sleep(0.5)
-    progreso = platformtools.dialog_progress(config.get_localized_string(30993) % tecleado, "")
-    channel_files = sorted(glob.glob(channels_path), key=lambda x: os.path.basename(x))
-    import math
+    if mode != 'movie':
+        par_year = 'first_air_date_year'
+        thumb = channelselector.get_thumb('channels_tvshow_year.png')
 
-    threads = []
-    search_results = {}
-    start_time = time.time()
-    list_channels_search = []
+    c_year = datetime.datetime.now().year + 1
+    l_year = c_year - 31
 
-    # Extrae solo los canales a buscar
-    for index, infile in enumerate(channel_files):
-        try:
-            basename = os.path.basename(infile)
-            basename_without_extension = basename[:-5]
-            logger.info("%s..." % basename_without_extension)
+    for year in range(l_year, c_year):
+        discovery = {'url': 'discover/%s' % mode, 'page': '1',
+                     '%s' % par_year: '%s' % year,
+                     'sort_by': 'popularity.desc', 'language': def_lang}
 
-            channel_parameters = channeltools.get_channel_parameters(basename_without_extension)
-
-            # No busca si es un canal inactivo
-            if not channel_parameters["active"]:
-                logger.info("%s -no activo-" % basename_without_extension)
-                continue
-
-            # En caso de búsqueda por categorias
-            if categories:
-
-                # Si no se ha seleccionado torrent no se muestra
-                #if "torrent" not in categories and "infoPlus" not in categories:
-                #    if "torrent" in channel_parameters["categories"]:
-                #        logger.info("%s -torrent-" % basename_without_extension)
-                #        continue
-
-                for cat in categories:
-                    if cat not in channel_parameters["categories"]:
-                        logger.info("%s -no en %s-" % (basename_without_extension, cat))
-                        continue
-
-            # No busca si es un canal para adultos, y el modo adulto está desactivado
-            if channel_parameters["adult"] and config.get_setting("adult_mode") == 0:
-                logger.info("%s -adulto-" % basename_without_extension)
-                continue
-
-            # No busca si el canal es en un idioma filtrado
-            if channel_language != "all" and channel_language not in str(channel_parameters["language"]) \
-                    and "*" not in channel_parameters["language"]:
-                logger.info("%s -idioma no válido-" % basename_without_extension)
-                continue
-
-            # No busca si es un canal excluido de la búsqueda global
-            include_in_global_search = channel_parameters["include_in_global_search"]
-            if include_in_global_search:
-                # Buscar en la configuracion del canal
-                include_in_global_search = config.get_setting("include_in_global_search", basename_without_extension)
-
-            if not include_in_global_search:
-                logger.info("%s -no incluido en lista a buscar-" % basename_without_extension)
-                continue
-            list_channels_search.append(infile)
-        except:
-            logger.error("No se puede buscar en: %s" % channel_parameters["title"])
-            import traceback
-            logger.error(traceback.format_exc())
-            continue
+        itemlist.append(Item(channel=item.channel, title=typo(str(year), 'bold'), action='discover_list',
+                             discovery=discovery, mode=item.mode, year_=str(year), thumbnail=thumb))
 
-    from lib import cloudscraper
-    session = cloudscraper.create_scraper()
-
-    for index, infile in enumerate(list_channels_search):
-        try:
-            # fix float porque la division se hace mal en python 2.x
-            percentage = int(float((index+1))/len(list_channels_search)*float(100))
-            basename = os.path.basename(infile)
-            basename_without_extension = basename[:-5]
-            logger.info("%s..." % basename_without_extension)
-            channel_parameters = channeltools.get_channel_parameters(basename_without_extension)
-            # Movido aqui el progreso, para que muestre el canal exacto que está buscando
-            progreso.update(percentage,
-                            config.get_localized_string(60520) % (channel_parameters["title"]))
-            # Modo Multi Thread
-            if progreso.iscanceled():
-                progreso.close()
-                logger.info("Búsqueda cancelada")
-                return itemlist
-            if multithread:
-                t = Thread(target=channel_search, args=[search_results, channel_parameters, tecleado, session],
-                           name=channel_parameters["title"])
-                t.setDaemon(True)
-                t.start()
-                threads.append(t)
-            # Modo single Thread
-            else:
-                logger.info("Intentado búsqueda en %s de %s " % (basename_without_extension, tecleado))
-                channel_search(search_results, channel_parameters, tecleado, session)
-        except:
-            logger.error("No se puede buscar en: %s" % channel_parameters["title"])
-            import traceback
-            logger.error(traceback.format_exc())
-            continue
-
-    # Modo Multi Thread
-    # Usando isAlive() no es necesario try-except,
-    # ya que esta funcion (a diferencia de is_alive())
-    # es compatible tanto con versiones antiguas de python como nuevas
-    if multithread:
-        pendent = [a for a in threads if a.isAlive()]
-        if len(pendent) > 0: t = float(100) / len(pendent)
-        while len(pendent) > 0:
-            index = (len(threads) - len(pendent)) + 1
-            percentage = int(math.ceil(index * t))
-
-            list_pendent_names = [a.getName() for a in pendent]
-            mensaje = config.get_localized_string(70282) % (", ".join(list_pendent_names))
-            progreso.update(percentage, config.get_localized_string(60521) % (len(threads) - len(pendent) + 1, len(threads)),
-                            mensaje)
-            if progreso.iscanceled():
-                logger.info("Búsqueda cancelada")
-                break
-            time.sleep(0.5)
-            pendent = [a for a in threads if a.isAlive()]
-    total = 0
-    for channel in sorted(search_results.keys()):
-        for element in search_results[channel]:
-            total += len(element["itemlist"])
-            title = channel
-            # resultados agrupados por canales
-            if item.contextual == True or item.action == 'search_tmdb':
-                result_mode = 1
-            if result_mode == 0:
-                if len(search_results[channel]) > 1:
-                    title += " -%s" % element["item"].title.strip()
-                title = re.sub(r"\[[^\]]+\]|•", "", title)
-                title = typo(title,'bold') + typo("%02d" % len(element["itemlist"]),'_ [] color kod bold')
-                plot = config.get_localized_string(60491) + '\n' + typo('','submenu')+ '\n'
-                for i in element["itemlist"]:
-                    if type(i) == Item:
-                        plot += re.sub(r'\[(?:/)?B\]','', i.title) + '\n'
-                itemlist.append(Item(title=title, channel="search", action="show_result", url=element["item"].url,
-                                     extra=element["item"].extra, folder=True, adult=element["adult"], plot=plot,
-                                     thumbnail=element["thumbnail"], itemlist=[e.tourl() for e in element["itemlist"]],
-                                     from_action="search", from_channel=element["item"].channel, tecleado=tecleado))
-            # todos los resultados juntos, en la misma lista
-            else:
-                title = config.get_localized_string(70697) % channel
-                itemlist.append(Item(title=title, channel="search", action="",
-                                     folder=False, text_bold=True, from_channel=channel))
-                for i in element["itemlist"]:
-                    if i.action:
-                        title = "    " + i.title
-                        if "infoPlus" in categories:  #Se manrca vi viene de una ventana de InfoPlus
-                            i.infoPlus = True
-                        itemlist.append(i.clone(title=title, from_action=i.action, from_channel=i.channel,
-                                                channel="search", action="show_result", adult=element["adult"]))
-    title = config.get_localized_string(59972) % (
-        tecleado, total, time.time() - start_time)
-    itemlist.insert(0, Item(title=typo(title, 'bold color kod')))
-    progreso.close()
-    #Para opcion Buscar en otros canales
-    if item.contextual == True:
-        return exact_results(itemlist, tecleado)
-    else:
-        return itemlist
-
-
-def exact_results(results, wanted):
-    logger.info()
-    itemlist =[]
-
-    for item in results:
-        if item.action=='':
-            channel=item.from_channel
-        if item.action != '' and item.contentTitle==wanted:
-            item.title = typo(item.title,'bold') + typo(channel,'_ [] color kod bold')  #'%s [%s]' % (item.title, channel)
-            itemlist.append(item)
+    itemlist.reverse()
+    itemlist.append(Item(channel=item.channel, title=typo(config.get_localized_string(70745),'color kod bold'), url='',
+                         action="year_cus", mode=item.mode, par_year=par_year))
 
     return itemlist
 
 
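genres_menu and years_menu above both reduce to building a TMDB discover query dict that tmdb.discovery later turns into an API call. Assembled by hand, the year menus' dict looks like this (the language value is an assumption; the parameter names follow the TMDB discover API):

    import datetime

    def year_discovery(mode, year, def_lang='it'):
        # mode is 'movie' or 'tv'; TV uses first_air_date_year per the TMDB discover API
        par_year = 'primary_release_year' if mode == 'movie' else 'first_air_date_year'
        return {'url': 'discover/%s' % mode, 'page': '1',
                par_year: str(year),
                'sort_by': 'popularity.desc', 'language': def_lang}

    current = datetime.datetime.now().year
    print(year_discovery('movie', current))
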
-def save_search(text):
-    saved_searches_limit = int((10, 20, 30, 40,)[int(config.get_setting("saved_searches_limit", "search"))])
+def year_cus(item):
+    mode = item.mode.replace('show', '')
 
-    current_saved_searches_list = config.get_setting("saved_searches_list", "search")
-    if current_saved_searches_list is None:
-        saved_searches_list = []
-    else:
-        saved_searches_list = list(current_saved_searches_list)
-
-    if text in saved_searches_list:
-        saved_searches_list.remove(text)
-
-    saved_searches_list.insert(0, text)
-
-    config.set_setting("saved_searches_list", saved_searches_list[:saved_searches_limit], "search")
+    heading = config.get_localized_string(70042)
+    year = platformtools.dialog_numeric(0, heading, default="")
+    item.discovery = {'url': 'discover/%s' % mode, 'page': '1',
+                      '%s' % item.par_year: '%s' % year,
+                      'sort_by': 'popularity.desc', 'language': def_lang}
+    item.action = "discover_list"
+    if year and len(year) == 4:
+        return discover_list(item)
 
 
-def clear_saved_searches(item):
-    config.set_setting("saved_searches_list", list(), "search")
-    platformtools.dialog_ok(config.get_localized_string(60329), config.get_localized_string(60424))
+def actor_list(item):
+    itemlist = []
+    dict_ = {'url': 'search/person', 'language': def_lang, 'query': item.searched_text, 'page': item.page}
 
-
-def get_saved_searches():
-    current_saved_searches_list = config.get_setting("saved_searches_list", "search")
-    if current_saved_searches_list is None:
-        saved_searches_list = []
-    else:
-        saved_searches_list = list(current_saved_searches_list)
+    prof = {'Acting': 'Actor', 'Directing': 'Director', 'Production': 'Productor'}
+    plot = ''
+    item.search_type = 'person'
 
-    return saved_searches_list
+    tmdb_inf = tmdb.discovery(item, dict_=dict_)
+    results = tmdb_inf.results
+
+    if not results:
+        return results
+
+    for elem in results:
+        name = elem.get('name', '')
+        if not name:
+            continue
+
+        rol = elem.get('known_for_department', '')
+        rol = prof.get(rol, rol)
+        # genero = elem.get('gender', 0)
+        # if genero == 1 and rol in prof:
+        #     rol += 'a'
+        #     rol = rol.replace('Actora', 'Actriz')
+
+        know_for = elem.get('known_for', '')
+        cast_id = elem.get('id', '')
+        if know_for:
+            t_k = know_for[0].get('title', '')
+            if t_k:
+                plot = '%s in %s' % (rol, t_k)
+
+        thumbnail = 'http://image.tmdb.org/t/p/original%s' % elem.get('profile_path', '')
+        title = typo(name,'bold')+typo(rol,'_ [] color kod bold')
+
+        discovery = {'url': 'person/%s/combined_credits' % cast_id, 'page': '1',
+                     'sort_by': 'primary_release_date.desc', 'language': def_lang}
+
+        itemlist.append(Item(channel=item.channel, title=title, action='discover_list', cast_='cast',
+                             discovery=discovery, thumbnail=thumbnail, plot=plot, page=1))
+
+    if len(results) > 19:
+        next_ = item.page + 1
+        itemlist.append(Item(channel=item.channel, title=typo(config.get_localized_string(30992),'bold color kod'), action='actor_list',
+                             page=next_, thumbnail=thumbnail,
+                             searched_text=item.searched_text))
+    return itemlist
 
 
 def discover_list(item):
-    from platformcode import unify
+    import datetime
     itemlist = []
-    result = tmdb.discovery(item)
-
+    year = 0
+    tmdb_inf = tmdb.discovery(item, dict_=item.discovery, cast=item.cast_)
+    result = tmdb_inf.results
     tvshow = False
-    logger.debug(item)
-
     for elem in result:
-        elem['tmdb_id']=elem['id']
+        elem = tmdb_inf.get_infoLabels(elem, origen=elem)
         if 'title' in elem:
             title = unify.normalize(elem['title']).capitalize()
-            elem['year'] = scrapertools.find_single_match(elem['release_date'], r'(\d{4})-\d+-\d+')
         else:
             title = unify.normalize(elem['name']).capitalize()
             tvshow = True
+        elem['tmdb_id'] = elem['id']
 
-        new_item = Item(channel='search', title=typo(title, 'bold'), infoLabels=elem, action='do_search', extra=title,
-                        category=config.get_localized_string(70695), context ='')
+        mode = item.mode or elem['media_type']
+        thumbnail = elem.get('thumbnail', '')
+        fanart = elem.get('fanart', '')
 
-        if tvshow:
-            new_item.contentSerieName = title
-        else:
-            new_item.contentTitle = title
+        if item.cast_:
+            release = elem.get('release_date', '0000') or elem.get('first_air_date', '0000')
+            year = scrapertools.find_single_match(release, r'(\d{4})')
 
-        itemlist.append(new_item)
+        if not item.cast_ or (item.cast_ and (int(year) <= int(datetime.datetime.today().year))):
+            new_item = Item(channel='search', title=typo(title, 'bold'), infoLabels=elem,
+                            action='channel_search', text=title,
+                            thumbnail=thumbnail, fanart=fanart,
+                            context='', mode=mode,
+                            release_date=year)
 
-    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+            if tvshow:
+                new_item.contentSerieName = title
+            else:
+                new_item.contentTitle = title
 
-    if item.page != '' and len(itemlist)>0:
-        next_page = str(int(item.page)+1)
-        #if not 'similar' in item.list_type:
-        #    itemlist.append(item.clone(title='Pagina Siguente', page=next_page))
-        #else:
-        itemlist.append(Item(channel=item.channel, action='discover_list', title=typo(config.get_localized_string(30992), 'color kod bold'),
-                             search_type=item.search_type, list_type=item.list_type, type=item.type, page=next_page))
+            itemlist.append(new_item)
+
+    itemlist = set_context(itemlist)
+
+    if item.cast_:
+        itemlist.sort(key=lambda it: int(it.release_date), reverse=True)
+        return itemlist
+
+    elif len(result) > 19 and item.discovery:
+        item.discovery['page'] = str(int(item.discovery['page']) + 1)
+        itemlist.append(Item(channel=item.channel, action='discover_list', title=config.get_localized_string(70065),
+                             list_type=item.list_type, discovery=item.discovery, text_color='gold'))
+    elif len(result) > 19:
+        next_page = str(int(item.page) + 1)
+
+        itemlist.append(Item(channel=item.channel, action='discover_list', title=config.get_localized_string(70065),
+                             list_type=item.list_type, search_type=item.search_type, mode=item.mode, page=next_page,
+                             text_color='gold'))
 
     return itemlist
 
 
-def search_tmdb(item):
-    logger.debug(item)
-    itemlist = []
-    threads = []
-    logger.debug(item)
-    wanted = item.contentTitle
-
-    search = do_search(item)
-
-    if item.contentSerieName == '':
-        results = exact_results(search, wanted)
-        for result in results:
-            logger.debug(result)
-            t = Thread(target=get_links, args=[result])
-            t.start()
-            threads.append(t)
-
-        for thread in threads:
-            thread.join()
-
-        # try:
-        #     get_links(result)
-        # except:
-        #     pass
-
-        for link in link_list:
-            if link.action == 'play' and not 'trailer' in link.title.lower() and len(itemlist) < max_links:
-                itemlist.append(link)
-
-        return sorted(itemlist, key=lambda it: it.server)
-    else:
-        for item in search:
-            if item.contentSerieName != '' and item.contentSerieName == wanted:
-                logger.debug(item)
-                itemlist.append(item)
-        return itemlist
-
-
-def get_links (item):
+def from_context(item):
     logger.info()
-    results =[]
-    channel = __import__('channels.%s' % item.from_channel, None, None, ["channels.%s" % item.from_channel])
-    if len(link_list) <= max_links:
-        link_list.extend(getattr(channel, item.from_action)(item))
+
+    select = setting_channel_new(item)
+
+    if not select:
+        return
+
+    if 'infoLabels' in item and 'mediatype' in item.infoLabels:
+        item.mode = item.infoLabels['mediatype']
+    else:
+        return
+
+    if 'list_type' not in item:
+        if 'wanted' in item:
+            item.title = item.wanted
+        return channel_search(item)
+
+    return discover_list(item)
+
+
+def set_context(itemlist):
+    logger.info()
+
+    for elem in itemlist:
+        elem.context = [{"title": config.get_localized_string(60412),
+                         "action": "setting_channel_new",
+                         "channel": "search"},
+                        {"title": config.get_localized_string(60415),
+                         "action": "settings",
+                         "channel": "search"}]
+
+    return itemlist
+
+
+def get_from_temp(item):
+    logger.info()
+
+    n = 30
+    nTotal = len(item.itemlist)
+    nextp = n * item.page
+    prevp = n * (item.page - 1)
+
+    results = [Item().fromurl(elem) for elem in item.itemlist[prevp:nextp]]
+
+    if nextp < nTotal:
+        results.append(Item(channel='search', title=typo(config.get_localized_string(30992),'bold color kod'),
+                            action='get_from_temp', itemlist=item.itemlist, page=item.page + 1))
+
+    tmdb.set_infoLabels_itemlist(results, True)
+    for elem in results:
+        if not elem.infoLabels.get('year', ""):
+            elem.infoLabels['year'] = '-'
+            tmdb.set_infoLabels_item(elem, True)
+
+    return results
\ No newline at end of file